Mirror of https://github.com/ansible-collections/community.general.git
(synced 2026-04-29 01:46:53 +00:00)

Compare commits: 53 commits
Commit SHAs:

331d2c7651
b35a262378
7d400663b6
0d0884b069
dd400e8c21
a60f9bc78b
47714ecf79
d15ed4135b
bd61228e40
26d7c28b33
2e533daffa
6c50119eab
bc3435b993
370f5d8082
e77c5413c9
800ee1bae0
8de8d21062
81e71b5034
44ce63ed85
a3c9c688b9
a332ed4429
91571f8bff
43856eaa6f
ae87b5479a
42cd462780
d871378574
983b292399
6831aa5501
2d8a94a459
f721e76fdc
3eadb9d637
033582b696
974997594f
fa8ce6dea8
1d90e91528
a90e2c8002
c506375f2a
4def9439bd
023654473b
a216f15dd9
f613983cb4
c22199794d
24b1d92e84
4bc44e4062
06fd6d8742
dd0ae4a003
646ca74810
d60c107818
ef2d14f24e
b3cde9b8a4
dc4222df0d
b9a89d6d0f
f48913d91b
.azure-pipelines/azure-pipelines.yml

@@ -13,7 +13,7 @@ pr:
      - stable-*

schedules:
  - cron: 0 8 * * *
  - cron: 0 9 * * *
    displayName: Nightly
    always: true
    branches:
@@ -36,7 +36,7 @@ variables:

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:1.9.0
      image: quay.io/ansible/azure-pipelines-test-container:1.8.0

pool: Standard

@@ -56,19 +56,6 @@ stages:
            - test: 3
            - test: 4
            - test: extra
  - stage: Sanity_2_11
    displayName: Sanity 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.11/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_10
    displayName: Sanity 2.10
    dependsOn: []
@@ -112,22 +99,6 @@ stages:
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_11
    displayName: Units 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.11/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
  - stage: Units_2_10
    displayName: Units 2.10
    dependsOn: []
@@ -175,33 +146,14 @@ stages:
              test: rhel/7.9
            - name: RHEL 8.3
              test: rhel/8.3
            - name: FreeBSD 11.4
              test: freebsd/11.4
            - name: FreeBSD 12.2
              test: freebsd/12.2
            - name: FreeBSD 13.0
              test: freebsd/13.0
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_11
    displayName: Remote 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.11/{0}
          targets:
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 7.9
              test: rhel/7.9
            - name: RHEL 8.3
              test: rhel/8.3
            - name: FreeBSD 12.2
              test: freebsd/12.2
          groups:
            - 1
            - 2
  - stage: Remote_2_10
    displayName: Remote 2.10
    dependsOn: []
@@ -272,25 +224,6 @@ stages:
            - 1
            - 2
            - 3
  - stage: Docker_2_11
    displayName: Docker 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.11/linux/{0}
          targets:
            - name: CentOS 8
              test: centos8
            - name: Fedora 32
              test: fedora33
            - name: openSUSE 15 py3
              test: opensuse15
            - name: Ubuntu 20.04
              test: ubuntu2004
          groups:
            - 2
            - 3
  - stage: Docker_2_10
    displayName: Docker 2.10
    dependsOn: []
@@ -337,16 +270,6 @@ stages:
        parameters:
          nameFormat: Python {0}
          testFormat: devel/cloud/{0}/1
          targets:
            - test: 3.8
  - stage: Cloud_2_11
    displayName: Cloud 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.11/cloud/{0}/1
          targets:
            - test: 2.7
            - test: 3.6
@@ -376,22 +299,17 @@ stages:
      - Sanity_devel
      - Sanity_2_9
      - Sanity_2_10
      - Sanity_2_11
      - Units_devel
      - Units_2_9
      - Units_2_10
      - Units_2_11
      - Remote_devel
      - Remote_2_9
      - Remote_2_10
      - Remote_2_11
      - Docker_devel
      - Docker_2_9
      - Docker_2_10
      - Docker_2_11
      - Cloud_devel
      - Cloud_2_9
      - Cloud_2_10
      - Cloud_2_11
    jobs:
      - template: templates/coverage.yml
.github/BOTMETA.yml (82 lines changed)
@@ -1,7 +1,5 @@
automerge: true
files:
  plugins/:
    supershipit: aminvakil russoz
  changelogs/fragments/:
    support: community
  $actions:
@@ -17,8 +15,6 @@ files:
    labels: become
  $callbacks/:
    labels: callbacks
  $callbacks/loganalytics.py:
    maintainers: zhcli
  $callbacks/logstash.py:
    maintainers: ujenmr
  $callbacks/say.py:
@@ -57,24 +53,14 @@ files:
  $doc_fragments/xenserver.py:
    maintainers: bvitnik
    labels: xenserver
  $filters/dict.py:
    maintainers: felixfontein
  $filters/dict_kv.py:
    maintainers: giner
  $filters/from_csv.py:
    maintainers: Ajpantuso
  $filters/hashids:
    maintainers: Ajpantuso
  $filters/jc.py:
    maintainers: kellyjonbrazil
  $filters/list.py:
    maintainers: vbotka
  $filters/path_join_shim.py:
    maintainers: felixfontein
  $filters/time.py:
    maintainers: resmo
  $filters/version_sort.py:
    maintainers: ericzolf
  $httpapis/:
    maintainers: $team_networking
    labels: networking
@@ -88,8 +74,6 @@ files:
    maintainers: $team_linode
    labels: cloud linode
    keywords: linode dynamic inventory script
  $inventories/proxmox.py:
    maintainers: $team_virt ilijamt
  $inventories/scaleway.py:
    maintainers: $team_scaleway
    labels: cloud scaleway
@@ -155,6 +139,7 @@ files:
  $module_utils/redfish_utils.py:
    maintainers: $team_redfish
    labels: redfish_utils
  $module_utils/remote_management/dellemc/: rajeevarakkal
  $module_utils/remote_management/lxca/common.py: navalkp prabhosa
  $module_utils/scaleway.py:
    maintainers: $team_scaleway
@@ -190,14 +175,14 @@ files:
    maintainers: zbal
  $modules/cloud/lxc/lxc_container.py:
    maintainers: cloudnull
  $modules/cloud/lxc/lxc_profile.py:
    maintainers: conloos
  $modules/cloud/lxd/:
    ignore: hnakamur
  $modules/cloud/memset/:
    maintainers: glitchcrab
  $modules/cloud/misc/cloud_init_data_facts.py:
    maintainers: resmo
  $modules/cloud/misc/helm.py:
    maintainers: flaper87
  $modules/cloud/misc/proxmox.py:
    maintainers: $team_virt UnderGreen
    labels: proxmox virt
@@ -309,10 +294,8 @@ files:
    maintainers: bvitnik
  $modules/clustering/consul/:
    maintainers: $team_consul
    ignore: colin-nolan
  $modules/clustering/etcd3.py:
    maintainers: evrardjp
    ignore: vfauth
    maintainers: evrardjp vfauth
  $modules/clustering/nomad/:
    maintainers: chris93111
  $modules/clustering/pacemaker_cluster.py:
@@ -346,8 +329,6 @@ files:
    maintainers: dareko
  $modules/files/archive.py:
    maintainers: bendoh
  $modules/files/filesize.py:
    maintainers: quidame
  $modules/files/ini_file.py:
    maintainers: jpmens noseka1
  $modules/files/iso_extract.py:
@@ -361,6 +342,8 @@ files:
    maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
    labels: m:xml xml
    ignore: magnus919
  $modules/identity/onepassword_facts.py:
    maintainers: Rylon
  $modules/identity/ipa/:
    maintainers: $team_ipa
  $modules/identity/ipa/ipa_pwpolicy.py:
@@ -434,8 +417,6 @@ files:
    maintainers: andsens
  $modules/monitoring/spectrum_device.py:
    maintainers: orgito
  $modules/monitoring/spectrum_model_attrs.py:
    maintainers: tgates81
  $modules/monitoring/stackdriver.py:
    maintainers: bwhaley
  $modules/monitoring/statsd.py:
@@ -452,7 +433,7 @@ files:
  $modules/net_tools/dnsmadeeasy.py:
    maintainers: briceburg
  $modules/net_tools/haproxy.py:
    maintainers: ravibhure Normo
    maintainers: ravibhure
  $modules/net_tools/:
    maintainers: nerzhul
  $modules/net_tools/infinity/infinity.py:
@@ -465,6 +446,8 @@ files:
    maintainers: akostyuk
  $modules/net_tools/ipwcli_dns.py:
    maintainers: cwollinger
  $modules/net_tools/ldap/ldap_attr.py:
    maintainers: jtyr
  $modules/net_tools/ldap/ldap_attrs.py:
    maintainers: drybjed jtyr noles
  $modules/net_tools/ldap/ldap_entry.py:
@@ -562,7 +545,7 @@ files:
  $modules/packaging/language/composer.py:
    maintainers: dmtrs resmo
  $modules/packaging/language/cpanm.py:
    maintainers: fcuny russoz
    maintainers: fcuny
  $modules/packaging/language/easy_install.py:
    maintainers: mattupstate
  $modules/packaging/language/gem.py:
@@ -710,6 +693,12 @@ files:
    maintainers: matze
  $modules/remote_management/cobbler/:
    maintainers: dagwieers
  $modules/remote_management/dellemc/:
    maintainers: rajeevarakkal
  $modules/remote_management/dellemc/idrac_server_config_profile.py:
    maintainers: jagadeeshnv
  $modules/remote_management/dellemc/ome_device_info.py:
    maintainers: Sajna-Shetty
  $modules/remote_management/hpilo/:
    maintainers: haad
    ignore: dagwieers
@@ -718,8 +707,6 @@ files:
    labels: cisco
  $modules/remote_management/ipmi/:
    maintainers: bgaifullin cloudnull
  $modules/remote_management/lenovoxcc/:
    maintainers: panyy3 renxulei
  $modules/remote_management/lxca/:
    maintainers: navalkp prabhosa
  $modules/remote_management/manageiq/:
@@ -729,6 +716,8 @@ files:
    maintainers: evertmulder
  $modules/remote_management/manageiq/manageiq_tenant.py:
    maintainers: evertmulder
  $modules/remote_management/oneview/oneview_datacenter_facts.py:
    maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
  $modules/remote_management/oneview/:
    maintainers: adriane-cardozo fgbulsoni tmiotto
  $modules/remote_management/oneview/oneview_datacenter_info.py:
@@ -738,7 +727,7 @@ files:
  $modules/remote_management/oneview/oneview_fcoe_network.py:
    maintainers: fgbulsoni
  $modules/remote_management/redfish/:
    maintainers: $team_redfish
    maintainers: $team_redfish billdodd
    ignore: jose-delarosa
  $modules/remote_management/stacki/stacki_host.py:
    maintainers: bsanders bbyhuy
@@ -761,8 +750,6 @@ files:
    ignore: erydo
  $modules/source_control/github/github_release.py:
    maintainers: adrianmoisey
  $modules/source_control/github/github_repo.py:
    maintainers: atorrescogollo
  $modules/source_control/github/:
    maintainers: stpierre
  $modules/source_control/gitlab/:
@@ -777,6 +764,12 @@ files:
    maintainers: yeukhon
  $modules/storage/emc/emc_vnx_sg_member.py:
    maintainers: remixtj
  $modules/storage/glusterfs/:
    maintainers: devyanikota
  $modules/storage/glusterfs/gluster_peer.py:
    maintainers: sac
  $modules/storage/glusterfs/gluster_volume.py:
    maintainers: rosmo
  $modules/storage/hpe3par/ss_3par_cpg.py:
    maintainers: farhan7500 gautamphegde
  $modules/storage/ibm/:
@@ -798,6 +791,9 @@ files:
    maintainers: johanwiren
  $modules/storage/zfs/zfs_delegate_admin.py:
    maintainers: natefoo
  $modules/system/python_requirements_facts.py:
    maintainers: willthames
    ignore: ryansb
  $modules/system/aix:
    maintainers: $team_aix
    labels: aix
@@ -837,7 +833,7 @@ files:
  $modules/system/iptables_state.py:
    maintainers: quidame
  $modules/system/java_cert.py:
    maintainers: haad absynth76
    maintainers: haad
  $modules/system/java_keystore.py:
    maintainers: Mogztter
  $modules/system/kernel_blacklist.py:
@@ -922,12 +918,16 @@ files:
    maintainers: ahtik ovcharenko pyykkis
    labels: ufw
  $modules/system/vdo.py:
    maintainers: rhawalsh
    maintainers: bgurney-rh
  $modules/system/xfconf.py:
    maintainers: russoz jbenden
    labels: xfconf
  $modules/system/xfs_quota.py:
    maintainers: bushvin
  $modules/web_infrastructure/jenkins_job_facts.py:
    maintainers: stpierre
  $modules/web_infrastructure/nginx_status_facts.py:
    maintainers: resmo
  $modules/web_infrastructure/apache2_mod_proxy.py:
    maintainers: oboukili
  $modules/web_infrastructure/apache2_module.py:
@@ -1003,27 +1003,27 @@ macros:
  terminals: plugins/terminal
  team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
  team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
  team_consul: sgargan
  team_consul: colin-nolan sgargan
  team_cyberark_conjur: jvanderhoof ryanprior
  team_e_spirit: MatrixCrawler getjack
  team_flatpak: JayKayy oolongbrothers
  team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii
  team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
  team_hpux: bcoca davx8342
  team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
  team_ipa: Akasurde Nosmoht fxfitz justchris1
  team_ipa: Akasurde Nosmoht fxfitz
  team_jboss: Wolfant jairojunior wbrefvem
  team_keycloak: eikef ndclt
  team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
  team_linode: InTheCloudDan decentral1se displague rmcintosh
  team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
  team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
  team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
  team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
  team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
  team_opennebula: ilicmilan meerkampdvv rsmontero xorel
  team_oracle: manojmeda mross22 nalsaber
  team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
  team_redfish: mraineri tomasg2012 xmadsen renxulei
  team_redfish: billdodd mraineri tomasg2012
  team_rhn: FlossWare alikins barnabycourt vritant
  team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
  team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
  team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
  team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
  team_virt: joshainglis karmab Aversiste Thulium-Drake
.github/ISSUE_TEMPLATE/bug_report.yml (135 lines changed)
@@ -1,135 +0,0 @@
---
name: Bug report
description: Create a report to help us improve

body:
- type: markdown
  attributes:
    value: |
      ⚠
      Verify first that your issue is not [already reported on GitHub][issue search].
      Also test if the latest release and devel branch are affected too.
      *Complete **all** sections as described; this form is processed automatically.*

      [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

- type: textarea
  attributes:
    label: Summary
    description: Explain the problem briefly below.
    placeholder: >-
      When I try to do X with the collection from the main branch on GitHub, Y
      breaks in a way Z under the env E. Here are all the details I know
      about this problem...
  validations:
    required: true

- type: dropdown
  attributes:
    label: Issue Type
    # FIXME: Once GitHub allows defining the default choice, update this
    options:
    - Bug Report
  validations:
    required: true

- type: textarea
  attributes:
    # For smaller collections we could use a multi-select and hardcode the list
    # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
    # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
    # OR freeform - doesn't seem to be supported in adaptivecards
    label: Component Name
    description: >-
      Write the short name of the module, plugin, task or feature below,
      *use your best guess if unsure*.
    placeholder: dnf, apt, yum, pip, user etc.
  validations:
    required: true

- type: textarea
  attributes:
    label: Ansible Version
    description: >-
      Paste verbatim output from `ansible --version` between
      triple backticks.
    value: |
      ```console (paste below)
      $ ansible --version

      ```
  validations:
    required: true

- type: textarea
  attributes:
    label: Configuration
    description: >-
      If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
      This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
      Paste verbatim output from `ansible-config dump --only-changed` between quotes.
    value: |
      ```console (paste below)
      $ ansible-config dump --only-changed

      ```

- type: textarea
  attributes:
    label: OS / Environment
    description: >-
      Provide all relevant information below, e.g. target OS versions,
      network device firmware, etc.
    placeholder: RHEL 8, CentOS Stream etc.
  validations:
    required: false

- type: textarea
  attributes:
    label: Steps to Reproduce
    description: |
      Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.

      **HINT:** You can paste https://gist.github.com links for larger files.
    value: |
      <!--- Paste example playbooks or commands between quotes below -->
      ```yaml (paste below)

      ```
  validations:
    required: true

- type: textarea
  attributes:
    label: Expected Results
    description: >-
      Describe what you expected to happen when running the steps above.
    placeholder: >-
      I expected X to happen because I assumed Y and was shocked
      that it did not.
  validations:
    required: true

- type: textarea
  attributes:
    label: Actual Results
    description: |
      Describe what actually happened. If possible run with extra verbosity (`-vvvv`).

      Paste verbatim command output between quotes.
    value: |
      ```console (paste below)

      ```
- type: checkboxes
  attributes:
    label: Code of Conduct
    description: |
      Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
    options:
    - label: I agree to follow the Ansible Code of Conduct
      required: true
...
.github/ISSUE_TEMPLATE/config.yml (27 lines changed)
@@ -1,27 +0,0 @@
---
# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: false  # default: true
contact_links:
- name: Security bug report
  url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
  about: |
    Please learn how to report security vulnerabilities here.

    For all security related bugs, email security@ansible.com
    instead of using this issue tracker and you will receive
    a prompt response.

    For more information, see
    https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
- name: Ansible Code of Conduct
  url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
  about: Be nice to other members of the community.
- name: Talks to the community
  url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
  about: Please ask and answer usage questions here
- name: Working groups
  url: https://github.com/ansible/community/wiki
  about: Interested in improving a specific area? Become a part of a working group!
- name: For Enterprise
  url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
  about: Red Hat offers support for the Ansible Automation Platform
.github/ISSUE_TEMPLATE/documentation_report.yml (111 lines changed)
@@ -1,111 +0,0 @@
---
name: Documentation Report
description: Ask us about docs
# NOTE: issue body is enabled to allow screenshots

body:
- type: markdown
  attributes:
    value: |
      ⚠
      Verify first that your issue is not [already reported on GitHub][issue search].
      Also test if the latest release and devel branch are affected too.
      *Complete **all** sections as described; this form is processed automatically.*

      [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

- type: textarea
  attributes:
    label: Summary
    description: |
      Explain the problem briefly below, add suggestions to wording or structure.

      **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
    placeholder: >-
      I was reading the Collection documentation of version X and I'm having
      problems understanding Y. It would be very helpful if that got
      rephrased as Z.
  validations:
    required: true

- type: dropdown
  attributes:
    label: Issue Type
    # FIXME: Once GitHub allows defining the default choice, update this
    options:
    - Documentation Report
  validations:
    required: true

- type: input
  attributes:
    label: Component Name
    description: >-
      Write the short name of the rst file, module, plugin, task or
      feature below, *use your best guess if unsure*.
    placeholder: mysql_user
  validations:
    required: true

- type: textarea
  attributes:
    label: Ansible Version
    description: >-
      Paste verbatim output from `ansible --version` between
      triple backticks.
    value: |
      ```console (paste below)
      $ ansible --version

      ```
  validations:
    required: false

- type: textarea
  attributes:
    label: Configuration
    description: >-
      Paste verbatim output from `ansible-config dump --only-changed` between quotes.
    value: |
      ```console (paste below)
      $ ansible-config dump --only-changed

      ```
  validations:
    required: false

- type: textarea
  attributes:
    label: OS / Environment
    description: >-
      Provide all relevant information below, e.g. OS version,
      browser, etc.
    placeholder: Fedora 33, Firefox etc.
  validations:
    required: false

- type: textarea
  attributes:
    label: Additional Information
    description: |
      Describe how this improves the documentation, e.g. before/after situation or screenshots.

      **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.

      **HINT:** You can paste https://gist.github.com links for larger files.
    placeholder: >-
      When the improvement is applied, it makes it more straightforward
      to understand X.
  validations:
    required: false

- type: checkboxes
  attributes:
    label: Code of Conduct
    description: |
      Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
    options:
    - label: I agree to follow the Ansible Code of Conduct
      required: true
...
.github/ISSUE_TEMPLATE/feature_request.yml (69 lines changed)
@@ -1,69 +0,0 @@
---
name: Feature request
description: Suggest an idea for this project

body:
- type: markdown
  attributes:
    value: |
      ⚠
      Verify first that your issue is not [already reported on GitHub][issue search].
      Also test if the latest release and devel branch are affected too.
      *Complete **all** sections as described; this form is processed automatically.*

      [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues

- type: textarea
  attributes:
    label: Summary
    description: Describe the new feature/improvement briefly below.
    placeholder: >-
      I am trying to do X with the collection from the main branch on GitHub and
      I think that implementing a feature Y would be very helpful for me and
      every other user of ansible-core because of Z.
  validations:
    required: true

- type: dropdown
  attributes:
    label: Issue Type
    # FIXME: Once GitHub allows defining the default choice, update this
    options:
    - Feature Idea
  validations:
    required: true

- type: input
  attributes:
    label: Component Name
    description: >-
      Write the short name of the module, plugin, task or feature below,
      *use your best guess if unsure*.
    placeholder: dnf, apt, yum, pip, user etc.
  validations:
    required: true

- type: textarea
  attributes:
    label: Additional Information
    description: |
      Describe how the feature would be used, why it is needed and what it would solve.

      **HINT:** You can paste https://gist.github.com links for larger files.
    value: |
      <!--- Paste example playbooks or commands between quotes below -->
      ```yaml (paste below)

      ```
  validations:
    required: false
- type: checkboxes
  attributes:
    label: Code of Conduct
    description: |
      Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
    options:
    - label: I agree to follow the Ansible Code of Conduct
      required: true
...
CHANGELOG.rst (906 lines changed)
File diff suppressed because it is too large
README.md

@@ -1,6 +1,6 @@
# Community General Collection

[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
[](https://codecov.io/gh/ansible-collections/community.general)

This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by the Ansible community which are not part of more specialized community collections.

@@ -9,7 +9,7 @@ You can find [documentation for this collection on the Ansible docs site](https:

## Tested with Ansible

Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.

## External requirements

@@ -76,7 +76,7 @@ Basic instructions without release branches:

## Release notes

See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-3/CHANGELOG.rst).
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-2/CHANGELOG.rst).

## Roadmap
File diff suppressed because it is too large
@@ -1,72 +0,0 @@
Committers Guidelines for community.general
===========================================

This document is based on the [Ansible committer guidelines](https://github.com/ansible/ansible/blob/b57444af14062ec96e0af75fdfc2098c74fe2d9a/docs/docsite/rst/community/committer_guidelines.rst) ([latest version](https://docs.ansible.com/ansible/devel/community/committer_guidelines.html)).

These are the guidelines for people with commit privileges on the Ansible Community General Collection GitHub repository. Please read the guidelines before you commit.

These guidelines apply to everyone. At the same time, this is NOT a process document. So just use good judgment. You have been given commit access because we trust your judgment.

That said, use the trust wisely.

If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges.

Our workflow on GitHub
----------------------

As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you are aware of the following workflow steps:

* Fork the repository upon which you want to do some work to your own personal repository
* Work on the specific branch upon which you need to commit
* Create a Pull Request back to the collection repository and await reviews
* Adjust code as necessary based on the comments provided
* Ask someone from the other committers to do a final review and merge

Sometimes, committers merge their own pull requests. This section is a set of guidelines. If you are changing a comma in a doc or making a very minor change, you can use your best judgement. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgement and make sure people on the team are aware of your work.

Roles
-----
* Release managers: Merge pull requests to `stable-X` branches, create tags to do releases.
* Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgement of these devs.
* Module maintainers: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. This is primarily [ansibullbot](https://github.com/ansibullbot)'s `shipit` mechanism.

General rules
-------------
Individuals with direct commit access to this collection repository are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*; individuals with this power are expected to use their best judgement.

* Do NOTs:

  - Do not commit directly.
  - Do not merge your own PRs. Someone else should have a chance to review and approve the PR merge. You have a small amount of leeway here for very minor changes.
  - Do not forget about non-standard / alternate environments. Consider the alternatives. Yes, people have bad/unusual/strange environments (like binaries from multiple init systems installed), but they are the ones who need us the most.
  - Do not drag your community team members down. Discuss the technical merits of any pull requests you review. Avoid negativity and personal comments. For more guidance on being a good community member, read the [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
  - Do not forget about the maintenance burden. High-maintenance features may not be worth adding.
  - Do not break playbooks. Always keep backwards compatibility in mind.
  - Do not forget to keep it simple. Complexity breeds all kinds of problems.
  - Do not merge to branches other than `main`, especially not to `stable-X`, if you do not have explicit permission to do so.
  - Do not create tags. Tags are used in the release process, and should only be created by the people responsible for managing the stable branches.

* Do:

  - Squash, avoid merges whenever possible, use GitHub's squash commits or cherry pick if needed (bisect thanks you).
  - Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended.
  - Consider backwards compatibility (goes back to "do not break existing playbooks").
  - Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
  - Discuss with other committers, especially when you are unsure of something.
  - Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so.
  - Consider scope, sometimes a fix can be generalized.
  - Keep it simple, then things are maintainable, debuggable and intelligible.

Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community.

People
------

Individuals who have been asked to become a part of this group have generally been contributing in significant ways to the community.general collection for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act.

| Name            | GitHub ID    | IRC Nick      | Other |
| --------------- | ------------ | ------------- | ----- |
| Andrew Klychkov | andersson007 | andersson007_ |       |
| Felix Fontein   | felixfontein | felixfontein  |       |
| John R Barker   | gundalow     | gundalow      |       |
galaxy.yml

@@ -1,6 +1,6 @@
namespace: community
name: general
version: 3.0.0
version: 2.1.0
readme: README.md
authors:
  - Ansible (https://github.com/ansible)
meta/runtime.yml (293 lines changed)
@@ -39,9 +39,9 @@ plugin_routing:
      redirect: community.hashi_vault.hashi_vault
  modules:
    ali_instance_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.ali_instance_info instead.
        warning_text: see plugin documentation for details
    docker_compose:
      redirect: community.docker.docker_compose
    docker_config:
@@ -159,7 +159,8 @@ plugin_routing:
    gcpubsub_info:
      redirect: community.google.gcpubsub_info
    gcpubsub_facts:
      tombstone:
      redirect: community.google.gcpubsub_info
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.google.gcpubsub_info instead.
    gcspanner:
@@ -170,23 +171,22 @@ plugin_routing:
      tombstone:
        removal_version: 2.0.0
        warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead.
    # Adding tombstones burns the old name, so we simply remove the entries:
    # gluster_heal_info:
    #   tombstone:
    #     removal_version: 3.0.0
    #     warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead.
    # gluster_peer:
    #   tombstone:
    #     removal_version: 3.0.0
    #     warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead.
    # gluster_volume:
    #   tombstone:
    #     removal_version: 3.0.0
    #     warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead.
    # helm:
    #   tombstone:
    #     removal_version: 3.0.0
    #     warning_text: Use community.kubernetes.helm instead.
    gluster_heal_info:
      deprecation:
        removal_version: 3.0.0
        warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead.
    gluster_peer:
      deprecation:
        removal_version: 3.0.0
        warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead.
    gluster_volume:
      deprecation:
        removal_version: 3.0.0
        warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead.
    helm:
      deprecation:
        removal_version: 3.0.0
        warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
    hetzner_failover_ip:
      redirect: community.hrobot.failover_ip
    hetzner_failover_ip_info:
@@ -196,21 +196,17 @@ plugin_routing:
    hetzner_firewall_info:
      redirect: community.hrobot.firewall_info
    hpilo_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.hpilo_info instead.
    idrac_firmware:
      redirect: dellemc.openmanage.idrac_firmware
        warning_text: see plugin documentation for details
    idrac_redfish_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.idrac_redfish_info instead.
    idrac_server_config_profile:
      redirect: dellemc.openmanage.idrac_server_config_profile
        warning_text: see plugin documentation for details
    jenkins_job_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.jenkins_job_info instead.
        warning_text: see plugin documentation for details
    katello:
      tombstone:
        removal_version: 2.0.0
@@ -228,9 +224,9 @@ plugin_routing:
    kubevirt_vm:
      redirect: community.kubevirt.kubevirt_vm
    ldap_attr:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.ldap_attrs instead.
        warning_text: see plugin documentation for details
    logicmonitor:
      tombstone:
        removal_version: 1.0.0
@@ -240,13 +236,13 @@ plugin_routing:
        removal_version: 1.0.0
        warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017.
    memset_memstore_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.memset_memstore_info instead.
        warning_text: see plugin documentation for details
    memset_server_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.memset_server_info instead.
        warning_text: see plugin documentation for details
    na_cdot_aggregate:
      tombstone:
        removal_version: 2.0.0
@@ -280,163 +276,161 @@ plugin_routing:
        removal_version: 2.0.0
        warning_text: Use netapp.ontap.na_ontap_volume instead.
    na_ontap_gather_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use netapp.ontap.na_ontap_info instead.
        warning_text: see plugin documentation for details
    nginx_status_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.nginx_status_info instead.
    ome_device_info:
      redirect: dellemc.openmanage.ome_device_info
        warning_text: see plugin documentation for details
    one_image_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.one_image_info instead.
        warning_text: see plugin documentation for details
    onepassword_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.onepassword_info instead.
        warning_text: see plugin documentation for details
    oneview_datacenter_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.oneview_datacenter_info instead.
        warning_text: see plugin documentation for details
    oneview_enclosure_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.oneview_enclosure_info instead.
        warning_text: see plugin documentation for details
    oneview_ethernet_network_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.oneview_ethernet_network_info instead.
        warning_text: see plugin documentation for details
    oneview_fc_network_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.oneview_fc_network_info instead.
        warning_text: see plugin documentation for details
    oneview_fcoe_network_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.oneview_fcoe_network_info instead.
        warning_text: see plugin documentation for details
    oneview_logical_interconnect_group_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.oneview_logical_interconnect_group_info instead.
        warning_text: see plugin documentation for details
    oneview_network_set_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.oneview_network_set_info instead.
        warning_text: see plugin documentation for details
    oneview_san_manager_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.oneview_san_manager_info instead.
        warning_text: see plugin documentation for details
    online_server_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.online_server_info instead.
        warning_text: see plugin documentation for details
    online_user_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.online_user_info instead.
        warning_text: see plugin documentation for details
    ovirt:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_vm instead.
        warning_text: see plugin documentation for details
    ovirt_affinity_label_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead.
        warning_text: see plugin documentation for details
    ovirt_api_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_api_info instead.
        warning_text: see plugin documentation for details
    ovirt_cluster_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_cluster_info instead.
        warning_text: see plugin documentation for details
    ovirt_datacenter_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead.
        warning_text: see plugin documentation for details
    ovirt_disk_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_disk_info instead.
        warning_text: see plugin documentation for details
    ovirt_event_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_event_info instead.
        warning_text: see plugin documentation for details
    ovirt_external_provider_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead.
        warning_text: see plugin documentation for details
    ovirt_group_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_group_info instead.
        warning_text: see plugin documentation for details
    ovirt_host_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_host_info instead.
        warning_text: see plugin documentation for details
    ovirt_host_storage_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead.
        warning_text: see plugin documentation for details
    ovirt_network_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_network_info instead.
        warning_text: see plugin documentation for details
    ovirt_nic_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_nic_info instead.
        warning_text: see plugin documentation for details
    ovirt_permission_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_permission_info instead.
        warning_text: see plugin documentation for details
    ovirt_quota_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_quota_info instead.
        warning_text: see plugin documentation for details
    ovirt_scheduling_policy_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead.
        warning_text: see plugin documentation for details
    ovirt_snapshot_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead.
        warning_text: see plugin documentation for details
    ovirt_storage_domain_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead.
        warning_text: see plugin documentation for details
    ovirt_storage_template_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead.
        warning_text: see plugin documentation for details
    ovirt_storage_vm_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead.
        warning_text: see plugin documentation for details
    ovirt_tag_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_tag_info instead.
        warning_text: see plugin documentation for details
    ovirt_template_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_template_info instead.
        warning_text: see plugin documentation for details
    ovirt_user_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_user_info instead.
        warning_text: see plugin documentation for details
    ovirt_vm_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_vm_info instead.
        warning_text: see plugin documentation for details
    ovirt_vmpool_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead.
        warning_text: see plugin documentation for details
    postgresql_copy:
      redirect: community.postgresql.postgresql_copy
    postgresql_db:
@@ -482,49 +476,49 @@ plugin_routing:
    postgresql_user:
      redirect: community.postgresql.postgresql_user
    purefa_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use purestorage.flasharray.purefa_info instead.
        warning_text: see plugin documentation for details
    purefb_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use purestorage.flashblade.purefb_info instead.
        warning_text: see plugin documentation for details
    python_requirements_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.python_requirements_info instead.
        warning_text: see plugin documentation for details
    redfish_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.redfish_info instead.
        warning_text: see plugin documentation for details
    scaleway_image_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.scaleway_image_info instead.
        warning_text: see plugin documentation for details
    scaleway_ip_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.scaleway_ip_info instead.
        warning_text: see plugin documentation for details
    scaleway_organization_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.scaleway_organization_info instead.
        warning_text: see plugin documentation for details
    scaleway_security_group_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.scaleway_security_group_info instead.
        warning_text: see plugin documentation for details
    scaleway_server_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.scaleway_server_info instead.
        warning_text: see plugin documentation for details
    scaleway_snapshot_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.scaleway_snapshot_info instead.
        warning_text: see plugin documentation for details
    scaleway_volume_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.scaleway_volume_info instead.
        warning_text: see plugin documentation for details
    sf_account_manager:
      tombstone:
        removal_version: 2.0.0
@@ -546,17 +540,17 @@ plugin_routing:
        removal_version: 2.0.0
        warning_text: Use netapp.elementsw.na_elementsw_volume instead.
    smartos_image_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.smartos_image_info instead.
        warning_text: see plugin documentation for details
    vertica_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.vertica_info instead.
        warning_text: see plugin documentation for details
    xenserver_guest_facts:
      tombstone:
      deprecation:
        removal_version: 3.0.0
        warning_text: Use community.general.xenserver_guest_info instead.
        warning_text: see plugin documentation for details
  doc_fragments:
    _gcp:
      redirect: community.google._gcp
@@ -571,8 +565,6 @@ plugin_routing:
    postgresql:
      redirect: community.postgresql.postgresql
  module_utils:
    remote_management.dellemc.dellemc_idrac:
      redirect: dellemc.openmanage.dellemc_idrac
    docker.common:
      redirect: community.docker.common
    docker.swarm:
@@ -587,8 +579,6 @@ plugin_routing:
      redirect: community.hrobot.robot
    kubevirt:
      redirect: community.kubevirt.kubevirt
    remote_management.dellemc.ome:
      redirect: dellemc.openmanage.ome
    postgresql:
      redirect: community.postgresql.postgresql
  callback:
@@ -611,10 +601,3 @@ plugin_routing:
      redirect: community.docker.docker_swarm
    kubevirt:
      redirect: community.kubevirt.kubevirt
  filter:
    path_join:
      # The ansible.builtin.path_join filter has been added in ansible-base 2.10.
      # Since plugin routing is only available since ansible-base 2.10, this
      # redirect will be used for ansible-base 2.10 or later, and the included
      # path_join filter will be used for Ansible 2.9 or earlier.
      redirect: ansible.builtin.path_join
@@ -1,91 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    become: sudosu
    short_description: Run tasks using sudo su -
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
    author:
        - Dag Wieers (@dagwieers)
    version_added: 2.4.0
    options:
        become_user:
            description: User you 'become' to execute the task.
            default: root
            ini:
              - section: privilege_escalation
                key: become_user
              - section: sudo_become_plugin
                key: user
            vars:
              - name: ansible_become_user
              - name: ansible_sudo_user
            env:
              - name: ANSIBLE_BECOME_USER
              - name: ANSIBLE_SUDO_USER
        become_flags:
            description: Options to pass to C(sudo).
            default: -H -S -n
            ini:
              - section: privilege_escalation
                key: become_flags
              - section: sudo_become_plugin
                key: flags
            vars:
              - name: ansible_become_flags
              - name: ansible_sudo_flags
            env:
              - name: ANSIBLE_BECOME_FLAGS
              - name: ANSIBLE_SUDO_FLAGS
        become_pass:
            description: Password to pass to C(sudo).
            required: false
            vars:
              - name: ansible_become_password
              - name: ansible_become_pass
              - name: ansible_sudo_pass
            env:
              - name: ANSIBLE_BECOME_PASS
              - name: ANSIBLE_SUDO_PASS
            ini:
              - section: sudo_become_plugin
                key: password
"""


from ansible.plugins.become import BecomeBase


class BecomeModule(BecomeBase):

    name = 'community.general.sudosu'

    # messages for detecting prompted password issues
    fail = ('Sorry, try again.',)
    missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')

    def build_become_command(self, cmd, shell):
        super(BecomeModule, self).build_become_command(cmd, shell)

        if not cmd:
            return cmd

        becomecmd = 'sudo'

        flags = self.get_option('become_flags') or ''
        prompt = ''
        if self.get_option('become_pass'):
            self.prompt = '[sudo via ansible, key=%s] password:' % self._id
            if flags:  # this could be simplified, but kept as is for now for backwards string matching
                flags = flags.replace('-n', '')
            prompt = '-p "%s"' % (self.prompt)

        user = self.get_option('become_user') or ''
        if user:
            user = '%s' % (user)

        return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)])

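For reference, a minimal playbook sketch using this become plugin (the host pattern, become_user, and task are illustrative only):

    - hosts: all
      become: true
      become_method: community.general.sudosu
      become_user: deploy
      tasks:
        - name: Check the effective user
          ansible.builtin.command: whoami
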
plugins/cache/memcached.py (2 changes, vendored)
@@ -162,7 +162,7 @@ class CacheModuleKeys(MutableSet):
         self._cache.set(self.PREFIX, self._keyset)

     def remove_by_timerange(self, s_min, s_max):
-        for k in list(self._keyset.keys()):
+        for k in self._keyset.keys():
             t = self._keyset[k]
             if s_min < t < s_max:
                 del self._keyset[k]

plugins/cache/redis.py (6 changes, vendored)
@@ -217,12 +217,14 @@ class CacheModule(BaseCacheModule):
         self._db.zrem(self._keys_set, key)

     def flush(self):
-        for key in list(self.keys()):
+        for key in self.keys():
             self.delete(key)

     def copy(self):
         # TODO: there is probably a better way to do this in redis
-        ret = dict([(k, self.get(k)) for k in self.keys()])
+        ret = dict()
+        for key in self.keys():
+            ret[key] = self.get(key)
         return ret

     def __getstate__(self):

@@ -1013,7 +1013,7 @@ class CallbackModule(Default):
         for attr in _stats_attributes:
             _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})

-        _ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())})
+        _ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})

         return _ret

@@ -1,234 +0,0 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    callback: loganalytics
    type: aggregate
    short_description: Posts task results to Azure Log Analytics
    author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
    description:
      - This callback plugin will post task results in JSON format to an Azure Log Analytics workspace.
      - Credits to the authors of the splunk callback plugin.
    version_added: "2.4.0"
    requirements:
      - Whitelisting this callback plugin.
      - An Azure Log Analytics workspace has been established.
    options:
      workspace_id:
        description: Workspace ID of the Azure log analytics workspace.
        required: true
        env:
          - name: WORKSPACE_ID
        ini:
          - section: callback_loganalytics
            key: workspace_id
      shared_key:
        description: Shared key to connect to Azure log analytics workspace.
        required: true
        env:
          - name: WORKSPACE_SHARED_KEY
        ini:
          - section: callback_loganalytics
            key: shared_key
'''

EXAMPLES = '''
examples: |
    Whitelist the plugin in ansible.cfg:
        [defaults]
        callback_whitelist = community.general.loganalytics
    Set the environment variable:
        export WORKSPACE_ID=01234567-0123-0123-0123-01234567890a
        export WORKSPACE_SHARED_KEY=dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
    Or configure the plugin in ansible.cfg in the callback_loganalytics block:
        [callback_loganalytics]
        workspace_id = 01234567-0123-0123-0123-01234567890a
        shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
'''

import hashlib
import hmac
import base64
import logging
import json
import uuid
import socket
import getpass

from datetime import datetime
from os.path import basename

from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase


class AzureLogAnalyticsSource(object):
    def __init__(self):
        self.ansible_check_mode = False
        self.ansible_playbook = ""
        self.ansible_version = ""
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        self.user = getpass.getuser()
        self.extra_vars = ""

    def __build_signature(self, date, workspace_id, shared_key, content_length):
        # Build authorisation signature for Azure log analytics API call
        sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
            str(content_length), date)
        utf8_sigs = sigs.encode('utf-8')
        decoded_shared_key = base64.b64decode(shared_key)
        hmac_sha256_sigs = hmac.new(
            decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest()
        encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8')
        signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash)
        return signature

    def __build_workspace_url(self, workspace_id):
        return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id)

    def __rfc1123date(self):
        return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')

    def send_event(self, workspace_id, shared_key, state, result, runtime):
        if result._task_fields['args'].get('_ansible_check_mode') is True:
            self.ansible_check_mode = True

        if result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = \
                result._task_fields['args'].get('_ansible_version')

        if result._task._role:
            ansible_role = str(result._task._role)
        else:
            ansible_role = None

        data = {}
        data['uuid'] = result._task._uuid
        data['session'] = self.session
        data['status'] = state
        data['timestamp'] = self.__rfc1123date()
        data['host'] = self.host
        data['user'] = self.user
        data['runtime'] = runtime
        data['ansible_version'] = self.ansible_version
        data['ansible_check_mode'] = self.ansible_check_mode
        data['ansible_host'] = result._host.name
        data['ansible_playbook'] = self.ansible_playbook
        data['ansible_role'] = ansible_role
        data['ansible_task'] = result._task_fields
        # Removing args since it can contain sensitive data
        if 'args' in data['ansible_task']:
            data['ansible_task'].pop('args')
        data['ansible_result'] = result._result
        if 'content' in data['ansible_result']:
            data['ansible_result'].pop('content')

        # Adding extra vars info
        data['extra_vars'] = self.extra_vars

        # Preparing the playbook logs as JSON format and send to Azure log analytics
        jsondata = json.dumps({'event': data}, cls=AnsibleJSONEncoder, sort_keys=True)
        content_length = len(jsondata)
        rfc1123date = self.__rfc1123date()
        signature = self.__build_signature(rfc1123date, workspace_id, shared_key, content_length)
        workspace_url = self.__build_workspace_url(workspace_id)

        open_url(
            workspace_url,
            jsondata,
            headers={
                'content-type': 'application/json',
                'Authorization': signature,
                'Log-Type': 'ansible_playbook',
                'x-ms-date': rfc1123date
            },
            method='POST'
        )


class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'loganalytics'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.start_datetimes = {}  # Collect task start times
        self.workspace_id = None
        self.shared_key = None
        self.loganalytics = AzureLogAnalyticsSource()

    def _seconds_since_start(self, result):
        return (
            datetime.utcnow() -
            self.start_datetimes[result._task._uuid]
        ).total_seconds()

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
        self.workspace_id = self.get_option('workspace_id')
        self.shared_key = self.get_option('shared_key')

    def v2_playbook_on_play_start(self, play):
        vm = play.get_variable_manager()
        extra_vars = vm.extra_vars
        self.loganalytics.extra_vars = extra_vars

    def v2_playbook_on_start(self, playbook):
        self.loganalytics.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_playbook_on_handler_task_start(self, task):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_runner_on_ok(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'OK',
            result,
            self._seconds_since_start(result)
        )

    def v2_runner_on_skipped(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'SKIPPED',
            result,
            self._seconds_since_start(result)
        )

    def v2_runner_on_failed(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'FAILED',
            result,
            self._seconds_since_start(result)
        )

    def runner_on_async_failed(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'FAILED',
            result,
            self._seconds_since_start(result)
        )

    def v2_runner_on_unreachable(self, result, **kwargs):
        self.loganalytics.send_event(
            self.workspace_id,
            self.shared_key,
            'UNREACHABLE',
            result,
            self._seconds_since_start(result)
        )
@@ -67,7 +67,7 @@ COLORS = {

 def dict_diff(prv, nxt):
     """Return a dict of keys that differ with another config object."""
-    keys = set(list(prv.keys()) + list(nxt.keys()))
+    keys = set(prv.keys() + nxt.keys())
     result = {}
     for k in keys:
         if prv.get(k) != nxt.get(k):

@@ -37,13 +37,12 @@ import tempfile
 import shutil

 from ansible.errors import AnsibleError
-from ansible.plugins.connection import ConnectionBase
 from ansible.utils.display import Display

 display = Display()


-class Connection(ConnectionBase):
+class Connection(object):
     ''' Func-based connections '''

     has_pipelining = False

@@ -30,6 +30,7 @@ options:
     description:
       - Keycloak realm name to authenticate to for API access.
     type: str
+    required: true

   auth_client_secret:
     description:
@@ -40,6 +41,7 @@ options:
     description:
       - Username to authenticate for API access with.
     type: str
+    required: true
     aliases:
       - username

@@ -47,15 +49,10 @@ options:
     description:
       - Password to authenticate for API access with.
     type: str
+    required: true
     aliases:
       - password

-  token:
-    description:
-      - Authentication token for Keycloak API.
-    type: str
-    version_added: 3.0.0
-
   validate_certs:
     description:
       - Verify TLS certificates (do not disable this in production).

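A usage sketch for these authentication options; the module name and the auth_keycloak_url option are assumptions based on the collection's Keycloak modules, not part of this hunk, and all values are placeholders:

    - name: Authenticate against Keycloak with username/password
      community.general.keycloak_client:
        auth_keycloak_url: https://keycloak.example.com/auth
        auth_realm: master
        auth_username: admin
        auth_password: changeme
        validate_certs: true
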
@@ -13,32 +13,12 @@ class ModuleDocFragment(object):
     DOCUMENTATION = r'''
 options:
     config:
         description:
         description:
             - Path to a .json configuration file containing the OneView client configuration.
              The configuration file is optional and when used should be present in the host running the ansible commands.
              If the file path is not provided, the configuration will be loaded from environment variables.
              For links to example configuration files or how to use the environment variables verify the notes section.
        type: path
    api_version:
        description:
            - OneView API Version.
        type: int
    image_streamer_hostname:
        description:
            - IP address or hostname for the HPE Image Streamer REST API.
        type: str
    hostname:
        description:
            - IP address or hostname for the appliance.
        type: str
    username:
        description:
            - Username for API authentication.
        type: str
    password:
        description:
            - Password for API authentication.
        type: str
        type: path

requirements:
    - python >= 2.7.9

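A usage sketch for the config option described above; the module name and file path are illustrative, not part of this hunk:

    - name: Gather OneView facts using a JSON configuration file
      community.general.oneview_fc_network_info:
        config: /etc/oneview/oneview_config.json
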
plugins/doc_fragments/ovirt_facts.py (59 changes, normal file)
@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2016, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # info standard oVirt documentation fragment
    DOCUMENTATION = r'''
options:
    fetch_nested:
        description:
            - If I(yes) the module will fetch additional data from the API.
            - It will fetch only the IDs of nested entities, not multiple levels of nested attributes;
              only the attributes of the current entity. The user can configure fetching other
              attributes of the nested entities by specifying C(nested_attributes).
        type: bool
        default: false
    nested_attributes:
        description:
            - Specifies a list of the attributes which should be fetched from the API.
            - This parameter applies only when C(fetch_nested) is I(true).
        type: list
    auth:
        description:
            - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
            - C(username)[I(required)] - The name of the user, something like I(admin@internal).
              Default value is set by I(OVIRT_USERNAME) environment variable.
            - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
            - "C(url) - A string containing the API URL of the server, usually
              something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable.
              Either C(url) or C(hostname) is required."
            - "C(hostname) - A string containing the hostname of the server, usually
              something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable.
              Either C(url) or C(hostname) is required."
            - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
            - "C(insecure) - A boolean flag that indicates if the server TLS
              certificate and host name should be checked."
            - "C(ca_file) - A PEM file containing the trusted CA certificates. The
              certificate presented by the server will be verified using these CA
              certificates. If `C(ca_file)` parameter is not set, the system wide
              CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
            - "C(kerberos) - A boolean flag indicating if Kerberos authentication
              should be used instead of the default basic authentication."
            - "C(headers) - Dictionary of HTTP headers to be added to each API call."
        type: dict
        required: true
requirements:
    - python >= 2.7
    - ovirt-engine-sdk-python >= 4.3.0
notes:
    - "In order to use this module you have to install the oVirt Python SDK.
      To ensure it's installed with the correct version you can create the following task:
      ansible.builtin.pip: name=ovirt-engine-sdk-python version=4.3.0"
'''
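A sketch of the auth dictionary described by this fragment; all values are placeholders:

    auth:
      url: https://server.example.com/ovirt-engine/api
      username: admin@internal
      password: secret
      insecure: true
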
@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type


class ModuleDocFragment(object):

    DOCUMENTATION = r"""
options:
    pritunl_url:
        type: str
        required: true
        description:
            - URL and port of the Pritunl server on which the API is enabled.

    pritunl_api_token:
        type: str
        required: true
        description:
            - API Token of a Pritunl admin user.
            - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.

    pritunl_api_secret:
        type: str
        required: true
        description:
            - API Secret found in Administrators > USERNAME > API Secret.

    validate_certs:
        type: bool
        required: false
        default: true
        description:
            - If certificates should be validated or not.
            - This should never be set to C(false), except if you are very sure that
              your connection to the server cannot be subject to a man-in-the-middle
              attack.
"""
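A usage sketch for these connection options; the module name is an assumption based on the collection's Pritunl modules, and the token and secret are placeholders:

    - name: Query Pritunl user information
      community.general.pritunl_user_info:
        pritunl_url: https://pritunl.example.com
        pritunl_api_token: XXXXXXXXXXXX
        pritunl_api_secret: XXXXXXXXXXXX
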
@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


def dict_filter(sequence):
    '''Convert a list of tuples to a dictionary.

    Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results in ``{1: 2, 'a': 'b'}``
    '''
    return dict(sequence)


class FilterModule(object):
    '''Ansible jinja2 filters'''

    def filters(self):
        return {
            'dict': dict_filter,
        }
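The docstring example above translates directly into a task; a minimal sketch:

    - name: Build a dictionary from a list of key/value pairs
      ansible.builtin.debug:
        msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
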
@@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.errors import AnsibleFilterError
from ansible.module_utils._text import to_native

from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
                                                                             DialectNotAvailableError,
                                                                             CustomDialectFailureError)


def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None):

    dialect_params = {
        "delimiter": delimiter,
        "skipinitialspace": skipinitialspace,
        "strict": strict,
    }

    try:
        dialect = initialize_dialect(dialect, **dialect_params)
    except (CustomDialectFailureError, DialectNotAvailableError) as e:
        raise AnsibleFilterError(to_native(e))

    reader = read_csv(data, dialect, fieldnames)

    data_list = []

    try:
        for row in reader:
            data_list.append(row)
    except CSVError as e:
        raise AnsibleFilterError("Unable to process file: %s" % to_native(e))

    return data_list


class FilterModule(object):

    def filters(self):
        return {
            'from_csv': from_csv
        }
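A usage sketch for the filter above; the inline CSV payload is illustrative:

    - name: Parse inline CSV data into a list of dictionaries
      ansible.builtin.debug:
        msg: "{{ 'name,team\nalice,dev\nbob,ops' | community.general.from_csv }}"
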
@@ -1,97 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.errors import (
    AnsibleError,
    AnsibleFilterError,
    AnsibleFilterTypeError,
)

from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_sequence

try:
    from hashids import Hashids
    HAS_HASHIDS = True
except ImportError:
    HAS_HASHIDS = False


def initialize_hashids(**kwargs):
    if not HAS_HASHIDS:
        raise AnsibleError("The hashids library must be installed in order to use this plugin")

    params = dict((k, v) for k, v in kwargs.items() if v)

    try:
        return Hashids(**params)
    except TypeError as e:
        raise AnsibleFilterError(
            "The provided parameters %s are invalid: %s" % (
                ', '.join(["%s=%s" % (k, v) for k, v in params.items()]),
                to_native(e)
            )
        )


def hashids_encode(nums, salt=None, alphabet=None, min_length=None):
    """Generates a YouTube-like hash from a sequence of ints

       :nums: Sequence of one or more ints to hash
       :salt: String to use as salt when hashing
       :alphabet: String of 16 or more unique characters to produce a hash
       :min_length: Minimum length of hash produced
    """

    hashids = initialize_hashids(
        salt=salt,
        alphabet=alphabet,
        min_length=min_length
    )

    # Handles the case where a single int is not encapsulated in a list or tuple.
    # User convenience seems preferable to strict typing in this case
    # Also avoids obfuscated error messages related to single invalid inputs
    if not is_sequence(nums):
        nums = [nums]

    try:
        hashid = hashids.encode(*nums)
    except TypeError as e:
        raise AnsibleFilterTypeError(
            "Data to encode must be a tuple or list of ints: %s" % to_native(e)
        )

    return hashid


def hashids_decode(hashid, salt=None, alphabet=None, min_length=None):
    """Decodes a YouTube-like hash to a sequence of ints

       :hashid: Hash string to decode
       :salt: String to use as salt when hashing
       :alphabet: String of 16 or more unique characters to produce a hash
       :min_length: Minimum length of hash produced
    """

    hashids = initialize_hashids(
        salt=salt,
        alphabet=alphabet,
        min_length=min_length
    )
    nums = hashids.decode(hashid)
    return list(nums)


class FilterModule(object):

    def filters(self):
        return {
            'hashids_encode': hashids_encode,
            'hashids_decode': hashids_decode,
        }
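A usage sketch for the filter pair above; the salt and the literal hash string are placeholders, and decoding requires the same parameters that were used to encode:

    - name: Encode a list of integers
      ansible.builtin.debug:
        msg: "{{ [1, 2, 3] | community.general.hashids_encode(salt='my secret') }}"

    - name: Decode a hash back to the original integers
      ansible.builtin.debug:
        msg: "{{ 'X68fkp' | community.general.hashids_decode(salt='my secret') }}"
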
@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2020-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import os.path


def path_join(list):
    '''Join list of paths.

    This is a minimal shim for ansible.builtin.path_join included in ansible-base 2.10.
    This should only be called by Ansible 2.9 or earlier. See meta/runtime.yml for details.
    '''
    return os.path.join(*list)


class FilterModule(object):
    '''Ansible jinja2 filters'''

    def filters(self):
        return {
            'path_join': path_join,
        }
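A usage sketch; on Ansible 2.9 this resolves to the shim above, while on ansible-base 2.10 or later the plugin routing shown earlier redirects it to ansible.builtin.path_join:

    - name: Join path components
      ansible.builtin.debug:
        msg: "{{ ['/etc', 'ansible', 'hosts'] | community.general.path_join }}"
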
@@ -1,21 +0,0 @@
# Copyright (C) 2021 Eric Lavarde <elavarde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from distutils.version import LooseVersion


def version_sort(value, reverse=False):
    '''Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10'''
    return sorted(value, key=LooseVersion, reverse=reverse)


class FilterModule(object):
    ''' Version sort filter '''

    def filters(self):
        return {
            'version_sort': version_sort
        }
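A usage sketch showing the loose-version ordering the filter provides:

    - name: Sort versions so that 2.9 comes before 2.10
      ansible.builtin.debug:
        msg: "{{ ['2.10', '2.9', '2.11'] | community.general.version_sort }}"
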
@@ -34,15 +34,18 @@ DOCUMENTATION = r'''
     description: Populate inventory with instances in this region.
     default: []
     type: list
     required: false
   tags:
     description: Populate inventory only with instances which have at least one of the tags listed here.
     default: []
     type: list
     required: false
     version_added: 2.0.0
   types:
     description: Populate inventory with instances with this type.
     default: []
     type: list
     required: false
   strict:
     version_added: 2.0.0
   compose:

@@ -1,950 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2021, Frank Dornheim <dornheim@posteo.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
name: community.general.lxd
|
||||
short_description: Returns Ansible inventory from lxd host
|
||||
description:
|
||||
- Get inventory from the lxd.
|
||||
- Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
|
||||
version_added: "3.0.0"
|
||||
author: "Frank Dornheim (@conloos)"
|
||||
options:
|
||||
plugin:
|
||||
description: Token that ensures this is a source file for the 'lxd' plugin.
|
||||
required: true
|
||||
choices: [ 'community.general.lxd' ]
|
||||
url:
|
||||
description:
|
||||
- The unix domain socket path or the https URL for the lxd server.
|
||||
- Sockets in filesystem have to start with C(unix:).
|
||||
- Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
|
||||
default: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||
type: str
|
||||
client_key:
|
||||
description:
|
||||
- The client certificate key file path.
|
||||
aliases: [ key_file ]
|
||||
default: $HOME/.config/lxc/client.key
|
||||
type: path
|
||||
client_cert:
|
||||
description:
|
||||
- The client certificate file path.
|
||||
aliases: [ cert_file ]
|
||||
default: $HOME/.config/lxc/client.crt
|
||||
type: path
|
||||
trust_password:
|
||||
description:
|
||||
- The client trusted password.
|
||||
- You need to set this password on the lxd server before
|
||||
running this module using the following command
|
||||
C(lxc config set core.trust_password <some random password>)
|
||||
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
|
||||
- If I(trust_password) is set, this module send a request for authentication before sending any requests.
|
||||
type: str
|
||||
state:
|
||||
description: Filter the container according to the current status.
|
||||
type: str
|
||||
default: none
|
||||
choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
|
||||
prefered_container_network_interface:
|
||||
description:
|
||||
- If a container has multiple network interfaces, select which one is the prefered as pattern.
|
||||
- Combined with the first number that can be found e.g. 'eth' + 0.
|
||||
type: str
|
||||
default: eth
|
||||
prefered_container_network_family:
|
||||
description:
|
||||
- If a container has multiple network interfaces, which one is the prefered by family.
|
||||
- Specify C(inet) for IPv4 and C(inet6) for IPv6.
|
||||
type: str
|
||||
default: inet
|
||||
choices: [ 'inet', 'inet6' ]
|
||||
groupby:
|
||||
description:
|
||||
- Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid).
|
||||
- See example for syntax.
|
||||
type: json
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# simple lxd.yml
|
||||
plugin: community.general.lxd
|
||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||
|
||||
# simple lxd.yml including filter
|
||||
plugin: community.general.lxd
|
||||
url: unix:/var/snap/lxd/common/lxd/unix.socket
|
||||
state: RUNNING
|
||||
|
||||
# grouping lxd.yml
|
||||
groupby:
|
||||
testpattern:
|
||||
type: pattern
|
||||
attribute: test
|
||||
vlan666:
|
||||
type: vlanid
|
||||
attribute: 666
|
||||
locationBerlin:
|
||||
type: location
|
||||
attribute: Berlin
|
||||
osUbuntu:
|
||||
type: os
|
||||
attribute: ubuntu
|
||||
releaseFocal:
|
||||
type: release
|
||||
attribute: focal
|
||||
releaseBionic:
|
||||
type: release
|
||||
attribute: bionic
|
||||
profileDefault:
|
||||
type: profile
|
||||
attribute: default
|
||||
profileX11:
|
||||
type: profile
|
||||
attribute: x11
|
||||
netRangeIPv4:
|
||||
type: network_range
|
||||
attribute: 10.98.143.0/24
|
||||
netRangeIPv6:
|
||||
type: network_range
|
||||
attribute: fd42:bd00:7b11:2167:216:3eff::/24
|
||||
'''
|
||||
|
||||
import binascii
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
import os
|
||||
import socket
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.module_utils.common.dict_transformations import dict_merge
|
||||
from ansible.errors import AnsibleError, AnsibleParserError
|
||||
from ansible_collections.community.general.plugins.module_utils.compat import ipaddress
|
||||
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin):
|
||||
DEBUG = 4
|
||||
NAME = 'community.general.lxd'
|
||||
SNAP_SOCKET_URL = 'unix:/var/snap/lxd/common/lxd/unix.socket'
|
||||
SOCKET_URL = 'unix:/var/lib/lxd/unix.socket'
|
||||
|
||||
@staticmethod
|
||||
def load_json_data(path):
|
||||
"""Load json data
|
||||
|
||||
Load json data from file
|
||||
|
||||
Args:
|
||||
list(path): Path elements
|
||||
str(file_name): Filename of data
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(json_data): json data"""
|
||||
try:
|
||||
with open(path, 'r') as json_file:
|
||||
return json.load(json_file)
|
||||
except (IOError, json.decoder.JSONDecodeError) as err:
|
||||
raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err)))
|
||||
|
||||
def save_json_data(self, path, file_name=None):
|
||||
"""save data as json
|
||||
|
||||
Save data as json file
|
||||
|
||||
Args:
|
||||
list(path): Path elements
|
||||
str(file_name): Filename of data
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
|
||||
if file_name:
|
||||
path.append(file_name)
|
||||
else:
|
||||
prefix = 'lxd_data-'
|
||||
time_stamp = time.strftime('%Y%m%d-%H%M%S')
|
||||
suffix = '.atd'
|
||||
path.append(prefix + time_stamp + suffix)
|
||||
|
||||
try:
|
||||
cwd = os.path.abspath(os.path.dirname(__file__))
|
||||
with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file:
|
||||
json.dump(self.data, json_file)
|
||||
except IOError as err:
|
||||
raise AnsibleParserError('Could not save data: {0}'.format(to_native(err)))
|
||||
|
||||
def verify_file(self, path):
|
||||
"""Check the config
|
||||
|
||||
Return true/false if the config-file is valid for this plugin
|
||||
|
||||
Args:
|
||||
str(path): path to the config
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
bool(valid): is valid"""
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('lxd.yaml', 'lxd.yml')):
|
||||
valid = True
|
||||
else:
|
||||
self.display.vvv('Inventory source not ending in "lxd.yaml" or "lxd.yml"')
|
||||
return valid
|
||||
|
||||
@staticmethod
|
||||
def validate_url(url):
|
||||
"""validate url
|
||||
|
||||
check whether the url is correctly formatted
|
||||
|
||||
Args:
|
||||
url
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
AnsibleError
|
||||
Returns:
|
||||
bool"""
|
||||
if not isinstance(url, str):
|
||||
return False
|
||||
if not url.startswith(('unix:', 'https:')):
|
||||
raise AnsibleError('URL is malformed: {0}'.format(to_native(url)))
|
||||
return True
|
||||
|
||||
def _connect_to_socket(self):
|
||||
"""connect to lxd socket
|
||||
|
||||
Connect to lxd socket by provided url or defaults
|
||||
|
||||
Args:
|
||||
None
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
AnsibleError
|
||||
Returns:
|
||||
None"""
|
||||
error_storage = {}
|
||||
url_list = [self.get_option('url'), self.SNAP_SOCKET_URL, self.SOCKET_URL]
|
||||
urls = (url for url in url_list if self.validate_url(url))
|
||||
for url in urls:
|
||||
try:
|
||||
socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug)
|
||||
return socket_connection
|
||||
except LXDClientException as err:
|
||||
error_storage[url] = err
|
||||
raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage)))
|
||||
|
||||
def _get_networks(self):
|
||||
"""Get Networknames
|
||||
|
||||
Returns all network config names
|
||||
|
||||
Args:
|
||||
None
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
list(names): names of all network_configs"""
|
||||
# e.g. {'type': 'sync',
|
||||
# 'status': 'Success',
|
||||
# 'status_code': 200,
|
||||
# 'operation': '',
|
||||
# 'error_code': 0,
|
||||
# 'error': '',
|
||||
# 'metadata': ['/1.0/networks/lxdbr0']}
|
||||
network_configs = self.socket.do('GET', '/1.0/networks')
|
||||
return [m.split('/')[3] for m in network_configs['metadata']]
|
||||
|
||||
def _get_containers(self):
|
||||
"""Get Containernames
|
||||
|
||||
Returns all containernames
|
||||
|
||||
Args:
|
||||
None
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
list(names): names of all containers"""
|
||||
# e.g. {'type': 'sync',
|
||||
# 'status': 'Success',
|
||||
# 'status_code': 200,
|
||||
# 'operation': '',
|
||||
# 'error_code': 0,
|
||||
# 'error': '',
|
||||
# 'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']}
|
||||
containers = self.socket.do('GET', '/1.0/containers')
|
||||
return [m.split('/')[3] for m in containers['metadata']]
|
||||
|
||||
def _get_config(self, branch, name):
|
||||
"""Get inventory of container
|
||||
|
||||
Get config of container
|
||||
|
||||
Args:
|
||||
str(branch): Name oft the API-Branch
|
||||
str(name): Name of Container
|
||||
Kwargs:
|
||||
None
|
||||
Source:
|
||||
https://github.com/lxc/lxd/blob/master/doc/rest-api.md
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(config): Config of the container"""
|
||||
config = {}
|
||||
if isinstance(branch, (tuple, list)):
|
||||
config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
|
||||
else:
|
||||
config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
|
||||
return config
|
||||
|
||||
def get_container_data(self, names):
|
||||
"""Create Inventory of the container
|
||||
|
||||
Iterate through the different branches of the containers and collect Informations.
|
||||
|
||||
Args:
|
||||
list(names): List of container names
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# tuple(('instances','metadata/templates')) to get section in branch
|
||||
# e.g. /1.0/instances/<name>/metadata/templates
|
||||
branches = ['containers', ('instances', 'state')]
|
||||
container_config = {}
|
||||
for branch in branches:
|
||||
for name in names:
|
||||
container_config['containers'] = self._get_config(branch, name)
|
||||
self.data = dict_merge(container_config, self.data)
|
||||
|
||||
def get_network_data(self, names):
|
||||
"""Create Inventory of the container
|
||||
|
||||
Iterate through the different branches of the containers and collect Informations.
|
||||
|
||||
Args:
|
||||
list(names): List of container names
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# tuple(('instances','metadata/templates')) to get section in branch
|
||||
# e.g. /1.0/instances/<name>/metadata/templates
|
||||
branches = [('networks', 'state')]
|
||||
network_config = {}
|
||||
for branch in branches:
|
||||
for name in names:
|
||||
try:
|
||||
network_config['networks'] = self._get_config(branch, name)
|
||||
except LXDClientException:
|
||||
network_config['networks'] = {name: None}
|
||||
self.data = dict_merge(network_config, self.data)
|
||||
|
||||
def extract_network_information_from_container_config(self, container_name):
|
||||
"""Returns the network interface configuration
|
||||
|
||||
Returns the network ipv4 and ipv6 config of the container without local-link
|
||||
|
||||
Args:
|
||||
str(container_name): Name oft he container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(network_configuration): network config"""
|
||||
container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name))
|
||||
network_configuration = None
|
||||
if container_network_interfaces:
|
||||
network_configuration = {}
|
||||
gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo']
|
||||
for interface_name in gen_interface_names:
|
||||
gen_address = [address for address in container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
|
||||
network_configuration[interface_name] = []
|
||||
for address in gen_address:
|
||||
address_set = {}
|
||||
address_set['family'] = address.get('family')
|
||||
address_set['address'] = address.get('address')
|
||||
address_set['netmask'] = address.get('netmask')
|
||||
address_set['combined'] = address.get('address') + '/' + address.get('netmask')
|
||||
network_configuration[interface_name].append(address_set)
|
||||
return network_configuration
|
||||
|
||||
def get_prefered_container_network_interface(self, container_name):
|
||||
"""Helper to get the prefered interface of thr container
|
||||
|
||||
Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'.
|
||||
|
||||
Args:
|
||||
str(containe_name): name of container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
str(prefered_interface): None or interface name"""
|
||||
container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
||||
prefered_interface = None # init
|
||||
if container_network_interfaces: # container have network interfaces
|
||||
# generator if interfaces which start with the desired pattern
|
||||
net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)]
|
||||
selected_interfaces = [] # init
|
||||
for interface in net_generator:
|
||||
selected_interfaces.append(interface)
|
||||
if len(selected_interfaces) > 0:
|
||||
prefered_interface = sorted(selected_interfaces)[0]
|
||||
return prefered_interface
|
||||
|
||||
def get_container_vlans(self, container_name):
|
||||
"""Get VLAN(s) from container
|
||||
|
||||
Helper to get the VLAN_ID from the container
|
||||
|
||||
Args:
|
||||
str(containe_name): name of container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# get network device configuration and store {network: vlan_id}
|
||||
network_vlans = {}
|
||||
for network in self._get_data_entry('networks'):
|
||||
if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
|
||||
network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
|
||||
|
||||
# get networkdevices of container and return
|
||||
# e.g.
|
||||
# "eth0":{ "name":"eth0",
|
||||
# "network":"lxdbr0",
|
||||
# "type":"nic"},
|
||||
vlan_ids = {}
|
||||
devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name)))
|
||||
for device in devices:
|
||||
if 'network' in devices[device]:
|
||||
if devices[device]['network'] in network_vlans:
|
||||
vlan_ids[devices[device].get('network')] = network_vlans[devices[device].get('network')]
|
||||
return vlan_ids if vlan_ids else None
|
||||
|
||||
def _get_data_entry(self, path, data=None, delimiter='/'):
|
||||
"""Helper to get data
|
||||
|
||||
Helper to get data from self.data by a path like 'path/to/target'
|
||||
Attention: Escaping of the delimiter is not (yet) provided.
|
||||
|
||||
Args:
|
||||
str(path): path to nested dict
|
||||
Kwargs:
|
||||
dict(data): datastore
|
||||
str(delimiter): delimiter in Path.
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
*(value)"""
|
||||
try:
|
||||
if not data:
|
||||
data = self.data
|
||||
if delimiter in path:
|
||||
path = path.split(delimiter)
|
||||
|
||||
if isinstance(path, list) and len(path) > 1:
|
||||
data = data[path.pop(0)]
|
||||
path = delimiter.join(path)
|
||||
return self._get_data_entry(path, data, delimiter) # recursion
|
||||
return data[path]
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
def _set_data_entry(self, container_name, key, value, path=None):
|
||||
"""Helper to save data
|
||||
|
||||
Helper to save the data in self.data
|
||||
Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten.
|
||||
|
||||
Args:
|
||||
str(container_name): name of container
|
||||
str(key): same as dict
|
||||
*(value): same as dict
|
||||
Kwargs:
|
||||
str(path): path to branch-part
|
||||
Raises:
|
||||
AnsibleParserError
|
||||
Returns:
|
||||
None"""
|
||||
if not path:
|
||||
path = self.data['inventory']
|
||||
if container_name not in path:
|
||||
path[container_name] = {}
|
||||
|
||||
try:
|
||||
if isinstance(value, dict) and key in path[container_name]:
|
||||
path[container_name] = dict_merge(value, path[container_name][key])
|
||||
else:
|
||||
path[container_name][key] = value
|
||||
except KeyError as err:
|
||||
raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
|
||||
|
||||
def extract_information_from_container_configs(self):
|
||||
"""Process configuration information
|
||||
|
||||
Preparation of the data
|
||||
|
||||
Args:
|
||||
dict(configs): Container configurations
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# create branch "inventory"
|
||||
if 'inventory' not in self.data:
|
||||
self.data['inventory'] = {}
|
||||
|
||||
for container_name in self.data['containers']:
|
||||
self._set_data_entry(container_name, 'os', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.os'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'release', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.release'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'version', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/image.version'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'profile', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/profiles'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'location', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/location'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'state', self._get_data_entry(
|
||||
'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name)))
|
||||
self._set_data_entry(container_name, 'network_interfaces', self.extract_network_information_from_container_config(container_name))
|
||||
self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name))
|
||||
self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name))
|
||||
|
||||
def build_inventory_network(self, container_name):
|
||||
"""Add the network interfaces of the container to the inventory
|
||||
|
||||
Logic:
|
||||
- if the container have no interface -> 'ansible_connection: local'
|
||||
- get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
- first Interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
|
||||
|
||||
Args:
|
||||
str(container_name): name of container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
|
||||
def interface_selection(container_name):
|
||||
"""Select container Interface for inventory
|
||||
|
||||
Logic:
|
||||
- get preferred_interface & prefered_container_network_family -> str(IP)
|
||||
- first Interface from: network_interfaces prefered_container_network_family -> str(IP)
|
||||
|
||||
Args:
|
||||
str(container_name): name of container
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
dict(interface_name: ip)"""
|
||||
prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)) # name or None
|
||||
prefered_container_network_family = self.prefered_container_network_family
|
||||
|
||||
ip_address = ''
|
||||
if prefered_interface:
|
||||
interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface))
|
||||
for config in interface:
|
||||
if config['family'] == prefered_container_network_family:
|
||||
ip_address = config['address']
|
||||
break
|
||||
else:
|
||||
interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name))
|
||||
for config in interface:
|
||||
if config['family'] == prefered_container_network_family:
|
||||
ip_address = config['address']
|
||||
break
|
||||
return ip_address
|
||||
|
||||
if self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)): # container have network interfaces
|
||||
if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)): # container have a preferred interface
|
||||
self.inventory.set_variable(container_name, 'ansible_connection', 'ssh')
|
||||
self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name))
|
||||
else:
|
||||
self.inventory.set_variable(container_name, 'ansible_connection', 'local')
|
||||
|
||||
def build_inventory_hosts(self):
|
||||
"""Build host-part dynamic inventory
|
||||
|
||||
Build the host-part of the dynamic inventory.
|
||||
Add Hosts and host_vars to the inventory.
|
||||
|
||||
Args:
|
||||
None
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
for container_name in self.data['inventory']:
|
||||
# Only consider containers that match the "state" filter, if self.state is not None
|
||||
if self.filter:
|
||||
if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower():
|
||||
continue
|
||||
# add container
|
||||
self.inventory.add_host(container_name)
|
||||
# add network informations
|
||||
self.build_inventory_network(container_name)
|
||||
# add os
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower())
|
||||
# add release
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower())
|
||||
# add profile
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name)))
|
||||
# add state
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower())
|
||||
# add location information
|
||||
if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none": # wrong type by lxd 'none' != 'None'
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(container_name)))
|
||||
# add VLAN_ID information
|
||||
if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)):
|
||||
self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)))
|
||||
|
||||
def build_inventory_groups_location(self, group_name):
|
||||
"""create group by attribute: location
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
for container_name in self.inventory.hosts:
|
||||
if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars():
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_pattern(self, group_name):
|
||||
"""create group by name pattern
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
regex_pattern = self.groupby[group_name].get('attribute')
|
||||
|
||||
for container_name in self.inventory.hosts:
|
||||
result = re.search(regex_pattern, container_name)
|
||||
if result:
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_network_range(self, group_name):
|
||||
"""check if IP is in network-class
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
try:
|
||||
network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute')))
|
||||
except ValueError as err:
|
||||
raise AnsibleParserError(
|
||||
'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
|
||||
|
||||
for container_name in self.inventory.hosts:
|
||||
if self.data['inventory'][container_name].get('network_interfaces') is not None:
|
||||
for interface in self.data['inventory'][container_name].get('network_interfaces'):
|
||||
for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]:
|
||||
try:
|
||||
address = ipaddress.ip_address(to_text(interface_family['address']))
|
||||
if address.version == network.version and address in network:
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
except ValueError:
|
||||
# Ignore invalid IP addresses returned by lxd
|
||||
pass
|
||||
|
||||
def build_inventory_groups_os(self, group_name):
|
||||
"""create group by attribute: os
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
Noneself.data['inventory'][container_name][interface]
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts
|
||||
if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_release(self, group_name):
|
||||
"""create group by attribute: release
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts
|
||||
if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_profile(self, group_name):
|
||||
"""create group by attribute: profile
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'):
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
def build_inventory_groups_vlanid(self, group_name):
|
||||
"""create group by attribute: vlanid
|
||||
|
||||
Args:
|
||||
str(group_name): Group name
|
||||
Kwargs:
|
||||
None
|
||||
Raises:
|
||||
None
|
||||
Returns:
|
||||
None"""
|
||||
# maybe we just want to expand one group
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
|
||||
gen_containers = [
|
||||
container_name for container_name in self.inventory.hosts.keys()
|
||||
if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()]
|
||||
for container_name in gen_containers:
|
||||
if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values():
|
||||
self.inventory.add_child(group_name, container_name)
|
||||
|
||||
    def build_inventory_groups(self):
        """Build group-part dynamic inventory

        Build the group-part of the dynamic inventory.
        Add groups to the inventory.

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""

        def group_type(group_name):
            """create groups defined by lxd.yml or defaultvalues

            create groups defined by lxd.yml or defaultvalues
            supported:
                * 'location'
                * 'pattern'
                * 'network_range'
                * 'os'
                * 'release'
                * 'profile'
                * 'vlanid'

            Args:
                str(group_name): Group name
            Kwargs:
                None
            Raises:
                None
            Returns:
                None"""

            # For compatibility with Python 2, map() is not used here
            if self.groupby[group_name].get('type') == 'location':
                self.build_inventory_groups_location(group_name)
            elif self.groupby[group_name].get('type') == 'pattern':
                self.build_inventory_groups_pattern(group_name)
            elif self.groupby[group_name].get('type') == 'network_range':
                self.build_inventory_groups_network_range(group_name)
            elif self.groupby[group_name].get('type') == 'os':
                self.build_inventory_groups_os(group_name)
            elif self.groupby[group_name].get('type') == 'release':
                self.build_inventory_groups_release(group_name)
            elif self.groupby[group_name].get('type') == 'profile':
                self.build_inventory_groups_profile(group_name)
            elif self.groupby[group_name].get('type') == 'vlanid':
                self.build_inventory_groups_vlanid(group_name)
            else:
                raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))

        if self.groupby:
            for group_name in self.groupby:
                if not group_name.isalnum():
                    raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name)))
                group_type(group_name)

    def build_inventory(self):
        """Build dynamic inventory

        Build the dynamic inventory.

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""

        self.build_inventory_hosts()
        self.build_inventory_groups()

    def _populate(self):
        """Return the hosts and groups

        Returns the processed container configurations from the lxd import

        Args:
            None
        Kwargs:
            None
        Raises:
            None
        Returns:
            None"""

        if len(self.data) == 0:  # If no data was injected by the unit tests, open the socket
            self.socket = self._connect_to_socket()
            self.get_container_data(self._get_containers())
            self.get_network_data(self._get_networks())

        self.extract_information_from_container_configs()

        # self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))

        self.build_inventory()

    def parse(self, inventory, loader, path, cache):
        """Return dynamic inventory from source

        Returns the processed inventory from the lxd import

        Args:
            str(inventory): inventory object with existing data and
                            the methods to add hosts/groups/variables
                            to inventory
            str(loader): Ansible's DataLoader
            str(path): path to the config
            bool(cache): use or avoid caches
        Kwargs:
            None
        Raises:
            AnsibleParserError
        Returns:
            None"""

        super(InventoryModule, self).parse(inventory, loader, path, cache=False)
        # Read the inventory YAML file
        self._read_config_data(path)
        try:
            self.client_key = self.get_option('client_key')
            self.client_cert = self.get_option('client_cert')
            self.debug = self.DEBUG
            self.data = {}  # store for inventory-data
            self.groupby = self.get_option('groupby')
            self.plugin = self.get_option('plugin')
            self.prefered_container_network_family = self.get_option('prefered_container_network_family')
            self.prefered_container_network_interface = self.get_option('prefered_container_network_interface')
            if self.get_option('state').lower() == 'none':  # none in config is str()
                self.filter = None
            else:
                self.filter = self.get_option('state').lower()
            self.trust_password = self.get_option('trust_password')
            self.url = self.get_option('url')
        except Exception as err:
            raise AnsibleParserError(
                'All correct options required: {0}'.format(to_native(err)))
        # Call our internal helper to populate the dynamic inventory
        self._populate()
@@ -71,25 +71,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        self._nmap = None
        super(InventoryModule, self).__init__()

    def _populate(self, hosts):
        # Use constructed if applicable
        strict = self.get_option('strict')

        for host in hosts:
            hostname = host['name']
            self.inventory.add_host(hostname)
            for var, value in host.items():
                self.inventory.set_variable(hostname, var, value)

            # Composed variables
            self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)

            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)

            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)

    def verify_file(self, path):

        valid = False
@@ -101,7 +82,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

        return valid

    def parse(self, inventory, loader, path, cache=True):
    def parse(self, inventory, loader, path, cache=False):

        try:
            self._nmap = get_bin_path('nmap')
@@ -112,101 +93,75 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

        self._read_config_data(path)

        cache_key = self.get_cache_key(path)
        # setup command
        cmd = [self._nmap]
        if not self._options['ports']:
            cmd.append('-sP')

        # cache may be True or False at this point to indicate if the inventory is being refreshed
        # get the user's cache option too to see if we should save the cache if it is changing
        user_cache_setting = self.get_option('cache')
        if self._options['ipv4'] and not self._options['ipv6']:
            cmd.append('-4')
        elif self._options['ipv6'] and not self._options['ipv4']:
            cmd.append('-6')
        elif not self._options['ipv6'] and not self._options['ipv4']:
            raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')

        # read if the user has caching enabled and the cache isn't being refreshed
        attempt_to_read_cache = user_cache_setting and cache
        # update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below
        cache_needs_update = user_cache_setting and not cache
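        # Sketch of the two caching flags above (assuming the `cache` argument and
        # the user's `cache` option behave as in other inventory plugins):
        #   user_cache_setting  cache  -> attempt_to_read_cache  cache_needs_update
        #   True                True   -> True                   False (True on a cache miss)
        #   True                False  -> False                  True
        #   False               any    -> False                  False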
        if self._options['exclude']:
            cmd.append('--exclude')
            cmd.append(','.join(self._options['exclude']))

        cmd.append(self._options['address'])
        try:
            # execute
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))

            # parse results
            host = None
            ip = None
            ports = []

        if attempt_to_read_cache:
            try:
                results = self._cache[cache_key]
            except KeyError:
                # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
                cache_needs_update = True
                t_stdout = to_text(stdout, errors='surrogate_or_strict')
            except UnicodeError as e:
                raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))

        if cache_needs_update:
            # setup command
            cmd = [self._nmap]
            if not self._options['ports']:
                cmd.append('-sP')
            for line in t_stdout.splitlines():
                hits = self.find_host.match(line)
                if hits:
                    if host is not None:
                        self.inventory.set_variable(host, 'ports', ports)

            if self._options['ipv4'] and not self._options['ipv6']:
                cmd.append('-4')
            elif self._options['ipv6'] and not self._options['ipv4']:
                cmd.append('-6')
            elif not self._options['ipv6'] and not self._options['ipv4']:
                raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
                    # if dns only shows arpa, just use ip instead as hostname
                    if hits.group(1).endswith('.in-addr.arpa'):
                        host = hits.group(2)
                    else:
                        host = hits.group(1)

            if self._options['exclude']:
                cmd.append('--exclude')
                cmd.append(','.join(self._options['exclude']))
                    # if no reverse dns exists, just use ip instead as hostname
                    if hits.group(2) is not None:
                        ip = hits.group(2)
                    else:
                        ip = hits.group(1)

            cmd.append(self._options['address'])
            try:
                # execute
                p = Popen(cmd, stdout=PIPE, stderr=PIPE)
                stdout, stderr = p.communicate()
                if p.returncode != 0:
                    raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
                    if host is not None:
                        # update inventory
                        self.inventory.add_host(host)
                        self.inventory.set_variable(host, 'ip', ip)
                        ports = []
                    continue

                # parse results
                host = None
                ip = None
                ports = []
                results = []
                host_ports = self.find_port.match(line)
                if host is not None and host_ports:
                    ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)})
                    continue

                try:
                    t_stdout = to_text(stdout, errors='surrogate_or_strict')
                except UnicodeError as e:
                    raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
                # TODO: parse more data, OS?

                for line in t_stdout.splitlines():
                    hits = self.find_host.match(line)
                    if hits:
                        if host is not None and ports:
                            results[-1]['ports'] = ports
            # if any leftovers
            if host and ports:
                self.inventory.set_variable(host, 'ports', ports)

                        # if dns only shows arpa, just use ip instead as hostname
                        if hits.group(1).endswith('.in-addr.arpa'):
                            host = hits.group(2)
                        else:
                            host = hits.group(1)

                        # if no reverse dns exists, just use ip instead as hostname
                        if hits.group(2) is not None:
                            ip = hits.group(2)
                        else:
                            ip = hits.group(1)

                        if host is not None:
                            # update inventory
                            results.append(dict())
                            results[-1]['name'] = host
                            results[-1]['ip'] = ip
                            ports = []
                        continue

                    host_ports = self.find_port.match(line)
                    if host is not None and host_ports:
                        ports.append({'port': host_ports.group(1),
                                      'protocol': host_ports.group(2),
                                      'state': host_ports.group(3),
                                      'service': host_ports.group(4)})
                        continue

                # if any leftovers
                if host and ports:
                    results[-1]['ports'] = ports

            except Exception as e:
                raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))

            self._cache[cache_key] = results

        self._populate(results)
        except Exception as e:
            raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
@@ -19,7 +19,6 @@ DOCUMENTATION = '''
      - Will retrieve the first network interface with an IP for Proxmox nodes.
      - Can retrieve LXC/QEMU configuration as facts.
    extends_documentation_fragment:
      - constructed
      - inventory_cache
    options:
      plugin:
@@ -70,21 +69,6 @@ DOCUMENTATION = '''
        description: Gather LXC/QEMU configuration facts.
        default: no
        type: bool
      want_proxmox_nodes_ansible_host:
        version_added: 3.0.0
        description:
          - Whether to set C(ansible_host) for proxmox nodes.
          - When set to C(true) (default), will use the first available interface. This can be different from what you expect.
        default: true
        type: bool
      strict:
        version_added: 2.5.0
      compose:
        version_added: 2.5.0
      groups:
        version_added: 2.5.0
      keyed_groups:
        version_added: 2.5.0
'''

EXAMPLES = '''
@@ -94,15 +78,6 @@ url: http://localhost:8006
user: ansible@pve
password: secure
validate_certs: no
keyed_groups:
  - key: proxmox_tags_parsed
    separator: ""
    prefix: group
groups:
  webservers: "'web' in (proxmox_tags_parsed|list)"
  mailservers: "'mail' in (proxmox_tags_parsed|list)"
compose:
  ansible_port: 2222
'''

import re
@@ -111,7 +86,7 @@ from ansible.module_utils.common._collections_compat import MutableMapping
from distutils.version import LooseVersion

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
from ansible.module_utils.six.moves.urllib.parse import urlencode

# 3rd party imports
@@ -124,7 +99,7 @@ except ImportError:
    HAS_REQUESTS = False


class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
class InventoryModule(BaseInventoryPlugin, Cacheable):
    ''' Host inventory parser for ansible using Proxmox as source. '''

    NAME = 'community.general.proxmox'
@@ -231,45 +206,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        except Exception:
            return None

    def _get_agent_network_interfaces(self, node, vmid, vmtype):
        result = []

        try:
            ifaces = self._get_json(
                "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % (
                    self.proxmox_url, node, vmtype, vmid
                )
            )['result']

            if "error" in ifaces:
                if "class" in ifaces["error"]:
                    # This happens on Windows: even though the qemu agent is running, the IP address
                    # cannot be fetched as it is unsupported; a disabled command can also happen.
                    errorClass = ifaces["error"]["class"]
                    if errorClass in ["Unsupported"]:
                        self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported")
                    elif errorClass in ["CommandDisabled"]:
                        self.display.v("Retrieving network interfaces from guest agents has been disabled")
                return result

            for iface in ifaces:
                result.append({
                    'name': iface['name'],
                    'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '',
                    'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else []
                })
        except requests.HTTPError:
            pass

        return result
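    # Illustrative shape of the list returned by _get_agent_network_interfaces()
    # above (values are made up, not taken from this diff):
    #   [{'name': 'eth0',
    #     'mac-address': 'aa:bb:cc:dd:ee:ff',
    #     'ip-addresses': ['192.0.2.10/24']}]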
    def _get_vm_config(self, node, vmid, vmtype, name):
        ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))

        node_key = 'node'
        node_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), node_key.lower()))
        self.inventory.set_variable(name, node_key, node)

        vmid_key = 'vmid'
        vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower()))
        self.inventory.set_variable(name, vmid_key, vmid)
@@ -278,10 +217,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower()))
        self.inventory.set_variable(name, vmtype_key, vmtype)

        plaintext_configs = [
            'tags',
        ]

        for config in ret:
            key = config
            key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower()))
@@ -291,20 +226,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
            if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
                value = ('disk_image=' + value)

            # Additional field containing parsed tags as list
            if config == 'tags':
                parsed_key = self.to_safe('%s%s' % (key, "_parsed"))
                parsed_value = [tag.strip() for tag in value.split(",")]
                self.inventory.set_variable(name, parsed_key, parsed_value)

            # The first field in the agent string tells you whether the agent is enabled
            # the rest of the comma separated string is extra config for the agent
            if config == 'agent' and int(value.split(',')[0]):
                agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
                agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
                if agent_iface_value:
                    self.inventory.set_variable(name, agent_iface_key, agent_iface_value)

            if not (isinstance(value, int) or ',' not in value):
                # split off strings with commas to a dict
                # skip over any keys that cannot be processed
@@ -333,12 +254,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        regex = r"[^A-Za-z0-9\_]"
        return re.sub(regex, "_", word.replace(" ", ""))

    def _apply_constructable(self, name, variables):
        strict = self.get_option('strict')
        self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
        self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
        self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)

    def _populate(self):

        self._get_auth()
@@ -370,9 +285,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
            self.inventory.add_child(nodes_group, node['node'])

            # get node IP address
            if self.get_option("want_proxmox_nodes_ansible_host"):
                ip = self._get_node_ip(node['node'])
                self.inventory.set_variable(node['node'], 'ansible_host', ip)
            ip = self._get_node_ip(node['node'])
            self.inventory.set_variable(node['node'], 'ansible_host', ip)

            # get LXC containers for this node
            node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower()))
@@ -394,8 +308,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
                if self.get_option('want_facts'):
                    self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])

                self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars())

            # get QEMU vm's for this node
            node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
            self.inventory.add_group(node_qemu_group)
@@ -418,8 +330,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
                if self.get_option('want_facts'):
                    self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])

                self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars())

        # gather vm's in pools
        for pool in self._get_pools():
            if pool.get('poolid'):
@@ -429,8 +339,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

                for member in self._get_members_per_pool(pool['poolid']):
                    if member.get('name'):
                        if not member.get('template'):
                            self.inventory.add_child(pool_group, member['name'])
                        self.inventory.add_child(pool_group, member['name'])

    def parse(self, inventory, loader, path, cache=True):
        if not HAS_REQUESTS:
@@ -443,7 +352,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
        self._read_config_data(path)

        # get connection host
        self.proxmox_url = self.get_option('url').rstrip('/')
        self.proxmox_url = self.get_option('url')
        self.proxmox_user = self.get_option('user')
        self.proxmox_password = self.get_option('password')
        self.cache_key = self.get_cache_key(path)
@@ -81,7 +81,7 @@ class LookupModule(LookupBase):
        )
        if args:
            raise AnsibleError(
                "unrecognized arguments to with_sequence: %r" % list(args.keys())
                "unrecognized arguments to with_sequence: %r" % args.keys()
            )

    def run(self, terms, variables=None, **kwargs):

@@ -31,9 +31,7 @@ EXAMPLES = r"""
- name: Template files (explicitly skip directories in order to use the 'src' attribute)
  ansible.builtin.template:
    src: '{{ item.src }}'
    # Your template files should be stored with a .j2 file extension,
    # but should not be deployed with it. splitext|first removes it.
    dest: /web/{{ item.path | splitext | first }}
    dest: /web/{{ item.path }}
    mode: '{{ item.mode }}'
  with_community.general.filetree: web/
  when: item.state == 'file'
@@ -43,7 +41,6 @@ EXAMPLES = r"""
    src: '{{ item.src }}'
    dest: /web/{{ item.path }}
    state: link
    follow: false  # avoid corrupting target files if the link already exists
    force: yes
    mode: '{{ item.mode }}'
  with_community.general.filetree: web/

@@ -63,7 +63,6 @@ import os

from ansible.plugins.lookup import LookupBase
from ansible.utils.cmd_functions import run_cmd
from ansible.module_utils._text import to_text

ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
@@ -79,11 +78,13 @@ class Hiera(object):
        rc, output, err = run_cmd("{0} -c {1} {2}".format(
            ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))

        return to_text(output.strip())
        return output.strip()


class LookupModule(LookupBase):
    def run(self, terms, variables=''):
        hiera = Hiera()
        ret = [hiera.get(terms)]
        ret = []

        ret.append(hiera.get(terms))
        return ret
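# Illustrative task usage of the lookup above (assumes a working hiera setup
# at ANSIBLE_HIERA_CFG; not part of this diff):
#   - ansible.builtin.debug:
#       msg: "{{ lookup('community.general.hiera', 'foo') }}"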
@@ -82,27 +82,6 @@ EXAMPLES = r"""
          | items2dict(key_name='slug',
                       value_name='itemValue'))['password']
      }}

- hosts: localhost
  vars:
    secret: >-
      {{
        lookup(
          'community.general.tss',
          102,
          base_url='https://secretserver.domain.com/SecretServer/',
          username='user.name',
          password='password'
        )
      }}
  tasks:
    - ansible.builtin.debug:
        msg: >
          the password is {{
            (secret['items']
             | items2dict(key_name='slug',
                          value_name='itemValue'))['password']
          }}
"""

from ansible.errors import AnsibleError, AnsibleOptionsError
871 plugins/module_utils/_ovirt.py Normal file
@@ -0,0 +1,871 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import inspect
import os
import time

from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion

from ansible_collections.community.general.plugins.module_utils.cloud import CloudRetry
from ansible.module_utils.common._collections_compat import Mapping

try:
    from enum import Enum  # enum is an ovirtsdk4 requirement
    import ovirtsdk4 as sdk
    import ovirtsdk4.version as sdk_version
    import ovirtsdk4.types as otypes
    HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.3.0')
except ImportError:
    HAS_SDK = False


BYTES_MAP = {
    'kib': 2**10,
    'mib': 2**20,
    'gib': 2**30,
    'tib': 2**40,
    'pib': 2**50,
}
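# For reference: BYTES_MAP follows IEC binary prefixes, so BYTES_MAP['gib']
# == 2**30 == 1073741824; convert_to_bytes() below relies on this mapping.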
def check_sdk(module):
    if not HAS_SDK:
        module.fail_json(
            msg='ovirtsdk4 version 4.3.0 or higher is required for this module'
        )


def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
    """
    Convert SDK Struct type into dictionary.
    """
    res = {}

    def resolve_href(value):
        # Fetch nested values of struct:
        try:
            value = connection.follow_link(value)
        except sdk.Error:
            value = None
        nested_obj = dict(
            (attr, convert_value(getattr(value, attr)))
            for attr in attributes if getattr(value, attr, None) is not None
        )
        nested_obj['id'] = getattr(value, 'id', None)
        nested_obj['href'] = getattr(value, 'href', None)
        return nested_obj

    def remove_underscore(val):
        if val.startswith('_'):
            val = val[1:]
            val = remove_underscore(val)  # keep stripping any further leading underscores
        return val

    def convert_value(value):
        nested = False

        if isinstance(value, sdk.Struct):
            if not fetch_nested or not value.href:
                return get_dict_of_struct(value)
            return resolve_href(value)

        elif isinstance(value, Enum) or isinstance(value, datetime):
            return str(value)
        elif isinstance(value, list) or isinstance(value, sdk.List):
            if isinstance(value, sdk.List) and fetch_nested and value.href:
                try:
                    value = connection.follow_link(value)
                    nested = True
                except sdk.Error:
                    value = []

            ret = []
            for i in value:
                if isinstance(i, sdk.Struct):
                    if not nested and fetch_nested and i.href:
                        ret.append(resolve_href(i))
                    elif not nested:
                        ret.append(get_dict_of_struct(i))
                    else:
                        nested_obj = dict(
                            (attr, convert_value(getattr(i, attr)))
                            for attr in attributes if getattr(i, attr, None)
                        )
                        nested_obj['id'] = getattr(i, 'id', None)
                        ret.append(nested_obj)
                elif isinstance(i, Enum):
                    ret.append(str(i))
                else:
                    ret.append(i)
            return ret
        else:
            return value

    if struct is not None:
        for key, value in struct.__dict__.items():
            if value is None:
                continue

            key = remove_underscore(key)
            res[key] = convert_value(value)

    return res
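# Illustrative behaviour of get_dict_of_struct() above (the struct and its
# values are made up): a struct whose __dict__ holds _name='vm1' and
# _memory=1073741824 is flattened to {'name': 'vm1', 'memory': 1073741824}.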
def engine_version(connection):
    """
    Return string representation of oVirt engine version.
    """
    engine_api = connection.system_service().get()
    engine_version = engine_api.product_info.version
    return '%s.%s' % (engine_version.major, engine_version.minor)


def create_connection(auth):
    """
    Create a connection to the Python SDK, from the task `auth` parameter.
    If the user doesn't have an SSO token, the `auth` dictionary requires the following parameters:
        url, username, password

    If the user has an SSO token, the `auth` dictionary requires the following parameters:
        url, token

    The `ca_file` parameter is mandatory if the user wants to use a secure connection;
    if the user wants to use an insecure connection, it's mandatory to send insecure=True.

    :param auth: dictionary which contains the values needed for connection creation
    :return: Python SDK connection
    """

    url = auth.get('url')
    if url is None and auth.get('hostname') is not None:
        url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname'))

    return sdk.Connection(
        url=url,
        username=auth.get('username'),
        password=auth.get('password'),
        ca_file=auth.get('ca_file', None),
        insecure=auth.get('insecure', False),
        token=auth.get('token', None),
        kerberos=auth.get('kerberos', None),
        headers=auth.get('headers', None),
    )


def convert_to_bytes(param):
    """
    This method converts units to bytes, following the IEC standard.

    :param param: value to be converted
    """
    if param is None:
        return None

    # Get rid of whitespace:
    param = ''.join(param.split())

    # Convert to bytes:
    if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
        return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
    elif param.isdigit():
        return int(param) * 2**10
    else:
        raise ValueError(
            "Unsupported value (IEC supported): '{value}'".format(value=param)
        )
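# Worked examples for convert_to_bytes() above (computed from the code, not
# taken from this diff):
#   convert_to_bytes('1GiB')  -> 1 * 2**30 == 1073741824
#   convert_to_bytes('512')   -> 512 * 2**10 == 524288 (bare digits mean KiB)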
def follow_link(connection, link):
    """
    This method returns the entity of the element which the link points to.

    :param connection: connection to the Python SDK
    :param link: link of the entity
    :return: entity which the link points to
    """

    if link:
        return connection.follow_link(link)
    else:
        return None


def get_link_name(connection, link):
    """
    This method returns the name of the element which the link points to.

    :param connection: connection to the Python SDK
    :param link: link of the entity
    :return: name of the entity which the link points to
    """

    if link:
        return connection.follow_link(link).name
    else:
        return None


def equal(param1, param2, ignore_case=False):
    """
    Compare two parameters and return whether they are equal.
    This function doesn't run the comparison if the first parameter is None.
    With this approach we skip the comparison when the user didn't
    specify the parameter in their task.

    :param param1: user-supplied parameter
    :param param2: value of the entity parameter
    :return: True if the parameters are equal or the first parameter is None, otherwise False
    """
    if param1 is not None:
        if ignore_case:
            return param1.lower() == param2.lower()
        return param1 == param2
    return True
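# e.g. equal(None, 'x') -> True (parameter not specified by the user),
#      equal('A', 'a', ignore_case=True) -> True, equal('A', 'a') -> False.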
def search_by_attributes(service, list_params=None, **kwargs):
    """
    Search for the entity by attributes. Nested entities don't support search
    via REST, so when searching for a nested entity we return all entities
    and filter them by the specified attributes.
    """
    list_params = list_params or {}
    # Check if the 'list' method supports search (look for a search parameter):
    if 'search' in inspect.getargspec(service.list)[0]:
        res = service.list(
            # There must be double quotes around the name, because some oVirt resources can be created with a space in the name.
            search=' and '.join('{0}="{1}"'.format(k, v) for k, v in kwargs.items()),
            **list_params
        )
    else:
        res = [
            e for e in service.list(**list_params) if len([
                k for k, v in kwargs.items() if getattr(e, k, None) == v
            ]) == len(kwargs)
        ]

    res = res or [None]
    return res[0]


def search_by_name(service, name, **kwargs):
    """
    Search for the entity by its name. Nested entities don't support search
    via REST, so when searching for a nested entity we return all entities
    and filter them by name.

    :param service: service of the entity
    :param name: name of the entity
    :return: Entity object returned by the Python SDK
    """
    # Check if the 'list' method supports search (look for a search parameter):
    if 'search' in inspect.getargspec(service.list)[0]:
        res = service.list(
            # There must be double quotes around the name, because some oVirt resources can be created with a space in the name.
            search='name="{name}"'.format(name=name)
        )
    else:
        res = [e for e in service.list() if e.name == name]

    if kwargs:
        res = [
            e for e in service.list() if len([
                k for k, v in kwargs.items() if getattr(e, k, None) == v
            ]) == len(kwargs)
        ]

    res = res or [None]
    return res[0]


def get_entity(service, get_params=None):
    """
    Ignore an SDK Error when getting an entity from a service.
    """
    entity = None
    try:
        if get_params is not None:
            entity = service.get(**get_params)
        else:
            entity = service.get()
    except sdk.Error:
        # We can get a 404 here; we should ignore it, for example
        # when the entity is being removed.
        pass
    return entity


def get_id_by_name(service, name, raise_error=True, ignore_case=False):
    """
    Search for an entity ID by its name.
    """
    entity = search_by_name(service, name)

    if entity is not None:
        return entity.id

    if raise_error:
        raise Exception("Entity '%s' was not found." % name)


def wait(
    service,
    condition,
    fail_condition=lambda e: False,
    timeout=180,
    wait=True,
    poll_interval=3,
):
    """
    Wait until the entity fulfills the expected condition.

    :param service: service of the entity
    :param condition: condition to be fulfilled
    :param fail_condition: if this condition is true, raise Exception
    :param timeout: max time to wait in seconds
    :param wait: if True wait for the condition, if False don't wait
    :param poll_interval: number of seconds to wait between condition checks
    """
    # Wait until the desired state of the entity:
    if wait:
        start = time.time()
        while time.time() < start + timeout:
            # Exit if the condition of the entity is valid:
            entity = get_entity(service)
            if condition(entity):
                return
            elif fail_condition(entity):
                raise Exception("Error while waiting on result state of the entity.")

            # Sleep for `poll_interval` seconds if none of the conditions apply:
            time.sleep(float(poll_interval))

        raise Exception("Timeout exceeded while waiting on result state of the entity.")
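# Minimal usage sketch for wait() above (names are illustrative):
#   wait(service=vm_service,
#        condition=lambda vm: vm and vm.status == otypes.VmStatus.UP,
#        timeout=300)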
def __get_auth_dict():
    OVIRT_URL = os.environ.get('OVIRT_URL')
    OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME')
    OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
    OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
    OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
    OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
    OVIRT_INSECURE = OVIRT_CAFILE is None

    env_vars = None
    if OVIRT_URL is None and OVIRT_HOSTNAME is not None:
        OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME)
    if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
        env_vars = {
            'url': OVIRT_URL,
            'username': OVIRT_USERNAME,
            'password': OVIRT_PASSWORD,
            'insecure': OVIRT_INSECURE,
            'token': OVIRT_TOKEN,
            'ca_file': OVIRT_CAFILE,
        }
    if env_vars is not None:
        auth = dict(default=env_vars, type='dict')
    else:
        auth = dict(required=True, type='dict')

    return auth


def ovirt_info_full_argument_spec(**kwargs):
    """
    Extend the parameters of an info module with parameters which are common to all
    oVirt info modules.

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    spec = dict(
        auth=__get_auth_dict(),
        fetch_nested=dict(default=False, type='bool'),
        nested_attributes=dict(type='list', default=list()),
    )
    spec.update(kwargs)
    return spec


# Left for third-party module compatibility
def ovirt_facts_full_argument_spec(**kwargs):
    """
    This is deprecated. Please use ovirt_info_full_argument_spec instead!

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    return ovirt_info_full_argument_spec(**kwargs)


def ovirt_full_argument_spec(**kwargs):
    """
    Extend the parameters of a module with parameters which are common to all oVirt modules.

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    spec = dict(
        auth=__get_auth_dict(),
        timeout=dict(default=180, type='int'),
        wait=dict(default=True, type='bool'),
        poll_interval=dict(default=3, type='int'),
        fetch_nested=dict(default=False, type='bool'),
        nested_attributes=dict(type='list', default=list()),
    )
    spec.update(kwargs)
    return spec


def check_params(module):
    """
    Most modules must have either `name` or `id` specified.
    """
    if module.params.get('name') is None and module.params.get('id') is None:
        module.fail_json(msg='"name" or "id" is required')


def engine_supported(connection, version):
    return LooseVersion(engine_version(connection)) >= LooseVersion(version)


def check_support(version, connection, module, params):
    """
    Check if the parameters used by the user are supported by the oVirt Python SDK
    and the oVirt engine.
    """
    api_version = LooseVersion(engine_version(connection))
    version = LooseVersion(version)
    for param in params:
        if module.params.get(param) is not None:
            return LooseVersion(sdk_version.VERSION) >= version and api_version >= version

    return True
class BaseModule(object):
    """
    This is the base class for oVirt modules. oVirt modules should inherit from this
    class and override methods to customize the specific needs of the module.
    The only abstract method of this class is `build_entity`, which must
    be implemented in the child class.
    """
    __metaclass__ = ABCMeta

    def __init__(self, connection, module, service, changed=False):
        self._connection = connection
        self._module = module
        self._service = service
        self._changed = changed
        self._diff = {'after': dict(), 'before': dict()}

    @property
    def changed(self):
        return self._changed

    @changed.setter
    def changed(self, changed):
        if not self._changed:
            self._changed = changed

    @abstractmethod
    def build_entity(self):
        """
        This method should return the oVirt Python SDK type which we want to
        create or update, initialized by the values passed by the Ansible module.

        For example, if we want to create a VM, we would return the following:
            types.Vm(name=self._module.params['vm_name'])

        :return: Specific instance of sdk.Struct.
        """
        pass

    def param(self, name, default=None):
        """
        Return a module parameter specified by its name.
        """
        return self._module.params.get(name, default)

    def update_check(self, entity):
        """
        This method checks whether the entity values are the same as the values
        passed to the Ansible module. By default we don't compare any values.

        :param entity: Entity we want to compare with the Ansible module values.
        :return: True if the values are the same, so we don't need to update the entity.
        """
        return True

    def pre_create(self, entity):
        """
        This method is called right before the entity is created.

        :param entity: Entity to be created or updated.
        """
        pass

    def post_create(self, entity):
        """
        This method is called right after the entity is created.

        :param entity: Entity which was created.
        """
        pass

    def post_update(self, entity):
        """
        This method is called right after the entity is updated.

        :param entity: Entity which was updated.
        """
        pass

    def diff_update(self, after, update):
        for k, v in update.items():
            if isinstance(v, Mapping):
                after[k] = self.diff_update(after.get(k, dict()), v)
            else:
                after[k] = update[k]
        return after

    def create(
        self,
        entity=None,
        result_state=None,
        fail_condition=lambda e: False,
        search_params=None,
        update_params=None,
        _wait=None,
        force_create=False,
        **kwargs
    ):
        """
        Method which is called when the state of the entity is 'present'. If the user
        doesn't provide the `entity` parameter, the entity is searched for using
        the `search_params` parameter. If the entity is found it's updated; whether
        the entity should be updated is checked by the `update_check` method.
        The corresponding updated entity is built by the `build_entity` method.

        A function executed after the entity is created can optionally be specified
        in the `post_create` parameter. A function executed after the entity is updated
        can optionally be specified in the `post_update` parameter.

        :param entity: Entity we want to update, if it exists.
        :param result_state: State which the entity should have in order to finish the task.
        :param fail_condition: Function which checks for an incorrect state of the entity; if it returns `True`, an Exception is raised.
        :param search_params: Dictionary of parameters to be used for the search.
        :param update_params: The params which should be passed to the update method.
        :param kwargs: Additional parameters passed when creating the entity.
        :return: Dictionary with values returned by the Ansible module.
        """
        if entity is None and not force_create:
            entity = self.search_entity(search_params)

        self.pre_create(entity)

        if entity:
            # Entity exists, so update it:
            entity_service = self._service.service(entity.id)
            if not self.update_check(entity):
                new_entity = self.build_entity()
                if not self._module.check_mode:
                    update_params = update_params or {}
                    updated_entity = entity_service.update(
                        new_entity,
                        **update_params
                    )
                    self.post_update(entity)

                # Update diffs only if the user specified the --diff parameter,
                # so we don't needlessly overload the API:
                if self._module._diff:
                    before = get_dict_of_struct(
                        entity,
                        self._connection,
                        fetch_nested=True,
                        attributes=['name'],
                    )
                    after = before.copy()
                    self.diff_update(after, get_dict_of_struct(new_entity))
                    self._diff['before'] = before
                    self._diff['after'] = after

                self.changed = True
        else:
            # Entity doesn't exist, so create it:
            if not self._module.check_mode:
                entity = self._service.add(
                    self.build_entity(),
                    **kwargs
                )
                self.post_create(entity)
            self.changed = True

        if not self._module.check_mode:
            # Wait for the entity to be created and to be in the defined state:
            entity_service = self._service.service(entity.id)

            def state_condition(entity):
                return entity

            if result_state:

                def state_condition(entity):
                    return entity and entity.status == result_state

            wait(
                service=entity_service,
                condition=state_condition,
                fail_condition=fail_condition,
                wait=_wait if _wait is not None else self._module.params['wait'],
                timeout=self._module.params['timeout'],
                poll_interval=self._module.params['poll_interval'],
            )

        return {
            'changed': self.changed,
            'id': getattr(entity, 'id', None),
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
            'diff': self._diff,
        }
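    # Illustrative call flow for create() above (the subclass name is hypothetical):
    #   ret = MyEntityModule(connection, module, service).create(
    #       search_params={'name': module.params['name']})
    #   module.exit_json(**ret)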
    def pre_remove(self, entity):
        """
        This method is called right before the entity is removed.

        :param entity: Entity which we want to remove.
        """
        pass

    def entity_name(self, entity):
        return "{e_type} '{e_name}'".format(
            e_type=type(entity).__name__.lower(),
            e_name=getattr(entity, 'name', None),
        )

    def remove(self, entity=None, search_params=None, **kwargs):
        """
        Method which is called when the state of the entity is 'absent'. If the user
        doesn't provide the `entity` parameter, the entity is searched for using
        the `search_params` parameter. If the entity is found, it's removed.

        A function executed before the removal can optionally be specified
        in the `pre_remove` parameter.

        :param entity: Entity we want to remove.
        :param search_params: Dictionary of parameters to be used for the search.
        :param kwargs: Additional parameters passed when removing the entity.
        :return: Dictionary with values returned by the Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        if entity is None:
            return {
                'changed': self.changed,
                'msg': "Entity wasn't found."
            }

        self.pre_remove(entity)

        entity_service = self._service.service(entity.id)
        if not self._module.check_mode:
            entity_service.remove(**kwargs)
            wait(
                service=entity_service,
                condition=lambda entity: not entity,
                wait=self._module.params['wait'],
                timeout=self._module.params['timeout'],
                poll_interval=self._module.params['poll_interval'],
            )
        self.changed = True

        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
        }

    def action(
        self,
        action,
        entity=None,
        action_condition=lambda e: e,
        wait_condition=lambda e: e,
        fail_condition=lambda e: False,
        pre_action=lambda e: e,
        post_action=lambda e: None,
        search_params=None,
        **kwargs
    ):
        """
        This method is executed when we want to change the state of some oVirt
        entity. The action to be executed on the oVirt service is specified by the
        `action` parameter. Whether the action should be executed can be
        specified by passing the `action_condition` parameter. The state which the
        entity should be in after execution of the action can be specified
        by the `wait_condition` parameter.

        A function executed before an action on the entity can optionally be specified
        in the `pre_action` parameter. A function executed after an action on the entity can
        optionally be specified in the `post_action` parameter.

        :param action: Action which should be executed by the service on the entity.
        :param entity: Entity we want to run the action on.
        :param action_condition: Function which is executed when checking if the action should be executed.
        :param fail_condition: Function which checks for an incorrect state of the entity; if it returns `True`, an Exception is raised.
        :param wait_condition: Function which is executed when waiting on the result state.
        :param pre_action: Function which is executed before running the action.
        :param post_action: Function which is executed after running the action.
        :param search_params: Dictionary of parameters to be used for the search.
        :param kwargs: Additional parameters passed to the action.
        :return: Dictionary with values returned by the Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        entity = pre_action(entity)

        if entity is None:
            self._module.fail_json(
                msg="Entity not found, can't run action '{0}'.".format(
                    action
                )
            )

        entity_service = self._service.service(entity.id)
        entity = entity_service.get()
        if action_condition(entity):
            if not self._module.check_mode:
                getattr(entity_service, action)(**kwargs)
            self.changed = True

        post_action(entity)

        wait(
            service=self._service.service(entity.id),
            condition=wait_condition,
            fail_condition=fail_condition,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
            poll_interval=self._module.params['poll_interval'],
        )
        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
            'diff': self._diff,
        }

    def wait_for_import(self, condition=lambda e: True):
        if self._module.params['wait']:
            start = time.time()
            timeout = self._module.params['timeout']
            poll_interval = self._module.params['poll_interval']
            while time.time() < start + timeout:
                entity = self.search_entity()
                if entity and condition(entity):
                    return entity
                time.sleep(poll_interval)

    def search_entity(self, search_params=None, list_params=None):
        """
        Always try to search by `ID` first; if the ID isn't specified,
        check if the user constructed a special search in `search_params`,
        otherwise search by `name`.
        """
        entity = None

        if 'id' in self._module.params and self._module.params['id'] is not None:
            entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
        elif search_params is not None:
            entity = search_by_attributes(self._service, list_params=list_params, **search_params)
        elif self._module.params.get('name') is not None:
            entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])

        return entity

    def _get_major(self, full_version):
        if full_version is None or full_version == "":
            return None
        if isinstance(full_version, otypes.Version):
            return int(full_version.major)
        return int(full_version.split('.')[0])

    def _get_minor(self, full_version):
        if full_version is None or full_version == "":
            return None
        if isinstance(full_version, otypes.Version):
            return int(full_version.minor)
        return int(full_version.split('.')[1])


def _sdk4_error_maybe():
    """
    Allow for ovirtsdk4 not being installed.
    """
    if HAS_SDK:
        return sdk.Error
    return type(None)


class OvirtRetry(CloudRetry):
    base_class = _sdk4_error_maybe()

    @staticmethod
    def status_code_from_exception(error):
        return error.code

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        # This is a list of error codes to retry.
        retry_on = [
            # HTTP status: Conflict
            409,
        ]
        if catch_extra_error_codes:
            retry_on.extend(catch_extra_error_codes)

        return response_code in retry_on
@@ -1,67 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
|
||||
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import csv
|
||||
from io import BytesIO, StringIO
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six import PY3
|
||||
|
||||
|
||||
class CustomDialectFailureError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class DialectNotAvailableError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
CSVError = csv.Error
|
||||
|
||||
|
||||
def initialize_dialect(dialect, **kwargs):
|
||||
# Add Unix dialect from Python 3
|
||||
class unix_dialect(csv.Dialect):
|
||||
"""Describe the usual properties of Unix-generated CSV files."""
|
||||
delimiter = ','
|
||||
quotechar = '"'
|
||||
doublequote = True
|
||||
skipinitialspace = False
|
||||
lineterminator = '\n'
|
||||
quoting = csv.QUOTE_ALL
|
||||
|
||||
csv.register_dialect("unix", unix_dialect)
|
||||
|
||||
if dialect not in csv.list_dialects():
|
||||
raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect)
|
||||
|
||||
# Create a dictionary from only set options
|
||||
dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)
|
||||
if dialect_params:
|
||||
try:
|
||||
csv.register_dialect('custom', dialect, **dialect_params)
|
||||
except TypeError as e:
|
||||
raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e))
|
||||
dialect = 'custom'
|
||||
|
||||
return dialect
|
||||
|
||||
|
||||
def read_csv(data, dialect, fieldnames=None):
|
||||
|
||||
data = to_native(data, errors='surrogate_or_strict')
|
||||
|
||||
if PY3:
|
||||
fake_fh = StringIO(data)
|
||||
else:
|
||||
fake_fh = BytesIO(data)
|
||||
|
||||
reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
|
||||
|
||||
return reader
|
||||
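# Illustrative use of the helpers above (assuming the data carries a header row):
#   dialect = initialize_dialect('unix')
#   for row in read_csv('name,age\nalice,30', dialect):
#       print(row['name'])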
@@ -1,234 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
class GandiLiveDNSAPI(object):
|
||||
|
||||
api_endpoint = 'https://api.gandi.net/v5/livedns'
|
||||
changed = False
|
||||
|
||||
error_strings = {
|
||||
400: 'Bad request',
|
||||
401: 'Permission denied',
|
||||
404: 'Resource not found',
|
||||
}
|
||||
|
||||
attribute_map = {
|
||||
'record': 'rrset_name',
|
||||
'type': 'rrset_type',
|
||||
'ttl': 'rrset_ttl',
|
||||
'values': 'rrset_values'
|
||||
}
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.api_key = module.params['api_key']
|
||||
|
||||
def _build_error_message(self, module, info):
|
||||
s = ''
|
||||
body = info.get('body')
|
||||
if body:
|
||||
errors = module.from_json(body).get('errors')
|
||||
if errors:
|
||||
error = errors[0]
|
||||
name = error.get('name')
|
||||
if name:
|
||||
s += '{0} :'.format(name)
|
||||
description = error.get('description')
|
||||
if description:
|
||||
s += description
|
||||
return s
|
||||
|
||||
def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True):
|
||||
headers = {'Authorization': 'Apikey {0}'.format(self.api_key),
|
||||
'Content-Type': 'application/json'}
|
||||
data = None
|
||||
if payload:
|
||||
try:
|
||||
data = json.dumps(payload)
|
||||
except Exception as e:
|
||||
self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
|
||||
|
||||
resp, info = fetch_url(self.module,
|
||||
self.api_endpoint + api_call,
|
||||
headers=headers,
|
||||
data=data,
|
||||
method=method)
|
||||
|
||||
error_msg = ''
|
||||
if info['status'] >= 400 and (info['status'] != 404 or error_on_404):
|
||||
err_s = self.error_strings.get(info['status'], '')
|
||||
|
||||
error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info))
|
||||
|
||||
result = None
|
||||
try:
|
||||
content = resp.read()
|
||||
except AttributeError:
|
||||
content = None
|
||||
|
||||
if content:
|
||||
try:
|
||||
result = json.loads(to_text(content, errors='surrogate_or_strict'))
|
||||
except (getattr(json, 'JSONDecodeError', ValueError)) as e:
|
||||
error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
|
||||
|
||||
if error_msg:
|
||||
self.module.fail_json(msg=error_msg)
|
||||
|
||||
return result, info['status']
|
||||
|
||||
def build_result(self, result, domain):
|
||||
if result is None:
|
||||
return None
|
||||
|
||||
res = {}
|
||||
for k in self.attribute_map:
|
||||
v = result.get(self.attribute_map[k], None)
|
||||
if v is not None:
|
||||
if k == 'record' and v == '@':
|
||||
v = ''
|
||||
res[k] = v
|
||||
|
||||
res['domain'] = domain
|
||||
|
||||
return res
|
||||
|
||||
def build_results(self, results, domain):
|
||||
if results is None:
|
||||
return []
|
||||
return [self.build_result(r, domain) for r in results]
|
||||
|
||||
def get_records(self, record, type, domain):
|
||||
url = '/domains/%s/records' % (domain)
|
||||
if record:
|
||||
url += '/%s' % (record)
|
||||
if type:
|
||||
url += '/%s' % (type)
|
||||
|
||||
records, status = self._gandi_api_call(url, error_on_404=False)
|
||||
|
||||
if status == 404:
|
||||
return []
|
||||
|
||||
if not isinstance(records, list):
|
||||
records = [records]
|
||||
|
||||
# filter by type if record is not set
|
||||
if not record and type:
|
||||
records = [r
|
||||
for r in records
|
||||
if r['rrset_type'] == type]
|
||||
|
||||
return records
|
||||
|
||||
def create_record(self, record, type, values, ttl, domain):
|
||||
url = '/domains/%s/records' % (domain)
|
||||
new_record = {
|
||||
'rrset_name': record,
|
||||
'rrset_type': type,
|
||||
'rrset_values': values,
|
||||
'rrset_ttl': ttl,
|
||||
}
|
||||
record, status = self._gandi_api_call(url, method='POST', payload=new_record)
|
||||
|
||||
if status in (200, 201,):
|
||||
return new_record
|
||||
|
||||
return None
|
||||
|
||||
def update_record(self, record, type, values, ttl, domain):
|
||||
url = '/domains/%s/records/%s/%s' % (domain, record, type)
|
||||
new_record = {
|
||||
'rrset_values': values,
|
||||
'rrset_ttl': ttl,
|
||||
}
|
||||
record = self._gandi_api_call(url, method='PUT', payload=new_record)[0]
|
||||
return record
|
||||
|
||||
def delete_record(self, record, type, domain):
|
||||
url = '/domains/%s/records/%s/%s' % (domain, record, type)
|
||||
|
||||
self._gandi_api_call(url, method='DELETE')
|
||||
|
||||
def delete_dns_record(self, record, type, values, domain):
|
||||
if record == '':
|
||||
record = '@'
|
||||
|
||||
records = self.get_records(record, type, domain)
|
||||
|
||||
if records:
|
||||
cur_record = records[0]
|
||||
|
||||
self.changed = True
|
||||
|
||||
if values is not None and set(cur_record['rrset_values']) != set(values):
|
||||
new_values = set(cur_record['rrset_values']) - set(values)
|
||||
if new_values:
|
||||
# Removing one or more values from a record, we update the record with the remaining values
|
||||
self.update_record(record, type, list(new_values), cur_record['rrset_ttl'], domain)
|
||||
records = self.get_records(record, type, domain)
|
||||
return records[0], self.changed
|
||||
|
||||
if not self.module.check_mode:
|
||||
self.delete_record(record, type, domain)
|
||||
else:
|
||||
cur_record = None
|
||||
|
||||
return None, self.changed
|
||||
|
||||
def ensure_dns_record(self, record, type, ttl, values, domain):
|
||||
if record == '':
|
||||
record = '@'
|
||||
|
||||
records = self.get_records(record, type, domain)
|
||||
|
||||
if records:
|
||||
cur_record = records[0]
|
||||
|
||||
do_update = False
|
||||
if ttl is not None and cur_record['rrset_ttl'] != ttl:
|
||||
do_update = True
|
||||
if values is not None and set(cur_record['rrset_values']) != set(values):
|
||||
do_update = True
|
||||
|
||||
if do_update:
|
||||
if self.module.check_mode:
|
||||
result = dict(
|
||||
rrset_type=type,
|
||||
rrset_name=record,
|
||||
rrset_values=values,
|
||||
rrset_ttl=ttl
|
||||
)
|
||||
else:
|
||||
self.update_record(record, type, values, ttl, domain)
|
||||
|
||||
records = self.get_records(record, type, domain)
|
||||
result = records[0]
|
||||
self.changed = True
|
||||
return result, self.changed
|
||||
else:
|
||||
return cur_record, self.changed
|
||||
|
||||
if self.module.check_mode:
|
||||
new_record = dict(
|
||||
rrset_type=type,
|
||||
rrset_name=record,
|
||||
rrset_values=values,
|
||||
rrset_ttl=ttl
|
||||
)
|
||||
result = new_record
|
||||
else:
|
||||
result = self.create_record(record, type, values, ttl, domain)
|
||||
|
||||
self.changed = True
|
||||
return result, self.changed
|
||||
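For orientation, a hedged sketch of how this (removed) helper was driven by the gandi_livedns module; the parameter values are illustrative:

    api = GandiLiveDNSAPI(module)  # module provides params['api_key']
    record, changed = api.ensure_dns_record(
        record='www', type='A', ttl=3600,
        values=['203.0.113.10'], domain='example.com')
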
@@ -30,16 +30,12 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
import traceback

from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native

URL_REALMS = "{url}/admin/realms"
URL_REALM = "{url}/admin/realms/{realm}"

URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token"
URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}"
URL_CLIENTS = "{url}/admin/realms/{realm}/clients"
@@ -59,14 +55,13 @@ def keycloak_argument_spec():
    :return: argument_spec dict
    """
    return dict(
        auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False),
        auth_keycloak_url=dict(type='str', aliases=['url'], required=True),
        auth_client_id=dict(type='str', default='admin-cli'),
        auth_realm=dict(type='str'),
        auth_realm=dict(type='str', required=True),
        auth_client_secret=dict(type='str', default=None, no_log=True),
        auth_username=dict(type='str', aliases=['username']),
        auth_password=dict(type='str', aliases=['password'], no_log=True),
        validate_certs=dict(type='bool', default=True),
        token=dict(type='str', no_log=True),
        auth_username=dict(type='str', aliases=['username'], required=True),
        auth_password=dict(type='str', aliases=['password'], required=True, no_log=True),
        validate_certs=dict(type='bool', default=True)
    )


@@ -78,58 +73,41 @@ class KeycloakError(Exception):
    pass


def get_token(module_params):
    """ Obtains connection header with token for the authentication,
        token already given or obtained from credentials
        :param module_params: parameters of the module
        :return: connection header
    """
    token = module_params.get('token')
    base_url = module_params.get('auth_keycloak_url')

def get_token(base_url, validate_certs, auth_realm, client_id,
              auth_username, auth_password, client_secret):
    if not base_url.lower().startswith(('http', 'https')):
        raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url)

    if token is None:
        base_url = module_params.get('auth_keycloak_url')
        validate_certs = module_params.get('validate_certs')
        auth_realm = module_params.get('auth_realm')
        client_id = module_params.get('auth_client_id')
        auth_username = module_params.get('auth_username')
        auth_password = module_params.get('auth_password')
        client_secret = module_params.get('auth_client_secret')
        auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
        temp_payload = {
            'grant_type': 'password',
            'client_id': client_id,
            'client_secret': client_secret,
            'username': auth_username,
            'password': auth_password,
        }
        # Remove empty items, for instance missing client_secret
        payload = dict(
            (k, v) for k, v in temp_payload.items() if v is not None)
        try:
            r = json.loads(to_native(open_url(auth_url, method='POST',
                                              validate_certs=validate_certs,
                                              data=urlencode(payload)).read()))
        except ValueError as e:
            raise KeycloakError(
                'API returned invalid JSON when trying to obtain access token from %s: %s'
                % (auth_url, str(e)))
        except Exception as e:
            raise KeycloakError('Could not obtain access token from %s: %s'
                                % (auth_url, str(e)))

        try:
            token = r['access_token']
        except KeyError:
            raise KeycloakError(
                'Could not obtain access token from %s' % auth_url)
    return {
        'Authorization': 'Bearer ' + token,
        'Content-Type': 'application/json'
    auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
    temp_payload = {
        'grant_type': 'password',
        'client_id': client_id,
        'client_secret': client_secret,
        'username': auth_username,
        'password': auth_password,
    }
    # Remove empty items, for instance missing client_secret
    payload = dict(
        (k, v) for k, v in temp_payload.items() if v is not None)
    try:
        r = json.loads(to_native(open_url(auth_url, method='POST',
                                          validate_certs=validate_certs,
                                          data=urlencode(payload)).read()))
    except ValueError as e:
        raise KeycloakError(
            'API returned invalid JSON when trying to obtain access token from %s: %s'
            % (auth_url, str(e)))
    except Exception as e:
        raise KeycloakError('Could not obtain access token from %s: %s'
                            % (auth_url, str(e)))

    try:
        return {
            'Authorization': 'Bearer ' + r['access_token'],
            'Content-Type': 'application/json'
        }
    except KeyError:
        raise KeycloakError(
            'Could not obtain access token from %s' % auth_url)


class KeycloakAPI(object):
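A hedged sketch of the explicit-argument form of get_token() shown above; the values are illustrative, and real modules derive them from keycloak_argument_spec() parameters:

    connection_header = get_token(
        base_url='https://keycloak.example.com/auth',
        validate_certs=True,
        auth_realm='master',
        client_id='admin-cli',
        auth_username='admin',
        auth_password='changeme',
        client_secret=None,
    )
    # {'Authorization': 'Bearer <token>', 'Content-Type': 'application/json'}
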
@@ -142,75 +120,6 @@ class KeycloakAPI(object):
        self.validate_certs = self.module.params.get('validate_certs')
        self.restheaders = connection_header

    def get_realm_by_id(self, realm='master'):
        """ Obtain realm representation by id

        :param realm: realm id
        :return: dict of realm representation or None if none matching exist
        """
        realm_url = URL_REALM.format(url=self.baseurl, realm=realm)

        try:
            return json.loads(to_native(open_url(realm_url, method='GET', headers=self.restheaders,
                                                 validate_certs=self.validate_certs).read()))

        except HTTPError as e:
            if e.code == 404:
                return None
            else:
                self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
                                      exception=traceback.format_exc())
        except ValueError as e:
            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())
        except Exception as e:
            self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())

    def update_realm(self, realmrep, realm="master"):
        """ Update an existing realm
        :param realmrep: corresponding (partial/full) realm representation with updates
        :param realm: realm to be updated in Keycloak
        :return: HTTPResponse object on success
        """
        realm_url = URL_REALM.format(url=self.baseurl, realm=realm)

        try:
            return open_url(realm_url, method='PUT', headers=self.restheaders,
                            data=json.dumps(realmrep), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())

    def create_realm(self, realmrep):
        """ Create a realm in keycloak
        :param realmrep: Realm representation of realm to be created.
        :return: HTTPResponse object on success
        """
        realm_url = URL_REALMS.format(url=self.baseurl)

        try:
            return open_url(realm_url, method='POST', headers=self.restheaders,
                            data=json.dumps(realmrep), validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)),
                                  exception=traceback.format_exc())

    def delete_realm(self, realm="master"):
        """ Delete a realm from Keycloak

        :param realm: realm to be deleted
        :return: HTTPResponse object on success
        """
        realm_url = URL_REALM.format(url=self.baseurl, realm=realm)

        try:
            return open_url(realm_url, method='DELETE', headers=self.restheaders,
                            validate_certs=self.validate_certs)
        except Exception as e:
            self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)),
                                  exception=traceback.format_exc())

    def get_clients(self, realm='master', filter=None):
        """ Obtains client representations for clients in a realm

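A brief, hedged sketch of driving the realm helpers above from a module; the realm representation is abridged and the constructor signature is as shown in the hunk:

    kc = KeycloakAPI(module, connection_header)
    realm = kc.get_realm_by_id(realm='demo')
    if realm is None:
        kc.create_realm({'id': 'demo', 'realm': 'demo', 'enabled': True})
    else:
        kc.update_realm({'enabled': False}, realm='demo')
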
@@ -119,9 +119,9 @@ class IPAClient(object):
        data = dict(method=method)

        # TODO: We should probably handle this a little better.
        if method in ('ping', 'config_show', 'otpconfig_show'):
        if method in ('ping', 'config_show'):
            data['params'] = [[], {}]
        elif method in ('config_mod', 'otpconfig_mod'):
        elif method == 'config_mod':
            data['params'] = [[], item]
        else:
            data['params'] = [[name], item]

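For context, the FreeIPA JSON-RPC payload this builds looks roughly like the following (values illustrative):

    # method='config_mod', item={'ipasearchrecordslimit': 500}
    data = {'method': 'config_mod', 'params': [[], {'ipasearchrecordslimit': 500}]}
    # method='user_add', name='jdoe', item={'givenname': 'John'}
    data = {'method': 'user_add', 'params': [['jdoe'], {'givenname': 'John'}]}
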
@@ -87,12 +87,11 @@ def not_in_host_file(self, host):
        user_host_file = "~/.ssh/known_hosts"
    user_host_file = os.path.expanduser(user_host_file)

    host_file_list = [
        user_host_file,
        "/etc/ssh/ssh_known_hosts",
        "/etc/ssh/ssh_known_hosts2",
        "/etc/openssh/ssh_known_hosts",
    ]
    host_file_list = []
    host_file_list.append(user_host_file)
    host_file_list.append("/etc/ssh/ssh_known_hosts")
    host_file_list.append("/etc/ssh/ssh_known_hosts2")
    host_file_list.append("/etc/openssh/ssh_known_hosts")

    hfiles_not_found = 0
    for hf in host_file_list:

@@ -10,7 +10,6 @@ from functools import partial, wraps
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.dict_transformations import dict_merge


class ModuleHelperException(Exception):
@@ -25,12 +24,12 @@ class ModuleHelperException(Exception):
    def __init__(self, *args, **kwargs):
        self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self)
        self.update_output = self._get_remove('update_output', kwargs) or {}
        super(ModuleHelperException, self).__init__(*args)
        super(ModuleHelperException, self).__init__(*args, **kwargs)


class ArgFormat(object):
    """
    Argument formatter for use as a command line parameter. Used in CmdMixin.
    Argument formatter
    """
    BOOLEAN = 0
    PRINTF = 1
@@ -51,8 +50,7 @@ class ArgFormat(object):

    def __init__(self, name, fmt=None, style=FORMAT, stars=0):
        """
        Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for
        the CLI command execution.
        Creates a new formatter
        :param name: Name of the argument to be formatted
        :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that
        :param style: Whether arg_format (as str) should use printf-style formatting.
@@ -95,33 +93,22 @@ class ArgFormat(object):
            self.arg_format = (self.stars_deco(stars))(self.arg_format)

    def to_text(self, value):
        if value is None:
            return []
        func = self.arg_format
        return [str(p) for p in func(value)]


def cause_changes(on_success=None, on_failure=None):

    def deco(func):
        if on_success is None and on_failure is None:
            return func

        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                self = args[0]
                func(*args, **kwargs)
                if on_success is not None:
                    self.changed = on_success
            except Exception:
                if on_failure is not None:
                    self.changed = on_failure
                raise

        return wrapper

    return deco
def cause_changes(func, on_success=True, on_failure=False):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            func(*args, **kwargs)
            if on_success:
                self.changed = True
        except Exception as e:
            if on_failure:
                self.changed = True
            raise
    return wrapper


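A hedged usage sketch of the decorator-factory form of cause_changes shown above, applied to a ModuleHelper-style method:

    class MyHelper(ModuleHelper):
        @cause_changes(on_success=True)
        def __run__(self):
            # if this succeeds, self.changed becomes True;
            # exceptions propagate and leave self.changed untouched
            self.do_work()
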
def module_fails_on_exception(func):
@@ -134,12 +121,10 @@ def module_fails_on_exception(func):
        except ModuleHelperException as e:
            if e.update_output:
                self.update_output(e.update_output)
            self.module.fail_json(msg=e.msg, exception=traceback.format_exc(),
                                  output=self.output, vars=self.vars.output(), **self.output)
        except Exception as e:
            msg = "Module failed with exception: {0}".format(str(e).strip())
            self.module.fail_json(msg=msg, exception=traceback.format_exc(),
                                  output=self.output, vars=self.vars.output(), **self.output)
            self.vars.msg = "Module failed with exception: {0}".format(str(e).strip())
            self.vars.exception = traceback.format_exc()
            self.module.fail_json(changed=False, msg=self.vars.msg, exception=self.vars.exception, output=self.output, vars=self.vars)
    return wrapper


@@ -153,7 +138,7 @@ class DependencyCtxMgr(object):
        self.exc_tb = None

    def __enter__(self):
        return self
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.has_it = exc_type is None
@@ -167,157 +152,32 @@ class DependencyCtxMgr(object):
        return self.msg or str(self.exc_val)


class VarMeta(object):
    NOTHING = object()

    def __init__(self, diff=False, output=True, change=None, fact=False):
        self.init = False
        self.initial_value = None
        self.value = None

        self.diff = diff
        self.change = diff if change is None else change
        self.output = output
        self.fact = fact

    def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING):
        if diff is not None:
            self.diff = diff
        if output is not None:
            self.output = output
        if change is not None:
            self.change = change
        if fact is not None:
            self.fact = fact
        if initial_value is not self.NOTHING:
            self.initial_value = initial_value

    def set_value(self, value):
        if not self.init:
            self.initial_value = value
            self.init = True
        self.value = value
        return self

    @property
    def has_changed(self):
        return self.change and (self.initial_value != self.value)

    @property
    def diff_result(self):
        return None if not (self.diff and self.has_changed) else {
            'before': self.initial_value,
            'after': self.value,
        }

    def __str__(self):
        return "<VarMeta: value={0}, initial={1}, diff={2}, output={3}, change={4}>".format(
            self.value, self.initial_value, self.diff, self.output, self.change
        )


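A short sketch of the VarMeta change-tracking contract (values illustrative):

    meta = VarMeta(diff=True)
    meta.set_value('a')   # the first set records the initial value
    meta.set_value('b')
    meta.has_changed      # True
    meta.diff_result      # {'before': 'a', 'after': 'b'}
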
class ModuleHelper(object):
    _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
    _dependencies = []
    module = None
    module = {}
    facts_name = None
    output_params = ()
    diff_params = ()
    change_params = ()
    facts_params = ()

    class VarDict(object):
        def __init__(self):
            self._data = dict()
            self._meta = dict()

        def __getitem__(self, item):
            return self._data[item]

        def __setitem__(self, key, value):
            self.set(key, value)

    class AttrDict(dict):
        def __getattr__(self, item):
            try:
                return self._data[item]
            except KeyError:
                return getattr(self._data, item)

        def __setattr__(self, key, value):
            if key in ('_data', '_meta'):
                super(ModuleHelper.VarDict, self).__setattr__(key, value)
            else:
                self.set(key, value)

        def meta(self, name):
            return self._meta[name]

        def set_meta(self, name, **kwargs):
            self.meta(name).set(**kwargs)

        def set(self, name, value, **kwargs):
            if name in ('_data', '_meta'):
                raise ValueError("Names _data and _meta are reserved for use by ModuleHelper")
            self._data[name] = value
            if name in self._meta:
                meta = self.meta(name)
            else:
                meta = VarMeta(**kwargs)
            meta.set_value(value)
            self._meta[name] = meta

        def output(self):
            return dict((k, v) for k, v in self._data.items() if self.meta(k).output)

        def diff(self):
            diff_results = [(k, self.meta(k).diff_result) for k in self._data]
            diff_results = [dr for dr in diff_results if dr[1] is not None]
            if diff_results:
                before = dict((dr[0], dr[1]['before']) for dr in diff_results)
                after = dict((dr[0], dr[1]['after']) for dr in diff_results)
                return {'before': before, 'after': after}
            return None

        def facts(self):
            facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact)
            return facts_result if facts_result else None

        def change_vars(self):
            return [v for v in self._data if self.meta(v).change]

        def has_changed(self, v):
            return self._meta[v].has_changed
            return self[item]

    def __init__(self, module=None):
        self.vars = ModuleHelper.VarDict()
        self.vars = ModuleHelper.AttrDict()
        self.output_dict = dict()
        self.facts_dict = dict()
        self._changed = False

        if module:
            self.module = module

        if not isinstance(self.module, AnsibleModule):
        if isinstance(self.module, dict):
            self.module = AnsibleModule(**self.module)

        for name, value in self.module.params.items():
            self.vars.set(
                name, value,
                diff=name in self.diff_params,
                output=name in self.output_params,
                change=None if not self.change_params else name in self.change_params,
                fact=name in self.facts_params,
            )

    def update_vars(self, meta=None, **kwargs):
        if meta is None:
            meta = {}
        for k, v in kwargs.items():
            self.vars.set(k, v, **meta)

    def update_output(self, **kwargs):
        self.update_vars(meta={"output": True}, **kwargs)
        self.output_dict.update(kwargs)

    def update_facts(self, **kwargs):
        self.update_vars(meta={"fact": True}, **kwargs)
        self.facts_dict.update(kwargs)

    def __init_module__(self):
        pass
@@ -328,9 +188,6 @@ class ModuleHelper(object):
    def __quit_module__(self):
        pass

    def _vars_changed(self):
        return any(self.vars.has_changed(v) for v in self.vars.change_vars())

    @property
    def changed(self):
        return self._changed
@@ -339,25 +196,12 @@ class ModuleHelper(object):
    def changed(self, value):
        self._changed = value

    def has_changed(self):
        return self.changed or self._vars_changed()

    @property
    def output(self):
        result = dict(self.vars.output())
        result = dict(self.vars)
        result.update(self.output_dict)
        if self.facts_name:
            facts = self.vars.facts()
            if facts is not None:
                result['ansible_facts'] = {self.facts_name: facts}
        if self.module._diff:
            diff = result.get('diff', {})
            vars_diff = self.vars.diff() or {}
            result['diff'] = dict_merge(dict(diff), vars_diff)

        for varname in result:
            if varname in self._output_conflict_list:
                result["_" + varname] = result[varname]
                del result[varname]
            result['ansible_facts'] = {self.facts_name: self.facts_dict}
        return result

    @module_fails_on_exception
@@ -366,7 +210,7 @@ class ModuleHelper(object):
        self.__init_module__()
        self.__run__()
        self.__quit_module__()
        self.module.exit_json(changed=self.has_changed(), **self.output)
        self.module.exit_json(changed=self.changed, **self.output_dict)

    @classmethod
    def dependency(cls, name, msg):
@@ -377,9 +221,9 @@ class ModuleHelper(object):
        for d in self._dependencies:
            if not d.has_it:
                self.module.fail_json(changed=False,
                                      exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),
                                      exception=d.exc_val.__traceback__.format_exc(),
                                      msg=d.text,
                                      **self.output)
                                      **self.output_dict)


class StateMixin(object):
@@ -448,35 +292,21 @@ class CmdMixin(object):

        extra_params = extra_params or dict()
        cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
        try:
            cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)
        except ValueError:
            pass
        cmd_args[0] = self.module.get_bin_path(cmd_args[0])
        param_list = params if params else self.module.params.keys()

        for param in param_list:
            if isinstance(param, dict):
                if len(param) != 1:
                    raise ModuleHelperException("run_command parameter as a dict must "
                                                "contain only one key: {0}".format(param))
                _param = list(param.keys())[0]
                fmt = find_format(_param)
                value = param[_param]
            elif isinstance(param, str):
                if param in self.module.argument_spec:
                    fmt = find_format(param)
                    value = self.module.params[param]
                elif param in extra_params:
                    fmt = find_format(param)
                    value = extra_params[param]
                else:
                    self.module.deprecate("Cannot determine value for parameter: {0}. "
                                          "From version 4.0.0 onwards this will generate an exception".format(param),
                                          version="4.0.0", collection_name="community.general")
            if param in self.module.argument_spec:
                if param not in self.module.params:
                    continue

                fmt = find_format(param)
                value = self.module.params[param]
            else:
                raise ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
                if param not in extra_params:
                    continue
                fmt = find_format(param)
                value = extra_params[param]
            self.cmd_args = cmd_args
            cmd_args = add_arg_formatted_param(cmd_args, fmt, value)

        return cmd_args
@@ -485,7 +315,7 @@ class CmdMixin(object):
        return rc, out, err

    def run_command(self, extra_params=None, params=None, *args, **kwargs):
        self.vars.cmd_args = self._calculate_args(extra_params, params)
        self.vars['cmd_args'] = self._calculate_args(extra_params, params)
        options = dict(self.run_command_fixed_options)
        env_update = dict(options.get('environ_update', {}))
        options['check_rc'] = options.get('check_rc', self.check_rc)
@@ -494,7 +324,7 @@ class CmdMixin(object):
            self.update_output(force_lang=self.force_lang)
        options['environ_update'] = env_update
        options.update(kwargs)
        rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)
        rc, out, err = self.module.run_command(self.vars['cmd_args'], *args, **options)
        self.update_output(rc=rc, stdout=out, stderr=err)
        return self.process_command_output(rc, out, err)


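For orientation, a hedged minimal subclass wiring the pieces above together (argument spec abridged; behavior follows the VarDict-based variant of the code):

    class Sample(ModuleHelper):
        output_params = ('name',)
        module = dict(
            argument_spec=dict(name=dict(type='str', required=True)),
            supports_check_mode=True,
        )

        def __run__(self):
            self.vars.greeting = 'hello {0}'.format(self.vars.name)

    # Sample().run() would exit_json() with the changed status and collected vars
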
@@ -18,7 +18,6 @@ from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.common.validation import check_type_dict

try:
    from infoblox_client.connector import Connector
@@ -400,11 +399,11 @@ class WapiModule(WapiBase):

        if 'ipv4addrs' in proposed_object:
            if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
                ip_range = check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
                ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
                proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
        elif 'ipv4addr' in proposed_object:
            if 'nios_next_ip' in proposed_object['ipv4addr']:
                ip_range = check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
                ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
                proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range

        return proposed_object
@@ -486,7 +485,7 @@ class WapiModule(WapiBase):
        if ('name' in obj_filter):
            # gets and returns the current object based on name/old_name passed
            try:
                name_obj = check_type_dict(obj_filter['name'])
                name_obj = self.module._check_type_dict(obj_filter['name'])
                old_name = name_obj['old_name']
                new_name = name_obj['new_name']
            except TypeError:
@@ -500,12 +499,12 @@ class WapiModule(WapiBase):
                else:
                    test_obj_filter = dict([('name', old_name)])
                # get the object reference
                ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
                ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
                if ib_obj:
                    obj_filter['name'] = new_name
                else:
                    test_obj_filter['name'] = new_name
                    ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys()))
                    ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
                update = True
                return ib_obj, update, new_name
        if (ib_obj_type == NIOS_HOST_RECORD):
@@ -522,7 +521,7 @@ class WapiModule(WapiBase):
                test_obj_filter['name'] = test_obj_filter['name'].lower()
            # resolves issue where multiple a_records with same name and different IP address
            try:
                ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
                ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
                ipaddr = ipaddr_obj['old_ipv4addr']
            except TypeError:
                ipaddr = obj_filter['ipv4addr']
@@ -531,7 +530,7 @@ class WapiModule(WapiBase):
            # resolves issue where multiple txt_records with same name and different text
            test_obj_filter = obj_filter
            try:
                text_obj = check_type_dict(obj_filter['text'])
                text_obj = self.module._check_type_dict(obj_filter['text'])
                txt = text_obj['old_text']
            except TypeError:
                txt = obj_filter['text']
@@ -539,32 +538,32 @@ class WapiModule(WapiBase):
            # check if test_obj_filter is empty copy passed obj_filter
            else:
                test_obj_filter = obj_filter
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
        elif (ib_obj_type == NIOS_A_RECORD):
            # resolves issue where multiple a_records with same name and different IP address
            test_obj_filter = obj_filter
            try:
                ipaddr_obj = check_type_dict(obj_filter['ipv4addr'])
                ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
                ipaddr = ipaddr_obj['old_ipv4addr']
            except TypeError:
                ipaddr = obj_filter['ipv4addr']
            test_obj_filter['ipv4addr'] = ipaddr
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
        elif (ib_obj_type == NIOS_TXT_RECORD):
            # resolves issue where multiple txt_records with same name and different text
            test_obj_filter = obj_filter
            try:
                text_obj = check_type_dict(obj_filter['text'])
                text_obj = self.module._check_type_dict(obj_filter['text'])
                txt = text_obj['old_text']
            except TypeError:
                txt = obj_filter['text']
            test_obj_filter['text'] = txt
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys()))
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
        elif (ib_obj_type == NIOS_ZONE):
            # del key 'restart_if_needed' as nios_zone get_object fails with the key present
            temp = ib_spec['restart_if_needed']
            del ib_spec['restart_if_needed']
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
            # reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
            if not ib_obj:
                ib_spec['restart_if_needed'] = temp
@@ -572,12 +571,12 @@ class WapiModule(WapiBase):
            # del key 'create_token' as nios_member get_object fails with the key present
            temp = ib_spec['create_token']
            del ib_spec['create_token']
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
            if temp:
                # reinstate 'create_token' key
                ib_spec['create_token'] = temp
        else:
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys()))
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
        return ib_obj, update, new_name

    def on_update(self, proposed_object, ib_spec):

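The TypeError fallbacks above rely on check_type_dict() only parsing key=value strings; a small illustration:

    check_type_dict('old_ipv4addr=192.0.2.1,new_ipv4addr=192.0.2.2')
    # -> {'old_ipv4addr': '192.0.2.1', 'new_ipv4addr': '192.0.2.2'}
    check_type_dict('192.0.2.1')  # raises TypeError, so the plain value is used
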
@@ -1,370 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Florian Dambrine <android.florian@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

"""
Pritunl API that offers CRUD operations on Pritunl Organizations and Users
"""

from __future__ import absolute_import, division, print_function

import base64
import hashlib
import hmac
import json
import time
import uuid

from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import open_url

__metaclass__ = type


class PritunlException(Exception):
    pass


def pritunl_argument_spec():
    return dict(
        pritunl_url=dict(required=True, type="str"),
        pritunl_api_token=dict(required=True, type="str", no_log=False),
        pritunl_api_secret=dict(required=True, type="str", no_log=True),
        validate_certs=dict(required=False, type="bool", default=True),
    )


def get_pritunl_settings(module):
    """
    Helper function to set required Pritunl request params from module arguments.
    """
    return {
        "api_token": module.params.get("pritunl_api_token"),
        "api_secret": module.params.get("pritunl_api_secret"),
        "base_url": module.params.get("pritunl_url"),
        "validate_certs": module.params.get("validate_certs"),
    }


def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=True):
    return pritunl_auth_request(
        base_url=base_url,
        api_token=api_token,
        api_secret=api_secret,
        method="GET",
        path="/organization",
        validate_certs=validate_certs,
    )


def _delete_pritunl_organization(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    return pritunl_auth_request(
        base_url=base_url,
        api_token=api_token,
        api_secret=api_secret,
        method="DELETE",
        path="/organization/%s" % (organization_id),
        validate_certs=validate_certs,
    )


def _post_pritunl_organization(
    api_token, api_secret, base_url, organization_data, validate_certs=True
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="POST",
        path="/organization/%s",
        headers={"Content-Type": "application/json"},
        data=json.dumps(organization_data),
        validate_certs=validate_certs,
    )


def _get_pritunl_users(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="GET",
        path="/user/%s" % organization_id,
        validate_certs=validate_certs,
    )


def _delete_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="DELETE",
        path="/user/%s/%s" % (organization_id, user_id),
        validate_certs=validate_certs,
    )


def _post_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_data, validate_certs=True
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="POST",
        path="/user/%s" % organization_id,
        headers={"Content-Type": "application/json"},
        data=json.dumps(user_data),
        validate_certs=validate_certs,
    )


def _put_pritunl_user(
    api_token,
    api_secret,
    base_url,
    organization_id,
    user_id,
    user_data,
    validate_certs=True,
):
    return pritunl_auth_request(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        method="PUT",
        path="/user/%s/%s" % (organization_id, user_id),
        headers={"Content-Type": "application/json"},
        data=json.dumps(user_data),
        validate_certs=validate_certs,
    )


def list_pritunl_organizations(
    api_token, api_secret, base_url, validate_certs=True, filters=None
):
    orgs = []

    response = _get_pritunl_organizations(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
    )

    if response.getcode() != 200:
        raise PritunlException("Could not retrieve organizations from Pritunl")
    else:
        for org in json.loads(response.read()):
            # No filtering
            if filters is None:
                orgs.append(org)
            else:
                if not any(
                    filter_val != org[filter_key]
                    for filter_key, filter_val in iteritems(filters)
                ):
                    orgs.append(org)

    return orgs


def list_pritunl_users(
    api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None
):
    users = []

    response = _get_pritunl_users(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        validate_certs=validate_certs,
        organization_id=organization_id,
    )

    if response.getcode() != 200:
        raise PritunlException("Could not retrieve users from Pritunl")
    else:
        for user in json.loads(response.read()):
            # No filtering
            if filters is None:
                users.append(user)

            else:
                if not any(
                    filter_val != user[filter_key]
                    for filter_key, filter_val in iteritems(filters)
                ):
                    users.append(user)

    return users


def post_pritunl_organization(
    api_token,
    api_secret,
    base_url,
    organization_name,
    validate_certs=True,
):
    response = _post_pritunl_organization(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_data={"name": organization_name},
        validate_certs=True,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not add organization %s to Pritunl" % (organization_name)
        )
    # The user PUT request returns the updated user object
    return json.loads(response.read())


def post_pritunl_user(
    api_token,
    api_secret,
    base_url,
    organization_id,
    user_data,
    user_id=None,
    validate_certs=True,
):
    # If user_id is provided will do PUT otherwise will do POST
    if user_id is None:
        response = _post_pritunl_user(
            api_token=api_token,
            api_secret=api_secret,
            base_url=base_url,
            organization_id=organization_id,
            user_data=user_data,
            validate_certs=True,
        )

        if response.getcode() != 200:
            raise PritunlException(
                "Could not remove user %s from organization %s from Pritunl"
                % (user_id, organization_id)
            )
        # user POST request returns an array of a single item,
        # so return this item instead of the list
        return json.loads(response.read())[0]
    else:
        response = _put_pritunl_user(
            api_token=api_token,
            api_secret=api_secret,
            base_url=base_url,
            organization_id=organization_id,
            user_data=user_data,
            user_id=user_id,
            validate_certs=True,
        )

        if response.getcode() != 200:
            raise PritunlException(
                "Could not update user %s from organization %s from Pritunl"
                % (user_id, organization_id)
            )
        # The user PUT request returns the updated user object
        return json.loads(response.read())


def delete_pritunl_organization(
    api_token, api_secret, base_url, organization_id, validate_certs=True
):
    response = _delete_pritunl_organization(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_id=organization_id,
        validate_certs=True,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not remove organization %s from Pritunl" % (organization_id)
        )

    return json.loads(response.read())


def delete_pritunl_user(
    api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
):
    response = _delete_pritunl_user(
        api_token=api_token,
        api_secret=api_secret,
        base_url=base_url,
        organization_id=organization_id,
        user_id=user_id,
        validate_certs=True,
    )

    if response.getcode() != 200:
        raise PritunlException(
            "Could not remove user %s from organization %s from Pritunl"
            % (user_id, organization_id)
        )

    return json.loads(response.read())


def pritunl_auth_request(
    api_token,
    api_secret,
    base_url,
    method,
    path,
    validate_certs=True,
    headers=None,
    data=None,
):
    """
    Send an API call to a Pritunl server.
    Taken from https://pritunl.com/api and adapted to work with Ansible open_url
    """
    auth_timestamp = str(int(time.time()))
    auth_nonce = uuid.uuid4().hex

    auth_string = "&".join(
        [api_token, auth_timestamp, auth_nonce, method.upper(), path]
        + ([data] if data else [])
    )

    auth_signature = base64.b64encode(
        hmac.new(
            api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256
        ).digest()
    )

    auth_headers = {
        "Auth-Token": api_token,
        "Auth-Timestamp": auth_timestamp,
        "Auth-Nonce": auth_nonce,
        "Auth-Signature": auth_signature,
    }

    if headers:
        auth_headers.update(headers)

    try:
        uri = "%s%s" % (base_url, path)

        return open_url(
            uri,
            method=method.upper(),
            headers=auth_headers,
            data=data,
            validate_certs=validate_certs,
        )
    except Exception as e:
        raise PritunlException(e)
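A hedged standalone sketch of the signature scheme implemented above (token and secret are illustrative):

    import base64
    import hashlib
    import hmac
    import time
    import uuid

    api_token, api_secret = 'tok', 'sec'
    auth_timestamp = str(int(time.time()))
    auth_nonce = uuid.uuid4().hex
    # token, timestamp, nonce, method and path are joined and HMAC-signed
    auth_string = '&'.join([api_token, auth_timestamp, auth_nonce, 'GET', '/organization'])
    signature = base64.b64encode(
        hmac.new(api_secret.encode('utf-8'), auth_string.encode('utf-8'), hashlib.sha256).digest())
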
@@ -39,16 +39,14 @@ class OpenNebulaModule:
        wait_timeout=dict(type='int', default=300),
    )

    def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None):
    def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None):

        module_args = OpenNebulaModule.common_args.copy()
        module_args = OpenNebulaModule.common_args
        module_args.update(argument_spec)

        self.module = AnsibleModule(argument_spec=module_args,
                                    supports_check_mode=supports_check_mode,
                                    mutually_exclusive=mutually_exclusive,
                                    required_one_of=required_one_of,
                                    required_if=required_if)
                                    mutually_exclusive=mutually_exclusive)
        self.result = dict(changed=False,
                           original_message='',
                           message='')

@@ -104,7 +104,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False):

    if supports_create:
        common_args.update(
            key_by=dict(type="list", elements="str", no_log=False),
            key_by=dict(type="list", elements="str"),
            force_create=dict(type="bool", default=False),
        )


@@ -19,10 +19,11 @@ PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
                 'OData-Version': '4.0'}
DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}

FAIL_MSG = 'Issuing a data modification command without specifying the '\
           'ID of the target %(resource)s resource when there is more '\
           'than one %(resource)s is no longer allowed. Use the `resource_id` '\
           'option to specify the target %(resource)s ID.'
DEPRECATE_MSG = 'Issuing a data modification command without specifying the '\
                'ID of the target %(resource)s resource when there is more '\
                'than one %(resource)s will use the first one in the '\
                'collection. Use the `resource_id` option to specify the '\
                'target %(resource)s ID'


class RedfishUtils(object):
@@ -38,34 +39,13 @@ class RedfishUtils(object):
        self.data_modification = data_modification
        self._init_session()

    def _auth_params(self, headers):
        """
        Return tuple of required authentication params based on the presence
        of a token in the self.creds dict. If using a token, set the
        X-Auth-Token header in the `headers` param.

        :param headers: dict containing headers to send in request
        :return: tuple of username, password and force_basic_auth
        """
        if self.creds.get('token'):
            username = None
            password = None
            force_basic_auth = False
            headers['X-Auth-Token'] = self.creds['token']
        else:
            username = self.creds['user']
            password = self.creds['pswd']
            force_basic_auth = True
        return username, password, force_basic_auth

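In short, _auth_params() lets every request helper below work with either session-token or basic auth. A sketch of both paths, with utils standing in for a RedfishUtils instance and the credentials illustrative:

    headers = dict(GET_HEADERS)
    # token path: creds = {'token': 'abc123'}
    #   -> (None, None, False) and headers gains 'X-Auth-Token': 'abc123'
    # basic-auth path: creds = {'user': 'root', 'pswd': 'calvin'}
    #   -> ('root', 'calvin', True) and headers are left unchanged
    username, password, force_basic_auth = utils._auth_params(headers)
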
    # The following functions are to send GET/POST/PATCH/DELETE requests
    def get_request(self, uri):
        req_headers = dict(GET_HEADERS)
        username, password, basic_auth = self._auth_params(req_headers)
        try:
            resp = open_url(uri, method="GET", headers=req_headers,
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
            resp = open_url(uri, method="GET", headers=GET_HEADERS,
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
            data = json.loads(to_native(resp.read()))
@@ -86,16 +66,14 @@ class RedfishUtils(object):
        return {'ret': True, 'data': data, 'headers': headers}

    def post_request(self, uri, pyld):
        req_headers = dict(POST_HEADERS)
        username, password, basic_auth = self._auth_params(req_headers)
        try:
            resp = open_url(uri, data=json.dumps(pyld),
                            headers=req_headers, method="POST",
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            headers=POST_HEADERS, method="POST",
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
            headers = dict((k.lower(), v) for (k, v) in resp.info().items())
        except HTTPError as e:
            msg = self._get_extended_message(e)
            return {'ret': False,
@@ -109,10 +87,10 @@ class RedfishUtils(object):
        except Exception as e:
            return {'ret': False,
                    'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
        return {'ret': True, 'headers': headers, 'resp': resp}
        return {'ret': True, 'resp': resp}

    def patch_request(self, uri, pyld):
        req_headers = dict(PATCH_HEADERS)
        headers = PATCH_HEADERS
        r = self.get_request(uri)
        if r['ret']:
            # Get etag from etag header or @odata.etag property
@@ -120,13 +98,15 @@ class RedfishUtils(object):
            if not etag:
                etag = r['data'].get('@odata.etag')
            if etag:
                req_headers['If-Match'] = etag
        username, password, basic_auth = self._auth_params(req_headers)
                # Make copy of headers and add If-Match header
                headers = dict(headers)
                headers['If-Match'] = etag
        try:
            resp = open_url(uri, data=json.dumps(pyld),
                            headers=req_headers, method="PATCH",
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            headers=headers, method="PATCH",
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
        except HTTPError as e:
@@ -145,14 +125,13 @@ class RedfishUtils(object):
        return {'ret': True, 'resp': resp}

    def delete_request(self, uri, pyld=None):
        req_headers = dict(DELETE_HEADERS)
        username, password, basic_auth = self._auth_params(req_headers)
        try:
            data = json.dumps(pyld) if pyld else None
            resp = open_url(uri, data=data,
                            headers=req_headers, method="DELETE",
                            url_username=username, url_password=password,
                            force_basic_auth=basic_auth, validate_certs=False,
                            headers=DELETE_HEADERS, method="DELETE",
                            url_username=self.creds['user'],
                            url_password=self.creds['pswd'],
                            force_basic_auth=True, validate_certs=False,
                            follow_redirects='all',
                            use_proxy=True, timeout=self.timeout)
        except HTTPError as e:
@@ -266,7 +245,8 @@ class RedfishUtils(object):
                    'ret': False,
                    'msg': "System resource %s not found" % self.resource_id}
            elif len(self.systems_uris) > 1:
                self.module.fail_json(msg=FAIL_MSG % {'resource': 'System'})
                self.module.deprecate(DEPRECATE_MSG % {'resource': 'System'},
                                      version='3.0.0', collection_name='community.general')  # was Ansible 2.14
        return {'ret': True}

    def _find_updateservice_resource(self):
@@ -316,7 +296,8 @@ class RedfishUtils(object):
                    'ret': False,
                    'msg': "Chassis resource %s not found" % self.resource_id}
            elif len(self.chassis_uris) > 1:
                self.module.fail_json(msg=FAIL_MSG % {'resource': 'Chassis'})
                self.module.deprecate(DEPRECATE_MSG % {'resource': 'Chassis'},
                                      version='3.0.0', collection_name='community.general')  # was Ansible 2.14
        return {'ret': True}

    def _find_managers_resource(self):
@@ -345,7 +326,8 @@ class RedfishUtils(object):
                    'ret': False,
                    'msg': "Manager resource %s not found" % self.resource_id}
            elif len(self.manager_uris) > 1:
                self.module.fail_json(msg=FAIL_MSG % {'resource': 'Manager'})
                self.module.deprecate(DEPRECATE_MSG % {'resource': 'Manager'},
                                      version='3.0.0', collection_name='community.general')  # was Ansible 2.14
        return {'ret': True}

    def _get_all_action_info_values(self, action):
@@ -1214,54 +1196,6 @@ class RedfishUtils(object):

        return {'ret': True, 'changed': True, 'msg': "Clear all sessions successfully"}

    def create_session(self):
        if not self.creds.get('user') or not self.creds.get('pswd'):
            return {'ret': False, 'msg':
                    'Must provide the username and password parameters for '
                    'the CreateSession command'}

        payload = {
            'UserName': self.creds['user'],
            'Password': self.creds['pswd']
        }
        response = self.post_request(self.root_uri + self.sessions_uri, payload)
        if response['ret'] is False:
            return response

        headers = response['headers']
        if 'x-auth-token' not in headers:
            return {'ret': False, 'msg':
                    'The service did not return the X-Auth-Token header in '
                    'the response from the Sessions collection POST'}

        if 'location' not in headers:
            self.module.warn(
                'The service did not return the Location header for the '
                'session URL in the response from the Sessions collection '
                'POST')
            session_uri = None
        else:
            session_uri = urlparse(headers.get('location')).path

        session = dict()
        session['token'] = headers.get('x-auth-token')
        session['uri'] = session_uri
        return {'ret': True, 'changed': True, 'session': session,
                'msg': 'Session created successfully'}

    def delete_session(self, session_uri):
        if not session_uri:
            return {'ret': False, 'msg':
                    'Must provide the session_uri parameter for the '
                    'DeleteSession command'}

        response = self.delete_request(self.root_uri + session_uri)
        if response['ret'] is False:
            return response

        return {'ret': True, 'changed': True,
                'msg': 'Session deleted successfully'}

    def get_firmware_update_capabilities(self):
        result = {}
        response = self.get_request(self.root_uri + self.update_uri)

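A hedged sketch of the session flow the two methods above implement, again with utils standing in for a RedfishUtils instance:

    result = utils.create_session()           # POST to the Sessions collection
    if result['ret']:
        session = result['session']           # {'token': ..., 'uri': ...}
        # later requests may authenticate with creds = {'token': session['token']}
        utils.delete_session(session['uri'])  # DELETE the session resource
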
@@ -2742,10 +2676,6 @@ class RedfishUtils(object):
                            need_change = True
                    # type is list
                    if isinstance(set_value, list):
                        if len(set_value) != len(cur_value):
                            # if arrays are not the same len, no need to check each element
                            need_change = True
                            continue
                        for i in range(len(set_value)):
                            for subprop in payload[property][i].keys():
                                if subprop not in target_ethernet_current_setting[property][i]:

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-

#
# Dell EMC OpenManage Ansible Modules
# Version 1.0
# Copyright (C) 2018 Dell Inc.

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

try:
    from omsdk.sdkinfra import sdkinfra
    from omsdk.sdkcreds import UserCredentials
    from omsdk.sdkfile import FileOnShare, file_share_manager
    from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum
    from omsdk.http.sdkwsmanbase import WsManOptions
    HAS_OMSDK = True
except ImportError:
    HAS_OMSDK = False


class iDRACConnection:

    def __init__(self, module_params):
        if not HAS_OMSDK:
            raise ImportError("Dell EMC OMSDK library is required for this module")
        self.idrac_ip = module_params['idrac_ip']
        self.idrac_user = module_params['idrac_user']
        self.idrac_pwd = module_params['idrac_password']
        self.idrac_port = module_params['idrac_port']
        if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)):
            raise ValueError("hostname, username and password required")
        self.handle = None
        self.creds = UserCredentials(self.idrac_user, self.idrac_pwd)
        self.pOp = WsManOptions(port=self.idrac_port)
        self.sdk = sdkinfra()
        if self.sdk is None:
            msg = "Could not initialize iDRAC drivers."
            raise RuntimeError(msg)

    def __enter__(self):
        self.sdk.importPath()
        self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds, pOptions=self.pOp)
        if self.handle is None:
            msg = "Could not find device driver for iDRAC with IP Address: {0}".format(self.idrac_ip)
            raise RuntimeError(msg)
        return self.handle

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.handle.disconnect()
        return False
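For orientation, a minimal usage sketch of the context manager above (illustrative only; the dictionary keys mirror those read in __init__, and the values are placeholders):

    # Hypothetical caller code, not part of the file above: __enter__ returns
    # the OMSDK driver handle, and __exit__ calls disconnect() automatically.
    params = {'idrac_ip': '192.0.2.10', 'idrac_user': 'root',
              'idrac_password': 'calvin', 'idrac_port': 443}
    with iDRACConnection(params) as idrac:
        pass  # use the OMSDK driver handle here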
163 plugins/module_utils/remote_management/dellemc/ome.py (Normal file)
@@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-

# Dell EMC OpenManage Ansible Modules
# Version 1.3
# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode

SESSION_RESOURCE_COLLECTION = {
    "SESSION": "SessionService/Sessions",
    "SESSION_ID": "SessionService/Sessions('{Id}')",
}


class OpenURLResponse(object):
    """Handles HTTPResponse"""

    def __init__(self, resp):
        self.body = None
        self.resp = resp
        if self.resp:
            self.body = self.resp.read()

    @property
    def json_data(self):
        try:
            return json.loads(self.body)
        except ValueError:
            raise ValueError("Unable to parse json")

    @property
    def status_code(self):
        return self.resp.getcode()

    @property
    def success(self):
        return self.status_code in (200, 201, 202, 204)

    @property
    def token_header(self):
        return self.resp.headers.get('X-Auth-Token')


class RestOME(object):
    """Handles OME API requests"""

    def __init__(self, module_params=None, req_session=False):
        self.module_params = module_params
        self.hostname = self.module_params["hostname"]
        self.username = self.module_params["username"]
        self.password = self.module_params["password"]
        self.port = self.module_params["port"]
        self.req_session = req_session
        self.session_id = None
        self.protocol = 'https'
        self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    def _get_base_url(self):
        """builds base url"""
        return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port)

    def _build_url(self, path, query_param=None):
        """builds complete url"""
        url = path
        base_uri = self._get_base_url()
        if path:
            url = '{0}/{1}'.format(base_uri, path)
        if query_param:
            url += "?{0}".format(urlencode(query_param))
        return url

    def _url_common_args_spec(self, method, api_timeout, headers=None):
        """Creates an argument common spec"""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = {
            "method": method,
            "validate_certs": False,
            "use_proxy": True,
            "headers": req_header,
            "timeout": api_timeout,
            "follow_redirects": 'all',
        }
        return url_kwargs

    def _args_without_session(self, method, api_timeout=30, headers=None):
        """Creates an argument spec in case of basic authentication"""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["url_username"] = self.username
        url_kwargs["url_password"] = self.password
        url_kwargs["force_basic_auth"] = True
        return url_kwargs

    def _args_with_session(self, method, api_timeout=30, headers=None):
        """Creates an argument spec, in case of authentication with session"""
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["force_basic_auth"] = False
        return url_kwargs

    def invoke_request(self, method, path, data=None, query_param=None, headers=None,
                       api_timeout=30, dump=True):
        """
        Sends a request via open_url
        Returns :class:`OpenURLResponse` object.
        :arg method: HTTP verb to use for the request
        :arg path: path to request without query parameter
        :arg data: (optional) Payload to send with the request
        :arg query_param: (optional) Dictionary of query parameter to send with request
        :arg headers: (optional) Dictionary of HTTP Headers to send with the
            request
        :arg api_timeout: (optional) How long to wait for the server to send
            data before giving up
        :arg dump: (Optional) boolean value for dumping payload data.
        :returns: OpenURLResponse
        """
        try:
            if 'X-Auth-Token' in self._headers:
                url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
            else:
                url_kwargs = self._args_without_session(method, api_timeout, headers=headers)
            if data and dump:
                data = json.dumps(data)
            url = self._build_url(path, query_param=query_param)
            resp = open_url(url, data=data, **url_kwargs)
            resp_data = OpenURLResponse(resp)
        except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
            raise err
        return resp_data

    def __enter__(self):
        """Creates sessions by passing it to header"""
        if self.req_session:
            payload = {'UserName': self.username,
                       'Password': self.password,
                       'SessionType': 'API', }
            path = SESSION_RESOURCE_COLLECTION["SESSION"]
            resp = self.invoke_request('POST', path, data=payload)
            if resp and resp.success:
                self.session_id = resp.json_data.get("Id")
                self._headers["X-Auth-Token"] = resp.token_header
            else:
                msg = "Could not create the session"
                raise ConnectionError(msg)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Deletes a session id, which is in use for request"""
        if self.session_id:
            path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
            self.invoke_request('DELETE', path)
        return False
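For orientation, a minimal usage sketch of RestOME (illustrative only; the hostname and the GET path are placeholders): with req_session=True, __enter__ POSTs to SessionService/Sessions and __exit__ deletes the session again.

    # Hypothetical caller code, not part of the file above.
    params = {'hostname': 'ome.example.com', 'username': 'admin',
              'password': 'secret', 'port': 443}
    with RestOME(module_params=params, req_session=True) as ome:
        resp = ome.invoke_request('GET', 'DeviceService/Devices')  # placeholder path
        if resp.success:
            devices = resp.json_data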
@@ -39,7 +39,7 @@ class ScalewayException(Exception):
R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
    (,<[^>]+>;\srel="(first|previous|next|last)")*'''
# Specify a single relation, for iteration and string extraction purposes
R_RELATION = r'</?(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'


def parse_pagination_link(header):
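The two R_RELATION patterns above differ only in whether an optional leading slash is tolerated inside the angle brackets. A quick, illustrative check of what the pattern extracts (the header value is a placeholder):

    import re
    header = '<https://api.example.com/servers?page=2>; rel="next"'
    m = re.match(R_RELATION, header)
    if m:
        # prints the target URL and the relation name 'next'
        print(m.group('target_IRI'), m.group('relation'))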
@@ -84,7 +84,7 @@ class UTM:
            raise UTMModuleConfigurationError(
                "The keys " + to_native(
                    self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native(
                    list(module.params.keys())))
                    module.params.keys()))

    def execute(self):
        try:
@@ -20,6 +20,7 @@ except ImportError:
    XENAPI_IMP_ERR = traceback.format_exc()

from ansible.module_utils.basic import env_fallback, missing_required_lib
from ansible.module_utils.common.network import is_mac
from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION


1 plugins/modules/ali_instance_facts.py (Symbolic link)
@@ -0,0 +1 @@
cloud/alicloud/ali_instance_facts.py
1 plugins/modules/cloud/alicloud/ali_instance_facts.py (Symbolic link)
@@ -0,0 +1 @@
ali_instance_info.py
@@ -383,6 +383,9 @@ def main():
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
        module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    if HAS_FOOTMARK is False:
        module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)

@@ -102,8 +102,7 @@ def do_install(module, mode, rootfs, container, image, values_list, backend):
    system_list = ["--system"] if mode == 'system' else []
    user_list = ["--user"] if mode == 'user' else []
    rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
    atomic_bin = module.get_bin_path('atomic')
    args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
    args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
    rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
@@ -113,8 +112,7 @@ def do_install(module, mode, rootfs, container, image, values_list, backend):


def do_update(module, container, image, values_list):
    atomic_bin = module.get_bin_path('atomic')
    args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
    args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
    rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
@@ -124,8 +122,7 @@ def do_update(module, container, image, values_list):


def do_uninstall(module, name, backend):
    atomic_bin = module.get_bin_path('atomic')
    args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name]
    args = ['atomic', 'uninstall', "--storage=%s" % backend, name]
    rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
@@ -133,8 +130,7 @@ def do_uninstall(module, name, backend):


def do_rollback(module, name):
    atomic_bin = module.get_bin_path('atomic')
    args = [atomic_bin, 'containers', 'rollback', name]
    args = ['atomic', 'containers', 'rollback', name]
    rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
@@ -152,12 +148,14 @@ def core(module):
    backend = module.params['backend']
    state = module.params['state']

    atomic_bin = module.get_bin_path('atomic')
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    out = {}
    err = {}
    rc = 0

    values_list = ["--set=%s" % x for x in values] if values else []

    args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
    args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
    rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:
        module.fail_json(rc=rc, msg=err)
@@ -196,7 +194,9 @@ def main():
        module.fail_json(msg="values is supported only with user or system mode")

    # Verify that the platform supports atomic command
    dummy = module.get_bin_path('atomic', required=True)
    rc, out, err = module.run_command('atomic -v', check_rc=False)
    if rc != 0:
        module.fail_json(msg="Error in running atomic command", err=err)

    try:
        core(module)

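The recurring change in these atomic hunks swaps between a hardcoded 'atomic' argv entry and the path resolved by get_bin_path. A minimal sketch of the resolved-path pattern (assuming a standard AnsibleModule instance named module):

    # Resolve the executable once; required=True fails the module if it is missing.
    atomic_bin = module.get_bin_path('atomic', required=True)
    rc, out, err = module.run_command([atomic_bin, '-v'], check_rc=False)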
@@ -57,14 +57,18 @@ from ansible.module_utils._text import to_native

def core(module):
    revision = module.params['revision']
    atomic_bin = module.get_bin_path('atomic', required=True)
    args = []

    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')

    if revision == 'latest':
        args = [atomic_bin, 'host', 'upgrade']
        args = ['atomic', 'host', 'upgrade']
    else:
        args = [atomic_bin, 'host', 'deploy', revision]
        args = ['atomic', 'host', 'deploy', revision]

    out = {}
    err = {}
    rc = 0

    rc, out, err = module.run_command(args, check_rc=False)


@@ -73,8 +73,7 @@ from ansible.module_utils._text import to_native


def do_upgrade(module, image):
    atomic_bin = module.get_bin_path('atomic')
    args = [atomic_bin, 'update', '--force', image]
    args = ['atomic', 'update', '--force', image]
    rc, out, err = module.run_command(args, check_rc=False)
    if rc != 0:  # something went wrong emit the msg
        module.fail_json(rc=rc, msg=err)
@@ -92,21 +91,20 @@ def core(module):
    is_upgraded = False

    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    atomic_bin = module.get_bin_path('atomic')
    out = {}
    err = {}
    rc = 0

    if backend:
        if state == 'present' or state == 'latest':
            args = [atomic_bin, 'pull', "--storage=%s" % backend, image]
            args = ['atomic', 'pull', "--storage=%s" % backend, image]
            rc, out, err = module.run_command(args, check_rc=False)
            if rc < 0:
                module.fail_json(rc=rc, msg=err)
            else:
                out_run = ""
                if started:
                    args = [atomic_bin, 'run', "--storage=%s" % backend, image]
                    args = ['atomic', 'run', "--storage=%s" % backend, image]
                    rc, out_run, err = module.run_command(args, check_rc=False)
                    if rc < 0:
                        module.fail_json(rc=rc, msg=err)
@@ -114,7 +112,7 @@ def core(module):
                changed = "Extracting" in out or "Copying blob" in out
                module.exit_json(msg=(out + out_run), changed=changed)
        elif state == 'absent':
            args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image]
            args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image]
            rc, out, err = module.run_command(args, check_rc=False)
            if rc < 0:
                module.fail_json(rc=rc, msg=err)
@@ -128,11 +126,11 @@ def core(module):
        is_upgraded = do_upgrade(module, image)

        if started:
            args = [atomic_bin, 'run', image]
            args = ['atomic', 'run', image]
        else:
            args = [atomic_bin, 'install', image]
            args = ['atomic', 'install', image]
    elif state == 'absent':
        args = [atomic_bin, 'uninstall', image]
        args = ['atomic', 'uninstall', image]

    rc, out, err = module.run_command(args, check_rc=False)

@@ -157,7 +155,9 @@ def main():
    )

    # Verify that the platform supports atomic command
    dummy = module.get_bin_path('atomic', required=True)
    rc, out, err = module.run_command('atomic -v', check_rc=False)
    if rc != 0:
        module.fail_json(msg="Error in running atomic command", err=err)

    try:
        core(module)

@@ -30,6 +30,10 @@ options:
    required: False
    default: present
    choices: ['present','absent']
  wait:
    description:
      - This option does nothing and will be removed in community.general 3.0.0.
    type: bool
requirements:
    - python = 2.7
    - requests >= 2.5.0
@@ -181,6 +185,7 @@ class ClcAntiAffinityPolicy:
        argument_spec = dict(
            name=dict(required=True),
            location=dict(required=True),
            wait=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'),  # was Ansible 2.14
            state=dict(default='present', choices=['present', 'absent']),
        )
        return argument_spec

@@ -260,7 +260,7 @@ class DimensionDataNetworkModule(DimensionDataModule):
            )

            self.module.fail_json(
                "Unexpected failure deleting network with id %s" % network.id
                "Unexpected failure deleting network with id %s", network.id
            )

        except DimensionDataAPIException as e:

@@ -26,7 +26,6 @@ options:
    - Heroku API key
  apps:
    type: list
    elements: str
    description:
    - List of Heroku App names
    required: true
@@ -110,7 +109,7 @@ def main():
    argument_spec = HerokuHelper.heroku_argument_spec()
    argument_spec.update(
        user=dict(required=True, type='str'),
        apps=dict(required=True, type='list', elements='str'),
        apps=dict(required=True, type='list'),
        suppress_invitation=dict(default=False, type='bool'),
        state=dict(default='present', type='str', choices=['present', 'absent']),
    )

@@ -28,6 +28,7 @@ options:
    - The region of the instance. This is a required parameter only when
      creating Linode instances. See
      U(https://www.linode.com/docs/api/regions/).
    required: false
    type: str
  image:
    description:
@@ -35,12 +36,14 @@ options:
      creating Linode instances. See
      U(https://www.linode.com/docs/api/images/).
    type: str
    required: false
  type:
    description:
    - The type of the instance. This is a required parameter only when
      creating Linode instances. See
      U(https://www.linode.com/docs/api/linode-types/).
    type: str
    required: false
  label:
    description:
    - The instance label. This label is used as the main determiner for
@@ -53,30 +56,25 @@ options:
      group labelling is deprecated but still supported. The encouraged
      method for marking instances is to use tags.
    type: str
  private_ip:
    description:
    - If C(true), the created Linode will have private networking enabled and
      assigned a private IPv4 address.
    type: bool
    default: false
    version_added: 3.0.0
    required: false
  tags:
    description:
    - The tags that the instance should be marked under. See
      U(https://www.linode.com/docs/api/tags/).
    required: false
    type: list
    elements: str
  root_pass:
    description:
    - The password for the root user. If not specified, one will be
      generated. This generated password will be available in the task
      success JSON.
    required: false
    type: str
  authorized_keys:
    description:
    - A list of SSH public key parts to deploy for the root user.
    required: false
    type: list
    elements: str
  state:
    description:
    - The desired instance state.
@@ -242,16 +240,15 @@ def initialise_module():
            no_log=True,
            fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
        ),
        authorized_keys=dict(type='list', elements='str', no_log=False),
        group=dict(type='str'),
        image=dict(type='str'),
        private_ip=dict(type='bool', default=False),
        region=dict(type='str'),
        root_pass=dict(type='str', no_log=True),
        tags=dict(type='list', elements='str'),
        type=dict(type='str'),
        stackscript_id=dict(type='int'),
        stackscript_data=dict(type='dict'),
        authorized_keys=dict(type='list', required=False),
        group=dict(type='str', required=False),
        image=dict(type='str', required=False),
        region=dict(type='str', required=False),
        root_pass=dict(type='str', required=False, no_log=True),
        tags=dict(type='list', required=False),
        type=dict(type='str', required=False),
        stackscript_id=dict(type='int', required=False),
        stackscript_data=dict(type='dict', required=False),
    ),
    supports_check_mode=False,
    required_one_of=(
@@ -291,7 +288,6 @@ def main():
        group=module.params['group'],
        image=module.params['image'],
        label=module.params['label'],
        private_ip=module.params['private_ip'],
        region=module.params['region'],
        root_pass=module.params['root_pass'],
        tags=module.params['tags'],

@@ -1662,7 +1662,7 @@ def main():
        ),
        backing_store=dict(
            type='str',
            choices=list(LXC_BACKING_STORE.keys()),
            choices=LXC_BACKING_STORE.keys(),
            default='dir'
        ),
        template_options=dict(
@@ -1699,7 +1699,7 @@ def main():
            type='path'
        ),
        state=dict(
            choices=list(LXC_ANSIBLE_STATES.keys()),
            choices=LXC_ANSIBLE_STATES.keys(),
            default='started'
        ),
        container_command=dict(
@@ -1733,7 +1733,7 @@ def main():
            type='path',
        ),
        archive_compression=dict(
            choices=list(LXC_COMPRESSION_MAP.keys()),
            choices=LXC_COMPRESSION_MAP.keys(),
            default='gzip'
        )
    ),

@@ -665,7 +665,7 @@ def main():
            type='dict',
        ),
        state=dict(
            choices=list(LXD_ANSIBLE_STATES.keys()),
            choices=LXD_ANSIBLE_STATES.keys(),
            default='started'
        ),
        target=dict(

1 plugins/modules/cloud/memset/memset_memstore_facts.py (Symbolic link)
@@ -0,0 +1 @@
memset_memstore_info.py
@@ -151,6 +151,9 @@ def main():
        ),
        supports_check_mode=False
    )
    if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
        module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    # populate the dict with the user-provided vars.
    args = dict()

1 plugins/modules/cloud/memset/memset_server_facts.py (Symbolic link)
@@ -0,0 +1 @@
memset_server_info.py
@@ -276,6 +276,9 @@ def main():
        ),
        supports_check_mode=False
    )
    if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
        module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    # populate the dict with the user-provided vars.
    args = dict()

216 plugins/modules/cloud/misc/helm.py (Normal file)
@@ -0,0 +1,216 @@
#!/usr/bin/python
# (c) 2016, Flavio Percoco <flavio@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
deprecated:
  removed_in: 3.0.0  # was Ansible 2.14
  why: For more details https://github.com/ansible/ansible/issues/61546.
  alternative: Use M(community.kubernetes.helm) instead.
module: helm
short_description: Manages Kubernetes packages with the Helm package manager
author: "Flavio Percoco (@flaper87)"
description:
  - Install, upgrade, delete and list packages with the Helm package manager.
requirements:
  - "pyhelm"
  - "grpcio"
options:
  host:
    description:
      - Tiller's server host.
    type: str
    default: "localhost"
  port:
    description:
      - Tiller's server port.
    type: int
    default: 44134
  namespace:
    description:
      - Kubernetes namespace where the chart should be installed.
    type: str
    default: "default"
  name:
    description:
      - Release name to manage.
    type: str
  state:
    description:
      - Whether to install C(present), remove C(absent), or purge C(purged) a package.
    choices: ['absent', 'purged', 'present']
    type: str
    default: "present"
  chart:
    description:
      - A map describing the chart to install. See examples for available options.
    type: dict
    default: {}
  values:
    description:
      - A map of value options for the chart.
    type: dict
    default: {}
  disable_hooks:
    description:
      - Whether to disable hooks during the uninstall process.
    type: bool
    default: 'no'
'''

RETURN = ''' # '''

EXAMPLES = '''
- name: Install helm chart
  community.general.helm:
    host: localhost
    chart:
      name: memcached
      version: 0.4.0
      source:
        type: repo
        location: https://kubernetes-charts.storage.googleapis.com
    state: present
    name: my-memcached
    namespace: default

- name: Uninstall helm chart
  community.general.helm:
    host: localhost
    state: absent
    name: my-memcached

- name: Install helm chart from a git repo
  community.general.helm:
    host: localhost
    chart:
      source:
        type: git
        location: https://github.com/user/helm-chart.git
    state: present
    name: my-example
    namespace: default
    values:
      foo: "bar"

- name: Install helm chart from a git repo specifying path
  community.general.helm:
    host: localhost
    chart:
      source:
        type: git
        location: https://github.com/helm/charts.git
        path: stable/memcached
    state: present
    name: my-memcached
    namespace: default
    values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}"
'''

import traceback
HELM_IMPORT_ERR = None
try:
    import grpc
    from pyhelm import tiller
    from pyhelm import chartbuilder
except ImportError:
    HELM_IMPORT_ERR = traceback.format_exc()

from ansible.module_utils.basic import AnsibleModule, missing_required_lib


def install(module, tserver):
    changed = False
    params = module.params
    name = params['name']
    values = params['values']
    chart = module.params['chart']
    namespace = module.params['namespace']

    chartb = chartbuilder.ChartBuilder(chart)
    r_matches = (x for x in tserver.list_releases()
                 if x.name == name and x.namespace == namespace)
    installed_release = next(r_matches, None)
    if installed_release:
        if installed_release.chart.metadata.version != chart['version']:
            tserver.update_release(chartb.get_helm_chart(), False,
                                   namespace, name=name, values=values)
            changed = True
    else:
        tserver.install_release(chartb.get_helm_chart(), namespace,
                                dry_run=False, name=name,
                                values=values)
        changed = True

    return dict(changed=changed)


def delete(module, tserver, purge=False):
    changed = False
    params = module.params

    if not module.params['name']:
        module.fail_json(msg='Missing required field name')

    name = module.params['name']
    disable_hooks = params['disable_hooks']

    try:
        tserver.uninstall_release(name, disable_hooks, purge)
        changed = True
    except grpc._channel._Rendezvous as exc:
        if 'not found' not in str(exc):
            raise exc

    return dict(changed=changed)


def main():
    """The main function."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='localhost'),
            port=dict(type='int', default=44134),
            name=dict(type='str', default=''),
            chart=dict(type='dict'),
            state=dict(
                choices=['absent', 'purged', 'present'],
                default='present'
            ),
            # Install options
            values=dict(type='dict'),
            namespace=dict(type='str', default='default'),

            # Uninstall options
            disable_hooks=dict(type='bool', default=False),
        ),
        supports_check_mode=True)

    if HELM_IMPORT_ERR:
        module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR)

    host = module.params['host']
    port = module.params['port']
    state = module.params['state']
    tserver = tiller.Tiller(host, port)

    if state == 'present':
        rst = install(module, tserver)

    if state in 'absent':
        rst = delete(module, tserver)

    if state in 'purged':
        rst = delete(module, tserver, True)

    module.exit_json(**rst)


if __name__ == '__main__':
    main()
503 plugins/modules/cloud/misc/ovirt.py (Normal file)
@@ -0,0 +1,503 @@
#!/usr/bin/python

# Copyright: (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: ovirt
author:
- Vincent Van der Kussen (@vincentvdk)
short_description: oVirt/RHEV platform management
deprecated:
  removed_in: 3.0.0  # was Ansible 2.14
  why: This module is for deprecated version of ovirt.
  alternative: Use C(ovirt_vm) from the C(ovirt.ovirt) collection instead
description:
  - This module only supports oVirt/RHEV version 3. A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
  - Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform.
options:
  user:
    description:
      - The user to authenticate with.
    type: str
    required: true
  url:
    description:
      - The url of the oVirt instance.
    type: str
    required: true
  instance_name:
    description:
      - The name of the instance to use.
    type: str
    required: true
    aliases: [ vmname ]
  password:
    description:
      - Password of the user to authenticate with.
    type: str
    required: true
  image:
    description:
      - The template to use for the instance.
    type: str
  resource_type:
    description:
      - Whether you want to deploy an image or create an instance from scratch.
    type: str
    choices: [ new, template ]
  zone:
    description:
      - Deploy the image to this oVirt cluster.
    type: str
  instance_disksize:
    description:
      - Size of the instance's disk in GB.
    type: str
    aliases: [ vm_disksize]
  instance_cpus:
    description:
      - The instance's number of CPUs.
    type: str
    default: 1
    aliases: [ vmcpus ]
  instance_nic:
    description:
      - The name of the network interface in oVirt/RHEV.
    type: str
    aliases: [ vmnic ]
  instance_network:
    description:
      - The logical network the machine should belong to.
    type: str
    default: rhevm
    aliases: [ vmnetwork ]
  instance_mem:
    description:
      - The instance's amount of memory in MB.
    type: str
    aliases: [ vmmem ]
  instance_type:
    description:
      - Define whether the instance is a server, desktop or high_performance.
      - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
    type: str
    choices: [ desktop, server, high_performance ]
    default: server
    aliases: [ vmtype ]
  disk_alloc:
    description:
      - Define whether disk is thin or preallocated.
    type: str
    choices: [ preallocated, thin ]
    default: thin
  disk_int:
    description:
      - Interface type of the disk.
    type: str
    choices: [ ide, virtio ]
    default: virtio
  instance_os:
    description:
      - Type of Operating System.
    type: str
    aliases: [ vmos ]
  instance_cores:
    description:
      - Define the instance's number of cores.
    type: str
    default: 1
    aliases: [ vmcores ]
  sdomain:
    description:
      - The Storage Domain where you want to create the instance's disk on.
    type: str
  region:
    description:
      - The oVirt/RHEV datacenter where you want to deploy to.
    type: str
  instance_dns:
    description:
      - Define the instance's Primary DNS server.
    type: str
    aliases: [ dns ]
  instance_domain:
    description:
      - Define the instance's Domain.
    type: str
    aliases: [ domain ]
  instance_hostname:
    description:
      - Define the instance's Hostname.
    type: str
    aliases: [ hostname ]
  instance_ip:
    description:
      - Define the instance's IP.
    type: str
    aliases: [ ip ]
  instance_netmask:
    description:
      - Define the instance's Netmask.
    type: str
    aliases: [ netmask ]
  instance_gateway:
    description:
      - Define the instance's Gateway.
    type: str
    aliases: [ gateway ]
  instance_rootpw:
    description:
      - Define the instance's Root password.
    type: str
    aliases: [ rootpw ]
  instance_key:
    description:
      - Define the instance's Authorized key.
    type: str
    aliases: [ key ]
  state:
    description:
      - Create, terminate or remove instances.
    type: str
    choices: [ absent, present, restart, shutdown, started ]
    default: present
requirements:
  - ovirt-engine-sdk-python
'''

EXAMPLES = '''
- name: Basic example to provision from image
  community.general.ovirt:
    user: admin@internal
    url: https://ovirt.example.com
    instance_name: ansiblevm04
    password: secret
    image: centos_64
    zone: cluster01
    resource_type: template

- name: Full example to create new instance from scratch
  community.general.ovirt:
    instance_name: testansible
    resource_type: new
    instance_type: server
    user: admin@internal
    password: secret
    url: https://ovirt.example.com
    instance_disksize: 10
    zone: cluster01
    region: datacenter1
    instance_cpus: 1
    instance_nic: nic1
    instance_network: rhevm
    instance_mem: 1000
    disk_alloc: thin
    sdomain: FIBER01
    instance_cores: 1
    instance_os: rhel_6x64
    disk_int: virtio

- name: Stopping an existing instance
  community.general.ovirt:
    instance_name: testansible
    state: stopped
    user: admin@internal
    password: secret
    url: https://ovirt.example.com

- name: Start an existing instance
  community.general.ovirt:
    instance_name: testansible
    state: started
    user: admin@internal
    password: secret
    url: https://ovirt.example.com

- name: Start an instance with cloud init information
  community.general.ovirt:
    instance_name: testansible
    state: started
    user: admin@internal
    password: secret
    url: https://ovirt.example.com
    hostname: testansible
    domain: ansible.local
    ip: 192.0.2.100
    netmask: 255.255.255.0
    gateway: 192.0.2.1
    rootpw: bigsecret
'''

import time

try:
    from ovirtsdk.api import API
    from ovirtsdk.xml import params
    HAS_OVIRTSDK = True
except ImportError:
    HAS_OVIRTSDK = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.removed import removed_module


# ------------------------------------------------------------------- #
# create connection with API
#
def conn(url, user, password):
    api = API(url=url, username=user, password=password, insecure=True)
    try:
        value = api.test()
    except Exception:
        raise Exception("error connecting to the oVirt API")
    return api


# ------------------------------------------------------------------- #
# Create VM from scratch
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
    if vmdisk_alloc == 'thin':
        # define VM params
        vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
                             template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
                             cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
        # define disk params
        vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System",
                             format='cow',
                             storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
    elif vmdisk_alloc == 'preallocated':
        # define VM params
        vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
                             template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
                             cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
        # define disk params
        vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
                             format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')

    try:
        conn.vms.add(vmparams)
    except Exception:
        raise Exception("Error creating VM with specified parameters")
    vm = conn.vms.get(name=vmname)
    try:
        vm.disks.add(vmdisk)
    except Exception:
        raise Exception("Error attaching disk")
    try:
        vm.nics.add(nic_net1)
    except Exception:
        raise Exception("Error adding nic")


# create an instance from a template
def create_vm_template(conn, vmname, image, zone):
    vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image), disks=params.Disks(clone=True))
    try:
        conn.vms.add(vmparams)
    except Exception:
        raise Exception('error adding template %s' % image)


# start instance
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
             domain=None, dns=None, rootpw=None, key=None):
    vm = conn.vms.get(name=vmname)
    use_cloud_init = False
    nics = None
    nic = None
    if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
        use_cloud_init = True
    if ip and netmask and gateway:
        ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
        nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
        nics = params.Nics()
    nics = params.GuestNicsConfiguration(nic_configuration=[nic])
    initialization = params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
                                           root_password=rootpw, nic_configurations=nics, dns_servers=dns,
                                           authorized_ssh_keys=key)
    action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
    vm.start(action=action)


# Stop instance
def vm_stop(conn, vmname):
    vm = conn.vms.get(name=vmname)
    vm.stop()


# restart instance
def vm_restart(conn, vmname):
    state = vm_status(conn, vmname)
    vm = conn.vms.get(name=vmname)
    vm.stop()
    while conn.vms.get(vmname).get_status().get_state() != 'down':
        time.sleep(5)
    vm.start()


# remove an instance
def vm_remove(conn, vmname):
    vm = conn.vms.get(name=vmname)
    vm.delete()


# ------------------------------------------------------------------- #
# VM statuses
#
# Get the VMs status
def vm_status(conn, vmname):
    status = conn.vms.get(name=vmname).status.state
    return status


# Get VM object and return it's name if object exists
def get_vm(conn, vmname):
    vm = conn.vms.get(name=vmname)
    if vm is None:
        name = "empty"
    else:
        name = vm.get_name()
    return name

# ------------------------------------------------------------------- #
# Hypervisor operations
#
# not available yet
# ------------------------------------------------------------------- #
# Main


def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'present', 'restart', 'shutdown', 'started']),
            user=dict(type='str', required=True),
            url=dict(type='str', required=True),
            instance_name=dict(type='str', required=True, aliases=['vmname']),
            password=dict(type='str', required=True, no_log=True),
            image=dict(type='str'),
            resource_type=dict(type='str', choices=['new', 'template']),
            zone=dict(type='str'),
            instance_disksize=dict(type='str', aliases=['vm_disksize']),
            instance_cpus=dict(type='str', default=1, aliases=['vmcpus']),
            instance_nic=dict(type='str', aliases=['vmnic']),
            instance_network=dict(type='str', default='rhevm', aliases=['vmnetwork']),
            instance_mem=dict(type='str', aliases=['vmmem']),
            instance_type=dict(type='str', default='server', aliases=['vmtype'], choices=['desktop', 'server', 'high_performance']),
            disk_alloc=dict(type='str', default='thin', choices=['preallocated', 'thin']),
            disk_int=dict(type='str', default='virtio', choices=['ide', 'virtio']),
            instance_os=dict(type='str', aliases=['vmos']),
            instance_cores=dict(type='str', default=1, aliases=['vmcores']),
            instance_hostname=dict(type='str', aliases=['hostname']),
            instance_ip=dict(type='str', aliases=['ip']),
            instance_netmask=dict(type='str', aliases=['netmask']),
            instance_gateway=dict(type='str', aliases=['gateway']),
            instance_domain=dict(type='str', aliases=['domain']),
            instance_dns=dict(type='str', aliases=['dns']),
            instance_rootpw=dict(type='str', aliases=['rootpw'], no_log=True),
            instance_key=dict(type='str', aliases=['key'], no_log=True),
            sdomain=dict(type='str'),
            region=dict(type='str'),
        ),
    )

    if not HAS_OVIRTSDK:
        module.fail_json(msg='ovirtsdk required for this module')

    state = module.params['state']
    user = module.params['user']
    url = module.params['url']
    vmname = module.params['instance_name']
    password = module.params['password']
    image = module.params['image']  # name of the image to deploy
    resource_type = module.params['resource_type']  # template or from scratch
    zone = module.params['zone']  # oVirt cluster
    vmdisk_size = module.params['instance_disksize']  # disksize
    vmcpus = module.params['instance_cpus']  # number of cpu
    vmnic = module.params['instance_nic']  # network interface
    vmnetwork = module.params['instance_network']  # logical network
    vmmem = module.params['instance_mem']  # mem size
    vmdisk_alloc = module.params['disk_alloc']  # thin, preallocated
    vmdisk_int = module.params['disk_int']  # disk interface virtio or ide
    vmos = module.params['instance_os']  # Operating System
    vmtype = module.params['instance_type']  # server, desktop or high_performance
    vmcores = module.params['instance_cores']  # number of cores
    sdomain = module.params['sdomain']  # storage domain to store disk on
    region = module.params['region']  # oVirt Datacenter
    hostname = module.params['instance_hostname']
    ip = module.params['instance_ip']
    netmask = module.params['instance_netmask']
    gateway = module.params['instance_gateway']
    domain = module.params['instance_domain']
    dns = module.params['instance_dns']
    rootpw = module.params['instance_rootpw']
    key = module.params['instance_key']
    # initialize connection
    try:
        c = conn(url + "/api", user, password)
    except Exception as e:
        module.fail_json(msg='%s' % e)

    if state == 'present':
        if get_vm(c, vmname) == "empty":
            if resource_type == 'template':
                try:
                    create_vm_template(c, vmname, image, zone)
                except Exception as e:
                    module.fail_json(msg='%s' % e)
                module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
            elif resource_type == 'new':
                # FIXME: refactor, use keyword args.
                try:
                    create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
                except Exception as e:
                    module.fail_json(msg='%s' % e)
                module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
            else:
                module.exit_json(changed=False, msg="You did not specify a resource type")
        else:
            module.exit_json(changed=False, msg="VM %s already exists" % vmname)

    if state == 'started':
        if vm_status(c, vmname) == 'up':
            module.exit_json(changed=False, msg="VM %s is already running" % vmname)
        else:
            # vm_start(c, vmname)
            vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
            module.exit_json(changed=True, msg="VM %s started" % vmname)

    if state == 'shutdown':
        if vm_status(c, vmname) == 'down':
            module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
        else:
            vm_stop(c, vmname)
            module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)

    if state == 'restart':
        if vm_status(c, vmname) == 'up':
            vm_restart(c, vmname)
            module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
        else:
            module.exit_json(changed=False, msg="VM %s is not running" % vmname)

    if state == 'absent':
        if get_vm(c, vmname) == "empty":
            module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
        else:
            vm_remove(c, vmname)
            module.exit_json(changed=True, msg="VM %s removed" % vmname)


if __name__ == '__main__':
    main()
@@ -17,6 +17,7 @@ options:
  password:
    description:
      - the instance root password
      - required only for C(state=present)
    type: str
  hostname:
    description:
@@ -123,15 +124,6 @@ options:
      - with states C(stopped) , C(restarted) allow to force stop instance
    type: bool
    default: 'no'
  purge:
    description:
      - Remove container from all related configurations.
      - For example backup jobs, replication jobs, or HA.
      - Related ACLs and Firewall entries will always be removed.
      - Used with state C(absent).
    type: bool
    default: false
    version_added: 2.3.0
  state:
    description:
      - Indicate desired state of the instance
@@ -515,7 +507,6 @@ def main():
        searchdomain=dict(),
        timeout=dict(type='int', default=30),
        force=dict(type='bool', default=False),
        purge=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
        pubkey=dict(type='str', default=None),
        unprivileged=dict(type='bool', default=False),
@@ -523,7 +514,7 @@ def main():
        hookscript=dict(type='str'),
        proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
    ),
    required_if=[('state', 'present', ['node', 'hostname', 'ostemplate'])],
    required_if=[('state', 'present', ['node', 'hostname', 'password', 'ostemplate'])],
    required_together=[('api_token_id', 'api_token_secret')],
    required_one_of=[('api_password', 'api_token_id')],
)
@@ -696,13 +687,7 @@ def main():
        if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
            module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)

        delete_params = {}

        if module.params['purge']:
            delete_params['purge'] = 1

        taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid, **delete_params)

        taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
        while timeout:
            if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                    proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):

@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Tristan Le Guern (@tleguern) <tleguern at bouledef.eu>
# Copyright: Tristan Le Guern (@Aversiste) <tleguern at bouledef.eu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
@@ -21,7 +21,7 @@ options:
      - Restrict results to a specific authentication realm.
    aliases: ['realm', 'name']
    type: str
author: Tristan Le Guern (@tleguern)
author: Tristan Le Guern (@Aversiste)
extends_documentation_fragment: community.general.proxmox.documentation
'''


@@ -21,7 +21,7 @@ options:
      - Restrict results to a specific group.
    aliases: ['groupid', 'name']
    type: str
author: Tristan Le Guern (@tleguern)
author: Tristan Le Guern (@Aversiste)
extends_documentation_fragment: community.general.proxmox.documentation
'''


@@ -425,14 +425,6 @@ options:
      option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
      changes in community.general 4.0.0.
    type: bool
  tags:
    description:
      - List of tags to apply to the VM instance.
      - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]).
      - Tags are only available in Proxmox 6+.
    type: list
    elements: str
    version_added: 2.3.0
  target:
    description:
      - Target node. Only allowed if the original VM is on shared storage.
@@ -742,20 +734,20 @@ EXAMPLES = '''

RETURN = '''
vmid:
    description: The VM vmid.
    returned: success
    type: int
    sample: 115
    description: The VM vmid.
    returned: success
    type: int
    sample: 115
status:
    description: The current virtual machine status.
    returned: success, not clone, not absent, not update
    type: str
    sample: running
msg:
    description: A short message
    returned: always
    type: str
    sample: "VM kropta with vmid = 110 is running"
    description:
      - The current virtual machine status.
    returned: success
    type: dict
    sample: '{
        "changed": false,
        "msg": "VM kropta with vmid = 110 is running",
        "status": "running"
    }'
'''

import re
@@ -866,7 +858,7 @@ def wait_for_task(module, proxmox, node, taskid):
def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
    # Available only in PVE 4
    only_v4 = ['force', 'protection', 'skiplock']
    only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
    only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig']

    # valide clone parameters
    valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
@@ -936,13 +928,6 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock
    if searchdomains:
        kwargs['searchdomain'] = ' '.join(searchdomains)

    # VM tags are expected to be valid and presented as a comma/semi-colon delimited string
    if 'tags' in kwargs:
        for tag in kwargs['tags']:
            if not re.match(r'^[a-z0-9_][a-z0-9_\-\+\.]*$', tag):
                module.fail_json(msg='%s is not a valid tag' % tag)
        kwargs['tags'] = ",".join(kwargs['tags'])

    # -args and skiplock require root@pam user - but can not use api tokens
    if module.params['api_user'] == "root@pam" and module.params['args'] is None:
        if not update and module.params['proxmox_default_behavior'] == 'compatibility':
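A quick, illustrative check of the tag pattern used in the hunk above (the sample tags are placeholders; the regex is copied verbatim from the diff):

    import re
    TAG_RE = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$')
    assert TAG_RE.match('web_01')        # starts with [a-z0-9_], then allowed chars
    assert not TAG_RE.match('-frontend') # may not start with a hyphen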
@@ -1072,13 +1057,12 @@ def main():
        smbios=dict(type='str'),
        snapname=dict(type='str'),
        sockets=dict(type='int'),
        sshkeys=dict(type='str', no_log=False),
        sshkeys=dict(type='str'),
        startdate=dict(type='str'),
        startup=dict(),
        state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
        storage=dict(type='str'),
        tablet=dict(type='bool'),
        tags=dict(type='list', elements='str'),
        target=dict(type='str'),
        tdf=dict(type='bool'),
        template=dict(type='bool'),
@@ -1283,7 +1267,6 @@ def main():
            startdate=module.params['startdate'],
            startup=module.params['startup'],
            tablet=module.params['tablet'],
            tags=module.params['tags'],
            target=module.params['target'],
            tdf=module.params['tdf'],
            template=module.params['template'],
@@ -1304,7 +1287,7 @@ def main():
        elif clone is not None:
            module.exit_json(changed=True, vmid=vmid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
        else:
            module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
            module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
    except Exception as e:
        if update:
            module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
@@ -1314,14 +1297,12 @@ def main():
|
||||
module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
|
||||
|
||||
elif state == 'started':
|
||||
status = {}
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(vmid=vmid, msg='VM with vmid <%s> does not exist in cluster' % vmid)
|
||||
status['status'] = vm[0]['status']
|
||||
if vm[0]['status'] == 'running':
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status)
|
||||
|
||||
@@ -1331,7 +1312,6 @@ def main():
|
||||
module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status)
|
||||
|
||||
elif state == 'stopped':
|
||||
status = {}
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
@@ -1340,7 +1320,6 @@ def main():
|
||||
if not vm:
|
||||
module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
|
||||
status['status'] = vm[0]['status']
|
||||
if vm[0]['status'] == 'stopped':
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status)
|
||||
|
||||
@@ -1350,7 +1329,6 @@ def main():
|
||||
module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status)
|
||||
|
||||
elif state == 'restarted':
|
||||
status = {}
|
||||
try:
|
||||
if -1 == vmid:
|
||||
module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
|
||||
@@ -1358,7 +1336,6 @@ def main():
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
status['status'] = vm[0]['status']
|
||||
if vm[0]['status'] == 'stopped':
|
||||
module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
|
||||
|
||||
@@ -1368,14 +1345,12 @@ def main():
|
||||
module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status)
|
||||
|
||||
elif state == 'absent':
|
||||
status = {}
|
||||
try:
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.exit_json(changed=False, vmid=vmid)
|
||||
|
||||
proxmox_node = proxmox.nodes(vm[0]['node'])
|
||||
status['status'] = vm[0]['status']
|
||||
if vm[0]['status'] == 'running':
|
||||
if module.params['force']:
|
||||
stop_vm(module, proxmox, vm, True)
|
||||
@@ -1397,8 +1372,6 @@ def main():
|
||||
vm = get_vm(proxmox, vmid)
|
||||
if not vm:
|
||||
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
|
||||
if not name:
|
||||
name = vm[0]['name']
|
||||
current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status']
|
||||
status['status'] = current
|
||||
if status:
|
||||
|
||||
@@ -1,190 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Tristan Le Guern (@tleguern) <tleguern at bouledef.eu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: proxmox_storage_info
short_description: Retrieve information about one or more Proxmox VE storages
version_added: 2.2.0
description:
  - Retrieve information about one or more Proxmox VE storages.
options:
  storage:
    description:
      - Only return information on a specific storage.
    aliases: ['name']
    type: str
  type:
    description:
      - Filter on a specific storage type.
    type: str
author: Tristan Le Guern (@tleguern)
extends_documentation_fragment: community.general.proxmox.documentation
notes:
  - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage).
'''


EXAMPLES = '''
- name: List existing storages
  community.general.proxmox_storage_info:
    api_host: helldorado
    api_user: root@pam
    api_password: "{{ password | default(omit) }}"
    api_token_id: "{{ token_id | default(omit) }}"
    api_token_secret: "{{ token_secret | default(omit) }}"
  register: proxmox_storages

- name: List NFS storages only
  community.general.proxmox_storage_info:
    api_host: helldorado
    api_user: root@pam
    api_password: "{{ password | default(omit) }}"
    api_token_id: "{{ token_id | default(omit) }}"
    api_token_secret: "{{ token_secret | default(omit) }}"
    type: nfs
  register: proxmox_storages_nfs

- name: Retrieve information about the lvm2 storage
  community.general.proxmox_storage_info:
    api_host: helldorado
    api_user: root@pam
    api_password: "{{ password | default(omit) }}"
    api_token_id: "{{ token_id | default(omit) }}"
    api_token_secret: "{{ token_secret | default(omit) }}"
    storage: lvm2
  register: proxmox_storage_lvm
'''

RETURN = '''
proxmox_storages:
  description: List of storage pools.
  returned: on success
  type: list
  elements: dict
  contains:
    content:
      description: Proxmox content types available in this storage
      returned: on success
      type: list
      elements: str
    digest:
      description: Storage's digest
      returned: on success
      type: str
    nodes:
      description: List of nodes associated to this storage
      returned: on success, if storage is not local
      type: list
      elements: str
    path:
      description: Physical path to this storage
      returned: on success
      type: str
    prune-backups:
      description: Backup retention options
      returned: on success
      type: list
      elements: dict
    shared:
      description: Is this storage shared
      returned: on success
      type: bool
    storage:
      description: Storage name
      returned: on success
      type: str
    type:
      description: Storage type
      returned: on success
      type: str
'''


from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.proxmox import (
    proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR, proxmox_to_ansible_bool)


class ProxmoxStorageInfoAnsible(ProxmoxAnsible):
    def get_storage(self, storage):
        try:
            storage = self.proxmox_api.storage.get(storage)
        except Exception:
            self.module.fail_json(msg="Storage '%s' does not exist" % storage)
        return ProxmoxStorage(storage)

    def get_storages(self, type=None):
        storages = self.proxmox_api.storage.get(type=type)
        storages = [ProxmoxStorage(storage) for storage in storages]
        return storages


class ProxmoxStorage:
    def __init__(self, storage):
        self.storage = storage
        # Convert proxmox representation of lists, dicts and boolean for easier
        # manipulation within ansible.
        if 'shared' in self.storage:
            self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared'])
        if 'content' in self.storage:
            self.storage['content'] = self.storage['content'].split(',')
        if 'nodes' in self.storage:
            self.storage['nodes'] = self.storage['nodes'].split(',')
        if 'prune-backups' in storage:
            options = storage['prune-backups'].split(',')
            self.storage['prune-backups'] = dict()
            for option in options:
                k, v = option.split('=')
                self.storage['prune-backups'][k] = v


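# A standalone sketch of the prune-backups parsing above: Proxmox returns
# retention options as one comma-separated "key=value" string; the sample
# value below is illustrative, not taken from a real cluster.
def parse_prune_backups(value):
    return dict(option.split('=', 1) for option in value.split(','))

print(parse_prune_backups('keep-last=3,keep-daily=7'))
# {'keep-last': '3', 'keep-daily': '7'}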
def proxmox_storage_info_argument_spec():
    return dict(
        storage=dict(type='str', aliases=['name']),
        type=dict(type='str'),
    )


def main():
    module_args = proxmox_auth_argument_spec()
    storage_info_args = proxmox_storage_info_argument_spec()
    module_args.update(storage_info_args)

    module = AnsibleModule(
        argument_spec=module_args,
        required_one_of=[('api_password', 'api_token_id')],
        required_together=[('api_token_id', 'api_token_secret')],
        mutually_exclusive=[('storage', 'type')],
        supports_check_mode=True
    )
    result = dict(
        changed=False
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)

    proxmox = ProxmoxStorageInfoAnsible(module)
    storage = module.params['storage']
    storagetype = module.params['type']

    if storage:
        storages = [proxmox.get_storage(storage)]
    else:
        storages = proxmox.get_storages(type=storagetype)
    result['proxmox_storages'] = [storage.storage for storage in storages]

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -30,7 +30,7 @@ options:
    description:
      - Restrict results to a specific user ID, which is a concatenation of a user and domain parts.
    type: str
author: Tristan Le Guern (@tleguern)
author: Tristan Le Guern (@Aversiste)
extends_documentation_fragment: community.general.proxmox.documentation
'''


@@ -1229,6 +1229,24 @@ class RHEV(object):
        self.__get_conn()
        return self.conn.set_VM_Host(vmname, vmhost)

        # pylint: disable=unreachable
        VM = self.conn.get_VM(vmname)
        HOST = self.conn.get_Host(vmhost)

        if VM.placement_policy.host is None:
            self.conn.set_VM_Host(vmname, vmhost)
        elif str(VM.placement_policy.host.id) != str(HOST.id):
            self.conn.set_VM_Host(vmname, vmhost)
        else:
            setMsg("VM's startup host was already set to " + vmhost)
        checkFail()

        if str(VM.status.state) == "up":
            self.conn.migrate_VM(vmname, vmhost)
        checkFail()

        return True

    def setHost(self, hostname, cluster, ifaces):
        self.__get_conn()
        return self.conn.set_Host(hostname, cluster, ifaces)

@@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = r'''
DOCUMENTATION = '''
---
module: terraform
short_description: Manages a Terraform deployment (and plans)
@@ -33,19 +33,6 @@ options:
        vars.tf/main.tf/etc to use.
    type: path
    required: true
  plugin_paths:
    description:
      - List of paths containing Terraform plugin executable files.
      - Plugin executables can be downloaded from U(https://releases.hashicorp.com/).
      - When set, the plugin discovery and auto-download behavior of Terraform is disabled.
      - The directory structure in the plugin path can be tricky. The Terraform docs
        U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins)
        show a simple directory of files, but actually, the directory structure
        has to follow the same structure you would see if Terraform auto-downloaded the plugins.
        See the examples below for a tree output of an example plugin directory.
    type: list
    elements: path
    version_added: 3.0.0
  workspace:
    description:
      - The terraform workspace to work with.
@@ -154,28 +141,6 @@ EXAMPLES = """
    backend_config_files:
      - /path/to/backend_config_file_1
      - /path/to/backend_config_file_2

- name: Disable plugin discovery and auto-download by setting plugin_paths
  community.general.terraform:
    project_path: 'project/'
    state: "{{ state }}"
    force_init: true
    plugin_paths:
      - /path/to/plugins_dir_1
      - /path/to/plugins_dir_2

### Example directory structure for plugin_paths example
# $ tree /path/to/plugins_dir_1
# /path/to/plugins_dir_1/
# └── registry.terraform.io
#     └── hashicorp
#         └── vsphere
#             ├── 1.24.0
#             │   └── linux_amd64
#             │       └── terraform-provider-vsphere_v1.24.0_x4
#             └── 1.26.0
#                 └── linux_amd64
#                     └── terraform-provider-vsphere_v1.26.0_x4
"""

RETURN = """
@@ -212,31 +177,24 @@ command:
import os
import json
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.six.moves import shlex_quote

from ansible.module_utils.basic import AnsibleModule

DESTROY_ARGS = ('destroy', '-no-color', '-force')
APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
module = None


def get_version(bin_path):
    extract_version = module.run_command([bin_path, 'version', '-json'])
    terraform_version = (json.loads(extract_version[1]))['terraform_version']
    return terraform_version


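# A standalone sketch of the version probe above, assuming a local `terraform`
# binary on PATH; `terraform version -json` emits a JSON document whose
# terraform_version key holds the bare version string.
import json
import subprocess

out = subprocess.check_output(['terraform', 'version', '-json'])
print(json.loads(out)['terraform_version'])  # e.g. "0.15.5"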
def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None):
def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None):
    if project_path in [None, ''] or '/' not in project_path:
        module.fail_json(msg="Path for Terraform project can not be None or ''.")
    if not os.path.exists(bin_path):
        module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
    if not os.path.isdir(project_path):
        module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
    if LooseVersion(version) < LooseVersion('0.15.0'):
        rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path)
    else:
        rc, out, err = module.run_command([bin_path, 'validate'], check_rc=True, cwd=project_path)

    rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path, use_unsafe_shell=True)

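The branch above encodes that variable arguments are only passed to `terraform validate` on releases below 0.15, where they were still accepted. A minimal sketch of the same gate:

from distutils.version import LooseVersion

def validate_command(bin_path, version, variables_args):
    command = [bin_path, 'validate']
    if LooseVersion(version) < LooseVersion('0.15.0'):
        command += variables_args  # pre-0.15 validate still accepted -var/-var-file
    return command

print(validate_command('terraform', '0.13.0', ['-var', 'region=eu']))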
def _state_args(state_file):
@@ -247,7 +205,7 @@ def _state_args(state_file):
    return []


def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths):
def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure):
    command = [bin_path, 'init', '-input=false']
    if backend_config:
        for key, val in backend_config.items():
@@ -260,9 +218,6 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i
            command.extend(['-backend-config', f])
    if init_reconfigure:
        command.extend(['-reconfigure'])
    if plugin_paths:
        for plugin_path in plugin_paths:
            command.extend(['-plugin-dir', plugin_path])
    rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)

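The removed branch above maps each configured plugin path to its own `-plugin-dir` flag on the init command. A minimal sketch of the command assembly, with an illustrative path:

def build_init_command(bin_path, init_reconfigure=False, plugin_paths=None):
    command = [bin_path, 'init', '-input=false']
    if init_reconfigure:
        command.append('-reconfigure')
    for plugin_path in plugin_paths or []:
        command.extend(['-plugin-dir', plugin_path])
    return command

print(build_init_command('terraform', plugin_paths=['/opt/tf-plugins']))
# ['terraform', 'init', '-input=false', '-plugin-dir', '/opt/tf-plugins']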
@@ -312,7 +267,7 @@ def build_plan(command, project_path, variables_args, state_file, targets, state

    plan_command.extend(_state_args(state_file))

    rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path)
    rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True)

    if rc == 0:
        # no changes
@@ -333,7 +288,6 @@ def main():
        argument_spec=dict(
            project_path=dict(required=True, type='path'),
            binary_path=dict(type='path'),
            plugin_paths=dict(type='list', elements='path'),
            workspace=dict(required=False, type='str', default='default'),
            purge_workspace=dict(type='bool', default=False),
            state=dict(default='present', choices=['present', 'absent', 'planned']),
@@ -355,7 +309,6 @@ def main():

    project_path = module.params.get('project_path')
    bin_path = module.params.get('binary_path')
    plugin_paths = module.params.get('plugin_paths')
    workspace = module.params.get('workspace')
    purge_workspace = module.params.get('purge_workspace')
    state = module.params.get('state')
@@ -373,17 +326,8 @@ def main():
    else:
        command = [module.get_bin_path('terraform', required=True)]

    checked_version = get_version(command[0])

    if LooseVersion(checked_version) < LooseVersion('0.15.0'):
        DESTROY_ARGS = ('destroy', '-no-color', '-force')
        APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
    else:
        DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve')
        APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve')

    if force_init:
        init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths)
        init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure)

    workspace_ctx = get_workspace_context(command[0], project_path)
    if workspace_ctx["current"] != workspace:
@@ -407,7 +351,7 @@ def main():
    for f in variables_files:
        variables_args.extend(['-var-file', f])

    preflight_validation(command[0], project_path, checked_version, variables_args)
    preflight_validation(command[0], project_path, variables_args)

    if module.params.get('lock') is not None:
        if module.params.get('lock'):
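The version gate above exists because, as the hunk encodes, Terraform 0.15 replaced `destroy -force` with `-auto-approve` and dropped the `=true` form on apply. A sketch of the selection:

from distutils.version import LooseVersion

def lifecycle_args(version):
    # Returns (destroy_args, apply_args) for the given Terraform version.
    if LooseVersion(version) < LooseVersion('0.15.0'):
        return (('destroy', '-no-color', '-force'),
                ('apply', '-no-color', '-input=false', '-auto-approve=true'))
    return (('destroy', '-no-color', '-auto-approve'),
            ('apply', '-no-color', '-input=false', '-auto-approve'))

destroy_args, apply_args = lifecycle_args('0.14.11')
print(destroy_args)  # ('destroy', '-no-color', '-force')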
@@ -57,32 +57,27 @@ options:
        Each rule must contain protocol parameter, in addition to three optional parameters
        (port_from, port_to, and source)
    type: list
    elements: dict
  add_server_ips:
    description:
      - A list of server identifiers (id or name) to be assigned to a firewall policy.
        Used in combination with update state.
    type: list
    elements: str
    required: false
  remove_server_ips:
    description:
      - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
    type: list
    elements: str
    required: false
  add_rules:
    description:
      - A list of rules that will be added to an existing firewall policy.
        Its syntax is the same as the one used for rules parameter. Used in combination with update state.
    type: list
    elements: dict
    required: false
  remove_rules:
    description:
      - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
    type: list
    elements: str
    required: false
  description:
    description:
@@ -513,11 +508,11 @@ def main():
            name=dict(type='str'),
            firewall_policy=dict(type='str'),
            description=dict(type='str'),
            rules=dict(type='list', elements="dict", default=[]),
            add_server_ips=dict(type='list', elements="str", default=[]),
            remove_server_ips=dict(type='list', elements="str", default=[]),
            add_rules=dict(type='list', elements="dict", default=[]),
            remove_rules=dict(type='list', elements="str", default=[]),
            rules=dict(type='list', default=[]),
            add_server_ips=dict(type='list', default=[]),
            remove_server_ips=dict(type='list', default=[]),
            add_rules=dict(type='list', default=[]),
            remove_rules=dict(type='list', default=[]),
            wait=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=600),
            wait_interval=dict(type='int', default=5),

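The recurring `elements=` additions in these argument-spec hunks tell AnsibleModule what type each list item should be validated and coerced to. The spec shape, standalone and illustrative:

argument_spec = dict(
    rules=dict(type='list', elements='dict', default=[]),        # each item must be a dict
    remove_rules=dict(type='list', elements='str', default=[]),  # each item coerced to str
)
print(sorted(argument_spec))  # ['remove_rules', 'rules']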
@@ -95,7 +95,6 @@ options:
      - A list of rule objects that will be set for the load balancer. Each rule must contain protocol,
        port_balancer, and port_server parameters, in addition to source parameter, which is optional.
    type: list
    elements: dict
  description:
    description:
      - Description of the load balancer. maxLength=256
@@ -106,26 +105,22 @@ options:
      - A list of server identifiers (id or name) to be assigned to a load balancer.
        Used in combination with update state.
    type: list
    elements: str
    required: false
  remove_server_ips:
    description:
      - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
    type: list
    elements: str
    required: false
  add_rules:
    description:
      - A list of rules that will be added to an existing load balancer.
        Its syntax is the same as the one used for rules parameter. Used in combination with update state.
    type: list
    elements: dict
    required: false
  remove_rules:
    description:
      - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
    type: list
    elements: str
    required: false
  wait:
    description:
@@ -618,11 +613,11 @@ def main():
            choices=METHODS),
        datacenter=dict(
            choices=DATACENTERS),
        rules=dict(type='list', elements="dict", default=[]),
        add_server_ips=dict(type='list', elements="str", default=[]),
        remove_server_ips=dict(type='list', elements="str", default=[]),
        add_rules=dict(type='list', elements="dict", default=[]),
        remove_rules=dict(type='list', elements="str", default=[]),
        rules=dict(type='list', default=[]),
        add_server_ips=dict(type='list', default=[]),
        remove_server_ips=dict(type='list', default=[]),
        add_rules=dict(type='list', default=[]),
        remove_rules=dict(type='list', default=[]),
        wait=dict(type='bool', default=True),
        wait_timeout=dict(type='int', default=600),
        wait_interval=dict(type='int', default=5),

@@ -71,7 +71,6 @@ options:
        warning alerts, critical is used to set critical alerts. alert enables alert,
        and value is used to advise when the value is exceeded.
    type: list
    elements: dict
    suboptions:
      cpu:
        description:
@@ -97,7 +96,6 @@ options:
    description:
      - Array of ports that will be monitored.
    type: list
    elements: dict
    suboptions:
      protocol:
        description:
@@ -121,7 +119,6 @@ options:
    description:
      - Array of processes that will be monitored.
    type: list
    elements: dict
    suboptions:
      process:
        description:
@@ -136,49 +133,41 @@ options:
    description:
      - Ports to add to the monitoring policy.
    type: list
    elements: dict
    required: false
  add_processes:
    description:
      - Processes to add to the monitoring policy.
    type: list
    elements: dict
    required: false
  add_servers:
    description:
      - Servers to add to the monitoring policy.
    type: list
    elements: str
    required: false
  remove_ports:
    description:
      - Ports to remove from the monitoring policy.
    type: list
    elements: str
    required: false
  remove_processes:
    description:
      - Processes to remove from the monitoring policy.
    type: list
    elements: str
    required: false
  remove_servers:
    description:
      - Servers to remove from the monitoring policy.
    type: list
    elements: str
    required: false
  update_ports:
    description:
      - Ports to be updated on the monitoring policy.
    type: list
    elements: dict
    required: false
  update_processes:
    description:
      - Processes to be updated on the monitoring policy.
    type: list
    elements: dict
    required: false
  wait:
    description:
@@ -208,7 +197,7 @@ author:

EXAMPLES = '''
- name: Create a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    name: ansible monitoring policy
    description: Testing creation of a monitoring policy with ansible
@@ -269,13 +258,13 @@ EXAMPLES = '''
    wait: true

- name: Destroy a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    state: absent
    name: ansible monitoring policy

- name: Update a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy
    name: ansible monitoring policy updated
@@ -326,7 +315,7 @@ EXAMPLES = '''
    state: update

- name: Add a port to a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy updated
    add_ports:
@@ -339,7 +328,7 @@ EXAMPLES = '''
    state: update

- name: Update existing ports of a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy updated
    update_ports:
@@ -359,7 +348,7 @@ EXAMPLES = '''
    state: update

- name: Remove a port from a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy updated
    remove_ports:
@@ -367,7 +356,7 @@ EXAMPLES = '''
    state: update

- name: Add a process to a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy updated
    add_processes:
@@ -379,7 +368,7 @@ EXAMPLES = '''
    state: update

- name: Update existing processes of a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy updated
    update_processes:
@@ -397,7 +386,7 @@ EXAMPLES = '''
    state: update

- name: Remove a process from a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy updated
    remove_processes:
@@ -406,7 +395,7 @@ EXAMPLES = '''
    state: update

- name: Add server to a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy updated
    add_servers:
@@ -415,7 +404,7 @@ EXAMPLES = '''
    state: update

- name: Remove server from a monitoring policy
  community.general.oneandone_monitoring_policy:
  oneandone_moitoring_policy:
    auth_token: oneandone_private_api_key
    monitoring_policy: ansible monitoring policy updated
    remove_servers:
@@ -706,15 +695,15 @@ def update_monitoring_policy(module, oneandone_conn):
        threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']

        _thresholds = []
        for threshold in thresholds:
            key = list(threshold.keys())[0]
        for treshold in thresholds:
            key = treshold.keys()[0]
            if key in threshold_entities:
                _threshold = oneandone.client.Threshold(
                    entity=key,
                    warning_value=threshold[key]['warning']['value'],
                    warning_alert=str(threshold[key]['warning']['alert']).lower(),
                    critical_value=threshold[key]['critical']['value'],
                    critical_alert=str(threshold[key]['critical']['alert']).lower())
                    warning_value=treshold[key]['warning']['value'],
                    warning_alert=str(treshold[key]['warning']['alert']).lower(),
                    critical_value=treshold[key]['critical']['value'],
                    critical_alert=str(treshold[key]['critical']['alert']).lower())
                _thresholds.append(_threshold)

        if name or description or email or thresholds:
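The `list(threshold.keys())[0]` side of the hunk above is a Python 3 fix: `dict.keys()` returns an indexable list on Python 2 but a non-indexable view on Python 3 (the change also repairs the `treshold` spelling). A minimal demonstration:

threshold = {'cpu': {'warning': {'value': 90, 'alert': False}}}
try:
    key = threshold.keys()[0]        # Python 2 idiom; raises TypeError on Python 3
except TypeError:
    key = list(threshold.keys())[0]  # portable across both
print(key)  # cpu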
@@ -875,15 +864,15 @@ def create_monitoring_policy(module, oneandone_conn):
    threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']

    _thresholds = []
    for threshold in thresholds:
        key = list(threshold.keys())[0]
    for treshold in thresholds:
        key = treshold.keys()[0]
        if key in threshold_entities:
            _threshold = oneandone.client.Threshold(
                entity=key,
                warning_value=threshold[key]['warning']['value'],
                warning_alert=str(threshold[key]['warning']['alert']).lower(),
                critical_value=threshold[key]['critical']['value'],
                critical_alert=str(threshold[key]['critical']['alert']).lower())
                warning_value=treshold[key]['warning']['value'],
                warning_alert=str(treshold[key]['warning']['alert']).lower(),
                critical_value=treshold[key]['critical']['value'],
                critical_alert=str(treshold[key]['critical']['alert']).lower())
            _thresholds.append(_threshold)

    _ports = []
@@ -968,17 +957,17 @@ def main():
            agent=dict(type='str'),
            email=dict(type='str'),
            description=dict(type='str'),
            thresholds=dict(type='list', elements="dict", default=[]),
            ports=dict(type='list', elements="dict", default=[]),
            processes=dict(type='list', elements="dict", default=[]),
            add_ports=dict(type='list', elements="dict", default=[]),
            update_ports=dict(type='list', elements="dict", default=[]),
            remove_ports=dict(type='list', elements="str", default=[]),
            add_processes=dict(type='list', elements="dict", default=[]),
            update_processes=dict(type='list', elements="dict", default=[]),
            remove_processes=dict(type='list', elements="str", default=[]),
            add_servers=dict(type='list', elements="str", default=[]),
            remove_servers=dict(type='list', elements="str", default=[]),
            thresholds=dict(type='list', default=[]),
            ports=dict(type='list', default=[]),
            processes=dict(type='list', default=[]),
            add_ports=dict(type='list', default=[]),
            update_ports=dict(type='list', default=[]),
            remove_ports=dict(type='list', default=[]),
            add_processes=dict(type='list', default=[]),
            update_processes=dict(type='list', default=[]),
            remove_processes=dict(type='list', default=[]),
            add_servers=dict(type='list', default=[]),
            remove_servers=dict(type='list', default=[]),
            wait=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=600),
            wait_interval=dict(type='int', default=5),

@@ -71,12 +71,10 @@ options:
    description:
      - List of server identifiers (name or id) to be added to the private network.
    type: list
    elements: str
  remove_members:
    description:
      - List of server identifiers (name or id) to be removed from the private network.
    type: list
    elements: str
  wait:
    description:
      - wait for the instance to be in state 'running' before returning
@@ -396,8 +394,8 @@ def main():
            description=dict(type='str'),
            network_address=dict(type='str'),
            subnet_mask=dict(type='str'),
            add_members=dict(type='list', elements="str", default=[]),
            remove_members=dict(type='list', elements="str", default=[]),
            add_members=dict(type='list', default=[]),
            remove_members=dict(type='list', default=[]),
            datacenter=dict(
                choices=DATACENTERS),
            wait=dict(type='bool', default=True),

@@ -87,7 +87,6 @@ options:
      - A list of hard disks with nested "size" and "is_main" properties.
        It must be provided with vcore, cores_per_processor, and ram parameters.
    type: list
    elements: dict
  private_network:
    description:
      - The private network name or ID.
@@ -628,9 +627,9 @@ def main():
            vcore=dict(type='int'),
            cores_per_processor=dict(type='int'),
            ram=dict(type='float'),
            hdds=dict(type='list', elements='dict'),
            hdds=dict(type='list'),
            count=dict(type='int', default=1),
            ssh_key=dict(type='raw', no_log=False),
            ssh_key=dict(type='raw'),
            auto_increment=dict(type='bool', default=True),
            server=dict(type='str'),
            datacenter=dict(

plugins/modules/cloud/online/online_server_facts.py (new file, 175 lines)
@@ -0,0 +1,175 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = r'''
---
module: online_server_facts
deprecated:
  removed_in: 3.0.0  # was Ansible 2.13
  why: Deprecated in favour of C(_info) module.
  alternative: Use M(community.general.online_server_info) instead.
short_description: Gather facts about Online servers.
description:
  - Gather facts about the servers.
  - U(https://www.online.net/en/dedicated-server)
author:
  - "Remy Leone (@sieben)"
extends_documentation_fragment:
- community.general.online

'''

EXAMPLES = r'''
- name: Gather Online server facts
  community.general.online_server_facts:
    api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
'''

RETURN = r'''
---
online_server_facts:
  description: Response from Online API
  returned: success
  type: complex
  sample:
    "online_server_facts": [
      {
        "abuse": "abuse@example.com",
        "anti_ddos": false,
        "bmc": {
          "session_key": null
        },
        "boot_mode": "normal",
        "contacts": {
          "owner": "foobar",
          "tech": "foobar"
        },
        "disks": [
          {
            "$ref": "/api/v1/server/hardware/disk/68452"
          },
          {
            "$ref": "/api/v1/server/hardware/disk/68453"
          }
        ],
        "drive_arrays": [
          {
            "disks": [
              {
                "$ref": "/api/v1/server/hardware/disk/68452"
              },
              {
                "$ref": "/api/v1/server/hardware/disk/68453"
              }
            ],
            "raid_controller": {
              "$ref": "/api/v1/server/hardware/raidController/9910"
            },
            "raid_level": "RAID1"
          }
        ],
        "hardware_watch": true,
        "hostname": "sd-42",
        "id": 42,
        "ip": [
          {
            "address": "195.154.172.149",
            "mac": "28:92:4a:33:5e:c6",
            "reverse": "195-154-172-149.rev.poneytelecom.eu.",
            "switch_port_state": "up",
            "type": "public"
          },
          {
            "address": "10.90.53.212",
            "mac": "28:92:4a:33:5e:c7",
            "reverse": null,
            "switch_port_state": "up",
            "type": "private"
          }
        ],
        "last_reboot": "2018-08-23T08:32:03.000Z",
        "location": {
          "block": "A",
          "datacenter": "DC3",
          "position": 19,
          "rack": "A23",
          "room": "4 4-4"
        },
        "network": {
          "ip": [
            "195.154.172.149"
          ],
          "ipfo": [],
          "private": [
            "10.90.53.212"
          ]
        },
        "offer": "Pro-1-S-SATA",
        "os": {
          "name": "FreeBSD",
          "version": "11.1-RELEASE"
        },
        "power": "ON",
        "proactive_monitoring": false,
        "raid_controllers": [
          {
            "$ref": "/api/v1/server/hardware/raidController/9910"
          }
        ],
        "support": "Basic service level"
      }
    ]
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.online import (
    Online, OnlineException, online_argument_spec
)


class OnlineServerFacts(Online):

    def __init__(self, module):
        super(OnlineServerFacts, self).__init__(module)
        self.name = 'api/v1/server'

    def _get_server_detail(self, server_path):
        try:
            return self.get(path=server_path).json
        except OnlineException as exc:
            self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))

    def all_detailed_servers(self):
        servers_api_path = self.get_resources()

        server_data = (
            self._get_server_detail(server_api_path)
            for server_api_path in servers_api_path
        )

        return [s for s in server_data if s is not None]


def main():
    module = AnsibleModule(
        argument_spec=online_argument_spec(),
        supports_check_mode=True,
    )

    try:
        servers_facts = OnlineServerFacts(module).all_detailed_servers()
        module.exit_json(
            ansible_facts={'online_server_facts': servers_facts}
        )
    except OnlineException as exc:
        module.fail_json(msg=exc.message)


if __name__ == '__main__':
    main()
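`all_detailed_servers` above pairs a generator with a list comprehension: each per-server API call is deferred until the comprehension consumes it, and entries whose fetch produced nothing are filtered out. The pattern, standalone (the fetch callable is illustrative):

def fetch_all(paths, fetch):
    data = (fetch(path) for path in paths)  # lazy; nothing fetched yet
    return [d for d in data if d is not None]

print(fetch_all(['a', 'missing'], {'a': {'id': 1}}.get))  # [{'id': 1}]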
plugins/modules/cloud/online/online_user_facts.py (new file, 76 lines)
@@ -0,0 +1,76 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: online_user_facts
deprecated:
  removed_in: 3.0.0  # was Ansible 2.13
  why: Deprecated in favour of C(_info) module.
  alternative: Use M(community.general.online_user_info) instead.
short_description: Gather facts about Online user.
description:
  - Gather facts about the user.
author:
  - "Remy Leone (@sieben)"
extends_documentation_fragment:
- community.general.online

'''

EXAMPLES = r'''
- name: Gather Online user facts
  community.general.online_user_facts:
'''

RETURN = r'''
---
online_user_facts:
  description: Response from Online API
  returned: success
  type: complex
  sample:
    "online_user_facts": {
      "company": "foobar LLC",
      "email": "foobar@example.com",
      "first_name": "foo",
      "id": 42,
      "last_name": "bar",
      "login": "foobar"
    }
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.online import (
    Online, OnlineException, online_argument_spec
)


class OnlineUserFacts(Online):

    def __init__(self, module):
        super(OnlineUserFacts, self).__init__(module)
        self.name = 'api/v1/user'


def main():
    module = AnsibleModule(
        argument_spec=online_argument_spec(),
        supports_check_mode=True,
    )

    try:
        module.exit_json(
            ansible_facts={'online_user_facts': OnlineUserFacts(module).get_resources()}
        )
    except OnlineException as exc:
        module.fail_json(msg=exc.message)


if __name__ == '__main__':
    main()
@@ -66,7 +66,6 @@ options:
    description:
      - The labels for this host.
    type: list
    elements: str
  template:
    description:
      - The template or attribute changes to merge into the host template.
@@ -131,7 +130,7 @@ class HostModule(OpenNebulaModule):
            vmm_mad_name=dict(type='str', default="kvm"),
            cluster_id=dict(type='int', default=0),
            cluster_name=dict(type='str'),
            labels=dict(type='list', elements='str'),
            labels=dict(type='list'),
            template=dict(type='dict', aliases=['attributes']),
        )


@@ -31,7 +31,7 @@ short_description: Manages OpenNebula images
description:
  - Manages OpenNebula images
requirements:
  - pyone
  - python-oca
options:
  api_url:
    description:
@@ -88,7 +88,7 @@ EXAMPLES = '''

- name: Print the IMAGE properties
  ansible.builtin.debug:
    var: result
    msg: result

- name: Rename existing IMAGE
  community.general.one_image:
@@ -168,20 +168,21 @@ running_vms:
'''

try:
    import pyone
    HAS_PYONE = True
    import oca
    HAS_OCA = True
except ImportError:
    HAS_PYONE = False
    HAS_OCA = False

from ansible.module_utils.basic import AnsibleModule
import os


def get_image(module, client, predicate):
    pool = oca.ImagePool(client)
    # Filter -2 means fetch all images user can Use
    pool = client.imagepool.info(-2, -1, -1, -1)
    pool.info(filter=-2)

    for image in pool.IMAGE:
    for image in pool:
        if predicate(image):
            return image

@@ -189,11 +190,11 @@ def get_image(module, client, predicate):


def get_image_by_name(module, client, image_name):
    return get_image(module, client, lambda image: (image.NAME == image_name))
    return get_image(module, client, lambda image: (image.name == image_name))


def get_image_by_id(module, client, image_id):
    return get_image(module, client, lambda image: (image.ID == image_id))
    return get_image(module, client, lambda image: (image.id == image_id))


def get_image_instance(module, client, requested_id, requested_name):
@@ -207,28 +208,30 @@ IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE',


def get_image_info(image):
    image.info()

    info = {
        'id': image.ID,
        'name': image.NAME,
        'state': IMAGE_STATES[image.STATE],
        'running_vms': image.RUNNING_VMS,
        'used': bool(image.RUNNING_VMS),
        'user_name': image.UNAME,
        'user_id': image.UID,
        'group_name': image.GNAME,
        'group_id': image.GID,
        'id': image.id,
        'name': image.name,
        'state': IMAGE_STATES[image.state],
        'running_vms': image.running_vms,
        'used': bool(image.running_vms),
        'user_name': image.uname,
        'user_id': image.uid,
        'group_name': image.gname,
        'group_id': image.gid,
    }

    return info


def wait_for_state(module, client, image_id, wait_timeout, state_predicate):
def wait_for_state(module, image, wait_timeout, state_predicate):
    import time
    start_time = time.time()

    while (time.time() - start_time) < wait_timeout:
        image = client.image.info(image_id)
        state = image.STATE
        image.info()
        state = image.state

        if state_predicate(state):
            return image
@@ -238,19 +241,19 @@ def wait_for_state(module, client, image_id, wait_timeout, state_predicate):
    module.fail_json(msg="Wait timeout has expired!")


def wait_for_ready(module, client, image_id, wait_timeout=60):
    return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))
def wait_for_ready(module, image, wait_timeout=60):
    return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))


def wait_for_delete(module, client, image_id, wait_timeout=60):
    return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))
def wait_for_delete(module, image, wait_timeout=60):
    return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))


def enable_image(module, client, image, enable):
    image = client.image.info(image.ID)
    image.info()
    changed = False

    state = image.STATE
    state = image.state

    if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
        if enable:
@@ -263,7 +266,7 @@ def enable_image(module, client, image, enable):
            changed = True

    if changed and not module.check_mode:
        client.image.enable(image.ID, enable)
        client.call('image.enable', image.id, enable)

    result = get_image_info(image)
    result['changed'] = changed
@@ -273,7 +276,7 @@ def enable_image(module, client, image, enable):


def clone_image(module, client, image, new_name):
    if new_name is None:
        new_name = "Copy of " + image.NAME
        new_name = "Copy of " + image.name

    tmp_image = get_image_by_name(module, client, new_name)
    if tmp_image:
@@ -281,13 +284,13 @@ def clone_image(module, client, image, new_name):
        result['changed'] = False
        return result

    if image.STATE == IMAGE_STATES.index('DISABLED'):
    if image.state == IMAGE_STATES.index('DISABLED'):
        module.fail_json(msg="Cannot clone DISABLED image")

    if not module.check_mode:
        new_id = client.image.clone(image.ID, new_name)
        wait_for_ready(module, client, new_id)
        image = client.image.info(new_id)
        new_id = client.call('image.clone', image.id, new_name)
        image = get_image_by_id(module, client, new_id)
        wait_for_ready(module, image)

    result = get_image_info(image)
    result['changed'] = True
@@ -299,7 +302,7 @@ def rename_image(module, client, image, new_name):
    if new_name is None:
        module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")

    if new_name == image.NAME:
    if new_name == image.name:
        result = get_image_info(image)
        result['changed'] = False
        return result
@@ -309,7 +312,7 @@ def rename_image(module, client, image, new_name):
        module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id))

    if not module.check_mode:
        client.image.rename(image.ID, new_name)
        client.call('image.rename', image.id, new_name)

    result = get_image_info(image)
    result['changed'] = True
@@ -321,12 +324,12 @@ def delete_image(module, client, image):
    if not image:
        return {'changed': False}

    if image.RUNNING_VMS > 0:
        module.fail_json(msg="Cannot delete image. There are " + str(image.RUNNING_VMS) + " VMs using it.")
    if image.running_vms > 0:
        module.fail_json(msg="Cannot delete image. There are " + str(image.running_vms) + " VMs using it.")

    if not module.check_mode:
        client.image.delete(image.ID)
        wait_for_delete(module, client, image.ID)
        client.call('image.delete', image.id)
        wait_for_delete(module, image)

    return {'changed': True}

@@ -375,8 +378,8 @@ def main():
                           mutually_exclusive=[['id', 'name']],
                           supports_check_mode=True)

    if not HAS_PYONE:
        module.fail_json(msg='This module requires pyone to work!')
    if not HAS_OCA:
        module.fail_json(msg='This module requires python-oca to work!')

    auth = get_connection_info(module)
    params = module.params
@@ -385,7 +388,7 @@ def main():
    state = params.get('state')
    enabled = params.get('enabled')
    new_name = params.get('new_name')
    client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
    client = oca.Client(auth.username + ':' + auth.password, auth.url)

    result = {}


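Most of the one_image hunks above are mechanical: the pyone bindings expose XML-RPC fields as upper-case attributes (image.ID, image.NAME) and work against an authenticated OneServer object, while the older oca bindings use lower-case attributes and a Client. A toy illustration of the renaming (the class below is a stand-in, not either library):

class Image(object):
    def __init__(self, **fields):
        # Accept arbitrary field names so both conventions can be modelled.
        self.__dict__.update(fields)

pyone_style = Image(ID=42, NAME='leap15', RUNNING_VMS=0)
oca_style = Image(id=42, name='leap15', running_vms=0)
print(pyone_style.ID == oca_style.id)  # True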
plugins/modules/cloud/opennebula/one_image_facts.py (new symbolic link, 1 line)
@@ -0,0 +1 @@
one_image_info.py
@@ -56,7 +56,6 @@ options:
      - A list of image ids whose facts you want to gather.
    aliases: ['id']
    type: list
    elements: str
  name:
    description:
      - A C(name) of the image whose facts will be gathered.
@@ -254,13 +253,16 @@ def main():
        "api_url": {"required": False, "type": "str"},
        "api_username": {"required": False, "type": "str"},
        "api_password": {"required": False, "type": "str", "no_log": True},
        "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"},
        "ids": {"required": False, "aliases": ['id'], "type": "list"},
        "name": {"required": False, "type": "str"},
    }

    module = AnsibleModule(argument_spec=fields,
                           mutually_exclusive=[['ids', 'name']],
                           supports_check_mode=True)
    if module._name in ('one_image_facts', 'community.general.one_image_facts'):
        module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    if not HAS_PYONE:
        module.fail_json(msg='This module requires pyone to work!')
@@ -271,6 +273,9 @@ def main():
    name = params.get('name')
    client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)

    result = {'images': []}
    images = []

    if ids:
        images = get_images_by_ids(module, client, ids)
    elif name:
@@ -278,9 +283,8 @@ def main():
    else:
        images = get_all_images(client).IMAGE

    result = {
        'images': [get_image_info(image) for image in images],
    }
    for image in images:
        result['images'].append(get_image_info(image))

    module.exit_json(**result)

@@ -1,276 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright: (c) 2021, Georg Gadinger <nilsding@nilsding.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: one_template
|
||||
|
||||
short_description: Manages OpenNebula templates
|
||||
|
||||
version_added: 2.4.0
|
||||
|
||||
requirements:
|
||||
- pyone
|
||||
|
||||
description:
|
||||
- "Manages OpenNebula templates."
|
||||
|
||||
options:
|
||||
id:
|
||||
description:
|
||||
- A I(id) of the template you would like to manage. If not set then a
|
||||
- new template will be created with the given I(name).
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- A I(name) of the template you would like to manage. If a template with
|
||||
- the given name does not exist it will be created, otherwise it will be
|
||||
- managed by this module.
|
||||
type: str
|
||||
template:
|
||||
description:
|
||||
- A string containing the template contents.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- C(present) - state that is used to manage the template.
|
||||
- C(absent) - delete the template.
|
||||
choices: ["present", "absent"]
|
||||
default: present
|
||||
type: str
|
||||
|
||||
notes:
|
||||
- Supports C(check_mode). Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.general.opennebula
|
||||
|
||||
author:
|
||||
- "Georg Gadinger (@nilsding)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Fetch the TEMPLATE by id
|
||||
community.general.one_template:
|
||||
id: 6459
|
||||
register: result
|
||||
|
||||
- name: Print the TEMPLATE properties
|
||||
ansible.builtin.debug:
|
||||
var: result
|
||||
|
||||
- name: Fetch the TEMPLATE by name
|
||||
community.general.one_template:
|
||||
name: tf-prd-users-workerredis-p6379a
|
||||
register: result
|
||||
|
||||
- name: Create a new or update an existing TEMPLATE
|
||||
community.general.one_template:
|
||||
name: generic-opensuse
|
||||
template: |
|
||||
CONTEXT = [
|
||||
HOSTNAME = "generic-opensuse"
|
||||
]
|
||||
CPU = "1"
|
||||
CUSTOM_ATTRIBUTE = ""
|
||||
DISK = [
|
||||
CACHE = "writeback",
|
||||
DEV_PREFIX = "sd",
|
||||
DISCARD = "unmap",
|
||||
IMAGE = "opensuse-leap-15.2",
|
||||
IMAGE_UNAME = "oneadmin",
|
||||
IO = "threads",
|
||||
SIZE = "" ]
|
||||
MEMORY = "2048"
|
||||
NIC = [
|
||||
MODEL = "virtio",
|
||||
NETWORK = "testnet",
|
||||
NETWORK_UNAME = "oneadmin" ]
|
||||
OS = [
|
||||
ARCH = "x86_64",
|
||||
BOOT = "disk0" ]
|
||||
SCHED_REQUIREMENTS = "CLUSTER_ID=\\"100\\""
|
||||
VCPU = "2"
|
||||
|
||||
- name: Delete the TEMPLATE by id
|
||||
community.general.one_template:
|
||||
id: 6459
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
id:
|
||||
description: template id
|
||||
type: int
|
||||
returned: when I(state=present)
|
||||
sample: 153
|
||||
name:
|
||||
description: template name
|
||||
type: str
|
||||
returned: when I(state=present)
|
||||
sample: app1
|
||||
template:
|
||||
description: the parsed template
|
||||
type: dict
|
||||
returned: when I(state=present)
|
||||
group_id:
|
||||
description: template's group id
|
||||
type: int
|
||||
returned: when I(state=present)
|
||||
sample: 1
|
||||
group_name:
|
||||
description: template's group name
|
||||
type: str
|
||||
returned: when I(state=present)
|
||||
sample: one-users
|
||||
owner_id:
|
||||
description: template's owner id
|
||||
type: int
|
||||
returned: when I(state=present)
|
||||
sample: 143
|
||||
owner_name:
|
||||
description: template's owner name
|
||||
type: str
|
||||
returned: when I(state=present)
|
||||
sample: ansible-test
|
||||
'''
|
||||
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule


class TemplateModule(OpenNebulaModule):
    def __init__(self):
        argument_spec = dict(
            id=dict(type='int', required=False),
            name=dict(type='str', required=False),
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            template=dict(type='str', required=False),
        )

        mutually_exclusive = [
            ['id', 'name']
        ]

        required_one_of = [('id', 'name')]

        required_if = [
            ['state', 'present', ['template']]
        ]

        OpenNebulaModule.__init__(self,
                                  argument_spec,
                                  supports_check_mode=True,
                                  mutually_exclusive=mutually_exclusive,
                                  required_one_of=required_one_of,
                                  required_if=required_if)

    def run(self, one, module, result):
        params = module.params
        id = params.get('id')
        name = params.get('name')
        desired_state = params.get('state')
        template_data = params.get('template')

        self.result = {}

        template = self.get_template_instance(id, name)
        needs_creation = False
        if not template and desired_state != 'absent':
            if id:
                module.fail_json(msg="There is no template with id=" + str(id))
            else:
                needs_creation = True

        if desired_state == 'absent':
            self.result = self.delete_template(template)
        else:
            if needs_creation:
                self.result = self.create_template(name, template_data)
            else:
                self.result = self.update_template(template, template_data)

        self.exit()

    def get_template(self, predicate):
        # -3 means "Resources belonging to the user"
        # the other two parameters are used for pagination, -1 for both essentially means "return all"
        pool = self.one.templatepool.info(-3, -1, -1)

        for template in pool.VMTEMPLATE:
            if predicate(template):
                return template

        return None

    def get_template_by_id(self, template_id):
        return self.get_template(lambda template: (template.ID == template_id))

    def get_template_by_name(self, template_name):
        return self.get_template(lambda template: (template.NAME == template_name))

    def get_template_instance(self, requested_id, requested_name):
        if requested_id:
            return self.get_template_by_id(requested_id)
        else:
            return self.get_template_by_name(requested_name)

    def get_template_info(self, template):
        info = {
            'id': template.ID,
            'name': template.NAME,
            'template': template.TEMPLATE,
            'user_name': template.UNAME,
            'user_id': template.UID,
            'group_name': template.GNAME,
            'group_id': template.GID,
        }

        return info

    def create_template(self, name, template_data):
        if not self.module.check_mode:
            self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data)

        result = self.get_template_info(self.get_template_by_name(name))
        result['changed'] = True

        return result

    def update_template(self, template, template_data):
        if not self.module.check_mode:
            # 0 = replace the whole template
            self.one.template.update(template.ID, template_data, 0)

        result = self.get_template_info(self.get_template_by_id(template.ID))
        if self.module.check_mode:
            # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here.
            result['changed'] = True
        else:
            # if the previous parsed template data is not equal to the updated one, this has changed
            result['changed'] = template.TEMPLATE != result['template']

        return result

    def delete_template(self, template):
        if not template:
            return {'changed': False}

        if not self.module.check_mode:
            self.one.template.delete(template.ID)

        return {'changed': True}


def main():
    TemplateModule().run_module()


if __name__ == '__main__':
    main()

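For reference, the pool query at the heart of get_template() maps directly onto OpenNebula's XML-RPC API. Below is a minimal sketch, assuming the pyone client library and a reachable OpenNebula frontend (the endpoint URL and credentials are placeholders), of the same call the module issues through OpenNebulaModule:

import pyone

one = pyone.OneServer("http://one.example.com:2633/RPC2",
                      session="oneadmin:onepass")

# First argument -3 restricts the pool to resources owned by the calling
# user; the two -1 values disable range pagination, i.e. "return all".
pool = one.templatepool.info(-3, -1, -1)
for template in pool.VMTEMPLATE:
    print(template.ID, template.NAME)
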
@@ -72,7 +72,6 @@ options:
       - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
     aliases: ['ids']
     type: list
-    elements: int
   state:
     description:
       - C(present) - create instances from a template specified with C(template_id)/C(template_name).
@@ -121,7 +120,6 @@ options:
       - C(state) of instances with these labels.
     default: []
     type: list
-    elements: str
   count_attributes:
     description:
       - A dictionary of key/value attributes that can only be used with
@@ -136,7 +134,6 @@ options:
       - This can be expressed in multiple ways and is shown in the EXAMPLES
       - section.
     type: list
-    elements: str
   count:
     description:
       - Number of instances to launch
@@ -171,7 +168,6 @@ options:
       - NOTE':' If The Template hats Multiple Disks the Order of the Sizes is
       - matched against the order specified in C(template_id)/C(template_name).
     type: list
-    elements: str
   cpu:
     description:
       - Percentage of CPU divided by 100 required for the new instance. Half a
@@ -186,7 +182,6 @@ options:
       - A list of dictionaries with network parameters. See examples for more details.
     default: []
     type: list
-    elements: dict
   disk_saveas:
     description:
       - Creates an image from a VM disk.
@@ -1354,7 +1349,7 @@ def main():
         "api_url": {"required": False, "type": "str"},
         "api_username": {"required": False, "type": "str"},
         "api_password": {"required": False, "type": "str", "no_log": True},
-        "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"},
+        "instance_ids": {"required": False, "aliases": ['ids'], "type": "list"},
         "template_name": {"required": False, "type": "str"},
         "template_id": {"required": False, "type": "int"},
         "vm_start_on_hold": {"default": False, "type": "bool"},
@@ -1372,16 +1367,16 @@ def main():
         "memory": {"required": False, "type": "str"},
         "cpu": {"required": False, "type": "float"},
         "vcpu": {"required": False, "type": "int"},
-        "disk_size": {"required": False, "type": "list", "elements": "str"},
+        "disk_size": {"required": False, "type": "list"},
         "datastore_name": {"required": False, "type": "str"},
         "datastore_id": {"required": False, "type": "int"},
-        "networks": {"default": [], "type": "list", "elements": "dict"},
+        "networks": {"default": [], "type": "list"},
         "count": {"default": 1, "type": "int"},
         "exact_count": {"required": False, "type": "int"},
         "attributes": {"default": {}, "type": "dict"},
         "count_attributes": {"required": False, "type": "dict"},
-        "labels": {"default": [], "type": "list", "elements": "str"},
-        "count_labels": {"required": False, "type": "list", "elements": "str"},
+        "labels": {"default": [], "type": "list"},
+        "count_labels": {"required": False, "type": "list"},
         "disk_saveas": {"type": "dict"},
         "persistent": {"default": False, "type": "bool"}
     }

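These hunks pair each list option's spec with and without "elements". With "elements", AnsibleModule validates and coerces every item of a list option instead of passing arbitrary members through. A minimal standalone sketch of the difference, with hypothetical option names (it runs the way any Ansible module does, not as a plain script):

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(argument_spec=dict(
        # Without "elements", list items are passed through as-is.
        plain_list=dict(type='list'),
        # With "elements", "1" and 1 are both coerced to int 1, and a
        # non-convertible item fails the module with a clear error.
        typed_list=dict(type='list', elements='int'),
    ))
    module.exit_json(changed=False, params=module.params)


if __name__ == '__main__':
    main()
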
@@ -128,7 +128,7 @@ def update_vcn(virtual_network_client, module):
         primitive_params_update=["vcn_id"],
         kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
         module=module,
-        update_attributes=list(UpdateVcnDetails().attribute_map.keys()),
+        update_attributes=UpdateVcnDetails().attribute_map.keys(),
     )
     return result

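The update_vcn hunk toggles between attribute_map.keys() and list(attribute_map.keys()). On Python 3, keys() returns a live view object rather than a list, so call sites that expect a real list (indexing, concatenation, a snapshot that ignores later mutation) behave differently without the wrapper. A quick illustration in plain Python:

# Why list(d.keys()) differs from d.keys() on Python 3.
d = {"cidr_block": None, "display_name": None}

view = d.keys()          # live view over the dict
keys = list(d.keys())    # independent snapshot

# A view cannot be indexed or concatenated:
#   view[0]       -> TypeError
#   view + ["x"]  -> TypeError
print(keys[0])           # works: 'cidr_block'
print(keys + ["x"])      # works: ['cidr_block', 'display_name', 'x']

# ...and the view tracks later changes to the dict, the snapshot does not.
d["dns_label"] = None
print(len(view), len(keys))  # 3 2
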
@@ -162,6 +162,7 @@ def waitForTaskDone(client, name, taskId, timeout):
+        currentTimeout -= 5
         if currentTimeout < 0:
             return False
     return True


 def main():
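The waitForTaskDone hunk adds the currentTimeout -= 5 decrement; without it the surrounding poll loop never counts down, so a task that never finishes spins forever. The shape of the fixed loop as a self-contained sketch, where task_is_done() stands in for the real client/taskId query:

import time


def wait_for_task_done(task_is_done, timeout):
    current_timeout = timeout
    while not task_is_done():
        time.sleep(5)
        current_timeout -= 5   # the line the hunk adds: count the wait down
        if current_timeout < 0:
            return False       # timed out
    return True                # task finished in time


print(wait_for_task_done(lambda: True, 10))  # -> True
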
196 plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py Normal file
@@ -0,0 +1,196 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: ovirt_affinity_label_facts
short_description: Retrieve information about one or more oVirt/RHV affinity labels
author: "Ondra Machacek (@machacekondra)"
deprecated:
    removed_in: 3.0.0  # was Ansible 2.13
    why: When migrating to collection we decided to use only _info modules.
    alternative: Use M(ovirt.ovirt.ovirt_affinity_label_info) instead.
description:
    - "Retrieve information about one or more oVirt/RHV affinity labels."
notes:
    - "This module returns a variable C(ovirt_affinity_labels), which
       contains a list of affinity labels. You need to register the result with
       the I(register) keyword to use it."
options:
    name:
      description:
        - "Name of the affinity labels which should be listed."
    vm:
      description:
        - "Name of the VM, which affinity labels should be listed."
    host:
      description:
        - "Name of the host, which affinity labels should be listed."
extends_documentation_fragment:
- community.general.ovirt_facts

'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

- name: Gather information about all affinity labels, which names start with label
  ovirt_affinity_label_info:
    name: label*
  register: result

- name: Print gathered information
  ansible.builtin.debug:
    msg: "{{ result.ovirt_affinity_labels }}"

- name: >
    Gather information about all affinity labels, which are assigned to VMs
    which names start with postgres
  ovirt_affinity_label_info:
    vm: postgres*
  register: result

- name: Print gathered information
  ansible.builtin.debug:
    msg: "{{ result.ovirt_affinity_labels }}"

- name: >
    Gather information about all affinity labels, which are assigned to hosts
    which names start with west
  ovirt_affinity_label_info:
    host: west*
  register: result

- name: Print gathered information
  ansible.builtin.debug:
    msg: "{{ result.ovirt_affinity_labels }}"

- name: >
    Gather information about all affinity labels, which are assigned to hosts
    which names start with west or VMs which names start with postgres
  ovirt_affinity_label_info:
    host: west*
    vm: postgres*
  register: result

- name: Print gathered information
  ansible.builtin.debug:
    msg: "{{ result.ovirt_affinity_labels }}"
'''

RETURN = '''
ovirt_affinity_labels:
    description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys,
                  all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
    returned: On success.
    type: list
'''

import fnmatch
import traceback

from ansible.module_utils.common.removed import removed_module
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._ovirt import (
    check_sdk,
    create_connection,
    get_dict_of_struct,
    ovirt_info_full_argument_spec,
    search_by_name,
)


def main():
    argument_spec = ovirt_info_full_argument_spec(
        name=dict(default=None),
        host=dict(default=None),
        vm=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    is_old_facts = module._name in ('ovirt_affinity_label_facts', 'community.general.ovirt_affinity_label_facts')
    if is_old_facts:
        module.deprecate("The 'ovirt_affinity_label_facts' module has been renamed to 'ovirt_affinity_label_info', "
                         "and the renamed one no longer returns ansible_facts",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        affinity_labels_service = connection.system_service().affinity_labels_service()
        labels = []
        all_labels = affinity_labels_service.list()
        if module.params['name']:
            labels.extend([
                l for l in all_labels
                if fnmatch.fnmatch(l.name, module.params['name'])
            ])
        if module.params['host']:
            hosts_service = connection.system_service().hosts_service()
            if search_by_name(hosts_service, module.params['host']) is None:
                raise Exception("Host '%s' was not found." % module.params['host'])
            labels.extend([
                label
                for label in all_labels
                for host in connection.follow_link(label.hosts)
                if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
            ])
        if module.params['vm']:
            vms_service = connection.system_service().vms_service()
            if search_by_name(vms_service, module.params['vm']) is None:
                raise Exception("Vm '%s' was not found." % module.params['vm'])
            labels.extend([
                label
                for label in all_labels
                for vm in connection.follow_link(label.vms)
                if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
            ])

        if not (module.params['vm'] or module.params['host'] or module.params['name']):
            labels = all_labels

        result = dict(
            ovirt_affinity_labels=[
                get_dict_of_struct(
                    struct=l,
                    connection=connection,
                    fetch_nested=module.params.get('fetch_nested'),
                    attributes=module.params.get('nested_attributes'),
                ) for l in labels
            ],
        )
        if is_old_facts:
            module.exit_json(changed=False, ansible_facts=result)
        else:
            module.exit_json(changed=False, **result)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
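All three filters in main() above rely on fnmatch, so name: label* style patterns are shell globs, not regular expressions. A short illustration of the matching semantics the module inherits (the sample label names are made up):

import fnmatch

# fnmatch gives shell-glob semantics: * and ? wildcards, no regex syntax.
labels = ['label_db', 'label_web', 'prod_label', 'web']  # made-up names
print([name for name in labels if fnmatch.fnmatch(name, 'label*')])
# -> ['label_db', 'label_web']   ('prod_label' does not match a prefix glob)
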
98 plugins/modules/cloud/ovirt/ovirt_api_facts.py Normal file
@@ -0,0 +1,98 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: ovirt_api_facts
short_description: Retrieve information about the oVirt/RHV API
author: "Ondra Machacek (@machacekondra)"
deprecated:
    removed_in: 3.0.0  # was Ansible 2.13
    why: When migrating to collection we decided to use only _info modules.
    alternative: Use M(ovirt.ovirt.ovirt_api_info) instead.
description:
    - "Retrieve information about the oVirt/RHV API."
notes:
    - "This module returns a variable C(ovirt_api),
       which contains information about the oVirt/RHV API. You need to register the result with
       the I(register) keyword to use it."
extends_documentation_fragment:
- community.general.ovirt_facts

'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

- name: Gather information about the oVirt API
  ovirt_api_info:
  register: result

- name: Print gathered information
  ansible.builtin.debug:
    msg: "{{ result.ovirt_api }}"
'''

RETURN = '''
ovirt_api:
    description: "Dictionary describing the oVirt API information.
                  Api attributes are mapped to dictionary keys,
                  all API attributes can be found at following
                  url: https://ovirt.example.com/ovirt-engine/api/model#types/api."
    returned: On success.
    type: dict
'''

import traceback

from ansible.module_utils.common.removed import removed_module
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._ovirt import (
    check_sdk,
    create_connection,
    get_dict_of_struct,
    ovirt_info_full_argument_spec,
)


def main():
    argument_spec = ovirt_info_full_argument_spec()
    module = AnsibleModule(argument_spec)
    is_old_facts = module._name in ('ovirt_api_facts', 'community.general.ovirt_api_facts')
    if is_old_facts:
        module.deprecate("The 'ovirt_api_facts' module has been renamed to 'ovirt_api_info', "
                         "and the renamed one no longer returns ansible_facts",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13
    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        api = connection.system_service().get()
        result = dict(
            ovirt_api=get_dict_of_struct(
                struct=api,
                connection=connection,
                fetch_nested=module.params.get('fetch_nested'),
                attributes=module.params.get('nested_attributes'),
            )
        )
        if is_old_facts:
            module.exit_json(changed=False, ansible_facts=result)
        else:
            module.exit_json(changed=False, **result)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
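Behind create_connection(), these facts modules all talk to the engine through the oVirt Python SDK. A minimal sketch of the same API query ovirt_api_facts performs, assuming the ovirt-engine-sdk-python package and placeholder engine credentials:

import ovirtsdk4 as sdk

# URL and credentials are placeholders; certificate checking is disabled
# here only for brevity.
connection = sdk.Connection(
    url='https://engine.example.com/ovirt-engine/api',
    username='admin@internal',
    password='secret',
    insecure=True,
)
try:
    api = connection.system_service().get()   # same call the module makes
    print(api.product_info.name, api.product_info.version.full_version)
finally:
    connection.close()
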
125 plugins/modules/cloud/ovirt/ovirt_cluster_facts.py Normal file
@@ -0,0 +1,125 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: ovirt_cluster_facts
short_description: Retrieve information about one or more oVirt/RHV clusters
author: "Ondra Machacek (@machacekondra)"
deprecated:
    removed_in: 3.0.0  # was Ansible 2.13
    why: When migrating to collection we decided to use only _info modules.
    alternative: Use M(ovirt.ovirt.ovirt_cluster_info) instead.
description:
    - "Retrieve information about one or more oVirt/RHV clusters."
notes:
    - "This module returns a variable C(ovirt_clusters), which
       contains a list of clusters. You need to register the result with
       the I(register) keyword to use it."
options:
    pattern:
      description:
        - "Search term which is accepted by oVirt/RHV search backend."
        - "For example to search cluster X from datacenter Y use following pattern:
           name=X and datacenter=Y"
extends_documentation_fragment:
- community.general.ovirt_facts

'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

- name: Gather information about all clusters which names start with production
  ovirt_cluster_info:
    pattern:
      name: 'production*'
  register: result

- name: Print gathered information
  ansible.builtin.debug:
    msg: "{{ result.ovirt_clusters }}"
'''

RETURN = '''
ovirt_clusters:
    description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
                  all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
    returned: On success.
    type: list
'''

import traceback

from ansible.module_utils.common.removed import removed_module
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._ovirt import (
    check_sdk,
    create_connection,
    get_dict_of_struct,
    ovirt_info_full_argument_spec,
)


def main():
    argument_spec = ovirt_info_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    is_old_facts = module._name in ('ovirt_cluster_facts', 'community.general.ovirt_cluster_facts')
    if is_old_facts:
        module.deprecate("The 'ovirt_cluster_facts' module has been renamed to 'ovirt_cluster_info', "
                         "and the renamed one no longer returns ansible_facts",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        clusters_service = connection.system_service().clusters_service()
        clusters = clusters_service.list(search=module.params['pattern'])
        result = dict(
            ovirt_clusters=[
                get_dict_of_struct(
                    struct=c,
                    connection=connection,
                    fetch_nested=module.params.get('fetch_nested'),
                    attributes=module.params.get('nested_attributes'),
                ) for c in clusters
            ],
        )
        if is_old_facts:
            module.exit_json(changed=False, ansible_facts=result)
        else:
            module.exit_json(changed=False, **result)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
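Unlike the affinity-label module above, which filters client-side with fnmatch, the cluster, datacenter, and disk modules hand the pattern straight to the engine's search backend via list(search=...). A minimal sketch of that engine-side search with the oVirt SDK, reusing the placeholder connection from the earlier sketch:

import ovirtsdk4 as sdk

connection = sdk.Connection(url='https://engine.example.com/ovirt-engine/api',
                            username='admin@internal', password='secret',
                            insecure=True)
try:
    clusters_service = connection.system_service().clusters_service()
    # The pattern uses the oVirt search syntax, so forms like
    # "name=production* and datacenter=Y" work here as well.
    for cluster in clusters_service.list(search='name=production*'):
        print(cluster.name)
finally:
    connection.close()
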
108 plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py Normal file
@@ -0,0 +1,108 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: ovirt_datacenter_facts
short_description: Retrieve information about one or more oVirt/RHV datacenters
author: "Ondra Machacek (@machacekondra)"
deprecated:
    removed_in: 3.0.0  # was Ansible 2.13
    why: When migrating to collection we decided to use only _info modules.
    alternative: Use M(ovirt.ovirt.ovirt_datacenter_info) instead.
description:
    - "Retrieve information about one or more oVirt/RHV datacenters."
notes:
    - "This module returns a variable C(ovirt_datacenters), which
       contains a list of datacenters. You need to register the result with
       the I(register) keyword to use it."
options:
    pattern:
      description:
        - "Search term which is accepted by oVirt/RHV search backend."
        - "For example to search datacenter I(X) use following pattern: I(name=X)"
extends_documentation_fragment:
- community.general.ovirt_facts

'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

- name: Gather information about all data centers which names start with production
  ovirt_datacenter_info:
    pattern: name=production*
  register: result

- name: Print gathered information
  ansible.builtin.debug:
    msg: "{{ result.ovirt_datacenters }}"
'''

RETURN = '''
ovirt_datacenters:
    description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys,
                  all datacenters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center."
    returned: On success.
    type: list
'''

import traceback

from ansible.module_utils.common.removed import removed_module
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._ovirt import (
    check_sdk,
    create_connection,
    get_dict_of_struct,
    ovirt_info_full_argument_spec,
)


def main():
    argument_spec = ovirt_info_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    is_old_facts = module._name in ('ovirt_datacenter_facts', 'community.general.ovirt_datacenter_facts')
    if is_old_facts:
        module.deprecate("The 'ovirt_datacenter_facts' module has been renamed to 'ovirt_datacenter_info', "
                         "and the renamed one no longer returns ansible_facts",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        datacenters_service = connection.system_service().data_centers_service()
        datacenters = datacenters_service.list(search=module.params['pattern'])
        result = dict(
            ovirt_datacenters=[
                get_dict_of_struct(
                    struct=d,
                    connection=connection,
                    fetch_nested=module.params.get('fetch_nested'),
                    attributes=module.params.get('nested_attributes'),
                ) for d in datacenters
            ],
        )
        if is_old_facts:
            module.exit_json(changed=False, ansible_facts=result)
        else:
            module.exit_json(changed=False, **result)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
125 plugins/modules/cloud/ovirt/ovirt_disk_facts.py Normal file
@@ -0,0 +1,125 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: ovirt_disk_facts
short_description: Retrieve information about one or more oVirt/RHV disks
author: "Katerina Koukiou (@KKoukiou)"
deprecated:
    removed_in: 3.0.0  # was Ansible 2.13
    why: When migrating to collection we decided to use only _info modules.
    alternative: Use M(ovirt.ovirt.ovirt_disk_info) instead.
description:
    - "Retrieve information about one or more oVirt/RHV disks."
notes:
    - "This module returns a variable C(ovirt_disks), which
       contains a list of disks. You need to register the result with
       the I(register) keyword to use it."
options:
    pattern:
      description:
        - "Search term which is accepted by oVirt/RHV search backend."
        - "For example to search Disk X from storage Y use following pattern:
           name=X and storage.name=Y"
extends_documentation_fragment:
- community.general.ovirt_facts

'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

- name: Gather information about all Disks which names start with centos
  ovirt_disk_info:
    pattern: name=centos*
  register: result

- name: Print gathered information
  ansible.builtin.debug:
    msg: "{{ result.ovirt_disks }}"
'''

RETURN = '''
ovirt_disks:
    description: "List of dictionaries describing the Disks. Disk attributes are mapped to dictionary keys,
                  all Disks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
    returned: On success.
    type: list
'''

import traceback

from ansible.module_utils.common.removed import removed_module
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._ovirt import (
    check_sdk,
    create_connection,
    get_dict_of_struct,
    ovirt_info_full_argument_spec,
)


def main():
    argument_spec = ovirt_info_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    is_old_facts = module._name in ('ovirt_disk_facts', 'community.general.ovirt_disk_facts')
    if is_old_facts:
        module.deprecate("The 'ovirt_disk_facts' module has been renamed to 'ovirt_disk_info', "
                         "and the renamed one no longer returns ansible_facts",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13
    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        disks_service = connection.system_service().disks_service()
        disks = disks_service.list(
            search=module.params['pattern'],
        )
        result = dict(
            ovirt_disks=[
                get_dict_of_struct(
                    struct=c,
                    connection=connection,
                    fetch_nested=module.params.get('fetch_nested'),
                    attributes=module.params.get('nested_attributes'),
                ) for c in disks
            ],
        )
        if is_old_facts:
            module.exit_json(changed=False, ansible_facts=result)
        else:
            module.exit_json(changed=False, **result)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
Some files were not shown because too many files have changed in this diff.