mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-05-08 14:22:46 +00:00
Compare commits
424 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
331d2c7651 | ||
|
|
b35a262378 | ||
|
|
7d400663b6 | ||
|
|
0d0884b069 | ||
|
|
dd400e8c21 | ||
|
|
a60f9bc78b | ||
|
|
47714ecf79 | ||
|
|
d15ed4135b | ||
|
|
bd61228e40 | ||
|
|
26d7c28b33 | ||
|
|
2e533daffa | ||
|
|
6c50119eab | ||
|
|
bc3435b993 | ||
|
|
370f5d8082 | ||
|
|
e77c5413c9 | ||
|
|
800ee1bae0 | ||
|
|
8de8d21062 | ||
|
|
81e71b5034 | ||
|
|
44ce63ed85 | ||
|
|
a3c9c688b9 | ||
|
|
a332ed4429 | ||
|
|
91571f8bff | ||
|
|
43856eaa6f | ||
|
|
ae87b5479a | ||
|
|
42cd462780 | ||
|
|
d871378574 | ||
|
|
983b292399 | ||
|
|
6831aa5501 | ||
|
|
2d8a94a459 | ||
|
|
f721e76fdc | ||
|
|
3eadb9d637 | ||
|
|
033582b696 | ||
|
|
974997594f | ||
|
|
fa8ce6dea8 | ||
|
|
1d90e91528 | ||
|
|
a90e2c8002 | ||
|
|
c506375f2a | ||
|
|
4def9439bd | ||
|
|
023654473b | ||
|
|
a216f15dd9 | ||
|
|
f613983cb4 | ||
|
|
c22199794d | ||
|
|
24b1d92e84 | ||
|
|
4bc44e4062 | ||
|
|
06fd6d8742 | ||
|
|
dd0ae4a003 | ||
|
|
646ca74810 | ||
|
|
d60c107818 | ||
|
|
ef2d14f24e | ||
|
|
b3cde9b8a4 | ||
|
|
dc4222df0d | ||
|
|
b9a89d6d0f | ||
|
|
f48913d91b | ||
|
|
66c6d0ee16 | ||
|
|
db656705b0 | ||
|
|
f955a85848 | ||
|
|
5a52b573fe | ||
|
|
f3e640d5a0 | ||
|
|
711405507d | ||
|
|
7a01c5809c | ||
|
|
cd022c3e2a | ||
|
|
bb323ab12f | ||
|
|
2a53edd9bc | ||
|
|
85fc920a0c | ||
|
|
ebaa17f59f | ||
|
|
73b3ec09e5 | ||
|
|
11cb136971 | ||
|
|
298e0f60be | ||
|
|
b3d3b108bf | ||
|
|
1dd5e71cff | ||
|
|
dd0d460132 | ||
|
|
4223f48d38 | ||
|
|
c1eb0a232c | ||
|
|
a207298260 | ||
|
|
144855e820 | ||
|
|
1479544029 | ||
|
|
e901d281cf | ||
|
|
b9408dc8ef | ||
|
|
f1a31611b7 | ||
|
|
5b1bede4cb | ||
|
|
25e246bdc2 | ||
|
|
7d20ff7cac | ||
|
|
74174f11ff | ||
|
|
52bb601f31 | ||
|
|
3b9c6d496b | ||
|
|
b31583b441 | ||
|
|
aa33ac349c | ||
|
|
3e9a6acff7 | ||
|
|
1d0c5e2ba4 | ||
|
|
a9c64655de | ||
|
|
9f4fd4c899 | ||
|
|
19fdb29db7 | ||
|
|
df1fa397db | ||
|
|
3560aeb12f | ||
|
|
63817f7c1b | ||
|
|
1de0e9eaba | ||
|
|
99cfb993d5 | ||
|
|
6c7f8f97ad | ||
|
|
637571993a | ||
|
|
740883e7fd | ||
|
|
01c892ddf1 | ||
|
|
e16029db64 | ||
|
|
10c180bfee | ||
|
|
126c397d6c | ||
|
|
74fcb0335e | ||
|
|
e7b16a96b9 | ||
|
|
3f3ed407a3 | ||
|
|
e53f153e30 | ||
|
|
ddaad1e650 | ||
|
|
d12951b9c7 | ||
|
|
6528aefcb5 | ||
|
|
0bd4b3cbc9 | ||
|
|
785951484b | ||
|
|
491b622041 | ||
|
|
325a19d88a | ||
|
|
bed1dc479f | ||
|
|
fd741ed663 | ||
|
|
df9f0741b5 | ||
|
|
777d36b9fb | ||
|
|
ce83bde742 | ||
|
|
ba50d114d4 | ||
|
|
eacbf45632 | ||
|
|
7b529c72b3 | ||
|
|
1c53894920 | ||
|
|
ed813176ce | ||
|
|
9f71073d37 | ||
|
|
6c88b69d6f | ||
|
|
620dd7e8da | ||
|
|
f78e08bc37 | ||
|
|
818cafc580 | ||
|
|
eb2cb56a55 | ||
|
|
2b824f2d7b | ||
|
|
39bf187a25 | ||
|
|
5016f402a5 | ||
|
|
58a9287689 | ||
|
|
09e2699d1c | ||
|
|
da2a629919 | ||
|
|
c63f3f9956 | ||
|
|
6c3a5cf9b1 | ||
|
|
1faf8ef08b | ||
|
|
50aead4636 | ||
|
|
0a7ed3b019 | ||
|
|
0d1417dcfa | ||
|
|
5ee5c004b4 | ||
|
|
e9dafb3467 | ||
|
|
47b940fc63 | ||
|
|
eb79c14e9c | ||
|
|
1ed5a36a81 | ||
|
|
dd55c3c3bb | ||
|
|
33126b7267 | ||
|
|
4c14df6d88 | ||
|
|
d495d3969b | ||
|
|
c3ef9bf668 | ||
|
|
757427cadf | ||
|
|
19a5975181 | ||
|
|
496be77a2b | ||
|
|
f37eb12580 | ||
|
|
51dfc1f288 | ||
|
|
1f8173b797 | ||
|
|
8e53b3df6f | ||
|
|
91272d027b | ||
|
|
8d9fd52d3d | ||
|
|
1110e93c5d | ||
|
|
e1bf23d27d | ||
|
|
68dec29df3 | ||
|
|
1d1d934bdd | ||
|
|
c49de1f4d6 | ||
|
|
57a373f4f2 | ||
|
|
4566812591 | ||
|
|
8cedec381f | ||
|
|
65d4fe2f4f | ||
|
|
0f88c71f59 | ||
|
|
b80854ff50 | ||
|
|
e1ca4ce1e8 | ||
|
|
5319437bc2 | ||
|
|
47371041c7 | ||
|
|
d1acf52906 | ||
|
|
d981f388fb | ||
|
|
70ba401602 | ||
|
|
1d8530aff1 | ||
|
|
dc60e71fd5 | ||
|
|
ae0d3cb090 | ||
|
|
b8794c35e8 | ||
|
|
db61a899d5 | ||
|
|
b69ea1dfd9 | ||
|
|
cff8463882 | ||
|
|
47c456f740 | ||
|
|
1db167b12a | ||
|
|
25c52d56bd | ||
|
|
63fbcdb4b1 | ||
|
|
7f890c4645 | ||
|
|
382be570ec | ||
|
|
cc7d0f7670 | ||
|
|
d669e2b60d | ||
|
|
0ba9ea6e48 | ||
|
|
5cd6d18b34 | ||
|
|
a99d011867 | ||
|
|
4a7379b61d | ||
|
|
dbae7da6bc | ||
|
|
e73451f09e | ||
|
|
1f7649fcd7 | ||
|
|
51a08ea398 | ||
|
|
72b59c764e | ||
|
|
31443e57b1 | ||
|
|
28ac4b79e2 | ||
|
|
c52839c601 | ||
|
|
eab9a43d2e | ||
|
|
8e3931d9b0 | ||
|
|
44fd157a2b | ||
|
|
ce5fc7764a | ||
|
|
5a567b80c6 | ||
|
|
88c6e6ac61 | ||
|
|
00c2ec062d | ||
|
|
3d66ed3ae3 | ||
|
|
19b5fceeab | ||
|
|
5fc17ff022 | ||
|
|
f896c2986c | ||
|
|
a96f90ff94 | ||
|
|
3d1f9ed657 | ||
|
|
5c768dc6f1 | ||
|
|
4c88a8edc0 | ||
|
|
ece0202507 | ||
|
|
8f99f9cb1c | ||
|
|
0a28a0c8b0 | ||
|
|
6c94ab7c6b | ||
|
|
e3fcc7de2a | ||
|
|
2ebf2861b6 | ||
|
|
2be2d30f3b | ||
|
|
9ccc0464ff | ||
|
|
d95910963b | ||
|
|
9787e8a6bf | ||
|
|
5cc900cfdb | ||
|
|
6cec8759d0 | ||
|
|
f4c63ede7f | ||
|
|
41550b5205 | ||
|
|
64c6f20b55 | ||
|
|
33b8d1c57e | ||
|
|
726aa5ecf7 | ||
|
|
0109310aa2 | ||
|
|
75fd32ca55 | ||
|
|
07fa7ea409 | ||
|
|
37c1453601 | ||
|
|
deddce02fa | ||
|
|
a9346f0e68 | ||
|
|
637eaa15de | ||
|
|
82e33a0ce5 | ||
|
|
ef49950b96 | ||
|
|
4ea632b4e5 | ||
|
|
9b593fd46c | ||
|
|
adbf624a42 | ||
|
|
4b26990d8b | ||
|
|
7db2ce5be3 | ||
|
|
2b0f7b858e | ||
|
|
6ecc95980d | ||
|
|
d9e734d662 | ||
|
|
4fe87879ff | ||
|
|
42999867b4 | ||
|
|
9e51469e01 | ||
|
|
af4474efd7 | ||
|
|
6441814f8b | ||
|
|
ce0f327875 | ||
|
|
c85aa96177 | ||
|
|
24f780ec9a | ||
|
|
890ff574c3 | ||
|
|
1202d034b3 | ||
|
|
b661c9476f | ||
|
|
549f228e1c | ||
|
|
3b9be01d5b | ||
|
|
fbc56c5a1d | ||
|
|
85371e7b6d | ||
|
|
20ca01e486 | ||
|
|
282c1d546c | ||
|
|
c488cb1dd3 | ||
|
|
d6f5029187 | ||
|
|
e13ca30e01 | ||
|
|
7f5c668433 | ||
|
|
25b38d907e | ||
|
|
20f470cc64 | ||
|
|
b7713830dc | ||
|
|
4c379bd3b2 | ||
|
|
4842f67da1 | ||
|
|
8670eff750 | ||
|
|
a5ca990857 | ||
|
|
c776387daa | ||
|
|
0b13fd2c76 | ||
|
|
19fdfcf0b3 | ||
|
|
167153bff5 | ||
|
|
a93fce6755 | ||
|
|
9aeac26583 | ||
|
|
909a98d1d9 | ||
|
|
86b7efaf06 | ||
|
|
8de1c0c205 | ||
|
|
6d960e9e10 | ||
|
|
7722800561 | ||
|
|
307c54750f | ||
|
|
7caba156fa | ||
|
|
39a23a05f0 | ||
|
|
3a5669991f | ||
|
|
0f00c65d2c | ||
|
|
16baefd167 | ||
|
|
3bc31f286e | ||
|
|
18a5330e62 | ||
|
|
55629b311a | ||
|
|
8267fd3809 | ||
|
|
a6c950a44b | ||
|
|
bcfd648855 | ||
|
|
6c1d014044 | ||
|
|
b2e075e6d3 | ||
|
|
b5b5410575 | ||
|
|
135cc1d337 | ||
|
|
a1b7949fa5 | ||
|
|
07d123a71a | ||
|
|
72a1e805b4 | ||
|
|
398421a9d1 | ||
|
|
7f1e26167a | ||
|
|
555eb62199 | ||
|
|
da7f9ffc3f | ||
|
|
159f38f4f2 | ||
|
|
f7656ac0d3 | ||
|
|
873f1fb7fd | ||
|
|
434b83170a | ||
|
|
e3e66a57ec | ||
|
|
277f2a7df5 | ||
|
|
98486c0ee2 | ||
|
|
3af4be34b2 | ||
|
|
08b81b570e | ||
|
|
08c96d94e6 | ||
|
|
2aec1d1bbf | ||
|
|
af21a0eaf7 | ||
|
|
3f1c93cccf | ||
|
|
03a913109e | ||
|
|
ee34fdb4ac | ||
|
|
eba5216be5 | ||
|
|
951a7e2758 | ||
|
|
0243eabd30 | ||
|
|
75d1894866 | ||
|
|
dd9e999c9f | ||
|
|
e48083e66b | ||
|
|
220051768b | ||
|
|
104f6a3e96 | ||
|
|
9b24b7a969 | ||
|
|
097c609aab | ||
|
|
fbe66994a1 | ||
|
|
3d19e15a7d | ||
|
|
7310a34b55 | ||
|
|
ea1fb83b0c | ||
|
|
4931fb2681 | ||
|
|
13fb60f58f | ||
|
|
e5da25915d | ||
|
|
71bbabb96f | ||
|
|
564a625603 | ||
|
|
4e1f6683d9 | ||
|
|
c173d4d5bc | ||
|
|
a353202716 | ||
|
|
954fb0a311 | ||
|
|
2794dc7b02 | ||
|
|
9d5044ac1a | ||
|
|
62ae120c50 | ||
|
|
cf450e3a43 | ||
|
|
77bf8b9a66 | ||
|
|
02e80c610b | ||
|
|
7613e0fb04 | ||
|
|
e382044e42 | ||
|
|
5e8b27a224 | ||
|
|
60c9da76e7 | ||
|
|
09d89da0ab | ||
|
|
ba5b86cf4a | ||
|
|
1a5702cf21 | ||
|
|
51121e54d0 | ||
|
|
7cf472855c | ||
|
|
38996b7544 | ||
|
|
905239f530 | ||
|
|
2b3c8f4582 | ||
|
|
4c33e2ccb8 | ||
|
|
2b0879cdc4 | ||
|
|
fcee84b947 | ||
|
|
d7ec65c19c | ||
|
|
bfdb76e60d | ||
|
|
eb24b5707e | ||
|
|
9a5fe4c9af | ||
|
|
6d60d3fa7f | ||
|
|
10fb2ffe5d | ||
|
|
7ac6db2490 | ||
|
|
1eb3ab3b27 | ||
|
|
e20eb64c6e | ||
|
|
73f8338980 | ||
|
|
8a16b51202 | ||
|
|
bf41ddc8ef | ||
|
|
6dc98c08fd | ||
|
|
8f2b2d9dc6 | ||
|
|
6ff6cc96d5 | ||
|
|
d046dc34bf | ||
|
|
b36f77515c | ||
|
|
e5d15a56c3 | ||
|
|
19b1a0049b | ||
|
|
88893b8204 | ||
|
|
d4e9b7575c | ||
|
|
7e6bde2ce1 | ||
|
|
73be912bf7 | ||
|
|
f3b82a9470 | ||
|
|
b797922e20 | ||
|
|
2aabf5e62b | ||
|
|
0ae3d0aecb | ||
|
|
92242d898d | ||
|
|
05556dc671 | ||
|
|
4e56347fc1 | ||
|
|
c475effeed | ||
|
|
d13b026f47 | ||
|
|
0e8cc31799 | ||
|
|
a00d615d68 | ||
|
|
b2a222b136 | ||
|
|
fefc4b3423 | ||
|
|
0d31899fe7 | ||
|
|
76174602dc | ||
|
|
9e039cc4a0 | ||
|
|
7f37103df3 | ||
|
|
ea21341686 | ||
|
|
0951833a6c | ||
|
|
d0879bfaf9 | ||
|
|
0eb4954339 | ||
|
|
d2d4997fa8 | ||
|
|
107e956565 | ||
|
|
ce48751033 | ||
|
|
ba115031f6 |
@@ -13,25 +13,13 @@ pr:
|
|||||||
- stable-*
|
- stable-*
|
||||||
|
|
||||||
schedules:
|
schedules:
|
||||||
- cron: 0 8 * * *
|
- cron: 0 9 * * *
|
||||||
displayName: Nightly (main)
|
displayName: Nightly
|
||||||
always: true
|
always: true
|
||||||
branches:
|
branches:
|
||||||
include:
|
include:
|
||||||
- main
|
- main
|
||||||
- cron: 0 10 * * *
|
- stable-*
|
||||||
displayName: Nightly (active stable branches)
|
|
||||||
always: true
|
|
||||||
branches:
|
|
||||||
include:
|
|
||||||
- stable-2
|
|
||||||
- stable-3
|
|
||||||
- cron: 0 11 * * 0
|
|
||||||
displayName: Weekly (old stable branches)
|
|
||||||
always: true
|
|
||||||
branches:
|
|
||||||
include:
|
|
||||||
- stable-1
|
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
- name: checkoutPath
|
- name: checkoutPath
|
||||||
@@ -48,20 +36,20 @@ variables:
|
|||||||
resources:
|
resources:
|
||||||
containers:
|
containers:
|
||||||
- container: default
|
- container: default
|
||||||
image: quay.io/ansible/azure-pipelines-test-container:1.9.0
|
image: quay.io/ansible/azure-pipelines-test-container:1.8.0
|
||||||
|
|
||||||
pool: Standard
|
pool: Standard
|
||||||
|
|
||||||
stages:
|
stages:
|
||||||
### Sanity
|
### Sanity
|
||||||
- stage: Sanity_2_11
|
- stage: Sanity_devel
|
||||||
displayName: Sanity 2.11
|
displayName: Sanity devel
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
jobs:
|
jobs:
|
||||||
- template: templates/matrix.yml
|
- template: templates/matrix.yml
|
||||||
parameters:
|
parameters:
|
||||||
nameFormat: Test {0}
|
nameFormat: Test {0}
|
||||||
testFormat: 2.11/sanity/{0}
|
testFormat: devel/sanity/{0}
|
||||||
targets:
|
targets:
|
||||||
- test: 1
|
- test: 1
|
||||||
- test: 2
|
- test: 2
|
||||||
@@ -95,14 +83,14 @@ stages:
|
|||||||
- test: 3
|
- test: 3
|
||||||
- test: 4
|
- test: 4
|
||||||
### Units
|
### Units
|
||||||
- stage: Units_2_11
|
- stage: Units_devel
|
||||||
displayName: Units 2.11
|
displayName: Units devel
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
jobs:
|
jobs:
|
||||||
- template: templates/matrix.yml
|
- template: templates/matrix.yml
|
||||||
parameters:
|
parameters:
|
||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: 2.11/units/{0}/1
|
testFormat: devel/units/{0}/1
|
||||||
targets:
|
targets:
|
||||||
- test: 2.6
|
- test: 2.6
|
||||||
- test: 2.7
|
- test: 2.7
|
||||||
@@ -144,13 +132,13 @@ stages:
|
|||||||
- test: 3.8
|
- test: 3.8
|
||||||
|
|
||||||
## Remote
|
## Remote
|
||||||
- stage: Remote_2_11
|
- stage: Remote_devel
|
||||||
displayName: Remote 2.11
|
displayName: Remote devel
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
jobs:
|
jobs:
|
||||||
- template: templates/matrix.yml
|
- template: templates/matrix.yml
|
||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.11/{0}
|
testFormat: devel/{0}
|
||||||
targets:
|
targets:
|
||||||
- name: macOS 11.1
|
- name: macOS 11.1
|
||||||
test: macos/11.1
|
test: macos/11.1
|
||||||
@@ -166,8 +154,6 @@ stages:
|
|||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
- 4
|
|
||||||
- 5
|
|
||||||
- stage: Remote_2_10
|
- stage: Remote_2_10
|
||||||
displayName: Remote 2.10
|
displayName: Remote 2.10
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -208,13 +194,13 @@ stages:
|
|||||||
- 2
|
- 2
|
||||||
|
|
||||||
### Docker
|
### Docker
|
||||||
- stage: Docker_2_11
|
- stage: Docker_devel
|
||||||
displayName: Docker 2.11
|
displayName: Docker devel
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
jobs:
|
jobs:
|
||||||
- template: templates/matrix.yml
|
- template: templates/matrix.yml
|
||||||
parameters:
|
parameters:
|
||||||
testFormat: 2.11/linux/{0}
|
testFormat: devel/linux/{0}
|
||||||
targets:
|
targets:
|
||||||
- name: CentOS 6
|
- name: CentOS 6
|
||||||
test: centos6
|
test: centos6
|
||||||
@@ -238,8 +224,6 @@ stages:
|
|||||||
- 1
|
- 1
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
- 4
|
|
||||||
- 5
|
|
||||||
- stage: Docker_2_10
|
- stage: Docker_2_10
|
||||||
displayName: Docker 2.10
|
displayName: Docker 2.10
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -259,8 +243,6 @@ stages:
|
|||||||
groups:
|
groups:
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
- 4
|
|
||||||
- 5
|
|
||||||
- stage: Docker_2_9
|
- stage: Docker_2_9
|
||||||
displayName: Docker 2.9
|
displayName: Docker 2.9
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
@@ -278,18 +260,16 @@ stages:
|
|||||||
groups:
|
groups:
|
||||||
- 2
|
- 2
|
||||||
- 3
|
- 3
|
||||||
- 4
|
|
||||||
- 5
|
|
||||||
|
|
||||||
### Cloud
|
### Cloud
|
||||||
- stage: Cloud_2_11
|
- stage: Cloud_devel
|
||||||
displayName: Cloud 2.11
|
displayName: Cloud devel
|
||||||
dependsOn: []
|
dependsOn: []
|
||||||
jobs:
|
jobs:
|
||||||
- template: templates/matrix.yml
|
- template: templates/matrix.yml
|
||||||
parameters:
|
parameters:
|
||||||
nameFormat: Python {0}
|
nameFormat: Python {0}
|
||||||
testFormat: 2.11/cloud/{0}/1
|
testFormat: devel/cloud/{0}/1
|
||||||
targets:
|
targets:
|
||||||
- test: 2.7
|
- test: 2.7
|
||||||
- test: 3.6
|
- test: 3.6
|
||||||
@@ -316,19 +296,19 @@ stages:
|
|||||||
- stage: Summary
|
- stage: Summary
|
||||||
condition: succeededOrFailed()
|
condition: succeededOrFailed()
|
||||||
dependsOn:
|
dependsOn:
|
||||||
- Sanity_2_11
|
- Sanity_devel
|
||||||
- Sanity_2_9
|
- Sanity_2_9
|
||||||
- Sanity_2_10
|
- Sanity_2_10
|
||||||
- Units_2_11
|
- Units_devel
|
||||||
- Units_2_9
|
- Units_2_9
|
||||||
- Units_2_10
|
- Units_2_10
|
||||||
- Remote_2_11
|
- Remote_devel
|
||||||
- Remote_2_9
|
- Remote_2_9
|
||||||
- Remote_2_10
|
- Remote_2_10
|
||||||
- Docker_2_11
|
- Docker_devel
|
||||||
- Docker_2_9
|
- Docker_2_9
|
||||||
- Docker_2_10
|
- Docker_2_10
|
||||||
- Cloud_2_11
|
- Cloud_devel
|
||||||
- Cloud_2_9
|
- Cloud_2_9
|
||||||
- Cloud_2_10
|
- Cloud_2_10
|
||||||
jobs:
|
jobs:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ set -o pipefail -eu
|
|||||||
|
|
||||||
output_path="$1"
|
output_path="$1"
|
||||||
|
|
||||||
curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh
|
curl --silent --show-error https://codecov.io/bash > codecov.sh
|
||||||
|
|
||||||
for file in "${output_path}"/reports/coverage*.xml; do
|
for file in "${output_path}"/reports/coverage*.xml; do
|
||||||
name="${file}"
|
name="${file}"
|
||||||
|
|||||||
177
.github/BOTMETA.yml
vendored
177
.github/BOTMETA.yml
vendored
@@ -1,7 +1,5 @@
|
|||||||
automerge: true
|
automerge: true
|
||||||
files:
|
files:
|
||||||
plugins/:
|
|
||||||
supershipit: quidame Ajpantuso
|
|
||||||
changelogs/fragments/:
|
changelogs/fragments/:
|
||||||
support: community
|
support: community
|
||||||
$actions:
|
$actions:
|
||||||
@@ -17,14 +15,13 @@ files:
|
|||||||
labels: become
|
labels: become
|
||||||
$callbacks/:
|
$callbacks/:
|
||||||
labels: callbacks
|
labels: callbacks
|
||||||
|
$callbacks/logstash.py:
|
||||||
|
maintainers: ujenmr
|
||||||
$callbacks/say.py:
|
$callbacks/say.py:
|
||||||
notify: chris-short
|
notify: chris-short
|
||||||
maintainers: $team_macos
|
maintainers: $team_macos
|
||||||
labels: macos say
|
labels: macos say
|
||||||
keywords: brew cask darwin homebrew macosx macports osx
|
keywords: brew cask darwin homebrew macosx macports osx
|
||||||
$callbacks/stderr.py:
|
|
||||||
maintainers: ysn2233
|
|
||||||
labels: stderr
|
|
||||||
$callbacks/sumologic.py:
|
$callbacks/sumologic.py:
|
||||||
maintainers: ryancurrah
|
maintainers: ryancurrah
|
||||||
labels: sumologic
|
labels: sumologic
|
||||||
@@ -33,11 +30,6 @@ files:
|
|||||||
$callbacks/unixy.py:
|
$callbacks/unixy.py:
|
||||||
maintainers: akatch
|
maintainers: akatch
|
||||||
labels: unixy
|
labels: unixy
|
||||||
$connections/docker.py:
|
|
||||||
maintainers: $team_docker
|
|
||||||
labels: cloud docker
|
|
||||||
ignore: cove
|
|
||||||
supershipit: felixfontein
|
|
||||||
$connections/:
|
$connections/:
|
||||||
labels: connections
|
labels: connections
|
||||||
$connections/kubectl.py:
|
$connections/kubectl.py:
|
||||||
@@ -46,24 +38,10 @@ files:
|
|||||||
$connections/lxd.py:
|
$connections/lxd.py:
|
||||||
maintainers: mattclay
|
maintainers: mattclay
|
||||||
labels: lxd
|
labels: lxd
|
||||||
$connections/oc.py:
|
|
||||||
maintainers: chouseknecht fabianvf flaper87 maxamillion
|
|
||||||
labels: oc
|
|
||||||
$connections/saltstack.py:
|
$connections/saltstack.py:
|
||||||
labels: saltstack
|
labels: saltstack
|
||||||
$doc_fragments/:
|
$doc_fragments/:
|
||||||
labels: docs_fragments
|
labels: docs_fragments
|
||||||
$doc_fragments/docker.py:
|
|
||||||
maintainers: $team_docker
|
|
||||||
labels: cloud docker
|
|
||||||
ignore: cove
|
|
||||||
supershipit: felixfontein
|
|
||||||
$doc_fragments/gcp.py:
|
|
||||||
maintainers: $team_google
|
|
||||||
labels: gcp
|
|
||||||
supershipit: erjohnso rambleraptor
|
|
||||||
$doc_fragments/hetzner.py:
|
|
||||||
labels: hetzner
|
|
||||||
$doc_fragments/hpe3par.py:
|
$doc_fragments/hpe3par.py:
|
||||||
maintainers: farhan7500 gautamphegde
|
maintainers: farhan7500 gautamphegde
|
||||||
labels: hpe3par
|
labels: hpe3par
|
||||||
@@ -72,19 +50,17 @@ files:
|
|||||||
labels: hwc
|
labels: hwc
|
||||||
$doc_fragments/nomad.py:
|
$doc_fragments/nomad.py:
|
||||||
maintainers: chris93111
|
maintainers: chris93111
|
||||||
$doc_fragments/postgres.py:
|
|
||||||
maintainers: $team_postgresql
|
|
||||||
labels: postgres postgresql
|
|
||||||
keywords: database postgres postgresql
|
|
||||||
$doc_fragments/xenserver.py:
|
$doc_fragments/xenserver.py:
|
||||||
maintainers: bvitnik
|
maintainers: bvitnik
|
||||||
labels: xenserver
|
labels: xenserver
|
||||||
$filters/dict_kv.py:
|
$filters/dict_kv.py:
|
||||||
maintainers: giner
|
maintainers: giner
|
||||||
$filters/time.py:
|
|
||||||
maintainers: resmo
|
|
||||||
$filters/jc.py:
|
$filters/jc.py:
|
||||||
maintainers: kellyjonbrazil
|
maintainers: kellyjonbrazil
|
||||||
|
$filters/list.py:
|
||||||
|
maintainers: vbotka
|
||||||
|
$filters/time.py:
|
||||||
|
maintainers: resmo
|
||||||
$httpapis/:
|
$httpapis/:
|
||||||
maintainers: $team_networking
|
maintainers: $team_networking
|
||||||
labels: networking
|
labels: networking
|
||||||
@@ -94,22 +70,10 @@ files:
|
|||||||
keywords: firepower ftd
|
keywords: firepower ftd
|
||||||
$inventories/:
|
$inventories/:
|
||||||
labels: inventories
|
labels: inventories
|
||||||
$inventories/docker_machine.py:
|
|
||||||
maintainers: $team_docker
|
|
||||||
labels: cloud docker
|
|
||||||
ignore: cove
|
|
||||||
supershipit: felixfontein
|
|
||||||
$inventories/docker_swarm.py:
|
|
||||||
maintainers: $team_docker morph027
|
|
||||||
labels: cloud docker docker_swarm
|
|
||||||
ignore: cove
|
|
||||||
supershipit: felixfontein
|
|
||||||
$inventories/linode.py:
|
$inventories/linode.py:
|
||||||
maintainers: $team_linode
|
maintainers: $team_linode
|
||||||
labels: cloud linode
|
labels: cloud linode
|
||||||
keywords: linode dynamic inventory script
|
keywords: linode dynamic inventory script
|
||||||
$inventories/proxmox.py:
|
|
||||||
maintainers: $team_virt ilijamt
|
|
||||||
$inventories/scaleway.py:
|
$inventories/scaleway.py:
|
||||||
maintainers: $team_scaleway
|
maintainers: $team_scaleway
|
||||||
labels: cloud scaleway
|
labels: cloud scaleway
|
||||||
@@ -132,9 +96,6 @@ files:
|
|||||||
maintainers: amigus
|
maintainers: amigus
|
||||||
$lookups/dsv.py:
|
$lookups/dsv.py:
|
||||||
maintainers: amigus
|
maintainers: amigus
|
||||||
$lookups/hashi_vault.py:
|
|
||||||
labels: hashi_vault
|
|
||||||
maintainers: briantist
|
|
||||||
$lookups/manifold.py:
|
$lookups/manifold.py:
|
||||||
maintainers: galanoff
|
maintainers: galanoff
|
||||||
labels: manifold
|
labels: manifold
|
||||||
@@ -143,11 +104,6 @@ files:
|
|||||||
labels: infoblox networking
|
labels: infoblox networking
|
||||||
$module_utils/:
|
$module_utils/:
|
||||||
labels: module_utils
|
labels: module_utils
|
||||||
$module_utils/docker/:
|
|
||||||
maintainers: $team_docker
|
|
||||||
labels: cloud
|
|
||||||
ignore: cove
|
|
||||||
supershipit: felixfontein
|
|
||||||
$module_utils/gitlab.py:
|
$module_utils/gitlab.py:
|
||||||
notify: jlozadad
|
notify: jlozadad
|
||||||
maintainers: $team_gitlab
|
maintainers: $team_gitlab
|
||||||
@@ -162,10 +118,6 @@ files:
|
|||||||
$module_utils/ipa.py:
|
$module_utils/ipa.py:
|
||||||
maintainers: $team_ipa
|
maintainers: $team_ipa
|
||||||
labels: ipa
|
labels: ipa
|
||||||
$module_utils/kubevirt.py:
|
|
||||||
maintainers: $team_kubevirt
|
|
||||||
labels: cloud kubevirt
|
|
||||||
keywords: kubevirt
|
|
||||||
$module_utils/manageiq.py:
|
$module_utils/manageiq.py:
|
||||||
maintainers: $team_manageiq
|
maintainers: $team_manageiq
|
||||||
labels: manageiq
|
labels: manageiq
|
||||||
@@ -181,10 +133,6 @@ files:
|
|||||||
$module_utils/oracle/oci_utils.py:
|
$module_utils/oracle/oci_utils.py:
|
||||||
maintainers: $team_oracle
|
maintainers: $team_oracle
|
||||||
labels: cloud
|
labels: cloud
|
||||||
$module_utils/postgres.py:
|
|
||||||
maintainers: $team_postgresql
|
|
||||||
labels: postgres postgresql
|
|
||||||
keywords: database postgres postgresql
|
|
||||||
$module_utils/pure.py:
|
$module_utils/pure.py:
|
||||||
maintainers: $team_purestorage
|
maintainers: $team_purestorage
|
||||||
labels: pure pure_storage
|
labels: pure pure_storage
|
||||||
@@ -216,54 +164,17 @@ files:
|
|||||||
labels: dimensiondata_network
|
labels: dimensiondata_network
|
||||||
$modules/cloud/dimensiondata/dimensiondata_vlan.py:
|
$modules/cloud/dimensiondata/dimensiondata_vlan.py:
|
||||||
maintainers: tintoy
|
maintainers: tintoy
|
||||||
$modules/cloud/docker/:
|
|
||||||
maintainers: $team_docker
|
|
||||||
ignore: cove
|
|
||||||
supershipit: felixfontein
|
|
||||||
$modules/cloud/docker/docker_compose.py:
|
|
||||||
maintainers: sluther
|
|
||||||
labels: docker_compose
|
|
||||||
$modules/cloud/docker/docker_config.py:
|
|
||||||
maintainers: ushuz
|
|
||||||
$modules/cloud/docker/docker_container.py:
|
|
||||||
maintainers: dusdanig softzilla zfil
|
|
||||||
ignore: ThomasSteinbach cove joshuaconner
|
|
||||||
$modules/cloud/docker/docker_image.py:
|
|
||||||
maintainers: softzilla ssbarnea
|
|
||||||
$modules/cloud/docker/docker_login.py:
|
|
||||||
maintainers: olsaki
|
|
||||||
$modules/cloud/docker/docker_network.py:
|
|
||||||
maintainers: keitwb
|
|
||||||
labels: docker_network
|
|
||||||
$modules/cloud/docker/docker_stack_task_info.py:
|
|
||||||
maintainers: imjoseangel
|
|
||||||
$modules/cloud/docker/docker_swarm_service.py:
|
|
||||||
maintainers: hannseman
|
|
||||||
labels: docker_swarm_service
|
|
||||||
$modules/cloud/docker/docker_swarm_service_info.py:
|
|
||||||
maintainers: hannseman
|
|
||||||
$modules/cloud/docker/docker_volume.py:
|
|
||||||
maintainers: agronholm
|
|
||||||
$modules/cloud/google/:
|
|
||||||
maintainers: $team_google
|
|
||||||
ignore: supertom
|
|
||||||
supershipit: $team_google
|
|
||||||
$modules/cloud/heroku/heroku_collaborator.py:
|
$modules/cloud/heroku/heroku_collaborator.py:
|
||||||
maintainers: marns93
|
maintainers: marns93
|
||||||
$modules/cloud/huawei/:
|
$modules/cloud/huawei/:
|
||||||
maintainers: $team_huawei huaweicloud
|
maintainers: $team_huawei huaweicloud
|
||||||
keywords: cloud huawei hwc
|
keywords: cloud huawei hwc
|
||||||
$modules/cloud/kubevirt/:
|
|
||||||
maintainers: $team_kubevirt kubevirt
|
|
||||||
keywords: kubevirt
|
|
||||||
$modules/cloud/linode/:
|
$modules/cloud/linode/:
|
||||||
maintainers: $team_linode
|
maintainers: $team_linode
|
||||||
$modules/cloud/linode/linode.py:
|
$modules/cloud/linode/linode.py:
|
||||||
maintainers: zbal
|
maintainers: zbal
|
||||||
$modules/cloud/lxc/lxc_container.py:
|
$modules/cloud/lxc/lxc_container.py:
|
||||||
maintainers: cloudnull
|
maintainers: cloudnull
|
||||||
$modules/cloud/lxc/lxc_profile.py:
|
|
||||||
maintainers: conloos
|
|
||||||
$modules/cloud/lxd/:
|
$modules/cloud/lxd/:
|
||||||
ignore: hnakamur
|
ignore: hnakamur
|
||||||
$modules/cloud/memset/:
|
$modules/cloud/memset/:
|
||||||
@@ -282,6 +193,10 @@ files:
|
|||||||
labels: proxmox_kvm virt
|
labels: proxmox_kvm virt
|
||||||
ignore: skvidal
|
ignore: skvidal
|
||||||
keywords: kvm libvirt proxmox qemu
|
keywords: kvm libvirt proxmox qemu
|
||||||
|
$modules/cloud/misc/proxmox_snap.py:
|
||||||
|
maintainers: $team_virt
|
||||||
|
labels: proxmox virt
|
||||||
|
keywords: kvm libvirt proxmox qemu
|
||||||
$modules/cloud/misc/proxmox_template.py:
|
$modules/cloud/misc/proxmox_template.py:
|
||||||
maintainers: $team_virt UnderGreen
|
maintainers: $team_virt UnderGreen
|
||||||
labels: proxmox_template virt
|
labels: proxmox_template virt
|
||||||
@@ -379,10 +294,8 @@ files:
|
|||||||
maintainers: bvitnik
|
maintainers: bvitnik
|
||||||
$modules/clustering/consul/:
|
$modules/clustering/consul/:
|
||||||
maintainers: $team_consul
|
maintainers: $team_consul
|
||||||
ignore: colin-nolan
|
|
||||||
$modules/clustering/etcd3.py:
|
$modules/clustering/etcd3.py:
|
||||||
maintainers: evrardjp
|
maintainers: evrardjp vfauth
|
||||||
ignore: vfauth
|
|
||||||
$modules/clustering/nomad/:
|
$modules/clustering/nomad/:
|
||||||
maintainers: chris93111
|
maintainers: chris93111
|
||||||
$modules/clustering/pacemaker_cluster.py:
|
$modules/clustering/pacemaker_cluster.py:
|
||||||
@@ -412,20 +325,6 @@ files:
|
|||||||
$modules/database/mssql/mssql_db.py:
|
$modules/database/mssql/mssql_db.py:
|
||||||
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
maintainers: vedit Jmainguy kenichi-ogawa-1988
|
||||||
labels: mssql_db
|
labels: mssql_db
|
||||||
$modules/database/postgresql/:
|
|
||||||
keywords: database postgres postgresql
|
|
||||||
labels: postgres postgresql
|
|
||||||
maintainers: $team_postgresql
|
|
||||||
$modules/database/postgresql/postgresql_ext.py:
|
|
||||||
maintainers: dschep strk
|
|
||||||
$modules/database/postgresql/postgresql_lang.py:
|
|
||||||
maintainers: jensdepuydt
|
|
||||||
$modules/database/postgresql/postgresql_privs.py:
|
|
||||||
maintainers: b6d
|
|
||||||
$modules/database/postgresql/postgresql_query.py:
|
|
||||||
maintainers: archf wrouesnel
|
|
||||||
$modules/database/postgresql/postgresql_tablespace.py:
|
|
||||||
maintainers: antoinell
|
|
||||||
$modules/database/vertica/:
|
$modules/database/vertica/:
|
||||||
maintainers: dareko
|
maintainers: dareko
|
||||||
$modules/files/archive.py:
|
$modules/files/archive.py:
|
||||||
@@ -447,6 +346,8 @@ files:
|
|||||||
maintainers: Rylon
|
maintainers: Rylon
|
||||||
$modules/identity/ipa/:
|
$modules/identity/ipa/:
|
||||||
maintainers: $team_ipa
|
maintainers: $team_ipa
|
||||||
|
$modules/identity/ipa/ipa_pwpolicy.py:
|
||||||
|
maintainers: adralioh
|
||||||
$modules/identity/ipa/ipa_service.py:
|
$modules/identity/ipa/ipa_service.py:
|
||||||
maintainers: cprh
|
maintainers: cprh
|
||||||
$modules/identity/ipa/ipa_vault.py:
|
$modules/identity/ipa/ipa_vault.py:
|
||||||
@@ -518,6 +419,8 @@ files:
|
|||||||
maintainers: orgito
|
maintainers: orgito
|
||||||
$modules/monitoring/stackdriver.py:
|
$modules/monitoring/stackdriver.py:
|
||||||
maintainers: bwhaley
|
maintainers: bwhaley
|
||||||
|
$modules/monitoring/statsd.py:
|
||||||
|
maintainers: mamercad
|
||||||
$modules/monitoring/statusio_maintenance.py:
|
$modules/monitoring/statusio_maintenance.py:
|
||||||
maintainers: bhcopeland
|
maintainers: bhcopeland
|
||||||
$modules/monitoring/uptimerobot.py:
|
$modules/monitoring/uptimerobot.py:
|
||||||
@@ -530,15 +433,7 @@ files:
|
|||||||
$modules/net_tools/dnsmadeeasy.py:
|
$modules/net_tools/dnsmadeeasy.py:
|
||||||
maintainers: briceburg
|
maintainers: briceburg
|
||||||
$modules/net_tools/haproxy.py:
|
$modules/net_tools/haproxy.py:
|
||||||
maintainers: ravibhure Normo
|
maintainers: ravibhure
|
||||||
$modules/net_tools/hetzner_failover_ip.py:
|
|
||||||
maintainers: felixfontein
|
|
||||||
$modules/net_tools/hetzner_failover_ip_info.py:
|
|
||||||
maintainers: felixfontein
|
|
||||||
$modules/net_tools/hetzner_firewall.py:
|
|
||||||
maintainers: felixfontein
|
|
||||||
$modules/net_tools/hetzner_firewall_info.py:
|
|
||||||
maintainers: felixfontein
|
|
||||||
$modules/net_tools/:
|
$modules/net_tools/:
|
||||||
maintainers: nerzhul
|
maintainers: nerzhul
|
||||||
$modules/net_tools/infinity/infinity.py:
|
$modules/net_tools/infinity/infinity.py:
|
||||||
@@ -638,7 +533,7 @@ files:
|
|||||||
$modules/notification/syslogger.py:
|
$modules/notification/syslogger.py:
|
||||||
maintainers: garbled1
|
maintainers: garbled1
|
||||||
$modules/notification/telegram.py:
|
$modules/notification/telegram.py:
|
||||||
maintainers: tyouxa
|
maintainers: tyouxa loms
|
||||||
$modules/notification/twilio.py:
|
$modules/notification/twilio.py:
|
||||||
maintainers: makaimc
|
maintainers: makaimc
|
||||||
$modules/notification/typetalk.py:
|
$modules/notification/typetalk.py:
|
||||||
@@ -677,6 +572,8 @@ files:
|
|||||||
ignore: kbrebanov
|
ignore: kbrebanov
|
||||||
$modules/packaging/os/apt_rpm.py:
|
$modules/packaging/os/apt_rpm.py:
|
||||||
maintainers: evgkrsk
|
maintainers: evgkrsk
|
||||||
|
$modules/packaging/os/copr.py:
|
||||||
|
maintainers: schlupov
|
||||||
$modules/packaging/os/flatpak.py:
|
$modules/packaging/os/flatpak.py:
|
||||||
maintainers: $team_flatpak
|
maintainers: $team_flatpak
|
||||||
$modules/packaging/os/flatpak_remote.py:
|
$modules/packaging/os/flatpak_remote.py:
|
||||||
@@ -763,6 +660,8 @@ files:
|
|||||||
maintainers: seandst
|
maintainers: seandst
|
||||||
$modules/packaging/os/rhsm_repository.py:
|
$modules/packaging/os/rhsm_repository.py:
|
||||||
maintainers: giovannisciortino
|
maintainers: giovannisciortino
|
||||||
|
$modules/packaging/os/rpm_ostree_pkg.py:
|
||||||
|
maintainers: dustymabe Akasurde
|
||||||
$modules/packaging/os/slackpkg.py:
|
$modules/packaging/os/slackpkg.py:
|
||||||
maintainers: KimNorgaard
|
maintainers: KimNorgaard
|
||||||
$modules/packaging/os/snap.py:
|
$modules/packaging/os/snap.py:
|
||||||
@@ -784,6 +683,8 @@ files:
|
|||||||
maintainers: pmakowski
|
maintainers: pmakowski
|
||||||
$modules/packaging/os/xbps.py:
|
$modules/packaging/os/xbps.py:
|
||||||
maintainers: dinoocch the-maldridge
|
maintainers: dinoocch the-maldridge
|
||||||
|
$modules/packaging/os/yum_versionlock.py:
|
||||||
|
maintainers: florianpaulhoberg aminvakil
|
||||||
$modules/packaging/os/zypper.py:
|
$modules/packaging/os/zypper.py:
|
||||||
maintainers: $team_suse
|
maintainers: $team_suse
|
||||||
labels: zypper
|
labels: zypper
|
||||||
@@ -798,8 +699,6 @@ files:
|
|||||||
maintainers: jagadeeshnv
|
maintainers: jagadeeshnv
|
||||||
$modules/remote_management/dellemc/ome_device_info.py:
|
$modules/remote_management/dellemc/ome_device_info.py:
|
||||||
maintainers: Sajna-Shetty
|
maintainers: Sajna-Shetty
|
||||||
$modules/remote_management/foreman/:
|
|
||||||
maintainers: ehelms ares ekohl xprazak2
|
|
||||||
$modules/remote_management/hpilo/:
|
$modules/remote_management/hpilo/:
|
||||||
maintainers: haad
|
maintainers: haad
|
||||||
ignore: dagwieers
|
ignore: dagwieers
|
||||||
@@ -828,7 +727,7 @@ files:
|
|||||||
$modules/remote_management/oneview/oneview_fcoe_network.py:
|
$modules/remote_management/oneview/oneview_fcoe_network.py:
|
||||||
maintainers: fgbulsoni
|
maintainers: fgbulsoni
|
||||||
$modules/remote_management/redfish/:
|
$modules/remote_management/redfish/:
|
||||||
maintainers: $team_redfish
|
maintainers: $team_redfish billdodd
|
||||||
ignore: jose-delarosa
|
ignore: jose-delarosa
|
||||||
$modules/remote_management/stacki/stacki_host.py:
|
$modules/remote_management/stacki/stacki_host.py:
|
||||||
maintainers: bsanders bbyhuy
|
maintainers: bsanders bbyhuy
|
||||||
@@ -841,8 +740,6 @@ files:
|
|||||||
maintainers: andreparames
|
maintainers: andreparames
|
||||||
$modules/source_control/git_config.py:
|
$modules/source_control/git_config.py:
|
||||||
maintainers: djmattyg007 mgedmin
|
maintainers: djmattyg007 mgedmin
|
||||||
$modules/source_control/github/github_hooks.py:
|
|
||||||
maintainers: pcgentry
|
|
||||||
$modules/source_control/github/github_deploy_key.py:
|
$modules/source_control/github/github_deploy_key.py:
|
||||||
maintainers: bincyber
|
maintainers: bincyber
|
||||||
$modules/source_control/github/github_issue.py:
|
$modules/source_control/github/github_issue.py:
|
||||||
@@ -853,8 +750,6 @@ files:
|
|||||||
ignore: erydo
|
ignore: erydo
|
||||||
$modules/source_control/github/github_release.py:
|
$modules/source_control/github/github_release.py:
|
||||||
maintainers: adrianmoisey
|
maintainers: adrianmoisey
|
||||||
$modules/source_control/github/github_repo.py:
|
|
||||||
maintainers: atorrescogollo
|
|
||||||
$modules/source_control/github/:
|
$modules/source_control/github/:
|
||||||
maintainers: stpierre
|
maintainers: stpierre
|
||||||
$modules/source_control/gitlab/:
|
$modules/source_control/gitlab/:
|
||||||
@@ -938,7 +833,7 @@ files:
|
|||||||
$modules/system/iptables_state.py:
|
$modules/system/iptables_state.py:
|
||||||
maintainers: quidame
|
maintainers: quidame
|
||||||
$modules/system/java_cert.py:
|
$modules/system/java_cert.py:
|
||||||
maintainers: haad absynth76
|
maintainers: haad
|
||||||
$modules/system/java_keystore.py:
|
$modules/system/java_keystore.py:
|
||||||
maintainers: Mogztter
|
maintainers: Mogztter
|
||||||
$modules/system/kernel_blacklist.py:
|
$modules/system/kernel_blacklist.py:
|
||||||
@@ -1012,6 +907,8 @@ files:
|
|||||||
maintainers: bcoca
|
maintainers: bcoca
|
||||||
$modules/system/syspatch.py:
|
$modules/system/syspatch.py:
|
||||||
maintainers: precurse
|
maintainers: precurse
|
||||||
|
$modules/system/sysrc.py:
|
||||||
|
maintainers: dlundgren
|
||||||
$modules/system/sysupgrade.py:
|
$modules/system/sysupgrade.py:
|
||||||
maintainers: precurse
|
maintainers: precurse
|
||||||
$modules/system/timezone.py:
|
$modules/system/timezone.py:
|
||||||
@@ -1021,7 +918,7 @@ files:
|
|||||||
maintainers: ahtik ovcharenko pyykkis
|
maintainers: ahtik ovcharenko pyykkis
|
||||||
labels: ufw
|
labels: ufw
|
||||||
$modules/system/vdo.py:
|
$modules/system/vdo.py:
|
||||||
maintainers: rhawalsh
|
maintainers: bgurney-rh
|
||||||
$modules/system/xfconf.py:
|
$modules/system/xfconf.py:
|
||||||
maintainers: russoz jbenden
|
maintainers: russoz jbenden
|
||||||
labels: xfconf
|
labels: xfconf
|
||||||
@@ -1106,31 +1003,27 @@ macros:
|
|||||||
terminals: plugins/terminal
|
terminals: plugins/terminal
|
||||||
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
|
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
|
||||||
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
|
||||||
team_consul: sgargan
|
team_consul: colin-nolan sgargan
|
||||||
team_cyberark_conjur: jvanderhoof ryanprior
|
team_cyberark_conjur: jvanderhoof ryanprior
|
||||||
team_docker: DBendit WojciechowskiPiotr akshay196 danihodovic dariko felixfontein jwitko kassiansun tbouvet chouseknecht
|
|
||||||
team_e_spirit: MatrixCrawler getjack
|
team_e_spirit: MatrixCrawler getjack
|
||||||
team_flatpak: JayKayy oolongbrothers
|
team_flatpak: JayKayy oolongbrothers
|
||||||
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii
|
team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
|
||||||
team_google: erjohnso rambleraptor
|
|
||||||
team_hpux: bcoca davx8342
|
team_hpux: bcoca davx8342
|
||||||
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
|
||||||
team_ipa: Akasurde Nosmoht fxfitz justchris1
|
team_ipa: Akasurde Nosmoht fxfitz
|
||||||
team_jboss: Wolfant jairojunior wbrefvem
|
team_jboss: Wolfant jairojunior wbrefvem
|
||||||
team_keycloak: eikef ndclt
|
team_keycloak: eikef ndclt
|
||||||
team_kubevirt: machacekondra mmazur pkliczewski
|
team_linode: InTheCloudDan decentral1se displague rmcintosh
|
||||||
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
|
|
||||||
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
|
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
|
||||||
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
|
||||||
team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
|
team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
|
||||||
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
|
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
|
||||||
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
|
team_opennebula: ilicmilan meerkampdvv rsmontero xorel
|
||||||
team_oracle: manojmeda mross22 nalsaber
|
team_oracle: manojmeda mross22 nalsaber
|
||||||
team_postgresql: Andersson007 Dorn- andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs ilicmilan
|
|
||||||
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
|
||||||
team_redfish: mraineri tomasg2012 xmadsen renxulei
|
team_redfish: billdodd mraineri tomasg2012
|
||||||
team_rhn: FlossWare alikins barnabycourt vritant
|
team_rhn: FlossWare alikins barnabycourt vritant
|
||||||
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
|
||||||
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
|
||||||
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
|
team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
|
||||||
team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
|
team_virt: joshainglis karmab Aversiste Thulium-Drake
|
||||||
|
|||||||
81
.gitignore
vendored
81
.gitignore
vendored
@@ -1,6 +1,6 @@
|
|||||||
|
|
||||||
# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
||||||
# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
||||||
|
|
||||||
### dotenv ###
|
### dotenv ###
|
||||||
.env
|
.env
|
||||||
@@ -88,7 +88,7 @@ flycheck_*.el
|
|||||||
.nfs*
|
.nfs*
|
||||||
|
|
||||||
### PyCharm+all ###
|
### PyCharm+all ###
|
||||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
|
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
|
||||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||||
|
|
||||||
# User-specific stuff
|
# User-specific stuff
|
||||||
@@ -98,9 +98,6 @@ flycheck_*.el
|
|||||||
.idea/**/dictionaries
|
.idea/**/dictionaries
|
||||||
.idea/**/shelf
|
.idea/**/shelf
|
||||||
|
|
||||||
# AWS User-specific
|
|
||||||
.idea/**/aws.xml
|
|
||||||
|
|
||||||
# Generated files
|
# Generated files
|
||||||
.idea/**/contentModel.xml
|
.idea/**/contentModel.xml
|
||||||
|
|
||||||
@@ -121,9 +118,6 @@ flycheck_*.el
|
|||||||
# When using Gradle or Maven with auto-import, you should exclude module files,
|
# When using Gradle or Maven with auto-import, you should exclude module files,
|
||||||
# since they will be recreated, and may cause churn. Uncomment if using
|
# since they will be recreated, and may cause churn. Uncomment if using
|
||||||
# auto-import.
|
# auto-import.
|
||||||
# .idea/artifacts
|
|
||||||
# .idea/compiler.xml
|
|
||||||
# .idea/jarRepositories.xml
|
|
||||||
# .idea/modules.xml
|
# .idea/modules.xml
|
||||||
# .idea/*.iml
|
# .idea/*.iml
|
||||||
# .idea/modules
|
# .idea/modules
|
||||||
@@ -204,6 +198,7 @@ parts/
|
|||||||
sdist/
|
sdist/
|
||||||
var/
|
var/
|
||||||
wheels/
|
wheels/
|
||||||
|
pip-wheel-metadata/
|
||||||
share/python-wheels/
|
share/python-wheels/
|
||||||
*.egg-info/
|
*.egg-info/
|
||||||
.installed.cfg
|
.installed.cfg
|
||||||
@@ -230,25 +225,13 @@ htmlcov/
|
|||||||
nosetests.xml
|
nosetests.xml
|
||||||
coverage.xml
|
coverage.xml
|
||||||
*.cover
|
*.cover
|
||||||
*.py,cover
|
|
||||||
.hypothesis/
|
.hypothesis/
|
||||||
.pytest_cache/
|
.pytest_cache/
|
||||||
cover/
|
|
||||||
|
|
||||||
# Translations
|
# Translations
|
||||||
*.mo
|
*.mo
|
||||||
*.pot
|
*.pot
|
||||||
|
|
||||||
# Django stuff:
|
|
||||||
*.log
|
|
||||||
local_settings.py
|
|
||||||
db.sqlite3
|
|
||||||
db.sqlite3-journal
|
|
||||||
|
|
||||||
# Flask stuff:
|
|
||||||
instance/
|
|
||||||
.webassets-cache
|
|
||||||
|
|
||||||
# Scrapy stuff:
|
# Scrapy stuff:
|
||||||
.scrapy
|
.scrapy
|
||||||
|
|
||||||
@@ -256,19 +239,9 @@ instance/
|
|||||||
docs/_build/
|
docs/_build/
|
||||||
|
|
||||||
# PyBuilder
|
# PyBuilder
|
||||||
.pybuilder/
|
|
||||||
target/
|
target/
|
||||||
|
|
||||||
# Jupyter Notebook
|
|
||||||
.ipynb_checkpoints
|
|
||||||
|
|
||||||
# IPython
|
|
||||||
profile_default/
|
|
||||||
ipython_config.py
|
|
||||||
|
|
||||||
# pyenv
|
# pyenv
|
||||||
# For a library or package, you might want to ignore these files since the code is
|
|
||||||
# intended to run in multiple environments; otherwise, check them in:
|
|
||||||
.python-version
|
.python-version
|
||||||
|
|
||||||
# pipenv
|
# pipenv
|
||||||
@@ -278,24 +251,12 @@ ipython_config.py
|
|||||||
# install all needed dependencies.
|
# install all needed dependencies.
|
||||||
#Pipfile.lock
|
#Pipfile.lock
|
||||||
|
|
||||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
# celery beat schedule file
|
||||||
__pypackages__/
|
|
||||||
|
|
||||||
# Celery stuff
|
|
||||||
celerybeat-schedule
|
celerybeat-schedule
|
||||||
celerybeat.pid
|
|
||||||
|
|
||||||
# SageMath parsed files
|
# SageMath parsed files
|
||||||
*.sage.py
|
*.sage.py
|
||||||
|
|
||||||
# Environments
|
|
||||||
.venv
|
|
||||||
env/
|
|
||||||
venv/
|
|
||||||
ENV/
|
|
||||||
env.bak/
|
|
||||||
venv.bak/
|
|
||||||
|
|
||||||
# Spyder project settings
|
# Spyder project settings
|
||||||
.spyderproject
|
.spyderproject
|
||||||
.spyproject
|
.spyproject
|
||||||
@@ -303,6 +264,10 @@ venv.bak/
|
|||||||
# Rope project settings
|
# Rope project settings
|
||||||
.ropeproject
|
.ropeproject
|
||||||
|
|
||||||
|
# Mr Developer
|
||||||
|
.mr.developer.cfg
|
||||||
|
.project
|
||||||
|
|
||||||
# mkdocs documentation
|
# mkdocs documentation
|
||||||
/site
|
/site
|
||||||
|
|
||||||
@@ -314,16 +279,9 @@ dmypy.json
|
|||||||
# Pyre type checker
|
# Pyre type checker
|
||||||
.pyre/
|
.pyre/
|
||||||
|
|
||||||
# pytype static type analyzer
|
|
||||||
.pytype/
|
|
||||||
|
|
||||||
# Cython debug symbols
|
|
||||||
cython_debug/
|
|
||||||
|
|
||||||
### Vim ###
|
### Vim ###
|
||||||
# Swap
|
# Swap
|
||||||
[._]*.s[a-v][a-z]
|
[._]*.s[a-v][a-z]
|
||||||
!*.svg # comment out if you don't need vector files
|
|
||||||
[._]*.sw[a-p]
|
[._]*.sw[a-p]
|
||||||
[._]s[a-rt-v][a-z]
|
[._]s[a-rt-v][a-z]
|
||||||
[._]ss[a-gi-z]
|
[._]ss[a-gi-z]
|
||||||
@@ -341,13 +299,11 @@ tags
|
|||||||
[._]*.un~
|
[._]*.un~
|
||||||
|
|
||||||
### WebStorm ###
|
### WebStorm ###
|
||||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
|
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
|
||||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||||
|
|
||||||
# User-specific stuff
|
# User-specific stuff
|
||||||
|
|
||||||
# AWS User-specific
|
|
||||||
|
|
||||||
# Generated files
|
# Generated files
|
||||||
|
|
||||||
# Sensitive or high-churn files
|
# Sensitive or high-churn files
|
||||||
@@ -358,9 +314,6 @@ tags
|
|||||||
# When using Gradle or Maven with auto-import, you should exclude module files,
|
# When using Gradle or Maven with auto-import, you should exclude module files,
|
||||||
# since they will be recreated, and may cause churn. Uncomment if using
|
# since they will be recreated, and may cause churn. Uncomment if using
|
||||||
# auto-import.
|
# auto-import.
|
||||||
# .idea/artifacts
|
|
||||||
# .idea/compiler.xml
|
|
||||||
# .idea/jarRepositories.xml
|
|
||||||
# .idea/modules.xml
|
# .idea/modules.xml
|
||||||
# .idea/*.iml
|
# .idea/*.iml
|
||||||
# .idea/modules
|
# .idea/modules
|
||||||
@@ -396,27 +349,15 @@ tags
|
|||||||
# *.ipr
|
# *.ipr
|
||||||
|
|
||||||
# Sonarlint plugin
|
# Sonarlint plugin
|
||||||
# https://plugins.jetbrains.com/plugin/7973-sonarlint
|
|
||||||
.idea/**/sonarlint/
|
.idea/**/sonarlint/
|
||||||
|
|
||||||
# SonarQube Plugin
|
# SonarQube Plugin
|
||||||
# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
|
|
||||||
.idea/**/sonarIssues.xml
|
.idea/**/sonarIssues.xml
|
||||||
|
|
||||||
# Markdown Navigator plugin
|
# Markdown Navigator plugin
|
||||||
# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
|
|
||||||
.idea/**/markdown-navigator.xml
|
.idea/**/markdown-navigator.xml
|
||||||
.idea/**/markdown-navigator-enh.xml
|
|
||||||
.idea/**/markdown-navigator/
|
.idea/**/markdown-navigator/
|
||||||
|
|
||||||
# Cache file creation bug
|
|
||||||
# See https://youtrack.jetbrains.com/issue/JBR-2257
|
|
||||||
.idea/$CACHE_FILE$
|
|
||||||
|
|
||||||
# CodeStream plugin
|
|
||||||
# https://plugins.jetbrains.com/plugin/12206-codestream
|
|
||||||
.idea/codestream.xml
|
|
||||||
|
|
||||||
### Windows ###
|
### Windows ###
|
||||||
# Windows thumbnail cache files
|
# Windows thumbnail cache files
|
||||||
Thumbs.db
|
Thumbs.db
|
||||||
@@ -443,4 +384,4 @@ $RECYCLE.BIN/
|
|||||||
# Windows shortcuts
|
# Windows shortcuts
|
||||||
*.lnk
|
*.lnk
|
||||||
|
|
||||||
# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
|
||||||
|
|||||||
1549
CHANGELOG.rst
1549
CHANGELOG.rst
File diff suppressed because it is too large
Load Diff
@@ -1,32 +0,0 @@
|
|||||||
# Contributing
|
|
||||||
|
|
||||||
We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository.
|
|
||||||
|
|
||||||
If you are a committer, also refer to the [collection's committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
|
|
||||||
|
|
||||||
## Issue tracker
|
|
||||||
|
|
||||||
Whether you are looking for an opportunity to contribute or you found a bug and already know how to solve it, please go to the [issue tracker](https://github.com/ansible-collections/community.general/issues).
|
|
||||||
There you can find feature ideas to implement, reports about bugs to solve, or submit an issue to discuss your idea before implementing it which can help choose a right direction at the beginning of your work and potentially save a lot of time and effort.
|
|
||||||
Also somebody may already have started discussing or working on implementing the same or a similar idea,
|
|
||||||
so you can cooperate to create a better solution together.
|
|
||||||
|
|
||||||
* If you are interested in starting with an easy issue, look for [issues with an `easyfix` label](https://github.com/ansible-collections/community.general/labels/easyfix).
|
|
||||||
* Often issues that are waiting for contributors to pick up have [the `waiting_on_contributor` label](https://github.com/ansible-collections/community.general/labels/waiting_on_contributor).
|
|
||||||
|
|
||||||
## Open pull requests
|
|
||||||
|
|
||||||
Look through currently [open pull requests](https://github.com/ansible-collections/community.general/pulls).
|
|
||||||
You can help by reviewing them. Reviews help move pull requests to merge state. Some good pull requests cannot be merged only due to a lack of reviews. And it is always worth saying that good reviews are often more valuable than pull requests themselves.
|
|
||||||
Note that reviewing does not only mean code review, but also offering comments on new interfaces added to existing plugins/modules, interfaces of new plugins/modules, improving language (not everyone is a native english speaker), or testing bugfixes and new features!
|
|
||||||
|
|
||||||
Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself.
|
|
||||||
|
|
||||||
* Try committing your changes with an informative but short commit message.
|
|
||||||
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
|
|
||||||
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout.
|
|
||||||
* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
|
|
||||||
|
|
||||||
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
|
|
||||||
|
|
||||||
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
|
|
||||||
16
README.md
16
README.md
@@ -1,17 +1,15 @@
|
|||||||
# Community General Collection
|
# Community General Collection
|
||||||
|
|
||||||
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
|
||||||
[](https://codecov.io/gh/ansible-collections/community.general)
|
[](https://codecov.io/gh/ansible-collections/community.general)
|
||||||
|
|
||||||
This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
|
||||||
|
|
||||||
You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
|
You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
|
||||||
|
|
||||||
Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so.
|
|
||||||
|
|
||||||
## Tested with Ansible
|
## Tested with Ansible
|
||||||
|
|
||||||
Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases. Ansible versions before 2.9.10 are not supported.
|
Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.
|
||||||
|
|
||||||
## External requirements
|
## External requirements
|
||||||
|
|
||||||
@@ -50,8 +48,6 @@ export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH
|
|||||||
|
|
||||||
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
||||||
|
|
||||||
Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md).
|
|
||||||
|
|
||||||
### Running tests
|
### Running tests
|
||||||
|
|
||||||
See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
|
See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
|
||||||
@@ -60,10 +56,10 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio
|
|||||||
|
|
||||||
We have a dedicated Working Group for Ansible development.
|
We have a dedicated Working Group for Ansible development.
|
||||||
|
|
||||||
You can find other people interested on the following [Libera.chat](https://libera.chat/) IRC channels -
|
You can find other people interested on the following Freenode IRC channels -
|
||||||
- `#ansible` - For general use questions and support.
|
- `#ansible` - For general use questions and support.
|
||||||
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs in ansible-core.
|
- `#ansible-devel` - For discussions on developer topics and code related to features or bugs.
|
||||||
- `#ansible-community` - For discussions on community topics and community meetings, and for general development questions for community collections.
|
- `#ansible-community` - For discussions on community topics and community meetings.
|
||||||
|
|
||||||
For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
|
For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
|
||||||
|
|
||||||
@@ -80,7 +76,7 @@ Basic instructions without release branches:
|
|||||||
|
|
||||||
## Release notes
|
## Release notes
|
||||||
|
|
||||||
See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.rst).
|
See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-2/CHANGELOG.rst).
|
||||||
|
|
||||||
## Roadmap
|
## Roadmap
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,73 +0,0 @@
|
|||||||
Committers Guidelines for community.general
|
|
||||||
===========================================
|
|
||||||
|
|
||||||
This document is based on the [Ansible committer guidelines](https://github.com/ansible/ansible/blob/b57444af14062ec96e0af75fdfc2098c74fe2d9a/docs/docsite/rst/community/committer_guidelines.rst) ([latest version](https://docs.ansible.com/ansible/devel/community/committer_guidelines.html)).
|
|
||||||
|
|
||||||
These are the guidelines for people with commit privileges on the Ansible Community General Collection GitHub repository. Please read the guidelines before you commit.
|
|
||||||
|
|
||||||
These guidelines apply to everyone. At the same time, this is NOT a process document. So just use good judgment. You have been given commit access because we trust your judgment.
|
|
||||||
|
|
||||||
That said, use the trust wisely.
|
|
||||||
|
|
||||||
If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges.
|
|
||||||
|
|
||||||
Our workflow on GitHub
|
|
||||||
----------------------
|
|
||||||
|
|
||||||
As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you are aware of the following workflow steps:
|
|
||||||
|
|
||||||
* Fork the repository upon which you want to do some work to your own personal repository
|
|
||||||
* Work on the specific branch upon which you need to commit
|
|
||||||
* Create a Pull Request back to the collection repository and await reviews
|
|
||||||
* Adjust code as necessary based on the Comments provided
|
|
||||||
* Ask someone from the other committers to do a final review and merge
|
|
||||||
|
|
||||||
Sometimes, committers merge their own pull requests. This section is a set of guidelines. If you are changing a comma in a doc or making a very minor change, you can use your best judgement. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgement and make sure people on the team are aware of your work.
|
|
||||||
|
|
||||||
Roles
|
|
||||||
-----
|
|
||||||
* Release managers: Merge pull requests to `stable-X` branches, create tags to do releases.
|
|
||||||
* Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgement of these devs.
|
|
||||||
* Module maintainers: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. This is primary [ansibullbot](https://github.com/ansibullbot)'s `shipit` mechanism.
|
|
||||||
|
|
||||||
General rules
|
|
||||||
-------------
|
|
||||||
Individuals with direct commit access to this collection repository are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*, individuals with this power are expected to use their best judgement.
|
|
||||||
|
|
||||||
* Do NOTs:
|
|
||||||
|
|
||||||
- Do not commit directly.
|
|
||||||
- Do not merge your own PRs. Someone else should have a chance to review and approve the PR merge. You have a small amount of leeway here for very minor changes.
|
|
||||||
- Do not forget about non-standard / alternate environments. Consider the alternatives. Yes, people have bad/unusual/strange environments (like binaries from multiple init systems installed), but they are the ones who need us the most.
|
|
||||||
- Do not drag your community team members down. Discuss the technical merits of any pull requests you review. Avoid negativity and personal comments. For more guidance on being a good community member, read the [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
|
|
||||||
- Do not forget about the maintenance burden. High-maintenance features may not be worth adding.
|
|
||||||
- Do not break playbooks. Always keep backwards compatibility in mind.
|
|
||||||
- Do not forget to keep it simple. Complexity breeds all kinds of problems.
|
|
||||||
- Do not merge to branches other than `main`, especially not to `stable-X`, if you do not have explicit permission to do so.
|
|
||||||
- Do not create tags. Tags are used in the release process, and should only be created by the people responsible for managing the stable branches.
|
|
||||||
|
|
||||||
* Do:
|
|
||||||
|
|
||||||
- Squash, avoid merges whenever possible, use GitHub's squash commits or cherry pick if needed (bisect thanks you).
|
|
||||||
- Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended.
|
|
||||||
- Consider backwards compatibility (goes back to "do not break existing playbooks").
|
|
||||||
- Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
|
|
||||||
- Discuss with other committers, specially when you are unsure of something.
|
|
||||||
- Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so.
|
|
||||||
- Consider scope, sometimes a fix can be generalized.
|
|
||||||
- Keep it simple, then things are maintainable, debuggable and intelligible.
|
|
||||||
|
|
||||||
Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community.
|
|
||||||
|
|
||||||
|
|
||||||
People
|
|
||||||
------
|
|
||||||
|
|
||||||
Individuals who have been asked to become a part of this group have generally been contributing in significant ways to the community.general collection for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act.
|
|
||||||
|
|
||||||
| Name | GitHub ID | IRC Nick | Other |
|
|
||||||
| ------------------- | -------------------- | ------------------ | -------------------- |
|
|
||||||
| Alexei Znamensky | russoz | russoz | |
|
|
||||||
| Andrew Klychkov | andersson007 | andersson007_ | |
|
|
||||||
| Felix Fontein | felixfontein | felixfontein | |
|
|
||||||
| John R Barker | gundalow | gundalow | |
|
|
||||||
@@ -1,17 +1,14 @@
|
|||||||
namespace: community
|
namespace: community
|
||||||
name: general
|
name: general
|
||||||
version: 1.3.12
|
version: 2.1.0
|
||||||
readme: README.md
|
readme: README.md
|
||||||
authors:
|
authors:
|
||||||
- Ansible (https://github.com/ansible)
|
- Ansible (https://github.com/ansible)
|
||||||
description: null
|
description: null
|
||||||
license_file: COPYING
|
license_file: COPYING
|
||||||
tags: [community]
|
tags: [community]
|
||||||
# NOTE: No more dependencies can be added to this list
|
# NOTE: No dependencies are expected to be added here
|
||||||
dependencies:
|
# dependencies:
|
||||||
ansible.netcommon: '>=1.0.0'
|
|
||||||
community.kubernetes: '>=1.0.0,<2.0.0'
|
|
||||||
google.cloud: '>=1.0.0'
|
|
||||||
repository: https://github.com/ansible-collections/community.general
|
repository: https://github.com/ansible-collections/community.general
|
||||||
documentation: https://docs.ansible.com/ansible/latest/collections/community/general/
|
documentation: https://docs.ansible.com/ansible/latest/collections/community/general/
|
||||||
homepage: https://github.com/ansible-collections/community.general
|
homepage: https://github.com/ansible-collections/community.general
|
||||||
|
|||||||
662
meta/runtime.yml
662
meta/runtime.yml
@@ -1,36 +1,6 @@
|
|||||||
|
---
|
||||||
requires_ansible: '>=2.9.10'
|
requires_ansible: '>=2.9.10'
|
||||||
action_groups:
|
action_groups:
|
||||||
docker:
|
|
||||||
- docker_swarm
|
|
||||||
- docker_image_facts
|
|
||||||
- docker_service
|
|
||||||
- docker_compose
|
|
||||||
- docker_config
|
|
||||||
- docker_container
|
|
||||||
- docker_container_info
|
|
||||||
- docker_host_info
|
|
||||||
- docker_image
|
|
||||||
- docker_image_info
|
|
||||||
- docker_login
|
|
||||||
- docker_network
|
|
||||||
- docker_network_info
|
|
||||||
- docker_node
|
|
||||||
- docker_node_info
|
|
||||||
- docker_prune
|
|
||||||
- docker_secret
|
|
||||||
- docker_swarm
|
|
||||||
- docker_swarm_info
|
|
||||||
- docker_swarm_service
|
|
||||||
- docker_swarm_service_info
|
|
||||||
- docker_volume
|
|
||||||
- docker_volume_info
|
|
||||||
k8s:
|
|
||||||
- kubevirt_cdi_upload
|
|
||||||
- kubevirt_preset
|
|
||||||
- kubevirt_pvc
|
|
||||||
- kubevirt_rs
|
|
||||||
- kubevirt_template
|
|
||||||
- kubevirt_vm
|
|
||||||
ovirt:
|
ovirt:
|
||||||
- ovirt_affinity_label_facts
|
- ovirt_affinity_label_facts
|
||||||
- ovirt_api_facts
|
- ovirt_api_facts
|
||||||
@@ -57,252 +27,174 @@ action_groups:
|
|||||||
- ovirt_vm_facts
|
- ovirt_vm_facts
|
||||||
- ovirt_vmpool_facts
|
- ovirt_vmpool_facts
|
||||||
plugin_routing:
|
plugin_routing:
|
||||||
|
connection:
|
||||||
|
docker:
|
||||||
|
redirect: community.docker.docker
|
||||||
|
oc:
|
||||||
|
redirect: community.okd.oc
|
||||||
lookup:
|
lookup:
|
||||||
conjur_variable:
|
gcp_storage_file:
|
||||||
redirect: cyberark.conjur.conjur_variable
|
redirect: community.google.gcp_storage_file
|
||||||
deprecation:
|
hashi_vault:
|
||||||
removal_version: 2.0.0
|
redirect: community.hashi_vault.hashi_vault
|
||||||
warning_text: The conjur_variable lookup has been moved to the cyberark.conjur collection.
|
|
||||||
modules:
|
modules:
|
||||||
ali_instance_facts:
|
ali_instance_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
digital_ocean:
|
docker_compose:
|
||||||
deprecation:
|
redirect: community.docker.docker_compose
|
||||||
removal_version: 2.0.0
|
docker_config:
|
||||||
warning_text: The digital_ocean module has been moved to the community.digitalocean collection.
|
redirect: community.docker.docker_config
|
||||||
redirect: community.digitalocean.digital_ocean
|
docker_container:
|
||||||
digital_ocean_account_facts:
|
redirect: community.docker.docker_container
|
||||||
deprecation:
|
docker_container_info:
|
||||||
removal_version: 2.0.0
|
redirect: community.docker.docker_container_info
|
||||||
warning_text: The digital_ocean_account_facts module has been moved to the community.digitalocean collection.
|
docker_host_info:
|
||||||
redirect: community.digitalocean.digital_ocean_account_facts
|
redirect: community.docker.docker_host_info
|
||||||
digital_ocean_account_info:
|
docker_image:
|
||||||
deprecation:
|
redirect: community.docker.docker_image
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_account_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_account_info
|
|
||||||
digital_ocean_block_storage:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_block_storage module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_block_storage
|
|
||||||
digital_ocean_certificate:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_certificate module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_certificate
|
|
||||||
digital_ocean_certificate_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_certificate_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_certificate_facts
|
|
||||||
digital_ocean_certificate_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_certificate_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_certificate_info
|
|
||||||
digital_ocean_domain:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_domain module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_domain
|
|
||||||
digital_ocean_domain_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_domain_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_domain_facts
|
|
||||||
digital_ocean_domain_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_domain_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_domain_info
|
|
||||||
digital_ocean_droplet:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_droplet module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_droplet
|
|
||||||
digital_ocean_firewall_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_firewall_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_firewall_facts
|
|
||||||
digital_ocean_firewall_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_firewall_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_firewall_info
|
|
||||||
digital_ocean_floating_ip:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_floating_ip module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_floating_ip
|
|
||||||
digital_ocean_floating_ip_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_floating_ip_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_floating_ip_facts
|
|
||||||
digital_ocean_floating_ip_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_floating_ip_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_floating_ip_info
|
|
||||||
digital_ocean_image_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_image_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_image_facts
|
|
||||||
digital_ocean_image_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_image_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_image_info
|
|
||||||
digital_ocean_load_balancer_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_load_balancer_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_load_balancer_facts
|
|
||||||
digital_ocean_load_balancer_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_load_balancer_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_load_balancer_info
|
|
||||||
digital_ocean_region_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_region_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_region_facts
|
|
||||||
digital_ocean_region_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_region_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_region_info
|
|
||||||
digital_ocean_size_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_size_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_size_facts
|
|
||||||
digital_ocean_size_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_size_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_size_info
|
|
||||||
digital_ocean_snapshot_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_snapshot_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_snapshot_facts
|
|
||||||
digital_ocean_snapshot_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_snapshot_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_snapshot_info
|
|
||||||
digital_ocean_sshkey:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_sshkey module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_sshkey
|
|
||||||
digital_ocean_sshkey_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_sshkey_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_sshkey_facts
|
|
||||||
digital_ocean_sshkey_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_sshkey_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_sshkey_info
|
|
||||||
digital_ocean_tag:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_tag module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_tag
|
|
||||||
digital_ocean_tag_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_tag_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_tag_facts
|
|
||||||
digital_ocean_tag_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_tag_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_tag_info
|
|
||||||
digital_ocean_volume_facts:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_volume_facts module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_volume_facts
|
|
||||||
digital_ocean_volume_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The digital_ocean_volume_info module has been moved to the community.digitalocean collection.
|
|
||||||
redirect: community.digitalocean.digital_ocean_volume_info
|
|
||||||
docker_image_facts:
|
docker_image_facts:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use community.docker.docker_image_info instead.
|
||||||
|
docker_image_info:
|
||||||
|
redirect: community.docker.docker_image_info
|
||||||
|
docker_login:
|
||||||
|
redirect: community.docker.docker_login
|
||||||
|
docker_network:
|
||||||
|
redirect: community.docker.docker_network
|
||||||
|
docker_network_info:
|
||||||
|
redirect: community.docker.docker_network_info
|
||||||
|
docker_node:
|
||||||
|
redirect: community.docker.docker_node
|
||||||
|
docker_node_info:
|
||||||
|
redirect: community.docker.docker_node_info
|
||||||
|
docker_prune:
|
||||||
|
redirect: community.docker.docker_prune
|
||||||
|
docker_secret:
|
||||||
|
redirect: community.docker.docker_secret
|
||||||
docker_service:
|
docker_service:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use community.docker.docker_compose instead.
|
||||||
firewalld:
|
docker_stack:
|
||||||
deprecation:
|
redirect: community.docker.docker_stack
|
||||||
removal_version: 2.0.0
|
docker_stack_info:
|
||||||
warning_text: The firewalld module has been moved to the ansible.posix collection.
|
redirect: community.docker.docker_stack_info
|
||||||
redirect: ansible.posix.firewalld
|
docker_stack_task_info:
|
||||||
|
redirect: community.docker.docker_stack_task_info
|
||||||
|
docker_swarm:
|
||||||
|
redirect: community.docker.docker_swarm
|
||||||
|
docker_swarm_info:
|
||||||
|
redirect: community.docker.docker_swarm_info
|
||||||
|
docker_swarm_service:
|
||||||
|
redirect: community.docker.docker_swarm_service
|
||||||
|
docker_swarm_service_info:
|
||||||
|
redirect: community.docker.docker_swarm_service_info
|
||||||
|
docker_volume:
|
||||||
|
redirect: community.docker.docker_volume
|
||||||
|
docker_volume_info:
|
||||||
|
redirect: community.docker.docker_volume_info
|
||||||
foreman:
|
foreman:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use the modules from the theforeman.foreman collection instead.
|
||||||
|
gc_storage:
|
||||||
|
redirect: community.google.gc_storage
|
||||||
gcdns_record:
|
gcdns_record:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_dns_resource_record_set instead.
|
||||||
gcdns_zone:
|
gcdns_zone:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_dns_managed_zone instead.
|
||||||
gce:
|
gce:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_compute_instance instead.
|
||||||
|
gce_eip:
|
||||||
|
redirect: community.google.gce_eip
|
||||||
|
gce_img:
|
||||||
|
redirect: community.google.gce_img
|
||||||
|
gce_instance_template:
|
||||||
|
redirect: community.google.gce_instance_template
|
||||||
|
gce_labels:
|
||||||
|
redirect: community.google.gce_labels
|
||||||
|
gce_lb:
|
||||||
|
redirect: community.google.gce_lb
|
||||||
|
gce_mig:
|
||||||
|
redirect: community.google.gce_mig
|
||||||
|
gce_net:
|
||||||
|
redirect: community.google.gce_net
|
||||||
|
gce_pd:
|
||||||
|
redirect: community.google.gce_pd
|
||||||
|
gce_snapshot:
|
||||||
|
redirect: community.google.gce_snapshot
|
||||||
|
gce_tag:
|
||||||
|
redirect: community.google.gce_tag
|
||||||
gcp_backend_service:
|
gcp_backend_service:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_compute_backend_service instead.
|
||||||
gcp_forwarding_rule:
|
gcp_forwarding_rule:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule instead.
|
||||||
gcp_healthcheck:
|
gcp_healthcheck:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check or google.cloud.gcp_compute_https_health_check instead.
|
||||||
gcp_target_proxy:
|
gcp_target_proxy:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_compute_target_http_proxy instead.
|
||||||
gcp_url_map:
|
gcp_url_map:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_compute_url_map instead.
|
||||||
|
gcpubsub:
|
||||||
|
redirect: community.google.gcpubsub
|
||||||
|
gcpubsub_info:
|
||||||
|
redirect: community.google.gcpubsub_info
|
||||||
gcpubsub_facts:
|
gcpubsub_facts:
|
||||||
|
redirect: community.google.gcpubsub_info
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use community.google.gcpubsub_info instead.
|
||||||
gcspanner:
|
gcspanner:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead.
|
||||||
github_hooks:
|
github_hooks:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead.
|
||||||
|
gluster_heal_info:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 3.0.0
|
||||||
|
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead.
|
||||||
|
gluster_peer:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 3.0.0
|
||||||
|
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead.
|
||||||
|
gluster_volume:
|
||||||
|
deprecation:
|
||||||
|
removal_version: 3.0.0
|
||||||
|
warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead.
|
||||||
helm:
|
helm:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
|
warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
|
||||||
|
hetzner_failover_ip:
|
||||||
|
redirect: community.hrobot.failover_ip
|
||||||
|
hetzner_failover_ip_info:
|
||||||
|
redirect: community.hrobot.failover_ip_info
|
||||||
|
hetzner_firewall:
|
||||||
|
redirect: community.hrobot.firewall
|
||||||
|
hetzner_firewall_info:
|
||||||
|
redirect: community.hrobot.firewall_info
|
||||||
hpilo_facts:
|
hpilo_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -311,44 +203,26 @@ plugin_routing:
|
|||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
infini_export:
|
|
||||||
redirect: infinidat.infinibox.infini_export
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The infini_export module has been moved to the infinidat collection.
|
|
||||||
infini_export_client:
|
|
||||||
redirect: infinidat.infinibox.infini_export_client
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The infini_export_client module has been moved to the infinidat collection.
|
|
||||||
infini_fs:
|
|
||||||
redirect: infinidat.infinibox.infini_fs
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The infini_fs module has been moved to the infinidat collection.
|
|
||||||
infini_host:
|
|
||||||
redirect: infinidat.infinibox.infini_host
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The infini_host module has been moved to the infinidat collection.
|
|
||||||
infini_pool:
|
|
||||||
redirect: infinidat.infinibox.infini_pool
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The infini_pool module has been moved to the infinidat collection.
|
|
||||||
infini_vol:
|
|
||||||
redirect: infinidat.infinibox.infini_vol
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The infini_vol module has been moved to the infinidat collection.
|
|
||||||
jenkins_job_facts:
|
jenkins_job_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
katello:
|
katello:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use the modules from the theforeman.foreman collection instead.
|
||||||
|
kubevirt_cdi_upload:
|
||||||
|
redirect: community.kubevirt.kubevirt_cdi_upload
|
||||||
|
kubevirt_preset:
|
||||||
|
redirect: community.kubevirt.kubevirt_preset
|
||||||
|
kubevirt_pvc:
|
||||||
|
redirect: community.kubevirt.kubevirt_pvc
|
||||||
|
kubevirt_rs:
|
||||||
|
redirect: community.kubevirt.kubevirt_rs
|
||||||
|
kubevirt_template:
|
||||||
|
redirect: community.kubevirt.kubevirt_template
|
||||||
|
kubevirt_vm:
|
||||||
|
redirect: community.kubevirt.kubevirt_vm
|
||||||
ldap_attr:
|
ldap_attr:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -369,68 +243,38 @@ plugin_routing:
|
|||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
mysql_db:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The mysql_db module has been moved to the community.mysql collection.
|
|
||||||
redirect: community.mysql.mysql_db
|
|
||||||
mysql_info:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The mysql_info module has been moved to the community.mysql collection.
|
|
||||||
redirect: community.mysql.mysql_info
|
|
||||||
mysql_query:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The mysql_query module has been moved to the community.mysql collection.
|
|
||||||
redirect: community.mysql.mysql_query
|
|
||||||
mysql_replication:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The mysql_replication module has been moved to the community.mysql collection.
|
|
||||||
redirect: community.mysql.mysql_replication
|
|
||||||
mysql_user:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The mysql_user module has been moved to the community.mysql collection.
|
|
||||||
redirect: community.mysql.mysql_user
|
|
||||||
mysql_variables:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The mysql_variables module has been moved to the community.mysql collection.
|
|
||||||
redirect: community.mysql.mysql_variables
|
|
||||||
na_cdot_aggregate:
|
na_cdot_aggregate:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.ontap.na_ontap_aggregate instead.
|
||||||
na_cdot_license:
|
na_cdot_license:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.ontap.na_ontap_license instead.
|
||||||
na_cdot_lun:
|
na_cdot_lun:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.ontap.na_ontap_lun instead.
|
||||||
na_cdot_qtree:
|
na_cdot_qtree:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.ontap.na_ontap_qtree instead.
|
||||||
na_cdot_svm:
|
na_cdot_svm:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.ontap.na_ontap_svm instead.
|
||||||
na_cdot_user:
|
na_cdot_user:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.ontap.na_ontap_user instead.
|
||||||
na_cdot_user_role:
|
na_cdot_user_role:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.ontap.na_ontap_user_role instead.
|
||||||
na_cdot_volume:
|
na_cdot_volume:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.ontap.na_ontap_volume instead.
|
||||||
na_ontap_gather_facts:
|
na_ontap_gather_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -587,41 +431,50 @@ plugin_routing:
|
|||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
proxysql_backend_servers:
|
postgresql_copy:
|
||||||
deprecation:
|
redirect: community.postgresql.postgresql_copy
|
||||||
removal_version: 2.0.0
|
postgresql_db:
|
||||||
warning_text: The proxysql_backend_servers module has been moved to the community.proxysql collection.
|
redirect: community.postgresql.postgresql_db
|
||||||
redirect: community.proxysql.proxysql_backend_servers
|
postgresql_ext:
|
||||||
proxysql_global_variables:
|
redirect: community.postgresql.postgresql_ext
|
||||||
deprecation:
|
postgresql_idx:
|
||||||
removal_version: 2.0.0
|
redirect: community.postgresql.postgresql_idx
|
||||||
warning_text: The proxysql_global_variables module has been moved to the community.proxysql collection.
|
postgresql_info:
|
||||||
redirect: community.proxysql.proxysql_global_variables
|
redirect: community.postgresql.postgresql_info
|
||||||
proxysql_manage_config:
|
postgresql_lang:
|
||||||
deprecation:
|
redirect: community.postgresql.postgresql_lang
|
||||||
removal_version: 2.0.0
|
postgresql_membership:
|
||||||
warning_text: The proxysql_manage_config module has been moved to the community.proxysql collection.
|
redirect: community.postgresql.postgresql_membership
|
||||||
redirect: community.proxysql.proxysql_manage_config
|
postgresql_owner:
|
||||||
proxysql_mysql_users:
|
redirect: community.postgresql.postgresql_owner
|
||||||
deprecation:
|
postgresql_pg_hba:
|
||||||
removal_version: 2.0.0
|
redirect: community.postgresql.postgresql_pg_hba
|
||||||
warning_text: The proxysql_mysql_users module has been moved to the community.proxysql collection.
|
postgresql_ping:
|
||||||
redirect: community.proxysql.proxysql_mysql_users
|
redirect: community.postgresql.postgresql_ping
|
||||||
proxysql_query_rules:
|
postgresql_privs:
|
||||||
deprecation:
|
redirect: community.postgresql.postgresql_privs
|
||||||
removal_version: 2.0.0
|
postgresql_publication:
|
||||||
warning_text: The proxysql_query_rules module has been moved to the community.proxysql collection.
|
redirect: community.postgresql.postgresql_publication
|
||||||
redirect: community.proxysql.proxysql_query_rules
|
postgresql_query:
|
||||||
proxysql_replication_hostgroups:
|
redirect: community.postgresql.postgresql_query
|
||||||
deprecation:
|
postgresql_schema:
|
||||||
removal_version: 2.0.0
|
redirect: community.postgresql.postgresql_schema
|
||||||
warning_text: The proxysql_replication_hostgroups module has been moved to the community.proxysql collection.
|
postgresql_sequence:
|
||||||
redirect: community.proxysql.proxysql_replication_hostgroups
|
redirect: community.postgresql.postgresql_sequence
|
||||||
proxysql_scheduler:
|
postgresql_set:
|
||||||
deprecation:
|
redirect: community.postgresql.postgresql_set
|
||||||
removal_version: 2.0.0
|
postgresql_slot:
|
||||||
warning_text: The proxysql_scheduler module has been moved to the community.proxysql collection.
|
redirect: community.postgresql.postgresql_slot
|
||||||
redirect: community.proxysql.proxysql_scheduler
|
postgresql_subscription:
|
||||||
|
redirect: community.postgresql.postgresql_subscription
|
||||||
|
postgresql_table:
|
||||||
|
redirect: community.postgresql.postgresql_table
|
||||||
|
postgresql_tablespace:
|
||||||
|
redirect: community.postgresql.postgresql_tablespace
|
||||||
|
postgresql_user_obj_stat_info:
|
||||||
|
redirect: community.postgresql.postgresql_user_obj_stat_info
|
||||||
|
postgresql_user:
|
||||||
|
redirect: community.postgresql.postgresql_user
|
||||||
purefa_facts:
|
purefa_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -667,25 +520,25 @@ plugin_routing:
|
|||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
sf_account_manager:
|
sf_account_manager:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.elementsw.na_elementsw_account instead.
|
||||||
sf_check_connections:
|
sf_check_connections:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.elementsw.na_elementsw_check_connections instead.
|
||||||
sf_snapshot_schedule_manager:
|
sf_snapshot_schedule_manager:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.elementsw.na_elementsw_snapshot_schedule instead.
|
||||||
sf_volume_access_group_manager:
|
sf_volume_access_group_manager:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.elementsw.na_elementsw_access_group instead.
|
||||||
sf_volume_manager:
|
sf_volume_manager:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use netapp.elementsw.na_elementsw_volume instead.
|
||||||
smartos_image_facts:
|
smartos_image_facts:
|
||||||
deprecation:
|
deprecation:
|
||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
@@ -699,57 +552,52 @@ plugin_routing:
|
|||||||
removal_version: 3.0.0
|
removal_version: 3.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: see plugin documentation for details
|
||||||
doc_fragments:
|
doc_fragments:
|
||||||
digital_ocean:
|
_gcp:
|
||||||
deprecation:
|
redirect: community.google._gcp
|
||||||
removal_version: 2.0.0
|
docker:
|
||||||
warning_text: The digital_ocean docs_fragment has been moved to the community.digitalocean collection.
|
redirect: community.docker.docker
|
||||||
redirect: community.digitalocean.digital_ocean
|
hetzner:
|
||||||
infinibox:
|
redirect: community.hrobot.robot
|
||||||
redirect: infinidat.infinibox.infinibox
|
kubevirt_common_options:
|
||||||
deprecation:
|
redirect: community.kubevirt.kubevirt_common_options
|
||||||
removal_version: 2.0.0
|
kubevirt_vm_options:
|
||||||
warning_text: The infinibox doc_fragments plugin has been moved to the infinidat.infinibox collection.
|
redirect: community.kubevirt.kubevirt_vm_options
|
||||||
mysql:
|
postgresql:
|
||||||
deprecation:
|
redirect: community.postgresql.postgresql
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The mysql docs_fragment has been moved to the community.mysql collection.
|
|
||||||
redirect: community.mysql.mysql
|
|
||||||
proxysql:
|
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The proxysql docs_fragment has been moved to the community.proxysql collection.
|
|
||||||
redirect: community.proxysql.proxysql
|
|
||||||
module_utils:
|
module_utils:
|
||||||
digital_ocean:
|
docker.common:
|
||||||
deprecation:
|
redirect: community.docker.common
|
||||||
removal_version: 2.0.0
|
docker.swarm:
|
||||||
warning_text: The digital_ocean module_utils has been moved to the community.digitalocean collection.
|
redirect: community.docker.swarm
|
||||||
redirect: community.digitalocean.digital_ocean
|
gcdns:
|
||||||
firewalld:
|
redirect: community.google.gcdns
|
||||||
deprecation:
|
gce:
|
||||||
removal_version: 2.0.0
|
redirect: community.google.gce
|
||||||
warning_text: The firewalld module_utils has been moved to the ansible.posix collection.
|
gcp:
|
||||||
redirect: ansible.posix.firewalld
|
redirect: community.google.gcp
|
||||||
infinibox:
|
hetzner:
|
||||||
redirect: infinidat.infinibox.infinibox
|
redirect: community.hrobot.robot
|
||||||
deprecation:
|
kubevirt:
|
||||||
removal_version: 2.0.0
|
redirect: community.kubevirt.kubevirt
|
||||||
warning_text: The infinibox module_utils plugin has been moved to the infinidat.infinibox collection.
|
postgresql:
|
||||||
mysql:
|
redirect: community.postgresql.postgresql
|
||||||
deprecation:
|
|
||||||
removal_version: 2.0.0
|
|
||||||
warning_text: The mysql module_utils has been moved to the community.mysql collection.
|
|
||||||
redirect: community.mysql.mysql
|
|
||||||
callback:
|
callback:
|
||||||
actionable:
|
actionable:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options.
|
||||||
full_skip:
|
full_skip:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' option.
|
||||||
stderr:
|
stderr:
|
||||||
deprecation:
|
tombstone:
|
||||||
removal_version: 2.0.0
|
removal_version: 2.0.0
|
||||||
warning_text: see plugin documentation for details
|
warning_text: Use the 'default' callback plugin with 'display_failed_stderr = yes' option.
|
||||||
|
inventory:
|
||||||
|
docker_machine:
|
||||||
|
redirect: community.docker.docker_machine
|
||||||
|
docker_swarm:
|
||||||
|
redirect: community.docker.docker_swarm
|
||||||
|
kubevirt:
|
||||||
|
redirect: community.kubevirt.kubevirt
|
||||||
|
|||||||
@@ -98,25 +98,9 @@ class ActionModule(ActionBase):
|
|||||||
task_async,
|
task_async,
|
||||||
max_timeout))
|
max_timeout))
|
||||||
|
|
||||||
# BEGIN snippet from async_status action plugin
|
# inject the async directory based on the shell option into the
|
||||||
env_async_dir = [e for e in self._task.environment if
|
# module args
|
||||||
"ANSIBLE_ASYNC_DIR" in e]
|
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
|
||||||
if len(env_async_dir) > 0:
|
|
||||||
# for backwards compatibility we need to get the dir from
|
|
||||||
# ANSIBLE_ASYNC_DIR that is defined in the environment. This is
|
|
||||||
# deprecated and will be removed in favour of shell options
|
|
||||||
async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
|
|
||||||
|
|
||||||
msg = "Setting the async dir from the environment keyword " \
|
|
||||||
"ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
|
|
||||||
"shell option instead"
|
|
||||||
display.deprecated(msg, version='2.0.0',
|
|
||||||
collection_name='community.general') # was Ansible 2.12
|
|
||||||
else:
|
|
||||||
# inject the async directory based on the shell option into the
|
|
||||||
# module args
|
|
||||||
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
|
|
||||||
# END snippet from async_status action plugin
|
|
||||||
|
|
||||||
# Bind the loop max duration to consistent values on both
|
# Bind the loop max duration to consistent values on both
|
||||||
# remote and local sides (if not the same, make the loop
|
# remote and local sides (if not the same, make the loop
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
become: doas
|
name: doas
|
||||||
short_description: Do As user
|
short_description: Do As user
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the doas utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the doas utility.
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
become: dzdo
|
name: dzdo
|
||||||
short_description: Centrify's Direct Authorize
|
short_description: Centrify's Direct Authorize
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
become: ksu
|
name: ksu
|
||||||
short_description: Kerberos substitute user
|
short_description: Kerberos substitute user
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
become: machinectl
|
name: machinectl
|
||||||
short_description: Systemd's machinectl privilege escalation
|
short_description: Systemd's machinectl privilege escalation
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
become: pbrun
|
name: pbrun
|
||||||
short_description: PowerBroker run
|
short_description: PowerBroker run
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
become: pfexec
|
name: pfexec
|
||||||
short_description: profile based execution
|
short_description: profile based execution
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
become: pmrun
|
name: pmrun
|
||||||
short_description: Privilege Manager run
|
short_description: Privilege Manager run
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
become: sesu
|
name: sesu
|
||||||
short_description: CA Privileged Access Manager
|
short_description: CA Privileged Access Manager
|
||||||
description:
|
description:
|
||||||
- This become plugins allows your remote/login user to execute commands as another user via the sesu utility.
|
- This become plugins allows your remote/login user to execute commands as another user via the sesu utility.
|
||||||
|
|||||||
11
plugins/cache/memcached.py
vendored
11
plugins/cache/memcached.py
vendored
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
cache: memcached
|
name: memcached
|
||||||
short_description: Use memcached DB for cache
|
short_description: Use memcached DB for cache
|
||||||
description:
|
description:
|
||||||
- This cache uses JSON formatted, per host records saved in memcached.
|
- This cache uses JSON formatted, per host records saved in memcached.
|
||||||
@@ -53,6 +53,7 @@ from ansible import constants as C
|
|||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.module_utils.common._collections_compat import MutableSet
|
from ansible.module_utils.common._collections_compat import MutableSet
|
||||||
from ansible.plugins.cache import BaseCacheModule
|
from ansible.plugins.cache import BaseCacheModule
|
||||||
|
from ansible.release import __version__ as ansible_base_version
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -161,7 +162,7 @@ class CacheModuleKeys(MutableSet):
|
|||||||
self._cache.set(self.PREFIX, self._keyset)
|
self._cache.set(self.PREFIX, self._keyset)
|
||||||
|
|
||||||
def remove_by_timerange(self, s_min, s_max):
|
def remove_by_timerange(self, s_min, s_max):
|
||||||
for k in list(self._keyset.keys()):
|
for k in self._keyset.keys():
|
||||||
t = self._keyset[k]
|
t = self._keyset[k]
|
||||||
if s_min < t < s_max:
|
if s_min < t < s_max:
|
||||||
del self._keyset[k]
|
del self._keyset[k]
|
||||||
@@ -180,9 +181,9 @@ class CacheModule(BaseCacheModule):
|
|||||||
self._timeout = self.get_option('_timeout')
|
self._timeout = self.get_option('_timeout')
|
||||||
self._prefix = self.get_option('_prefix')
|
self._prefix = self.get_option('_prefix')
|
||||||
except KeyError:
|
except KeyError:
|
||||||
display.deprecated('Rather than importing CacheModules directly, '
|
# TODO: remove once we no longer support Ansible 2.9
|
||||||
'use ansible.plugins.loader.cache_loader',
|
if not ansible_base_version.startswith('2.9.'):
|
||||||
version='2.0.0', collection_name='community.general') # was Ansible 2.12
|
raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
|
||||||
if C.CACHE_PLUGIN_CONNECTION:
|
if C.CACHE_PLUGIN_CONNECTION:
|
||||||
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
|
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
|
||||||
self._timeout = C.CACHE_PLUGIN_TIMEOUT
|
self._timeout = C.CACHE_PLUGIN_TIMEOUT
|
||||||
|
|||||||
2
plugins/cache/pickle.py
vendored
2
plugins/cache/pickle.py
vendored
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
cache: pickle
|
name: pickle
|
||||||
short_description: Pickle formatted files.
|
short_description: Pickle formatted files.
|
||||||
description:
|
description:
|
||||||
- This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
|
- This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
|
||||||
|
|||||||
15
plugins/cache/redis.py
vendored
15
plugins/cache/redis.py
vendored
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
cache: redis
|
name: redis
|
||||||
short_description: Use Redis DB for cache
|
short_description: Use Redis DB for cache
|
||||||
description:
|
description:
|
||||||
- This cache uses JSON formatted, per host records saved in Redis.
|
- This cache uses JSON formatted, per host records saved in Redis.
|
||||||
@@ -69,6 +69,7 @@ from ansible.errors import AnsibleError
|
|||||||
from ansible.module_utils._text import to_native
|
from ansible.module_utils._text import to_native
|
||||||
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
|
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
|
||||||
from ansible.plugins.cache import BaseCacheModule
|
from ansible.plugins.cache import BaseCacheModule
|
||||||
|
from ansible.release import __version__ as ansible_base_version
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -103,9 +104,9 @@ class CacheModule(BaseCacheModule):
|
|||||||
self._keys_set = self.get_option('_keyset_name')
|
self._keys_set = self.get_option('_keyset_name')
|
||||||
self._sentinel_service_name = self.get_option('_sentinel_service_name')
|
self._sentinel_service_name = self.get_option('_sentinel_service_name')
|
||||||
except KeyError:
|
except KeyError:
|
||||||
display.deprecated('Rather than importing CacheModules directly, '
|
# TODO: remove once we no longer support Ansible 2.9
|
||||||
'use ansible.plugins.loader.cache_loader',
|
if not ansible_base_version.startswith('2.9.'):
|
||||||
version='2.0.0', collection_name='community.general') # was Ansible 2.12
|
raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
|
||||||
if C.CACHE_PLUGIN_CONNECTION:
|
if C.CACHE_PLUGIN_CONNECTION:
|
||||||
uri = C.CACHE_PLUGIN_CONNECTION
|
uri = C.CACHE_PLUGIN_CONNECTION
|
||||||
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
|
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
|
||||||
@@ -216,12 +217,14 @@ class CacheModule(BaseCacheModule):
|
|||||||
self._db.zrem(self._keys_set, key)
|
self._db.zrem(self._keys_set, key)
|
||||||
|
|
||||||
def flush(self):
|
def flush(self):
|
||||||
for key in list(self.keys()):
|
for key in self.keys():
|
||||||
self.delete(key)
|
self.delete(key)
|
||||||
|
|
||||||
def copy(self):
|
def copy(self):
|
||||||
# TODO: there is probably a better way to do this in redis
|
# TODO: there is probably a better way to do this in redis
|
||||||
ret = dict([(k, self.get(k)) for k in self.keys()])
|
ret = dict()
|
||||||
|
for key in self.keys():
|
||||||
|
ret[key] = self.get(key)
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
def __getstate__(self):
|
def __getstate__(self):
|
||||||
|
|||||||
2
plugins/cache/yaml.py
vendored
2
plugins/cache/yaml.py
vendored
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
cache: yaml
|
name: yaml
|
||||||
short_description: YAML formatted files.
|
short_description: YAML formatted files.
|
||||||
description:
|
description:
|
||||||
- This cache uses YAML formatted, per host, files saved to the filesystem.
|
- This cache uses YAML formatted, per host, files saved to the filesystem.
|
||||||
|
|||||||
@@ -1,61 +0,0 @@
|
|||||||
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
|
|
||||||
# (c) 2017 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
# Make coding more python3-ish
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
author: Unknown (!UNKNOWN)
|
|
||||||
callback: actionable
|
|
||||||
type: stdout
|
|
||||||
short_description: shows only items that need attention
|
|
||||||
description:
|
|
||||||
- Use this callback when you dont care about OK nor Skipped.
|
|
||||||
- This callback suppresses any non Failed or Changed status.
|
|
||||||
deprecated:
|
|
||||||
why: The 'default' callback plugin now supports this functionality
|
|
||||||
removed_in: '2.0.0' # was Ansible 2.11
|
|
||||||
alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- default_callback
|
|
||||||
requirements:
|
|
||||||
- set as stdout callback in configuration
|
|
||||||
# Override defaults from 'default' callback plugin
|
|
||||||
options:
|
|
||||||
display_skipped_hosts:
|
|
||||||
name: Show skipped hosts
|
|
||||||
description: "Toggle to control displaying skipped task/host results in a task"
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
env:
|
|
||||||
- name: DISPLAY_SKIPPED_HOSTS
|
|
||||||
deprecated:
|
|
||||||
why: environment variables without "ANSIBLE_" prefix are deprecated
|
|
||||||
version: "2.0.0" # was Ansible 2.12
|
|
||||||
alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
|
|
||||||
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
|
|
||||||
ini:
|
|
||||||
- key: display_skipped_hosts
|
|
||||||
section: defaults
|
|
||||||
display_ok_hosts:
|
|
||||||
name: Show 'ok' hosts
|
|
||||||
description: "Toggle to control displaying 'ok' task/host results in a task"
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
env:
|
|
||||||
- name: ANSIBLE_DISPLAY_OK_HOSTS
|
|
||||||
ini:
|
|
||||||
- key: display_ok_hosts
|
|
||||||
section: defaults
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
|
||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackModule_default):
|
|
||||||
|
|
||||||
CALLBACK_VERSION = 2.0
|
|
||||||
CALLBACK_TYPE = 'stdout'
|
|
||||||
CALLBACK_NAME = 'community.general.actionable'
|
|
||||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: cgroup_memory_recap
|
name: cgroup_memory_recap
|
||||||
type: aggregate
|
type: aggregate
|
||||||
requirements:
|
requirements:
|
||||||
- whitelist in configuration
|
- whitelist in configuration
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: context_demo
|
name: context_demo
|
||||||
type: aggregate
|
type: aggregate
|
||||||
short_description: demo callback that adds play/task context
|
short_description: demo callback that adds play/task context
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: counter_enabled
|
name: counter_enabled
|
||||||
type: stdout
|
type: stdout
|
||||||
short_description: adds counters to the output items (tasks and hosts/task)
|
short_description: adds counters to the output items (tasks and hosts/task)
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
callback: dense
|
name: dense
|
||||||
type: stdout
|
type: stdout
|
||||||
short_description: minimal stdout output
|
short_description: minimal stdout output
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
callback: diy
|
name: diy
|
||||||
type: stdout
|
type: stdout
|
||||||
short_description: Customize the output
|
short_description: Customize the output
|
||||||
version_added: 0.2.0
|
version_added: 0.2.0
|
||||||
@@ -1013,7 +1013,7 @@ class CallbackModule(Default):
|
|||||||
for attr in _stats_attributes:
|
for attr in _stats_attributes:
|
||||||
_ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
|
_ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
|
||||||
|
|
||||||
_ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())})
|
_ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})
|
||||||
|
|
||||||
return _ret
|
return _ret
|
||||||
|
|
||||||
|
|||||||
@@ -1,76 +0,0 @@
|
|||||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
|
||||||
# (c) 2017 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
# Make coding more python3-ish
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
author: Unknown (!UNKNOWN)
|
|
||||||
callback: full_skip
|
|
||||||
type: stdout
|
|
||||||
short_description: suppresses tasks if all hosts skipped
|
|
||||||
description:
|
|
||||||
- Use this plugin when you do not care about any output for tasks that were completely skipped
|
|
||||||
deprecated:
|
|
||||||
why: The 'default' callback plugin now supports this functionality
|
|
||||||
removed_in: '2.0.0' # was Ansible 2.11
|
|
||||||
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- default_callback
|
|
||||||
requirements:
|
|
||||||
- set as stdout in configuration
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
|
||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackModule_default):
|
|
||||||
|
|
||||||
'''
|
|
||||||
This is the default callback interface, which simply prints messages
|
|
||||||
to stdout when new callback events are received.
|
|
||||||
'''
|
|
||||||
|
|
||||||
CALLBACK_VERSION = 2.0
|
|
||||||
CALLBACK_TYPE = 'stdout'
|
|
||||||
CALLBACK_NAME = 'community.general.full_skip'
|
|
||||||
|
|
||||||
def v2_runner_on_skipped(self, result):
|
|
||||||
self.outlines = []
|
|
||||||
|
|
||||||
def v2_playbook_item_on_skipped(self, result):
|
|
||||||
self.outlines = []
|
|
||||||
|
|
||||||
def v2_runner_item_on_skipped(self, result):
|
|
||||||
self.outlines = []
|
|
||||||
|
|
||||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
|
||||||
self.display()
|
|
||||||
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
|
|
||||||
|
|
||||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
|
||||||
self.outlines = []
|
|
||||||
self.outlines.append("TASK [%s]" % task.get_name().strip())
|
|
||||||
if self._display.verbosity >= 2:
|
|
||||||
path = task.get_path()
|
|
||||||
if path:
|
|
||||||
self.outlines.append("task path: %s" % path)
|
|
||||||
|
|
||||||
def v2_playbook_item_on_ok(self, result):
|
|
||||||
self.display()
|
|
||||||
super(CallbackModule, self).v2_playbook_item_on_ok(result)
|
|
||||||
|
|
||||||
def v2_runner_on_ok(self, result):
|
|
||||||
self.display()
|
|
||||||
super(CallbackModule, self).v2_runner_on_ok(result)
|
|
||||||
|
|
||||||
def display(self):
|
|
||||||
if len(self.outlines) == 0:
|
|
||||||
return
|
|
||||||
(first, rest) = self.outlines[0], self.outlines[1:]
|
|
||||||
self._display.banner(first)
|
|
||||||
for line in rest:
|
|
||||||
self._display.display(line)
|
|
||||||
self.outlines = []
|
|
||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: hipchat
|
name: hipchat
|
||||||
type: notification
|
type: notification
|
||||||
requirements:
|
requirements:
|
||||||
- whitelist in configuration.
|
- whitelist in configuration.
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: jabber
|
name: jabber
|
||||||
type: notification
|
type: notification
|
||||||
short_description: post task events to a jabber server
|
short_description: post task events to a jabber server
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: log_plays
|
name: log_plays
|
||||||
type: notification
|
type: notification
|
||||||
short_description: write playbook output to log file
|
short_description: write playbook output to log file
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: logdna
|
name: logdna
|
||||||
type: aggregate
|
type: aggregate
|
||||||
short_description: Sends playbook logs to LogDNA
|
short_description: Sends playbook logs to LogDNA
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: logentries
|
name: logentries
|
||||||
type: notification
|
type: notification
|
||||||
short_description: Sends events to Logentries
|
short_description: Sends events to Logentries
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -1,13 +1,13 @@
|
|||||||
# (C) 2016, Ievgen Khmelenko <ujenmr@gmail.com>
|
# (C) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
|
||||||
# (C) 2017 Ansible Project
|
# (C) 2017 Ansible Project
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
from __future__ import (absolute_import, division, print_function)
|
||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = r'''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Yevhen Khmelenko (@ujenmr)
|
||||||
callback: logstash
|
name: logstash
|
||||||
type: notification
|
type: notification
|
||||||
short_description: Sends events to Logstash
|
short_description: Sends events to Logstash
|
||||||
description:
|
description:
|
||||||
@@ -43,15 +43,60 @@ DOCUMENTATION = '''
|
|||||||
key: type
|
key: type
|
||||||
version_added: 1.0.0
|
version_added: 1.0.0
|
||||||
default: ansible
|
default: ansible
|
||||||
|
pre_command:
|
||||||
|
description: Executes command before run and result put to ansible_pre_command_output field.
|
||||||
|
version_added: 2.0.0
|
||||||
|
ini:
|
||||||
|
- section: callback_logstash
|
||||||
|
key: pre_command
|
||||||
|
env:
|
||||||
|
- name: LOGSTASH_PRE_COMMAND
|
||||||
|
format_version:
|
||||||
|
description: Logging format
|
||||||
|
type: str
|
||||||
|
version_added: 2.0.0
|
||||||
|
ini:
|
||||||
|
- section: callback_logstash
|
||||||
|
key: format_version
|
||||||
|
env:
|
||||||
|
- name: LOGSTASH_FORMAT_VERSION
|
||||||
|
default: v1
|
||||||
|
choices:
|
||||||
|
- v1
|
||||||
|
- v2
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = r'''
|
||||||
|
ansible.cfg: |
|
||||||
|
# Enable Callback plugin
|
||||||
|
[defaults]
|
||||||
|
callback_whitelist = community.general.logstash
|
||||||
|
|
||||||
|
[callback_logstash]
|
||||||
|
server = logstash.example.com
|
||||||
|
port = 5000
|
||||||
|
pre_command = git rev-parse HEAD
|
||||||
|
type = ansible
|
||||||
|
|
||||||
|
11-input-tcp.conf: |
|
||||||
|
# Enable Logstash TCP Input
|
||||||
|
input {
|
||||||
|
tcp {
|
||||||
|
port => 5000
|
||||||
|
codec => json
|
||||||
|
add_field => { "[@metadata][beat]" => "notify" }
|
||||||
|
add_field => { "[@metadata][type]" => "ansible" }
|
||||||
|
}
|
||||||
|
}
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import json
|
import json
|
||||||
import socket
|
import socket
|
||||||
import uuid
|
import uuid
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import logstash
|
import logstash
|
||||||
@@ -63,76 +108,78 @@ from ansible.plugins.callback import CallbackBase
|
|||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackBase):
|
class CallbackModule(CallbackBase):
|
||||||
"""
|
|
||||||
ansible logstash callback plugin
|
|
||||||
ansible.cfg:
|
|
||||||
callback_plugins = <path_to_callback_plugins_folder>
|
|
||||||
callback_whitelist = logstash
|
|
||||||
and put the plugin in <path_to_callback_plugins_folder>
|
|
||||||
|
|
||||||
logstash config:
|
|
||||||
input {
|
|
||||||
tcp {
|
|
||||||
port => 5000
|
|
||||||
codec => json
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Requires:
|
|
||||||
python-logstash
|
|
||||||
|
|
||||||
This plugin makes use of the following environment variables or ini config:
|
|
||||||
LOGSTASH_SERVER (optional): defaults to localhost
|
|
||||||
LOGSTASH_PORT (optional): defaults to 5000
|
|
||||||
LOGSTASH_TYPE (optional): defaults to ansible
|
|
||||||
"""
|
|
||||||
|
|
||||||
CALLBACK_VERSION = 2.0
|
CALLBACK_VERSION = 2.0
|
||||||
CALLBACK_TYPE = 'aggregate'
|
CALLBACK_TYPE = 'aggregate'
|
||||||
CALLBACK_NAME = 'community.general.logstash'
|
CALLBACK_NAME = 'community.general.logstash'
|
||||||
CALLBACK_NEEDS_WHITELIST = True
|
CALLBACK_NEEDS_WHITELIST = True
|
||||||
|
|
||||||
def __init__(self, display=None):
|
def __init__(self):
|
||||||
super(CallbackModule, self).__init__(display=display)
|
super(CallbackModule, self).__init__()
|
||||||
|
|
||||||
if not HAS_LOGSTASH:
|
if not HAS_LOGSTASH:
|
||||||
self.disabled = True
|
self.disabled = True
|
||||||
self._display.warning("The required python-logstash is not installed. "
|
self._display.warning("The required python-logstash/python3-logstash is not installed. "
|
||||||
"pip install python-logstash")
|
"pip install python-logstash for Python 2"
|
||||||
|
"pip install python3-logstash for Python 3")
|
||||||
|
|
||||||
self.start_time = datetime.utcnow()
|
self.start_time = datetime.utcnow()
|
||||||
|
|
||||||
def set_options(self, task_keys=None, var_options=None, direct=None):
|
def _init_plugin(self):
|
||||||
|
if not self.disabled:
|
||||||
|
self.logger = logging.getLogger('python-logstash-logger')
|
||||||
|
self.logger.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
self.handler = logstash.TCPLogstashHandler(
|
||||||
|
self.ls_server,
|
||||||
|
self.ls_port,
|
||||||
|
version=1,
|
||||||
|
message_type=self.ls_type
|
||||||
|
)
|
||||||
|
|
||||||
|
self.logger.addHandler(self.handler)
|
||||||
|
self.hostname = socket.gethostname()
|
||||||
|
self.session = str(uuid.uuid4())
|
||||||
|
self.errors = 0
|
||||||
|
|
||||||
|
self.base_data = {
|
||||||
|
'session': self.session,
|
||||||
|
'host': self.hostname
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.ls_pre_command is not None:
|
||||||
|
self.base_data['ansible_pre_command_output'] = os.popen(
|
||||||
|
self.ls_pre_command).read()
|
||||||
|
|
||||||
|
if self._options is not None:
|
||||||
|
self.base_data['ansible_checkmode'] = self._options.check
|
||||||
|
self.base_data['ansible_tags'] = self._options.tags
|
||||||
|
self.base_data['ansible_skip_tags'] = self._options.skip_tags
|
||||||
|
self.base_data['inventory'] = self._options.inventory
|
||||||
|
|
||||||
|
def set_options(self, task_keys=None, var_options=None, direct=None):
|
||||||
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
|
||||||
|
|
||||||
self.logger = logging.getLogger('python-logstash-logger')
|
self.ls_server = self.get_option('server')
|
||||||
self.logger.setLevel(logging.DEBUG)
|
self.ls_port = int(self.get_option('port'))
|
||||||
|
self.ls_type = self.get_option('type')
|
||||||
|
self.ls_pre_command = self.get_option('pre_command')
|
||||||
|
self.ls_format_version = self.get_option('format_version')
|
||||||
|
|
||||||
self.logstash_server = self.get_option('server')
|
self._init_plugin()
|
||||||
self.logstash_port = self.get_option('port')
|
|
||||||
self.logstash_type = self.get_option('type')
|
|
||||||
self.handler = logstash.TCPLogstashHandler(
|
|
||||||
self.logstash_server,
|
|
||||||
int(self.logstash_port),
|
|
||||||
version=1,
|
|
||||||
message_type=self.logstash_type
|
|
||||||
)
|
|
||||||
self.logger.addHandler(self.handler)
|
|
||||||
self.hostname = socket.gethostname()
|
|
||||||
self.session = str(uuid.uuid1())
|
|
||||||
self.errors = 0
|
|
||||||
|
|
||||||
def v2_playbook_on_start(self, playbook):
|
def v2_playbook_on_start(self, playbook):
|
||||||
self.playbook = playbook._file_name
|
data = self.base_data.copy()
|
||||||
data = {
|
data['ansible_type'] = "start"
|
||||||
'status': "OK",
|
data['status'] = "OK"
|
||||||
'host': self.hostname,
|
data['ansible_playbook'] = playbook._file_name
|
||||||
'session': self.session,
|
|
||||||
'ansible_type': "start",
|
if (self.ls_format_version == "v2"):
|
||||||
'ansible_playbook': self.playbook,
|
self.logger.info(
|
||||||
}
|
"START PLAYBOOK | %s", data['ansible_playbook'], extra=data
|
||||||
self.logger.info("ansible start", extra=data)
|
)
|
||||||
|
else:
|
||||||
|
self.logger.info("ansible start", extra=data)
|
||||||
|
|
||||||
def v2_playbook_on_stats(self, stats):
|
def v2_playbook_on_stats(self, stats):
|
||||||
end_time = datetime.utcnow()
|
end_time = datetime.utcnow()
|
||||||
@@ -146,103 +193,201 @@ class CallbackModule(CallbackBase):
|
|||||||
else:
|
else:
|
||||||
status = "FAILED"
|
status = "FAILED"
|
||||||
|
|
||||||
data = {
|
data = self.base_data.copy()
|
||||||
'status': status,
|
data['ansible_type'] = "finish"
|
||||||
'host': self.hostname,
|
data['status'] = status
|
||||||
'session': self.session,
|
data['ansible_playbook_duration'] = runtime.total_seconds()
|
||||||
'ansible_type': "finish",
|
data['ansible_result'] = json.dumps(summarize_stat) # deprecated field
|
||||||
'ansible_playbook': self.playbook,
|
|
||||||
'ansible_playbook_duration': runtime.total_seconds(),
|
if (self.ls_format_version == "v2"):
|
||||||
'ansible_result': json.dumps(summarize_stat),
|
self.logger.info(
|
||||||
}
|
"FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
|
||||||
self.logger.info("ansible stats", extra=data)
|
)
|
||||||
|
else:
|
||||||
|
self.logger.info("ansible stats", extra=data)
|
||||||
|
|
||||||
|
def v2_playbook_on_play_start(self, play):
|
||||||
|
self.play_id = str(play._uuid)
|
||||||
|
|
||||||
|
if play.name:
|
||||||
|
self.play_name = play.name
|
||||||
|
|
||||||
|
data = self.base_data.copy()
|
||||||
|
data['ansible_type'] = "start"
|
||||||
|
data['status'] = "OK"
|
||||||
|
data['ansible_play_id'] = self.play_id
|
||||||
|
data['ansible_play_name'] = self.play_name
|
||||||
|
|
||||||
|
if (self.ls_format_version == "v2"):
|
||||||
|
self.logger.info("START PLAY | %s", self.play_name, extra=data)
|
||||||
|
else:
|
||||||
|
self.logger.info("ansible play", extra=data)
|
||||||
|
|
||||||
|
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||||
|
self.task_id = str(task._uuid)
|
||||||
|
|
||||||
|
'''
|
||||||
|
Tasks and handler tasks are dealt with here
|
||||||
|
'''
|
||||||
|
|
||||||
def v2_runner_on_ok(self, result, **kwargs):
|
def v2_runner_on_ok(self, result, **kwargs):
|
||||||
data = {
|
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
||||||
'status': "OK",
|
|
||||||
'host': self.hostname,
|
data = self.base_data.copy()
|
||||||
'session': self.session,
|
if task_name == 'setup':
|
||||||
'ansible_type': "task",
|
data['ansible_type'] = "setup"
|
||||||
'ansible_playbook': self.playbook,
|
data['status'] = "OK"
|
||||||
'ansible_host': result._host.name,
|
data['ansible_host'] = result._host.name
|
||||||
'ansible_task': result._task,
|
data['ansible_play_id'] = self.play_id
|
||||||
'ansible_result': self._dump_results(result._result)
|
data['ansible_play_name'] = self.play_name
|
||||||
}
|
data['ansible_task'] = task_name
|
||||||
self.logger.info("ansible ok", extra=data)
|
data['ansible_facts'] = self._dump_results(result._result)
|
||||||
|
|
||||||
|
if (self.ls_format_version == "v2"):
|
||||||
|
self.logger.info(
|
||||||
|
"SETUP FACTS | %s", self._dump_results(result._result), extra=data
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.logger.info("ansible facts", extra=data)
|
||||||
|
else:
|
||||||
|
if 'changed' in result._result.keys():
|
||||||
|
data['ansible_changed'] = result._result['changed']
|
||||||
|
else:
|
||||||
|
data['ansible_changed'] = False
|
||||||
|
|
||||||
|
data['ansible_type'] = "task"
|
||||||
|
data['status'] = "OK"
|
||||||
|
data['ansible_host'] = result._host.name
|
||||||
|
data['ansible_play_id'] = self.play_id
|
||||||
|
data['ansible_play_name'] = self.play_name
|
||||||
|
data['ansible_task'] = task_name
|
||||||
|
data['ansible_task_id'] = self.task_id
|
||||||
|
data['ansible_result'] = self._dump_results(result._result)
|
||||||
|
|
||||||
|
if (self.ls_format_version == "v2"):
|
||||||
|
self.logger.info(
|
||||||
|
"TASK OK | %s | RESULT | %s",
|
||||||
|
task_name, self._dump_results(result._result), extra=data
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.logger.info("ansible ok", extra=data)
|
||||||
|
|
||||||
def v2_runner_on_skipped(self, result, **kwargs):
|
def v2_runner_on_skipped(self, result, **kwargs):
|
||||||
data = {
|
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
||||||
'status': "SKIPPED",
|
|
||||||
'host': self.hostname,
|
data = self.base_data.copy()
|
||||||
'session': self.session,
|
data['ansible_type'] = "task"
|
||||||
'ansible_type': "task",
|
data['status'] = "SKIPPED"
|
||||||
'ansible_playbook': self.playbook,
|
data['ansible_host'] = result._host.name
|
||||||
'ansible_task': result._task,
|
data['ansible_play_id'] = self.play_id
|
||||||
'ansible_host': result._host.name
|
data['ansible_play_name'] = self.play_name
|
||||||
}
|
data['ansible_task'] = task_name
|
||||||
self.logger.info("ansible skipped", extra=data)
|
data['ansible_task_id'] = self.task_id
|
||||||
|
data['ansible_result'] = self._dump_results(result._result)
|
||||||
|
|
||||||
|
if (self.ls_format_version == "v2"):
|
||||||
|
self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
|
||||||
|
else:
|
||||||
|
self.logger.info("ansible skipped", extra=data)
|
||||||
|
|
||||||
def v2_playbook_on_import_for_host(self, result, imported_file):
|
def v2_playbook_on_import_for_host(self, result, imported_file):
|
||||||
data = {
|
data = self.base_data.copy()
|
||||||
'status': "IMPORTED",
|
data['ansible_type'] = "import"
|
||||||
'host': self.hostname,
|
data['status'] = "IMPORTED"
|
||||||
'session': self.session,
|
data['ansible_host'] = result._host.name
|
||||||
'ansible_type': "import",
|
data['ansible_play_id'] = self.play_id
|
||||||
'ansible_playbook': self.playbook,
|
data['ansible_play_name'] = self.play_name
|
||||||
'ansible_host': result._host.name,
|
data['imported_file'] = imported_file
|
||||||
'imported_file': imported_file
|
|
||||||
}
|
if (self.ls_format_version == "v2"):
|
||||||
self.logger.info("ansible import", extra=data)
|
self.logger.info("IMPORT | %s", imported_file, extra=data)
|
||||||
|
else:
|
||||||
|
self.logger.info("ansible import", extra=data)
|
||||||
|
|
||||||
def v2_playbook_on_not_import_for_host(self, result, missing_file):
|
def v2_playbook_on_not_import_for_host(self, result, missing_file):
|
||||||
data = {
|
data = self.base_data.copy()
|
||||||
'status': "NOT IMPORTED",
|
data['ansible_type'] = "import"
|
||||||
'host': self.hostname,
|
data['status'] = "NOT IMPORTED"
|
||||||
'session': self.session,
|
data['ansible_host'] = result._host.name
|
||||||
'ansible_type': "import",
|
data['ansible_play_id'] = self.play_id
|
||||||
'ansible_playbook': self.playbook,
|
data['ansible_play_name'] = self.play_name
|
||||||
'ansible_host': result._host.name,
|
data['imported_file'] = missing_file
|
||||||
'missing_file': missing_file
|
|
||||||
}
|
if (self.ls_format_version == "v2"):
|
||||||
self.logger.info("ansible import", extra=data)
|
self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
|
||||||
|
else:
|
||||||
|
self.logger.info("ansible import", extra=data)
|
||||||
|
|
||||||
def v2_runner_on_failed(self, result, **kwargs):
|
def v2_runner_on_failed(self, result, **kwargs):
|
||||||
data = {
|
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
||||||
'status': "FAILED",
|
|
||||||
'host': self.hostname,
|
data = self.base_data.copy()
|
||||||
'session': self.session,
|
if 'changed' in result._result.keys():
|
||||||
'ansible_type': "task",
|
data['ansible_changed'] = result._result['changed']
|
||||||
'ansible_playbook': self.playbook,
|
else:
|
||||||
'ansible_host': result._host.name,
|
data['ansible_changed'] = False
|
||||||
'ansible_task': result._task,
|
|
||||||
'ansible_result': self._dump_results(result._result)
|
data['ansible_type'] = "task"
|
||||||
}
|
data['status'] = "FAILED"
|
||||||
|
data['ansible_host'] = result._host.name
|
||||||
|
data['ansible_play_id'] = self.play_id
|
||||||
|
data['ansible_play_name'] = self.play_name
|
||||||
|
data['ansible_task'] = task_name
|
||||||
|
data['ansible_task_id'] = self.task_id
|
||||||
|
data['ansible_result'] = self._dump_results(result._result)
|
||||||
|
|
||||||
self.errors += 1
|
self.errors += 1
|
||||||
self.logger.error("ansible failed", extra=data)
|
if (self.ls_format_version == "v2"):
|
||||||
|
self.logger.error(
|
||||||
|
"TASK FAILED | %s | HOST | %s | RESULT | %s",
|
||||||
|
task_name, self.hostname,
|
||||||
|
self._dump_results(result._result), extra=data
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.logger.error("ansible failed", extra=data)
|
||||||
|
|
||||||
def v2_runner_on_unreachable(self, result, **kwargs):
|
def v2_runner_on_unreachable(self, result, **kwargs):
|
||||||
data = {
|
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
||||||
'status': "UNREACHABLE",
|
|
||||||
'host': self.hostname,
|
data = self.base_data.copy()
|
||||||
'session': self.session,
|
data['ansible_type'] = "task"
|
||||||
'ansible_type': "task",
|
data['status'] = "UNREACHABLE"
|
||||||
'ansible_playbook': self.playbook,
|
data['ansible_host'] = result._host.name
|
||||||
'ansible_host': result._host.name,
|
data['ansible_play_id'] = self.play_id
|
||||||
'ansible_task': result._task,
|
data['ansible_play_name'] = self.play_name
|
||||||
'ansible_result': self._dump_results(result._result)
|
data['ansible_task'] = task_name
|
||||||
}
|
data['ansible_task_id'] = self.task_id
|
||||||
self.logger.error("ansible unreachable", extra=data)
|
data['ansible_result'] = self._dump_results(result._result)
|
||||||
|
|
||||||
|
self.errors += 1
|
||||||
|
if (self.ls_format_version == "v2"):
|
||||||
|
self.logger.error(
|
||||||
|
"UNREACHABLE | %s | HOST | %s | RESULT | %s",
|
||||||
|
task_name, self.hostname,
|
||||||
|
self._dump_results(result._result), extra=data
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.logger.error("ansible unreachable", extra=data)
|
||||||
|
|
||||||
def v2_runner_on_async_failed(self, result, **kwargs):
|
def v2_runner_on_async_failed(self, result, **kwargs):
|
||||||
data = {
|
task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
|
||||||
'status': "FAILED",
|
|
||||||
'host': self.hostname,
|
data = self.base_data.copy()
|
||||||
'session': self.session,
|
data['ansible_type'] = "task"
|
||||||
'ansible_type': "task",
|
data['status'] = "FAILED"
|
||||||
'ansible_playbook': self.playbook,
|
data['ansible_host'] = result._host.name
|
||||||
'ansible_host': result._host.name,
|
data['ansible_play_id'] = self.play_id
|
||||||
'ansible_task': result._task,
|
data['ansible_play_name'] = self.play_name
|
||||||
'ansible_result': self._dump_results(result._result)
|
data['ansible_task'] = task_name
|
||||||
}
|
data['ansible_task_id'] = self.task_id
|
||||||
|
data['ansible_result'] = self._dump_results(result._result)
|
||||||
|
|
||||||
self.errors += 1
|
self.errors += 1
|
||||||
self.logger.error("ansible async", extra=data)
|
if (self.ls_format_version == "v2"):
|
||||||
|
self.logger.error(
|
||||||
|
"ASYNC FAILED | %s | HOST | %s | RESULT | %s",
|
||||||
|
task_name, self.hostname,
|
||||||
|
self._dump_results(result._result), extra=data
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.logger.error("ansible async", extra=data)
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
callback: mail
|
name: mail
|
||||||
type: notification
|
type: notification
|
||||||
short_description: Sends failure events via email
|
short_description: Sends failure events via email
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
callback: nrdp
|
name: nrdp
|
||||||
type: notification
|
type: notification
|
||||||
author: "Remi VERCHERE (@rverchere)"
|
author: "Remi VERCHERE (@rverchere)"
|
||||||
short_description: post task result to a nagios server through nrdp
|
short_description: post task result to a nagios server through nrdp
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: 'null'
|
name: 'null'
|
||||||
type: stdout
|
type: stdout
|
||||||
requirements:
|
requirements:
|
||||||
- set as main display callback
|
- set as main display callback
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: say
|
name: say
|
||||||
type: notification
|
type: notification
|
||||||
requirements:
|
requirements:
|
||||||
- whitelisting in configuration
|
- whitelisting in configuration
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: selective
|
name: selective
|
||||||
type: stdout
|
type: stdout
|
||||||
requirements:
|
requirements:
|
||||||
- set as main display callback
|
- set as main display callback
|
||||||
@@ -67,7 +67,7 @@ COLORS = {
|
|||||||
|
|
||||||
def dict_diff(prv, nxt):
|
def dict_diff(prv, nxt):
|
||||||
"""Return a dict of keys that differ with another config object."""
|
"""Return a dict of keys that differ with another config object."""
|
||||||
keys = set(list(prv.keys()) + list(nxt.keys()))
|
keys = set(prv.keys() + nxt.keys())
|
||||||
result = {}
|
result = {}
|
||||||
for k in keys:
|
for k in keys:
|
||||||
if prv.get(k) != nxt.get(k):
|
if prv.get(k) != nxt.get(k):
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: slack
|
name: slack
|
||||||
type: notification
|
type: notification
|
||||||
requirements:
|
requirements:
|
||||||
- whitelist in configuration
|
- whitelist in configuration
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
callback: splunk
|
name: splunk
|
||||||
type: aggregate
|
type: aggregate
|
||||||
short_description: Sends task result events to Splunk HTTP Event Collector
|
short_description: Sends task result events to Splunk HTTP Event Collector
|
||||||
author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
|
author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
|
||||||
@@ -57,6 +57,17 @@ DOCUMENTATION = '''
|
|||||||
type: bool
|
type: bool
|
||||||
default: true
|
default: true
|
||||||
version_added: '1.0.0'
|
version_added: '1.0.0'
|
||||||
|
include_milliseconds:
|
||||||
|
description: Whether to include milliseconds as part of the generated timestamp field in the event
|
||||||
|
sent to the Splunk HTTP collector
|
||||||
|
env:
|
||||||
|
- name: SPLUNK_INCLUDE_MILLISECONDS
|
||||||
|
ini:
|
||||||
|
- section: callback_splunk
|
||||||
|
key: include_milliseconds
|
||||||
|
type: bool
|
||||||
|
default: false
|
||||||
|
version_added: 2.0.0
|
||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = '''
|
EXAMPLES = '''
|
||||||
@@ -96,7 +107,7 @@ class SplunkHTTPCollectorSource(object):
|
|||||||
self.ip_address = socket.gethostbyname(socket.gethostname())
|
self.ip_address = socket.gethostbyname(socket.gethostname())
|
||||||
self.user = getpass.getuser()
|
self.user = getpass.getuser()
|
||||||
|
|
||||||
def send_event(self, url, authtoken, validate_certs, state, result, runtime):
|
def send_event(self, url, authtoken, validate_certs, include_milliseconds, state, result, runtime):
|
||||||
if result._task_fields['args'].get('_ansible_check_mode') is True:
|
if result._task_fields['args'].get('_ansible_check_mode') is True:
|
||||||
self.ansible_check_mode = True
|
self.ansible_check_mode = True
|
||||||
|
|
||||||
@@ -116,8 +127,13 @@ class SplunkHTTPCollectorSource(object):
|
|||||||
data['uuid'] = result._task._uuid
|
data['uuid'] = result._task._uuid
|
||||||
data['session'] = self.session
|
data['session'] = self.session
|
||||||
data['status'] = state
|
data['status'] = state
|
||||||
data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
|
|
||||||
'+0000')
|
if include_milliseconds:
|
||||||
|
time_format = '%Y-%m-%d %H:%M:%S.%f +0000'
|
||||||
|
else:
|
||||||
|
time_format = '%Y-%m-%d %H:%M:%S +0000'
|
||||||
|
|
||||||
|
data['timestamp'] = datetime.utcnow().strftime(time_format)
|
||||||
data['host'] = self.host
|
data['host'] = self.host
|
||||||
data['ip_address'] = self.ip_address
|
data['ip_address'] = self.ip_address
|
||||||
data['user'] = self.user
|
data['user'] = self.user
|
||||||
@@ -158,6 +174,7 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url = None
|
self.url = None
|
||||||
self.authtoken = None
|
self.authtoken = None
|
||||||
self.validate_certs = None
|
self.validate_certs = None
|
||||||
|
self.include_milliseconds = None
|
||||||
self.splunk = SplunkHTTPCollectorSource()
|
self.splunk = SplunkHTTPCollectorSource()
|
||||||
|
|
||||||
def _runtime(self, result):
|
def _runtime(self, result):
|
||||||
@@ -193,6 +210,8 @@ class CallbackModule(CallbackBase):
|
|||||||
|
|
||||||
self.validate_certs = self.get_option('validate_certs')
|
self.validate_certs = self.get_option('validate_certs')
|
||||||
|
|
||||||
|
self.include_milliseconds = self.get_option('include_milliseconds')
|
||||||
|
|
||||||
def v2_playbook_on_start(self, playbook):
|
def v2_playbook_on_start(self, playbook):
|
||||||
self.splunk.ansible_playbook = basename(playbook._file_name)
|
self.splunk.ansible_playbook = basename(playbook._file_name)
|
||||||
|
|
||||||
@@ -207,6 +226,7 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
|
self.include_milliseconds,
|
||||||
'OK',
|
'OK',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -217,6 +237,7 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
|
self.include_milliseconds,
|
||||||
'SKIPPED',
|
'SKIPPED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -227,6 +248,7 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
|
self.include_milliseconds,
|
||||||
'FAILED',
|
'FAILED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -237,6 +259,7 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
|
self.include_milliseconds,
|
||||||
'FAILED',
|
'FAILED',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
@@ -247,6 +270,7 @@ class CallbackModule(CallbackBase):
|
|||||||
self.url,
|
self.url,
|
||||||
self.authtoken,
|
self.authtoken,
|
||||||
self.validate_certs,
|
self.validate_certs,
|
||||||
|
self.include_milliseconds,
|
||||||
'UNREACHABLE',
|
'UNREACHABLE',
|
||||||
result,
|
result,
|
||||||
self._runtime(result)
|
self._runtime(result)
|
||||||
|
|||||||
@@ -1,71 +0,0 @@
|
|||||||
# (c) 2017, Frederic Van Espen <github@freh.be>
|
|
||||||
# (c) 2017 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
# Make coding more python3-ish
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
author: Unknown (!UNKNOWN)
|
|
||||||
callback: stderr
|
|
||||||
type: stdout
|
|
||||||
requirements:
|
|
||||||
- set as main display callback
|
|
||||||
short_description: Splits output, sending failed tasks to stderr
|
|
||||||
deprecated:
|
|
||||||
why: The 'default' callback plugin now supports this functionality
|
|
||||||
removed_in: '2.0.0' # was Ansible 2.11
|
|
||||||
alternative: "'default' callback plugin with 'display_failed_stderr = yes' option"
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- default_callback
|
|
||||||
description:
|
|
||||||
- This is the stderr callback plugin, it behaves like the default callback plugin but sends error output to stderr.
|
|
||||||
- Also it does not output skipped host/task/item status
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible import constants as C
|
|
||||||
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
|
||||||
|
|
||||||
|
|
||||||
class CallbackModule(CallbackModule_default):
|
|
||||||
|
|
||||||
'''
|
|
||||||
This is the stderr callback plugin, which reuses the default
|
|
||||||
callback plugin but sends error output to stderr.
|
|
||||||
'''
|
|
||||||
|
|
||||||
CALLBACK_VERSION = 2.0
|
|
||||||
CALLBACK_TYPE = 'stdout'
|
|
||||||
CALLBACK_NAME = 'community.general.stderr'
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
|
|
||||||
self.super_ref = super(CallbackModule, self)
|
|
||||||
self.super_ref.__init__()
|
|
||||||
|
|
||||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
|
||||||
|
|
||||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
|
||||||
self._clean_results(result._result, result._task.action)
|
|
||||||
|
|
||||||
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
|
|
||||||
self._print_task_banner(result._task)
|
|
||||||
|
|
||||||
self._handle_exception(result._result, use_stderr=True)
|
|
||||||
self._handle_warnings(result._result)
|
|
||||||
|
|
||||||
if result._task.loop and 'results' in result._result:
|
|
||||||
self._process_items(result)
|
|
||||||
|
|
||||||
else:
|
|
||||||
if delegated_vars:
|
|
||||||
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
|
|
||||||
self._dump_results(result._result)), color=C.COLOR_ERROR,
|
|
||||||
stderr=True)
|
|
||||||
else:
|
|
||||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
|
|
||||||
color=C.COLOR_ERROR, stderr=True)
|
|
||||||
|
|
||||||
if ignore_errors:
|
|
||||||
self._display.display("...ignoring", color=C.COLOR_SKIP)
|
|
||||||
@@ -18,7 +18,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
callback: sumologic
|
name: sumologic
|
||||||
type: aggregate
|
type: aggregate
|
||||||
short_description: Sends task result events to Sumologic
|
short_description: Sends task result events to Sumologic
|
||||||
author: "Ryan Currah (@ryancurrah)"
|
author: "Ryan Currah (@ryancurrah)"
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: syslog_json
|
name: syslog_json
|
||||||
type: notification
|
type: notification
|
||||||
requirements:
|
requirements:
|
||||||
- whitelist in configuration
|
- whitelist in configuration
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
callback: unixy
|
name: unixy
|
||||||
type: stdout
|
type: stdout
|
||||||
author: Allyson Bowles (@akatch)
|
author: Allyson Bowles (@akatch)
|
||||||
short_description: condensed Ansible output
|
short_description: condensed Ansible output
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
callback: yaml
|
name: yaml
|
||||||
type: stdout
|
type: stdout
|
||||||
short_description: yaml-ized Ansible screen output
|
short_description: yaml-ized Ansible screen output
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
|
author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
|
||||||
connection: chroot
|
name: chroot
|
||||||
short_description: Interact with local chroot
|
short_description: Interact with local chroot
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing chroot on the Ansible controller.
|
- Run commands or put/fetch files to an existing chroot on the Ansible controller.
|
||||||
|
|||||||
@@ -1,366 +0,0 @@
|
|||||||
# Based on the chroot connection plugin by Maykel Moya
|
|
||||||
#
|
|
||||||
# (c) 2014, Lorin Hochstein
|
|
||||||
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
|
|
||||||
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
|
|
||||||
# Copyright (c) 2017 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
author:
|
|
||||||
- Lorin Hochestein (!UNKNOWN)
|
|
||||||
- Leendert Brouwer (!UNKNOWN)
|
|
||||||
connection: docker
|
|
||||||
short_description: Run tasks in docker containers
|
|
||||||
description:
|
|
||||||
- Run commands or put/fetch files to an existing docker container.
|
|
||||||
options:
|
|
||||||
remote_user:
|
|
||||||
description:
|
|
||||||
- The user to execute as inside the container
|
|
||||||
vars:
|
|
||||||
- name: ansible_user
|
|
||||||
- name: ansible_docker_user
|
|
||||||
docker_extra_args:
|
|
||||||
description:
|
|
||||||
- Extra arguments to pass to the docker command line
|
|
||||||
default: ''
|
|
||||||
remote_addr:
|
|
||||||
description:
|
|
||||||
- The name of the container you want to access.
|
|
||||||
default: inventory_hostname
|
|
||||||
vars:
|
|
||||||
- name: ansible_host
|
|
||||||
- name: ansible_docker_host
|
|
||||||
'''
|
|
||||||
|
|
||||||
import distutils.spawn
|
|
||||||
import fcntl
|
|
||||||
import os
|
|
||||||
import os.path
|
|
||||||
import subprocess
|
|
||||||
import re
|
|
||||||
|
|
||||||
from distutils.version import LooseVersion
|
|
||||||
|
|
||||||
import ansible.constants as C
|
|
||||||
from ansible.compat import selectors
|
|
||||||
from ansible.errors import AnsibleError, AnsibleFileNotFound
|
|
||||||
from ansible.module_utils.six.moves import shlex_quote
|
|
||||||
from ansible.module_utils._text import to_bytes, to_native, to_text
|
|
||||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
|
||||||
from ansible.utils.display import Display
|
|
||||||
|
|
||||||
display = Display()
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(ConnectionBase):
    ''' Local docker based connections '''

    transport = 'community.general.docker'
    # `docker exec -i` keeps stdin open, so module pipelining is supported.
    has_pipelining = True

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # Note: docker supports running as non-root in some configurations.
        # (For instance, setting the UNIX socket file to be readable and
        # writable by a specific UNIX group and then putting users into that
        # group). Therefore we don't check that the user is root when using
        # this connection. But if the user is getting a permission denied
        # error it probably means that docker on their system is only
        # configured to be connected to by root and they are not running as
        # root.

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = ('.ps1', '.exe', '')

        if 'docker_command' in kwargs:
            self.docker_cmd = kwargs['docker_command']
        else:
            self.docker_cmd = distutils.spawn.find_executable('docker')
            if not self.docker_cmd:
                raise AnsibleError("docker command not found in PATH")

        docker_version = self._get_docker_version()
        if docker_version == u'dev':
            display.warning(u'Docker version number is "dev". Will assume latest version.')
        if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
            raise AnsibleError('docker connection type requires docker 1.3 or higher')

        # The remote user we will request from docker (if supported)
        self.remote_user = None
        # The actual user which will execute commands in docker (if known)
        self.actual_user = None

        if self._play_context.remote_user is not None:
            if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
                # Support for specifying the exec user was added in docker 1.7
                self.remote_user = self._play_context.remote_user
                self.actual_user = self.remote_user
            else:
                self.actual_user = self._get_docker_remote_user()

                if self.actual_user != self._play_context.remote_user:
                    display.warning(u'docker {0} does not support remote_user, using container default: {1}'
                                    .format(docker_version, self.actual_user or u'?'))
        elif self._display.verbosity > 2:
            # Since we're not setting the actual_user, look it up so we have it for logging later
            # Only do this if display verbosity is high enough that we'll need the value
            # This saves overhead from calling into docker when we don't need to
            self.actual_user = self._get_docker_remote_user()

    @staticmethod
    def _sanitize_version(version):
        """Normalize a docker version string: strip quotes/whitespace/garbage
        characters and a leading 'v' so LooseVersion can parse it."""
        version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
        version = re.sub(u'^v', u'', version)
        return version

    def _old_docker_version(self):
        """Run plain ``docker version`` (works on very old docker CLIs).

        Returns a tuple of (command list, stdout as native str, raw stderr,
        return code).
        """
        cmd_args = []
        if self._play_context.docker_extra_args:
            cmd_args += self._play_context.docker_extra_args.split(' ')

        old_version_subcommand = ['version']

        old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
        p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()

        return old_docker_cmd, to_native(cmd_output), err, p.returncode

    def _new_docker_version(self):
        """Run ``docker version --format`` (newer docker CLIs).

        Returns a tuple of (command list, stdout as native str, raw stderr,
        return code).
        """
        # no result yet, must be newer Docker version
        cmd_args = []
        if self._play_context.docker_extra_args:
            cmd_args += self._play_context.docker_extra_args.split(' ')

        new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]

        new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
        p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()
        return new_docker_cmd, to_native(cmd_output), err, p.returncode

    def _get_docker_version(self):
        """Determine the docker server version, trying the old CLI output
        format first and falling back to the ``--format`` query.

        Raises AnsibleError if neither invocation succeeds.
        """
        cmd, cmd_output, err, returncode = self._old_docker_version()
        if returncode == 0:
            for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
                if line.startswith(u'Server version:'):  # old docker versions
                    return self._sanitize_version(line.split()[2])

        cmd, cmd_output, err, returncode = self._new_docker_version()
        if returncode:
            raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))

        return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))

    def _get_docker_remote_user(self):
        """ Get the default user configured in the docker container """
        p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        out, err = p.communicate()
        out = to_text(out, errors='surrogate_or_strict')

        if p.returncode != 0:
            display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
            return None

        # The default exec user is root, unless it was changed in the Dockerfile with USER
        return out.strip() or u'root'

    def _build_exec_cmd(self, cmd):
        """ Build the local docker exec command to run cmd on remote_host

            If remote_user is available and is supported by the docker
            version we are using, it will be provided to docker exec.
        """

        local_cmd = [self.docker_cmd]

        if self._play_context.docker_extra_args:
            local_cmd += self._play_context.docker_extra_args.split(' ')

        local_cmd += [b'exec']

        if self.remote_user is not None:
            local_cmd += [b'-u', self.remote_user]

        # -i is needed to keep stdin open which allows pipelining to work
        local_cmd += [b'-i', self._play_context.remote_addr] + cmd

        return local_cmd

    def _connect(self, port=None):
        """ Connect to the container. Nothing to do """
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
                self.actual_user or u'?'), host=self._play_context.remote_addr
            )
            self._connected = True

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ Run a command on the docker host """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])

        display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        p = subprocess.Popen(
            local_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        display.debug("done running command with Popen()")

        if self.become and self.become.expect_prompt() and sudoable:
            # Read both pipes non-blocking until the privilege escalation
            # prompt (or success marker) shows up, then feed the password.
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            selector = selectors.DefaultSelector()
            selector.register(p.stdout, selectors.EVENT_READ)
            selector.register(p.stderr, selectors.EVENT_READ)

            become_output = b''
            try:
                while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
                    events = selector.select(self._play_context.timeout)
                    if not events:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))

                    for key, event in events:
                        # Only p.stdout and p.stderr are registered with the
                        # selector, so the key is always one of the two.  (The
                        # previous if/elif left `chunk` unbound in the
                        # impossible third case, risking UnboundLocalError.)
                        if key.fileobj == p.stdout:
                            chunk = p.stdout.read()
                        else:
                            chunk = p.stderr.read()

                        if not chunk:
                            stdout, stderr = p.communicate()
                            raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
                        become_output += chunk
            finally:
                selector.close()

            if not self.become.check_success(become_output):
                become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
                p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
            # Restore blocking mode before handing the pipes to communicate()
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        display.debug("getting output with communicate()")
        stdout, stderr = p.communicate(in_data)
        display.debug("done communicating")

        display.debug("done with docker.exec_command()")
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we aren't guaranteed that a home dir will
            exist in any given chroot.  So for now we're choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it's a problem
        '''
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            return ntpath.normpath(remote_path)
        else:
            if not remote_path.startswith(os.path.sep):
                remote_path = os.path.join(os.path.sep, remote_path)
            return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ Transfer a file from local to docker container """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound(
                "file or module does not exist: %s" % to_native(in_path))

        out_path = shlex_quote(out_path)
        # Older docker doesn't have native support for copying files into
        # running containers, so we use docker exec to implement this
        # Although docker version 1.8 and later provide support, the
        # owner and group of the files are always set to root
        with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
            if not os.fstat(in_file.fileno()).st_size:
                count = ' count=0'
            else:
                count = ''
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            try:
                p = subprocess.Popen(args, stdin=in_file,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError:
                raise AnsibleError("docker connection requires dd command in the container to put files")
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
                                   (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))

    def fetch_file(self, in_path, out_path):
        """ Fetch a file from container to local. """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        in_path = self._prefix_login_path(in_path)
        # out_path is the final file path, but docker takes a directory, not a
        # file path
        out_dir = os.path.dirname(out_path)

        args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]

        p = subprocess.Popen(args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()

        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
        else:
            actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

        if p.returncode != 0:
            # Older docker doesn't have native support for fetching files command `cp`
            # If `cp` fails, try to use `dd` instead
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
                try:
                    p = subprocess.Popen(args, stdin=subprocess.PIPE,
                                         stdout=out_file, stderr=subprocess.PIPE)
                except OSError:
                    # (message previously said "put files" — copy/paste from put_file)
                    raise AnsibleError("docker connection requires dd command in the container to fetch files")
                stdout, stderr = p.communicate()

                if p.returncode != 0:
                    # Decode subprocess output so the error does not embed b'...' reprs
                    raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" %
                                       (in_path, out_path, to_native(stdout), to_native(stderr)))

        # Rename if needed
        if actual_out_path != out_path:
            os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))

    def close(self):
        """ Terminate the connection. Nothing to do for Docker"""
        super(Connection, self).close()
        self._connected = False
|
|
||||||
@@ -9,7 +9,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Michael Scherer (@msherer) <misc@zarb.org>
|
author: Michael Scherer (@msherer) <misc@zarb.org>
|
||||||
connection: funcd
|
name: funcd
|
||||||
short_description: Use funcd to connect to target
|
short_description: Use funcd to connect to target
|
||||||
description:
|
description:
|
||||||
- This transport permits you to use Ansible over Func.
|
- This transport permits you to use Ansible over Func.
|
||||||
@@ -37,13 +37,12 @@ import tempfile
|
|||||||
import shutil
|
import shutil
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
from ansible.errors import AnsibleError
|
||||||
from ansible.plugins.connection import ConnectionBase
|
|
||||||
from ansible.utils.display import Display
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
display = Display()
|
display = Display()
|
||||||
|
|
||||||
|
|
||||||
class Connection(ConnectionBase):
|
class Connection(object):
|
||||||
''' Func-based connections '''
|
''' Func-based connections '''
|
||||||
|
|
||||||
has_pipelining = False
|
has_pipelining = False
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
|
author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
|
||||||
connection: iocage
|
name: iocage
|
||||||
short_description: Run tasks in iocage jails
|
short_description: Run tasks in iocage jails
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing iocage jail
|
- Run commands or put/fetch files to an existing iocage jail
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Ansible Core Team
|
author: Ansible Core Team
|
||||||
connection: jail
|
name: jail
|
||||||
short_description: Run tasks in jails
|
short_description: Run tasks in jails
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing jail
|
- Run commands or put/fetch files to an existing jail
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
|
author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
|
||||||
connection: lxc
|
name: lxc
|
||||||
short_description: Run tasks in lxc containers via lxc python library
|
short_description: Run tasks in lxc containers via lxc python library
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing lxc container using lxc python library
|
- Run commands or put/fetch files to an existing lxc container using lxc python library
|
||||||
|
|||||||
@@ -7,14 +7,14 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Matt Clay (@mattclay) <matt@mystile.com>
|
author: Matt Clay (@mattclay) <matt@mystile.com>
|
||||||
connection: lxd
|
name: lxd
|
||||||
short_description: Run tasks in lxc containers via lxc CLI
|
short_description: Run tasks in lxc containers via lxc CLI
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing lxc container using lxc CLI
|
- Run commands or put/fetch files to an existing lxc container using lxc CLI
|
||||||
options:
|
options:
|
||||||
remote_addr:
|
remote_addr:
|
||||||
description:
|
description:
|
||||||
- Container identifier
|
- Container identifier.
|
||||||
default: inventory_hostname
|
default: inventory_hostname
|
||||||
vars:
|
vars:
|
||||||
- name: ansible_host
|
- name: ansible_host
|
||||||
@@ -26,6 +26,19 @@ DOCUMENTATION = '''
|
|||||||
vars:
|
vars:
|
||||||
- name: ansible_executable
|
- name: ansible_executable
|
||||||
- name: ansible_lxd_executable
|
- name: ansible_lxd_executable
|
||||||
|
remote:
|
||||||
|
description:
|
||||||
|
- Name of the LXD remote to use.
|
||||||
|
default: local
|
||||||
|
vars:
|
||||||
|
- name: ansible_lxd_remote
|
||||||
|
version_added: 2.0.0
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- Name of the LXD project to use.
|
||||||
|
vars:
|
||||||
|
- name: ansible_lxd_project
|
||||||
|
version_added: 2.0.0
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -70,7 +83,15 @@ class Connection(ConnectionBase):
|
|||||||
|
|
||||||
self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
|
self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
|
||||||
|
|
||||||
local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]
|
local_cmd = [self._lxc_cmd]
|
||||||
|
if self.get_option("project"):
|
||||||
|
local_cmd.extend(["--project", self.get_option("project")])
|
||||||
|
local_cmd.extend([
|
||||||
|
"exec",
|
||||||
|
"%s:%s" % (self.get_option("remote"), self._host),
|
||||||
|
"--",
|
||||||
|
self._play_context.executable, "-c", cmd
|
||||||
|
])
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
|
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
|
||||||
@@ -98,7 +119,14 @@ class Connection(ConnectionBase):
|
|||||||
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
|
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
|
||||||
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
|
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
|
||||||
|
|
||||||
local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]
|
local_cmd = [self._lxc_cmd]
|
||||||
|
if self.get_option("project"):
|
||||||
|
local_cmd.extend(["--project", self.get_option("project")])
|
||||||
|
local_cmd.extend([
|
||||||
|
"file", "push",
|
||||||
|
in_path,
|
||||||
|
"%s:%s/%s" % (self.get_option("remote"), self._host, out_path)
|
||||||
|
])
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
|
|
||||||
@@ -111,7 +139,14 @@ class Connection(ConnectionBase):
|
|||||||
|
|
||||||
self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
|
self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
|
||||||
|
|
||||||
local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
|
local_cmd = [self._lxc_cmd]
|
||||||
|
if self.get_option("project"):
|
||||||
|
local_cmd.extend(["--project", self.get_option("project")])
|
||||||
|
local_cmd.extend([
|
||||||
|
"file", "pull",
|
||||||
|
"%s:%s/%s" % (self.get_option("remote"), self._host, in_path),
|
||||||
|
out_path
|
||||||
|
])
|
||||||
|
|
||||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||||
|
|
||||||
|
|||||||
@@ -1,173 +0,0 @@
|
|||||||
# Based on the docker connection plugin
|
|
||||||
#
|
|
||||||
# Connection plugin for configuring kubernetes containers with kubectl
|
|
||||||
# (c) 2017, XuXinkun <xuxinkun@gmail.com>
|
|
||||||
#
|
|
||||||
# This file is part of Ansible
|
|
||||||
#
|
|
||||||
# Ansible is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# Ansible is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
author:
|
|
||||||
- xuxinkun (!UNKNOWN)
|
|
||||||
|
|
||||||
connection: oc
|
|
||||||
|
|
||||||
short_description: Execute tasks in pods running on OpenShift.
|
|
||||||
|
|
||||||
description:
|
|
||||||
- Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
|
|
||||||
container platform.
|
|
||||||
|
|
||||||
|
|
||||||
requirements:
|
|
||||||
- oc (go binary)
|
|
||||||
|
|
||||||
options:
|
|
||||||
oc_pod:
|
|
||||||
description:
|
|
||||||
- Pod name. Required when the host name does not match pod name.
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_pod
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_POD
|
|
||||||
oc_container:
|
|
||||||
description:
|
|
||||||
- Container name. Required when a pod contains more than one container.
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_container
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_CONTAINER
|
|
||||||
oc_namespace:
|
|
||||||
description:
|
|
||||||
- The namespace of the pod
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_namespace
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_NAMESPACE
|
|
||||||
oc_extra_args:
|
|
||||||
description:
|
|
||||||
- Extra arguments to pass to the oc command line.
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_extra_args
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_EXTRA_ARGS
|
|
||||||
oc_kubeconfig:
|
|
||||||
description:
|
|
||||||
- Path to an oc config file. Defaults to I(~/.kube/config)
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_kubeconfig
|
|
||||||
- name: ansible_oc_config
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_KUBECONFIG
|
|
||||||
oc_context:
|
|
||||||
description:
|
|
||||||
- The name of a context found in the K8s config file.
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_context
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_CONTEXT
|
|
||||||
oc_host:
|
|
||||||
description:
|
|
||||||
- URL for accessing the API.
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_host
|
|
||||||
- name: ansible_oc_server
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_HOST
|
|
||||||
- name: K8S_AUTH_SERVER
|
|
||||||
oc_token:
|
|
||||||
description:
|
|
||||||
- API authentication bearer token.
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_token
|
|
||||||
- name: ansible_oc_api_key
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_TOKEN
|
|
||||||
- name: K8S_AUTH_API_KEY
|
|
||||||
client_cert:
|
|
||||||
description:
|
|
||||||
- Path to a certificate used to authenticate with the API.
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_cert_file
|
|
||||||
- name: ansible_oc_client_cert
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_CERT_FILE
|
|
||||||
aliases: [ oc_cert_file ]
|
|
||||||
client_key:
|
|
||||||
description:
|
|
||||||
- Path to a key file used to authenticate with the API.
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_key_file
|
|
||||||
- name: ansible_oc_client_key
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_KEY_FILE
|
|
||||||
aliases: [ oc_key_file ]
|
|
||||||
ca_cert:
|
|
||||||
description:
|
|
||||||
- Path to a CA certificate used to authenticate with the API.
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_ssl_ca_cert
|
|
||||||
- name: ansible_oc_ca_cert
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_SSL_CA_CERT
|
|
||||||
aliases: [ oc_ssl_ca_cert ]
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- Whether or not to verify the API server's SSL certificate. Defaults to I(true).
|
|
||||||
default: ''
|
|
||||||
vars:
|
|
||||||
- name: ansible_oc_verify_ssl
|
|
||||||
- name: ansible_oc_validate_certs
|
|
||||||
env:
|
|
||||||
- name: K8S_AUTH_VERIFY_SSL
|
|
||||||
aliases: [ oc_verify_ssl ]
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection
|
|
||||||
|
|
||||||
|
|
||||||
# Transport name under which this plugin is registered in the collection.
CONNECTION_TRANSPORT = 'community.general.oc'

# Maps each plugin option to the `oc` command-line flag it populates.
# Order is preserved from the original definition.
CONNECTION_OPTIONS = {
    'oc_container': '-c',
    'oc_namespace': '-n',
    'oc_kubeconfig': '--config',
    'oc_context': '--context',
    'oc_host': '--server',
    'client_cert': '--client-certificate',
    'client_key': '--client-key',
    'ca_cert': '--certificate-authority',
    'validate_certs': '--insecure-skip-tls-verify',
    'oc_token': '--token',
}
|
|
||||||
|
|
||||||
|
|
||||||
class Connection(KubectlConnection):
    ''' Local oc based connections '''

    # Reuse the kubectl connection implementation wholesale; only the
    # transport name, option-to-flag map and documentation differ.
    transport = CONNECTION_TRANSPORT
    connection_options = CONNECTION_OPTIONS
    documentation = DOCUMENTATION
|
|
||||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
connection: qubes
|
name: qubes
|
||||||
short_description: Interact with an existing QubesOS AppVM
|
short_description: Interact with an existing QubesOS AppVM
|
||||||
|
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Michael Scherer (@mscherer) <misc@zarb.org>
|
author: Michael Scherer (@mscherer) <misc@zarb.org>
|
||||||
connection: saltstack
|
name: saltstack
|
||||||
short_description: Allow ansible to piggyback on salt minions
|
short_description: Allow ansible to piggyback on salt minions
|
||||||
description:
|
description:
|
||||||
- This allows you to use existing Saltstack infrastructure to connect to targets.
|
- This allows you to use existing Saltstack infrastructure to connect to targets.
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Ansible Core Team
|
author: Ansible Core Team
|
||||||
connection: zone
|
name: zone
|
||||||
short_description: Run tasks in a zone instance
|
short_description: Run tasks in a zone instance
|
||||||
description:
|
description:
|
||||||
- Run commands or put/fetch files to an existing zone
|
- Run commands or put/fetch files to an existing zone
|
||||||
|
|||||||
@@ -1,62 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2018, Google Inc.
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
# GCP doc fragment.
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
project:
|
|
||||||
description:
|
|
||||||
- The Google Cloud Platform project to use.
|
|
||||||
type: str
|
|
||||||
auth_kind:
|
|
||||||
description:
|
|
||||||
- The type of credential used.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
choices: [ application, machineaccount, serviceaccount ]
|
|
||||||
service_account_contents:
|
|
||||||
description:
|
|
||||||
- The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
|
|
||||||
type: jsonarg
|
|
||||||
service_account_file:
|
|
||||||
description:
|
|
||||||
- The path of a Service Account JSON file if serviceaccount is selected as type.
|
|
||||||
type: path
|
|
||||||
service_account_email:
|
|
||||||
description:
|
|
||||||
- An optional service account email address if machineaccount is selected
|
|
||||||
and the user does not wish to use the default email.
|
|
||||||
type: str
|
|
||||||
scopes:
|
|
||||||
description:
|
|
||||||
- Array of scopes to be used.
|
|
||||||
type: list
|
|
||||||
elements: str
|
|
||||||
env_type:
|
|
||||||
description:
|
|
||||||
- Specifies which Ansible environment you're running this module within.
|
|
||||||
- This should not be set unless you know what you're doing.
|
|
||||||
- This only alters the User Agent string for any API requests.
|
|
||||||
type: str
|
|
||||||
notes:
|
|
||||||
- For authentication, you can set service_account_file using the
  C(GCP_SERVICE_ACCOUNT_FILE) env variable.
- For authentication, you can set service_account_contents using the
  C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
|
|
||||||
- For authentication, you can set service_account_email using the
|
|
||||||
C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
|
|
||||||
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
|
|
||||||
variable.
|
|
||||||
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
|
|
||||||
- Environment variables values will only be used if the playbook values are
|
|
||||||
not set.
|
|
||||||
- The I(service_account_email) and I(service_account_file) options are
|
|
||||||
mutually exclusive.
|
|
||||||
'''
|
|
||||||
@@ -1,136 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Docker doc fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
|
|
||||||
options:
|
|
||||||
docker_host:
|
|
||||||
description:
|
|
||||||
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
|
|
||||||
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
|
|
||||||
the module will automatically replace C(tcp) in the connection URL with C(https).
|
|
||||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
|
|
||||||
instead. If the environment variable is not set, the default value will be used.
|
|
||||||
type: str
|
|
||||||
default: unix://var/run/docker.sock
|
|
||||||
aliases: [ docker_url ]
|
|
||||||
tls_hostname:
|
|
||||||
description:
|
|
||||||
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
|
|
||||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
|
|
||||||
be used instead. If the environment variable is not set, the default value will be used.
|
|
||||||
type: str
|
|
||||||
default: localhost
|
|
||||||
api_version:
|
|
||||||
description:
|
|
||||||
- The version of the Docker API running on the Docker Host.
|
|
||||||
- Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
|
|
||||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
|
|
||||||
used instead. If the environment variable is not set, the default value will be used.
|
|
||||||
type: str
|
|
||||||
default: auto
|
|
||||||
aliases: [ docker_api_version ]
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- The maximum amount of time in seconds to wait on a response from the API.
|
|
||||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
|
|
||||||
instead. If the environment variable is not set, the default value will be used.
|
|
||||||
type: int
|
|
||||||
default: 60
|
|
||||||
ca_cert:
|
|
||||||
description:
|
|
||||||
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
|
|
||||||
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
|
||||||
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
|
||||||
type: path
|
|
||||||
aliases: [ tls_ca_cert, cacert_path ]
|
|
||||||
client_cert:
|
|
||||||
description:
|
|
||||||
- Path to the client's TLS certificate file.
|
|
||||||
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
|
||||||
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
|
||||||
type: path
|
|
||||||
aliases: [ tls_client_cert, cert_path ]
|
|
||||||
client_key:
|
|
||||||
description:
|
|
||||||
- Path to the client's TLS key file.
|
|
||||||
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
|
||||||
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
|
||||||
type: path
|
|
||||||
aliases: [ tls_client_key, key_path ]
|
|
||||||
ssl_version:
|
|
||||||
description:
|
|
||||||
- Provide a valid SSL version number. Default value determined by ssl.py module.
|
|
||||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
|
|
||||||
used instead.
|
|
||||||
type: str
|
|
||||||
tls:
|
|
||||||
description:
|
|
||||||
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
|
|
||||||
server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
|
|
||||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
|
|
||||||
instead. If the environment variable is not set, the default value will be used.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
|
|
||||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
|
|
||||||
used instead. If the environment variable is not set, the default value will be used.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
aliases: [ tls_verify ]
|
|
||||||
debug:
|
|
||||||
description:
|
|
||||||
- Debug mode
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
|
|
||||||
notes:
|
|
||||||
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
|
|
||||||
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
|
|
||||||
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
|
|
||||||
with the product that sets up the environment. It will set these variables for you. See
|
|
||||||
U(https://docs.docker.com/machine/reference/env/) for more details.
|
|
||||||
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
|
|
||||||
For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
|
|
||||||
- Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
|
|
||||||
In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
|
|
||||||
and use C($DOCKER_CONFIG/config.json) otherwise.
|
|
||||||
'''
|
|
||||||
|
|
||||||
# Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
|
|
||||||
|
|
||||||
DOCKER_PY_1_DOCUMENTATION = r'''
|
|
||||||
options: {}
|
|
||||||
requirements:
|
|
||||||
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
|
|
||||||
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
|
|
||||||
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
|
|
||||||
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
|
|
||||||
install the C(docker) Python module. Note that both modules should *not*
|
|
||||||
be installed at the same time. Also note that when both modules are installed
|
|
||||||
and one of them is uninstalled, the other might no longer function and a
|
|
||||||
reinstall of it is required."
|
|
||||||
'''
|
|
||||||
|
|
||||||
# Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
|
|
||||||
# Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
|
|
||||||
|
|
||||||
DOCKER_PY_2_DOCUMENTATION = r'''
|
|
||||||
options: {}
|
|
||||||
requirements:
|
|
||||||
- "Python >= 2.7"
|
|
||||||
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
|
|
||||||
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
|
|
||||||
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
|
|
||||||
This module does *not* work with docker-py."
|
|
||||||
'''
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2019 Felix Fontein <felix@fontein.de>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard files documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
hetzner_user:
|
|
||||||
description: The username for the Robot webservice user.
|
|
||||||
type: str
|
|
||||||
required: yes
|
|
||||||
hetzner_password:
|
|
||||||
description: The password for the Robot webservice user.
|
|
||||||
type: str
|
|
||||||
required: yes
|
|
||||||
'''
|
|
||||||
@@ -1,133 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
resource_definition:
|
|
||||||
description:
|
|
||||||
- "A partial YAML definition of the object being created/updated. Here you can define Kubernetes
|
|
||||||
resource parameters not covered by this module's parameters."
|
|
||||||
- "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
|
|
||||||
I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
|
|
||||||
aliases:
|
|
||||||
- definition
|
|
||||||
- inline
|
|
||||||
type: dict
|
|
||||||
wait:
|
|
||||||
description:
|
|
||||||
- "I(True) if the module should wait for the resource to get into desired state."
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
force:
|
|
||||||
description:
|
|
||||||
- If set to C(no), and I(state) is C(present), an existing object will be replaced.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
wait_timeout:
|
|
||||||
description:
|
|
||||||
- The amount of time in seconds the module should wait for the resource to get into desired state.
|
|
||||||
type: int
|
|
||||||
default: 120
|
|
||||||
wait_sleep:
|
|
||||||
description:
|
|
||||||
- Number of seconds to sleep between checks.
|
|
||||||
default: 5
|
|
||||||
version_added: '0.2.0'
|
|
||||||
memory:
|
|
||||||
description:
|
|
||||||
- The amount of memory to be requested by virtual machine.
|
|
||||||
- For example 1024Mi.
|
|
||||||
type: str
|
|
||||||
memory_limit:
|
|
||||||
description:
|
|
||||||
- The maximum memory to be used by virtual machine.
|
|
||||||
- For example 1024Mi.
|
|
||||||
type: str
|
|
||||||
machine_type:
|
|
||||||
description:
|
|
||||||
- QEMU machine type is the actual chipset of the virtual machine.
|
|
||||||
type: str
|
|
||||||
merge_type:
|
|
||||||
description:
|
|
||||||
- Whether to override the default patch merge approach with a specific type.
|
|
||||||
- If more than one merge type is given, the merge types will be tried in order.
|
|
||||||
- "Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
|
|
||||||
on resource kinds that combine Custom Resources and built-in resources, as
|
|
||||||
Custom Resource Definitions typically aren't updatable by the usual strategic merge."
|
|
||||||
- "See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)"
|
|
||||||
type: list
|
|
||||||
choices: [ json, merge, strategic-merge ]
|
|
||||||
cpu_shares:
|
|
||||||
description:
|
|
||||||
- "Specify CPU shares."
|
|
||||||
type: int
|
|
||||||
cpu_limit:
|
|
||||||
description:
|
|
||||||
- "Is converted to its millicore value and multiplied by 100. The resulting value is the total amount of CPU time that a container can use
|
|
||||||
every 100ms. A virtual machine cannot use more than its share of CPU time during this interval."
|
|
||||||
type: int
|
|
||||||
cpu_cores:
|
|
||||||
description:
|
|
||||||
- "Number of CPU cores."
|
|
||||||
type: int
|
|
||||||
cpu_model:
|
|
||||||
description:
|
|
||||||
- "CPU model."
|
|
||||||
- "You can check list of available models here: U(https://github.com/libvirt/libvirt/blob/master/src/cpu_map/index.xml)."
|
|
||||||
- "I(Note:) User can define default CPU model via as I(default-cpu-model) in I(kubevirt-config) I(ConfigMap), if not set I(host-model) is used."
|
|
||||||
- "I(Note:) Be sure that node CPU model where you run a VM, has the same or higher CPU family."
|
|
||||||
- "I(Note:) If CPU model wasn't defined, the VM will have CPU model closest to one that used on the node where the VM is running."
|
|
||||||
type: str
|
|
||||||
bootloader:
|
|
||||||
description:
|
|
||||||
- "Specify the bootloader of the virtual machine."
|
|
||||||
- "All virtual machines use BIOS by default for booting."
|
|
||||||
type: str
|
|
||||||
smbios_uuid:
|
|
||||||
description:
|
|
||||||
- "In order to provide a consistent view on the virtualized hardware for the guest OS, the SMBIOS UUID can be set."
|
|
||||||
type: str
|
|
||||||
cpu_features:
|
|
||||||
description:
|
|
||||||
- "List of dictionary to fine-tune features provided by the selected CPU model."
|
|
||||||
- "I(Note): Policy attribute can either be omitted or contain one of the following policies: force, require, optional, disable, forbid."
|
|
||||||
- "I(Note): In case a policy is omitted for a feature, it will default to require."
|
|
||||||
- "More information about policies: U(https://libvirt.org/formatdomain.html#elementsCPU)"
|
|
||||||
type: list
|
|
||||||
headless:
|
|
||||||
description:
|
|
||||||
- "Specify if the virtual machine should have attached a minimal Video and Graphics device configuration."
|
|
||||||
- "By default a minimal Video and Graphics device configuration will be applied to the VirtualMachineInstance. The video device is vga
|
|
||||||
compatible and comes with a memory size of 16 MB."
|
|
||||||
hugepage_size:
|
|
||||||
description:
|
|
||||||
- "Specify huge page size."
|
|
||||||
type: str
|
|
||||||
tablets:
|
|
||||||
description:
|
|
||||||
- "Specify tablets to be used as input devices"
|
|
||||||
type: list
|
|
||||||
hostname:
|
|
||||||
description:
|
|
||||||
- "Specifies the hostname of the virtual machine. The hostname will be set either by dhcp, cloud-init if configured or virtual machine
|
|
||||||
name will be used."
|
|
||||||
subdomain:
|
|
||||||
description:
|
|
||||||
- "If specified, the fully qualified virtual machine hostname will be hostname.subdomain.namespace.svc.cluster_domain. If not specified,
|
|
||||||
the virtual machine will not have a domain name at all. The DNS entry will resolve to the virtual machine, no matter if the virtual machine
|
|
||||||
itself can pick up a hostname."
|
|
||||||
requirements:
|
|
||||||
- python >= 2.7
|
|
||||||
- openshift >= 0.8.2
|
|
||||||
notes:
|
|
||||||
- "In order to use this module you have to install Openshift Python SDK.
|
|
||||||
To ensure it's installed with correct version you can create the following task:
|
|
||||||
I(pip: name=openshift>=0.8.2)"
|
|
||||||
'''
|
|
||||||
@@ -1,103 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
|
|
||||||
# Standard oVirt documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
disks:
|
|
||||||
description:
|
|
||||||
- List of dictionaries which specify disks of the virtual machine.
|
|
||||||
- "A disk can be made accessible via four different types: I(disk), I(lun), I(cdrom), I(floppy)."
|
|
||||||
- "All possible configuration options are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_disk)"
|
|
||||||
- Each disk must have specified a I(volume) that declares which volume type of the disk
|
|
||||||
All possible configuration options of volume are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_volume).
|
|
||||||
type: list
|
|
||||||
labels:
|
|
||||||
description:
|
|
||||||
- Labels are key/value pairs that are attached to virtual machines. Labels are intended to be used to
|
|
||||||
specify identifying attributes of virtual machines that are meaningful and relevant to users, but do not directly
|
|
||||||
imply semantics to the core system. Labels can be used to organize and to select subsets of virtual machines.
|
|
||||||
Labels can be attached to virtual machines at creation time and subsequently added and modified at any time.
|
|
||||||
- More on labels that are used for internal implementation U(https://kubevirt.io/user-guide/#/misc/annotations_and_labels)
|
|
||||||
type: dict
|
|
||||||
interfaces:
|
|
||||||
description:
|
|
||||||
- An interface defines a virtual network interface of a virtual machine (also called a frontend).
|
|
||||||
- All possible configuration options interfaces are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_interface)
|
|
||||||
- Each interface must have specified a I(network) that declares which logical or physical device it is connected to (also called as backend).
|
|
||||||
All possible configuration options of network are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_network).
|
|
||||||
type: list
|
|
||||||
cloud_init_nocloud:
|
|
||||||
description:
|
|
||||||
- "Represents a cloud-init NoCloud user-data source. The NoCloud data will be added
|
|
||||||
as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
|
|
||||||
More information U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
|
|
||||||
type: dict
|
|
||||||
affinity:
|
|
||||||
description:
|
|
||||||
- "Describes node affinity scheduling rules for the vm."
|
|
||||||
type: dict
|
|
||||||
suboptions:
|
|
||||||
soft:
|
|
||||||
description:
|
|
||||||
- "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose a
|
|
||||||
node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
|
|
||||||
each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
|
|
||||||
a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches the corresponding
|
|
||||||
C(term); the nodes with the highest sum are the most preferred."
|
|
||||||
type: dict
|
|
||||||
hard:
|
|
||||||
description:
|
|
||||||
- "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
|
|
||||||
the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label update), the
|
|
||||||
system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes corresponding to
|
|
||||||
each C(term) are intersected, i.e. all terms must be satisfied."
|
|
||||||
type: dict
|
|
||||||
node_affinity:
|
|
||||||
description:
|
|
||||||
- "Describes vm affinity scheduling rules e.g. co-locate this vm in the same node, zone, etc. as some other vms"
|
|
||||||
type: dict
|
|
||||||
suboptions:
|
|
||||||
soft:
|
|
||||||
description:
|
|
||||||
- "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose
|
|
||||||
a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e.
|
|
||||||
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.),
|
|
||||||
compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding
|
|
||||||
match_expressions; the nodes with the highest sum are the most preferred."
|
|
||||||
type: dict
|
|
||||||
hard:
|
|
||||||
description:
|
|
||||||
- "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
|
|
||||||
the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to an update), the system
|
|
||||||
may or may not try to eventually evict the vm from its node."
|
|
||||||
type: dict
|
|
||||||
anti_affinity:
|
|
||||||
description:
|
|
||||||
- "Describes vm anti-affinity scheduling rules e.g. avoid putting this vm in the same node, zone, etc. as some other vms."
|
|
||||||
type: dict
|
|
||||||
suboptions:
|
|
||||||
soft:
|
|
||||||
description:
|
|
||||||
- "The scheduler will prefer to schedule vms to nodes that satisfy the anti-affinity expressions specified by this field, but it may
|
|
||||||
choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights,
|
|
||||||
i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions,
|
|
||||||
etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches
|
|
||||||
the corresponding C(term); the nodes with the highest sum are the most preferred."
|
|
||||||
type: dict
|
|
||||||
hard:
|
|
||||||
description:
|
|
||||||
- "If the anti-affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node.
|
|
||||||
If the anti-affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label
|
|
||||||
update), the system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes
|
|
||||||
corresponding to each C(term) are intersected, i.e. all terms must be satisfied."
|
|
||||||
type: dict
|
|
||||||
'''
|
|
||||||
@@ -15,7 +15,7 @@ class ModuleDocFragment(object):
|
|||||||
options:
|
options:
|
||||||
bind_dn:
|
bind_dn:
|
||||||
description:
|
description:
|
||||||
- A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
|
- A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
|
||||||
- If this is blank, we'll use an anonymous bind.
|
- If this is blank, we'll use an anonymous bind.
|
||||||
type: str
|
type: str
|
||||||
bind_pw:
|
bind_pw:
|
||||||
@@ -27,6 +27,15 @@ options:
|
|||||||
description:
|
description:
|
||||||
- The DN of the entry to add or remove.
|
- The DN of the entry to add or remove.
|
||||||
type: str
|
type: str
|
||||||
|
referrals_chasing:
|
||||||
|
choices: [disabled, anonymous]
|
||||||
|
default: anonymous
|
||||||
|
type: str
|
||||||
|
description:
|
||||||
|
- Set the referrals chasing behavior.
|
||||||
|
- C(anonymous) follow referrals anonymously. This is the default behavior.
|
||||||
|
- C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off.
|
||||||
|
version_added: 2.0.0
|
||||||
server_uri:
|
server_uri:
|
||||||
description:
|
description:
|
||||||
- A URI to the LDAP server.
|
- A URI to the LDAP server.
|
||||||
@@ -44,4 +53,12 @@ options:
|
|||||||
- This should only be used on sites using self-signed certificates.
|
- This should only be used on sites using self-signed certificates.
|
||||||
type: bool
|
type: bool
|
||||||
default: yes
|
default: yes
|
||||||
|
sasl_class:
|
||||||
|
description:
|
||||||
|
- The class to use for SASL authentication.
|
||||||
|
- possible choices are C(external), C(gssapi).
|
||||||
|
type: str
|
||||||
|
choices: ['external', 'gssapi']
|
||||||
|
default: external
|
||||||
|
version_added: "2.0.0"
|
||||||
'''
|
'''
|
||||||
|
|||||||
@@ -78,6 +78,24 @@ options:
|
|||||||
variable.
|
variable.
|
||||||
type: int
|
type: int
|
||||||
default: 1000
|
default: 1000
|
||||||
|
http_pool_connections:
|
||||||
|
description:
|
||||||
|
- Number of pools to be used by the C(infoblox_client.Connector) object.
|
||||||
|
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||||
|
type: int
|
||||||
|
default: 10
|
||||||
|
http_pool_maxsize:
|
||||||
|
description:
|
||||||
|
- Maximum number of connections per pool to be used by the C(infoblox_client.Connector) object.
|
||||||
|
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||||
|
type: int
|
||||||
|
default: 10
|
||||||
|
silent_ssl_warnings:
|
||||||
|
description:
|
||||||
|
- Disable C(urllib3) SSL warnings in the C(infoblox_client.Connector) object.
|
||||||
|
- This is passed as-is to the underlying C(requests.adapters.HTTPAdapter) class.
|
||||||
|
type: bool
|
||||||
|
default: true
|
||||||
notes:
|
notes:
|
||||||
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
|
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
|
||||||
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
|
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
|
||||||
|
|||||||
@@ -13,32 +13,12 @@ class ModuleDocFragment(object):
|
|||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
options:
|
options:
|
||||||
config:
|
config:
|
||||||
description:
|
description:
|
||||||
- Path to a .json configuration file containing the OneView client configuration.
|
- Path to a .json configuration file containing the OneView client configuration.
|
||||||
The configuration file is optional and when used should be present in the host running the ansible commands.
|
The configuration file is optional and when used should be present in the host running the ansible commands.
|
||||||
If the file path is not provided, the configuration will be loaded from environment variables.
|
If the file path is not provided, the configuration will be loaded from environment variables.
|
||||||
For links to example configuration files or how to use the environment variables verify the notes section.
|
For links to example configuration files or how to use the environment variables verify the notes section.
|
||||||
type: path
|
type: path
|
||||||
api_version:
|
|
||||||
description:
|
|
||||||
- OneView API Version.
|
|
||||||
type: int
|
|
||||||
image_streamer_hostname:
|
|
||||||
description:
|
|
||||||
- IP address or hostname for the HPE Image Streamer REST API.
|
|
||||||
type: str
|
|
||||||
hostname:
|
|
||||||
description:
|
|
||||||
- IP address or hostname for the appliance.
|
|
||||||
type: str
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Username for API authentication.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Password for API authentication.
|
|
||||||
type: str
|
|
||||||
|
|
||||||
requirements:
|
requirements:
|
||||||
- python >= 2.7.9
|
- python >= 2.7.9
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ class ModuleDocFragment(object):
|
|||||||
OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
|
OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
|
||||||
not specified through a configuration file (See C(config_file_location)). If the key is encrypted
|
not specified through a configuration file (See C(config_file_location)). If the key is encrypted
|
||||||
with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
|
with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
|
||||||
type: str
|
type: path
|
||||||
api_user_key_pass_phrase:
|
api_user_key_pass_phrase:
|
||||||
description:
|
description:
|
||||||
- Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
|
- Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
|
||||||
|
|||||||
@@ -1,62 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
|
|
||||||
class ModuleDocFragment(object):
|
|
||||||
# Postgres documentation fragment
|
|
||||||
DOCUMENTATION = r'''
|
|
||||||
options:
|
|
||||||
login_user:
|
|
||||||
description:
|
|
||||||
- The username used to authenticate with.
|
|
||||||
type: str
|
|
||||||
default: postgres
|
|
||||||
login_password:
|
|
||||||
description:
|
|
||||||
- The password used to authenticate with.
|
|
||||||
type: str
|
|
||||||
login_host:
|
|
||||||
description:
|
|
||||||
- Host running the database.
|
|
||||||
type: str
|
|
||||||
login_unix_socket:
|
|
||||||
description:
|
|
||||||
- Path to a Unix domain socket for local connections.
|
|
||||||
type: str
|
|
||||||
port:
|
|
||||||
description:
|
|
||||||
- Database port to connect to.
|
|
||||||
type: int
|
|
||||||
default: 5432
|
|
||||||
aliases: [ login_port ]
|
|
||||||
ssl_mode:
|
|
||||||
description:
|
|
||||||
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
|
|
||||||
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
|
|
||||||
- Default of C(prefer) matches libpq default.
|
|
||||||
type: str
|
|
||||||
default: prefer
|
|
||||||
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
|
|
||||||
ca_cert:
|
|
||||||
description:
|
|
||||||
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
|
|
||||||
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
|
|
||||||
type: str
|
|
||||||
aliases: [ ssl_rootcert ]
|
|
||||||
notes:
|
|
||||||
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
|
|
||||||
- To avoid "Peer authentication failed for user postgres" error,
|
|
||||||
use postgres user as a I(become_user).
|
|
||||||
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
|
|
||||||
ensure that psycopg2 is installed on the host before using this module.
|
|
||||||
- If the remote host is the PostgreSQL server (which is the default case), then
|
|
||||||
PostgreSQL must also be installed on the remote host.
|
|
||||||
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
|
|
||||||
on the remote host before using this module.
|
|
||||||
- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
|
|
||||||
requirements: [ psycopg2 ]
|
|
||||||
'''
|
|
||||||
@@ -42,4 +42,23 @@ options:
|
|||||||
type: bool
|
type: bool
|
||||||
default: no
|
default: no
|
||||||
requirements: [ "proxmoxer", "requests" ]
|
requirements: [ "proxmoxer", "requests" ]
|
||||||
|
'''
|
||||||
|
|
||||||
|
SELECTION = r'''
|
||||||
|
options:
|
||||||
|
vmid:
|
||||||
|
description:
|
||||||
|
- Specifies the instance ID.
|
||||||
|
- If not set the next available ID will be fetched from ProxmoxAPI.
|
||||||
|
type: int
|
||||||
|
node:
|
||||||
|
description:
|
||||||
|
- Proxmox VE node on which to operate.
|
||||||
|
- Only required for I(state=present).
|
||||||
|
- For every other states it will be autodiscovered.
|
||||||
|
type: str
|
||||||
|
pool:
|
||||||
|
description:
|
||||||
|
- Add the new VM to the specified pool.
|
||||||
|
type: str
|
||||||
'''
|
'''
|
||||||
|
|||||||
47
plugins/filter/list.py
Normal file
47
plugins/filter/list.py
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright (c) 2020, Vladimir Botka <vbotka@gmail.com>
|
||||||
|
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division, print_function)
|
||||||
|
__metaclass__ = type
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleError, AnsibleFilterError
|
||||||
|
from ansible.module_utils.six import string_types
|
||||||
|
from ansible.module_utils.common._collections_compat import Mapping, Sequence
|
||||||
|
from collections import defaultdict
|
||||||
|
from operator import itemgetter
|
||||||
|
|
||||||
|
|
||||||
|
def lists_mergeby(l1, l2, index):
|
||||||
|
''' merge lists by attribute index. Example:
|
||||||
|
- debug: msg="{{ l1|community.general.lists_mergeby(l2, 'index')|list }}" '''
|
||||||
|
|
||||||
|
if not isinstance(l1, Sequence):
|
||||||
|
raise AnsibleFilterError('First argument for community.general.lists_mergeby must be list. %s is %s' %
|
||||||
|
(l1, type(l1)))
|
||||||
|
|
||||||
|
if not isinstance(l2, Sequence):
|
||||||
|
raise AnsibleFilterError('Second argument for community.general.lists_mergeby must be list. %s is %s' %
|
||||||
|
(l2, type(l2)))
|
||||||
|
|
||||||
|
if not isinstance(index, string_types):
|
||||||
|
raise AnsibleFilterError('Third argument for community.general.lists_mergeby must be string. %s is %s' %
|
||||||
|
(index, type(index)))
|
||||||
|
|
||||||
|
d = defaultdict(dict)
|
||||||
|
for l in (l1, l2):
|
||||||
|
for elem in l:
|
||||||
|
if not isinstance(elem, Mapping):
|
||||||
|
raise AnsibleFilterError('Elements of list arguments for lists_mergeby must be dictionaries. Found {0!r}.'.format(elem))
|
||||||
|
if index in elem.keys():
|
||||||
|
d[elem[index]].update(elem)
|
||||||
|
return sorted(d.values(), key=itemgetter(index))
|
||||||
|
|
||||||
|
|
||||||
|
class FilterModule(object):
|
||||||
|
''' Ansible list filters '''
|
||||||
|
|
||||||
|
def filters(self):
|
||||||
|
return {
|
||||||
|
'lists_mergeby': lists_mergeby,
|
||||||
|
}
|
||||||
@@ -8,7 +8,6 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Orion Poplawski (@opoplawski)
|
author: Orion Poplawski (@opoplawski)
|
||||||
name: cobbler
|
name: cobbler
|
||||||
plugin_type: inventory
|
|
||||||
short_description: Cobbler inventory source
|
short_description: Cobbler inventory source
|
||||||
version_added: 1.0.0
|
version_added: 1.0.0
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -1,272 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
name: docker_machine
|
|
||||||
plugin_type: inventory
|
|
||||||
author: Ximon Eighteen (@ximon18)
|
|
||||||
short_description: Docker Machine inventory source
|
|
||||||
requirements:
|
|
||||||
- L(Docker Machine,https://docs.docker.com/machine/)
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- constructed
|
|
||||||
description:
|
|
||||||
- Get inventory hosts from Docker Machine.
|
|
||||||
- Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
|
|
||||||
- The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
|
|
||||||
- The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
|
|
||||||
|
|
||||||
options:
|
|
||||||
plugin:
|
|
||||||
description: token that ensures this is a source file for the C(docker_machine) plugin.
|
|
||||||
required: yes
|
|
||||||
choices: ['docker_machine', 'community.general.docker_machine']
|
|
||||||
daemon_env:
|
|
||||||
description:
|
|
||||||
- Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
|
|
||||||
- With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
|
|
||||||
A warning will be issued for any skipped host if the choice is C(require).
|
|
||||||
- With C(optional) and C(optional-silently), fetch them and not skip hosts for which they cannot be fetched.
|
|
||||||
A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
|
|
||||||
- With C(skip), do not attempt to fetch the docker daemon connection environment variables.
|
|
||||||
- If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
|
|
||||||
type: str
|
|
||||||
choices:
|
|
||||||
- require
|
|
||||||
- require-silently
|
|
||||||
- optional
|
|
||||||
- optional-silently
|
|
||||||
- skip
|
|
||||||
default: require
|
|
||||||
running_required:
|
|
||||||
description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
verbose_output:
|
|
||||||
description: when true, include all available nodes metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
# Minimal example
|
|
||||||
plugin: community.general.docker_machine
|
|
||||||
|
|
||||||
# Example using constructed features to create a group per Docker Machine driver
|
|
||||||
# (https://docs.docker.com/machine/drivers/), e.g.:
|
|
||||||
# $ docker-machine create --driver digitalocean ... mymachine
|
|
||||||
# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
|
|
||||||
# {
|
|
||||||
# ...
|
|
||||||
# "digitalocean": {
|
|
||||||
# "hosts": [
|
|
||||||
# "mymachine"
|
|
||||||
# ]
|
|
||||||
# ...
|
|
||||||
# }
|
|
||||||
strict: no
|
|
||||||
keyed_groups:
|
|
||||||
- separator: ''
|
|
||||||
key: docker_machine_node_attributes.DriverName
|
|
||||||
|
|
||||||
# Example grouping hosts by Digital Machine tag
|
|
||||||
strict: no
|
|
||||||
keyed_groups:
|
|
||||||
- prefix: tag
|
|
||||||
key: 'dm_tags'
|
|
||||||
|
|
||||||
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
|
|
||||||
compose:
|
|
||||||
ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
from ansible.module_utils._text import to_native
|
|
||||||
from ansible.module_utils._text import to_text
|
|
||||||
from ansible.module_utils.common.process import get_bin_path
|
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
|
||||||
from ansible.utils.display import Display
|
|
||||||
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
display = Display()
|
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|
||||||
''' Host inventory parser for ansible using Docker machine as source. '''
|
|
||||||
|
|
||||||
NAME = 'community.general.docker_machine'
|
|
||||||
|
|
||||||
DOCKER_MACHINE_PATH = None
|
|
||||||
|
|
||||||
def _run_command(self, args):
|
|
||||||
if not self.DOCKER_MACHINE_PATH:
|
|
||||||
try:
|
|
||||||
self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
|
|
||||||
except ValueError as e:
|
|
||||||
raise AnsibleError(to_native(e))
|
|
||||||
|
|
||||||
command = [self.DOCKER_MACHINE_PATH]
|
|
||||||
command.extend(args)
|
|
||||||
display.debug('Executing command {0}'.format(command))
|
|
||||||
try:
|
|
||||||
result = subprocess.check_output(command)
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
|
|
||||||
raise e
|
|
||||||
|
|
||||||
return to_text(result).strip()
|
|
||||||
|
|
||||||
def _get_docker_daemon_variables(self, machine_name):
|
|
||||||
'''
|
|
||||||
Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
|
|
||||||
the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
|
|
||||||
'''
|
|
||||||
try:
|
|
||||||
env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
# This can happen when the machine is created but provisioning is incomplete
|
|
||||||
return []
|
|
||||||
|
|
||||||
# example output of docker-machine env --shell=sh:
|
|
||||||
# export DOCKER_TLS_VERIFY="1"
|
|
||||||
# export DOCKER_HOST="tcp://134.209.204.160:2376"
|
|
||||||
# export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
|
|
||||||
# export DOCKER_MACHINE_NAME="routinator"
|
|
||||||
# # Run this command to configure your shell:
|
|
||||||
# # eval $(docker-machine env --shell=bash routinator)
|
|
||||||
|
|
||||||
# capture any of the DOCKER_xxx variables that were output and create Ansible host vars
|
|
||||||
# with the same name and value but with a dm_ name prefix.
|
|
||||||
vars = []
|
|
||||||
for line in env_lines:
|
|
||||||
match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
|
|
||||||
if match:
|
|
||||||
env_var_name = match.group(1)
|
|
||||||
env_var_value = match.group(2)
|
|
||||||
vars.append((env_var_name, env_var_value))
|
|
||||||
|
|
||||||
return vars
|
|
||||||
|
|
||||||
def _get_machine_names(self):
|
|
||||||
# Filter out machines that are not in the Running state as we probably can't do anything useful actions
|
|
||||||
# with them.
|
|
||||||
ls_command = ['ls', '-q']
|
|
||||||
if self.get_option('running_required'):
|
|
||||||
ls_command.extend(['--filter', 'state=Running'])
|
|
||||||
|
|
||||||
try:
|
|
||||||
ls_lines = self._run_command(ls_command)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
return []
|
|
||||||
|
|
||||||
return ls_lines.splitlines()
|
|
||||||
|
|
||||||
def _inspect_docker_machine_host(self, node):
|
|
||||||
try:
|
|
||||||
inspect_lines = self._run_command(['inspect', self.node])
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return json.loads(inspect_lines)
|
|
||||||
|
|
||||||
def _ip_addr_docker_machine_host(self, node):
|
|
||||||
try:
|
|
||||||
ip_addr = self._run_command(['ip', self.node])
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return ip_addr
|
|
||||||
|
|
||||||
def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
|
|
||||||
if not env_var_tuples:
|
|
||||||
warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
|
|
||||||
if daemon_env in ('require', 'require-silently'):
|
|
||||||
if daemon_env == 'require':
|
|
||||||
display.warning('{0}: host will be skipped'.format(warning_prefix))
|
|
||||||
return True
|
|
||||||
else: # 'optional', 'optional-silently'
|
|
||||||
if daemon_env == 'optional':
|
|
||||||
display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _populate(self):
|
|
||||||
daemon_env = self.get_option('daemon_env')
|
|
||||||
try:
|
|
||||||
for self.node in self._get_machine_names():
|
|
||||||
self.node_attrs = self._inspect_docker_machine_host(self.node)
|
|
||||||
if not self.node_attrs:
|
|
||||||
continue
|
|
||||||
|
|
||||||
machine_name = self.node_attrs['Driver']['MachineName']
|
|
||||||
|
|
||||||
# query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
|
|
||||||
# that could be used to set environment variables to influence a local Docker client:
|
|
||||||
if daemon_env == 'skip':
|
|
||||||
env_var_tuples = []
|
|
||||||
else:
|
|
||||||
env_var_tuples = self._get_docker_daemon_variables(machine_name)
|
|
||||||
if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
|
|
||||||
continue
|
|
||||||
|
|
||||||
# add an entry in the inventory for this host
|
|
||||||
self.inventory.add_host(machine_name)
|
|
||||||
|
|
||||||
# check for valid ip address from inspect output, else explicitly use ip command to find host ip address
|
|
||||||
# this works around an issue seen with Google Compute Platform where the IP address was not available
|
|
||||||
# via the 'inspect' subcommand but was via the 'ip' subcomannd.
|
|
||||||
if self.node_attrs['Driver']['IPAddress']:
|
|
||||||
ip_addr = self.node_attrs['Driver']['IPAddress']
|
|
||||||
else:
|
|
||||||
ip_addr = self._ip_addr_docker_machine_host(self.node)
|
|
||||||
|
|
||||||
# set standard Ansible remote host connection settings to details captured from `docker-machine`
|
|
||||||
# see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
|
|
||||||
self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
|
|
||||||
self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
|
|
||||||
self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
|
|
||||||
self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
|
|
||||||
|
|
||||||
# set variables based on Docker Machine tags
|
|
||||||
tags = self.node_attrs['Driver'].get('Tags') or ''
|
|
||||||
self.inventory.set_variable(machine_name, 'dm_tags', tags)
|
|
||||||
|
|
||||||
# set variables based on Docker Machine env variables
|
|
||||||
for kv in env_var_tuples:
|
|
||||||
self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
|
|
||||||
|
|
||||||
if self.get_option('verbose_output'):
|
|
||||||
self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
|
|
||||||
|
|
||||||
# Use constructed if applicable
|
|
||||||
strict = self.get_option('strict')
|
|
||||||
|
|
||||||
# Composed variables
|
|
||||||
self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
|
|
||||||
|
|
||||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
|
||||||
self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
|
|
||||||
|
|
||||||
# Create groups based on variable values and add the corresponding hosts to it
|
|
||||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
|
|
||||||
to_native(e), orig_exc=e)
|
|
||||||
|
|
||||||
def verify_file(self, path):
|
|
||||||
"""Return the possibility of a file being consumable by this plugin."""
|
|
||||||
return (
|
|
||||||
super(InventoryModule, self).verify_file(path) and
|
|
||||||
path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
|
|
||||||
|
|
||||||
def parse(self, inventory, loader, path, cache=True):
|
|
||||||
super(InventoryModule, self).parse(inventory, loader, path, cache)
|
|
||||||
self._read_config_data(path)
|
|
||||||
self._populate()
|
|
||||||
@@ -1,255 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
|
|
||||||
# Copyright (c) 2018 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
name: docker_swarm
|
|
||||||
plugin_type: inventory
|
|
||||||
author:
|
|
||||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
|
||||||
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
|
|
||||||
requirements:
|
|
||||||
- python >= 2.7
|
|
||||||
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- constructed
|
|
||||||
description:
|
|
||||||
- Reads inventories from the Docker swarm API.
|
|
||||||
- Uses a YAML configuration file docker_swarm.[yml|yaml].
|
|
||||||
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
|
|
||||||
I(managers) - all manager nodes; I(leader) - the swarm leader node;
|
|
||||||
I(nonleaders) - all nodes except the swarm leader."
|
|
||||||
options:
|
|
||||||
plugin:
|
|
||||||
description: The name of this plugin, it should always be set to C(community.general.docker_swarm)
|
|
||||||
for this plugin to recognize it as it's own.
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
choices: [ docker_swarm, community.general.docker_swarm ]
|
|
||||||
docker_host:
|
|
||||||
description:
|
|
||||||
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
|
|
||||||
- "Use C(unix://var/run/docker.sock) to connect via local socket."
|
|
||||||
type: str
|
|
||||||
required: true
|
|
||||||
aliases: [ docker_url ]
|
|
||||||
verbose_output:
|
|
||||||
description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS),
|
|
||||||
C(EngineVersion))
|
|
||||||
type: bool
|
|
||||||
default: yes
|
|
||||||
tls:
|
|
||||||
description: Connect using TLS without verifying the authenticity of the Docker host server.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
validate_certs:
|
|
||||||
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
|
|
||||||
host server.
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
aliases: [ tls_verify ]
|
|
||||||
client_key:
|
|
||||||
description: Path to the client's TLS key file.
|
|
||||||
type: path
|
|
||||||
aliases: [ tls_client_key, key_path ]
|
|
||||||
ca_cert:
|
|
||||||
description: Use a CA certificate when performing server verification by providing the path to a CA
|
|
||||||
certificate file.
|
|
||||||
type: path
|
|
||||||
aliases: [ tls_ca_cert, cacert_path ]
|
|
||||||
client_cert:
|
|
||||||
description: Path to the client's TLS certificate file.
|
|
||||||
type: path
|
|
||||||
aliases: [ tls_client_cert, cert_path ]
|
|
||||||
tls_hostname:
|
|
||||||
description: When verifying the authenticity of the Docker host server, provide the expected name of
|
|
||||||
the server.
|
|
||||||
type: str
|
|
||||||
ssl_version:
|
|
||||||
description: Provide a valid SSL version number. Default value determined by ssl.py module.
|
|
||||||
type: str
|
|
||||||
api_version:
|
|
||||||
description:
|
|
||||||
- The version of the Docker API running on the Docker Host.
|
|
||||||
- Defaults to the latest version of the API supported by docker-py.
|
|
||||||
type: str
|
|
||||||
aliases: [ docker_api_version ]
|
|
||||||
timeout:
|
|
||||||
description:
|
|
||||||
- The maximum amount of time in seconds to wait on a response from the API.
|
|
||||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
|
|
||||||
will be used instead. If the environment variable is not set, the default value will be used.
|
|
||||||
type: int
|
|
||||||
default: 60
|
|
||||||
aliases: [ time_out ]
|
|
||||||
include_host_uri:
|
|
||||||
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
|
|
||||||
swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
|
|
||||||
modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
|
|
||||||
The port always defaults to C(2376).
|
|
||||||
type: bool
|
|
||||||
default: no
|
|
||||||
include_host_uri_port:
|
|
||||||
description: Override the detected port number included in I(ansible_host_uri)
|
|
||||||
type: int
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
# Minimal example using local docker
|
|
||||||
plugin: community.general.docker_swarm
|
|
||||||
docker_host: unix://var/run/docker.sock
|
|
||||||
|
|
||||||
# Minimal example using remote docker
|
|
||||||
plugin: community.general.docker_swarm
|
|
||||||
docker_host: tcp://my-docker-host:2375
|
|
||||||
|
|
||||||
# Example using remote docker with unverified TLS
|
|
||||||
plugin: community.general.docker_swarm
|
|
||||||
docker_host: tcp://my-docker-host:2376
|
|
||||||
tls: yes
|
|
||||||
|
|
||||||
# Example using remote docker with verified TLS and client certificate verification
|
|
||||||
plugin: community.general.docker_swarm
|
|
||||||
docker_host: tcp://my-docker-host:2376
|
|
||||||
validate_certs: yes
|
|
||||||
ca_cert: /somewhere/ca.pem
|
|
||||||
client_key: /somewhere/key.pem
|
|
||||||
client_cert: /somewhere/cert.pem
|
|
||||||
|
|
||||||
# Example using constructed features to create groups and set ansible_host
|
|
||||||
plugin: community.general.docker_swarm
|
|
||||||
docker_host: tcp://my-docker-host:2375
|
|
||||||
strict: False
|
|
||||||
keyed_groups:
|
|
||||||
# add e.g. x86_64 hosts to an arch_x86_64 group
|
|
||||||
- prefix: arch
|
|
||||||
key: 'Description.Platform.Architecture'
|
|
||||||
# add e.g. linux hosts to an os_linux group
|
|
||||||
- prefix: os
|
|
||||||
key: 'Description.Platform.OS'
|
|
||||||
# create a group per node label
|
|
||||||
# e.g. a node labeled w/ "production" ends up in group "label_production"
|
|
||||||
# hint: labels containing special characters will be converted to safe names
|
|
||||||
- key: 'Spec.Labels'
|
|
||||||
prefix: label
|
|
||||||
'''
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
from ansible.module_utils._text import to_native
|
|
||||||
from ansible_collections.community.general.plugins.module_utils.docker.common import update_tls_hostname, get_connect_params
|
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
|
||||||
from ansible.parsing.utils.addresses import parse_address
|
|
||||||
|
|
||||||
try:
|
|
||||||
import docker
|
|
||||||
HAS_DOCKER = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_DOCKER = False
|
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
|
||||||
''' Host inventory parser for ansible using Docker swarm as source. '''
|
|
||||||
|
|
||||||
NAME = 'community.general.docker_swarm'
|
|
||||||
|
|
||||||
def _fail(self, msg):
|
|
||||||
raise AnsibleError(msg)
|
|
||||||
|
|
||||||
def _populate(self):
|
|
||||||
raw_params = dict(
|
|
||||||
docker_host=self.get_option('docker_host'),
|
|
||||||
tls=self.get_option('tls'),
|
|
||||||
tls_verify=self.get_option('validate_certs'),
|
|
||||||
key_path=self.get_option('client_key'),
|
|
||||||
cacert_path=self.get_option('ca_cert'),
|
|
||||||
cert_path=self.get_option('client_cert'),
|
|
||||||
tls_hostname=self.get_option('tls_hostname'),
|
|
||||||
api_version=self.get_option('api_version'),
|
|
||||||
timeout=self.get_option('timeout'),
|
|
||||||
ssl_version=self.get_option('ssl_version'),
|
|
||||||
debug=None,
|
|
||||||
)
|
|
||||||
update_tls_hostname(raw_params)
|
|
||||||
connect_params = get_connect_params(raw_params, fail_function=self._fail)
|
|
||||||
self.client = docker.DockerClient(**connect_params)
|
|
||||||
self.inventory.add_group('all')
|
|
||||||
self.inventory.add_group('manager')
|
|
||||||
self.inventory.add_group('worker')
|
|
||||||
self.inventory.add_group('leader')
|
|
||||||
self.inventory.add_group('nonleaders')
|
|
||||||
|
|
||||||
if self.get_option('include_host_uri'):
|
|
||||||
if self.get_option('include_host_uri_port'):
|
|
||||||
host_uri_port = str(self.get_option('include_host_uri_port'))
|
|
||||||
elif self.get_option('tls') or self.get_option('validate_certs'):
|
|
||||||
host_uri_port = '2376'
|
|
||||||
else:
|
|
||||||
host_uri_port = '2375'
|
|
||||||
|
|
||||||
try:
|
|
||||||
self.nodes = self.client.nodes.list()
|
|
||||||
for self.node in self.nodes:
|
|
||||||
self.node_attrs = self.client.nodes.get(self.node.id).attrs
|
|
||||||
self.inventory.add_host(self.node_attrs['ID'])
|
|
||||||
self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
|
|
||||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
|
|
||||||
self.node_attrs['Status']['Addr'])
|
|
||||||
if self.get_option('include_host_uri'):
|
|
||||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
|
|
||||||
'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
|
|
||||||
if self.get_option('verbose_output'):
|
|
||||||
self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
|
|
||||||
if 'ManagerStatus' in self.node_attrs:
|
|
||||||
if self.node_attrs['ManagerStatus'].get('Leader'):
|
|
||||||
# This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
|
|
||||||
# Check moby/moby#35437 for details
|
|
||||||
swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
|
|
||||||
self.node_attrs['Status']['Addr']
|
|
||||||
if self.get_option('include_host_uri'):
|
|
||||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
|
|
||||||
'tcp://' + swarm_leader_ip + ':' + host_uri_port)
|
|
||||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
|
|
||||||
self.inventory.add_host(self.node_attrs['ID'], group='leader')
|
|
||||||
else:
|
|
||||||
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
|
|
||||||
else:
|
|
||||||
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
|
|
||||||
# Use constructed if applicable
|
|
||||||
strict = self.get_option('strict')
|
|
||||||
# Composed variables
|
|
||||||
self._set_composite_vars(self.get_option('compose'),
|
|
||||||
self.node_attrs,
|
|
||||||
self.node_attrs['ID'],
|
|
||||||
strict=strict)
|
|
||||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
|
||||||
self._add_host_to_composed_groups(self.get_option('groups'),
|
|
||||||
self.node_attrs,
|
|
||||||
self.node_attrs['ID'],
|
|
||||||
strict=strict)
|
|
||||||
# Create groups based on variable values and add the corresponding hosts to it
|
|
||||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
|
|
||||||
self.node_attrs,
|
|
||||||
self.node_attrs['ID'],
|
|
||||||
strict=strict)
|
|
||||||
except Exception as e:
|
|
||||||
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
|
|
||||||
to_native(e))
|
|
||||||
|
|
||||||
def verify_file(self, path):
|
|
||||||
"""Return the possibly of a file being consumable by this plugin."""
|
|
||||||
return (
|
|
||||||
super(InventoryModule, self).verify_file(path) and
|
|
||||||
path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
|
|
||||||
|
|
||||||
def parse(self, inventory, loader, path, cache=True):
|
|
||||||
if not HAS_DOCKER:
|
|
||||||
raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
|
|
||||||
'https://github.com/docker/docker-py.')
|
|
||||||
super(InventoryModule, self).parse(inventory, loader, path, cache)
|
|
||||||
self._read_config_data(path)
|
|
||||||
self._populate()
|
|
||||||
@@ -9,7 +9,6 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: gitlab_runners
|
name: gitlab_runners
|
||||||
plugin_type: inventory
|
|
||||||
author:
|
author:
|
||||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||||
short_description: Ansible dynamic inventory plugin for GitLab runners.
|
short_description: Ansible dynamic inventory plugin for GitLab runners.
|
||||||
|
|||||||
@@ -1,256 +0,0 @@
|
|||||||
# Copyright (c) 2018 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
name: kubevirt
|
|
||||||
plugin_type: inventory
|
|
||||||
author:
|
|
||||||
- KubeVirt Team (@kubevirt)
|
|
||||||
|
|
||||||
short_description: KubeVirt inventory source
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- inventory_cache
|
|
||||||
- constructed
|
|
||||||
description:
|
|
||||||
- Fetch running VirtualMachines for one or more namespaces.
|
|
||||||
- Groups by namespace, namespace_vms and labels.
|
|
||||||
- Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
|
|
||||||
|
|
||||||
options:
|
|
||||||
plugin:
|
|
||||||
description: token that ensures this is a source file for the 'kubevirt' plugin.
|
|
||||||
required: True
|
|
||||||
choices: ['kubevirt', 'community.general.kubevirt']
|
|
||||||
type: str
|
|
||||||
host_format:
|
|
||||||
description:
|
|
||||||
- Specify the format of the host in the inventory group.
|
|
||||||
default: "{namespace}-{name}-{uid}"
|
|
||||||
connections:
|
|
||||||
type: list
|
|
||||||
description:
|
|
||||||
- Optional list of cluster connection settings. If no connections are provided, the default
|
|
||||||
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
|
|
||||||
the active user is authorized to access.
|
|
||||||
suboptions:
|
|
||||||
name:
|
|
||||||
description:
|
|
||||||
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
|
|
||||||
and port.
|
|
||||||
type: str
|
|
||||||
kubeconfig:
|
|
||||||
description:
|
|
||||||
- Path to an existing Kubernetes config file. If not provided, and no other connection
|
|
||||||
options are provided, the OpenShift client will attempt to load the default
|
|
||||||
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
|
|
||||||
environment variable.
|
|
||||||
type: str
|
|
||||||
context:
|
|
||||||
description:
|
|
||||||
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
|
|
||||||
variable.
|
|
||||||
type: str
|
|
||||||
host:
|
|
||||||
description:
|
|
||||||
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
|
|
||||||
type: str
|
|
||||||
api_key:
|
|
||||||
description:
|
|
||||||
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
|
|
||||||
variable.
|
|
||||||
type: str
|
|
||||||
username:
|
|
||||||
description:
|
|
||||||
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
|
|
||||||
environment variable.
|
|
||||||
type: str
|
|
||||||
password:
|
|
||||||
description:
|
|
||||||
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
|
|
||||||
environment variable.
|
|
||||||
type: str
|
|
||||||
cert_file:
|
|
||||||
description:
|
|
||||||
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
|
|
||||||
environment variable.
|
|
||||||
type: str
|
|
||||||
key_file:
|
|
||||||
description:
|
|
||||||
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_HOST
|
|
||||||
environment variable.
|
|
||||||
type: str
|
|
||||||
ssl_ca_cert:
|
|
||||||
description:
|
|
||||||
- Path to a CA certificate used to authenticate with the API. Can also be specified via
|
|
||||||
K8S_AUTH_SSL_CA_CERT environment variable.
|
|
||||||
type: str
|
|
||||||
verify_ssl:
|
|
||||||
description:
|
|
||||||
- "Whether or not to verify the API server's SSL certificates. Can also be specified via
|
|
||||||
K8S_AUTH_VERIFY_SSL environment variable."
|
|
||||||
type: bool
|
|
||||||
namespaces:
|
|
||||||
description:
|
|
||||||
- List of namespaces. If not specified, will fetch all virtual machines for all namespaces user is authorized
|
|
||||||
to access.
|
|
||||||
type: list
|
|
||||||
network_name:
|
|
||||||
description:
|
|
||||||
- In case of multiple network attached to virtual machine, define which interface should be returned as primary IP
|
|
||||||
address.
|
|
||||||
type: str
|
|
||||||
aliases: [ interface_name ]
|
|
||||||
api_version:
|
|
||||||
description:
|
|
||||||
- "Specify the KubeVirt API version."
|
|
||||||
type: str
|
|
||||||
annotation_variable:
|
|
||||||
description:
|
|
||||||
- "Specify the name of the annotation which provides data, which should be used as inventory host variables."
|
|
||||||
- "Note, that the value in ansible annotations should be json."
|
|
||||||
type: str
|
|
||||||
default: 'ansible'
|
|
||||||
requirements:
|
|
||||||
- "openshift >= 0.6"
|
|
||||||
- "PyYAML >= 3.11"
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
# File must be named kubevirt.yaml or kubevirt.yml
|
|
||||||
|
|
||||||
# Authenticate with token, and return all virtual machines for all namespaces
|
|
||||||
plugin: community.general.kubevirt
|
|
||||||
connections:
|
|
||||||
- host: https://kubevirt.io
|
|
||||||
token: xxxxxxxxxxxxxxxx
|
|
||||||
ssl_verify: false
|
|
||||||
|
|
||||||
# Use default config (~/.kube/config) file and active context, and return vms with interfaces
|
|
||||||
# connected to network myovsnetwork and from namespace vms
|
|
||||||
plugin: community.general.kubevirt
|
|
||||||
connections:
|
|
||||||
- namespaces:
|
|
||||||
- vms
|
|
||||||
network_name: myovsnetwork
|
|
||||||
'''
|
|
||||||
|
|
||||||
import json
|
|
||||||
|
|
||||||
from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
|
|
||||||
|
|
||||||
try:
|
|
||||||
from openshift.dynamic.exceptions import DynamicApiError
|
|
||||||
except ImportError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
API_VERSION = 'kubevirt.io/v1alpha3'
|
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(K8sInventoryModule):
|
|
||||||
NAME = 'community.general.kubevirt'
|
|
||||||
|
|
||||||
def setup(self, config_data, cache, cache_key):
|
|
||||||
self.config_data = config_data
|
|
||||||
super(InventoryModule, self).setup(config_data, cache, cache_key)
|
|
||||||
|
|
||||||
def fetch_objects(self, connections):
|
|
||||||
client = self.get_api_client()
|
|
||||||
vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
|
|
||||||
|
|
||||||
if connections:
|
|
||||||
for connection in connections:
|
|
||||||
client = self.get_api_client(**connection)
|
|
||||||
name = connection.get('name', self.get_default_host_name(client.configuration.host))
|
|
||||||
if connection.get('namespaces'):
|
|
||||||
namespaces = connection['namespaces']
|
|
||||||
else:
|
|
||||||
namespaces = self.get_available_namespaces(client)
|
|
||||||
interface_name = connection.get('network_name')
|
|
||||||
api_version = connection.get('api_version', API_VERSION)
|
|
||||||
annotation_variable = connection.get('annotation_variable', 'ansible')
|
|
||||||
for namespace in namespaces:
|
|
||||||
self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
|
|
||||||
else:
|
|
||||||
name = self.get_default_host_name(client.configuration.host)
|
|
||||||
namespaces = self.get_available_namespaces(client)
|
|
||||||
for namespace in namespaces:
|
|
||||||
self.get_vms_for_namespace(client, name, namespace, vm_format, None, api_version, annotation_variable)
|
|
||||||
|
|
||||||
def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
|
|
||||||
v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
|
|
||||||
try:
|
|
||||||
obj = v1_vm.get(namespace=namespace)
|
|
||||||
except DynamicApiError as exc:
|
|
||||||
self.display.debug(exc)
|
|
||||||
raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))
|
|
||||||
|
|
||||||
namespace_group = 'namespace_{0}'.format(namespace)
|
|
||||||
namespace_vms_group = '{0}_vms'.format(namespace_group)
|
|
||||||
|
|
||||||
name = self._sanitize_group_name(name)
|
|
||||||
namespace_group = self._sanitize_group_name(namespace_group)
|
|
||||||
namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
|
|
||||||
self.inventory.add_group(name)
|
|
||||||
self.inventory.add_group(namespace_group)
|
|
||||||
self.inventory.add_child(name, namespace_group)
|
|
||||||
self.inventory.add_group(namespace_vms_group)
|
|
||||||
self.inventory.add_child(namespace_group, namespace_vms_group)
|
|
||||||
for vm in obj.items:
|
|
||||||
if not (vm.status and vm.status.interfaces):
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Find interface by its name:
|
|
||||||
if interface_name is None:
|
|
||||||
interface = vm.status.interfaces[0]
|
|
||||||
else:
|
|
||||||
interface = next(
|
|
||||||
(i for i in vm.status.interfaces if i.name == interface_name),
|
|
||||||
None
|
|
||||||
)
|
|
||||||
|
|
||||||
# If interface is not found or IP address is not reported skip this VM:
|
|
||||||
if interface is None or interface.ipAddress is None:
|
|
||||||
continue
|
|
||||||
|
|
||||||
vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
|
|
||||||
vm_ip = interface.ipAddress
|
|
||||||
vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)
|
|
||||||
|
|
||||||
self.inventory.add_host(vm_name)
|
|
||||||
|
|
||||||
if vm.metadata.labels:
|
|
||||||
# create a group for each label_value
|
|
||||||
for key, value in vm.metadata.labels:
|
|
||||||
group_name = 'label_{0}_{1}'.format(key, value)
|
|
||||||
group_name = self._sanitize_group_name(group_name)
|
|
||||||
self.inventory.add_group(group_name)
|
|
||||||
self.inventory.add_child(group_name, vm_name)
|
|
||||||
vm_labels = dict(vm.metadata.labels)
|
|
||||||
else:
|
|
||||||
vm_labels = {}
|
|
||||||
|
|
||||||
self.inventory.add_child(namespace_vms_group, vm_name)
|
|
||||||
|
|
||||||
# add hostvars
|
|
||||||
self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
|
|
||||||
self.inventory.set_variable(vm_name, 'labels', vm_labels)
|
|
||||||
self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
|
|
||||||
self.inventory.set_variable(vm_name, 'object_type', 'vm')
|
|
||||||
self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
|
|
||||||
self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)
|
|
||||||
|
|
||||||
# Add all variables which are listed in 'ansible' annotation:
|
|
||||||
annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
|
|
||||||
for k, v in annotations_data.items():
|
|
||||||
self.inventory.set_variable(vm_name, k, v)
|
|
||||||
|
|
||||||
def verify_file(self, path):
|
|
||||||
if super(InventoryModule, self).verify_file(path):
|
|
||||||
if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
@@ -6,7 +6,6 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
name: linode
|
name: linode
|
||||||
plugin_type: inventory
|
|
||||||
author:
|
author:
|
||||||
- Luke Murphy (@decentral1se)
|
- Luke Murphy (@decentral1se)
|
||||||
short_description: Ansible dynamic inventory plugin for Linode.
|
short_description: Ansible dynamic inventory plugin for Linode.
|
||||||
@@ -17,7 +16,10 @@ DOCUMENTATION = r'''
|
|||||||
- Reads inventories from the Linode API v4.
|
- Reads inventories from the Linode API v4.
|
||||||
- Uses a YAML configuration file that ends with linode.(yml|yaml).
|
- Uses a YAML configuration file that ends with linode.(yml|yaml).
|
||||||
- Linode labels are used by default as the hostnames.
|
- Linode labels are used by default as the hostnames.
|
||||||
- The inventory groups are built from groups and not tags.
|
- The default inventory groups are built from groups (deprecated by
|
||||||
|
Linode) and not tags.
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- constructed
|
||||||
options:
|
options:
|
||||||
plugin:
|
plugin:
|
||||||
description: marks this as an instance of the 'linode' plugin
|
description: marks this as an instance of the 'linode' plugin
|
||||||
@@ -32,10 +34,26 @@ DOCUMENTATION = r'''
|
|||||||
description: Populate inventory with instances in this region.
|
description: Populate inventory with instances in this region.
|
||||||
default: []
|
default: []
|
||||||
type: list
|
type: list
|
||||||
|
required: false
|
||||||
|
tags:
|
||||||
|
description: Populate inventory only with instances which have at least one of the tags listed here.
|
||||||
|
default: []
|
||||||
|
type: list
|
||||||
|
reqired: false
|
||||||
|
version_added: 2.0.0
|
||||||
types:
|
types:
|
||||||
description: Populate inventory with instances with this type.
|
description: Populate inventory with instances with this type.
|
||||||
default: []
|
default: []
|
||||||
type: list
|
type: list
|
||||||
|
required: false
|
||||||
|
strict:
|
||||||
|
version_added: 2.0.0
|
||||||
|
compose:
|
||||||
|
version_added: 2.0.0
|
||||||
|
groups:
|
||||||
|
version_added: 2.0.0
|
||||||
|
keyed_groups:
|
||||||
|
version_added: 2.0.0
|
||||||
'''
|
'''
|
||||||
|
|
||||||
EXAMPLES = r'''
|
EXAMPLES = r'''
|
||||||
@@ -49,13 +67,27 @@ regions:
|
|||||||
- eu-west
|
- eu-west
|
||||||
types:
|
types:
|
||||||
- g5-standard-2
|
- g5-standard-2
|
||||||
|
|
||||||
|
# Example with keyed_groups, groups, and compose
|
||||||
|
plugin: community.general.linode
|
||||||
|
access_token: foobar
|
||||||
|
keyed_groups:
|
||||||
|
- key: tags
|
||||||
|
separator: ''
|
||||||
|
- key: region
|
||||||
|
prefix: region
|
||||||
|
groups:
|
||||||
|
webservers: "'web' in (tags|list)"
|
||||||
|
mailservers: "'mail' in (tags|list)"
|
||||||
|
compose:
|
||||||
|
ansible_port: 2222
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from ansible.errors import AnsibleError, AnsibleParserError
|
from ansible.errors import AnsibleError, AnsibleParserError
|
||||||
from ansible.module_utils.six import string_types
|
from ansible.module_utils.six import string_types
|
||||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||||
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -66,7 +98,7 @@ except ImportError:
|
|||||||
HAS_LINODE = False
|
HAS_LINODE = False
|
||||||
|
|
||||||
|
|
||||||
class InventoryModule(BaseInventoryPlugin):
|
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||||
|
|
||||||
NAME = 'community.general.linode'
|
NAME = 'community.general.linode'
|
||||||
|
|
||||||
@@ -109,7 +141,7 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
for linode_group in self.linode_groups:
|
for linode_group in self.linode_groups:
|
||||||
self.inventory.add_group(linode_group)
|
self.inventory.add_group(linode_group)
|
||||||
|
|
||||||
def _filter_by_config(self, regions, types):
|
def _filter_by_config(self, regions, types, tags):
|
||||||
"""Filter instances by user specified configuration."""
|
"""Filter instances by user specified configuration."""
|
||||||
if regions:
|
if regions:
|
||||||
self.instances = [
|
self.instances = [
|
||||||
@@ -123,6 +155,12 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
if instance.type.id in types
|
if instance.type.id in types
|
||||||
]
|
]
|
||||||
|
|
||||||
|
if tags:
|
||||||
|
self.instances = [
|
||||||
|
instance for instance in self.instances
|
||||||
|
if any(tag in instance.tags for tag in tags)
|
||||||
|
]
|
||||||
|
|
||||||
def _add_instances_to_groups(self):
|
def _add_instances_to_groups(self):
|
||||||
"""Add instance names to their dynamic inventory groups."""
|
"""Add instance names to their dynamic inventory groups."""
|
||||||
for instance in self.instances:
|
for instance in self.instances:
|
||||||
@@ -167,6 +205,10 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
'type_to_be': list,
|
'type_to_be': list,
|
||||||
'value': config_data.get('types', [])
|
'value': config_data.get('types', [])
|
||||||
},
|
},
|
||||||
|
'tags': {
|
||||||
|
'type_to_be': list,
|
||||||
|
'value': config_data.get('tags', [])
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for name in options:
|
for name in options:
|
||||||
@@ -178,8 +220,9 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
|
|
||||||
regions = options['regions']['value']
|
regions = options['regions']['value']
|
||||||
types = options['types']['value']
|
types = options['types']['value']
|
||||||
|
tags = options['tags']['value']
|
||||||
|
|
||||||
return regions, types
|
return regions, types, tags
|
||||||
|
|
||||||
def verify_file(self, path):
|
def verify_file(self, path):
|
||||||
"""Verify the Linode configuration file."""
|
"""Verify the Linode configuration file."""
|
||||||
@@ -201,9 +244,27 @@ class InventoryModule(BaseInventoryPlugin):
|
|||||||
|
|
||||||
self._get_instances_inventory()
|
self._get_instances_inventory()
|
||||||
|
|
||||||
regions, types = self._get_query_options(config_data)
|
strict = self.get_option('strict')
|
||||||
self._filter_by_config(regions, types)
|
regions, types, tags = self._get_query_options(config_data)
|
||||||
|
self._filter_by_config(regions, types, tags)
|
||||||
|
|
||||||
self._add_groups()
|
self._add_groups()
|
||||||
self._add_instances_to_groups()
|
self._add_instances_to_groups()
|
||||||
self._add_hostvars_for_instances()
|
self._add_hostvars_for_instances()
|
||||||
|
for instance in self.instances:
|
||||||
|
variables = self.inventory.get_host(instance.label).get_vars()
|
||||||
|
self._add_host_to_composed_groups(
|
||||||
|
self.get_option('groups'),
|
||||||
|
variables,
|
||||||
|
instance.label,
|
||||||
|
strict=strict)
|
||||||
|
self._add_host_to_keyed_groups(
|
||||||
|
self.get_option('keyed_groups'),
|
||||||
|
variables,
|
||||||
|
instance.label,
|
||||||
|
strict=strict)
|
||||||
|
self._set_composite_vars(
|
||||||
|
self.get_option('compose'),
|
||||||
|
variables,
|
||||||
|
instance.label,
|
||||||
|
strict=strict)
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: nmap
|
name: nmap
|
||||||
plugin_type: inventory
|
|
||||||
short_description: Uses nmap to find hosts to target
|
short_description: Uses nmap to find hosts to target
|
||||||
description:
|
description:
|
||||||
- Uses a YAML configuration file with a valid YAML extension.
|
- Uses a YAML configuration file with a valid YAML extension.
|
||||||
@@ -72,25 +71,6 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
self._nmap = None
|
self._nmap = None
|
||||||
super(InventoryModule, self).__init__()
|
super(InventoryModule, self).__init__()
|
||||||
|
|
||||||
def _populate(self, hosts):
|
|
||||||
# Use constructed if applicable
|
|
||||||
strict = self.get_option('strict')
|
|
||||||
|
|
||||||
for host in hosts:
|
|
||||||
hostname = host['name']
|
|
||||||
self.inventory.add_host(hostname)
|
|
||||||
for var, value in host.items():
|
|
||||||
self.inventory.set_variable(hostname, var, value)
|
|
||||||
|
|
||||||
# Composed variables
|
|
||||||
self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
|
|
||||||
|
|
||||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
|
||||||
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
|
|
||||||
|
|
||||||
# Create groups based on variable values and add the corresponding hosts to it
|
|
||||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
|
|
||||||
|
|
||||||
def verify_file(self, path):
|
def verify_file(self, path):
|
||||||
|
|
||||||
valid = False
|
valid = False
|
||||||
@@ -102,7 +82,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
|
|
||||||
return valid
|
return valid
|
||||||
|
|
||||||
def parse(self, inventory, loader, path, cache=True):
|
def parse(self, inventory, loader, path, cache=False):
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self._nmap = get_bin_path('nmap')
|
self._nmap = get_bin_path('nmap')
|
||||||
@@ -113,101 +93,75 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
|||||||
|
|
||||||
self._read_config_data(path)
|
self._read_config_data(path)
|
||||||
|
|
||||||
cache_key = self.get_cache_key(path)
|
# setup command
|
||||||
|
cmd = [self._nmap]
|
||||||
|
if not self._options['ports']:
|
||||||
|
cmd.append('-sP')
|
||||||
|
|
||||||
# cache may be True or False at this point to indicate if the inventory is being refreshed
|
if self._options['ipv4'] and not self._options['ipv6']:
|
||||||
# get the user's cache option too to see if we should save the cache if it is changing
|
cmd.append('-4')
|
||||||
user_cache_setting = self.get_option('cache')
|
elif self._options['ipv6'] and not self._options['ipv4']:
|
||||||
|
cmd.append('-6')
|
||||||
|
elif not self._options['ipv6'] and not self._options['ipv4']:
|
||||||
|
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
|
||||||
|
|
||||||
# read if the user has caching enabled and the cache isn't being refreshed
|
if self._options['exclude']:
|
||||||
attempt_to_read_cache = user_cache_setting and cache
|
cmd.append('--exclude')
|
||||||
# update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below
|
cmd.append(','.join(self._options['exclude']))
|
||||||
cache_needs_update = user_cache_setting and not cache
|
|
||||||
|
cmd.append(self._options['address'])
|
||||||
|
try:
|
||||||
|
# execute
|
||||||
|
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
|
||||||
|
stdout, stderr = p.communicate()
|
||||||
|
if p.returncode != 0:
|
||||||
|
raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
|
||||||
|
|
||||||
|
# parse results
|
||||||
|
host = None
|
||||||
|
ip = None
|
||||||
|
ports = []
|
||||||
|
|
||||||
if attempt_to_read_cache:
|
|
||||||
try:
|
try:
|
||||||
results = self._cache[cache_key]
|
t_stdout = to_text(stdout, errors='surrogate_or_strict')
|
||||||
except KeyError:
|
except UnicodeError as e:
|
||||||
# This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
|
raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
|
||||||
cache_needs_update = True
|
|
||||||
|
|
||||||
if cache_needs_update:
|
for line in t_stdout.splitlines():
|
||||||
# setup command
|
hits = self.find_host.match(line)
|
||||||
cmd = [self._nmap]
|
if hits:
|
||||||
if not self._options['ports']:
|
if host is not None:
|
||||||
cmd.append('-sP')
|
self.inventory.set_variable(host, 'ports', ports)
|
||||||
|
|
||||||
if self._options['ipv4'] and not self._options['ipv6']:
|
# if dns only shows arpa, just use ip instead as hostname
|
||||||
cmd.append('-4')
|
if hits.group(1).endswith('.in-addr.arpa'):
|
||||||
elif self._options['ipv6'] and not self._options['ipv4']:
|
host = hits.group(2)
|
||||||
cmd.append('-6')
|
else:
|
||||||
elif not self._options['ipv6'] and not self._options['ipv4']:
|
host = hits.group(1)
|
||||||
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
|
|
||||||
|
|
||||||
if self._options['exclude']:
|
# if no reverse dns exists, just use ip instead as hostname
|
||||||
cmd.append('--exclude')
|
if hits.group(2) is not None:
|
||||||
cmd.append(','.join(self._options['exclude']))
|
ip = hits.group(2)
|
||||||
|
else:
|
||||||
|
ip = hits.group(1)
|
||||||
|
|
||||||
cmd.append(self._options['address'])
|
if host is not None:
|
||||||
try:
|
# update inventory
|
||||||
# execute
|
self.inventory.add_host(host)
|
||||||
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
|
self.inventory.set_variable(host, 'ip', ip)
|
||||||
stdout, stderr = p.communicate()
|
ports = []
|
||||||
if p.returncode != 0:
|
continue
|
||||||
raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
|
|
||||||
|
|
||||||
# parse results
|
host_ports = self.find_port.match(line)
|
||||||
host = None
|
if host is not None and host_ports:
|
||||||
ip = None
|
ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)})
|
||||||
ports = []
|
continue
|
||||||
results = []
|
|
||||||
|
|
||||||
try:
|
# TODO: parse more data, OS?
|
||||||
t_stdout = to_text(stdout, errors='surrogate_or_strict')
|
|
||||||
except UnicodeError as e:
|
|
||||||
raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
|
|
||||||
|
|
||||||
for line in t_stdout.splitlines():
|
# if any leftovers
|
||||||
hits = self.find_host.match(line)
|
if host and ports:
|
||||||
if hits:
|
self.inventory.set_variable(host, 'ports', ports)
|
||||||
if host is not None and ports:
|
|
||||||
results[-1]['ports'] = ports
|
|
||||||
|
|
||||||
# if dns only shows arpa, just use ip instead as hostname
|
except Exception as e:
|
||||||
if hits.group(1).endswith('.in-addr.arpa'):
|
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
|
||||||
host = hits.group(2)
|
|
||||||
else:
|
|
||||||
host = hits.group(1)
|
|
||||||
|
|
||||||
# if no reverse dns exists, just use ip instead as hostname
|
|
||||||
if hits.group(2) is not None:
|
|
||||||
ip = hits.group(2)
|
|
||||||
else:
|
|
||||||
ip = hits.group(1)
|
|
||||||
|
|
||||||
if host is not None:
|
|
||||||
# update inventory
|
|
||||||
results.append(dict())
|
|
||||||
results[-1]['name'] = host
|
|
||||||
results[-1]['ip'] = ip
|
|
||||||
ports = []
|
|
||||||
continue
|
|
||||||
|
|
||||||
host_ports = self.find_port.match(line)
|
|
||||||
if host is not None and host_ports:
|
|
||||||
ports.append({'port': host_ports.group(1),
|
|
||||||
'protocol': host_ports.group(2),
|
|
||||||
'state': host_ports.group(3),
|
|
||||||
'service': host_ports.group(4)})
|
|
||||||
continue
|
|
||||||
|
|
||||||
# if any leftovers
|
|
||||||
if host and ports:
|
|
||||||
results[-1]['ports'] = ports
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
|
|
||||||
|
|
||||||
self._cache[cache_key] = results
|
|
||||||
|
|
||||||
self._populate(results)
|
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
name: online
|
name: online
|
||||||
plugin_type: inventory
|
|
||||||
author:
|
author:
|
||||||
- Remy Leone (@sieben)
|
- Remy Leone (@sieben)
|
||||||
short_description: Scaleway (previously Online SAS or Online.net) inventory source
|
short_description: Scaleway (previously Online SAS or Online.net) inventory source
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: proxmox
|
name: proxmox
|
||||||
plugin_type: inventory
|
|
||||||
short_description: Proxmox inventory source
|
short_description: Proxmox inventory source
|
||||||
version_added: "1.2.0"
|
version_added: "1.2.0"
|
||||||
author:
|
author:
|
||||||
@@ -28,17 +27,32 @@ DOCUMENTATION = '''
|
|||||||
choices: ['community.general.proxmox']
|
choices: ['community.general.proxmox']
|
||||||
type: str
|
type: str
|
||||||
url:
|
url:
|
||||||
description: URL to Proxmox cluster.
|
description:
|
||||||
|
- URL to Proxmox cluster.
|
||||||
|
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead.
|
||||||
default: 'http://localhost:8006'
|
default: 'http://localhost:8006'
|
||||||
type: str
|
type: str
|
||||||
|
env:
|
||||||
|
- name: PROXMOX_URL
|
||||||
|
version_added: 2.0.0
|
||||||
user:
|
user:
|
||||||
description: Proxmox authentication user.
|
description:
|
||||||
|
- Proxmox authentication user.
|
||||||
|
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead.
|
||||||
required: yes
|
required: yes
|
||||||
type: str
|
type: str
|
||||||
|
env:
|
||||||
|
- name: PROXMOX_USER
|
||||||
|
version_added: 2.0.0
|
||||||
password:
|
password:
|
||||||
description: Proxmox authentication password.
|
description:
|
||||||
|
- Proxmox authentication password.
|
||||||
|
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead.
|
||||||
required: yes
|
required: yes
|
||||||
type: str
|
type: str
|
||||||
|
env:
|
||||||
|
- name: PROXMOX_PASSWORD
|
||||||
|
version_added: 2.0.0
|
||||||
validate_certs:
|
validate_certs:
|
||||||
description: Verify SSL certificate if using HTTPS.
|
description: Verify SSL certificate if using HTTPS.
|
||||||
type: boolean
|
type: boolean
|
||||||
@@ -325,8 +339,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
|||||||
|
|
||||||
for member in self._get_members_per_pool(pool['poolid']):
|
for member in self._get_members_per_pool(pool['poolid']):
|
||||||
if member.get('name'):
|
if member.get('name'):
|
||||||
if not member.get('template'):
|
self.inventory.add_child(pool_group, member['name'])
|
||||||
self.inventory.add_child(pool_group, member['name'])
|
|
||||||
|
|
||||||
def parse(self, inventory, loader, path, cache=True):
|
def parse(self, inventory, loader, path, cache=True):
|
||||||
if not HAS_REQUESTS:
|
if not HAS_REQUESTS:
|
||||||
@@ -339,7 +352,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
|
|||||||
self._read_config_data(path)
|
self._read_config_data(path)
|
||||||
|
|
||||||
# get connection host
|
# get connection host
|
||||||
self.proxmox_url = self.get_option('url').rstrip('/')
|
self.proxmox_url = self.get_option('url')
|
||||||
self.proxmox_user = self.get_option('user')
|
self.proxmox_user = self.get_option('user')
|
||||||
self.proxmox_password = self.get_option('password')
|
self.proxmox_password = self.get_option('password')
|
||||||
self.cache_key = self.get_cache_key(path)
|
self.cache_key = self.get_cache_key(path)
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: scaleway
|
name: scaleway
|
||||||
plugin_type: inventory
|
|
||||||
author:
|
author:
|
||||||
- Remy Leone (@sieben)
|
- Remy Leone (@sieben)
|
||||||
short_description: Scaleway inventory source
|
short_description: Scaleway inventory source
|
||||||
|
|||||||
@@ -8,11 +8,8 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
name: stackpath_compute
|
name: stackpath_compute
|
||||||
plugin_type: inventory
|
|
||||||
short_description: StackPath Edge Computing inventory source
|
short_description: StackPath Edge Computing inventory source
|
||||||
version_added: 1.2.0
|
version_added: 1.2.0
|
||||||
author:
|
|
||||||
- UNKNOWN (@shayrybak)
|
|
||||||
extends_documentation_fragment:
|
extends_documentation_fragment:
|
||||||
- inventory_cache
|
- inventory_cache
|
||||||
- constructed
|
- constructed
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
name: virtualbox
|
name: virtualbox
|
||||||
plugin_type: inventory
|
|
||||||
short_description: virtualbox inventory source
|
short_description: virtualbox inventory source
|
||||||
description:
|
description:
|
||||||
- Get inventory hosts from the local virtualbox installation.
|
- Get inventory hosts from the local virtualbox installation.
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
lookup: cartesian
|
name: cartesian
|
||||||
short_description: returns the cartesian product of lists
|
short_description: returns the cartesian product of lists
|
||||||
description:
|
description:
|
||||||
- Takes the input lists and returns a list that represents the product of the input lists.
|
- Takes the input lists and returns a list that represents the product of the input lists.
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
lookup: chef_databag
|
name: chef_databag
|
||||||
short_description: fetches data from a Chef Databag
|
short_description: fetches data from a Chef Databag
|
||||||
description:
|
description:
|
||||||
- "This is a lookup plugin to provide access to chef data bags using the pychef package.
|
- "This is a lookup plugin to provide access to chef data bags using the pychef package.
|
||||||
@@ -81,7 +81,7 @@ class LookupModule(LookupBase):
|
|||||||
)
|
)
|
||||||
if args:
|
if args:
|
||||||
raise AnsibleError(
|
raise AnsibleError(
|
||||||
"unrecognized arguments to with_sequence: %r" % list(args.keys())
|
"unrecognized arguments to with_sequence: %r" % args.keys()
|
||||||
)
|
)
|
||||||
|
|
||||||
def run(self, terms, variables=None, **kwargs):
|
def run(self, terms, variables=None, **kwargs):
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
lookup: consul_kv
|
name: consul_kv
|
||||||
short_description: Fetch metadata from a Consul key value store.
|
short_description: Fetch metadata from a Consul key value store.
|
||||||
description:
|
description:
|
||||||
- Lookup metadata for a playbook from the key value store in a Consul cluster.
|
- Lookup metadata for a playbook from the key value store in a Consul cluster.
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
lookup: credstash
|
name: credstash
|
||||||
short_description: retrieve secrets from Credstash on AWS
|
short_description: retrieve secrets from Credstash on AWS
|
||||||
requirements:
|
requirements:
|
||||||
- credstash (python library)
|
- credstash (python library)
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ __metaclass__ = type
|
|||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author: Unknown (!UNKNOWN)
|
author: Unknown (!UNKNOWN)
|
||||||
lookup: cyberarkpassword
|
name: cyberarkpassword
|
||||||
short_description: get secrets from CyberArk AIM
|
short_description: get secrets from CyberArk AIM
|
||||||
requirements:
|
requirements:
|
||||||
- CyberArk AIM tool installed
|
- CyberArk AIM tool installed
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
lookup: dig
|
name: dig
|
||||||
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
||||||
short_description: query DNS using the dnspython library
|
short_description: query DNS using the dnspython library
|
||||||
requirements:
|
requirements:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
lookup: dnstxt
|
name: dnstxt
|
||||||
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
|
||||||
short_description: query a domain(s)'s DNS txt fields
|
short_description: query a domain(s)'s DNS txt fields
|
||||||
requirements:
|
requirements:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from __future__ import absolute_import, division, print_function
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r"""
|
DOCUMENTATION = r"""
|
||||||
lookup: dsv
|
name: dsv
|
||||||
author: Adam Migus (@amigus) <adam@migus.org>
|
author: Adam Migus (@amigus) <adam@migus.org>
|
||||||
short_description: Get secrets from Thycotic DevOps Secrets Vault
|
short_description: Get secrets from Thycotic DevOps Secrets Vault
|
||||||
version_added: 1.0.0
|
version_added: 1.0.0
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author:
|
author:
|
||||||
- Jan-Piet Mens (@jpmens)
|
- Jan-Piet Mens (@jpmens)
|
||||||
lookup: etcd
|
name: etcd
|
||||||
short_description: get info from an etcd server
|
short_description: get info from an etcd server
|
||||||
description:
|
description:
|
||||||
- Retrieves data from an etcd server
|
- Retrieves data from an etcd server
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ DOCUMENTATION = '''
|
|||||||
author:
|
author:
|
||||||
- Eric Belhomme (@eric-belhomme) <ebelhomme@fr.scc.com>
|
- Eric Belhomme (@eric-belhomme) <ebelhomme@fr.scc.com>
|
||||||
version_added: '0.2.0'
|
version_added: '0.2.0'
|
||||||
lookup: etcd3
|
name: etcd3
|
||||||
short_description: Get key values from etcd3 server
|
short_description: Get key values from etcd3 server
|
||||||
description:
|
description:
|
||||||
- Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
|
- Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = r'''
|
DOCUMENTATION = r'''
|
||||||
lookup: filetree
|
name: filetree
|
||||||
author: Dag Wieers (@dagwieers) <dag@wieers.com>
|
author: Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||||
short_description: recursively match all files in a directory tree
|
short_description: recursively match all files in a directory tree
|
||||||
description:
|
description:
|
||||||
@@ -31,9 +31,7 @@ EXAMPLES = r"""
|
|||||||
- name: Template files (explicitly skip directories in order to use the 'src' attribute)
|
- name: Template files (explicitly skip directories in order to use the 'src' attribute)
|
||||||
ansible.builtin.template:
|
ansible.builtin.template:
|
||||||
src: '{{ item.src }}'
|
src: '{{ item.src }}'
|
||||||
# Your template files should be stored with a .j2 file extension,
|
dest: /web/{{ item.path }}
|
||||||
# but should not be deployed with it. splitext|first removes it.
|
|
||||||
dest: /web/{{ item.path | splitext | first }}
|
|
||||||
mode: '{{ item.mode }}'
|
mode: '{{ item.mode }}'
|
||||||
with_community.general.filetree: web/
|
with_community.general.filetree: web/
|
||||||
when: item.state == 'file'
|
when: item.state == 'file'
|
||||||
@@ -43,7 +41,6 @@ EXAMPLES = r"""
|
|||||||
src: '{{ item.src }}'
|
src: '{{ item.src }}'
|
||||||
dest: /web/{{ item.path }}
|
dest: /web/{{ item.path }}
|
||||||
state: link
|
state: link
|
||||||
follow: false # avoid corrupting target files if the link already exists
|
|
||||||
force: yes
|
force: yes
|
||||||
mode: '{{ item.mode }}'
|
mode: '{{ item.mode }}'
|
||||||
with_community.general.filetree: web/
|
with_community.general.filetree: web/
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
lookup: flattened
|
name: flattened
|
||||||
author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
|
author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
|
||||||
short_description: return single list completely flattened
|
short_description: return single list completely flattened
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -1,156 +0,0 @@
|
|||||||
# (c) 2019, Eric Anderson <eric.sysmin@gmail.com>
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import absolute_import, division, print_function
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
|
||||||
lookup: gcp_storage_file
|
|
||||||
description:
|
|
||||||
- This lookup returns the contents from a file residing on Google Cloud Storage
|
|
||||||
short_description: Return GC Storage content
|
|
||||||
author: Eric Anderson (!UNKNOWN) <eanderson@avinetworks.com>
|
|
||||||
requirements:
|
|
||||||
- python >= 2.6
|
|
||||||
- requests >= 2.18.4
|
|
||||||
- google-auth >= 1.3.0
|
|
||||||
options:
|
|
||||||
src:
|
|
||||||
description:
|
|
||||||
- Source location of file (may be local machine or cloud depending on action).
|
|
||||||
required: false
|
|
||||||
bucket:
|
|
||||||
description:
|
|
||||||
- The name of the bucket.
|
|
||||||
required: false
|
|
||||||
extends_documentation_fragment:
|
|
||||||
- community.general._gcp
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
EXAMPLES = '''
|
|
||||||
- ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
the value of foo.txt is {{ lookup('community.general.gcp_storage_file',
|
|
||||||
bucket='gcp-bucket', src='mydir/foo.txt', project='project-name',
|
|
||||||
auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}
|
|
||||||
'''
|
|
||||||
|
|
||||||
RETURN = '''
|
|
||||||
_raw:
|
|
||||||
description:
|
|
||||||
- base64 encoded file content
|
|
||||||
type: list
|
|
||||||
elements: str
|
|
||||||
'''
|
|
||||||
|
|
||||||
import base64
|
|
||||||
import json
|
|
||||||
import mimetypes
|
|
||||||
import os
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
from ansible.plugins.lookup import LookupBase
|
|
||||||
from ansible.utils.display import Display
|
|
||||||
|
|
||||||
try:
|
|
||||||
import requests
|
|
||||||
HAS_REQUESTS = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_REQUESTS = False
|
|
||||||
|
|
||||||
try:
|
|
||||||
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession
|
|
||||||
HAS_GOOGLE_CLOUD_COLLECTION = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_GOOGLE_CLOUD_COLLECTION = False
|
|
||||||
|
|
||||||
|
|
||||||
display = Display()
|
|
||||||
|
|
||||||
|
|
||||||
class GcpMockModule(object):
|
|
||||||
def __init__(self, params):
|
|
||||||
self.params = params
|
|
||||||
|
|
||||||
def fail_json(self, *args, **kwargs):
|
|
||||||
raise AnsibleError(kwargs['msg'])
|
|
||||||
|
|
||||||
def raise_for_status(self, response):
|
|
||||||
try:
|
|
||||||
response.raise_for_status()
|
|
||||||
except getattr(requests.exceptions, 'RequestException'):
|
|
||||||
self.fail_json(msg="GCP returned error: %s" % response.json())
|
|
||||||
|
|
||||||
|
|
||||||
class GcpFileLookup():
|
|
||||||
def get_file_contents(self, module):
|
|
||||||
auth = GcpSession(module, 'storage')
|
|
||||||
data = auth.get(self.media_link(module))
|
|
||||||
return base64.b64encode(data.content.rstrip())
|
|
||||||
|
|
||||||
def fetch_resource(self, module, link, allow_not_found=True):
|
|
||||||
auth = GcpSession(module, 'storage')
|
|
||||||
return self.return_if_object(module, auth.get(link), allow_not_found)
|
|
||||||
|
|
||||||
def self_link(self, module):
|
|
||||||
return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}".format(**module.params)
|
|
||||||
|
|
||||||
def media_link(self, module):
|
|
||||||
return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media".format(**module.params)
|
|
||||||
|
|
||||||
def return_if_object(self, module, response, allow_not_found=False):
|
|
||||||
# If not found, return nothing.
|
|
||||||
if allow_not_found and response.status_code == 404:
|
|
||||||
return None
|
|
||||||
# If no content, return nothing.
|
|
||||||
if response.status_code == 204:
|
|
||||||
return None
|
|
||||||
try:
|
|
||||||
module.raise_for_status(response)
|
|
||||||
result = response.json()
|
|
||||||
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
|
|
||||||
raise AnsibleError("Invalid JSON response with error: %s" % inst)
|
|
||||||
if navigate_hash(result, ['error', 'errors']):
|
|
||||||
raise AnsibleError(navigate_hash(result, ['error', 'errors']))
|
|
||||||
return result
|
|
||||||
|
|
||||||
def object_headers(self, module):
|
|
||||||
return {
|
|
||||||
"name": module.params['src'],
|
|
||||||
"Content-Type": mimetypes.guess_type(module.params['src'])[0],
|
|
||||||
"Content-Length": str(os.path.getsize(module.params['src'])),
|
|
||||||
}
|
|
||||||
|
|
||||||
def run(self, terms, variables=None, **kwargs):
|
|
||||||
params = {
|
|
||||||
'bucket': kwargs.get('bucket', None),
|
|
||||||
'src': kwargs.get('src', None),
|
|
||||||
'projects': kwargs.get('projects', None),
|
|
||||||
'scopes': kwargs.get('scopes', None),
|
|
||||||
'zones': kwargs.get('zones', None),
|
|
||||||
'auth_kind': kwargs.get('auth_kind', None),
|
|
||||||
'service_account_file': kwargs.get('service_account_file', None),
|
|
||||||
'service_account_email': kwargs.get('service_account_email', None),
|
|
||||||
}
|
|
||||||
|
|
||||||
if not params['scopes']:
|
|
||||||
params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']
|
|
||||||
|
|
||||||
fake_module = GcpMockModule(params)
|
|
||||||
|
|
||||||
# Check if files exist.
|
|
||||||
remote_object = self.fetch_resource(fake_module, self.self_link(fake_module))
|
|
||||||
if not remote_object:
|
|
||||||
raise AnsibleError("File does not exist in bucket")
|
|
||||||
|
|
||||||
result = self.get_file_contents(fake_module)
|
|
||||||
return [result]
|
|
||||||
|
|
||||||
|
|
||||||
class LookupModule(LookupBase):
|
|
||||||
def run(self, terms, variables=None, **kwargs):
|
|
||||||
if not HAS_GOOGLE_CLOUD_COLLECTION:
|
|
||||||
raise AnsibleError("community.general.gcp_storage_file needs a supported version of the google.cloud collection installed")
|
|
||||||
if not HAS_REQUESTS:
|
|
||||||
raise AnsibleError("community.general.gcp_storage_file needs requests installed. Use `pip install requests` to install it")
|
|
||||||
return GcpFileLookup().run(terms, variables=variables, **kwargs)
|
|
||||||
@@ -1,650 +0,0 @@
|
|||||||
# (c) 2020, Brian Scholer (@briantist)
|
|
||||||
# (c) 2015, Jonathan Davila <jonathan(at)davila.io>
|
|
||||||
# (c) 2017 Ansible Project
|
|
||||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
|
||||||
|
|
||||||
from __future__ import (absolute_import, division, print_function)
|
|
||||||
__metaclass__ = type
|
|
||||||
|
|
||||||
DOCUMENTATION = """
|
|
||||||
lookup: hashi_vault
|
|
||||||
author:
|
|
||||||
- Jonathan Davila (!UNKNOWN) <jdavila(at)ansible.com>
|
|
||||||
- Brian Scholer (@briantist)
|
|
||||||
short_description: Retrieve secrets from HashiCorp's Vault
|
|
||||||
requirements:
|
|
||||||
- hvac (python library)
|
|
||||||
- hvac 0.7.0+ (for namespace support)
|
|
||||||
- hvac 0.9.6+ (to avoid all deprecation warnings)
|
|
||||||
- botocore (only if inferring aws params from boto)
|
|
||||||
- boto3 (only if using a boto profile)
|
|
||||||
description:
|
|
||||||
- Retrieve secrets from HashiCorp's Vault.
|
|
||||||
notes:
|
|
||||||
- Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified.
|
|
||||||
- As of community.general 0.2.0, only the latest version of a secret is returned when specifying a KV v2 path.
|
|
||||||
- As of community.general 0.2.0, all options can be supplied via term string (space delimited key=value pairs) or by parameters (see examples).
|
|
||||||
- As of community.general 0.2.0, when C(secret) is the first option in the term string, C(secret=) is not required (see examples).
|
|
||||||
options:
|
|
||||||
secret:
|
|
||||||
description: Vault path to the secret being requested in the format C(path[:field]).
|
|
||||||
required: True
|
|
||||||
token:
|
|
||||||
description:
|
|
||||||
- Vault token. If using token auth and no token is supplied, explicitly or through env, then the plugin will check
|
|
||||||
- for a token file, as determined by C(token_path) and C(token_file).
|
|
||||||
env:
|
|
||||||
- name: VAULT_TOKEN
|
|
||||||
token_path:
|
|
||||||
description: If no token is specified, will try to read the token file from this path.
|
|
||||||
env:
|
|
||||||
- name: VAULT_TOKEN_PATH
|
|
||||||
version_added: 1.2.0
|
|
||||||
ini:
|
|
||||||
- section: lookup_hashi_vault
|
|
||||||
key: token_path
|
|
||||||
version_added: '0.2.0'
|
|
||||||
token_file:
|
|
||||||
description: If no token is specified, will try to read the token from this file in C(token_path).
|
|
||||||
env:
|
|
||||||
- name: VAULT_TOKEN_FILE
|
|
||||||
version_added: 1.2.0
|
|
||||||
ini:
|
|
||||||
- section: lookup_hashi_vault
|
|
||||||
key: token_file
|
|
||||||
default: '.vault-token'
|
|
||||||
version_added: '0.2.0'
|
|
||||||
url:
|
|
||||||
description: URL to the Vault service.
|
|
||||||
env:
|
|
||||||
- name: VAULT_ADDR
|
|
||||||
ini:
|
|
||||||
- section: lookup_hashi_vault
|
|
||||||
key: url
|
|
||||||
version_added: '0.2.0'
|
|
||||||
default: 'http://127.0.0.1:8200'
|
|
||||||
username:
|
|
||||||
description: Authentication user name.
|
|
||||||
password:
|
|
||||||
description: Authentication password.
|
|
||||||
role_id:
|
|
||||||
description: Vault Role ID. Used in approle and aws_iam_login auth methods.
|
|
||||||
env:
|
|
||||||
- name: VAULT_ROLE_ID
|
|
||||||
ini:
|
|
||||||
- section: lookup_hashi_vault
|
|
||||||
key: role_id
|
|
||||||
version_added: '0.2.0'
|
|
||||||
secret_id:
|
|
||||||
description: Secret ID to be used for Vault AppRole authentication.
|
|
||||||
env:
|
|
||||||
- name: VAULT_SECRET_ID
|
|
||||||
auth_method:
|
|
||||||
description:
|
|
||||||
- Authentication method to be used.
|
|
||||||
- C(userpass) is added in Ansible 2.8.
|
|
||||||
- C(aws_iam_login) is added in community.general 0.2.0.
|
|
||||||
- C(jwt) is added in community.general 1.3.0.
|
|
||||||
env:
|
|
||||||
- name: VAULT_AUTH_METHOD
|
|
||||||
ini:
|
|
||||||
- section: lookup_hashi_vault
|
|
||||||
key: auth_method
|
|
||||||
version_added: '0.2.0'
|
|
||||||
choices:
|
|
||||||
- token
|
|
||||||
- userpass
|
|
||||||
- ldap
|
|
||||||
- approle
|
|
||||||
- aws_iam_login
|
|
||||||
- jwt
|
|
||||||
default: token
|
|
||||||
return_format:
|
|
||||||
description:
|
|
||||||
- Controls how multiple key/value pairs in a path are treated on return.
|
|
||||||
- C(dict) returns a single dict containing the key/value pairs (same behavior as before community.general 0.2.0).
|
|
||||||
- C(values) returns a list of all the values only. Use when you don't care about the keys.
|
|
||||||
- C(raw) returns the actual API result, which includes metadata and may have the data nested in other keys.
|
|
||||||
choices:
|
|
||||||
- dict
|
|
||||||
- values
|
|
||||||
- raw
|
|
||||||
default: dict
|
|
||||||
aliases: [ as ]
|
|
||||||
version_added: '0.2.0'
|
|
||||||
mount_point:
|
|
||||||
description: Vault mount point, only required if you have a custom mount point. Does not apply to token authentication.
|
|
||||||
jwt:
|
|
||||||
description: The JSON Web Token (JWT) to use for JWT authentication to Vault.
|
|
||||||
env:
|
|
||||||
- name: ANSIBLE_HASHI_VAULT_JWT
|
|
||||||
version_added: 1.3.0
|
|
||||||
ca_cert:
|
|
||||||
description: Path to certificate to use for authentication.
|
|
||||||
aliases: [ cacert ]
|
|
||||||
validate_certs:
|
|
||||||
description:
|
|
||||||
- Controls verification and validation of SSL certificates, mostly you only want to turn off with self signed ones.
|
|
||||||
- Will be populated with the inverse of C(VAULT_SKIP_VERIFY) if that is set and I(validate_certs) is not explicitly
|
|
||||||
provided (added in community.general 1.3.0).
|
|
||||||
- Will default to C(true) if neither I(validate_certs) or C(VAULT_SKIP_VERIFY) are set.
|
|
||||||
type: boolean
|
|
||||||
namespace:
|
|
||||||
description:
|
|
||||||
- Vault namespace where secrets reside. This option requires HVAC 0.7.0+ and Vault 0.11+.
|
|
||||||
- Optionally, this may be achieved by prefixing the authentication mount point and/or secret path with the namespace
|
|
||||||
(e.g C(mynamespace/secret/mysecret)).
|
|
||||||
env:
|
|
||||||
- name: VAULT_NAMESPACE
|
|
||||||
version_added: 1.2.0
|
|
||||||
aws_profile:
|
|
||||||
description: The AWS profile
|
|
||||||
type: str
|
|
||||||
aliases: [ boto_profile ]
|
|
||||||
env:
|
|
||||||
- name: AWS_DEFAULT_PROFILE
|
|
||||||
- name: AWS_PROFILE
|
|
||||||
version_added: '0.2.0'
|
|
||||||
aws_access_key:
|
|
||||||
description: The AWS access key to use.
|
|
||||||
type: str
|
|
||||||
aliases: [ aws_access_key_id ]
|
|
||||||
env:
|
|
||||||
- name: EC2_ACCESS_KEY
|
|
||||||
- name: AWS_ACCESS_KEY
|
|
||||||
- name: AWS_ACCESS_KEY_ID
|
|
||||||
version_added: '0.2.0'
|
|
||||||
aws_secret_key:
|
|
||||||
description: The AWS secret key that corresponds to the access key.
|
|
||||||
type: str
|
|
||||||
aliases: [ aws_secret_access_key ]
|
|
||||||
env:
|
|
||||||
- name: EC2_SECRET_KEY
|
|
||||||
- name: AWS_SECRET_KEY
|
|
||||||
- name: AWS_SECRET_ACCESS_KEY
|
|
||||||
version_added: '0.2.0'
|
|
||||||
aws_security_token:
|
|
||||||
description: The AWS security token if using temporary access and secret keys.
|
|
||||||
type: str
|
|
||||||
env:
|
|
||||||
- name: EC2_SECURITY_TOKEN
|
|
||||||
- name: AWS_SESSION_TOKEN
|
|
||||||
- name: AWS_SECURITY_TOKEN
|
|
||||||
version_added: '0.2.0'
|
|
||||||
region:
|
|
||||||
description: The AWS region for which to create the connection.
|
|
||||||
type: str
|
|
||||||
env:
|
|
||||||
- name: EC2_REGION
|
|
||||||
- name: AWS_REGION
|
|
||||||
version_added: '0.2.0'
|
|
||||||
"""
|
|
||||||
|
|
||||||
EXAMPLES = """
|
|
||||||
- ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200') }}"
|
|
||||||
|
|
||||||
- name: Return all secrets from a path
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200') }}"
|
|
||||||
|
|
||||||
- name: Vault that requires authentication via LDAP
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas') }}"
|
|
||||||
|
|
||||||
- name: Vault that requires authentication via username and password
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value auth_method=userpass username=myuser password=psw url=http://myvault:8200') }}"
|
|
||||||
|
|
||||||
- name: Connect to Vault using TLS
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 validate_certs=False') }}"
|
|
||||||
|
|
||||||
- name: using certificate auth
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret/hi:value token=xxxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem') }}"
|
|
||||||
|
|
||||||
- name: Authenticate with a Vault app role
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value auth_method=approle role_id=myroleid secret_id=mysecretid') }}"
|
|
||||||
|
|
||||||
- name: Return all secrets from a path in a namespace
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 namespace=teama/admins') }}"
|
|
||||||
|
|
||||||
# When using KV v2 the PATH should include "data" between the secret engine mount and path (e.g. "secret/data/:path")
|
|
||||||
# see: https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
|
|
||||||
- name: Return latest KV v2 secret from path
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/data/hello token=my_vault_token url=http://myvault_url:8200') }}"
|
|
||||||
|
|
||||||
# The following examples work in collection releases after community.general 0.2.0
|
|
||||||
|
|
||||||
- name: secret= is not required if secret is first
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret/data/hello token=<token> url=http://myvault_url:8200') }}"
|
|
||||||
|
|
||||||
- name: options can be specified as parameters rather than put in term string
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret/data/hello', token=my_token_var, url='http://myvault_url:8200') }}"
|
|
||||||
|
|
||||||
# return_format (or its alias 'as') can control how secrets are returned to you
|
|
||||||
- name: return secrets as a dict (default)
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
my_secrets: "{{ lookup('community.general.hashi_vault', 'secret/data/manysecrets', token=my_token_var, url='http://myvault_url:8200') }}"
|
|
||||||
- ansible.builtin.debug:
|
|
||||||
msg: "{{ my_secrets['secret_key'] }}"
|
|
||||||
- ansible.builtin.debug:
|
|
||||||
msg: "Secret '{{ item.key }}' has value '{{ item.value }}'"
|
|
||||||
loop: "{{ my_secrets | dict2items }}"
|
|
||||||
|
|
||||||
- name: return secrets as values only
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "A secret value: {{ item }}"
|
|
||||||
loop: "{{ query('community.general.hashi_vault', 'secret/data/manysecrets', token=my_token_var, url='http://myvault_url:8200', return_format='values') }}"
|
|
||||||
|
|
||||||
- name: return raw secret from API, including metadata
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
my_secret: "{{ lookup('community.general.hashi_vault', 'secret/data/hello:value', token=my_token_var, url='http://myvault_url:8200', as='raw') }}"
|
|
||||||
- ansible.builtin.debug:
|
|
||||||
msg: "This is version {{ my_secret['metadata']['version'] }} of hello:value. The secret data is {{ my_secret['data']['data']['value'] }}"
|
|
||||||
|
|
||||||
# AWS IAM authentication method
|
|
||||||
# uses Ansible standard AWS options
|
|
||||||
|
|
||||||
- name: authenticate with aws_iam_login
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value', auth_method='aws_iam_login', role_id='myroleid', profile=my_boto_profile) }}"
|
|
||||||
|
|
||||||
# The following examples work in collection releases after community.general 1.3.0
|
|
||||||
|
|
||||||
- name: Authenticate with a JWT
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value', auth_method='jwt', role_id='myroleid', jwt='myjwt', url='https://myvault:8200')}}"
|
|
||||||
"""
|
|
||||||
|
|
||||||
RETURN = """
|
|
||||||
_raw:
|
|
||||||
description:
|
|
||||||
- secrets(s) requested
|
|
||||||
type: list
|
|
||||||
elements: dict
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from ansible.errors import AnsibleError
|
|
||||||
from ansible.plugins.lookup import LookupBase
|
|
||||||
from ansible.utils.display import Display
|
|
||||||
from ansible.module_utils.parsing.convert_bool import boolean
|
|
||||||
|
|
||||||
HAS_HVAC = False
|
|
||||||
try:
|
|
||||||
import hvac
|
|
||||||
HAS_HVAC = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_HVAC = False
|
|
||||||
|
|
||||||
HAS_BOTOCORE = False
|
|
||||||
try:
|
|
||||||
# import boto3
|
|
||||||
import botocore
|
|
||||||
HAS_BOTOCORE = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_BOTOCORE = False
|
|
||||||
|
|
||||||
HAS_BOTO3 = False
|
|
||||||
try:
|
|
||||||
import boto3
|
|
||||||
# import botocore
|
|
||||||
HAS_BOTO3 = True
|
|
||||||
except ImportError:
|
|
||||||
HAS_BOTO3 = False
|
|
||||||
|
|
||||||
|
|
||||||
class HashiVault:
|
|
||||||
def get_options(self, *option_names, **kwargs):
|
|
||||||
ret = {}
|
|
||||||
include_falsey = kwargs.get('include_falsey', False)
|
|
||||||
for option in option_names:
|
|
||||||
val = self.options.get(option)
|
|
||||||
if val or include_falsey:
|
|
||||||
ret[option] = val
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
self.options = kwargs
|
|
||||||
|
|
||||||
# check early that auth method is actually available
|
|
||||||
self.auth_function = 'auth_' + self.options['auth_method']
|
|
||||||
if not (hasattr(self, self.auth_function) and callable(getattr(self, self.auth_function))):
|
|
||||||
raise AnsibleError(
|
|
||||||
"Authentication method '%s' is not implemented. ('%s' member function not found)" % (self.options['auth_method'], self.auth_function)
|
|
||||||
)
|
|
||||||
|
|
||||||
client_args = {
|
|
||||||
'url': self.options['url'],
|
|
||||||
'verify': self.options['ca_cert']
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.options.get('namespace'):
|
|
||||||
client_args['namespace'] = self.options['namespace']
|
|
||||||
|
|
||||||
# this is the only auth_method-specific thing here, because if we're using a token, we need it now
|
|
||||||
if self.options['auth_method'] == 'token':
|
|
||||||
client_args['token'] = self.options.get('token')
|
|
||||||
|
|
||||||
self.client = hvac.Client(**client_args)
|
|
||||||
|
|
||||||
# Check for old version, before auth_methods class (added in 0.7.0):
|
|
||||||
# https://github.com/hvac/hvac/releases/tag/v0.7.0
|
|
||||||
#
|
|
||||||
# hvac is moving auth methods into the auth_methods class
|
|
||||||
# which lives in the client.auth member.
|
|
||||||
#
|
|
||||||
# Attempting to find which backends were moved into the class when (this is primarily for warnings):
|
|
||||||
# 0.7.0 -- github, ldap, mfa, azure?, gcp
|
|
||||||
# 0.7.1 -- okta
|
|
||||||
# 0.8.0 -- kubernetes
|
|
||||||
# 0.9.0 -- azure?, radius
|
|
||||||
# 0.9.3 -- aws
|
|
||||||
# 0.9.6 -- userpass
|
|
||||||
self.hvac_has_auth_methods = hasattr(self.client, 'auth')
|
|
||||||
|
|
||||||
# We've already checked to ensure a method exists for a particular auth_method, of the form:
|
|
||||||
#
|
|
||||||
# auth_<method_name>
|
|
||||||
#
|
|
||||||
def authenticate(self):
|
|
||||||
getattr(self, self.auth_function)()
|
|
||||||
|
|
||||||
def get(self):
|
|
||||||
'''gets a secret. should always return a list'''
|
|
||||||
secret = self.options['secret']
|
|
||||||
field = self.options['secret_field']
|
|
||||||
return_as = self.options['return_format']
|
|
||||||
|
|
||||||
try:
|
|
||||||
data = self.client.read(secret)
|
|
||||||
except hvac.exceptions.Forbidden:
|
|
||||||
raise AnsibleError("Forbidden: Permission Denied to secret '%s'." % secret)
|
|
||||||
|
|
||||||
if data is None:
|
|
||||||
raise AnsibleError("The secret '%s' doesn't seem to exist." % secret)
|
|
||||||
|
|
||||||
if return_as == 'raw':
|
|
||||||
return [data]
|
|
||||||
|
|
||||||
# Check response for KV v2 fields and flatten nested secret data.
|
|
||||||
# https://vaultproject.io/api/secret/kv/kv-v2.html#sample-response-1
|
|
||||||
try:
|
|
||||||
# sentinel field checks
|
|
||||||
check_dd = data['data']['data']
|
|
||||||
check_md = data['data']['metadata']
|
|
||||||
# unwrap nested data
|
|
||||||
data = data['data']
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if return_as == 'values':
|
|
||||||
return list(data['data'].values())
|
|
||||||
|
|
||||||
# everything after here implements return_as == 'dict'
|
|
||||||
if not field:
|
|
||||||
return [data['data']]
|
|
||||||
|
|
||||||
if field not in data['data']:
|
|
||||||
raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (secret, field))
|
|
||||||
|
|
||||||
return [data['data'][field]]
|
|
||||||
|
|
||||||
# begin auth implementation methods
|
|
||||||
#
|
|
||||||
# To add new backends, 3 things should be added:
|
|
||||||
#
|
|
||||||
# 1. Add a new validate_auth_<method_name> method to the LookupModule, which is responsible for validating
|
|
||||||
# that it has the necessary options and whatever else it needs.
|
|
||||||
#
|
|
||||||
# 2. Add a new auth_<method_name> method to this class. These implementations are faily minimal as they should
|
|
||||||
# already have everything they need. This is also the place to check for deprecated auth methods as hvac
|
|
||||||
# continues to move backends into the auth_methods class.
|
|
||||||
#
|
|
||||||
# 3. Update the avail_auth_methods list in the LookupModules auth_methods() method (for now this is static).
|
|
||||||
#
|
|
||||||
def auth_token(self):
    '''Validate that the token already supplied to the client works.'''
    if self.client.is_authenticated():
        return
    raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup.")
|
|
||||||
|
|
||||||
def auth_userpass(self):
    '''Log in via the userpass auth backend.'''
    login_kwargs = self.get_options('username', 'password', 'mount_point')
    use_new_api = self.hvac_has_auth_methods and hasattr(self.client.auth.userpass, 'login')
    if use_new_api:
        self.client.auth.userpass.login(**login_kwargs)
    else:
        # old hvac releases only expose the deprecated client-level entry point
        Display().warning("HVAC should be updated to version 0.9.6 or higher. Deprecated method 'auth_userpass' will be used.")
        self.client.auth_userpass(**login_kwargs)
|
|
||||||
|
|
||||||
def auth_ldap(self):
    '''Log in via the LDAP auth backend.'''
    login_kwargs = self.get_options('username', 'password', 'mount_point')
    # note: on very old hvac, self.client may not even have an 'auth' attribute
    use_new_api = self.hvac_has_auth_methods and hasattr(self.client.auth.ldap, 'login')
    if use_new_api:
        self.client.auth.ldap.login(**login_kwargs)
    else:
        # old hvac releases only expose the deprecated client-level entry point
        Display().warning("HVAC should be updated to version 0.7.0 or higher. Deprecated method 'auth_ldap' will be used.")
        self.client.auth_ldap(**login_kwargs)
|
|
||||||
|
|
||||||
def auth_approle(self):
    '''Log in via the AppRole auth backend.'''
    approle_kwargs = self.get_options('role_id', 'secret_id', 'mount_point')
    self.client.auth_approle(**approle_kwargs)
|
|
||||||
|
|
||||||
def auth_aws_iam_login(self):
    '''Log in with the AWS IAM credentials assembled by the validator.'''
    creds = self.options['iam_login_credentials']
    use_new_api = self.hvac_has_auth_methods and hasattr(self.client.auth.aws, 'iam_login')
    if use_new_api:
        self.client.auth.aws.iam_login(**creds)
    else:
        # old hvac releases only expose the deprecated client-level entry point
        Display().warning("HVAC should be updated to version 0.9.3 or higher. Deprecated method 'auth_aws_iam' will be used.")
        self.client.auth_aws_iam(**creds)
|
|
||||||
|
|
||||||
def auth_jwt(self):
    '''Log in via the JWT auth backend (requires hvac >= 0.10.5).'''
    login_kwargs = self.get_options('role_id', 'jwt', 'mount_point')
    login_kwargs['role'] = login_kwargs.pop('role_id')
    jwt_supported = (
        self.hvac_has_auth_methods
        and hasattr(self.client.auth, 'jwt')
        and hasattr(self.client.auth.jwt, 'jwt_login')
    )
    if not jwt_supported:
        raise AnsibleError("JWT authentication requires HVAC version 0.10.5 or higher.")
    response = self.client.auth.jwt.jwt_login(**login_kwargs)
    # JWT login does not set the client token automatically
    # see https://github.com/hvac/hvac/issues/644
    self.client.token = response['auth']['client_token']
|
|
||||||
|
|
||||||
# end auth implementation methods
|
|
||||||
|
|
||||||
|
|
||||||
class LookupModule(LookupBase):
|
|
||||||
def run(self, terms, variables=None, **kwargs):
    '''Look each term up against Vault and return the collected results.'''
    if not HAS_HVAC:
        raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.")

    results = []
    for term in terms:
        term_opts = kwargs.copy()
        term_opts.update(self.parse_term(term))
        self.set_options(direct=term_opts)
        self.process_options()
        # FUTURE: Create one object, authenticate once, and re-use it,
        # for gets, for better use during with_ loops.
        vault = HashiVault(**self._options)
        vault.authenticate()
        results.extend(vault.get())

    return results
|
|
||||||
|
|
||||||
def parse_term(self, term):
    '''Parse a space-separated term string into an options dict.'''
    parsed = {}

    for position, token in enumerate(term.split()):
        if '=' in token:
            key, value = token.split('=', 1)
        elif position == 0:
            # allow secret to be specified as value only if it's first
            key, value = 'secret', token
        else:
            raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % term)
        parsed[key] = value

    return parsed
|
|
||||||
|
|
||||||
def process_options(self):
    '''performs deep validation and value loading for options'''
    # resolve ca_cert / validate_certs into a single 'verify' value
    self.boolean_or_cacert()
    # validate the selected auth method and its prerequisites
    self.auth_methods()
    # split the secret path from an optional trailing field
    self.field_ops()
|
|
||||||
|
|
||||||
# begin options processing methods
|
|
||||||
|
|
||||||
def boolean_or_cacert(self):
|
|
||||||
# This is needed because of this (https://hvac.readthedocs.io/en/stable/source/hvac_v1.html):
|
|
||||||
#
|
|
||||||
# # verify (Union[bool,str]) - Either a boolean to indicate whether TLS verification should
|
|
||||||
# # be performed when sending requests to Vault, or a string pointing at the CA bundle to use for verification.
|
|
||||||
#
|
|
||||||
'''' return a bool or cacert '''
|
|
||||||
ca_cert = self.get_option('ca_cert')
|
|
||||||
|
|
||||||
validate_certs = self.get_option('validate_certs')
|
|
||||||
|
|
||||||
if validate_certs is None:
|
|
||||||
# Validate certs option was not explicitly set
|
|
||||||
|
|
||||||
# Check if VAULT_SKIP_VERIFY is set
|
|
||||||
vault_skip_verify = os.environ.get('VAULT_SKIP_VERIFY')
|
|
||||||
|
|
||||||
if vault_skip_verify is not None:
|
|
||||||
# VAULT_SKIP_VERIFY is set
|
|
||||||
try:
|
|
||||||
# Check that we have a boolean value
|
|
||||||
vault_skip_verify = boolean(vault_skip_verify)
|
|
||||||
# Use the inverse of VAULT_SKIP_VERIFY
|
|
||||||
validate_certs = not vault_skip_verify
|
|
||||||
except TypeError:
|
|
||||||
# Not a boolean value fallback to default value (True)
|
|
||||||
validate_certs = True
|
|
||||||
else:
|
|
||||||
validate_certs = True
|
|
||||||
|
|
||||||
if not (validate_certs and ca_cert):
|
|
||||||
self.set_option('ca_cert', validate_certs)
|
|
||||||
|
|
||||||
def field_ops(self):
    '''Split the 'secret' option into a secret path and an optional trailing field.'''
    secret = self.get_option('secret')

    path, sep, field = secret.rpartition(':')
    if sep:
        # a ':' was present: everything before it is the path, after it the field
        self.set_option('secret', path)
        self.set_option('secret_field', field)
    else:
        # no ':' present: rpartition puts the whole string in the last slot
        self.set_option('secret', field)
        self.set_option('secret_field', None)
|
|
||||||
|
|
||||||
def auth_methods(self):
    '''Validate the requested auth method and run its validator when one exists.'''
    # enforce and set the list of available auth methods
    # TODO: can this be read from the choices: field in documentation?
    supported = ['token', 'approle', 'userpass', 'ldap', 'aws_iam_login', 'jwt']
    self.set_option('avail_auth_methods', supported)
    auth_method = self.get_option('auth_method')

    if auth_method not in supported:
        raise AnsibleError(
            "Authentication method '%s' not supported. Available options are %r" % (auth_method, supported)
        )

    # run validator if available
    validator = getattr(self, 'validate_auth_' + auth_method, None)
    if callable(validator):
        validator(auth_method)
|
|
||||||
|
|
||||||
# end options processing methods
|
|
||||||
|
|
||||||
# begin auth method validators
|
|
||||||
|
|
||||||
def validate_by_required_fields(self, auth_method, *field_names):
    '''Raise AnsibleError if any of the named options is unset or empty.'''
    missing = []
    for name in field_names:
        if not self.get_option(name):
            missing.append(name)

    if missing:
        raise AnsibleError("Authentication method %s requires options %r to be set, but these are missing: %r" % (auth_method, field_names, missing))
|
|
||||||
|
|
||||||
def validate_auth_userpass(self, auth_method):
    '''userpass login needs both a username and a password.'''
    self.validate_by_required_fields(auth_method, 'username', 'password')
|
|
||||||
|
|
||||||
def validate_auth_ldap(self, auth_method):
    '''LDAP login needs both a username and a password.'''
    self.validate_by_required_fields(auth_method, 'username', 'password')
|
|
||||||
|
|
||||||
def validate_auth_approle(self, auth_method):
    '''AppRole login needs at least a role_id.'''
    self.validate_by_required_fields(auth_method, 'role_id')
|
|
||||||
|
|
||||||
def validate_auth_token(self, auth_method):
    '''Ensure a token is available, discovering one from a token file if needed.'''
    if auth_method == 'token':
        if not self.get_option('token_path'):
            # generally we want env vars defined in the spec, but in this case we want
            # the env var HOME to have lower precedence than any other value source,
            # including ini, so we're doing it here after all other processing has taken place
            self.set_option('token_path', os.environ.get('HOME'))

        token = self.get_option('token')
        token_path = self.get_option('token_path')
        if not token and token_path:
            candidate = os.path.join(token_path, self.get_option('token_file'))
            if os.path.exists(candidate):
                with open(candidate) as token_fh:
                    self.set_option('token', token_fh.read().strip())

        if not self.get_option('token'):
            raise AnsibleError("No Vault Token specified or discovered.")
|
|
||||||
|
|
||||||
def validate_auth_aws_iam_login(self, auth_method):
    '''Assemble AWS credentials for IAM login, consulting boto3/botocore when needed.'''
    params = {
        'access_key': self.get_option('aws_access_key'),
        'secret_key': self.get_option('aws_secret_key'),
    }

    role_id = self.get_option('role_id')
    if role_id:
        params['role'] = role_id

    region = self.get_option('region')
    if region:
        params['region'] = region

    if not (params['access_key'] and params['secret_key']):
        # explicit keys missing: derive them from a boto profile or the ambient session
        profile = self.get_option('aws_profile')
        if profile:
            # try to load boto profile
            if not HAS_BOTO3:
                raise AnsibleError("boto3 is required for loading a boto profile.")
            session_credentials = boto3.session.Session(profile_name=profile).get_credentials()
        else:
            # try to load from IAM credentials
            if not HAS_BOTOCORE:
                raise AnsibleError("botocore is required for loading IAM role credentials.")
            session_credentials = botocore.session.get_session().get_credentials()

        if not session_credentials:
            raise AnsibleError("No AWS credentials supplied or available.")

        params['access_key'] = session_credentials.access_key
        params['secret_key'] = session_credentials.secret_key
        if session_credentials.token:
            params['session_token'] = session_credentials.token

    self.set_option('iam_login_credentials', params)
|
|
||||||
|
|
||||||
def validate_auth_jwt(self, auth_method):
    '''JWT login needs both a role_id and a jwt.'''
    self.validate_by_required_fields(auth_method, 'role_id', 'jwt')
|
|
||||||
|
|
||||||
# end auth method validators
|
|
||||||
@@ -7,7 +7,7 @@ __metaclass__ = type
|
|||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
author:
|
author:
|
||||||
- Juan Manuel Parrilla (@jparrill)
|
- Juan Manuel Parrilla (@jparrill)
|
||||||
lookup: hiera
|
name: hiera
|
||||||
short_description: get info from hiera data
|
short_description: get info from hiera data
|
||||||
requirements:
|
requirements:
|
||||||
- hiera (command line utility)
|
- hiera (command line utility)
|
||||||
@@ -63,7 +63,6 @@ import os
|
|||||||
|
|
||||||
from ansible.plugins.lookup import LookupBase
|
from ansible.plugins.lookup import LookupBase
|
||||||
from ansible.utils.cmd_functions import run_cmd
|
from ansible.utils.cmd_functions import run_cmd
|
||||||
from ansible.module_utils._text import to_text
|
|
||||||
|
|
||||||
ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
|
ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
|
||||||
ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
|
ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
|
||||||
@@ -79,11 +78,13 @@ class Hiera(object):
|
|||||||
rc, output, err = run_cmd("{0} -c {1} {2}".format(
|
rc, output, err = run_cmd("{0} -c {1} {2}".format(
|
||||||
ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))
|
ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))
|
||||||
|
|
||||||
return to_text(output.strip())
|
return output.strip()
|
||||||
|
|
||||||
|
|
||||||
class LookupModule(LookupBase):
|
class LookupModule(LookupBase):
|
||||||
def run(self, terms, variables=''):
|
def run(self, terms, variables=''):
|
||||||
hiera = Hiera()
|
hiera = Hiera()
|
||||||
ret = [hiera.get(terms)]
|
ret = []
|
||||||
|
|
||||||
|
ret.append(hiera.get(terms))
|
||||||
return ret
|
return ret
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||||||
__metaclass__ = type
|
__metaclass__ = type
|
||||||
|
|
||||||
DOCUMENTATION = '''
|
DOCUMENTATION = '''
|
||||||
lookup: keyring
|
name: keyring
|
||||||
author:
|
author:
|
||||||
- Samuel Boucher (!UNKNOWN) <boucher.samuel.c@gmail.com>
|
- Samuel Boucher (!UNKNOWN) <boucher.samuel.c@gmail.com>
|
||||||
requirements:
|
requirements:
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user