Mirror of https://github.com/ansible-collections/community.general.git, synced 2026-04-28 17:36:49 +00:00
Compare commits
49 Commits
| SHA1 |
|---|
| 433d021c42 |
| c12fd2474b |
| 39875affa7 |
| 7b6cc1bf5c |
| 85aa288f8f |
| 0566169758 |
| 01c287ed6c |
| 78f6e594fc |
| 14584b261d |
| 4229f6d04a |
| 6173cf0d42 |
| 29d66b1c21 |
| c071fb1df3 |
| dd7e8b4463 |
| da85b37764 |
| 8806d31d4c |
| 841d3b25b9 |
| bbe74d2b17 |
| a7783c48ff |
| bacd64e4dc |
| 939d30862c |
| a4a102ae68 |
| 9b13efe654 |
| 4ea084cc29 |
| c5c8decea5 |
| 7abf7cc7c7 |
| c6f395e46b |
| a6ce5eaa8e |
| 15ad2448f1 |
| ff2b016c66 |
| 44e522d311 |
| b94800036b |
| d119905bd5 |
| 2754d86ac5 |
| 03ba48cf78 |
| 147fbe602c |
| ec2efb26d0 |
| 150495a15f |
| b2b3c056ca |
| 557594c392 |
| b6a6edd403 |
| e42770d4bf |
| 1b78f18bf4 |
| ec11d13825 |
| eb066335f8 |
| cb26897b3e |
| b7b5c1852e |
| 97dce1f621 |
| 0618af9b1e |
@@ -1,3 +0,0 @@
## Azure Pipelines Configuration

Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
@@ -1,541 +0,0 @@
trigger:
  batch: true
  branches:
    include:
      - main
      - stable-*

pr:
  autoCancel: true
  branches:
    include:
      - main
      - stable-*

schedules:
  - cron: 0 8 * * *
    displayName: Nightly (main)
    always: true
    branches:
      include:
        - main
  - cron: 0 10 * * *
    displayName: Nightly (active stable branches)
    always: true
    branches:
      include:
        - stable-4
  - cron: 0 11 * * 0
    displayName: Weekly (old stable branches)
    always: true
    branches:
      include:
        - stable-3

variables:
  - name: checkoutPath
    value: ansible_collections/community/general
  - name: coverageBranches
    value: main
  - name: pipelinesCoverage
    value: coverage
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  - name: fetchDepth
    value: 0

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:3.0.0

pool: Standard

stages:
### Sanity
  - stage: Sanity_2_14
    displayName: Sanity 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.14/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
            - test: extra
  - stage: Sanity_2_13
    displayName: Sanity 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.13/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_12
    displayName: Sanity 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.12/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_11
    displayName: Sanity 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.11/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_10
    displayName: Sanity 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.10/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
  - stage: Sanity_2_9
    displayName: Sanity 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test {0}
          testFormat: 2.9/sanity/{0}
          targets:
            - test: 1
            - test: 2
            - test: 3
            - test: 4
### Units
  - stage: Units_2_14
    displayName: Units 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.14/units/{0}/1
          targets:
            - test: 2.7
            - test: 3.5
            - test: 3.6
            - test: 3.7
            - test: 3.8
            - test: 3.9
            - test: '3.10'
  - stage: Units_2_13
    displayName: Units 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.13/units/{0}/1
          targets:
            - test: 2.7
            - test: 3.6
            - test: 3.8
            - test: 3.9
  - stage: Units_2_12
    displayName: Units 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.12/units/{0}/1
          targets:
            - test: 2.6
            - test: 3.5
            - test: 3.8
  - stage: Units_2_11
    displayName: Units 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.11/units/{0}/1
          targets:
            - test: 2.6
            - test: 2.7
            - test: 3.5
            - test: 3.9
  - stage: Units_2_10
    displayName: Units 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/units/{0}/1
          targets:
            - test: 2.7
            - test: 3.6
  - stage: Units_2_9
    displayName: Units 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/units/{0}/1
          targets:
            - test: 2.6
            - test: 3.5

### Remote
  - stage: Remote_2_14
    displayName: Remote 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.14/{0}
          targets:
            - name: macOS 12.0
              test: macos/12.0
            - name: RHEL 7.9
              test: rhel/7.9
            - name: RHEL 9.0
              test: rhel/9.0
            - name: FreeBSD 12.3
              test: freebsd/12.3
            - name: FreeBSD 13.1
              test: freebsd/13.1
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_13
    displayName: Remote 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.13/{0}
          targets:
            - name: macOS 12.0
              test: macos/12.0
            - name: RHEL 8.5
              test: rhel/8.5
          groups:
            - 1
            - 2
            - 3
  - stage: Remote_2_12
    displayName: Remote 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.12/{0}
          targets:
            - name: macOS 11.1
              test: macos/11.1
            - name: RHEL 8.4
              test: rhel/8.4
            - name: FreeBSD 13.0
              test: freebsd/13.0
          groups:
            - 1
            - 2
  - stage: Remote_2_11
    displayName: Remote 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.11/{0}
          targets:
            - name: RHEL 7.9
              test: rhel/7.9
            - name: RHEL 8.3
              test: rhel/8.3
            #- name: FreeBSD 12.2
            #  test: freebsd/12.2
          groups:
            - 1
            - 2
  - stage: Remote_2_10
    displayName: Remote 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/{0}
          targets:
            - name: OS X 10.11
              test: osx/10.11
            - name: macOS 10.15
              test: macos/10.15
          groups:
            - 1
            - 2
  - stage: Remote_2_9
    displayName: Remote 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/{0}
          targets:
            - name: RHEL 8.2
              test: rhel/8.2
            - name: RHEL 7.8
              test: rhel/7.8
            #- name: FreeBSD 12.0
            #  test: freebsd/12.0
          groups:
            - 1
            - 2

### Docker
  - stage: Docker_2_14
    displayName: Docker 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.14/linux/{0}
          targets:
            - name: CentOS 7
              test: centos7
            - name: Fedora 36
              test: fedora36
            - name: openSUSE 15
              test: opensuse15
            - name: Ubuntu 20.04
              test: ubuntu2004
            - name: Ubuntu 22.04
              test: ubuntu2204
            - name: Alpine 3
              test: alpine3
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_13
    displayName: Docker 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.13/linux/{0}
          targets:
            - name: Fedora 35
              test: fedora35
            - name: openSUSE 15 py2
              test: opensuse15py2
            - name: Alpine 3
              test: alpine3
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_12
    displayName: Docker 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.12/linux/{0}
          targets:
            - name: CentOS 6
              test: centos6
            - name: Fedora 34
              test: fedora34
            - name: Ubuntu 18.04
              test: ubuntu1804
          groups:
            - 1
            - 2
            - 3
  - stage: Docker_2_11
    displayName: Docker 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.11/linux/{0}
          targets:
            - name: Fedora 33
              test: fedora33
            - name: Alpine 3
              test: alpine3
          groups:
            - 2
            - 3
  - stage: Docker_2_10
    displayName: Docker 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.10/linux/{0}
          targets:
            - name: Fedora 32
              test: fedora32
            - name: Ubuntu 16.04
              test: ubuntu1604
          groups:
            - 2
            - 3
  - stage: Docker_2_9
    displayName: Docker 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.9/linux/{0}
          targets:
            - name: Fedora 31
              test: fedora31
          groups:
            - 2
            - 3

### Community Docker
  - stage: Docker_community_2_14
    displayName: Docker (community images) 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.14/linux-community/{0}
          targets:
            - name: Debian Bullseye
              test: debian-bullseye/3.9
            - name: ArchLinux
              test: archlinux/3.10
            - name: CentOS Stream 8
              test: centos-stream8/3.8
          groups:
            - 1
            - 2
            - 3

### Cloud
  - stage: Cloud_2_14
    displayName: Cloud 2.14
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.14/cloud/{0}/1
          targets:
            - test: 2.7
            - test: '3.10'
  - stage: Cloud_2_13
    displayName: Cloud 2.13
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.13/cloud/{0}/1
          targets:
            - test: 3.9
  - stage: Cloud_2_12
    displayName: Cloud 2.12
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.12/cloud/{0}/1
          targets:
            - test: 3.8
  - stage: Cloud_2_11
    displayName: Cloud 2.11
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.11/cloud/{0}/1
          targets:
            - test: 3.6
  - stage: Cloud_2_10
    displayName: Cloud 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/cloud/{0}/1
          targets:
            - test: 3.5
  - stage: Cloud_2_9
    displayName: Cloud 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/cloud/{0}/1
          targets:
            - test: 2.7
  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
      - Sanity_2_14
      - Sanity_2_9
      - Sanity_2_10
      - Sanity_2_11
      - Sanity_2_12
      - Sanity_2_13
      - Units_2_14
      - Units_2_9
      - Units_2_10
      - Units_2_11
      - Units_2_12
      - Units_2_13
      - Remote_2_14
      - Remote_2_9
      - Remote_2_10
      - Remote_2_11
      - Remote_2_12
      - Remote_2_13
      - Docker_2_14
      - Docker_2_9
      - Docker_2_10
      - Docker_2_11
      - Docker_2_12
      - Docker_2_13
      - Docker_community_2_14
      - Cloud_2_14
      - Cloud_2_9
      - Cloud_2_10
      - Cloud_2_11
      - Cloud_2_12
      - Cloud_2_13
    jobs:
      - template: templates/coverage.yml
@@ -1,24 +0,0 @@
#!/usr/bin/env bash
# Aggregate code coverage results for later processing.

set -o pipefail -eu

agent_temp_directory="$1"

PATH="${PWD}/bin:${PATH}"

mkdir "${agent_temp_directory}/coverage/"

if [[ "$(ansible --version)" =~ \ 2\.9\. ]]; then
    exit
fi

options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
    # Doing so allows this script to work unmodified for multiple Ansible versions.
    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
fi
@@ -1,60 +0,0 @@
#!/usr/bin/env python
"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shutil
import sys


def main():
    """Main program entry point."""
    source_directory = sys.argv[1]

    if '/ansible_collections/' in os.getcwd():
        output_path = "tests/output"
    else:
        output_path = "test/results"

    destination_directory = os.path.join(output_path, 'coverage')

    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    jobs = {}
    count = 0

    for name in os.listdir(source_directory):
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
        label = match.group('label')
        attempt = int(match.group('attempt'))
        jobs[label] = max(attempt, jobs.get(label, 0))

    for label, attempt in jobs.items():
        name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
        source = os.path.join(source_directory, name)
        source_files = os.listdir(source)

        for source_file in source_files:
            source_path = os.path.join(source, source_file)
            destination_path = os.path.join(destination_directory, source_file + '.' + label)
            print('"%s" -> "%s"' % (source_path, destination_path))
            shutil.copyfile(source_path, destination_path)
            count += 1

    print('Coverage file count: %d' % count)
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
    print('##vso[task.setVariable variable=outputPath]%s' % output_path)


if __name__ == '__main__':
    main()
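The attempt-selection logic above is easiest to see with concrete artifact names. A minimal sketch (the names below are made up; real ones follow the `Coverage $(System.JobAttempt) ...` convention described in the docstring):

```python
import re

# Hypothetical artifact names: the same job label appears twice because the job was retried.
names = [
    'Coverage 1 Units 2.14 Python 3.9',
    'Coverage 2 Units 2.14 Python 3.9',
    'Coverage 1 Sanity 2.14 Test 1',
]

jobs = {}
for name in names:
    match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
    label = match.group('label')
    jobs[label] = max(int(match.group('attempt')), jobs.get(label, 0))

print(jobs)  # {'Units 2.14 Python 3.9': 2, 'Sanity 2.14 Test 1': 1}
```

Only the highest attempt per label survives, so coverage files from earlier, superseded runs of a job are never copied.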
@@ -1,24 +0,0 @@
#!/usr/bin/env bash
# Check the test results and set variables for use in later steps.

set -o pipefail -eu

if [[ "$PWD" =~ /ansible_collections/ ]]; then
    output_path="tests/output"
else
    output_path="test/results"
fi

echo "##vso[task.setVariable variable=outputPath]${output_path}"

if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
    echo "##vso[task.setVariable variable=haveTestResults]true"
fi

if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveBotResults]true"
fi

if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveCoverageData]true"
fi
@@ -1,101 +0,0 @@
#!/usr/bin/env python
"""
Upload code coverage reports to codecov.io.
Multiple coverage files from multiple languages are accepted and aggregated after upload.
Python coverage, as well as PowerShell and Python stubs can all be uploaded.
"""

import argparse
import dataclasses
import pathlib
import shutil
import subprocess
import tempfile
import typing as t
import urllib.request


@dataclasses.dataclass(frozen=True)
class CoverageFile:
    name: str
    path: pathlib.Path
    flags: t.List[str]


@dataclasses.dataclass(frozen=True)
class Args:
    dry_run: bool
    path: pathlib.Path


def parse_args() -> Args:
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--dry-run', action='store_true')
    parser.add_argument('path', type=pathlib.Path)

    args = parser.parse_args()

    # Store arguments in a typed dataclass
    fields = dataclasses.fields(Args)
    kwargs = {field.name: getattr(args, field.name) for field in fields}

    return Args(**kwargs)


def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
    processed = []
    for file in directory.joinpath('reports').glob('coverage*.xml'):
        name = file.stem.replace('coverage=', '')

        # Get flags from name
        flags = name.replace('-powershell', '').split('=')  # Drop '-powershell' suffix
        flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags]  # Remove "-01" from stub files

        processed.append(CoverageFile(name, file, flags))

    return tuple(processed)


def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
    for file in files:
        cmd = [
            str(codecov_bin),
            '--name', file.name,
            '--file', str(file.path),
        ]
        for flag in file.flags:
            cmd.extend(['--flags', flag])

        if dry_run:
            print(f'DRY-RUN: Would run command: {cmd}')
            continue

        subprocess.run(cmd, check=True)


def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
    if dry_run:
        print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
        return

    with urllib.request.urlopen(url) as resp:
        with dest.open('w+b') as f:
            # Read data in chunks rather than all at once
            shutil.copyfileobj(resp, f, 64 * 1024)

    dest.chmod(flags)


def main():
    args = parse_args()
    url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
    with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
        codecov_bin = pathlib.Path(tmpdir) / 'codecov'
        download_file(url, codecov_bin, 0o755, args.dry_run)

        files = process_files(args.path)
        upload_files(codecov_bin, files, args.dry_run)


if __name__ == '__main__':
    main()
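The `--flags` values sent to codecov are derived purely from the report file names. A small sketch of the name handling in `process_files()`, using made-up file names that follow the `coverage=<flag>=...=<suffix>.xml` shape the glob and split logic expects:

```python
import pathlib

# Hypothetical report names for illustration only.
samples = ['coverage=sanity=stub-01.xml', 'coverage=units=py38-powershell.xml']

for sample in samples:
    name = pathlib.Path(sample).stem.replace('coverage=', '')
    flags = name.replace('-powershell', '').split('=')  # drop the '-powershell' suffix
    flags = [f if not f.startswith('stub') else f.split('-')[0] for f in flags]  # 'stub-01' -> 'stub'
    print(name, flags)

# sanity=stub-01 ['sanity', 'stub']
# units=py38-powershell ['units', 'py38']
```

The full name (including any `-powershell` suffix) is kept as the upload's `--name`, while the flags are the cleaned-up components.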
@@ -1,19 +0,0 @@
#!/usr/bin/env bash
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"

if [[ "$(ansible --version)" =~ \ 2\.9\. ]]; then
    exit
fi

if ! ansible-test --help >/dev/null 2>&1; then
    # Install the devel version of ansible-test for generating code coverage reports.
    # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
    # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
@@ -1,34 +0,0 @@
#!/usr/bin/env bash
# Configure the test environment and run the tests.

set -o pipefail -eu

entry_point="$1"
test="$2"
read -r -a coverage_branches <<< "$3"  # space separated list of branches to run code coverage on for scheduled builds

export COMMIT_MESSAGE
export COMPLETE
export COVERAGE
export IS_PULL_REQUEST

if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
    IS_PULL_REQUEST=true
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
else
    IS_PULL_REQUEST=
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
fi

COMPLETE=
COVERAGE=

if [ "${BUILD_REASON}" = "Schedule" ]; then
    COMPLETE=yes

    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
        COVERAGE=yes
    fi
fi

"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
@@ -1,25 +0,0 @@
#!/usr/bin/env python
"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import time


def main():
    """Main program entry point."""
    start = time.time()

    sys.stdin.reconfigure(errors='surrogateescape')
    sys.stdout.reconfigure(errors='surrogateescape')

    for line in sys.stdin:
        seconds = time.time() - start
        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
        sys.stdout.flush()


if __name__ == '__main__':
    main()
@@ -1,39 +0,0 @@
# This template adds a job for processing code coverage data.
# It will upload results to Azure Pipelines and codecov.io.
# Use it from a job stage that completes after all other jobs have completed.
# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.

jobs:
  - job: Coverage
    displayName: Code Coverage
    container: default
    workspace:
      clean: all
    steps:
      - checkout: self
        fetchDepth: $(fetchDepth)
        path: $(checkoutPath)
      - task: DownloadPipelineArtifact@2
        displayName: Download Coverage Data
        inputs:
          path: coverage/
          patterns: "Coverage */*=coverage.combined"
      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
        displayName: Combine Coverage Data
      - bash: .azure-pipelines/scripts/report-coverage.sh
        displayName: Generate Coverage Report
        condition: gt(variables.coverageFileCount, 0)
      - task: PublishCodeCoverageResults@1
        inputs:
          codeCoverageTool: Cobertura
          # Azure Pipelines only accepts a single coverage data file.
          # That means only Python or PowerShell coverage can be uploaded, but not both.
          # Set the "pipelinesCoverage" variable to determine which type is uploaded.
          # Use "coverage" for Python and "coverage-powershell" for PowerShell.
          summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
        displayName: Publish to Azure Pipelines
        condition: gt(variables.coverageFileCount, 0)
      - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
        displayName: Publish to codecov.io
        condition: gt(variables.coverageFileCount, 0)
        continueOnError: true
@@ -1,55 +0,0 @@
# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
# If this matrix template does not provide the required functionality, consider using the test template directly instead.

parameters:
  # A required list of dictionaries, one per test target.
  # Each item in the list must contain a "test" or "name" key.
  # Both may be provided. If one is omitted, the other will be used.
  - name: targets
    type: object

  # An optional list of values which will be used to multiply the targets list into a matrix.
  # Values can be strings or numbers.
  - name: groups
    type: object
    default: []

  # An optional format string used to generate the job name.
  # - {0} is the name of an item in the targets list.
  - name: nameFormat
    type: string
    default: "{0}"

  # An optional format string used to generate the test name.
  # - {0} is the name of an item in the targets list.
  - name: testFormat
    type: string
    default: "{0}"

  # An optional format string used to add the group to the job name.
  # {0} is the formatted name of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: nameGroupFormat
    type: string
    default: "{0} - {{1}}"

  # An optional format string used to add the group to the test name.
  # {0} is the formatted test of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: testGroupFormat
    type: string
    default: "{0}/{{1}}"

jobs:
  - template: test.yml
    parameters:
      jobs:
        - ${{ if eq(length(parameters.groups), 0) }}:
            - ${{ each target in parameters.targets }}:
                - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
                  test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
        - ${{ if not(eq(length(parameters.groups), 0)) }}:
            - ${{ each group in parameters.groups }}:
                - ${{ each target in parameters.targets }}:
                    - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
                      test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
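The doubled braces in `nameGroupFormat` and `testGroupFormat` are the non-obvious part: the inner `format()` fills in `{0}` and collapses `{{1}}` to a literal `{1}`, which the outer `format()` then fills with the group value. A rough Python equivalent of that nested expansion (values borrowed from the Docker 2.14 stage in azure-pipelines.yml; the group number is arbitrary, and Python's `str.format` is assumed to behave like the pipeline `format()` expression here):

```python
# Same brace-escaping rules as the nested format() calls in matrix.yml.
name_format = '{0}'
test_format = '2.14/linux/{0}'
name_group_format = '{0} - {{1}}'
test_group_format = '{0}/{{1}}'

target = {'name': 'Fedora 36', 'test': 'fedora36'}
group = 2

name = name_group_format.format(name_format).format(target['name'], group)
test = test_group_format.format(test_format).format(target['test'], group)

print(name)  # Fedora 36 - 2
print(test)  # 2.14/linux/fedora36/2
```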
@@ -1,45 +0,0 @@
# This template uses the provided list of jobs to create one or more test jobs.
# It can be used directly if needed, or through the matrix template.

parameters:
  # A required list of dictionaries, one per test job.
  # Each item in the list must contain a "job" and "name" key.
  - name: jobs
    type: object

jobs:
  - ${{ each job in parameters.jobs }}:
      - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
        displayName: ${{ job.name }}
        container: default
        workspace:
          clean: all
        steps:
          - checkout: self
            fetchDepth: $(fetchDepth)
            path: $(checkoutPath)
          - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
            displayName: Run Tests
          - bash: .azure-pipelines/scripts/process-results.sh
            condition: succeededOrFailed()
            displayName: Process Results
          - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Aggregate Coverage Data
          - task: PublishTestResults@2
            condition: eq(variables.haveTestResults, 'true')
            inputs:
              testResultsFiles: "$(outputPath)/junit/*.xml"
            displayName: Publish Test Results
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveBotResults, 'true')
            displayName: Publish Bot Results
            inputs:
              targetPath: "$(outputPath)/bot/"
              artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Publish Coverage Data
            inputs:
              targetPath: "$(Agent.TempDirectory)/coverage/"
              artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
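The nested `replace()` calls in the job name exist because Azure Pipelines job identifiers are restricted to letters, digits, and underscores (presumably why `/`, `.`, and `-` are all mapped away). A quick sketch of what they do to a test identifier produced by the matrix template (the identifier below is an example, not a required value):

```python
test = '2.14/linux/fedora36/2'  # example identifier of the shape testFormat produces

job = 'test_' + test.replace('/', '_').replace('.', '_').replace('-', '_')
print(job)  # test_2_14_linux_fedora36_2
```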
.github/BOTMETA.yml
@@ -747,7 +747,8 @@ files:
    labels: rocketchat
    ignore: ramondelafuente
  $modules/notification/say.py:
-    maintainers: $team_ansible_core mpdehaan
+    maintainers: $team_ansible_core
+    ignore: mpdehaan
  $modules/notification/sendgrid.py:
    maintainers: makaimc
  $modules/notification/slack.py:
@@ -1099,7 +1100,8 @@ files:
  $modules/system/nosh.py:
    maintainers: tacatac
  $modules/system/ohai.py:
-    maintainers: $team_ansible_core mpdehaan
+    maintainers: $team_ansible_core
+    ignore: mpdehaan
    labels: ohai
  $modules/system/open_iscsi.py:
    maintainers: srvg
.github/workflows/codeql-analysis.yml
@@ -1,49 +0,0 @@
name: "Code scanning - action"

on:
  schedule:
    - cron: '26 19 * * 1'

jobs:
  CodeQL-Build:

    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          # We must fetch at least the immediate parents so that if this is
          # a pull request then we can checkout the head.
          fetch-depth: 2

      # If this run was triggered by a pull request event, then checkout
      # the head of the pull request instead of the merge commit.
      - run: git checkout HEAD^2
        if: ${{ github.event_name == 'pull_request' }}

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        # Override language selection by uncommenting this and choosing your languages
        # with:
        #   languages: go, javascript, csharp, python, cpp, java

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v1

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #    make bootstrap
      #    make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
@@ -6,6 +6,73 @@ Community General Release Notes

This changelog describes changes after version 3.0.0.

v4.8.11
=======

Release Summary
---------------

Final maintenance release of community.general major version 4.

Major Changes
-------------

- The community.general 4.x.y release stream is now effectively **End of Life**. No more releases will be made, and regular CI runs will stop.

v4.8.10
=======

Release Summary
---------------

Maintenance release.

Bugfixes
--------

- ModuleHelper - fix bug when adjusting the name of reserved output variables (https://github.com/ansible-collections/community.general/pull/5755).
- loganalytics callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
- logdna callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
- logstash callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
- splunk callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
- sumologic callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
- syslog_json callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
- terraform and timezone - slight refactoring to avoid linter reporting potentially undefined variables (https://github.com/ansible-collections/community.general/pull/5933).

v4.8.9
======

Release Summary
---------------

Bugfix release.

Note that from now on, community.general 4.x.y only receives security fixes and major bugfixes, but no longer regular bugfixes.

Bugfixes
--------

- ldap_attrs - fix bug which caused a ``Bad search filter`` error. The error was occurring when the ldap attribute value contained special characters such as ``(`` or ``*`` (https://github.com/ansible-collections/community.general/issues/5434, https://github.com/ansible-collections/community.general/pull/5435).
- ldap_attrs - fix ordering issue by ignoring the ``{x}`` prefix on attribute values (https://github.com/ansible-collections/community.general/issues/977, https://github.com/ansible-collections/community.general/pull/5385).

v4.8.8
======

Release Summary
---------------

Regular bugfix release.

Bugfixes
--------

- archive - avoid crash when ``lzma`` is not present and ``format`` is not ``xz`` (https://github.com/ansible-collections/community.general/pull/5393).
- opentelemetry callback plugin - support opentelemetry-api 1.13.0 that removed support for ``_time_ns`` (https://github.com/ansible-collections/community.general/pull/5342).
- pfexec become plugin - remove superfluous quotes preventing exe wrap from working as expected (https://github.com/ansible-collections/community.general/issues/3671, https://github.com/ansible-collections/community.general/pull/3889).
- pkgng - fix case when ``pkg`` fails when trying to upgrade all packages (https://github.com/ansible-collections/community.general/issues/5363).
- redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313).
- xenserver_facts - fix broken ``AnsibleModule`` call that prevented the module from working at all (https://github.com/ansible-collections/community.general/pull/5383).

v4.8.7
======
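For the ldap_attrs ordering fix, the ``{x}`` prefix refers to OpenLDAP's ordered attribute values, which the server may return as ``{0}value``, ``{1}value`` and so on. A sketch of the idea behind the fix, not the module's actual code (the attribute values below are illustrative):

```python
import re

def strip_order_prefix(value):
    # OpenLDAP can return ordered values as "{n}value"; ignore that index when comparing.
    return re.sub(r'^\{\d+\}', '', value)

existing = ['{0}to attrs=userPassword by self write by * none']
wanted = 'to attrs=userPassword by self write by * none'

print(any(strip_order_prefix(v) == wanted for v in existing))  # True
```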
@@ -1,6 +1,7 @@
# Community General Collection

[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
+[](https://github.com/ansible-collections/community.general/actions)
[](https://codecov.io/gh/ansible-collections/community.general)

This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by the Ansible community which are not part of more specialized community collections.
@@ -1808,6 +1808,42 @@ releases:
|
||||
- 4647-gconftool2-command-arg.yaml
|
||||
- psf-license.yml
|
||||
release_date: '2022-05-16'
|
||||
4.8.10:
|
||||
changes:
|
||||
bugfixes:
|
||||
- ModuleHelper - fix bug when adjusting the name of reserved output variables
|
||||
(https://github.com/ansible-collections/community.general/pull/5755).
|
||||
- loganalytics callback plugin - adjust type of callback to ``notification``,
|
||||
it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- logdna callback plugin - adjust type of callback to ``notification``, it was
|
||||
incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- logstash callback plugin - adjust type of callback to ``notification``, it
|
||||
was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- splunk callback plugin - adjust type of callback to ``notification``, it was
|
||||
incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- sumologic callback plugin - adjust type of callback to ``notification``, it
|
||||
was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- syslog_json callback plugin - adjust type of callback to ``notification``,
|
||||
it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
|
||||
- terraform and timezone - slight refactoring to avoid linter reporting potentially
|
||||
undefined variables (https://github.com/ansible-collections/community.general/pull/5933).
|
||||
release_summary: Maintenance release.
|
||||
fragments:
|
||||
- 4.8.10.yml
|
||||
- 5755-mh-fix-output-conflict.yml
|
||||
- 5761-callback-types.yml
|
||||
- 5933-linting.yml
|
||||
release_date: '2023-03-26'
|
||||
4.8.11:
|
||||
changes:
|
||||
major_changes:
|
||||
- The community.general 4.x.y release stream is now effectively **End of Life**.
|
||||
No more releases will be made, and regular CI runs will stop.
|
||||
release_summary: Final maintenance release of community.general major version
|
||||
4.
|
||||
fragments:
|
||||
- eol.yml
|
||||
release_date: '2023-05-08'
|
||||
4.8.2:
|
||||
changes:
|
||||
bugfixes:
|
||||
@@ -1955,3 +1991,47 @@ releases:
|
||||
- 5259-gitlab-imports.yaml
|
||||
- 5282-locale_gen.yaml
|
||||
release_date: '2022-10-03'
|
||||
4.8.8:
|
||||
changes:
|
||||
bugfixes:
|
||||
- archive - avoid crash when ``lzma`` is not present and ``format`` is not ``xz``
|
||||
(https://github.com/ansible-collections/community.general/pull/5393).
|
||||
- opentelemetry callback plugin - support opentelemetry-api 1.13.0 that removed
|
||||
support for ``_time_ns`` (https://github.com/ansible-collections/community.general/pull/5342).
|
||||
- pfexec become plugin - remove superflous quotes preventing exe wrap from working
|
||||
as expected (https://github.com/ansible-collections/community.general/issues/3671,
|
||||
https://github.com/ansible-collections/community.general/pull/3889).
|
||||
- pkgng - fix case when ``pkg`` fails when trying to upgrade all packages (https://github.com/ansible-collections/community.general/issues/5363).
|
||||
- redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313).
|
||||
- xenserver_facts - fix broken ``AnsibleModule`` call that prevented the module
|
||||
from working at all (https://github.com/ansible-collections/community.general/pull/5383).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 3671-illumos-pfexec.yml
|
||||
- 4.8.8.yml
|
||||
- 5313-fix-redhat_subscription-idempotency-pool_ids.yml
|
||||
- 5342-opentelemetry_bug_fix_opentelemetry-api-1.13.yml
|
||||
- 5369-pkgng-fix-update-all.yaml
|
||||
- 5383-xenserver_facts.yml
|
||||
- 5393-archive.yml
|
||||
release_date: '2022-10-24'
|
||||
4.8.9:
|
||||
changes:
|
||||
bugfixes:
|
||||
- ldap_attrs - fix bug which caused a ``Bad search filter`` error. The error
|
||||
was occuring when the ldap attribute value contained special characters such
|
||||
as ``(`` or ``*`` (https://github.com/ansible-collections/community.general/issues/5434,
|
||||
https://github.com/ansible-collections/community.general/pull/5435).
|
||||
- ldap_attrs - fix ordering issue by ignoring the ``{x}`` prefix on attribute
|
||||
values (https://github.com/ansible-collections/community.general/issues/977,
|
||||
https://github.com/ansible-collections/community.general/pull/5385).
|
||||
release_summary: 'Bugfix release.
|
||||
|
||||
|
||||
Note that from now on, community.general 4.x.y only receives security fixes
|
||||
and major bugfixes, but no longer regular bugfixes.'
|
||||
fragments:
|
||||
- 4.8.9.yml
|
||||
- 5385-search_s-based-_is_value_present.yaml
|
||||
- 5435-escape-ldap-param.yml
|
||||
release_date: '2022-11-06'
|
||||
|
||||
@@ -1,6 +1,6 @@
namespace: community
name: general
-version: 4.8.7
+version: 4.8.11
readme: README.md
authors:
  - Ansible (https://github.com/ansible)
@@ -101,4 +101,4 @@ class BecomeModule(BecomeBase):

        flags = self.get_option('become_flags')
        noexe = not self.get_option('wrap_exe')
-        return '%s %s "%s"' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
+        return '%s %s %s' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
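What the pfexec change amounts to: the become command is no longer wrapped in an extra pair of double quotes, which is what broke exe wrapping. A simplified illustration (the success command string below is a stand-in, not what `_build_success_command()` actually returns):

```python
exe, flags = 'pfexec', ''
success_command = "/bin/sh -c 'echo BECOME-SUCCESS; id'"  # stand-in value

old = '%s %s "%s"' % (exe, flags, success_command)  # quotes the whole command a second time
new = '%s %s %s' % (exe, flags, success_command)    # passes it through unchanged

print(old)
print(new)
```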
@@ -6,7 +6,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    name: loganalytics
-    type: aggregate
+    type: notification
    short_description: Posts task results to Azure Log Analytics
    author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
    description:
@@ -153,7 +153,7 @@ class AzureLogAnalyticsSource(object):

class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
-    CALLBACK_TYPE = 'aggregate'
+    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'loganalytics'
    CALLBACK_NEEDS_WHITELIST = True
@@ -8,7 +8,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    author: Unknown (!UNKNOWN)
    name: logdna
-    type: aggregate
+    type: notification
    short_description: Sends playbook logs to LogDNA
    description:
      - This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com)
@@ -110,7 +110,7 @@ def isJSONable(obj):
class CallbackModule(CallbackBase):

    CALLBACK_VERSION = 0.1
-    CALLBACK_TYPE = 'aggregate'
+    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.logdna'
    CALLBACK_NEEDS_WHITELIST = True
@@ -112,7 +112,7 @@ from ansible.plugins.callback import CallbackBase

class CallbackModule(CallbackBase):

    CALLBACK_VERSION = 2.0
-    CALLBACK_TYPE = 'aggregate'
+    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.logstash'
    CALLBACK_NEEDS_WHITELIST = True
@@ -94,13 +94,32 @@ try:
    from opentelemetry.sdk.trace.export import (
        BatchSpanProcessor
    )
-    from opentelemetry.util._time import _time_ns
+
+    # Support for opentelemetry-api <= 1.12
+    try:
+        from opentelemetry.util._time import _time_ns
+    except ImportError as imp_exc:
+        OTEL_LIBRARY_TIME_NS_ERROR = imp_exc
+    else:
+        OTEL_LIBRARY_TIME_NS_ERROR = None

except ImportError as imp_exc:
    OTEL_LIBRARY_IMPORT_ERROR = imp_exc
+    OTEL_LIBRARY_TIME_NS_ERROR = imp_exc
else:
    OTEL_LIBRARY_IMPORT_ERROR = None


+if sys.version_info >= (3, 7):
+    time_ns = time.time_ns
+elif not OTEL_LIBRARY_TIME_NS_ERROR:
+    time_ns = _time_ns
+else:
+    def time_ns():
+        # Support versions older than 3.7 with opentelemetry-api > 1.12
+        return int(time.time() * 1e9)
+

class TaskData:
    """
    Data about an individual task.
@@ -112,10 +131,7 @@ class TaskData:
        self.path = path
        self.play = play
        self.host_data = OrderedDict()
-        if sys.version_info >= (3, 7):
-            self.start = time.time_ns()
-        else:
-            self.start = _time_ns()
+        self.start = time_ns()
        self.action = action
        self.args = args

@@ -140,10 +156,7 @@ class HostData:
        self.name = name
        self.status = status
        self.result = result
-        if sys.version_info >= (3, 7):
-            self.finish = time.time_ns()
-        else:
-            self.finish = _time_ns()
+        self.finish = time_ns()


class OpenTelemetrySource(object):
@@ -19,7 +19,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    name: splunk
-    type: aggregate
+    type: notification
    short_description: Sends task result events to Splunk HTTP Event Collector
    author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
    description:
@@ -176,7 +176,7 @@ class SplunkHTTPCollectorSource(object):

class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
-    CALLBACK_TYPE = 'aggregate'
+    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.splunk'
    CALLBACK_NEEDS_WHITELIST = True
@@ -19,7 +19,7 @@ __metaclass__ = type

DOCUMENTATION = '''
    name: sumologic
-    type: aggregate
+    type: notification
    short_description: Sends task result events to Sumologic
    author: "Ryan Currah (@ryancurrah)"
    description:
@@ -122,7 +122,7 @@ class SumologicHTTPCollectorSource(object):

class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
-    CALLBACK_TYPE = 'aggregate'
+    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.sumologic'
    CALLBACK_NEEDS_WHITELIST = True
@@ -70,7 +70,7 @@ class CallbackModule(CallbackBase):
    """

    CALLBACK_VERSION = 2.0
-    CALLBACK_TYPE = 'aggregate'
+    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'community.general.syslog_json'
    CALLBACK_NEEDS_WHITELIST = True
@@ -42,6 +42,7 @@ options:
|
||||
- The path on which InfluxDB server is accessible
|
||||
- Only available when using python-influxdb >= 5.1.0
|
||||
type: str
|
||||
default: ''
|
||||
version_added: '0.2.0'
|
||||
validate_certs:
|
||||
description:
|
||||
@@ -79,4 +80,5 @@ options:
|
||||
description:
|
||||
- HTTP(S) proxy to use for Requests to connect to InfluxDB server.
|
||||
type: dict
|
||||
default: {}
|
||||
'''
|
||||
|
||||
@@ -22,6 +22,7 @@ options:
|
||||
description:
|
||||
- The password to use with I(bind_dn).
|
||||
type: str
|
||||
default: ''
|
||||
dn:
|
||||
required: true
|
||||
description:
|
||||
@@ -58,7 +59,7 @@ options:
|
||||
sasl_class:
|
||||
description:
|
||||
- The class to use for SASL authentication.
|
||||
- possible choices are C(external), C(gssapi).
|
||||
- Possible choices are C(external), C(gssapi).
|
||||
type: str
|
||||
choices: ['external', 'gssapi']
|
||||
default: external
|
||||
|
||||
@@ -16,6 +16,7 @@ options:
|
||||
- Is needed for some modules
|
||||
type: dict
|
||||
required: false
|
||||
default: {}
|
||||
utm_host:
|
||||
description:
|
||||
- The REST Endpoint of the Sophos UTM.
|
||||
|
||||
@@ -51,10 +51,16 @@ DOCUMENTATION = '''
|
||||
type: boolean
|
||||
default: false
|
||||
requirements:
|
||||
- jc (https://github.com/kellyjonbrazil/jc)
|
||||
- jc installed as a Python library (U(https://pypi.org/project/jc/))
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller
|
||||
delegate_to: localhost
|
||||
ansible.builtin.pip:
|
||||
name: jc
|
||||
state: present
|
||||
|
||||
- name: Run command
|
||||
ansible.builtin.command: uname -a
|
||||
register: result
|
||||
@@ -107,15 +113,19 @@ def jc(data, parser, quiet=True, raw=False):
|
||||
dictionary or list of dictionaries
|
||||
|
||||
Example:
|
||||
|
||||
- name: run date command
|
||||
hosts: ubuntu
|
||||
tasks:
|
||||
- shell: date
|
||||
- name: install the prereqs of the jc filter (jc Python package) on the Ansible controller
|
||||
delegate_to: localhost
|
||||
ansible.builtin.pip:
|
||||
name: jc
|
||||
state: present
|
||||
- ansible.builtin.shell: date
|
||||
register: result
|
||||
- set_fact:
|
||||
- ansible.builtin.set_fact:
|
||||
myvar: "{{ result.stdout | community.general.jc('date') }}"
|
||||
- debug:
|
||||
- ansible.builtin.debug:
|
||||
msg: "{{ myvar }}"
|
||||
|
||||
produces:
|
||||
@@ -137,7 +147,7 @@ def jc(data, parser, quiet=True, raw=False):
|
||||
"""
|
||||
|
||||
if not HAS_LIB:
|
||||
raise AnsibleError('You need to install "jc" prior to running jc filter')
|
||||
raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter')
|
||||
|
||||
try:
|
||||
jc_parser = importlib.import_module('jc.parsers.' + parser)
|
||||
|
||||
@@ -70,7 +70,7 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper
        vars_diff = self.vars.diff() or {}
        result['diff'] = dict_merge(dict(diff), vars_diff)

-        for varname in result:
+        for varname in list(result):
            if varname in self._output_conflict_list:
                result["_" + varname] = result[varname]
                del result[varname]
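The small-looking change above (`result` vs `list(result)`) is what the 4.8.10 ModuleHelper fix is about: the loop inserts and deletes keys in the dict it is iterating over, which recent Python 3 versions reject. A standalone sketch of the failure mode and the fix, not the ModuleHelper code itself:

```python
conflicts = {'vars'}

result = {'vars': 1, 'changed': True}
try:
    for varname in result:                     # iterating the dict directly
        if varname in conflicts:
            result['_' + varname] = result[varname]
            del result[varname]
except RuntimeError as exc:
    print(exc)  # e.g. "dictionary keys changed during iteration"

result = {'vars': 1, 'changed': True}
for varname in list(result):                   # iterate over a snapshot of the keys
    if varname in conflicts:
        result['_' + varname] = result[varname]
        del result[varname]
print(result)  # {'changed': True, '_vars': 1}
```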
@@ -60,6 +60,7 @@ options:
|
||||
- The values specified here will be used at installation time as --set arguments for atomic install.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
@@ -44,6 +44,7 @@ options:
|
||||
description:
|
||||
- A description of the VLAN.
|
||||
type: str
|
||||
default: ''
|
||||
network_domain:
|
||||
description:
|
||||
- The Id or name of the target network domain.
|
||||
@@ -53,11 +54,13 @@ options:
|
||||
description:
|
||||
- The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
|
||||
type: str
|
||||
default: ''
|
||||
private_ipv4_prefix_size:
|
||||
description:
|
||||
- The size of the IPv4 address space, e.g 24.
|
||||
- Required, if C(private_ipv4_base_address) is specified.
|
||||
type: int
|
||||
default: 0
|
||||
state:
|
||||
description:
|
||||
- The desired state for the target VLAN.
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -33,6 +33,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
|
||||
suboptions:
|
||||
create:
|
||||
description:
|
||||
|
||||
@@ -34,6 +34,7 @@ options:
|
||||
description:
|
||||
- The timeouts for each operations.
|
||||
type: dict
|
||||
default: {}
suboptions:
create:
description:

@@ -33,6 +33,7 @@ options:
description:
- The timeouts for each operations.
type: dict
default: {}
suboptions:
create:
description:

@@ -33,6 +33,7 @@ options:
description:
- The timeouts for each operations.
type: dict
default: {}
suboptions:
create:
description:

@@ -36,6 +36,7 @@ options:
description:
- Add the instance to a Display Group in Linode Manager.
type: str
default: ''
linode_id:
description:
- Unique ID of a linode server. This value is read-only in the sense that

@@ -43,6 +43,7 @@ options:
- The default TTL for all records created in the zone. This must be a
valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
type: int
default: 0
choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
force:
required: false

@@ -43,11 +43,13 @@ options:
description:
- C(SRV) and C(TXT) record priority, in the range 0 > 999 (inclusive).
type: int
default: 0
record:
required: false
description:
- The subdomain to create.
type: str
default: ''
type:
required: true
description:
@@ -64,6 +66,7 @@ options:
description:
- The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
default: 0
choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
type: int
zone:

@@ -53,6 +53,7 @@ options:
description:
- The RHEV/oVirt cluster in which you want you VM to start.
type: str
default: ''
datacenter:
description:
- The RHEV/oVirt datacenter in which you want you VM to start.

@@ -34,6 +34,7 @@ options:
- The name of the serverless framework project stage to deploy to.
- This uses the serverless framework default "dev".
type: str
default: ''
functions:
description:
- A list of specific functions to deploy.
@@ -41,12 +42,12 @@ options:
- Deprecated parameter, it will be removed in community.general 5.0.0.
type: list
elements: str
default: []
region:
description:
- AWS region to deploy the service to.
- This parameter defaults to C(us-east-1).
type: str
default: ''
deploy:
description:
- Whether or not to deploy artifacts after building them.

@@ -89,6 +89,7 @@ options:
resources selected here will also auto-include any dependencies.
type: list
elements: str
default: []
lock:
description:
- Enable statefile locking, if you use a service that accepts locks (such
@@ -507,9 +508,9 @@ def main():

outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
outputs = {}
if rc == 1:
module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
outputs = {}
elif rc != 0:
module.fail_json(
msg="Failure when getting Terraform outputs. "
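The Terraform hunk above downgrades the missing-outputs case (`terraform output -json` exiting 1) from a failure to a warning. A minimal standalone sketch of the same pattern, assuming an AnsibleModule-like object; the helper name `read_terraform_outputs` is illustrative and not taken from the diff:

    import json

    def read_terraform_outputs(module, terraform_bin, state_args, project_path):
        # Run `terraform output -no-color -json`; exit code 1 usually means no outputs are defined.
        cmd = [terraform_bin, 'output', '-no-color', '-json'] + state_args
        rc, out, err = module.run_command(cmd, cwd=project_path)
        if rc == 1:
            module.warn("Could not get Terraform outputs. This usually means none have been defined.")
            return {}
        if rc != 0:
            module.fail_json(msg="Failure when getting Terraform outputs.", stderr=err)
        return json.loads(out)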
@@ -161,9 +161,7 @@ def get_srs(session):


def main():
module = AnsibleModule(
supports_check_mode=True,
)
module = AnsibleModule({}, supports_check_mode=True)

if not HAVE_XENAPI:
module.fail_json(changed=False, msg="python xen api required for this module")
@@ -59,6 +59,7 @@ options:
(port_from, port_to, and source)
type: list
elements: dict
default: []
add_server_ips:
description:
- A list of server identifiers (id or name) to be assigned to a firewall policy.
@@ -66,12 +67,14 @@ options:
type: list
elements: str
required: false
default: []
remove_server_ips:
description:
- A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
type: list
elements: str
required: false
default: []
add_rules:
description:
- A list of rules that will be added to an existing firewall policy.
@@ -79,12 +82,14 @@ options:
type: list
elements: dict
required: false
default: []
remove_rules:
description:
- A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
type: list
elements: str
required: false
default: []
description:
description:
- Firewall policy description. maxLength=256

@@ -97,6 +97,7 @@ options:
port_balancer, and port_server parameters, in addition to source parameter, which is optional.
type: list
elements: dict
default: []
description:
description:
- Description of the load balancer. maxLength=256
@@ -109,12 +110,14 @@ options:
type: list
elements: str
required: false
default: []
remove_server_ips:
description:
- A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
type: list
elements: str
required: false
default: []
add_rules:
description:
- A list of rules that will be added to an existing load balancer.
@@ -122,12 +125,14 @@ options:
type: list
elements: dict
required: false
default: []
remove_rules:
description:
- A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
type: list
elements: str
required: false
default: []
wait:
description:
- wait for the instance to be in state 'running' before returning
@@ -73,6 +73,7 @@ options:
and value is used to advise when the value is exceeded.
type: list
elements: dict
default: []
suboptions:
cpu:
description:
@@ -99,6 +100,7 @@ options:
- Array of ports that will be monitoring.
type: list
elements: dict
default: []
suboptions:
protocol:
description:
@@ -123,6 +125,7 @@ options:
- Array of processes that will be monitoring.
type: list
elements: dict
default: []
suboptions:
process:
description:
@@ -139,48 +142,56 @@ options:
type: list
elements: dict
required: false
default: []
add_processes:
description:
- Processes to add to the monitoring policy.
type: list
elements: dict
required: false
default: []
add_servers:
description:
- Servers to add to the monitoring policy.
type: list
elements: str
required: false
default: []
remove_ports:
description:
- Ports to remove from the monitoring policy.
type: list
elements: str
required: false
default: []
remove_processes:
description:
- Processes to remove from the monitoring policy.
type: list
elements: str
required: false
default: []
remove_servers:
description:
- Servers to remove from the monitoring policy.
type: list
elements: str
required: false
default: []
update_ports:
description:
- Ports to be updated on the monitoring policy.
type: list
elements: dict
required: false
default: []
update_processes:
description:
- Processes to be updated on the monitoring policy.
type: list
elements: dict
required: false
default: []
wait:
description:
- wait for the instance to be in state 'running' before returning

@@ -73,11 +73,13 @@ options:
- List of server identifiers (name or id) to be added to the private network.
type: list
elements: str
default: []
remove_members:
description:
- List of server identifiers (name or id) to be removed from the private network.
type: list
elements: str
default: []
wait:
description:
- wait for the instance to be in state 'running' before returning

@@ -136,6 +136,7 @@ options:
- URL of custom iPXE script for provisioning.
- More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
type: str
default: ''

always_pxe:
description:
@@ -37,6 +37,7 @@ options:
- Public SSH keys allowing access to the virtual machine.
type: list
elements: str
default: []
datacenter:
description:
- The datacenter to provision this virtual machine.
@@ -73,6 +74,7 @@ options:
- list of instance ids, currently only used when state='absent' to remove instances.
type: list
elements: str
default: []
count:
description:
- The number of virtual machines to create.

@@ -49,7 +49,7 @@ options:
- Public SSH keys allowing access to the virtual machine.
type: list
elements: str
required: false
default: []
disk_type:
description:
- The disk type of the volume.
@@ -80,7 +80,7 @@ options:
- list of instance ids, currently only used when state='absent' to remove instances.
type: list
elements: str
required: false
default: []
subscription_user:
description:
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.

@@ -36,6 +36,7 @@ options:
same play)."
required: false
type: str
default: ''
password:
description:
- Password which match to account to which specified C(email) belong.
@@ -43,6 +44,7 @@ options:
same play)."
required: false
type: str
default: ''
cache:
description: >
In case if single play use blocks management module few times it is
@@ -57,7 +59,7 @@ options:
manage blocks."
- "User's account will be used if value not set or empty."
type: str
required: false
default: ''
application:
description:
- "Name of target PubNub application for which blocks configuration on
@@ -81,17 +81,20 @@ options:
default: 'no'
extra_client_args:
type: dict
default: {}
description:
- A hash of key/value pairs to be used when creating the cloudservers
client. This is considered an advanced option, use it wisely and
with caution.
extra_create_args:
type: dict
default: {}
description:
- A hash of key/value pairs to be used when creating a new server.
This is considered an advanced option, use it wisely and with caution.
files:
type: dict
default: {}
description:
- Files to insert into the instance. remotefilename:localcontent
flavor:
@@ -123,6 +126,7 @@ options:
- keypair
meta:
type: dict
default: {}
description:
- A hash of metadata to associate with the instance
name:

@@ -25,6 +25,7 @@ options:
C(name). This option requires C(pyrax>=1.9.3)
meta:
type: dict
default: {}
description:
- A hash of metadata to associate with the volume
name:

@@ -27,6 +27,7 @@ options:
default: LEAST_CONNECTIONS
meta:
type: dict
default: {}
description:
- A hash of metadata to associate with the instance
name:

@@ -27,6 +27,7 @@ options:
- The container to use for container or metadata operations.
meta:
type: dict
default: {}
description:
- A hash of items to set as metadata values on a container
private:

@@ -39,6 +39,7 @@ options:
Requires an integer, specifying expiration in seconds
meta:
type: dict
default: {}
description:
- A hash of items to set as metadata values on an uploaded file or folder
method:

@@ -29,6 +29,7 @@ options:
- Server name to modify metadata for
meta:
type: dict
default: {}
description:
- A hash of metadata to associate with the instance
author: "Matt Martz (@sivel)"

@@ -86,6 +86,7 @@ options:
I(ip_addresses) hash to resolve an IP address to target.
details:
type: dict
default: {}
description:
- Additional details specific to the check type. Must be a hash of strings
between 1 and 255 characters long, or an array or object containing 0 to
@@ -97,6 +98,7 @@ options:
default: false
metadata:
type: dict
default: {}
description:
- Hash of arbitrary key-value pairs to accompany this check if it fires.
Keys and values must be strings between 1 and 255 characters long.

@@ -37,6 +37,7 @@ options:
bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
named_ip_addresses:
type: dict
default: {}
description:
- Hash of IP addresses that may be referenced by name by rax_mon_checks
added to this entity. Must be a dictionary of with keys that are names
@@ -44,6 +45,7 @@ options:
addresses.
metadata:
type: dict
default: {}
description:
- Hash of arbitrary C(name), C(value) pairs that are passed to associated
rax_mon_alarms. Names and values must all be between 1 and 255 characters

@@ -36,6 +36,7 @@ options:
- manual
files:
type: dict
default: {}
description:
- 'Files to insert into the instance. Hash of C(remotepath: localpath)'
flavor:
@@ -65,6 +66,7 @@ options:
required: true
meta:
type: dict
default: {}
description:
- A hash of metadata to associate with the instance
min_entities:
@@ -65,6 +65,7 @@ options:
tags:
type: list
elements: str
default: []
description:
- List of tags to apply to the load-balancer

@@ -142,6 +142,7 @@ options:
- List of ssh keys by their Id to be assigned to a virtual instance.
type: list
elements: str
default: []
post_uri:
description:
- URL of a post provisioning script to be loaded and executed on virtual instance.

@@ -502,6 +502,12 @@ options:
Only works if wait_for_instances is True.
type: int

do_not_update:
description:
- TODO document.
type: list
default: []

'''
EXAMPLES = '''
# Basic configuration YAML example
@@ -42,11 +42,13 @@ options:
nameserver:
type: list
elements: str
default: []
description:
- List of appropriate name servers. Required if C(state=present).
interfaces:
type: list
elements: str
default: []
description:
- List of interface IP addresses, on which the server should
response this zone. Required if C(state=present).

@@ -44,11 +44,13 @@ options:
- define the whole ldap position of the group, e.g.
C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
type: str
default: ''
ou:
required: false
description:
- LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
type: str
default: ''
subpath:
required: false
description:
@@ -168,6 +168,7 @@ options:
description:
- List of telephone numbers.
type: list
default: []
postcode:
description:
- Postal code of users business address.
@@ -199,11 +200,13 @@ options:
join."
aliases: [ sambaPrivileges ]
type: list
default: []
samba_user_workstations:
description:
- Allow the authentication only on this Microsoft Windows host.
aliases: [ sambaUserWorkstations ]
type: list
default: []
sambahome:
description:
- Windows home path, e.g. C('\\$FQDN\$USERNAME').

@@ -26,10 +26,12 @@ options:
description:
- The username used to authenticate with
type: str
default: ''
login_password:
description:
- The password used to authenticate with
type: str
default: ''
login_host:
description:
- Host running the database
@@ -65,13 +65,15 @@ options:
type: bool
default: no
notes:
- Requires tarfile, zipfile, gzip and bzip2 packages on target host.
- Requires lzma or backports.lzma if using xz format.
- Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
- Can produce I(gzip), I(bzip2), I(lzma), and I(zip) compressed files or archives.
- This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives.
These are part of the Python standard library for Python 2 and 3.
requirements:
- Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python 2) if using C(xz) format.
seealso:
- module: ansible.builtin.unarchive
- module: ansible.builtin.unarchive
author:
- Ben Doherty (@bendoh)
- Ben Doherty (@bendoh)
'''

EXAMPLES = r'''
@@ -581,6 +583,11 @@ class TarArchive(Archive):
self.file.add(path, archive_name, recursive=False, exclude=py26_filter)

def _get_checksums(self, path):
if HAS_LZMA:
LZMAError = lzma.LZMAError
else:
# Just picking another exception that's also listed below
LZMAError = tarfile.ReadError
try:
if self.format == 'xz':
with lzma.open(_to_native_ascii(path), 'r') as f:
@@ -591,7 +598,7 @@ class TarArchive(Archive):
archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format)
checksums = set((info.name, info.chksum) for info in archive.getmembers())
archive.close()
except (lzma.LZMAError, tarfile.ReadError, tarfile.CompressionError):
except (LZMAError, tarfile.ReadError, tarfile.CompressionError):
try:
# The python implementations of gzip, bz2, and lzma do not support restoring compressed files
# to their original names so only file checksum is returned
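The `_get_checksums` hunk above guards the `except` clause against Python installations that lack C(lzma): when the library is missing, `LZMAError` is aliased to an exception type that is already caught. A small sketch of that import-guard pattern; the helper name `tar_member_checksums` is illustrative and not from the module:

    import tarfile

    try:
        import lzma
        HAS_LZMA = True
    except ImportError:
        HAS_LZMA = False

    # Alias the lzma error to an exception that is caught anyway, so the
    # except tuple below stays valid even without the lzma library.
    LZMAError = lzma.LZMAError if HAS_LZMA else tarfile.ReadError

    def tar_member_checksums(path, mode='r'):
        # Collect (name, chksum) pairs, tolerating unreadable or non-tar archives.
        try:
            with tarfile.open(path, mode) as archive:
                return set((info.name, info.chksum) for info in archive.getmembers())
        except (LZMAError, tarfile.ReadError, tarfile.CompressionError):
            return set()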
@@ -39,6 +39,7 @@ options:
- The namespace C(prefix:uri) mapping for the XPath expression.
- Needs to be a C(dict), not a C(list) of items.
type: dict
default: {}
state:
description:
- Set or remove an xpath selection (node(s), attribute(s)).

@@ -129,6 +129,7 @@ options:
vendor:
description:
- LDAP vendor (provider).
- Use short name. For instance, write C(rhds) for "Red Hat Directory Server".
type: str

usernameLDAPAttribute:

@@ -85,7 +85,6 @@ options:
description:
- The secret key for your subdomain.
- Only required for initial sign in.
default: {}
required: False
cli_path:
type: path
@@ -52,14 +52,12 @@ options:
elements: str
description:
- List of handlers to notify when the check fails
default: []
subscribers:
type: list
elements: str
description:
- List of subscribers/channels this check should run for
- See sensu_subscribers to subscribe a machine to a channel
default: []
interval:
type: int
description:
@@ -91,7 +89,6 @@ options:
elements: str
description:
- Other checks this check depends on, if dependencies fail handling of this check will be disabled
default: []
metric:
description:
- Whether the check is a metric
@@ -137,7 +134,6 @@ options:
description:
- A hash/dictionary of custom parameters for mixing to the configuration.
- You can't rewrite others module parameters using this
default: {}
source:
type: str
description:

@@ -61,6 +61,7 @@ options:
type: str
description:
- The prefix to add to the metric.
default: ''
value:
type: int
required: true

@@ -126,7 +126,6 @@ options:
description:
- Name or id of the contact list that the monitor will notify.
- The default C('') means the Account Owner.
default: ''
type: str

httpFqdn:
@@ -41,27 +41,22 @@ options:
description:
- Network ID.
type: str
default: ''
ip_address:
description:
- IP Address for a reservation or a release.
type: str
default: ''
network_address:
description:
- Network address with CIDR format (e.g., 192.168.310.0).
type: str
default: ''
network_size:
description:
- Network bitmask (e.g. 255.255.255.220) or CIDR format (e.g., /26).
type: str
default: ''
network_name:
description:
- The name of a network.
type: str
default: ''
network_location:
description:
- The parent network id for a given network.
@@ -166,13 +166,15 @@ modlist:
import traceback

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native, to_bytes
from ansible.module_utils.common.text.converters import to_native, to_bytes, to_text
from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs

import re

LDAP_IMP_ERR = None
try:
import ldap
import ldap.filter

HAS_LDAP = True
except ImportError:
@@ -261,9 +263,11 @@ class LdapAttrs(LdapGeneric):
def _is_value_present(self, name, value):
""" True if the target attribute has the given value. """
try:
is_present = bool(
self.connection.compare_s(self.dn, name, value))
except ldap.NO_SUCH_ATTRIBUTE:
escaped_value = ldap.filter.escape_filter_chars(to_text(value))
filterstr = "(%s=%s)" % (name, escaped_value)
dns = self.connection.search_s(self.dn, ldap.SCOPE_BASE, filterstr)
is_present = len(dns) == 1
except ldap.NO_SUCH_OBJECT:
is_present = False

return is_present
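The `_is_value_present` hunk above falls back to a base-scope LDAP search when `compare_s` raises `NO_SUCH_ATTRIBUTE`, escaping the value before it is placed in the filter string. A standalone sketch of that escaping step, assuming the python-ldap package is available; the function name is illustrative:

    import ldap
    import ldap.filter

    def attribute_has_value(connection, dn, name, value):
        # Escape the value so special filter characters (e.g. '*' or '(') are matched literally.
        filterstr = "(%s=%s)" % (name, ldap.filter.escape_filter_chars(value))
        results = connection.search_s(dn, ldap.SCOPE_BASE, filterstr)
        return len(results) == 1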
@@ -36,6 +36,7 @@ options:
entries are never modified. To assert specific attribute values on an
existing entry, use M(community.general.ldap_attrs) module instead.
type: dict
default: {}
objectClass:
description:
- If I(state=present), value or list of values to use when creating
@@ -180,7 +180,7 @@ def main():
required=False, choices=["present", "absent"], default="present"
),
)
),
)

module = AnsibleModule(argument_spec=argument_spec)

@@ -115,7 +115,7 @@ def main():
dict(
organization=dict(required=False, type="str", default=None, aliases=["org"])
)
),
)

module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

@@ -324,7 +324,7 @@ def main():
user_disabled=dict(required=False, type="bool", default=None),
user_gravatar=dict(required=False, type="bool", default=None),
)
),
)

module = AnsibleModule(argument_spec=argument_spec)

@@ -157,7 +157,7 @@ def main():
default="client",
),
)
),
)

module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
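The four `def main()` hunks above fix the same structural slip: a closing parenthesis was placed so that module-specific options ended up as extra arguments to `AnsibleModule()` instead of inside the argument spec. A minimal sketch of the intended shape; the `base_spec()` helper is hypothetical and stands in for the shared spec builders these modules use:

    from ansible.module_utils.basic import AnsibleModule

    def base_spec():
        # Hypothetical stand-in for a shared argument-spec helper.
        return dict(state=dict(type="str", choices=["present", "absent"], default="present"))

    def main():
        argument_spec = base_spec()
        # Module-specific options belong inside the spec dict ...
        argument_spec.update(organization=dict(required=False, type="str", default=None, aliases=["org"]))
        # ... and only the finished dict is handed to AnsibleModule.
        module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
        module.exit_json(changed=False)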
@@ -48,12 +48,14 @@ options:
- This is a list, which may contain address and phrase portions.
type: list
elements: str
default: []
bcc:
description:
- The email-address(es) the mail is being 'blind' copied to.
- This is a list, which may contain address and phrase portions.
type: list
elements: str
default: []
subject:
description:
- The subject of the email being sent.

@@ -30,6 +30,7 @@ options:
type: str
description:
- Composer arguments like required package, version and so on.
default: ''
executable:
type: path
description:

@@ -50,6 +50,7 @@ options:
type: str
description:
- The maven classifier coordinate
default: ''
extension:
type: str
description:

@@ -46,6 +46,7 @@ options:
- "remove"
- "checksum"
- "removal-of-dependent-packages"
default: ""
type: str
update_cache:
description:
@@ -72,7 +72,7 @@ options:
extra_args:
description:
- Additional option to pass to pacman when enforcing C(state).
default:
default: ''
type: str

update_cache:
@@ -91,7 +91,7 @@ options:
update_cache_extra_args:
description:
- Additional option to pass to pacman when enforcing C(update_cache).
default:
default: ''
type: str

upgrade:
@@ -104,7 +104,7 @@ options:
upgrade_extra_args:
description:
- Additional option to pass to pacman when enforcing C(upgrade).
default:
default: ''
type: str

notes:
@@ -37,7 +37,7 @@ options:
state:
description:
- State of the package.
- 'Note: "latest" added in 2.7'
- 'Note: C(latest) added in 2.7.'
choices: [ 'present', 'latest', 'absent' ]
required: false
default: present
@@ -148,10 +148,7 @@ def query_package(module, run_pkgng, name):

rc, out, err = run_pkgng('info', '-g', '-e', name)

if rc == 0:
return True

return False
return rc == 0


def query_update(module, run_pkgng, name):
@@ -161,10 +158,7 @@ def query_update(module, run_pkgng, name):
# rc = 1, updates available
rc, out, err = run_pkgng('upgrade', '-g', '-n', name)

if rc == 1:
return True

return False
return rc == 1


def pkgng_older_than(module, pkgng_path, compare_version):
@@ -190,7 +184,7 @@ def upgrade_packages(module, run_pkgng):

pkgng_args = ['upgrade']
pkgng_args.append('-n' if module.check_mode else '-y')
rc, out, err = run_pkgng(*pkgng_args)
rc, out, err = run_pkgng(*pkgng_args, check_rc=(not module.check_mode))

matches = re.findall('^Number of packages to be (?:upgraded|reinstalled): ([0-9]+)', out, re.MULTILINE)
for match in matches:
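The pkgng hunks above collapse `if rc == X: return True ... return False` into a direct comparison and make the upgrade enforce the exit code only outside of check mode (a dry run may legitimately exit non-zero). A compact sketch of both patterns; `run_pkgng` here is any `run_command`-style callable and is not the module's actual helper:

    def query_package(run_pkgng, name):
        # `pkg info -g -e <name>` exits 0 when the package is installed.
        rc, out, err = run_pkgng('info', '-g', '-e', name)
        return rc == 0

    def upgrade_packages(module, run_pkgng):
        # Dry-run with -n in check mode; only enforce the return code on a real upgrade.
        args = ['upgrade', '-n' if module.check_mode else '-y']
        rc, out, err = run_pkgng(*args, check_rc=(not module.check_mode))
        return out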
@@ -151,7 +151,6 @@ options:
When some attribute is not listed in the new list of attributes, the existing
attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored.
type: dict
default: {}
suboptions:
usage:
description: Syspurpose attribute usage
@@ -592,15 +591,22 @@ class Rhsm(RegistrationBase):
consumed_pools = RhsmPools(self.module, consumed=True)

existing_pools = {}
serials_to_remove = []
for p in consumed_pools:
existing_pools[p.get_pool_id()] = p.QuantityUsed
pool_id = p.get_pool_id()
quantity_used = p.get_quantity_used()
existing_pools[pool_id] = quantity_used

quantity = pool_ids.get(pool_id, 0)
if quantity is not None and quantity != quantity_used:
serials_to_remove.append(p.Serial)

serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
serials = self.unsubscribe(serials=serials_to_remove)

missing_pools = {}
for pool_id, quantity in sorted(pool_ids.items()):
if existing_pools.get(pool_id, 0) != quantity:
quantity_used = existing_pools.get(pool_id, 0)
if quantity is None and quantity_used == 0 or quantity not in (None, 0, quantity_used):
missing_pools[pool_id] = quantity

self.subscribe_by_pool_ids(missing_pools)
@@ -634,6 +640,9 @@ class RhsmPool(object):
def get_pool_id(self):
return getattr(self, 'PoolId', getattr(self, 'PoolID'))

def get_quantity_used(self):
return int(getattr(self, 'QuantityUsed'))

def subscribe(self):
args = "subscription-manager attach --pool %s" % self.get_pool_id()
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
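The Rhsm hunk above replaces a one-line comprehension with an explicit loop so that a requested quantity of `None` ("attach regardless of count") is treated differently from a numeric mismatch. A simplified, self-contained sketch of that reconciliation; the data shapes and function name are illustrative, not the module's API:

    def reconcile_pools(consumed, requested):
        # consumed: pool_id -> (serial, quantity_used); requested: pool_id -> quantity or None.
        serials_to_remove = []
        for pool_id, (serial, used) in consumed.items():
            wanted = requested.get(pool_id, 0)
            if wanted is not None and wanted != used:
                serials_to_remove.append(serial)

        missing_pools = {}
        for pool_id, wanted in sorted(requested.items()):
            used = consumed.get(pool_id, (None, 0))[1]
            if (wanted is None and used == 0) or wanted not in (None, 0, used):
                missing_pools[pool_id] = wanted
        return serials_to_remove, missing_pools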
@@ -75,6 +75,7 @@ options:
- especially useful for SCM and rsync grimoires
- makes sense only in pair with C(update_cache)
type: int
default: 0
'''

@@ -68,6 +68,7 @@ options:
- The list of media types appropriate for the image.
type: list
elements: str
default: []
image_url:
description:
- The URL of the image to insert or eject.
@@ -79,7 +79,7 @@ options:
- ' - C(vms_allocated) (int): use null to remove the quota.'
- ' - C(templates_allocated) (int): use null to remove the quota.'
required: false
default: null
default: {}
'''

EXAMPLES = '''

@@ -115,6 +115,7 @@ options:
description:
- Properties of account service to update.
type: dict
default: {}
version_added: '0.2.0'
resource_id:
required: false
@@ -140,6 +141,7 @@ options:
- List of target resource URIs to apply the update to.
type: list
elements: str
default: []
version_added: '0.2.0'
update_creds:
required: false
@@ -171,6 +173,7 @@ options:
- List of media types appropriate for the image.
type: list
elements: str
default: []
image_url:
required: false
description:

@@ -71,6 +71,7 @@ options:
description:
- Setting dict of manager services to update.
type: dict
default: {}
version_added: '0.2.0'
resource_id:
required: false
@@ -90,6 +91,7 @@ options:
description:
- Setting dict of EthernetInterface on OOB controller.
type: dict
default: {}
version_added: '0.2.0'
strip_etag_quotes:
description:
@@ -105,6 +107,7 @@ options:
description:
- Setting dict of HostInterface on OOB controller.
type: dict
default: {}
version_added: '4.1.0'
hostinterface_id:
required: false
@@ -73,12 +73,14 @@ options:
- Rack to be used in host creation.
- Required if I(state) is C(present) and host does not yet exist.
type: int
default: 0
rank:
description:
- Rank to be used in host creation.
- In Stacki terminology, the rank is the position of the machine in a rack.
- Required if I(state) is C(present) and host does not yet exist.
type: int
default: 0
network:
description:
- Network to be configured in the host.

@@ -54,6 +54,7 @@ options:
- Branch name of wildcard to trigger hook on push events
type: str
version_added: '0.2.0'
default: ''
issues_events:
description:
- Trigger hook on issues events.

@@ -37,6 +37,7 @@ options:
- A dictionary of zfs properties to be set.
- See the zfs(8) man page for more information.
type: dict
default: {}
notes:
- C(check_mode) is supported, but in certain situations it may report a task
as changed that will not be reported as changed when C(check_mode) is disabled.
Some files were not shown because too many files have changed in this diff.