mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-05-07 22:02:50 +00:00
Relocating extras into lib/ansible/modules/ after merge
This commit is contained in:
committed by
Matt Clay
parent
c65ba07d2c
commit
011ea55a8f
264
lib/ansible/modules/cloud/amazon/GUIDELINES.md
Normal file
264
lib/ansible/modules/cloud/amazon/GUIDELINES.md
Normal file
@@ -0,0 +1,264 @@
|
||||
# Guidelines for AWS modules
|
||||
|
||||
## Getting Started
|
||||
|
||||
Since Ansible 2.0, it is a requirement that all new AWS modules are written to use boto3.
|
||||
|
||||
Prior to 2.0, modules may have been written in boto or boto3. Modules written using boto can continue to be extended using boto.
|
||||
|
||||
Backward compatibility of older modules must be maintained.
|
||||
|
||||
## Bug fixing
|
||||
|
||||
If you are writing a bugfix for a module that uses boto, you should continue to use boto to maintain backward compatibility.
|
||||
|
||||
If you are adding new functionality to an existing module that uses boto but the new functionality requires boto3, you
|
||||
must maintain backward compatibility of the module and ensure the module still works without boto3.
|
||||
|
||||
## Naming your module
|
||||
|
||||
Base the name of the module on the part of AWS that
|
||||
you actually use. (A good rule of thumb is to take
|
||||
whatever module you use with boto as a starting point).
|
||||
|
||||
Don't further abbreviate names - if something is a well
|
||||
known abbreviation due to it being a major component of
|
||||
AWS, that's fine, but don't create new ones independently
|
||||
(e.g. VPC, ELB, etc. are fine)
|
||||
|
||||
## Adding new features
|
||||
|
||||
Try and keep backward compatibility with relatively recent
|
||||
versions of boto. That means that if you want to implement some
|
||||
functionality that uses a new feature of boto, it should only
|
||||
fail if that feature actually needs to be run, with a message
|
||||
saying which version of boto is needed.
|
||||
|
||||
Use feature testing (e.g. `hasattr('boto.module', 'shiny_new_method')`)
|
||||
to check whether boto supports a feature rather than version checking
|
||||
|
||||
e.g. from the `ec2` module:
|
||||
```python
|
||||
if boto_supports_profile_name_arg(ec2):
|
||||
params['instance_profile_name'] = instance_profile_name
|
||||
else:
|
||||
if instance_profile_name is not None:
|
||||
module.fail_json(msg="instance_profile_name parameter requires boto version 2.5.0 or higher")
|
||||
```
|
||||
|
||||
## Using boto and boto3
|
||||
|
||||
### Importing
|
||||
|
||||
Wrap import statements in a try block and fail the module later if the import fails
|
||||
|
||||
#### boto
|
||||
|
||||
```python
|
||||
try:
|
||||
import boto.ec2
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
def main():
|
||||
|
||||
if not HAS_BOTO:
|
||||
module.fail_json(msg='boto required for this module')
|
||||
```
|
||||
|
||||
#### boto3
|
||||
|
||||
```python
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
def main():
|
||||
|
||||
if not HAS_BOTO3:
|
||||
module.fail_json(msg='boto3 required for this module')
|
||||
```
|
||||
|
||||
#### boto and boto3 combined
|
||||
|
||||
If you want to add boto3 functionality to a module written using boto, you must maintain backward compatibility.
|
||||
Ensure that you clearly document if a new parameter requires boto3. Import boto3 at the top of the
|
||||
module as normal and then use the HAS_BOTO3 bool when necessary, before the new feature.
|
||||
|
||||
```python
|
||||
try:
|
||||
import boto
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
if my_new_feature_parameter_is_set:
|
||||
if HAS_BOTO3:
|
||||
# do feature
|
||||
else:
|
||||
module.fail_json(msg="boto3 is required for this feature")
|
||||
```
|
||||
|
||||
### Connecting to AWS
|
||||
|
||||
To connect to AWS, you should use `get_aws_connection_info` and then
|
||||
`connect_to_aws`.
|
||||
|
||||
The reason for using `get_aws_connection_info` and `connect_to_aws` rather than doing it
|
||||
yourself is that they handle some of the more esoteric connection
|
||||
options such as security tokens and boto profiles.
|
||||
|
||||
Some boto services require region to be specified. You should check for the region parameter if required.
|
||||
|
||||
#### boto
|
||||
|
||||
An example of connecting to ec2:
|
||||
|
||||
```python
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
|
||||
if region:
|
||||
try:
|
||||
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
|
||||
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
|
||||
module.fail_json(msg=str(e))
|
||||
else:
|
||||
module.fail_json(msg="region must be specified")
|
||||
```
|
||||
|
||||
#### boto3
|
||||
|
||||
An example of connecting to ec2 is shown below. Note that there is no 'NoAuthHandlerFound' exception handling like in boto.
|
||||
Instead, an AuthFailure exception will be thrown when you use 'connection'. See exception handling.
|
||||
|
||||
```python
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
|
||||
if region:
|
||||
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
|
||||
else:
|
||||
module.fail_json(msg="region must be specified")
|
||||
```
|
||||
|
||||
### Exception Handling
|
||||
|
||||
You should wrap any boto call in a try block. If an exception is thrown, it is up to you to decide how to handle it
|
||||
but usually calling fail_json with the error message will suffice.
|
||||
|
||||
#### boto
|
||||
|
||||
```python
|
||||
# Import BotoServerError
|
||||
try:
|
||||
import boto.ec2
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
# Connect to AWS
|
||||
...
|
||||
|
||||
# Make a call to AWS
|
||||
try:
|
||||
result = connection.aws_call()
|
||||
except BotoServerError as e:
|
||||
module.fail_json(msg=e.message)
|
||||
```
|
||||
|
||||
#### boto3
|
||||
|
||||
For more information on botocore exception handling see [http://botocore.readthedocs.org/en/latest/client_upgrades.html#error-handling]
|
||||
|
||||
Boto3 provides lots of useful info when an exception is thrown so pass this to the user along with the message.
|
||||
|
||||
```python
|
||||
# Import ClientError from botocore
|
||||
try:
|
||||
from botocore.exceptions import ClientError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
# Connect to AWS
|
||||
...
|
||||
|
||||
# Make a call to AWS
|
||||
try:
|
||||
result = connection.aws_call()
|
||||
except ClientError as e:
|
||||
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
|
||||
```
|
||||
|
||||
If you need to perform an action based on the error boto3 returned, use the error code.
|
||||
|
||||
```python
|
||||
# Make a call to AWS
|
||||
try:
|
||||
result = connection.aws_call()
|
||||
except ClientError as e:
|
||||
if e.response['Error']['Code'] == 'NoSuchEntity':
|
||||
return None
|
||||
else:
|
||||
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
|
||||
```
|
||||
|
||||
### Returning Values
|
||||
|
||||
When you make a call using boto3, you will probably get back some useful information that you should return in the module.
|
||||
|
||||
As well as information related to the call itself, you will also have some response metadata. It is OK to return this to
|
||||
the user as well as they may find it useful.
|
||||
|
||||
Boto3 returns all values CamelCased. Ansible follows Python standards for variable names and uses snake_case. There is a
|
||||
helper function in module_utils/ec2.py called `camel_dict_to_snake_dict` that allows you to easily convert the boto3
|
||||
response to snake_case.
|
||||
|
||||
You should use this helper function and avoid changing the names of values returned by Boto3. E.g. if boto3 returns a
|
||||
value called 'SecretAccessKey' do not change it to 'AccessKey'.
|
||||
|
||||
```python
|
||||
# Make a call to AWS
|
||||
result = connection.aws_call()
|
||||
|
||||
# Return the result to the user
|
||||
module.exit_json(changed=True, **camel_dict_to_snake_dict(result))
|
||||
```
|
||||
|
||||
### Helper functions
|
||||
|
||||
Along with the connection functions in Ansible ec2.py module_utils, there are some other useful functions detailed below.
|
||||
|
||||
#### camel_dict_to_snake_dict
|
||||
|
||||
boto3 returns results in a dict. The keys of the dict are in CamelCase format. In keeping
|
||||
with Ansible format, this function will convert the keys to snake_case.
|
||||
|
||||
#### ansible_dict_to_boto3_filter_list
|
||||
|
||||
Converts an Ansible list of filters to a boto3 friendly list of dicts. This is useful for
|
||||
any boto3 _facts modules.
|
||||
|
||||
#### boto3_tag_list_to_ansible_dict
|
||||
|
||||
Converts a boto3 tag list to an Ansible dict. Boto3 returns tags as a list of dicts containing keys called
|
||||
'Key' and 'Value'. This function converts this list in to a single dict where the dict key is the tag
|
||||
key and the dict value is the tag value.
|
||||
|
||||
#### ansible_dict_to_boto3_tag_list
|
||||
|
||||
Opposite of above. Converts an Ansible dict to a boto3 tag list of dicts.
|
||||
|
||||
#### get_ec2_security_group_ids_from_names
|
||||
|
||||
Pass this function a list of security group names or combination of security group names and IDs and this function will
|
||||
return a list of IDs. You should also pass the VPC ID if known because security group names are not necessarily unique
|
||||
across VPCs.
|
||||
290
lib/ansible/modules/cloud/amazon/cloudformation_facts.py
Normal file
290
lib/ansible/modules/cloud/amazon/cloudformation_facts.py
Normal file
@@ -0,0 +1,290 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cloudformation_facts
|
||||
short_description: Obtain facts about an AWS CloudFormation stack
|
||||
description:
|
||||
- Gets information about an AWS CloudFormation stack
|
||||
requirements:
|
||||
- boto3 >= 1.0.0
|
||||
- python >= 2.6
|
||||
version_added: "2.2"
|
||||
author: Justin Menga (@jmenga)
|
||||
options:
|
||||
stack_name:
|
||||
description:
|
||||
- The name or id of the CloudFormation stack
|
||||
required: true
|
||||
all_facts:
|
||||
description:
|
||||
- Get all stack information for the stack
|
||||
required: false
|
||||
default: false
|
||||
stack_events:
|
||||
description:
|
||||
- Get stack events for the stack
|
||||
required: false
|
||||
default: false
|
||||
stack_template:
|
||||
description:
|
||||
- Get stack template body for the stack
|
||||
required: false
|
||||
default: false
|
||||
stack_resources:
|
||||
description:
|
||||
- Get stack resources for the stack
|
||||
required: false
|
||||
default: false
|
||||
stack_policy:
|
||||
description:
|
||||
- Get stack policy for the stack
|
||||
required: false
|
||||
default: false
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Get summary information about a stack
|
||||
- cloudformation_facts:
|
||||
stack_name: my-cloudformation-stack
|
||||
|
||||
# Facts are published in ansible_facts['cloudformation'][<stack_name>]
|
||||
- debug:
|
||||
msg: '{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}'
|
||||
|
||||
# Get all stack information about a stack
|
||||
- cloudformation_facts:
|
||||
stack_name: my-cloudformation-stack
|
||||
all_facts: true
|
||||
|
||||
# Get stack resource and stack policy information about a stack
|
||||
- cloudformation_facts:
|
||||
stack_name: my-cloudformation-stack
|
||||
stack_resources: true
|
||||
stack_policy: true
|
||||
|
||||
# Example dictionary outputs for stack_outputs, stack_parameters and stack_resources:
|
||||
"stack_outputs": {
|
||||
"ApplicationDatabaseName": "dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com",
|
||||
...
|
||||
},
|
||||
"stack_parameters": {
|
||||
"DatabaseEngine": "mysql",
|
||||
"DatabasePassword": "****",
|
||||
...
|
||||
},
|
||||
"stack_resources": {
|
||||
"AutoscalingGroup": "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7",
|
||||
"AutoscalingSecurityGroup": "sg-abcd1234",
|
||||
"ApplicationDatabase": "dazvlpr01xj55a",
|
||||
"EcsTaskDefinition": "arn:aws:ecs:ap-southeast-2:123456789:task-definition/dev-someapp-EcsTaskDefinition-1F2VM9QB0I7K9:1"
|
||||
...
|
||||
}
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
stack_description:
|
||||
description: Summary facts about the stack
|
||||
returned: always
|
||||
type: dict
|
||||
stack_outputs:
|
||||
description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each output 'OutputValue' parameter
|
||||
returned: always
|
||||
type: dict
|
||||
stack_parameters:
|
||||
description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of each parameter 'ParameterValue' parameter
|
||||
returned: always
|
||||
type: dict
|
||||
stack_events:
|
||||
description: All stack events for the stack
|
||||
returned: only if all_facts or stack_events is true
|
||||
type: list of events
|
||||
stack_policy:
|
||||
description: Describes the stack policy for the stack
|
||||
returned: only if all_facts or stack_policy is true
|
||||
type: dict
|
||||
stack_template:
|
||||
description: Describes the stack template for the stack
|
||||
returned: only if all_facts or stack_template is true
|
||||
type: dict
|
||||
stack_resource_list:
|
||||
description: Describes stack resources for the stack
|
||||
returned: only if all_facts or stack_resources is true
|
||||
type: list of resources
|
||||
stack_resources:
|
||||
description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each resource 'PhysicalResourceId' parameter
|
||||
returned: only if all_facts or stack_resources is true
|
||||
type: dict
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto3
|
||||
import botocore
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from functools import partial
|
||||
import json
|
||||
import traceback
|
||||
|
||||
class CloudFormationServiceManager:
    """Handles CloudFormation Services"""

    def __init__(self, module):
        """Open a boto3 CloudFormation client from the module's AWS connection
        parameters; fail the module if no region or connection is available."""
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            self.client = boto3_conn(module, conn_type='client',
                                     resource='cloudformation', region=region,
                                     endpoint=ec2_url, **aws_connect_kwargs)
        except botocore.exceptions.NoRegionError:
            self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION environment variable or in boto configuration file")
        except Exception as e:
            # BUGFIX: traceback.format_exc() takes no exception argument. Passing
            # the exception raises TypeError on Python 3 (and was silently treated
            # as a 'limit' argument on Python 2). Same fix applied below.
            self.module.fail_json(msg="Can't establish connection - " + str(e), exception=traceback.format_exc())

    def describe_stack(self, stack_name):
        """Return the description dict of the named stack (first element of the
        paginated 'Stacks' result); fail the module on error or empty response."""
        try:
            func = partial(self.client.describe_stacks, StackName=stack_name)
            response = self.paginated_response(func, 'Stacks')
            if response:
                return response[0]
            self.module.fail_json(msg="Error describing stack - an empty response was returned")
        except Exception as e:
            self.module.fail_json(msg="Error describing stack - " + str(e), exception=traceback.format_exc())

    def list_stack_resources(self, stack_name):
        """Return all resource summaries for the named stack."""
        try:
            func = partial(self.client.list_stack_resources, StackName=stack_name)
            return self.paginated_response(func, 'StackResourceSummaries')
        except Exception as e:
            self.module.fail_json(msg="Error listing stack resources - " + str(e), exception=traceback.format_exc())

    def describe_stack_events(self, stack_name):
        """Return all events for the named stack."""
        try:
            func = partial(self.client.describe_stack_events, StackName=stack_name)
            return self.paginated_response(func, 'StackEvents')
        except Exception as e:
            self.module.fail_json(msg="Error describing stack events - " + str(e), exception=traceback.format_exc())

    def get_stack_policy(self, stack_name):
        """Return the stack policy parsed from JSON, or an empty dict when the
        stack has no policy body."""
        try:
            response = self.client.get_stack_policy(StackName=stack_name)
            stack_policy = response.get('StackPolicyBody')
            if stack_policy:
                return json.loads(stack_policy)
            return dict()
        except Exception as e:
            self.module.fail_json(msg="Error getting stack policy - " + str(e), exception=traceback.format_exc())

    def get_template(self, stack_name):
        """Return the template body of the named stack."""
        try:
            response = self.client.get_template(StackName=stack_name)
            return response.get('TemplateBody')
        except Exception as e:
            self.module.fail_json(msg="Error getting stack template - " + str(e), exception=traceback.format_exc())

    def paginated_response(self, func, result_key, next_token=None):
        '''
        Returns expanded response for paginated operations.
        The 'result_key' is used to define the concatenated results that are combined from each paginated response.
        '''
        args = dict()
        if next_token:
            args['NextToken'] = next_token
        response = func(**args)
        result = response.get(result_key)
        next_token = response.get('NextToken')
        if not next_token:
            return result
        # Recurse until the service stops returning a NextToken.
        return result + self.paginated_response(func, result_key, next_token)
|
||||
|
||||
def to_dict(items, key, value):
    """Collapse a list of mappings into one dict, pairing each item's
    *key* field with its *value* field. Falsy input yields an empty dict."""
    if not items:
        return dict()
    return {entry[key]: entry[value] for entry in items}
|
||||
|
||||
def main():
    """Module entry point: describe the requested CloudFormation stack and
    publish the gathered facts under ansible_facts['cloudformation'][stack]."""
    spec = ec2_argument_spec()
    spec.update(dict(
        stack_name=dict(required=True, type='str'),
        all_facts=dict(required=False, default=False, type='bool'),
        stack_policy=dict(required=False, default=False, type='bool'),
        stack_events=dict(required=False, default=False, type='bool'),
        stack_resources=dict(required=False, default=False, type='bool'),
        stack_template=dict(required=False, default=False, type='bool'),
    ))

    module = AnsibleModule(argument_spec=spec, supports_check_mode=False)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    # Describe the stack
    mgr = CloudFormationServiceManager(module)
    name = module.params.get('stack_name')
    result = {'ansible_facts': {'cloudformation': {name: {}}}}
    facts = result['ansible_facts']['cloudformation'][name]
    facts['stack_description'] = mgr.describe_stack(name)

    # Derive the output and parameter dictionaries from the raw description
    if facts['stack_description']:
        facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
        facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), 'ParameterKey', 'ParameterValue')

    # Normalize the stack description API output to snake_case
    facts['stack_description'] = camel_dict_to_snake_dict(facts['stack_description'])
    # camel_dict_to_snake_dict mangles 'NotificationARNs' into 'notification_ar_ns';
    # rename it back to the expected key
    facts['stack_description']['notification_arns'] = facts['stack_description'].pop('notification_ar_ns', [])

    # Optional, more expensive lookups gated by the boolean options
    want_all = module.params.get('all_facts')
    if want_all or module.params.get('stack_resources'):
        facts['stack_resource_list'] = mgr.list_stack_resources(name)
        facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), 'LogicalResourceId', 'PhysicalResourceId')
    if want_all or module.params.get('stack_template'):
        facts['stack_template'] = mgr.get_template(name)
    if want_all or module.params.get('stack_policy'):
        facts['stack_policy'] = mgr.get_stack_policy(name)
    if want_all or module.params.get('stack_events'):
        facts['stack_events'] = mgr.describe_stack_events(name)

    # Facts gathering never changes remote state
    result['changed'] = False
    module.exit_json(**result)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
245
lib/ansible/modules/cloud/amazon/cloudtrail.py
Normal file
245
lib/ansible/modules/cloud/amazon/cloudtrail.py
Normal file
@@ -0,0 +1,245 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: cloudtrail
|
||||
short_description: manage CloudTrail creation and deletion
|
||||
description:
|
||||
- Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
|
||||
version_added: "2.0"
|
||||
author:
|
||||
- "Ansible Core Team"
|
||||
- "Ted Timmons"
|
||||
requirements:
|
||||
- "boto >= 2.21"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- add or remove CloudTrail configuration.
|
||||
required: true
|
||||
choices: ['enabled', 'disabled']
|
||||
name:
|
||||
description:
|
||||
- name for given CloudTrail configuration.
|
||||
- This is a primary key and is used to identify the configuration.
|
||||
s3_bucket_name:
|
||||
description:
|
||||
- bucket to place CloudTrail in.
|
||||
- this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
|
||||
- required when state=enabled.
|
||||
required: false
|
||||
s3_key_prefix:
|
||||
description:
|
||||
- prefix to keys in bucket. A trailing slash is not necessary and will be removed.
|
||||
required: false
|
||||
include_global_events:
|
||||
description:
|
||||
- record API calls from global services such as IAM and STS?
|
||||
required: false
|
||||
default: false
|
||||
choices: ["true", "false"]
|
||||
|
||||
aws_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'ec2_secret_key', 'secret_key' ]
|
||||
version_added: "1.5"
|
||||
aws_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'ec2_access_key', 'access_key' ]
|
||||
version_added: "1.5"
|
||||
region:
|
||||
description:
|
||||
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
|
||||
required: false
|
||||
aliases: ['aws_region', 'ec2_region']
|
||||
version_added: "1.5"
|
||||
|
||||
extends_documentation_fragment: aws
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: enable cloudtrail
|
||||
local_action: cloudtrail
|
||||
state: enabled
|
||||
name: main
|
||||
s3_bucket_name: ourbucket
|
||||
s3_key_prefix: cloudtrail
|
||||
region: us-east-1
|
||||
|
||||
- name: enable cloudtrail with different configuration
|
||||
local_action: cloudtrail
|
||||
state: enabled
|
||||
name: main
|
||||
s3_bucket_name: ourbucket2
|
||||
s3_key_prefix: ''
|
||||
region: us-east-1
|
||||
|
||||
- name: remove cloudtrail
|
||||
local_action: cloudtrail
|
||||
state: disabled
|
||||
name: main
|
||||
region: us-east-1
|
||||
"""
|
||||
|
||||
HAS_BOTO = False
|
||||
try:
|
||||
import boto
|
||||
import boto.cloudtrail
|
||||
from boto.regioninfo import RegionInfo
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_ec2_creds
|
||||
|
||||
|
||||
class CloudTrailManager:
    """Handles cloudtrail configuration"""

    def __init__(self, module, region=None, **aws_connect_params):
        """Connect to the CloudTrail service in *region*; fail the module when
        no boto auth handler can be found."""
        self.module = module
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.changed = False

        try:
            self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg=str(e))

    def view_status(self, name):
        """Return the logging status of the named trail."""
        return self.conn.get_trail_status(name)

    def view(self, name):
        """Return the named trail's configuration dict, or None if absent."""
        described = self.conn.describe_trails(trail_name_list=[name])
        trails = described.get('trailList', [])
        return trails[0] if len(trails) == 1 else None

    def exists(self, name=None):
        """Return True when a trail with the given name exists."""
        return bool(self.view(name))

    def enable_logging(self, name):
        '''Turn on logging for a cloudtrail that already exists. Throws Exception on error.'''
        self.conn.start_logging(name)

    def enable(self, **create_args):
        """Create a new trail with the given arguments."""
        return self.conn.create_trail(**create_args)

    def update(self, **create_args):
        """Update an existing trail with the given arguments."""
        return self.conn.update_trail(**create_args)

    def delete(self, name):
        '''Delete a given cloudtrail configuration. Throws Exception on error.'''
        self.conn.delete_trail(name)
|
||||
|
||||
|
||||
|
||||
def main():
    """Module entry point: create/update ('enabled') or remove ('disabled') a
    CloudTrail configuration, ensuring logging is started for enabled trails.
    Honors check mode for every mutating call."""

    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['enabled', 'disabled']},
        name={'required': True, 'type': 'str'},
        s3_bucket_name={'required': False, 'type': 'str'},
        s3_key_prefix={'default': '', 'required': False, 'type': 'str'},
        include_global_events={'default': True, 'required': False, 'type': 'bool'},
    ))
    # BUGFIX: required_together must be a list of option-name lists. The old
    # value (['state', 's3_bucket_name']) was a flat list of strings, which
    # AnsibleModule does not interpret as one required-together group.
    required_together = [['state', 's3_bucket_name']]

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    ec2_url, access_key, secret_key, region = get_ec2_creds(module)
    aws_connect_params = dict(aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key)

    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ct_name = module.params['name']
    s3_bucket_name = module.params['s3_bucket_name']
    # remove trailing slash from the key prefix, really messes up the key structure.
    s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')

    include_global_events = module.params['include_global_events']

    cf_man = CloudTrailManager(module, region=region, **aws_connect_params)

    results = {'changed': False}
    if module.params['state'] == 'enabled':
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            results['view'] = cf_man.view(ct_name)
            # only update if the values have changed.
            if results['view']['S3BucketName'] != s3_bucket_name or \
                    results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \
                    results['view']['IncludeGlobalServiceEvents'] != include_global_events:
                if not module.check_mode:
                    results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
                                                      include_global_service_events=include_global_events)
                results['changed'] = True
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
                                                  include_global_service_events=include_global_events)
            results['changed'] = True

        # given cloudtrail should exist now. Enable the logging.
        results['view_status'] = cf_man.view_status(ct_name)
        results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
        if not results['was_logging_enabled']:
            if not module.check_mode:
                cf_man.enable_logging(ct_name)
                results['logging_enabled'] = True
            results['changed'] = True

    # delete the cloudtrail
    elif module.params['state'] == 'disabled':
        # check to see if it exists before deleting.
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            # it exists, so we should delete it and mark changed.
            if not module.check_mode:
                cf_man.delete(ct_name)
            results['changed'] = True

    module.exit_json(**results)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
415
lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
Normal file
415
lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
Normal file
@@ -0,0 +1,415 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cloudwatchevent_rule
|
||||
short_description: Manage CloudWatch Event rules and targets
|
||||
description:
|
||||
- This module creates and manages CloudWatch event rules and targets.
|
||||
version_added: "2.2"
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
author: "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- boto3
|
||||
notes:
|
||||
- A rule must contain at least an I(event_pattern) or I(schedule_expression). A
|
||||
rule can have both an I(event_pattern) and a I(schedule_expression), in which
|
||||
case the rule will trigger on matching events as well as on a schedule.
|
||||
- When specifying targets, I(input) and I(input_path) are mutually-exclusive
|
||||
and optional parameters.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the rule you are creating, updating or deleting. No spaces
|
||||
or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+))
|
||||
required: true
|
||||
schedule_expression:
|
||||
description:
|
||||
- A cron or rate expression that defines the schedule the rule will
|
||||
trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes))
|
||||
required: false
|
||||
event_pattern:
|
||||
description:
|
||||
- A string pattern (in valid JSON format) that is used to match against
|
||||
incoming events to determine if the rule should be triggered
|
||||
required: false
|
||||
state:
|
||||
description:
|
||||
- Whether the rule is present (and enabled), disabled, or absent
|
||||
choices: ["present", "disabled", "absent"]
|
||||
default: present
|
||||
required: false
|
||||
description:
|
||||
description:
|
||||
- A description of the rule
|
||||
required: false
|
||||
role_arn:
|
||||
description:
|
||||
- The Amazon Resource Name (ARN) of the IAM role associated with the rule
|
||||
required: false
|
||||
targets:
|
||||
description:
|
||||
- "A dictionary array of targets to add to or update for the rule, in the
|
||||
form C({ id: [string], arn: [string], input: [valid JSON string], input_path: [valid JSONPath string] }).
|
||||
I(id) [required] is the unique target assignment ID. I(arn) (required)
|
||||
is the Amazon Resource Name associated with the target. I(input)
|
||||
(optional) is a JSON object that will override the event data when
|
||||
passed to the target. I(input_path) (optional) is a JSONPath string
|
||||
(e.g. C($.detail)) that specifies the part of the event data to be
|
||||
passed to the target. If neither I(input) nor I(input_path) is
|
||||
specified, then the entire event is passed to the target in JSON form."
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- cloudwatchevent_rule:
|
||||
name: MyCronTask
|
||||
schedule_expression: "cron(0 20 * * ? *)"
|
||||
description: Run my scheduled task
|
||||
targets:
|
||||
- id: MyTargetId
|
||||
arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
|
||||
|
||||
- cloudwatchevent_rule:
|
||||
name: MyDisabledCronTask
|
||||
schedule_expression: "cron(5 minutes)"
|
||||
description: Run my disabled scheduled task
|
||||
state: disabled
|
||||
targets:
|
||||
- id: MyOtherTargetId
|
||||
arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
|
||||
input: '{"foo": "bar"}'
|
||||
|
||||
- cloudwatchevent_rule:
|
||||
name: MyCronTask
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
rule:
|
||||
description: CloudWatch Event rule data
|
||||
returned: success
|
||||
type: dict
|
||||
sample: "{ 'arn': 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask', 'description': 'Run my scheduled task', 'name': 'MyCronTask', 'schedule_expression': 'cron(0 20 * * ? *)', 'state': 'ENABLED' }"
|
||||
targets:
|
||||
description: CloudWatch Event target(s) assigned to the rule
|
||||
returned: success
|
||||
type: list
|
||||
sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
|
||||
'''
|
||||
|
||||
|
||||
class CloudWatchEventRule(object):
    """Thin wrapper around the CloudWatch Events API calls for a single rule.

    Each mutating call sets the ``changed`` attribute to True so the caller
    can report Ansible's changed status.
    """

    def __init__(self, module, name, client, schedule_expression=None,
                 event_pattern=None, description=None, role_arn=None):
        # NOTE: ``module`` is accepted for interface compatibility but is not
        # currently used by any method of this class.
        self.name = name
        self.client = client
        self.changed = False
        self.schedule_expression = schedule_expression
        self.event_pattern = event_pattern
        self.description = description
        self.role_arn = role_arn

    def describe(self):
        """Returns the existing details of the rule in AWS"""
        try:
            rule_info = self.client.describe_rule(Name=self.name)
        except botocore.exceptions.ClientError as e:
            error_code = e.response.get('Error', {}).get('Code')
            if error_code == 'ResourceNotFoundException':
                # A missing rule is an expected state, not an error.
                return {}
            raise
        return self._snakify(rule_info)

    def put(self, enabled=True):
        """Creates or updates the rule in AWS"""
        request = {
            'Name': self.name,
            'State': "ENABLED" if enabled else "DISABLED",
        }
        # Only send the optional fields that were actually provided.
        if self.schedule_expression:
            request['ScheduleExpression'] = self.schedule_expression
        if self.event_pattern:
            request['EventPattern'] = self.event_pattern
        if self.description:
            request['Description'] = self.description
        if self.role_arn:
            request['RoleArn'] = self.role_arn
        response = self.client.put_rule(**request)
        self.changed = True
        return response

    def delete(self):
        """Deletes the rule in AWS"""
        # Targets must be removed first or AWS refuses to delete the rule.
        self.remove_all_targets()
        response = self.client.delete_rule(Name=self.name)
        self.changed = True
        return response

    def enable(self):
        """Enables the rule in AWS"""
        response = self.client.enable_rule(Name=self.name)
        self.changed = True
        return response

    def disable(self):
        """Disables the rule in AWS"""
        response = self.client.disable_rule(Name=self.name)
        self.changed = True
        return response

    def list_targets(self):
        """Lists the existing targets for the rule in AWS"""
        try:
            targets = self.client.list_targets_by_rule(Rule=self.name)
        except botocore.exceptions.ClientError as e:
            error_code = e.response.get('Error', {}).get('Code')
            if error_code == 'ResourceNotFoundException':
                # No rule means no targets.
                return []
            raise
        return self._snakify(targets)['targets']

    def put_targets(self, targets):
        """Creates or updates the provided targets on the rule in AWS"""
        if not targets:
            return
        request = {
            'Rule': self.name,
            'Targets': self._targets_request(targets),
        }
        response = self.client.put_targets(**request)
        self.changed = True
        return response

    def remove_targets(self, target_ids):
        """Removes the provided targets from the rule in AWS"""
        if not target_ids:
            return
        request = {
            'Rule': self.name,
            'Ids': target_ids
        }
        response = self.client.remove_targets(**request)
        self.changed = True
        return response

    def remove_all_targets(self):
        """Removes all targets on rule"""
        targets = self.list_targets()
        return self.remove_targets([t['id'] for t in targets])

    def _targets_request(self, targets):
        """Formats each target for the request"""
        targets_request = []
        for target in targets:
            target_request = {
                'Id': target['id'],
                'Arn': target['arn']
            }
            # 'input' and 'input_path' are optional and mutually exclusive
            # per the module documentation.
            if 'input' in target:
                target_request['Input'] = target['input']
            if 'input_path' in target:
                target_request['InputPath'] = target['input_path']
            targets_request.append(target_request)
        return targets_request

    def _snakify(self, data):
        """Converts camelCase keys in *data* to snake_case.

        Parameter renamed from ``dict`` to avoid shadowing the builtin.
        """
        return camel_dict_to_snake_dict(data)
|
||||
|
||||
|
||||
class CloudWatchEventRuleManager(object):
    """Reconciles a desired rule + target configuration with the AWS state."""

    RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']

    def __init__(self, rule, targets):
        self.rule = rule
        self.targets = targets

    def ensure_present(self, enabled=True):
        """Ensures the rule and targets are present and synced"""
        if self.rule.describe():
            # The rule already exists: bring its data, targets and
            # enabled/disabled state in line with the requested config.
            self._sync_rule(enabled)
            self._sync_targets()
            self._sync_state(enabled)
        else:
            # No such rule yet: create it together with its targets.
            self._create(enabled)

    def ensure_disabled(self):
        """Ensures the rule and targets are present, but disabled, and synced"""
        self.ensure_present(enabled=False)

    def ensure_absent(self):
        """Ensures the rule and targets are absent"""
        if not self.rule.describe():
            # Nothing to do when the rule is already gone.
            return
        self.rule.delete()

    def fetch_aws_state(self):
        """Retrieves rule and target state from AWS"""
        aws_state = {
            'rule': {},
            'targets': [],
            'changed': self.rule.changed
        }
        description = self.rule.describe()
        if not description:
            return aws_state

        # Strip the response metadata noise before handing the data back.
        del description['response_metadata']

        aws_state['rule'] = description
        aws_state['targets'].extend(self.rule.list_targets())
        return aws_state

    def _sync_rule(self, enabled=True):
        """Syncs local rule state with AWS"""
        if not self._rule_matches_aws():
            self.rule.put(enabled)

    def _sync_targets(self):
        """Syncs local targets with AWS"""
        # Drop remote targets that are no longer requested locally.
        stale_ids = self._remote_target_ids_to_remove()
        if stale_ids:
            self.rule.remove_targets(stale_ids)

        # Push any targets that are new or differ from the remote copy.
        pending = self._targets_to_put()
        if pending:
            self.rule.put_targets(pending)

    def _sync_state(self, enabled=True):
        """Syncs local rule enabled/disabled state with AWS"""
        desired = 'ENABLED' if enabled else 'DISABLED'
        if self._remote_state() != desired:
            if enabled:
                self.rule.enable()
            else:
                self.rule.disable()

    def _create(self, enabled=True):
        """Creates rule and targets on AWS"""
        self.rule.put(enabled)
        self.rule.put_targets(self.targets)

    def _rule_matches_aws(self):
        """Checks if the local rule data matches AWS"""
        remote = self.rule.describe()
        # The rule matches only when every tracked field agrees with AWS.
        return all(getattr(self.rule, field) == remote.get(field)
                   for field in self.RULE_FIELDS)

    def _targets_to_put(self):
        """Returns a list of targets that need to be updated or added remotely"""
        existing = self.rule.list_targets()
        return [target for target in self.targets if target not in existing]

    def _remote_target_ids_to_remove(self):
        """Returns a list of target IDs that need to be removed remotely"""
        wanted_ids = set(t['id'] for t in self.targets)
        return [remote['id'] for remote in self.rule.list_targets()
                if remote['id'] not in wanted_ids]

    def _remote_state(self):
        """Returns the remote state from AWS"""
        description = self.rule.describe()
        if description:
            return description['state']
        return None
|
||||
|
||||
|
||||
def get_cloudwatchevents_client(module):
    """Returns a boto3 client for accessing CloudWatch Events"""
    try:
        region, ec2_url, aws_conn_kwargs = get_aws_connection_info(module,
                                                                   boto3=True)
        if not region:
            module.fail_json(msg="Region must be specified as a parameter, in \
                             EC2_REGION or AWS_REGION environment variables \
                             or in boto configuration file")
        return boto3_conn(module, conn_type='client',
                          resource='events',
                          region=region, endpoint=ec2_url,
                          **aws_conn_kwargs)
    # NOTE(review): ``boto3.exception.NoAuthHandlerFound`` is a boto2-era
    # name; boto3 does not provide an ``exception`` attribute, so on an auth
    # failure this handler would itself raise AttributeError — confirm and
    # switch to a botocore exception (e.g. NoCredentialsError).
    except boto3.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: dispatch on ``state`` and report the resulting
    rule and target state from AWS."""
    argument_spec = ec2_argument_spec()
    # PEP 8: no spaces around '=' for keyword arguments.
    argument_spec.update(dict(
        name=dict(required=True),
        schedule_expression=dict(),
        event_pattern=dict(),
        state=dict(choices=['present', 'disabled', 'absent'],
                   default='present'),
        description=dict(),
        role_arn=dict(),
        targets=dict(type='list', default=[]),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    # Collect the rule-level parameters into a single dict keyed by field name.
    rule_data = dict(
        [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
    )
    targets = module.params.get('targets')
    state = module.params.get('state')

    cwe_rule = CloudWatchEventRule(module,
                                   client=get_cloudwatchevents_client(module),
                                   **rule_data)
    cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)

    if state == 'present':
        cwe_rule_manager.ensure_present()
    elif state == 'disabled':
        cwe_rule_manager.ensure_disabled()
    elif state == 'absent':
        cwe_rule_manager.ensure_absent()
    else:
        # Defensive: AnsibleModule's ``choices`` should make this unreachable.
        module.fail_json(msg="Invalid state '{0}' provided".format(state))

    module.exit_json(**cwe_rule_manager.fetch_aws_state())
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
|
||||
# Standard Ansible pattern: only run the module when executed directly.
if __name__ == '__main__':
    main()
|
||||
422
lib/ansible/modules/cloud/amazon/dynamodb_table.py
Normal file
422
lib/ansible/modules/cloud/amazon/dynamodb_table.py
Normal file
@@ -0,0 +1,422 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: dynamodb_table
|
||||
short_description: Create, update or delete AWS Dynamo DB tables.
|
||||
version_added: "2.0"
|
||||
description:
|
||||
- Create or delete AWS Dynamo DB tables.
|
||||
- Can update the provisioned throughput on existing tables.
|
||||
- Returns the status of the specified table.
|
||||
author: Alan Loi (@loia)
|
||||
requirements:
|
||||
- "boto >= 2.37.0"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Create or delete the table
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
name:
|
||||
description:
|
||||
- Name of the table.
|
||||
required: true
|
||||
hash_key_name:
|
||||
description:
|
||||
- Name of the hash key.
|
||||
- Required when C(state=present).
|
||||
required: false
|
||||
default: null
|
||||
hash_key_type:
|
||||
description:
|
||||
- Type of the hash key.
|
||||
required: false
|
||||
choices: ['STRING', 'NUMBER', 'BINARY']
|
||||
default: 'STRING'
|
||||
range_key_name:
|
||||
description:
|
||||
- Name of the range key.
|
||||
required: false
|
||||
default: null
|
||||
range_key_type:
|
||||
description:
|
||||
- Type of the range key.
|
||||
required: false
|
||||
choices: ['STRING', 'NUMBER', 'BINARY']
|
||||
default: 'STRING'
|
||||
read_capacity:
|
||||
description:
|
||||
- Read throughput capacity (units) to provision.
|
||||
required: false
|
||||
default: 1
|
||||
write_capacity:
|
||||
description:
|
||||
- Write throughput capacity (units) to provision.
|
||||
required: false
|
||||
default: 1
|
||||
indexes:
|
||||
description:
|
||||
- list of dictionaries describing indexes to add to the table. global indexes can be updated. local indexes don't support updates or have throughput.
|
||||
- "required options: ['name', 'type', 'hash_key_name']"
|
||||
- "valid types: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']"
|
||||
- "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
|
||||
required: false
|
||||
default: []
|
||||
version_added: "2.1"
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create dynamo table with hash and range primary key
|
||||
- dynamodb_table:
|
||||
name: my-table
|
||||
region: us-east-1
|
||||
hash_key_name: id
|
||||
hash_key_type: STRING
|
||||
range_key_name: create_time
|
||||
range_key_type: NUMBER
|
||||
read_capacity: 2
|
||||
write_capacity: 2
|
||||
|
||||
# Update capacity on existing dynamo table
|
||||
- dynamodb_table:
|
||||
name: my-table
|
||||
region: us-east-1
|
||||
read_capacity: 10
|
||||
write_capacity: 10
|
||||
|
||||
# set index on existing dynamo table
|
||||
- dynamodb_table:
|
||||
name: my-table
|
||||
region: us-east-1
|
||||
indexes:
|
||||
- name: NamedIndex
|
||||
type: global_include
|
||||
hash_key_name: id
|
||||
range_key_name: create_time
|
||||
includes:
|
||||
- other_field
|
||||
- other_field2
|
||||
read_capacity: 10
|
||||
write_capacity: 10
|
||||
|
||||
# Delete dynamo table
|
||||
- dynamodb_table:
|
||||
name: my-table
|
||||
region: us-east-1
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
table_status:
|
||||
description: The current status of the table.
|
||||
returned: success
|
||||
type: string
|
||||
sample: ACTIVE
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import boto
|
||||
import boto.dynamodb2
|
||||
from boto.dynamodb2.table import Table
|
||||
from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex
|
||||
from boto.dynamodb2.types import STRING, NUMBER, BINARY
|
||||
from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
|
||||
from boto.dynamodb2.exceptions import ValidationException
|
||||
HAS_BOTO = True
|
||||
|
||||
DYNAMO_TYPE_MAP = {
|
||||
'STRING': STRING,
|
||||
'NUMBER': NUMBER,
|
||||
'BINARY': BINARY
|
||||
}
|
||||
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
DYNAMO_TYPE_DEFAULT = 'STRING'
|
||||
INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
|
||||
INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
|
||||
INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
|
||||
|
||||
|
||||
def create_or_update_dynamo_table(connection, module):
    """Create the named DynamoDB table, or update throughput/global indexes
    on an existing one, then exit the module with the result.

    :param connection: boto dynamodb2 layer-2 connection
    :param module: AnsibleModule instance (parameters, check mode, exit/fail)
    """
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')
    all_indexes = module.params.get('indexes')

    # fail_json()s on the first invalid index definition.
    for index in all_indexes:
        validate_index(index, module)

    schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)

    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }

    indexes, global_indexes = get_indexes(all_indexes)

    # Echo the requested configuration back in the module result.
    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
        indexes=all_indexes,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            # Existing table: only throughput and global indexes can change.
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
        else:
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
            result['changed'] = True

        # In check mode the table may not exist yet, so skip the status lookup.
        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
|
||||
|
||||
|
||||
def delete_dynamo_table(connection, module):
    """Delete the named DynamoDB table if it exists, then exit the module.

    :param connection: boto dynamodb2 layer-2 connection
    :param module: AnsibleModule instance (parameters, check mode, exit/fail)
    """
    table_name = module.params.get('name')

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            # Table exists: delete it (unless in check mode) and report changed.
            if not module.check_mode:
                table.delete()
            result['changed'] = True

        else:
            result['changed'] = False

    except BotoServerError:
        result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
|
||||
|
||||
|
||||
def dynamo_table_exists(table):
    """Return True when *table* exists in DynamoDB, False when the describe
    call reports the resource as not found; re-raise any other error."""
    try:
        table.describe()
        return True

    except JSONResponseError as e:
        # NOTE(review): relies on boto's exception exposing a ``message``
        # attribute (boto sets one on BotoServerError subclasses; this is not
        # the removed BaseException.message) — confirm for the boto in use.
        if e.message and e.message.startswith('Requested resource not found'):
            return False
        else:
            raise e
|
||||
|
||||
|
||||
def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None):
    """Update an existing table's throughput and global secondary indexes.

    :param table: boto dynamodb2 Table object
    :param throughput: desired {'read': int, 'write': int} or None
    :param check_mode: when True, report what would change without changing it
    :param global_indexes: desired list of boto global index objects
    :returns: True when any change was (or, in check mode, would be) made
    """
    table.describe()  # populate table details
    throughput_changed = False
    global_indexes_changed = False
    if has_throughput_changed(table, throughput):
        if not check_mode:
            throughput_changed = table.update(throughput=throughput)
        else:
            throughput_changed = True

    removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes)
    if removed_indexes:
        if not check_mode:
            # .items() instead of the Python-2-only .iteritems() so the
            # module also runs under Python 3.
            for name, index in removed_indexes.items():
                global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed
        else:
            global_indexes_changed = True

    if added_indexes:
        if not check_mode:
            for name, index in added_indexes.items():
                global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed
        else:
            global_indexes_changed = True

    if index_throughput_changes:
        if not check_mode:
            # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed
            try:
                global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed
            except ValidationException:
                pass
        else:
            global_indexes_changed = True

    return throughput_changed or global_indexes_changed
|
||||
|
||||
|
||||
def has_throughput_changed(table, new_throughput):
    """Return True when *new_throughput* differs from the table's current
    provisioned throughput; a falsy *new_throughput* means no change."""
    if not new_throughput:
        return False

    current = table.throughput
    return (new_throughput['read'] != current['read']
            or new_throughput['write'] != current['write'])
|
||||
|
||||
|
||||
def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
    """Build the boto schema list for the given hash key and optional range key.

    Unknown key types fall back to the module's default DynamoDB type.
    """
    fallback = DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]
    schema = [HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, fallback))]
    if range_key_name:
        schema.append(RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, fallback)))
    return schema
|
||||
|
||||
|
||||
def get_changed_global_indexes(table, global_indexes):
    """Diff the table's current global secondary indexes against the desired ones.

    :param table: boto dynamodb2 Table object (re-described here for freshness)
    :param global_indexes: desired list of boto global index objects
    :returns: tuple ``(removed_indexes, added_indexes, index_throughput_changes)``
        of dicts keyed by index name
    """
    table.describe()

    table_index_info = dict((index.name, index.schema()) for index in table.global_indexes)
    table_index_objects = dict((index.name, index) for index in table.global_indexes)
    set_index_info = dict((index.name, index.schema()) for index in global_indexes)
    set_index_objects = dict((index.name, index) for index in global_indexes)

    # .items() instead of the Python-2-only .iteritems() so the module also
    # runs under Python 3.
    removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
    added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
    # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
    # index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes and (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or index.throughput['write'] != str(table_index_objects[name].throughput['write'])))
    # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
    index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)

    return removed_indexes, added_indexes, index_throughput_changes
|
||||
|
||||
|
||||
def validate_index(index, module):
    """fail_json() the module when *index* has an unknown option, is missing a
    required option, or specifies an invalid index type."""
    # Iterate keys directly: the values were unused, and the original
    # .iteritems() is Python-2-only.
    for key in index:
        if key not in INDEX_OPTIONS:
            module.fail_json(msg='%s is not a valid option for an index' % key)
    for required_option in INDEX_REQUIRED_OPTIONS:
        if required_option not in index:
            module.fail_json(msg='%s is a required option for an index' % required_option)
    if index['type'] not in INDEX_TYPE_OPTIONS:
        module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
|
||||
|
||||
def get_indexes(all_indexes):
    """Split the raw index definitions into local and global boto index objects.

    :param all_indexes: list of validated index dicts from the task
    :returns: tuple ``(indexes, global_indexes)``
    """
    local_indexes = []
    global_indexes = []
    for raw in all_indexes:
        index_name = raw['name']
        parts = get_schema_param(raw.get('hash_key_name'), raw.get('hash_key_type'),
                                 raw.get('range_key_name'), raw.get('range_key_type'))
        capacity = {
            'read': raw.get('read_capacity', 1),
            'write': raw.get('write_capacity', 1)
        }
        kind = raw['type']

        if kind == 'all':
            local_indexes.append(AllIndex(index_name, parts=parts))

        elif kind == 'global_all':
            global_indexes.append(GlobalAllIndex(index_name, parts=parts, throughput=capacity))

        elif kind == 'global_include':
            # 'includes' is mandatory for the include-style index types.
            global_indexes.append(GlobalIncludeIndex(index_name, parts=parts, throughput=capacity, includes=raw['includes']))

        elif kind == 'global_keys_only':
            global_indexes.append(GlobalKeysOnlyIndex(index_name, parts=parts, throughput=capacity))

        elif kind == 'include':
            local_indexes.append(IncludeIndex(index_name, parts=parts, includes=raw['includes']))

        elif kind == 'keys_only':
            local_indexes.append(KeysOnlyIndex(index_name, parts=parts))

    return local_indexes, global_indexes
|
||||
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: create/update or delete the DynamoDB table."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        # Per DOCUMENTATION, hash_key_name is only required when
        # state=present; enforced via required_if below instead of
        # required=True so deleting a table does not demand a hash key.
        hash_key_name=dict(type='str'),
        hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        range_key_name=dict(type='str'),
        range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        read_capacity=dict(default=1, type='int'),
        write_capacity=dict(default=1, type='int'),
        indexes=dict(default=[], type='list'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[['state', 'present', ['hash_key_name']]],
        supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    if state == 'present':
        create_or_update_dynamo_table(connection, module)
    elif state == 'absent':
        delete_dynamo_table(connection, module)
|
||||
|
||||
|
||||
# Standard Ansible pattern: only run the module when executed directly.
if __name__ == '__main__':
    main()
|
||||
259
lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
Normal file
259
lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
Normal file
@@ -0,0 +1,259 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_ami_copy
|
||||
short_description: copies AMI between AWS regions, return new image id
|
||||
description:
|
||||
- Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5
|
||||
version_added: "2.0"
|
||||
options:
|
||||
source_region:
|
||||
description:
|
||||
- the source region that AMI should be copied from
|
||||
required: true
|
||||
source_image_id:
|
||||
description:
|
||||
- the id of the image in source region that should be copied
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- The name of the new image to copy
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
description:
|
||||
- An optional human-readable string describing the contents and purpose of the new AMI.
|
||||
required: false
|
||||
default: null
|
||||
encrypted:
|
||||
description:
|
||||
- Whether or not to encrypt the target image
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.2"
|
||||
kms_key_id:
|
||||
description:
|
||||
- KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.2"
|
||||
wait:
|
||||
description:
|
||||
- wait for the copied AMI to be in state 'available' before returning.
|
||||
required: false
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
wait_timeout:
|
||||
description:
|
||||
- how long before wait gives up, in seconds
|
||||
required: false
|
||||
default: 1200
|
||||
tags:
|
||||
description:
|
||||
- a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
|
||||
required: false
|
||||
default: null
|
||||
|
||||
author: Amir Moulavi <amir.moulavi@gmail.com>
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Basic AMI Copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
|
||||
# AMI copy wait until available
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
wait: yes
|
||||
register: image_id
|
||||
|
||||
# Named AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
name: My-Awesome-AMI
|
||||
description: latest patch
|
||||
|
||||
# Tagged AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
tags:
|
||||
Name: My-Super-AMI
|
||||
Patch: 1.2.3
|
||||
|
||||
# Encrypted AMI copy
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
encrypted: yes
|
||||
|
||||
# Encrypted AMI copy with specified key
|
||||
- ec2_ami_copy:
|
||||
source_region: us-east-1
|
||||
region: eu-west-1
|
||||
source_image_id: ami-xxxxxxx
|
||||
encrypted: yes
|
||||
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
|
||||
'''
|
||||
|
||||
import time
|
||||
|
||||
try:
|
||||
import boto
|
||||
import boto.ec2
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import ec2_argument_spec, ec2_connect, get_aws_connection_info
|
||||
|
||||
|
||||
def copy_image(module, ec2):
    """Copy an AMI from the source region into the connected region.

    module : AnsibleModule object
    ec2: authenticated ec2 connection object

    Exits the module via ``exit_json`` with the new image id and state.
    """
    params = module.params
    tags = params.get('tags')
    wait = params.get('wait')
    wait_timeout = int(params.get('wait_timeout'))

    copy_args = dict(
        source_region=params.get('source_region'),
        source_image_id=params.get('source_image_id'),
        name=params.get('name'),
        description=params.get('description'),
        encrypted=params.get('encrypted'),
        kms_key_id=params.get('kms_key_id'),
    )

    try:
        image_id = ec2.copy_image(**copy_args).image_id
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    # First wait for the API to acknowledge the new image id exists at all,
    # then wait for the copy itself to reach the 'available' state.
    img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait)
    img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait)

    register_tags_if_any(module, ec2, tags, image_id)

    module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True)
|
||||
|
||||
|
||||
# register tags to the copied AMI
def register_tags_if_any(module, ec2, tags, image_id):
    """Apply the requested tags to the copied AMI; no-op when tags is empty/None."""
    if not tags:
        return
    try:
        ec2.create_tags([image_id], tags)
    except Exception as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
# wait here until the image is copied (i.e. the state becomes available)
def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait):
    """Poll every 3 seconds until the copied image reports state 'available'.

    With ``wait`` disabled this returns ``img`` unchanged. Fails the module
    when the deadline passes while waiting was requested.
    """
    deadline = time.time() + wait_timeout
    while wait and time.time() < deadline:
        if img is not None and img.state == 'available':
            break
        img = ec2.get_image(image_id)
        time.sleep(3)
    if wait and time.time() >= deadline:
        # Deadline expired before the copy finished.
        module.fail_json(msg="timed out waiting for image to be copied")
    return img
|
||||
|
||||
|
||||
# wait until the image is recognized.
def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait):
    """Poll until the EC2 API starts returning the newly registered image id.

    Immediately after a copy is registered the API may answer with
    InvalidAMIID.NotFound; when waiting is enabled, retry once per second for
    up to ``wait_timeout`` attempts, then fail the module.
    """
    for _attempt in range(wait_timeout):
        try:
            return ec2.get_image(image_id)
        except boto.exception.EC2ResponseError as e:
            # NotFound right after registration is expected while waiting.
            if 'InvalidAMIID.NotFound' in e.error_code and wait:
                time.sleep(1)
                continue
            # Any other error (or wait disabled) is fatal.
            module.fail_json(
                msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help: " + str(
                    e))
    else:
        module.fail_json(msg="timed out waiting for image to be recognized")
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: validate arguments, connect to EC2, copy the AMI."""
    spec = ec2_argument_spec()
    spec.update(dict(
        source_region=dict(required=True),
        source_image_id=dict(required=True),
        name=dict(),
        description=dict(default=""),
        encrypted=dict(type='bool', required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(default=1200),
        tags=dict(type='dict'),
    ))

    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    try:
        ec2 = ec2_connect(module)
        region, ec2_url, _conn_params = get_aws_connection_info(module)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    if not region:
        module.fail_json(msg="region must be specified")

    copy_image(module, ec2)


if __name__ == '__main__':
    main()
|
||||
|
||||
359
lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
Normal file
359
lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
Normal file
@@ -0,0 +1,359 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_asg_facts
|
||||
short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
|
||||
description:
|
||||
- Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
|
||||
version_added: "2.2"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The prefix or name of the auto scaling group(s) you are searching for.
|
||||
- "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
|
||||
required: false
|
||||
tags:
|
||||
description:
|
||||
- "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling group(s) you are searching for."
|
||||
required: false
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Find all groups
|
||||
- ec2_asg_facts:
|
||||
register: asgs
|
||||
|
||||
# Find a group with matching name/prefix
|
||||
- ec2_asg_facts:
|
||||
name: public-webserver-asg
|
||||
register: asgs
|
||||
|
||||
# Find a group with matching tags
|
||||
- ec2_asg_facts:
|
||||
tags:
|
||||
project: webapp
|
||||
env: production
|
||||
register: asgs
|
||||
|
||||
# Find a group with matching name/prefix and tags
|
||||
- ec2_asg_facts:
|
||||
name: myproject
|
||||
tags:
|
||||
env: production
|
||||
register: asgs
|
||||
|
||||
# Fail if no groups are found
|
||||
- ec2_asg_facts:
|
||||
name: public-webserver-asg
|
||||
register: asgs
|
||||
failed_when: "{{ asgs.results | length == 0 }}"
|
||||
|
||||
# Fail if more than 1 group is found
|
||||
- ec2_asg_facts:
|
||||
name: public-webserver-asg
|
||||
register: asgs
|
||||
failed_when: "{{ asgs.results | length > 1 }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
auto_scaling_group_arn:
|
||||
description: The Amazon Resource Name of the ASG
|
||||
returned: success
|
||||
type: string
|
||||
sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
|
||||
auto_scaling_group_name:
|
||||
description: Name of autoscaling group
|
||||
returned: success
|
||||
type: str
|
||||
sample: "public-webapp-production-1"
|
||||
availability_zones:
|
||||
description: List of Availability Zones that are enabled for this ASG.
|
||||
returned: success
|
||||
type: list
|
||||
sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
|
||||
created_time:
|
||||
description: The date and time this ASG was created, in ISO 8601 format.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "2015-11-25T00:05:36.309Z"
|
||||
default_cooldown:
|
||||
description: The default cooldown time in seconds.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 300
|
||||
desired_capacity:
|
||||
description: The number of EC2 instances that should be running in this group.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 3
|
||||
health_check_period:
|
||||
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 30
|
||||
health_check_type:
|
||||
description: The service you want the health status from, one of "EC2" or "ELB".
|
||||
returned: success
|
||||
type: str
|
||||
sample: "ELB"
|
||||
instances:
|
||||
description: List of EC2 instances and their status as it relates to the ASG.
|
||||
returned: success
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"availability_zone": "us-west-2a",
|
||||
"health_status": "Healthy",
|
||||
"instance_id": "i-es22ad25",
|
||||
"launch_configuration_name": "public-webapp-production-1",
|
||||
"lifecycle_state": "InService",
|
||||
"protected_from_scale_in": "false"
|
||||
}
|
||||
]
|
||||
launch_configuration_name:
|
||||
description: Name of launch configuration associated with the ASG.
|
||||
returned: success
|
||||
type: str
|
||||
sample: "public-webapp-production-1"
|
||||
load_balancer_names:
|
||||
description: List of load balancers names attached to the ASG.
|
||||
returned: success
|
||||
type: list
|
||||
sample: ["elb-webapp-prod"]
|
||||
max_size:
|
||||
description: Maximum size of group
|
||||
returned: success
|
||||
type: int
|
||||
sample: 3
|
||||
min_size:
|
||||
description: Minimum size of group
|
||||
returned: success
|
||||
type: int
|
||||
sample: 1
|
||||
new_instances_protected_from_scale_in:
|
||||
description: Whether or not new instances a protected from automatic scaling in.
|
||||
returned: success
|
||||
type: boolean
|
||||
sample: "false"
|
||||
placement_group:
|
||||
description: Placement group into which instances are launched, if any.
|
||||
returned: success
|
||||
type: str
|
||||
sample: None
|
||||
status:
|
||||
description: The current state of the group when DeleteAutoScalingGroup is in progress.
|
||||
returned: success
|
||||
type: str
|
||||
sample: None
|
||||
tags:
|
||||
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
|
||||
returned: success
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"key": "Name",
|
||||
"value": "public-webapp-production-1",
|
||||
"resource_id": "public-webapp-production-1",
|
||||
"resource_type": "auto-scaling-group",
|
||||
"propagate_at_launch": "true"
|
||||
},
|
||||
{
|
||||
"key": "env",
|
||||
"value": "production",
|
||||
"resource_id": "public-webapp-production-1",
|
||||
"resource_type": "auto-scaling-group",
|
||||
"propagate_at_launch": "true"
|
||||
}
|
||||
]
|
||||
termination_policies:
|
||||
description: A list of termination policies for the group.
|
||||
returned: success
|
||||
type: str
|
||||
sample: ["Default"]
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
def match_asg_tags(tags_to_match, asg):
    """Return True when every key/value pair in ``tags_to_match`` appears in
    the ASG's tags.

    :param tags_to_match: dict of tag name -> tag value that must all be present.
    :param asg: ASG description dict; read for its 'Tags' list of
        ``{'Key': ..., 'Value': ...}`` entries.
    """
    # Fix: dict.iteritems() is Python-2-only and raises AttributeError on
    # Python 3; dict.items() behaves identically here on both versions.
    for key, value in tags_to_match.items():
        for tag in asg['Tags']:
            if key == tag['Key'] and value == tag['Value']:
                break
        else:
            # No ASG tag matched this required key/value pair.
            return False
    return True
|
||||
|
||||
def find_asgs(conn, module, name=None, tags=None):
    """Return the Auto Scaling Groups matching a name prefix and/or tag set.

    Args:
        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
        module: AnsibleModule, used to fail the run on API errors.
        name (str): Optional name (regex prefix) of the ASG(s) to look for.
        tags (dict): Optional dictionary of tag names and values to match.

    Basic Usage:
        >>> name = 'public-webapp-production'
        >>> tags = { 'env': 'production' }
        >>> conn = boto3.client('autoscaling', region_name='us-west-2')
        >>> results = find_asgs(name, conn)

    Returns:
        List of matching ASG description dicts converted to snake_case keys
        (see this module's RETURN documentation for the field list).
    """
    try:
        described = conn.describe_auto_scaling_groups()
    except ClientError as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    matches = []

    # Only compile the prefix regex when a name was actually supplied.
    if name is not None:
        name_prog = re.compile(r'^' + name)

    for asg in described['AutoScalingGroups']:
        matched_name = name_prog.search(asg['AutoScalingGroupName']) if name else True
        matched_tags = match_asg_tags(tags, asg) if tags else True
        if matched_name and matched_tags:
            matches.append(camel_dict_to_snake_dict(asg))

    return matches
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: gather facts about matching Auto Scaling Groups."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            name=dict(type='str'),
            tags=dict(type='dict'),
        )
    )
    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    asg_name = module.params.get('name')
    asg_tags = module.params.get('tags')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling',
                                 region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except ClientError as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    module.exit_json(results=find_asgs(autoscaling, module, name=asg_name, tags=asg_tags))
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
271
lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
Normal file
271
lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
Normal file
@@ -0,0 +1,271 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_customer_gateway
|
||||
short_description: Manage an AWS customer gateway
|
||||
description:
|
||||
- Manage an AWS customer gateway
|
||||
version_added: "2.2"
|
||||
author: Michael Baydoun (@MichaelBaydoun)
|
||||
requirements: [ botocore, boto3 ]
|
||||
notes:
|
||||
- You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.
|
||||
- Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
|
||||
customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
|
||||
options:
|
||||
bgp_asn:
|
||||
description:
|
||||
- Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when state=present.
|
||||
required: false
|
||||
default: null
|
||||
ip_address:
|
||||
description:
|
||||
- Internet-routable IP address for customers gateway, must be a static address.
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- Name of the customer gateway.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Create or terminate the Customer Gateway.
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
# Create Customer Gateway
|
||||
- ec2_customer_gateway:
|
||||
bgp_asn: 12345
|
||||
ip_address: 1.2.3.4
|
||||
name: IndianapolisOffice
|
||||
region: us-east-1
|
||||
register: cgw
|
||||
|
||||
# Delete Customer Gateway
|
||||
- ec2_customer_gateway:
|
||||
ip_address: 1.2.3.4
|
||||
name: IndianapolisOffice
|
||||
state: absent
|
||||
region: us-east-1
|
||||
register: cgw
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
gateway.customer_gateways:
|
||||
description: details about the gateway that was created.
|
||||
returned: success
|
||||
type: complex
|
||||
contains:
|
||||
bgp_asn:
|
||||
description: The Border Gateway Autonomous System Number.
|
||||
returned: when exists and gateway is available.
|
||||
sample: 65123
|
||||
type: string
|
||||
customer_gateway_id:
|
||||
description: gateway id assigned by amazon.
|
||||
returned: when exists and gateway is available.
|
||||
sample: cgw-cb6386a2
|
||||
type: string
|
||||
ip_address:
|
||||
description: ip address of your gateway device.
|
||||
returned: when exists and gateway is available.
|
||||
sample: 1.2.3.4
|
||||
type: string
|
||||
state:
|
||||
description: state of gateway.
|
||||
returned: when gateway exists and is available.
|
||||
        sample: available
|
||||
type: string
|
||||
tags:
|
||||
description: any tags on the gateway.
|
||||
returned: when gateway exists and is available, and when tags exist.
|
||||
        sample: available
|
||||
type: string
|
||||
type:
|
||||
description: encryption type.
|
||||
returned: when gateway exists and is available.
|
||||
sample: ipsec.1
|
||||
type: string
|
||||
'''
|
||||
|
||||
try:
|
||||
from botocore.exceptions import ClientError
|
||||
HAS_BOTOCORE = True
|
||||
except ImportError:
|
||||
HAS_BOTOCORE = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict,
|
||||
ec2_argument_spec, get_aws_connection_info)
|
||||
|
||||
|
||||
class Ec2CustomerGatewayManager:
    """Thin wrapper around the boto3 EC2 client for customer-gateway operations."""

    def __init__(self, module):
        self.module = module
        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except ClientError as e:
            module.fail_json(msg=e.message)

    def ensure_cgw_absent(self, gw_id):
        """Delete the customer gateway with the given id; return the API response."""
        return self.ec2.delete_customer_gateway(
            DryRun=False,
            CustomerGatewayId=gw_id,
        )

    def ensure_cgw_present(self, bgp_asn, ip_address):
        """Create an ipsec.1 customer gateway at ``ip_address`` with ``bgp_asn``."""
        return self.ec2.create_customer_gateway(
            DryRun=False,
            Type='ipsec.1',
            PublicIp=ip_address,
            BgpAsn=bgp_asn,
        )

    def tag_cgw_name(self, gw_id, name):
        """Set (or overwrite) the Name tag on the given gateway."""
        return self.ec2.create_tags(
            DryRun=False,
            Resources=[gw_id],
            Tags=[{'Key': 'Name', 'Value': name}],
        )

    def describe_gateways(self, ip_address):
        """Describe the 'available' customer gateways that front ``ip_address``."""
        return self.ec2.describe_customer_gateways(
            DryRun=False,
            Filters=[
                {'Name': 'state', 'Values': ['available']},
                {'Name': 'ip-address', 'Values': [ip_address]},
            ],
        )
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: converge the customer gateway to the desired state."""
    spec = ec2_argument_spec()
    spec.update(
        dict(
            bgp_asn=dict(required=False, type='int'),
            ip_address=dict(required=True),
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
        required_if=[
            ('state', 'present', ['bgp_asn'])
        ],
    )

    if not HAS_BOTOCORE:
        module.fail_json(msg='botocore is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    manager = Ec2CustomerGatewayManager(module)

    name = module.params.get('name')

    existing = manager.describe_gateways(module.params['ip_address'])
    # describe_gateways returns a key of CustomerGateways where as create_gateway
    # returns a key of CustomerGateway. For consistency, change it here.
    existing['CustomerGateway'] = existing['CustomerGateways']

    results = dict(changed=False)

    if module.params['state'] == 'present':
        if existing['CustomerGateway']:
            # Gateway already exists; only the Name tag may need updating.
            results['gateway'] = existing
            tag_array = existing['CustomerGateway'][0]['Tags']
            if tag_array:
                for tag in tag_array:
                    if tag['Key'] != 'Name':
                        continue
                    if tag['Value'] != name:
                        results['name'] = manager.tag_cgw_name(
                            results['gateway']['CustomerGateway'][0]['CustomerGatewayId'],
                            module.params['name'],
                        )
                        results['changed'] = True
        else:
            # No gateway at this IP yet; create and tag it (unless check mode).
            if not module.check_mode:
                results['gateway'] = manager.ensure_cgw_present(
                    module.params['bgp_asn'],
                    module.params['ip_address'],
                )
                results['name'] = manager.tag_cgw_name(
                    results['gateway']['CustomerGateway']['CustomerGatewayId'],
                    module.params['name'],
                )
                results['changed'] = True

    elif module.params['state'] == 'absent':
        if existing['CustomerGateway']:
            results['gateway'] = existing
            if not module.check_mode:
                results['gateway'] = manager.ensure_cgw_absent(
                    existing['CustomerGateway'][0]['CustomerGatewayId']
                )
                results['changed'] = True

    module.exit_json(**camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()
|
||||
253
lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
Normal file
253
lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
Normal file
@@ -0,0 +1,253 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_elb_facts
|
||||
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
|
||||
description:
|
||||
- Gather facts about EC2 Elastic Load Balancers in AWS
|
||||
version_added: "2.0"
|
||||
author:
|
||||
- "Michael Schultz (github.com/mjschultz)"
|
||||
- "Fernando Jose Pando (@nand0p)"
|
||||
options:
|
||||
names:
|
||||
description:
|
||||
- List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
|
||||
required: false
|
||||
default: null
|
||||
aliases: ['elb_ids', 'ec2_elbs']
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
# Output format tries to match ec2_elb_lb module input parameters
|
||||
|
||||
# Gather facts about all ELBs
|
||||
- action:
|
||||
module: ec2_elb_facts
|
||||
register: elb_facts
|
||||
|
||||
- action:
|
||||
module: debug
|
||||
msg: "{{ item.dns_name }}"
|
||||
with_items: "{{ elb_facts.elbs }}"
|
||||
|
||||
# Gather facts about a particular ELB
|
||||
- action:
|
||||
module: ec2_elb_facts
|
||||
names: frontend-prod-elb
|
||||
register: elb_facts
|
||||
|
||||
- action:
|
||||
module: debug
|
||||
msg: "{{ elb_facts.elbs.0.dns_name }}"
|
||||
|
||||
# Gather facts about a set of ELBs
|
||||
- action:
|
||||
module: ec2_elb_facts
|
||||
names:
|
||||
- frontend-prod-elb
|
||||
- backend-prod-elb
|
||||
register: elb_facts
|
||||
|
||||
- action:
|
||||
module: debug
|
||||
msg: "{{ item.dns_name }}"
|
||||
with_items: "{{ elb_facts.elbs }}"
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.ec2.elb
|
||||
from boto.ec2.tag import Tag
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
class ElbInformation(object):
    """Gathers facts about EC2 classic Elastic Load Balancers.

    Wraps a boto ELB connection for one region and exposes list_elbs(),
    which returns a list of dicts describing either every ELB in the
    region or only the ones whose names were supplied.
    """

    def __init__(self,
                 module,
                 names,
                 region,
                 **aws_connect_params):
        # module: AnsibleModule used for failure reporting.
        # names: list of ELB names to restrict results to ([] means all).
        self.module = module
        self.names = names
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.connection = self._get_elb_connection()

    def _get_tags(self, elbname):
        """Return the named ELB's tags as {key: value}; {} on API failure."""
        params = {'LoadBalancerNames.member.1': elbname}
        try:
            elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
            return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
        except BotoServerError:
            # Tags are best-effort facts: an API error here should not abort
            # the run.  The original bare 'except:' also swallowed
            # SystemExit/KeyboardInterrupt; catch only API errors instead.
            return {}

    def _get_elb_connection(self):
        """Open and return the boto ELB connection, failing the module on error."""
        try:
            return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
        except BotoServerError as err:
            self.module.fail_json(msg=err.message)

    def _get_elb_listeners(self, listeners):
        """Convert boto listener tuples into dicts matching ec2_elb_lb input."""
        listener_list = []

        for listener in listeners:
            listener_dict = {
                'load_balancer_port': listener[0],
                'instance_port': listener[1],
                'protocol': listener[2],
            }

            # HTTPS/SSL listeners carry the certificate ARN in position 4;
            # plain listeners are shorter tuples, hence the IndexError guard.
            try:
                ssl_certificate_id = listener[4]
            except IndexError:
                pass
            else:
                if ssl_certificate_id:
                    listener_dict['ssl_certificate_id'] = ssl_certificate_id

            listener_list.append(listener_dict)

        return listener_list

    def _get_health_check(self, health_check):
        """Convert a boto HealthCheck (target 'PROTO:port[/path]') to a dict."""
        protocol, port_path = health_check.target.split(':')
        try:
            port, path = port_path.split('/', 1)
            path = '/{}'.format(path)
        except ValueError:
            # Targets without a path component (e.g. TCP checks).
            port = port_path
            path = None

        health_check_dict = {
            'ping_protocol': protocol.lower(),
            'ping_port': int(port),
            'response_timeout': health_check.timeout,
            'interval': health_check.interval,
            'unhealthy_threshold': health_check.unhealthy_threshold,
            'healthy_threshold': health_check.healthy_threshold,
        }

        if path:
            health_check_dict['ping_path'] = path
        return health_check_dict

    def _get_elb_info(self, elb):
        """Build the full facts dict for one boto LoadBalancer object."""
        elb_info = {
            'name': elb.name,
            'zones': elb.availability_zones,
            'dns_name': elb.dns_name,
            'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
            'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
            'hosted_zone_name': elb.canonical_hosted_zone_name,
            'hosted_zone_id': elb.canonical_hosted_zone_name_id,
            'instances': [instance.id for instance in elb.instances],
            'listeners': self._get_elb_listeners(elb.listeners),
            'scheme': elb.scheme,
            'security_groups': elb.security_groups,
            'health_check': self._get_health_check(elb.health_check),
            'subnets': elb.subnets,
            'instances_inservice': [],
            'instances_inservice_count': 0,
            'instances_outofservice': [],
            'instances_outofservice_count': 0,
            'instances_inservice_percent': 0.0,
            'tags': self._get_tags(elb.name)
        }

        if elb.vpc_id:
            elb_info['vpc_id'] = elb.vpc_id

        if elb.instances:
            try:
                instance_health = self.connection.describe_instance_health(elb.name)
            except BotoServerError as err:
                self.module.fail_json(msg=err.message)
            elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
            elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
            elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
            elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
            elb_info['instances_inservice_percent'] = float(elb_info['instances_inservice_count'])/(
                float(elb_info['instances_inservice_count']) +
                float(elb_info['instances_outofservice_count']))*100
        return elb_info

    def list_elbs(self):
        """Return facts for all (or only the requested) ELBs in the region."""
        elb_array = []

        try:
            all_elbs = self.connection.get_all_load_balancers()
        except BotoServerError as err:
            self.module.fail_json(msg="%s: %s" % (err.error_code, err.error_message))

        if all_elbs:
            if self.names:
                for existing_lb in all_elbs:
                    if existing_lb.name in self.names:
                        elb_array.append(existing_lb)
            else:
                elb_array = all_elbs

        return list(map(self._get_elb_info, elb_array))
|
||||
|
||||
def main():
    """Module entry point: gather the requested ELB facts and exit."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        names={'default': [], 'type': 'list'}
    ))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    elb_information = ElbInformation(module,
                                     module.params['names'],
                                     region,
                                     **aws_connect_params)

    # Fact modules never change state, hence changed=False.
    module.exit_json(changed=False, elbs=elb_information.list_elbs())
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
576
lib/ansible/modules/cloud/amazon/ec2_eni.py
Normal file
576
lib/ansible/modules/cloud/amazon/ec2_eni.py
Normal file
@@ -0,0 +1,576 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_eni
|
||||
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
|
||||
description:
|
||||
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is \
|
||||
provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status \
|
||||
of the network interface.
|
||||
version_added: "2.0"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
eni_id:
|
||||
description:
|
||||
- The ID of the ENI
|
||||
required: false
|
||||
default: null
|
||||
instance_id:
|
||||
description:
|
||||
- Instance ID that you wish to attach ENI to. Since version 2.2, use the 'attached' parameter to attach or \
|
||||
detach an ENI. Prior to 2.2, to detach an ENI from an instance, use 'None'.
|
||||
required: false
|
||||
default: null
|
||||
private_ip_address:
|
||||
description:
|
||||
- Private IP address.
|
||||
required: false
|
||||
default: null
|
||||
subnet_id:
|
||||
description:
|
||||
- ID of subnet in which to create the ENI. Only required when state=present.
|
||||
required: true
|
||||
description:
|
||||
description:
|
||||
- Optional description of the ENI.
|
||||
required: false
|
||||
default: null
|
||||
security_groups:
|
||||
description:
|
||||
- List of security groups associated with the interface. Only used when state=present. Since version 2.2, you \
|
||||
can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
|
||||
required: false
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- Create or delete ENI
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
device_index:
|
||||
description:
|
||||
- The index of the device for the network interface attachment on the instance.
|
||||
required: false
|
||||
default: 0
|
||||
attached:
|
||||
description:
|
||||
      - Specifies if network interface should be attached or detached from instance. If omitted, attachment status \
|
||||
won't change
|
||||
required: false
|
||||
default: yes
|
||||
version_added: 2.2
|
||||
force_detach:
|
||||
description:
|
||||
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
|
||||
required: false
|
||||
default: no
|
||||
delete_on_termination:
|
||||
description:
|
||||
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
|
||||
required: false
|
||||
source_dest_check:
|
||||
description:
|
||||
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
|
||||
required: false
|
||||
secondary_private_ip_addresses:
|
||||
description:
|
||||
- A list of IP addresses to assign as secondary IP addresses to the network interface. This option is mutually exclusive of secondary_private_ip_address_count
|
||||
required: false
|
||||
version_added: 2.2
|
||||
secondary_private_ip_address_count:
|
||||
description:
|
||||
- The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive of secondary_private_ip_addresses
|
||||
required: false
|
||||
version_added: 2.2
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Create an ENI. As no security group is defined, ENI will be created in default security group
|
||||
- ec2_eni:
|
||||
private_ip_address: 172.31.0.20
|
||||
subnet_id: subnet-xxxxxxxx
|
||||
state: present
|
||||
|
||||
# Create an ENI and attach it to an instance
|
||||
- ec2_eni:
|
||||
instance_id: i-xxxxxxx
|
||||
device_index: 1
|
||||
private_ip_address: 172.31.0.20
|
||||
subnet_id: subnet-xxxxxxxx
|
||||
state: present
|
||||
|
||||
# Create an ENI with two secondary addresses
|
||||
- ec2_eni:
|
||||
subnet_id: subnet-xxxxxxxx
|
||||
state: present
|
||||
secondary_private_ip_address_count: 2
|
||||
|
||||
# Assign a secondary IP address to an existing ENI
|
||||
# This will purge any existing IPs
|
||||
- ec2_eni:
|
||||
subnet_id: subnet-xxxxxxxx
|
||||
eni_id: eni-yyyyyyyy
|
||||
state: present
|
||||
secondary_private_ip_addresses:
|
||||
- 172.16.1.1
|
||||
|
||||
# Remove any secondary IP addresses from an existing ENI
|
||||
- ec2_eni:
|
||||
subnet_id: subnet-xxxxxxxx
|
||||
eni_id: eni-yyyyyyyy
|
||||
state: present
|
||||
secondary_private_ip_addresses:
|
||||
-
|
||||
|
||||
# Destroy an ENI, detaching it from any instance if necessary
|
||||
- ec2_eni:
|
||||
eni_id: eni-xxxxxxx
|
||||
force_detach: yes
|
||||
state: absent
|
||||
|
||||
# Update an ENI
|
||||
- ec2_eni:
|
||||
eni_id: eni-xxxxxxx
|
||||
description: "My new description"
|
||||
state: present
|
||||
|
||||
# Detach an ENI from an instance
|
||||
- ec2_eni:
|
||||
eni_id: eni-xxxxxxx
|
||||
instance_id: None
|
||||
state: present
|
||||
|
||||
### Delete an interface on termination
|
||||
# First create the interface
|
||||
- ec2_eni:
|
||||
instance_id: i-xxxxxxx
|
||||
device_index: 1
|
||||
private_ip_address: 172.31.0.20
|
||||
subnet_id: subnet-xxxxxxxx
|
||||
state: present
|
||||
register: eni
|
||||
|
||||
# Modify the interface to enable the delete_on_termination flag
|
||||
- ec2_eni:
|
||||
eni_id: {{ "eni.interface.id" }}
|
||||
delete_on_termination: true
|
||||
|
||||
'''
|
||||
|
||||
|
||||
RETURN = '''
|
||||
interface:
|
||||
description: Network interface attributes
|
||||
returned: when state != absent
|
||||
type: dictionary
|
||||
contains:
|
||||
description:
|
||||
description: interface description
|
||||
type: string
|
||||
sample: Firewall network interface
|
||||
groups:
|
||||
description: list of security groups
|
||||
type: list of dictionaries
|
||||
sample: [ { "sg-f8a8a9da": "default" } ]
|
||||
id:
|
||||
description: network interface id
|
||||
type: string
|
||||
sample: "eni-1d889198"
|
||||
mac_address:
|
||||
description: interface's physical address
|
||||
type: string
|
||||
sample: "00:00:5E:00:53:23"
|
||||
owner_id:
|
||||
description: aws account id
|
||||
type: string
|
||||
sample: 812381371
|
||||
private_ip_address:
|
||||
description: primary ip address of this interface
|
||||
type: string
|
||||
sample: 10.20.30.40
|
||||
private_ip_addresses:
|
||||
description: list of all private ip addresses associated to this interface
|
||||
type: list of dictionaries
|
||||
sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
|
||||
source_dest_check:
|
||||
description: value of source/dest check flag
|
||||
type: boolean
|
||||
sample: True
|
||||
status:
|
||||
description: network interface status
|
||||
type: string
|
||||
sample: "pending"
|
||||
subnet_id:
|
||||
description: which vpc subnet the interface is bound
|
||||
type: string
|
||||
sample: subnet-b0a0393c
|
||||
vpc_id:
|
||||
description: which vpc this network interface is bound
|
||||
type: string
|
||||
sample: vpc-9a9a9da
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
import re
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
import boto.vpc
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
|
||||
ec2_argument_spec, get_aws_connection_info,
|
||||
get_ec2_security_group_ids_from_names)
|
||||
|
||||
|
||||
def get_eni_info(interface):
    """Translate a boto NetworkInterface object into a plain facts dict.

    The returned dict contains an 'attachment' sub-dict only when the
    interface is currently attached to an instance.
    """
    # Collect all private IPs, flagging which one is the primary address.
    addresses = []
    for addr in interface.private_ip_addresses:
        addresses.append({'private_ip_address': addr.private_ip_address,
                          'primary_address': addr.primary})

    info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': dict((sg.id, sg.name) for sg in interface.groups),
        'private_ip_addresses': addresses,
    }

    attachment = interface.attachment
    if attachment is not None:
        info['attachment'] = {
            'attachment_id': attachment.id,
            'instance_id': attachment.instance_id,
            'device_index': attachment.device_index,
            'status': attachment.status,
            'attach_time': attachment.attach_time,
            'delete_on_termination': attachment.delete_on_termination,
        }

    return info
|
||||
|
||||
|
||||
def wait_for_eni(eni, status):
    """Poll the ENI every 3 seconds until it reaches the desired state.

    status: "attached" or "detached".
    NOTE(review): there is no timeout or retry cap, so this loops forever
    if the interface never reaches the requested state — confirm this is
    acceptable for the module's callers.
    """

    while True:
        time.sleep(3)
        eni.update()
        # If the status is detached we just need attachment to disappear
        if eni.attachment is None:
            if status == "detached":
                break
        else:
            if status == "attached" and eni.attachment.status == "attached":
                break
|
||||
|
||||
|
||||
def create_eni(connection, vpc_id, module):
    """Create a new ENI (when none matches) and optionally attach it.

    Any failure while attaching or assigning secondary addresses deletes
    the just-created interface so a partial create is not left behind.
    Exits the module with changed + the interface facts.
    """

    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    if instance_id == 'None':
        # Legacy (pre-2.2) way of requesting a detached interface.
        instance_id = None
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    changed = False

    try:
        eni = find_eni(connection, module)
        if eni is None:
            eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
            # 'attached' is a bool or None here, so a plain truthiness test
            # is equivalent to the original non-idiomatic '== True'.
            if attached and instance_id is not None:
                try:
                    eni.attach(instance_id, device_index)
                except BotoServerError:
                    # Roll back the create so we don't leak an orphan ENI.
                    eni.delete()
                    raise
                # Wait to allow creation / attachment to finish
                wait_for_eni(eni, "attached")
                eni.update()

            if secondary_private_ip_address_count is not None:
                try:
                    connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
                except BotoServerError:
                    eni.delete()
                    raise

            if secondary_private_ip_addresses is not None:
                try:
                    connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
                except BotoServerError:
                    eni.delete()
                    raise

            changed = True

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed, interface=get_eni_info(eni))
|
||||
|
||||
|
||||
def modify_eni(connection, vpc_id, module, eni):
    """Bring an existing ENI in line with the module parameters.

    Compares each requested attribute (description, security groups,
    source/dest check, delete-on-termination, secondary IPs, attachment)
    with the current state and only issues API calls for the differences.
    Exits the module with changed + the refreshed interface facts.
    """

    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    # NOTE(review): 'detached' is not among the state choices declared in
    # main(), so do_detach looks like it is always False — confirm.
    do_detach = module.params.get('state') == 'detached'
    device_index = module.params.get("device_index")
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    force_detach = module.params.get("force_detach")
    source_dest_check = module.params.get("source_dest_check")
    delete_on_termination = module.params.get("delete_on_termination")
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    changed = False

    try:
        if description is not None:
            if eni.description != description:
                connection.modify_network_interface_attribute(eni.id, "description", description)
                changed = True
        if len(security_groups) > 0:
            # Resolve names to IDs so the comparison below is ID vs ID.
            groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
            if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
                connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
                changed = True
        if source_dest_check is not None:
            if eni.source_dest_check != source_dest_check:
                connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
                changed = True
        if delete_on_termination is not None and eni.attachment is not None:
            if eni.attachment.delete_on_termination is not delete_on_termination:
                connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
                changed = True

        # Secondary addresses are every private IP that is not the primary.
        current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
        if secondary_private_ip_addresses is not None:
            # Purge addresses not in the requested set, then (re)assign the
            # requested ones.
            secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
            if secondary_addresses_to_remove:
                connection.unassign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=list(set(current_secondary_addresses) - set(secondary_private_ip_addresses)), dry_run=False)
            connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses, secondary_private_ip_address_count=None, allow_reassignment=False, dry_run=False)
            # NOTE(review): 'changed' is not updated in this branch even
            # though the address set may have been modified — confirm.
        if secondary_private_ip_address_count is not None:
            current_secondary_address_count = len(current_secondary_addresses)

            if secondary_private_ip_address_count > current_secondary_address_count:
                connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=None, secondary_private_ip_address_count=(secondary_private_ip_address_count - current_secondary_address_count), allow_reassignment=False, dry_run=False)
                changed = True
            elif secondary_private_ip_address_count < current_secondary_address_count:
                # How many of these addresses do we want to remove
                secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
                connection.unassign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count], dry_run=False)

        if attached == True:
            if eni.attachment and eni.attachment.instance_id != instance_id:
                # Attached to the wrong instance: detach first, then re-attach.
                detach_eni(eni, module)
            if eni.attachment is None:
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True
        elif attached == False:
            detach_eni(eni, module)

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    eni.update()
    module.exit_json(changed=changed, interface=get_eni_info(eni))
|
||||
|
||||
|
||||
def delete_eni(connection, module):
    """Delete the ENI named by eni_id, optionally force-detaching it first.

    Exits the module with changed=True on deletion, changed=False when the
    interface no longer exists; fails the module on any other API error.
    """

    eni_id = module.params.get("eni_id")
    force_detach = module.params.get("force_detach")

    try:
        eni = connection.get_all_network_interfaces(eni_id)[0]

        # With force_detach we detach an attached interface before deleting;
        # either way the interface is deleted and the result is a change.
        if force_detach is True and eni.attachment is not None:
            eni.detach(force_detach)
            # Wait to allow detachment to finish
            wait_for_eni(eni, "detached")
            eni.update()

        eni.delete()
        module.exit_json(changed=True)
    except BotoServerError as e:
        # An already-missing interface means the desired state holds.
        regex = re.compile('The networkInterface ID \'.*\' does not exist')
        if regex.search(e.message) is not None:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=e.message)
|
||||
|
||||
|
||||
def detach_eni(eni, module):
    """Detach *eni* from its instance, waiting for the detach to settle.

    Exits the module immediately: changed=True after a detach, changed=False
    when the interface was not attached in the first place.
    """
    if eni.attachment is None:
        # Nothing to do - already detached.
        module.exit_json(changed=False, interface=get_eni_info(eni))
        return

    eni.detach(module.params.get("force_detach"))
    wait_for_eni(eni, "detached")
    eni.update()
    module.exit_json(changed=True, interface=get_eni_info(eni))
|
||||
|
||||
|
||||
def find_eni(connection, module):
    """Locate an existing ENI matching the module parameters, or None.

    Looks up by eni_id when given; otherwise filters by subnet and either
    the private IP or (lacking one) the attachment instance/device-index.
    Fails the module on API errors.
    """

    eni_id = module.params.get("eni_id")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    instance_id = module.params.get('instance_id')
    device_index = module.params.get('device_index')

    try:
        filters = {}
        if subnet_id:
            filters['subnet-id'] = subnet_id
        if private_ip_address:
            filters['private-ip-address'] = private_ip_address
        else:
            # Without a fixed private IP, fall back to identifying the
            # interface by where it is attached.
            if instance_id:
                filters['attachment.instance-id'] = instance_id
            if device_index:
                filters['attachment.device-index'] = device_index

        eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
        # Truthiness instead of the original 'len(...) > 0'.
        if eni_result:
            return eni_result[0]
        return None
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    # Unreachable in practice (fail_json exits the module); kept for safety.
    return None
|
||||
|
||||
|
||||
def get_sec_group_list(groups):
    """Return the security-group IDs of *groups* as a list of text strings.

    The original called group.id.encode(), which returns bytes on Python 3;
    modify_eni compares this list against the str IDs resolved by
    get_ec2_security_group_ids_from_names, so bytes would never compare
    equal.  Keep the IDs as native text instead.
    """
    return [group.id for group in groups]
|
||||
|
||||
|
||||
def _get_vpc_id(connection, module, subnet_id):
|
||||
|
||||
try:
|
||||
return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
|
||||
except BotoServerError as e:
|
||||
module.fail_json(msg=e.message)
|
||||
|
||||
|
||||
def main():
    """Entry point: build the argument spec, connect to AWS and dispatch
    to create/modify/delete based on 'state'."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id=dict(default=None, type='str'),
            instance_id=dict(default=None, type='str'),
            private_ip_address=dict(type='str'),
            subnet_id=dict(type='str'),
            description=dict(type='str'),
            security_groups=dict(default=[], type='list'),
            device_index=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            force_detach=dict(default='no', type='bool'),
            source_dest_check=dict(default=None, type='bool'),
            delete_on_termination=dict(default=None, type='bool'),
            secondary_private_ip_addresses=dict(default=None, type='list'),
            secondary_private_ip_address_count=dict(default=None, type='int'),
            attached=dict(default=None, type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
                           ],
                           required_if=([
                               ('state', 'present', ['subnet_id']),
                               ('state', 'absent', ['eni_id']),
                               ('attached', True, ['instance_id'])
                           ])
                           )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            # Two connections: boto.ec2 for the ENI operations themselves,
            # boto.vpc to resolve the subnet into its VPC ID.
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
            vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")
    eni_id = module.params.get("eni_id")
    private_ip_address = module.params.get('private_ip_address')

    if state == 'present':
        subnet_id = module.params.get("subnet_id")
        vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)

        # Create when no matching interface exists, otherwise converge the
        # existing one toward the requested parameters.
        eni = find_eni(connection, module)
        if eni is None:
            create_eni(connection, vpc_id, module)
        else:
            modify_eni(connection, vpc_id, module, eni)

    elif state == 'absent':
        delete_eni(connection, module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
185
lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
Normal file
185
lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
Normal file
@@ -0,0 +1,185 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_eni_facts
|
||||
short_description: Gather facts about ec2 ENI interfaces in AWS
|
||||
description:
|
||||
- Gather facts about ec2 ENI interfaces in AWS
|
||||
version_added: "2.0"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
|
||||
required: false
|
||||
default: null
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all ENIs
|
||||
- ec2_eni_facts:
|
||||
|
||||
# Gather facts about a particular ENI
|
||||
- ec2_eni_facts:
|
||||
filters:
|
||||
network-interface-id: eni-xxxxxxx
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError, NoCredentialsError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (AnsibleAWSError,
|
||||
ansible_dict_to_boto3_filter_list, boto3_conn,
|
||||
boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
|
||||
connect_to_aws, ec2_argument_spec, get_aws_connection_info)
|
||||
|
||||
|
||||
def list_ec2_snapshots_boto3(connection, module):
    """List network interfaces via boto3 and exit with snake_cased facts.

    NOTE(review): despite its name this describes network interfaces, not
    snapshots; the name is kept so existing callers keep working.
    """

    if module.params.get("filters") is None:
        filters = []
    else:
        filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))

    try:
        network_interfaces_result = connection.describe_network_interfaces(Filters=filters)
    except (ClientError, NoCredentialsError) as e:
        # botocore exceptions have no '.message' attribute on Python 3, so
        # the original 'e.message' raised AttributeError here; use str(e).
        module.fail_json(msg=str(e))

    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_network_interfaces_result = camel_dict_to_snake_dict(network_interfaces_result)
    for network_interfaces in snaked_network_interfaces_result['network_interfaces']:
        network_interfaces['tag_set'] = boto3_tag_list_to_ansible_dict(network_interfaces['tag_set'])

    module.exit_json(**snaked_network_interfaces_result)
|
||||
|
||||
|
||||
def get_eni_info(interface):
    """Translate a boto NetworkInterface object into a plain facts dict.

    Adds an 'association' sub-dict when the interface has public-address
    attributes, and an 'attachment' sub-dict when it is attached.
    """
    # Collect all private IPs, flagging which one is the primary address.
    addresses = []
    for addr in interface.private_ip_addresses:
        addresses.append({'private_ip_address': addr.private_ip_address,
                          'primary_address': addr.primary})

    info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': dict((sg.id, sg.name) for sg in interface.groups),
        'private_ip_addresses': addresses,
    }

    # Only interfaces with a public association expose these attributes.
    if hasattr(interface, 'publicDnsName'):
        info['association'] = {
            'public_ip_address': interface.publicIp,
            'public_dns_name': interface.publicDnsName,
            'ip_owner_id': interface.ipOwnerId,
        }

    attachment = interface.attachment
    if attachment is not None:
        info['attachment'] = {
            'attachment_id': attachment.id,
            'instance_id': attachment.instance_id,
            'device_index': attachment.device_index,
            'status': attachment.status,
            'attach_time': attachment.attach_time,
            'delete_on_termination': attachment.delete_on_termination,
        }

    return info
|
||||
|
||||
|
||||
def list_eni(connection, module):
    """Describe all ENIs matching the module's filters and exit with the list."""
    eni_filters = module.params.get("filters")

    try:
        matching = connection.get_all_network_interfaces(filters=eni_filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    # fail_json raises, so this line is only reached on success.
    module.exit_json(interfaces=[get_eni_info(eni) for eni in matching])
|
||||
|
||||
|
||||
def main():
    """Entry point: gather facts about EC2 network interfaces.

    Uses boto3 when available, otherwise falls back to the legacy boto
    code path via list_eni().
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters = dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    # Plain boto is the hard requirement; boto3 merely selects the newer path.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if HAS_BOTO3:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

        if region:
            connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
        else:
            module.fail_json(msg="region must be specified")

        # NOTE(review): this calls list_ec2_snapshots_boto3(), which looks like
        # a copy/paste from the snapshot facts module — confirm the intended
        # boto3 ENI listing function is what gets invoked here.
        list_ec2_snapshots_boto3(connection, module)
    else:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)

        if region:
            try:
                connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
            except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
                module.fail_json(msg=str(e))
        else:
            module.fail_json(msg="region must be specified")

        list_eni(connection, module)


if __name__ == '__main__':
    main()
|
||||
167
lib/ansible/modules/cloud/amazon/ec2_group_facts.py
Normal file
167
lib/ansible/modules/cloud/amazon/ec2_group_facts.py
Normal file
@@ -0,0 +1,167 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_group_facts
|
||||
short_description: Gather facts about ec2 security groups in AWS.
|
||||
description:
|
||||
- Gather facts about ec2 security groups in AWS.
|
||||
version_added: "2.3"
|
||||
author: "Henrique Rodrigues (github.com/Sodki)"
|
||||
options:
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
|
||||
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for \
|
||||
possible filters. Filter names and values are case sensitive. You can also use underscores (_) \
|
||||
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
|
||||
required: false
|
||||
default: {}
|
||||
notes:
|
||||
- By default, the module will return all security groups. To limit results use the appropriate filters.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all security groups
|
||||
- ec2_group_facts:
|
||||
|
||||
# Gather facts about all security groups in a specific VPC
|
||||
- ec2_group_facts:
|
||||
filters:
|
||||
vpc-id: vpc-12345678
|
||||
|
||||
# Gather facts about all security groups in a specific VPC
|
||||
- ec2_group_facts:
|
||||
filters:
|
||||
vpc-id: vpc-12345678
|
||||
|
||||
# Gather facts about a security group
|
||||
- ec2_group_facts:
|
||||
filters:
|
||||
group-name: example-1
|
||||
|
||||
# Gather facts about a security group by id
|
||||
- ec2_group_facts:
|
||||
filters:
|
||||
group-id: sg-12345678
|
||||
|
||||
# Gather facts about a security group with multiple filters, also mixing the use of underscores as filter keys
|
||||
- ec2_group_facts:
|
||||
filters:
|
||||
group_id: sg-12345678
|
||||
vpc-id: vpc-12345678
|
||||
|
||||
# Gather facts about various security groups
|
||||
- ec2_group_facts:
|
||||
filters:
|
||||
group-name:
|
||||
- example-1
|
||||
- example-2
|
||||
- example-3
|
||||
|
||||
# Gather facts about any security group with a tag key Name and value Example. The quotes around 'tag:name' are important because of the colon in the value
|
||||
- ec2_group_facts:
|
||||
filters:
|
||||
"tag:Name": Example
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
security_groups:
|
||||
description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
|
||||
type: list
|
||||
sample:
|
||||
'''
|
||||
|
||||
|
||||
try:
    import boto3
    from botocore.exceptions import ClientError
    HAS_BOTO3 = True
except ImportError:
    # boto3 is optional at import time; main() fails with a clear message
    # when it is missing.  (Was the corrupted token `Falsentry`, which
    # raised NameError whenever boto3 was absent.)
    HAS_BOTO3 = False

import traceback
|
||||
|
||||
|
||||
def main():
    """Entry point: gather facts about EC2 security groups matching the filters."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default={}, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(
            module,
            conn_type='client',
            resource='ec2',
            region=region,
            endpoint=ec2_url,
            **aws_connect_params
        )
    else:
        module.fail_json(msg="region must be specified")

    # Replace filter key underscores with dashes, for compatibility, except if
    # we're dealing with tags.  Iterate over a snapshot of the keys: popping
    # and re-inserting while iterating the dict itself has undefined behaviour
    # (keys may be skipped or visited twice).
    sanitized_filters = module.params.get("filters")
    for key in list(sanitized_filters):
        if not key.startswith("tag:"):
            sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)

    try:
        security_groups = connection.describe_security_groups(
            Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
        )
    except ClientError as e:
        # traceback.format_exc() takes an optional depth limit, not the
        # exception instance, so call it without arguments.
        module.fail_json(msg=e.message, exception=traceback.format_exc())

    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_security_groups = []
    for security_group in security_groups['SecurityGroups']:
        snaked_security_groups.append(camel_dict_to_snake_dict(security_group))

    # Turn the boto3 result in to ansible friendly tag dictionary
    for security_group in snaked_security_groups:
        if 'tags' in security_group:
            security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group['tags'])

    module.exit_json(security_groups=snaked_security_groups)


from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
|
||||
229
lib/ansible/modules/cloud/amazon/ec2_lc_facts.py
Normal file
229
lib/ansible/modules/cloud/amazon/ec2_lc_facts.py
Normal file
@@ -0,0 +1,229 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_lc_facts
|
||||
short_description: Gather facts about AWS Autoscaling Launch Configurations
|
||||
description:
|
||||
- Gather facts about AWS Autoscaling Launch Configurations
|
||||
version_added: "2.3"
|
||||
author: "Loïc Latreille (@psykotox)"
|
||||
requirements: [ boto3 ]
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- A name or a list of name to match.
|
||||
required: false
|
||||
default: []
|
||||
sort:
|
||||
description:
|
||||
- Optional attribute which with to sort the results.
|
||||
choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
|
||||
default: null
|
||||
required: false
|
||||
sort_order:
|
||||
description:
|
||||
- Order in which to sort results.
|
||||
- Only used when the 'sort' parameter is specified.
|
||||
choices: ['ascending', 'descending']
|
||||
default: 'ascending'
|
||||
required: false
|
||||
sort_start:
|
||||
description:
|
||||
- Which result to start with (when sorting).
|
||||
- Corresponds to Python slice notation.
|
||||
default: null
|
||||
required: false
|
||||
sort_end:
|
||||
description:
|
||||
- Which result to end with (when sorting).
|
||||
- Corresponds to Python slice notation.
|
||||
default: null
|
||||
required: false
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all launch configurations
|
||||
- ec2_lc_facts:
|
||||
|
||||
# Gather facts about launch configuration with name "example"
|
||||
- ec2_lc_facts:
|
||||
name: example
|
||||
|
||||
# Gather facts sorted by created_time from most recent to least recent
|
||||
- ec2_lc_facts:
|
||||
sort: created_time
|
||||
sort_order: descending
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
block_device_mapping:
|
||||
description: Block device mapping for the instances of launch configuration
|
||||
type: list of block devices
|
||||
sample: "[{
|
||||
'device_name': '/dev/xvda':,
|
||||
'ebs': {
|
||||
'delete_on_termination': true,
|
||||
'volume_size': 8,
|
||||
'volume_type': 'gp2'
|
||||
}]"
|
||||
classic_link_vpc_security_groups:
|
||||
description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
|
||||
type: string
|
||||
sample:
|
||||
created_time:
|
||||
description: The creation date and time for the launch configuration
|
||||
type: string
|
||||
sample: "2016-05-27T13:47:44.216000+00:00"
|
||||
ebs_optimized:
|
||||
description: EBS I/O optimized (true ) or not (false )
|
||||
type: bool
|
||||
sample: true,
|
||||
image_id:
|
||||
description: ID of the Amazon Machine Image (AMI)
|
||||
type: string
|
||||
sample: "ami-12345678"
|
||||
instance_monitoring:
|
||||
description: Launched with detailed monitoring or not
|
||||
type: dict
|
||||
sample: "{
|
||||
'enabled': true
|
||||
}"
|
||||
instance_type:
|
||||
description: Instance type
|
||||
type: string
|
||||
sample: "t2.micro"
|
||||
kernel_id:
|
||||
description: ID of the kernel associated with the AMI
|
||||
type: string
|
||||
sample:
|
||||
key_name:
|
||||
description: Name of the key pair
|
||||
type: string
|
||||
sample: "user_app"
|
||||
launch_configuration_arn:
|
||||
description: Amazon Resource Name (ARN) of the launch configuration
|
||||
type: string
|
||||
sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
|
||||
launch_configuration_name:
|
||||
description: Name of the launch configuration
|
||||
type: string
|
||||
sample: "lc-app"
|
||||
ramdisk_id:
|
||||
description: ID of the RAM disk associated with the AMI
|
||||
type: string
|
||||
sample:
|
||||
security_groups:
|
||||
description: Security groups to associated
|
||||
type: list
|
||||
sample: "[
|
||||
'web'
|
||||
]"
|
||||
user_data:
|
||||
description: User data available
|
||||
type: string
|
||||
sample:
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError, NoCredentialsError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
def list_launch_configs(connection, module):
    """Describe launch configurations and exit with the sorted/sliced results.

    Reads the ``name``, ``sort``, ``sort_order``, ``sort_start`` and
    ``sort_end`` module parameters.
    """
    launch_config_name = module.params.get("name")
    sort = module.params.get('sort')
    sort_order = module.params.get('sort_order')
    sort_start = module.params.get('sort_start')
    sort_end = module.params.get('sort_end')

    try:
        launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=launch_config_name)
    except ClientError as e:
        module.fail_json(msg=e.message)

    # Turn the boto3 result into ansible-friendly snake_cased dicts.
    snaked_launch_configs = []
    for launch_config in launch_configs['LaunchConfigurations']:
        snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))

    for launch_config in snaked_launch_configs:
        # After camel_dict_to_snake_dict the key is 'created_time', not
        # 'CreatedTime' — the original check never matched, leaving a
        # datetime object that exit_json cannot JSON-serialize.
        if 'created_time' in launch_config:
            launch_config['created_time'] = str(launch_config['created_time'])

    if sort:
        snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))

    try:
        if sort and sort_start and sort_end:
            snaked_launch_configs = snaked_launch_configs[int(sort_start):int(sort_end)]
        elif sort and sort_start:
            snaked_launch_configs = snaked_launch_configs[int(sort_start):]
        elif sort and sort_end:
            snaked_launch_configs = snaked_launch_configs[:int(sort_end)]
    except (TypeError, ValueError):
        # int() raises ValueError for non-numeric strings (the usual case for
        # module params) and TypeError for other non-numeric types.
        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")

    module.exit_json(launch_configurations=snaked_launch_configs)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the argument spec, connect, list launch configs."""
    spec = ec2_argument_spec()
    spec.update(dict(
        name=dict(required=False, default=[], type='list'),
        sort=dict(required=False, default=None,
                  choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
        sort_order=dict(required=False, default='ascending',
                        choices=['ascending', 'descending']),
        sort_start=dict(required=False),
        sort_end=dict(required=False),
    ))

    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(module, conn_type='client', resource='autoscaling',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")

    list_launch_configs(connection, module)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
|
||||
229
lib/ansible/modules/cloud/amazon/ec2_lc_find.py
Normal file
229
lib/ansible/modules/cloud/amazon/ec2_lc_find.py
Normal file
@@ -0,0 +1,229 @@
|
||||
#!/usr/bin/python
|
||||
# encoding: utf-8
|
||||
|
||||
# (c) 2015, Jose Armesto <jose@armesto.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# This module is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This software is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: ec2_lc_find
|
||||
short_description: Find AWS Autoscaling Launch Configurations
|
||||
description:
|
||||
- Returns list of matching Launch Configurations for a given name, along with other useful information
|
||||
- Results can be sorted and sliced
|
||||
- It depends on boto
|
||||
- Based on the work by Tom Bamford (https://github.com/tombamford)
|
||||
|
||||
version_added: "2.2"
|
||||
author: "Jose Armesto (@fiunchinho)"
|
||||
options:
|
||||
region:
|
||||
description:
|
||||
- The AWS region to use.
|
||||
required: true
|
||||
aliases: ['aws_region', 'ec2_region']
|
||||
name_regex:
|
||||
description:
|
||||
- A Launch Configuration to match
|
||||
- It'll be compiled as regex
|
||||
required: True
|
||||
sort_order:
|
||||
description:
|
||||
- Order in which to sort results.
|
||||
choices: ['ascending', 'descending']
|
||||
default: 'ascending'
|
||||
required: false
|
||||
limit:
|
||||
description:
|
||||
- How many results to show.
|
||||
- Corresponds to Python slice notation like list[:limit].
|
||||
default: null
|
||||
required: false
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- boto3
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Search for the Launch Configurations that start with "app"
|
||||
- ec2_lc_find:
|
||||
name_regex: app.*
|
||||
sort_order: descending
|
||||
limit: 2
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
image_id:
|
||||
description: AMI id
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: "ami-0d75df7e"
|
||||
user_data:
|
||||
description: User data used to start instance
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: "ZXhwb3J0IENMT1VE"
|
||||
name:
|
||||
description: Name of the launch configuration
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: "myapp-v123"
|
||||
arn:
|
||||
description: Amazon Resource Name (ARN) of the launch configuration
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
|
||||
instance_type:
|
||||
description: Type of ec2 instance
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: "t2.small"
|
||||
created_time:
|
||||
description: When it was created
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: "2016-06-29T14:59:22.222000+00:00"
|
||||
ebs_optimized:
|
||||
description: Launch Configuration EBS optimized property
|
||||
returned: when Launch Configuration was found
|
||||
type: boolean
|
||||
sample: False
|
||||
instance_monitoring:
|
||||
description: Launch Configuration instance monitoring property
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: {"Enabled": false}
|
||||
classic_link_vpc_security_groups:
|
||||
description: Launch Configuration classic link vpc security groups property
|
||||
returned: when Launch Configuration was found
|
||||
type: list
|
||||
sample: []
|
||||
block_device_mappings:
|
||||
description: Launch Configuration block device mappings property
|
||||
returned: when Launch Configuration was found
|
||||
type: list
|
||||
sample: []
|
||||
keyname:
|
||||
description: Launch Configuration ssh key
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: mykey
|
||||
security_groups:
|
||||
description: Launch Configuration security groups
|
||||
returned: when Launch Configuration was found
|
||||
type: list
|
||||
sample: []
|
||||
kernel_id:
|
||||
description: Launch Configuration kernel to use
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: ''
|
||||
ram_disk_id:
|
||||
description: Launch Configuration ram disk property
|
||||
returned: when Launch Configuration was found
|
||||
type: string
|
||||
sample: ''
|
||||
associate_public_address:
|
||||
description: Assign public address or not
|
||||
returned: when Launch Configuration was found
|
||||
type: boolean
|
||||
sample: True
|
||||
...
|
||||
'''
|
||||
|
||||
|
||||
def find_launch_configs(client, module):
    """Find launch configurations whose name matches ``name_regex``.

    Paginates through describe_launch_configurations, keeps matches,
    optionally sorts and limits them, and exits the module with the results.
    """
    name_regex = module.params.get('name_regex')
    sort_order = module.params.get('sort_order')
    limit = module.params.get('limit')

    # Compile once; the original recompiled the pattern for every launch
    # configuration inside a filter() lambda.
    pattern = re.compile(name_regex)

    paginator = client.get_paginator('describe_launch_configurations')

    response_iterator = paginator.paginate(
        PaginationConfig={
            'MaxItems': 1000,
            'PageSize': 100
        }
    )

    results = []

    for response in response_iterator:
        # Keep only launch configs whose name matches the pattern.
        matching = [lc for lc in response['LaunchConfigurations']
                    if pattern.match(lc['LaunchConfigurationName'])]

        for lc in matching:
            results.append({
                'name': lc['LaunchConfigurationName'],
                'arn': lc['LaunchConfigurationARN'],
                'created_time': lc['CreatedTime'],
                'user_data': lc['UserData'],
                'instance_type': lc['InstanceType'],
                'image_id': lc['ImageId'],
                'ebs_optimized': lc['EbsOptimized'],
                'instance_monitoring': lc['InstanceMonitoring'],
                'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
                'block_device_mappings': lc['BlockDeviceMappings'],
                'keyname': lc['KeyName'],
                'security_groups': lc['SecurityGroups'],
                'kernel_id': lc['KernelId'],
                # RamdiskId is always present in the API response.
                'ram_disk_id': lc['RamdiskId'],
                # AssociatePublicIpAddress is optional in the response.
                'associate_public_address': lc.get('AssociatePublicIpAddress', False),
            })

    results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))

    if limit:
        results = results[:int(limit)]

    module.exit_json(changed=False, results=results)
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments, connect to autoscaling, run the search."""
    spec = ec2_argument_spec()
    spec.update(dict(
        region=dict(required=True, aliases=['aws_region', 'ec2_region']),
        name_regex=dict(required=True),
        sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
        limit=dict(required=False, type='int'),
    ))

    module = AnsibleModule(argument_spec=spec)

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)

    client = boto3_conn(module=module, conn_type='client', resource='autoscaling',
                        region=region, **aws_connect_params)
    find_launch_configs(client, module)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
|
||||
192
lib/ansible/modules/cloud/amazon/ec2_remote_facts.py
Normal file
192
lib/ansible/modules/cloud/amazon/ec2_remote_facts.py
Normal file
@@ -0,0 +1,192 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_remote_facts
|
||||
short_description: Gather facts about ec2 instances in AWS
|
||||
description:
|
||||
- Gather facts about ec2 instances in AWS
|
||||
version_added: "2.0"
|
||||
options:
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters.
|
||||
required: false
|
||||
default: null
|
||||
author:
|
||||
- "Michael Schuett (@michaeljs1990)"
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all ec2 instances
|
||||
- ec2_remote_facts:
|
||||
|
||||
# Gather facts about all running ec2 instances with a tag of Name:Example
|
||||
- ec2_remote_facts:
|
||||
filters:
|
||||
instance-state-name: running
|
||||
"tag:Name": Example
|
||||
|
||||
# Gather facts about instance i-123456
|
||||
- ec2_remote_facts:
|
||||
filters:
|
||||
instance-id: i-123456
|
||||
|
||||
# Gather facts about all instances in vpc-123456 that are t2.small type
|
||||
- ec2_remote_facts:
|
||||
filters:
|
||||
vpc-id: vpc-123456
|
||||
instance-type: t2.small
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def get_instance_info(instance):
    """Map a boto ec2 Instance object to an ansible-friendly fact dict."""

    # Get groups
    groups = []
    for group in instance.groups:
        groups.append({ 'id': group.id, 'name': group.name }.copy())

    # Get interfaces
    interfaces = []
    for interface in instance.interfaces:
        interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy())

    # If an instance is terminated, sourceDestCheck is no longer returned
    try:
        source_dest_check = instance.sourceDestCheck
    except AttributeError:
        source_dest_check = None

    # Get block device mapping
    # bdm_dict is initialized inside the try so it is [] when the instance
    # has no block_device_mapping attribute (the getattr raises before any
    # entries are appended).
    try:
        bdm_dict = []
        bdm = getattr(instance, 'block_device_mapping')
        for device_name in bdm.keys():
            bdm_dict.append({
                'device_name': device_name,
                'status': bdm[device_name].status,
                'volume_id': bdm[device_name].volume_id,
                'delete_on_termination': bdm[device_name].delete_on_termination,
                'attach_time': bdm[device_name].attach_time
            })
    except AttributeError:
        pass

    # NOTE(review): _placement and _state are private boto attributes; this
    # relies on boto's internal representation — confirm against the boto
    # version in use.
    instance_info = { 'id': instance.id,
                    'kernel': instance.kernel,
                    'instance_profile': instance.instance_profile,
                    'root_device_type': instance.root_device_type,
                    'private_dns_name': instance.private_dns_name,
                    'public_dns_name': instance.public_dns_name,
                    'ebs_optimized': instance.ebs_optimized,
                    'client_token': instance.client_token,
                    'virtualization_type': instance.virtualization_type,
                    'architecture': instance.architecture,
                    'ramdisk': instance.ramdisk,
                    'tags': instance.tags,
                    'key_name': instance.key_name,
                    'source_destination_check': source_dest_check,
                    'image_id': instance.image_id,
                    'groups': groups,
                    'interfaces': interfaces,
                    'spot_instance_request_id': instance.spot_instance_request_id,
                    'requester_id': instance.requester_id,
                    'monitoring_state': instance.monitoring_state,
                    'placement': {
                        'tenancy': instance._placement.tenancy,
                        'zone': instance._placement.zone
                        },
                    'ami_launch_index': instance.ami_launch_index,
                    'launch_time': instance.launch_time,
                    'hypervisor': instance.hypervisor,
                    'region': instance.region.name,
                    'persistent': instance.persistent,
                    'private_ip_address': instance.private_ip_address,
                    'public_ip_address': instance.ip_address,
                    'state': instance._state.name,
                    'vpc_id': instance.vpc_id,
                    'block_device_mapping': bdm_dict,
                    }

    return instance_info
||||
|
||||
def list_ec2_instances(connection, module):
    """Fetch all EC2 instances matching the filters and exit with their facts."""
    instance_filters = module.params.get("filters")

    try:
        found = connection.get_only_instances(filters=instance_filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    # fail_json raises, so this line is only reached on success.
    module.exit_json(instances=[get_instance_info(inst) for inst in found])
|
||||
|
||||
|
||||
def main():
    """Module entry point: validate boto, connect to EC2, and list instances."""
    spec = ec2_argument_spec()
    spec.update(dict(
        filters=dict(default=None, type='dict')
    ))

    module = AnsibleModule(argument_spec=spec,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    list_ec2_instances(connection, module)


if __name__ == '__main__':
    main()
|
||||
233
lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
Normal file
233
lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
Normal file
@@ -0,0 +1,233 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_snapshot_facts
|
||||
short_description: Gather facts about ec2 volume snapshots in AWS
|
||||
description:
|
||||
- Gather facts about ec2 volume snapshots in AWS
|
||||
version_added: "2.1"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
snapshot_ids:
|
||||
description:
|
||||
- If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
|
||||
required: false
|
||||
default: []
|
||||
owner_ids:
|
||||
description:
|
||||
- If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have \
|
||||
access are returned.
|
||||
required: false
|
||||
default: []
|
||||
restorable_by_user_ids:
|
||||
description:
|
||||
- If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are \
|
||||
returned.
|
||||
required: false
|
||||
default: []
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
|
||||
U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter \
|
||||
names and values are case sensitive.
|
||||
required: false
|
||||
default: {}
|
||||
notes:
|
||||
- By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by \
|
||||
the account use the filter 'owner-id'.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all snapshots, including public ones
|
||||
- ec2_snapshot_facts:
|
||||
|
||||
# Gather facts about all snapshots owned by the account 0123456789
|
||||
- ec2_snapshot_facts:
|
||||
filters:
|
||||
owner-id: 0123456789
|
||||
|
||||
# Or alternatively...
|
||||
- ec2_snapshot_facts:
|
||||
owner_ids:
|
||||
- 0123456789
|
||||
|
||||
# Gather facts about a particular snapshot using ID
|
||||
- ec2_snapshot_facts:
|
||||
filters:
|
||||
snapshot-id: snap-00112233
|
||||
|
||||
# Or alternatively...
|
||||
- ec2_snapshot_facts:
|
||||
snapshot_ids:
|
||||
- snap-00112233
|
||||
|
||||
# Gather facts about any snapshot with a tag key Name and value Example
|
||||
- ec2_snapshot_facts:
|
||||
filters:
|
||||
"tag:Name": Example
|
||||
|
||||
# Gather facts about any snapshot with an error status
|
||||
- ec2_snapshot_facts:
|
||||
filters:
|
||||
status: error
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
snapshot_id:
|
||||
description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
|
||||
type: string
|
||||
sample: snap-01234567
|
||||
volume_id:
|
||||
description: The ID of the volume that was used to create the snapshot.
|
||||
type: string
|
||||
sample: vol-01234567
|
||||
state:
|
||||
description: The snapshot state (completed, pending or error).
|
||||
type: string
|
||||
sample: completed
|
||||
state_message:
|
||||
description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred.
|
||||
type: string
|
||||
sample:
|
||||
start_time:
|
||||
description: The time stamp when the snapshot was initiated.
|
||||
type: datetime
|
||||
sample: 2015-02-12T02:14:02+00:00
|
||||
progress:
|
||||
description: The progress of the snapshot, as a percentage.
|
||||
type: string
|
||||
sample: 100%
|
||||
owner_id:
|
||||
description: The AWS account ID of the EBS snapshot owner.
|
||||
type: string
|
||||
sample: 099720109477
|
||||
description:
|
||||
description: The description for the snapshot.
|
||||
type: string
|
||||
sample: My important backup
|
||||
volume_size:
|
||||
description: The size of the volume, in GiB.
|
||||
type: integer
|
||||
sample: 8
|
||||
owner_alias:
|
||||
description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
|
||||
type: string
|
||||
sample: 033440102211
|
||||
tags:
|
||||
description: Any tags assigned to the snapshot.
|
||||
type: list
|
||||
sample: "{ 'my_tag_key': 'my_tag_value' }"
|
||||
encrypted:
|
||||
description: Indicates whether the snapshot is encrypted.
|
||||
type: boolean
|
||||
sample: True
|
||||
kms_key_id:
|
||||
description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \
|
||||
protect the volume encryption key for the parent volume.
|
||||
type: string
|
||||
sample: 74c9742a-a1b2-45cb-b3fe-abcdef123456
|
||||
data_encryption_key_id:
|
||||
description: The data encryption key identifier for the snapshot. This value is a unique identifier that \
|
||||
corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
|
||||
type: string
|
||||
sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError, NoCredentialsError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
|
||||
boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
|
||||
ec2_argument_spec, get_aws_connection_info)
|
||||
|
||||
|
||||
def list_ec2_snapshots(connection, module):
    """Describe EBS snapshots matching the module parameters and exit the module.

    Calls module.exit_json with the snake_cased snapshot list on success and
    module.fail_json on a client error; never returns normally.
    """
    snapshot_ids = module.params.get("snapshot_ids")
    # Coerce account ids to strings: YAML can parse bare AWS account ids as
    # integers, but botocore requires lists of strings.  Build real lists --
    # on Python 3 map() returns a lazy iterator, which botocore's parameter
    # validation rejects.
    owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
    restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")]
    filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))

    try:
        snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids,
                                                  RestorableByUserIds=restorable_by_user_ids, Filters=filters)
    except ClientError as e:
        # ClientError has no .message attribute on Python 3; str(e) carries
        # the AWS error code and message.
        module.fail_json(msg=str(e))

    # Turn the boto3 CamelCase result into ansible_friendly_snaked_names
    snaked_snapshots = [camel_dict_to_snake_dict(snapshot) for snapshot in snapshots['Snapshots']]

    # Turn the boto3 tag list into an ansible friendly tag dictionary
    for snapshot in snaked_snapshots:
        if 'tags' in snapshot:
            snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'])

    module.exit_json(snapshots=snaked_snapshots)
|
||||
|
||||
|
||||
def main():
    """Module entry point for gathering EBS snapshot facts."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            snapshot_ids=dict(default=[], type='list'),
            owner_ids=dict(default=[], type='list'),
            restorable_by_user_ids=dict(default=[], type='list'),
            filters=dict(default={}, type='dict')
        )
    )

    # The four selection mechanisms are alternative ways of narrowing the
    # result set, so only one may be supplied per task.
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
        ]
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    # fail_json exits, so falling through below implies region is set.
    if not region:
        module.fail_json(msg="region must be specified")

    connection = boto3_conn(module, conn_type='client', resource='ec2', region=region,
                            endpoint=ec2_url, **aws_connect_params)

    list_ec2_snapshots(connection, module)


if __name__ == '__main__':
    main()
|
||||
145
lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
Normal file
145
lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
Normal file
@@ -0,0 +1,145 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vol_facts
|
||||
short_description: Gather facts about ec2 volumes in AWS
|
||||
description:
|
||||
- Gather facts about ec2 volumes in AWS
|
||||
version_added: "2.1"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
|
||||
required: false
|
||||
default: null
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all volumes
|
||||
- ec2_vol_facts:
|
||||
|
||||
# Gather facts about a particular volume using volume ID
|
||||
- ec2_vol_facts:
|
||||
filters:
|
||||
volume-id: vol-00112233
|
||||
|
||||
# Gather facts about any volume with a tag key Name and value Example
|
||||
- ec2_vol_facts:
|
||||
filters:
|
||||
"tag:Name": Example
|
||||
|
||||
# Gather facts about any volume that is attached
|
||||
- ec2_vol_facts:
|
||||
filters:
|
||||
attachment.status: attached
|
||||
|
||||
'''
|
||||
|
||||
# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
|
||||
# fix this
|
||||
RETURN = '''# '''
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
                                      ec2_argument_spec, get_aws_connection_info)
|
||||
|
||||
|
||||
def get_volume_info(volume):
    """Build an ansible-friendly dict describing a single boto EBS volume."""
    attach_data = volume.attach_data

    # Flatten the attachment object into a plain dict first.
    attachment_set = {
        'attach_time': attach_data.attach_time,
        'device': attach_data.device,
        'instance_id': attach_data.instance_id,
        'status': attach_data.status
    }

    return {
        'create_time': volume.create_time,
        'id': volume.id,
        'iops': volume.iops,
        'size': volume.size,
        'snapshot_id': volume.snapshot_id,
        'status': volume.status,
        'type': volume.type,
        'zone': volume.zone,
        'region': volume.region.name,
        'attachment_set': attachment_set,
        'tags': volume.tags
    }
||||
|
||||
def list_ec2_volumes(connection, module):
    """Fetch all EBS volumes matching the module's filters and exit with facts.

    Exits via module.exit_json (success) or module.fail_json (boto error).
    """
    filters = module.params.get("filters")

    try:
        all_volumes = connection.get_all_volumes(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    # One info dict per volume, in the order boto returned them.
    volume_dict_array = [get_volume_info(volume) for volume in all_volumes]

    module.exit_json(volumes=volume_dict_array)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the spec, connect to EC2 and list volumes."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        # StandardError is Python-2-only and raises NameError on Python 3.
        # Catch AnsibleAWSError instead, which connect_to_aws raises for
        # unsupported regions, matching the other AWS facts modules.
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    list_ec2_volumes(connection, module)


if __name__ == '__main__':
    main()
|
||||
389
lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options.py
Normal file
389
lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options.py
Normal file
@@ -0,0 +1,389 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: ec2_vpc_dhcp_options
|
||||
short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's
|
||||
requested
|
||||
description:
|
||||
- This module removes, or creates DHCP option sets, and can associate them to a VPC.
|
||||
Optionally, a new DHCP Options set can be created that converges a VPC's existing
|
||||
DHCP option set with values provided.
|
||||
When dhcp_options_id is provided, the module will
|
||||
1. remove (with state='absent')
|
||||
2. ensure tags are applied (if state='present' and tags are provided
|
||||
3. attach it to a VPC (if state='present' and a vpc_id is provided.
|
||||
If any of the optional values are missing, they will either be treated
|
||||
as a no-op (i.e., inherit what already exists for the VPC)
|
||||
To remove existing options while inheriting, supply an empty value
|
||||
(e.g. set ntp_servers to [] if you want to remove them from the VPC's options)
|
||||
Most of the options should be self-explanatory.
|
||||
author: "Joel Thompson (@joelthompson)"
|
||||
version_added: 2.1
|
||||
options:
|
||||
domain_name:
|
||||
description:
|
||||
- The domain name to set in the DHCP option sets
|
||||
required: false
|
||||
default: None
|
||||
dns_servers:
|
||||
description:
|
||||
- A list of hosts to set the DNS servers for the VPC to. (Should be a
|
||||
list of IP addresses rather than host names.)
|
||||
required: false
|
||||
default: None
|
||||
ntp_servers:
|
||||
description:
|
||||
- List of hosts to advertise as NTP servers for the VPC.
|
||||
required: false
|
||||
default: None
|
||||
netbios_name_servers:
|
||||
description:
|
||||
- List of hosts to advertise as NetBIOS servers.
|
||||
required: false
|
||||
default: None
|
||||
netbios_node_type:
|
||||
description:
|
||||
- NetBIOS node type to advertise in the DHCP options.
|
||||
The AWS recommendation is to use 2 (when using netbios name services)
|
||||
http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html
|
||||
required: false
|
||||
default: None
|
||||
vpc_id:
|
||||
description:
|
||||
- VPC ID to associate with the requested DHCP option set.
|
||||
If no vpc id is provided, and no matching option set is found then a new
|
||||
DHCP option set is created.
|
||||
required: false
|
||||
default: None
|
||||
delete_old:
|
||||
description:
|
||||
- Whether to delete the old VPC DHCP option set when associating a new one.
|
||||
This is primarily useful for debugging/development purposes when you
|
||||
want to quickly roll back to the old option set. Note that this setting
|
||||
will be ignored, and the old DHCP option set will be preserved, if it
|
||||
is in use by any other VPC. (Otherwise, AWS will return an error.)
|
||||
required: false
|
||||
default: true
|
||||
inherit_existing:
|
||||
description:
|
||||
- For any DHCP options not specified in these parameters, whether to
|
||||
inherit them from the options set already applied to vpc_id, or to
|
||||
reset them to be empty.
|
||||
required: false
|
||||
default: false
|
||||
tags:
|
||||
description:
|
||||
- Tags to be applied to a VPC options set if a new one is created, or
|
||||
if the resource_id is provided. (options must match)
|
||||
required: False
|
||||
default: None
|
||||
aliases: [ 'resource_tags']
|
||||
version_added: "2.1"
|
||||
dhcp_options_id:
|
||||
description:
|
||||
- The resource_id of an existing DHCP options set.
|
||||
If this is specified, then it will override other settings, except tags
|
||||
(which will be updated to match)
|
||||
required: False
|
||||
default: None
|
||||
version_added: "2.1"
|
||||
state:
|
||||
description:
|
||||
- create/assign or remove the DHCP options.
|
||||
If state is set to absent, then a DHCP options set matched either
|
||||
by id, or tags and options will be removed if possible.
|
||||
required: False
|
||||
default: present
|
||||
choices: [ 'absent', 'present' ]
|
||||
version_added: "2.1"
|
||||
extends_documentation_fragment: aws
|
||||
requirements:
|
||||
- boto
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
new_options:
|
||||
description: The DHCP options created, associated or found
|
||||
returned: when appropriate
|
||||
type: dict
|
||||
sample:
|
||||
domain-name-servers:
|
||||
- 10.0.0.1
|
||||
- 10.0.1.1
|
||||
netbois-name-servers:
|
||||
- 10.0.0.1
|
||||
- 10.0.1.1
|
||||
netbios-node-type: 2
|
||||
domain-name: "my.example.com"
|
||||
dhcp_options_id:
|
||||
description: The aws resource id of the primary DCHP options set created, found or removed
|
||||
type: string
|
||||
returned: when available
|
||||
changed:
|
||||
description: Whether the dhcp options were changed
|
||||
type: bool
|
||||
returned: always
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
|
||||
# DHCP option set that may have been attached to that VPC.
|
||||
- ec2_vpc_dhcp_options:
|
||||
domain_name: "foo.example.com"
|
||||
region: us-east-1
|
||||
dns_servers:
|
||||
- 10.0.0.1
|
||||
- 10.0.1.1
|
||||
ntp_servers:
|
||||
- 10.0.0.2
|
||||
- 10.0.1.2
|
||||
netbios_name_servers:
|
||||
- 10.0.0.1
|
||||
- 10.0.1.1
|
||||
netbios_node_type: 2
|
||||
vpc_id: vpc-123456
|
||||
delete_old: True
|
||||
inherit_existing: False
|
||||
|
||||
|
||||
# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
|
||||
# keep any other existing settings. Also, keep the old DHCP option set around.
|
||||
- ec2_vpc_dhcp_options:
|
||||
region: us-east-1
|
||||
dns_servers:
|
||||
- "{{groups['dns-primary']}}"
|
||||
- "{{groups['dns-secondary']}}"
|
||||
vpc_id: vpc-123456
|
||||
inherit_existing: True
|
||||
delete_old: False
|
||||
|
||||
|
||||
## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
|
||||
## but do not assign to a VPC
|
||||
- ec2_vpc_dhcp_options:
|
||||
region: us-east-1
|
||||
dns_servers:
|
||||
- 4.4.4.4
|
||||
- 8.8.8.8
|
||||
tags:
|
||||
Name: google servers
|
||||
Environment: Test
|
||||
|
||||
## Delete a DHCP options set that matches the tags and options specified
|
||||
- ec2_vpc_dhcp_options:
|
||||
region: us-east-1
|
||||
dns_servers:
|
||||
- 4.4.4.4
|
||||
- 8.8.8.8
|
||||
tags:
|
||||
Name: google servers
|
||||
Environment: Test
|
||||
state: absent
|
||||
|
||||
## Associate a DHCP options set with a VPC by ID
|
||||
- ec2_vpc_dhcp_options:
|
||||
region: us-east-1
|
||||
dhcp_options_id: dopt-12345678
|
||||
vpc_id: vpc-123456
|
||||
|
||||
"""
|
||||
|
||||
import boto.vpc
|
||||
import boto.ec2
|
||||
from boto.exception import EC2ResponseError
|
||||
import socket
|
||||
import collections
|
||||
|
||||
def get_resource_tags(vpc_conn, resource_id):
    """Return the tags on *resource_id* as a plain {name: value} dict."""
    all_tags = vpc_conn.get_all_tags(filters={'resource-id': resource_id})
    return dict((tag.name, tag.value) for tag in all_tags)
|
||||
|
||||
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
    """Converge the tags on *resource_id* towards *tags*.

    With add_only=True, existing tags missing from *tags* are kept; otherwise
    they are deleted.  check_mode is passed through as boto's dry_run flag.
    Returns {'changed': bool, 'tags': <tags after the operation>}.
    """
    try:
        cur_tags = get_resource_tags(vpc_conn, resource_id)
        if tags == cur_tags:
            # Already converged -- nothing to do.
            return {'changed': False, 'tags': cur_tags}

        # Tags present on the resource but not requested.
        to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
        if to_delete and not add_only:
            vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)

        # Requested tags whose key is not yet present.  NOTE(review): a key
        # present in both with a *different value* is neither deleted nor
        # re-created, so changed values are never updated -- confirm intended.
        to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
        if to_add:
            vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)

        # Re-read so the caller sees the post-change state.
        latest_tags = get_resource_tags(vpc_conn, resource_id)
        return {'changed': True, 'tags': latest_tags}
    except EC2ResponseError as e:
        # NOTE(review): neither 'module' nor 'get_error_message' is defined in
        # this scope, so reaching this handler raises NameError instead of
        # failing cleanly -- the module object needs to be passed in.
        module.fail_json(msg=get_error_message(e.args[2]))
|
||||
|
||||
def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
    """Return the DHCP options object currently associated with *vpc_id*.

    Returns None when the VPC cannot be uniquely resolved, when it uses the
    "default" option set, or when its option set cannot be uniquely fetched.
    """
    matching_vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id])
    if len(matching_vpcs) != 1:
        return None

    options_id = matching_vpcs[0].dhcp_options_id
    if options_id == "default":
        return None

    option_sets = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[options_id])
    if len(option_sets) != 1:
        return None
    return option_sets[0]
|
||||
|
||||
def match_dhcp_options(vpc_conn, tags=None, options=None):
    """Search all DHCP option sets for one matching *tags* and *options*.

    Either criterion may be falsy, in which case it is not checked.
    Returns (True, <options object>) for the first match, else (False, None).
    """
    for candidate in vpc_conn.get_all_dhcp_options():
        tags_match = (not tags) or get_resource_tags(vpc_conn, candidate.id) == tags
        if tags_match and ((not options) or candidate.options == options):
            return (True, candidate)
    return (False, None)
|
||||
|
||||
def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id):
    """Delete the option set unless a VPC still uses it; return whether deleted."""
    vpcs_using_set = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id})
    if len(vpcs_using_set) > 0:
        # Refuse to delete while still associated -- AWS would reject it anyway.
        return False
    vpc_conn.delete_dhcp_options(dhcp_options_id)
    return True
|
||||
|
||||
def main():
    """Entry point: create, find, associate or delete a VPC DHCP option set.

    Exits via module.exit_json/fail_json; never returns normally.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        dhcp_options_id=dict(type='str', default=None),
        domain_name=dict(type='str', default=None),
        dns_servers=dict(type='list', default=None),
        ntp_servers=dict(type='list', default=None),
        netbios_name_servers=dict(type='list', default=None),
        netbios_node_type=dict(type='int', default=None),
        vpc_id=dict(type='str', default=None),
        delete_old=dict(type='bool', default=True),
        inherit_existing=dict(type='bool', default=False),
        tags=dict(type='dict', default=None, aliases=['resource_tags']),
        state=dict(type='str', default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    params = module.params
    found = False
    changed = False
    # defaultdict(lambda: None) lets option keys be read before being set.
    new_options = collections.defaultdict(lambda: None)


    region, ec2_url, boto_params = get_aws_connection_info(module)
    connection = connect_to_aws(boto.vpc, region, **boto_params)

    existing_options = None

    # First check if we were given a dhcp_options_id
    if not params['dhcp_options_id']:
        # No, so create new_options from the parameters
        if params['dns_servers'] != None:
            new_options['domain-name-servers'] = params['dns_servers']
        if params['netbios_name_servers'] != None:
            new_options['netbios-name-servers'] = params['netbios_name_servers']
        if params['ntp_servers'] != None:
            new_options['ntp-servers'] = params['ntp_servers']
        if params['domain_name'] != None:
            # needs to be a list for comparison with boto objects later
            new_options['domain-name'] = [ params['domain_name'] ]
        if params['netbios_node_type'] != None:
            # needs to be a list for comparison with boto objects later
            new_options['netbios-node-type'] = [ str(params['netbios_node_type']) ]
        # If we were given a vpc_id then we need to look at the options on that
        if params['vpc_id']:
            existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
            # if we've been asked to inherit existing options, do that now
            if params['inherit_existing']:
                if existing_options:
                    for option in [ 'domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
                        # inherit only where the caller supplied nothing (None or [''])
                        # and did not explicitly ask for "empty" ([]).
                        if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
                            new_options[option] = existing_options.options.get(option)

            # Do the vpc's dhcp options already match what we're asked for? if so we are done
            if existing_options and new_options == existing_options.options:
                module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id)

        # If no vpc_id was given, or the options don't match then look for an existing set using tags
        found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options)

    # Now let's cover the case where there are existing options that we were told about by id
    # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
    else:
        supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']})
        if len(supplied_options) != 1:
            if params['state'] != 'absent':
                module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
        else:
            found = True
            dhcp_option = supplied_options[0]
            if params['state'] != 'absent' and params['tags']:
                ensure_tags(connection, dhcp_option.id, params['tags'], False, module.check_mode)

    # Now we have the dhcp options set, let's do the necessary

    # if we found options we were asked to remove then try to do so
    if params['state'] == 'absent':
        if not module.check_mode:
            if found:
                changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
        module.exit_json(changed=changed, new_options={})

    # otherwise if we haven't found the required options we have something to do
    elif not module.check_mode and not found:

        # create some dhcp options if we weren't able to use existing ones
        if not found:  # NOTE(review): always true here -- already guarded by the elif above
            # Convert netbios-node-type and domain-name back to strings
            if new_options['netbios-node-type']:
                new_options['netbios-node-type'] = new_options['netbios-node-type'][0]
            if new_options['domain-name']:
                new_options['domain-name'] = new_options['domain-name'][0]

            # create the new dhcp options set requested
            dhcp_option = connection.create_dhcp_options(
                new_options['domain-name'],
                new_options['domain-name-servers'],
                new_options['ntp-servers'],
                new_options['netbios-name-servers'],
                new_options['netbios-node-type'])
            changed = True
            if params['tags']:
                ensure_tags(connection, dhcp_option.id, params['tags'], False, module.check_mode)

    # If we were given a vpc_id, then attach the options we now have to that before we finish
    if params['vpc_id'] and not module.check_mode:
        # NOTE(review): changed is forced True even if the VPC already uses
        # this option set -- confirm whether a no-op association should
        # report changed.
        changed = True
        connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
        # and remove old ones if that was requested
        if params['delete_old'] and existing_options:
            remove_dhcp_options_by_id(connection, existing_options.id)

    # NOTE(review): in check mode with state=present and no matching set
    # found, 'dhcp_option' is never bound, so the exit_json below raises
    # NameError -- needs a guard.
    module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)


from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == "__main__":
    main()
|
||||
171
lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py
Normal file
171
lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py
Normal file
@@ -0,0 +1,171 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vpc_dhcp_options_facts
|
||||
short_description: Gather facts about dhcp options sets in AWS
|
||||
description:
|
||||
- Gather facts about dhcp options sets in AWS
|
||||
version_added: "2.2"
|
||||
requirements: [ boto3 ]
|
||||
author: "Nick Aslanidis (@naslanidis)"
|
||||
options:
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
|
||||
required: false
|
||||
default: null
|
||||
DhcpOptionsIds:
|
||||
description:
|
||||
- Get details of specific DHCP Option ID
|
||||
- Provide this value as a list
|
||||
required: false
|
||||
default: None
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# # Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
- name: Gather facts about all DHCP Option sets for an account or profile
|
||||
ec2_vpc_dhcp_options_facts:
|
||||
region: ap-southeast-2
|
||||
profile: production
|
||||
register: dhcp_facts
|
||||
|
||||
- name: Gather facts about a filtered list of DHCP Option sets
|
||||
ec2_vpc_dhcp_options_facts:
|
||||
region: ap-southeast-2
|
||||
profile: production
|
||||
filters:
|
||||
"tag:Name": "abc-123"
|
||||
register: dhcp_facts
|
||||
|
||||
- name: Gather facts about a specific DHCP Option set by DhcpOptionId
|
||||
ec2_vpc_dhcp_options_facts:
|
||||
region: ap-southeast-2
|
||||
profile: production
|
||||
DhcpOptionsIds: dopt-123fece2
|
||||
register: dhcp_facts
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
dhcp_options:
|
||||
description: The dhcp option sets for the account
|
||||
returned: always
|
||||
type: list
|
||||
|
||||
changed:
|
||||
description: True if listing the dhcp options succeeds
|
||||
type: bool
|
||||
returned: always
|
||||
'''
|
||||
|
||||
import json
|
||||
|
||||
try:
|
||||
import botocore
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
def get_dhcp_options_info(dhcp_option):
    """Project a raw describe_dhcp_options item down to the fields we report.

    Keeps only DhcpOptionsId, DhcpConfigurations and Tags; any other keys
    in the source dict are dropped.
    """
    return {
        'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
        'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
        'Tags': dhcp_option['Tags'],
    }
|
||||
|
||||
|
||||
def list_dhcp_options(client, module):
    """Describe DHCP option sets matching the module params and exit the module.

    Builds the boto3 ``describe_dhcp_options`` call from the ``filters``,
    ``DryRun`` and ``DhcpOptionsIds`` module params, snake_cases each result
    and calls ``module.exit_json`` — so this function never returns normally.

    Fixes over the original:
    - ``iteritems()`` and ``basestring`` were Python-2-only and raised
      NameError/AttributeError on Python 3; replaced with ``items()`` and an
      explicit list check.
    - removed the unused ``dryrun`` local.
    """
    params = dict()

    if module.params.get('filters'):
        # Convert the ansible filters dict into boto3's
        # [{'Name': ..., 'Values': [...]}] filter-list form.
        params['Filters'] = []
        for key, value in module.params.get('filters').items():
            temp_dict = dict()
            temp_dict['Name'] = key
            if isinstance(value, (list, tuple)):
                temp_dict['Values'] = list(value)
            else:
                # scalar (usually a string) -> single-element list
                temp_dict['Values'] = [value]
            params['Filters'].append(temp_dict)

    if module.params.get("DryRun"):
        params['DryRun'] = module.params.get("DryRun")

    if module.params.get("DhcpOptionsIds"):
        params['DhcpOptionsIds'] = module.params.get("DhcpOptionsIds")

    try:
        all_dhcp_options = client.describe_dhcp_options(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))

    all_dhcp_options_array = [get_dhcp_options_info(option)
                              for option in all_dhcp_options['DhcpOptions']]

    # Turn the boto3 CamelCase result into ansible-friendly snake_case.
    snaked_dhcp_options_array = [camel_dict_to_snake_dict(option)
                                 for option in all_dhcp_options_array]

    module.exit_json(dhcp_options=snaked_dhcp_options_array)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the arg spec, connect to EC2 and list DHCP options."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters = dict(type='dict', default=None, ),
            DryRun = dict(type='bool', default=False),
            DhcpOptionsIds = dict(type='list', default=None)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='json and botocore/boto3 is required.')

    try:
        # boto3=True: resolve credentials/region in boto3 form.
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - "+str(e))

    # call your function here
    results = list_dhcp_options(connection, module)

    # NOTE(review): list_dhcp_options already calls module.exit_json, which
    # normally terminates the module — this final exit_json looks unreachable;
    # confirm before relying on the 'result' key.
    module.exit_json(result=results)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
162
lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py
Normal file
162
lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py
Normal file
@@ -0,0 +1,162 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vpc_igw
|
||||
short_description: Manage an AWS VPC Internet gateway
|
||||
description:
|
||||
- Manage an AWS VPC Internet gateway
|
||||
version_added: "2.0"
|
||||
author: Robert Estelle (@erydo)
|
||||
options:
|
||||
vpc_id:
|
||||
description:
|
||||
- The VPC ID for the VPC in which to manage the Internet Gateway.
|
||||
required: true
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- Create or terminate the IGW
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Ensure that the VPC has an Internet Gateway.
|
||||
# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
|
||||
ec2_vpc_igw:
|
||||
vpc_id: vpc-abcdefgh
|
||||
state: present
|
||||
register: igw
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
import boto.vpc
|
||||
from boto.exception import EC2ResponseError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
if __name__ != '__main__':
|
||||
raise
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class AnsibleIGWException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
    """Detach and delete every Internet Gateway attached to *vpc_id*.

    Returns a dict with a 'changed' flag. In check mode nothing is modified;
    the result only reports whether a change would occur.
    """
    attached = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    if not attached:
        return {'changed': False}

    if check_mode:
        return {'changed': True}

    for gateway in attached:
        try:
            vpc_conn.detach_internet_gateway(gateway.id, vpc_id)
            vpc_conn.delete_internet_gateway(gateway.id)
        except EC2ResponseError as err:
            raise AnsibleIGWException(
                'Unable to delete Internet Gateway, error: {0}'.format(err))

    return {'changed': True}
|
||||
|
||||
|
||||
def ensure_igw_present(vpc_conn, vpc_id, check_mode):
    """Ensure *vpc_id* has an Internet Gateway, creating one when missing.

    Returns {'changed': bool, 'gateway_id': str or None}. Raises
    AnsibleIGWException when more than one gateway is attached or when
    creation/attachment fails.
    """
    attached = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    if len(attached) > 1:
        raise AnsibleIGWException(
            'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
            .format(vpc_id))

    if attached:
        # Already present — nothing to do.
        return {'changed': False, 'gateway_id': attached[0].id}

    if check_mode:
        # Would create one; no ID available without actually doing it.
        return {'changed': True, 'gateway_id': None}

    try:
        gateway = vpc_conn.create_internet_gateway()
        vpc_conn.attach_internet_gateway(gateway.id, vpc_id)
        return {'changed': True, 'gateway_id': gateway.id}
    except EC2ResponseError as err:
        raise AnsibleIGWException(
            'Unable to create Internet Gateway, error: {0}'.format(err))
|
||||
|
||||
|
||||
def main():
    """Module entry point: create or remove the VPC's Internet Gateway (boto2)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id = dict(required=True),
            state = dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    # A region is mandatory for boto's VPC connection.
    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    vpc_id = module.params.get('vpc_id')
    state = module.params.get('state', 'present')

    # Dispatch on state; both helpers honour check mode and raise
    # AnsibleIGWException on AWS-side failures.
    try:
        if state == 'present':
            result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode)
    except AnsibleIGWException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)
|
||||
548
lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
Normal file
548
lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
Normal file
@@ -0,0 +1,548 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: ec2_vpc_nacl
|
||||
short_description: create and delete Network ACLs.
|
||||
description:
|
||||
- Read the AWS documentation for Network ACLS
|
||||
U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
|
||||
version_added: "2.2"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Tagged name identifying a network ACL.
|
||||
required: true
|
||||
vpc_id:
|
||||
description:
|
||||
- VPC id of the requesting VPC.
|
||||
required: true
|
||||
subnets:
|
||||
description:
|
||||
- The list of subnets that should be associated with the network ACL.
|
||||
- Must be specified as a list
|
||||
- Each subnet can be specified as subnet ID, or its tagged name.
|
||||
required: false
|
||||
egress:
|
||||
description:
|
||||
- A list of rules for outgoing traffic.
|
||||
- Each rule must be specified as a list.
|
||||
required: false
|
||||
ingress:
|
||||
description:
|
||||
- List of rules for incoming traffic.
|
||||
- Each rule must be specified as a list.
|
||||
required: false
|
||||
tags:
|
||||
description:
|
||||
- Dictionary of tags to look for and apply when creating a network ACL.
|
||||
required: false
|
||||
state:
|
||||
description:
|
||||
- Creates or modifies an existing NACL
|
||||
- Deletes a NACL and reassociates subnets to the default NACL
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: present
|
||||
author: Mike Mochan(@mmochan)
|
||||
extends_documentation_fragment: aws
|
||||
requirements: [ botocore, boto3, json ]
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
# Complete example to create and delete a network ACL
|
||||
# that allows SSH, HTTP and ICMP in, and all traffic out.
|
||||
- name: "Create and associate production DMZ network ACL with DMZ subnets"
|
||||
ec2_vpc_nacl:
|
||||
vpc_id: vpc-12345678
|
||||
name: prod-dmz-nacl
|
||||
region: ap-southeast-2
|
||||
subnets: ['prod-dmz-1', 'prod-dmz-2']
|
||||
tags:
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
Description: production DMZ
|
||||
ingress: [
|
||||
# rule no, protocol, allow/deny, cidr, icmp_code, icmp_type,
|
||||
# port from, port to
|
||||
[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22],
|
||||
[200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80],
|
||||
[300, 'icmp', 'allow', '0.0.0.0/0', 0, 8],
|
||||
]
|
||||
egress: [
|
||||
[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
|
||||
]
|
||||
state: 'present'
|
||||
|
||||
- name: "Remove the ingress and egress rules - defaults to deny all"
|
||||
ec2_vpc_nacl:
|
||||
vpc_id: vpc-12345678
|
||||
name: prod-dmz-nacl
|
||||
region: ap-southeast-2
|
||||
subnets:
|
||||
- prod-dmz-1
|
||||
- prod-dmz-2
|
||||
tags:
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
Description: production DMZ
|
||||
state: present
|
||||
|
||||
- name: "Remove the NACL subnet associations and tags"
|
||||
ec2_vpc_nacl:
|
||||
vpc_id: 'vpc-12345678'
|
||||
name: prod-dmz-nacl
|
||||
region: ap-southeast-2
|
||||
state: present
|
||||
|
||||
- name: "Delete nacl and subnet associations"
|
||||
ec2_vpc_nacl:
|
||||
vpc_id: vpc-12345678
|
||||
name: prod-dmz-nacl
|
||||
state: absent
|
||||
'''
|
||||
RETURN = '''
|
||||
task:
|
||||
description: The result of the create, or delete action.
|
||||
returned: success
|
||||
type: dictionary
|
||||
'''
|
||||
|
||||
try:
|
||||
import botocore
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
# Common fields for the default rule that is contained within every VPC NACL.
|
||||
DEFAULT_RULE_FIELDS = {
|
||||
'RuleNumber': 32767,
|
||||
'RuleAction': 'deny',
|
||||
'CidrBlock': '0.0.0.0/0',
|
||||
'Protocol': '-1'
|
||||
}
|
||||
|
||||
DEFAULT_INGRESS = dict(DEFAULT_RULE_FIELDS.items() + [('Egress', False)])
|
||||
DEFAULT_EGRESS = dict(DEFAULT_RULE_FIELDS.items() + [('Egress', True)])
|
||||
|
||||
# VPC-supported IANA protocol numbers
|
||||
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
|
||||
PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
|
||||
|
||||
|
||||
#Utility methods
|
||||
def icmp_present(entry):
    """Return True if *entry* is a 6-field ICMP rule (rule with no port range).

    An ICMP rule is [num, protocol, action, cidr, icmp_type, icmp_code];
    the protocol may be given as 'icmp', 1 or '1'.
    """
    # Bug fix: the original read
    #   len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1
    # which, because `and` binds tighter than `or`, classified ANY entry whose
    # protocol equals 1 as ICMP regardless of its length, and returned None
    # instead of False otherwise. Parenthesize the intended grouping.
    return len(entry) == 6 and entry[1] in ('icmp', 1, '1')
|
||||
|
||||
|
||||
def load_tags(module):
    """Build a boto3 Tags list from the module's `tags` param plus a Name tag.

    Returns [{'Key': k, 'Value': str(v)}, ...] for every user tag, always
    followed by {'Key': 'Name', 'Value': <module name param>}.
    """
    tags = []
    if module.params.get('tags'):
        # Bug fix: `iteritems()` was Python-2-only and raises AttributeError
        # on Python 3; `items()` works on both.
        for name, value in module.params.get('tags').items():
            tags.append({'Key': name, 'Value': str(value)})
    # The NACL is always tagged with its module-supplied name (the original
    # appended this in both branches; hoisted out of the if/else).
    tags.append({'Key': "Name", 'Value': module.params.get('name')})
    return tags
|
||||
|
||||
|
||||
def subnets_removed(nacl_id, subnets, client, module):
    """Return subnet IDs currently associated with the NACL but absent from *subnets*."""
    acl = find_acl_by_id(nacl_id, client, module)
    current = [assoc['SubnetId']
               for assoc in acl['NetworkAcls'][0]['Associations']]
    return [subnet_id for subnet_id in current if subnet_id not in subnets]
|
||||
|
||||
|
||||
def subnets_added(nacl_id, subnets, client, module):
    """Return the entries of *subnets* not yet associated with the NACL."""
    acl = find_acl_by_id(nacl_id, client, module)
    current = [assoc['SubnetId']
               for assoc in acl['NetworkAcls'][0]['Associations']]
    return [subnet_id for subnet_id in subnets if subnet_id not in current]
|
||||
|
||||
|
||||
def subnets_changed(nacl, client, module):
    """Reconcile the NACL's subnet associations with the module's `subnets` param.

    Returns True when any association was added or moved back to the VPC's
    default NACL, else False. Order matters here: additions are applied
    before removals, and an empty `subnets` param releases every current
    association to the default NACL.
    """
    changed = False
    vpc_id = module.params.get('vpc_id')
    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
    subnets = subnets_to_associate(nacl, client, module)
    if not subnets:
        # No subnets requested: hand every currently-associated subnet back
        # to the VPC's default NACL.
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
        subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
        if subnets:
            replace_network_acl_association(default_nacl_id, subnets, client, module)
            changed = True
            return changed
        changed = False
        return changed
    # Associate requested subnets that are not yet on this NACL.
    subs_added = subnets_added(nacl_id, subnets, client, module)
    if subs_added:
        replace_network_acl_association(nacl_id, subs_added, client, module)
        changed = True
    # Release subnets no longer requested back to the default NACL.
    subs_removed = subnets_removed(nacl_id, subnets, client, module)
    if subs_removed:
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
        replace_network_acl_association(default_nacl_id, subs_removed, client, module)
        changed = True
    return changed
|
||||
|
||||
|
||||
def nacls_changed(nacl, client, module):
    """Reconcile the NACL's ingress/egress entries with the module params.

    Returns True when either direction's rules had to be updated.
    """
    changed = False
    params = dict()
    params['egress'] = module.params.get('egress')
    params['ingress'] = module.params.get('ingress')

    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
    # Re-describe to get the current entries (the passed-in `nacl` is rebound here).
    nacl = describe_network_acl(client, module)
    entries = nacl['NetworkAcls'][0]['Entries']
    # Split entries by direction and strip AWS's immutable default deny-all
    # rules (rule number 32767) so they never count as a difference.
    tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS != entry]
    tmp_ingress = [entry for entry in entries if entry['Egress'] is False]
    egress = [rule for rule in tmp_egress if DEFAULT_EGRESS != rule]
    ingress = [rule for rule in tmp_ingress if DEFAULT_INGRESS != rule]
    if rules_changed(egress, params['egress'], True, nacl_id, client, module):
        changed = True
    if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
        changed = True
    return changed
|
||||
|
||||
|
||||
def tags_changed(nacl_id, client, module):
    """Sync the NACL's tags with the module's `tags`/`name` params.

    Returns True when the tag sets differed and were replaced, else False.
    """
    changed = False
    tags = dict()
    if module.params.get('tags'):
        tags = module.params.get('tags')
    tags['Name'] = module.params.get('name')
    nacl = find_acl_by_id(nacl_id, client, module)
    if nacl['NetworkAcls']:
        # Flatten both tag sets into comparable [key, value, key, value, ...]
        # lists and compare order-insensitively.
        nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
        nacl_tags = [item for sublist in nacl_values for item in sublist]
        # Bug fix: `iteritems()` was Python-2-only; `items()` works on both.
        tag_values = [[key, str(value)] for key, value in tags.items()]
        tags = [item for sublist in tag_values for item in sublist]
        if sorted(nacl_tags) == sorted(tags):
            changed = False
            return changed
        else:
            # Tags differ: wipe and recreate the full set.
            delete_tags(nacl_id, client, module)
            create_tags(nacl_id, client, module)
            changed = True
            return changed
    return changed
|
||||
|
||||
|
||||
def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
    """Diff the requested rules against AWS and apply deletions/additions.

    *aws_rules* are the current entries (boto3 dict form); *param_rules* are
    the module's rule lists, converted via process_rule_entry. Returns True
    when any entry was deleted or created.
    """
    changed = False
    rules = list()
    for entry in param_rules:
        rules.append(process_rule_entry(entry, Egress))
    if rules == aws_rules:
        return changed
    else:
        # Delete entries present in AWS but not requested.
        removed_rules = [x for x in aws_rules if x not in rules]
        if removed_rules:
            params = dict()
            for rule in removed_rules:
                params['NetworkAclId'] = nacl_id
                params['RuleNumber'] = rule['RuleNumber']
                params['Egress'] = Egress
                delete_network_acl_entry(params, client, module)
            changed = True
        # Create requested entries missing from AWS.
        added_rules = [x for x in rules if x not in aws_rules]
        if added_rules:
            for rule in added_rules:
                # Note: this mutates the processed rule dict in place.
                rule['NetworkAclId'] = nacl_id
                create_network_acl_entry(rule, client, module)
            changed = True
    return changed
|
||||
|
||||
|
||||
def process_rule_entry(entry, Egress):
    """Convert a module rule list into boto3 *_network_acl_entry kwargs.

    *entry* is [rule_no, protocol, action, cidr, ...] followed either by two
    ICMP fields (6-element form) or by port-from/port-to at positions 6 and 7.
    """
    params = {
        'RuleNumber': entry[0],
        'Protocol': str(PROTOCOL_NUMBERS[entry[1]]),
        'RuleAction': entry[2],
        'Egress': Egress,
        'CidrBlock': entry[3],
    }
    if icmp_present(entry):
        # entry[4]/entry[5] feed Type/Code respectively — NOTE(review): the
        # EXAMPLES comment lists them as icmp_code, icmp_type; confirm order.
        params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
    elif entry[6] or entry[7]:
        params['PortRange'] = {"From": entry[6], 'To': entry[7]}
    return params
|
||||
|
||||
|
||||
def restore_default_associations(assoc_ids, default_nacl_id, client, module):
    """Re-point each association in *assoc_ids* at the VPC's default NACL.

    Returns True when at least one association was restored; falls through
    (None) when there was nothing to do, matching the original behaviour.
    """
    if not assoc_ids:
        return None
    params = {'NetworkAclId': default_nacl_id[0]}
    for assoc_id in assoc_ids:
        params['AssociationId'] = assoc_id
        restore_default_acl_association(params, client, module)
    return True
|
||||
|
||||
|
||||
def construct_acl_entries(nacl, client, module):
    """Create every ingress then egress rule from the module params on a new NACL."""
    nacl_id = nacl['NetworkAcl']['NetworkAclId']
    # Ingress first, then egress — same order as the original implementation.
    for direction, egress_flag in (('ingress', False), ('egress', True)):
        for rule in module.params.get(direction):
            entry = process_rule_entry(rule, Egress=egress_flag)
            entry['NetworkAclId'] = nacl_id
            create_network_acl_entry(entry, client, module)
|
||||
|
||||
|
||||
## Module invocations
|
||||
def setup_network_acl(client, module):
    """Create the named NACL if missing, else reconcile subnets, rules and tags.

    Returns (changed, nacl_id).
    """
    changed = False
    nacl = describe_network_acl(client, module)
    if not nacl['NetworkAcls']:
        # No NACL with this Name tag yet: create and fully populate it.
        nacl = create_network_acl(module.params.get('vpc_id'), client, module)
        nacl_id = nacl['NetworkAcl']['NetworkAclId']
        create_tags(nacl_id, client, module)
        subnets = subnets_to_associate(nacl, client, module)
        replace_network_acl_association(nacl_id, subnets, client, module)
        construct_acl_entries(nacl, client, module)
        changed = True
        return(changed, nacl['NetworkAcl']['NetworkAclId'])
    else:
        # NACL exists: apply each reconciliation pass and OR the results.
        changed = False
        nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
        subnet_result = subnets_changed(nacl, client, module)
        nacl_result = nacls_changed(nacl, client, module)
        tag_result = tags_changed(nacl_id, client, module)
        if subnet_result is True or nacl_result is True or tag_result is True:
            changed = True
            return(changed, nacl_id)
        return (changed, nacl_id)
|
||||
|
||||
|
||||
def remove_network_acl(client, module):
    """Delete the named NACL, restoring its subnets to the VPC's default NACL.

    Returns (changed, result) where result maps the NACL id (or vpc id on
    lookup failure) to a status message.
    """
    changed = False
    result = dict()
    vpc_id = module.params.get('vpc_id')
    nacl = describe_network_acl(client, module)
    if nacl['NetworkAcls']:
        nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
        associations = nacl['NetworkAcls'][0]['Associations']
        assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
        if not default_nacl_id:
            # Without a default NACL we cannot re-home the subnets; bail out.
            result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
            return changed, result
        # Re-associate subnets with the default NACL before deleting.
        if restore_default_associations(assoc_ids, default_nacl_id, client, module):
            delete_network_acl(nacl_id, client, module)
            changed = True
            result[nacl_id] = "Successfully deleted"
            return changed, result
        # No associations at all: safe to delete immediately.
        if not assoc_ids:
            delete_network_acl(nacl_id, client, module)
            changed = True
            result[nacl_id] = "Successfully deleted"
            return changed, result
    return changed, result
|
||||
|
||||
|
||||
#Boto3 client methods
|
||||
def create_network_acl(vpc_id, client, module):
    """Create a new network ACL in *vpc_id*; fail the module on AWS error."""
    try:
        nacl = client.create_network_acl(VpcId=vpc_id)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return nacl
|
||||
|
||||
|
||||
def create_network_acl_entry(params, client, module):
    """Create one NACL entry from boto3 kwargs *params*; fail the module on AWS error."""
    try:
        result = client.create_network_acl_entry(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return result
|
||||
|
||||
|
||||
def create_tags(nacl_id, client, module):
    """Replace the NACL's tags with those built from the module params."""
    try:
        # Drop existing tags first so the new set fully replaces them.
        delete_tags(nacl_id, client, module)
        client.create_tags(Resources=[nacl_id], Tags=load_tags(module))
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def delete_network_acl(nacl_id, client, module):
    """Delete the NACL identified by *nacl_id*; fail the module on AWS error."""
    try:
        client.delete_network_acl(NetworkAclId=nacl_id)
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
|
||||
|
||||
|
||||
def delete_network_acl_entry(params, client, module):
    """Delete one NACL entry described by boto3 kwargs *params*; fail the module on AWS error."""
    try:
        client.delete_network_acl_entry(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def delete_tags(nacl_id, client, module):
    """Remove every tag from the NACL; fail the module on AWS error."""
    try:
        client.delete_tags(Resources=[nacl_id])
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
|
||||
|
||||
|
||||
def describe_acl_associations(subnets, client, module):
    """Return the association IDs covering *subnets*; [] when none requested."""
    if not subnets:
        return []
    try:
        acls = client.describe_network_acls(Filters=[
            {'Name': 'association.subnet-id', 'Values': subnets}
        ])
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
    assocs = acls['NetworkAcls'][0]['Associations']
    return [assoc['NetworkAclAssociationId']
            for assoc in assocs if assoc['SubnetId'] in subnets]
|
||||
|
||||
|
||||
def describe_network_acl(client, module):
    """Look up NACLs whose Name tag matches the module's `name` param."""
    try:
        nacl = client.describe_network_acls(Filters=[
            {'Name': 'tag:Name', 'Values': [module.params.get('name')]}
        ])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return nacl
|
||||
|
||||
|
||||
def find_acl_by_id(nacl_id, client, module):
    """Describe the NACL with the given ID; fail the module on AWS error."""
    try:
        return client.describe_network_acls(NetworkAclIds=[nacl_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def find_default_vpc_nacl(vpc_id, client, module):
    """Return the IDs of *vpc_id*'s default NACL(s) as a list."""
    try:
        response = client.describe_network_acls(Filters=[
            {'Name': 'vpc-id', 'Values': [vpc_id]}])
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
    return [acl['NetworkAclId']
            for acl in response['NetworkAcls'] if acl['IsDefault'] == True]
|
||||
|
||||
|
||||
def find_subnet_ids_by_nacl_id(nacl_id, client, module):
    """Return the subnet IDs whose ACL association points at *nacl_id*; [] when none."""
    try:
        found = client.describe_network_acls(Filters=[
            {'Name': 'association.network-acl-id', 'Values': [nacl_id]}
        ])
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
    if not found['NetworkAcls']:
        return []
    return [assoc['SubnetId']
            for assoc in found['NetworkAcls'][0]['Associations']
            if assoc['SubnetId']]
|
||||
|
||||
|
||||
def replace_network_acl_association(nacl_id, subnets, client, module):
    """Point every association covering *subnets* at the NACL *nacl_id*."""
    params = dict()
    params['NetworkAclId'] = nacl_id
    # One replace call per existing association ID.
    for association in describe_acl_associations(subnets, client, module):
        params['AssociationId'] = association
        try:
            client.replace_network_acl_association(**params)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
    """Replace each NACL entry in *entries* in place on the NACL *nacl_id*.

    NOTE(review): the `Egress` parameter is accepted but never used here —
    each entry dict is assumed to already carry its own 'Egress' key; confirm.
    """
    params = dict()
    for entry in entries:
        # Note: reuses (and mutates) the caller's entry dict as the kwargs.
        params = entry
        params['NetworkAclId'] = nacl_id
        try:
            client.replace_network_acl_entry(**params)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def restore_default_acl_association(params, client, module):
    """Issue one replace_network_acl_association call with the given kwargs."""
    try:
        client.replace_network_acl_association(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def subnets_to_associate(nacl, client, module):
    """Resolve the module's `subnets` param (subnet IDs or Name tags) to subnet IDs."""
    wanted = list(module.params.get('subnets'))
    if not wanted:
        return []
    # As in the original: the first entry decides whether the whole list is
    # interpreted as subnet IDs or as tag:Name values.
    filter_name = 'subnet-id' if wanted[0].startswith("subnet-") else 'tag:Name'
    try:
        found = client.describe_subnets(Filters=[
            {'Name': filter_name, 'Values': wanted}])
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
    return [subnet['SubnetId'] for subnet in found['Subnets'] if subnet['SubnetId']]
|
||||
|
||||
|
||||
def main():
    """Module entry point: create/modify or delete the named network ACL."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        vpc_id=dict(required=True),
        name=dict(required=True),
        subnets=dict(required=False, type='list', default=list()),
        tags=dict(required=False, type='dict'),
        ingress=dict(required=False, type='list', default=list()),
        egress=dict(required=False, type='list', default=list(),),
        state=dict(default='present', choices=['present', 'absent']),
        ),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')
    state = module.params.get('state').lower()
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - %s" % str(e))

    # Dispatch table: each handler returns (changed, result).
    invocations = {
        "present": setup_network_acl,
        "absent": remove_network_acl
    }
    (changed, results) = invocations[state](client, module)
    module.exit_json(changed=changed, nacl_id=results)
|
||||
205
lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_facts.py
Normal file
205
lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_facts.py
Normal file
@@ -0,0 +1,205 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vpc_nacl_facts
|
||||
short_description: Gather facts about Network ACLs in an AWS VPC
|
||||
description:
|
||||
- Gather facts about Network ACLs in an AWS VPC
|
||||
version_added: "2.2"
|
||||
author: "Brad Davidson (@brandond)"
|
||||
requirements: [ boto3 ]
|
||||
options:
|
||||
nacl_ids:
|
||||
description:
|
||||
- A list of Network ACL IDs to retrieve facts about.
|
||||
required: false
|
||||
default: []
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
|
||||
U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter \
|
||||
names and values are case sensitive.
|
||||
required: false
|
||||
default: {}
|
||||
notes:
|
||||
- By default, the module will return all Network ACLs.
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all Network ACLs:
|
||||
- name: Get All NACLs
|
||||
register: all_nacls
|
||||
ec2_vpc_nacl_facts:
|
||||
region: us-west-2
|
||||
|
||||
# Retrieve default Network ACLs:
|
||||
- name: Get Default NACLs
|
||||
register: default_nacls
|
||||
ec2_vpc_nacl_facts:
|
||||
region: us-west-2
|
||||
filters:
|
||||
'default': 'true'
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
nacl:
|
||||
description: Returns an array of complex objects as described below.
|
||||
returned: success
|
||||
type: list of complex
|
||||
contains:
|
||||
nacl_id:
|
||||
description: The ID of the Network Access Control List.
|
||||
returned: always
|
||||
type: string
|
||||
vpc_id:
|
||||
description: The ID of the VPC that the NACL is attached to.
|
||||
returned: always
|
||||
type: string
|
||||
is_default:
|
||||
description: True if the NACL is the default for its VPC.
|
||||
returned: always
|
||||
type: boolean
|
||||
tags:
|
||||
description: A dict of tags associated with the NACL.
|
||||
returned: always
|
||||
type: dict
|
||||
subnets:
|
||||
description: A list of subnet IDs that are associated with the NACL.
|
||||
returned: always
|
||||
type: list of string
|
||||
ingress:
|
||||
description: A list of NACL ingress rules.
|
||||
returned: always
|
||||
type: list of list
|
||||
egress:
|
||||
description: A list of NACL egress rules.
|
||||
returned: always
|
||||
type: list of list
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError, NoCredentialsError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
# VPC-supported IANA protocol numbers
|
||||
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
|
||||
PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
|
||||
|
||||
def list_ec2_vpc_nacls(connection, module):
    """Describe network ACLs and exit the module with snake-cased results.

    Queries EC2 using the user-supplied NACL IDs and/or filters, reshapes
    each NACL into an Ansible-friendly dict (snake_case keys, a tag dict,
    flattened ingress/egress rule lists) and calls module.exit_json().
    """
    nacl_ids = module.params.get("nacl_ids")
    filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))

    try:
        response = connection.describe_network_acls(NetworkAclIds=nacl_ids, Filters=filters)
    except (ClientError, NoCredentialsError) as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    # Convert the boto3 camelCase payload into snake_case dicts.
    snaked_nacls = [camel_dict_to_snake_dict(item) for item in response['NetworkAcls']]

    for nacl in snaked_nacls:
        if 'tags' in nacl:
            # Boto3 returns tags as a list of {Key, Value}; flatten to a dict.
            nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'])
        if 'entries' in nacl:
            # Rule number 32767 is the implicit catch-all rule; hide it.
            entries = nacl['entries']
            nacl['egress'] = [nacl_entry_to_list(entry) for entry in entries
                              if entry['egress'] and entry['rule_number'] != 32767]
            nacl['ingress'] = [nacl_entry_to_list(entry) for entry in entries
                               if not entry['egress'] and entry['rule_number'] != 32767]
            del nacl['entries']
        if 'associations' in nacl:
            nacl['subnets'] = [assoc['subnet_id'] for assoc in nacl['associations']]
            del nacl['associations']
        if 'network_acl_id' in nacl:
            # Rename to the friendlier key documented by this module.
            nacl['nacl_id'] = nacl.pop('network_acl_id')

    module.exit_json(nacls=snaked_nacls)
|
||||
|
||||
def nacl_entry_to_list(entry):
    """Flatten a snake-cased NACL entry dict into a list.

    Layout: [rule_number, protocol_name, rule_action, cidr_block, ...]
    followed by ICMP type/code slots (protocol '1') or four None
    placeholders that may be overwritten with ICMP type/code and the
    port-range from/to values.

    NOTE(review): ICMP entries produce a 6-element list while all other
    protocols produce 8 elements; an ICMP entry that also carried a
    'port_range' key would raise IndexError below -- confirm the API never
    combines the two.
    """
    # Base fields present on every entry.
    elist = [entry['rule_number'],
             PROTOCOL_NAMES[entry['protocol']],
             entry['rule_action'],
             entry['cidr_block']
             ]
    if entry['protocol'] == '1':
        # ICMP: reserve two slots for type/code, defaulting to -1 (any).
        elist = elist + [-1, -1]
    else:
        # Other protocols: slots for icmp type/code and port from/to.
        elist = elist + [None, None, None, None]

    if 'icmp_type_code' in entry:
        elist[4] = entry['icmp_type_code']['type']
        elist[5] = entry['icmp_type_code']['code']

    if 'port_range' in entry:
        elist[6] = entry['port_range']['from']
        elist[7] = entry['port_range']['to']

    return elist
|
||||
|
||||
def main():
    """Module entry point: parse arguments, connect to EC2 and list NACLs."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            nacl_ids=dict(default=[], type='list'),
            filters=dict(default={}, type='dict')
        )
    )

    # nacl_ids and filters are two alternative lookup mechanisms.
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['nacl_ids', 'filters']
                           ]
                           )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        # No implicit default region: require it explicitly.
        module.fail_json(msg="region must be specified")

    list_ec2_vpc_nacls(connection, module)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
1089
lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
Normal file
1089
lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
Normal file
File diff suppressed because it is too large
Load Diff
131
lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
Normal file
131
lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
Normal file
@@ -0,0 +1,131 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vpc_net_facts
|
||||
short_description: Gather facts about ec2 VPCs in AWS
|
||||
description:
|
||||
- Gather facts about ec2 VPCs in AWS
|
||||
version_added: "2.1"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
|
||||
required: false
|
||||
default: null
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all VPCs
|
||||
- ec2_vpc_net_facts:
|
||||
|
||||
# Gather facts about a particular VPC using VPC ID
|
||||
- ec2_vpc_net_facts:
|
||||
filters:
|
||||
vpc-id: vpc-00112233
|
||||
|
||||
# Gather facts about any VPC with a tag key Name and value Example
|
||||
- ec2_vpc_net_facts:
|
||||
filters:
|
||||
"tag:Name": Example
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.vpc
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def get_vpc_info(vpc):
    """Return a plain dict describing a boto VPC object.

    ClassicLink support is not exposed by every boto version/region, so a
    missing attribute is reported as False instead of raising.
    """
    classic_link = getattr(vpc, 'classic_link_enabled', False)

    return {
        'id': vpc.id,
        'instance_tenancy': vpc.instance_tenancy,
        'classic_link_enabled': classic_link,
        'dhcp_options_id': vpc.dhcp_options_id,
        'state': vpc.state,
        'is_default': vpc.is_default,
        'cidr_block': vpc.cidr_block,
        'tags': vpc.tags,
    }
|
||||
|
||||
def list_ec2_vpcs(connection, module):
    """Fetch every VPC matching the user's filters and exit with the results."""
    filters = module.params.get("filters")

    try:
        matching = connection.get_all_vpcs(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(vpcs=[get_vpc_info(vpc) for vpc in matching])
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments, connect to EC2 and list VPCs."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except Exception as e:
            # Catch Exception rather than the Python-2-only StandardError
            # (a NameError on Python 3); boto.exception.NoAuthHandlerFound,
            # previously listed explicitly, is an Exception subclass and so
            # is still handled.
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    list_ec2_vpcs(connection, module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
367
lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
Normal file
367
lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
Normal file
@@ -0,0 +1,367 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: ec2_vpc_peer
|
||||
short_description: create, delete, accept, and reject VPC peering connections between two VPCs.
|
||||
description:
|
||||
- Read the AWS documentation for VPC Peering Connections
|
||||
U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html)
|
||||
version_added: "2.2"
|
||||
options:
|
||||
vpc_id:
|
||||
description:
|
||||
- VPC id of the requesting VPC.
|
||||
required: false
|
||||
peer_vpc_id:
|
||||
description:
|
||||
- VPC id of the accepting VPC.
|
||||
required: false
|
||||
peer_owner_id:
|
||||
description:
|
||||
- The AWS account number for cross account peering.
|
||||
required: false
|
||||
tags:
|
||||
description:
|
||||
- Dictionary of tags to look for and apply when creating a Peering Connection.
|
||||
required: false
|
||||
state:
|
||||
description:
|
||||
- Create, delete, accept, reject a peering connection.
|
||||
required: false
|
||||
default: present
|
||||
choices: ['present', 'absent', 'accept', 'reject']
|
||||
author: Mike Mochan(@mmochan)
|
||||
extends_documentation_fragment: aws
|
||||
requirements: [ botocore, boto3, json ]
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Complete example to create and accept a local peering connection.
|
||||
- name: Create local account VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
vpc_id: vpc-12345678
|
||||
peer_vpc_id: vpc-87654321
|
||||
state: present
|
||||
tags:
|
||||
Name: Peering conenction for VPC 21 to VPC 22
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
register: vpc_peer
|
||||
|
||||
- name: Accept local VPC peering request
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
peering_id: "{{ vpc_peer.peering_id }}"
|
||||
state: accept
|
||||
register: action_peer
|
||||
|
||||
# Complete example to delete a local peering connection.
|
||||
- name: Create local account VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
vpc_id: vpc-12345678
|
||||
peer_vpc_id: vpc-87654321
|
||||
state: present
|
||||
tags:
|
||||
Name: Peering conenction for VPC 21 to VPC 22
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
register: vpc_peer
|
||||
|
||||
- name: delete a local VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
peering_id: "{{ vpc_peer.peering_id }}"
|
||||
state: absent
|
||||
register: vpc_peer
|
||||
|
||||
# Complete example to create and accept a cross account peering connection.
|
||||
- name: Create cross account VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
vpc_id: vpc-12345678
|
||||
peer_vpc_id: vpc-12345678
|
||||
peer_owner_id: 123456789102
|
||||
state: present
|
||||
tags:
|
||||
Name: Peering conenction for VPC 21 to VPC 22
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
register: vpc_peer
|
||||
|
||||
- name: Accept peering connection from remote account
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
peering_id: "{{ vpc_peer.peering_id }}"
|
||||
profile: bot03_profile_for_cross_account
|
||||
state: accept
|
||||
register: vpc_peer
|
||||
|
||||
# Complete example to create and reject a local peering connection.
|
||||
- name: Create local account VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
vpc_id: vpc-12345678
|
||||
peer_vpc_id: vpc-87654321
|
||||
state: present
|
||||
tags:
|
||||
Name: Peering conenction for VPC 21 to VPC 22
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
register: vpc_peer
|
||||
|
||||
- name: Reject a local VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
peering_id: "{{ vpc_peer.peering_id }}"
|
||||
state: reject
|
||||
|
||||
# Complete example to create and accept a cross account peering connection.
|
||||
- name: Create cross account VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
vpc_id: vpc-12345678
|
||||
peer_vpc_id: vpc-12345678
|
||||
peer_owner_id: 123456789102
|
||||
state: present
|
||||
tags:
|
||||
Name: Peering conenction for VPC 21 to VPC 22
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
register: vpc_peer
|
||||
|
||||
- name: Accept a cross account VPC peering connection request
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
peering_id: "{{ vpc_peer.peering_id }}"
|
||||
profile: bot03_profile_for_cross_account
|
||||
state: accept
|
||||
tags:
|
||||
Name: Peering conenction for VPC 21 to VPC 22
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
|
||||
# Complete example to create and reject a cross account peering connection.
|
||||
- name: Create cross account VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
vpc_id: vpc-12345678
|
||||
peer_vpc_id: vpc-12345678
|
||||
peer_owner_id: 123456789102
|
||||
state: present
|
||||
tags:
|
||||
Name: Peering conenction for VPC 21 to VPC 22
|
||||
CostCode: CC1234
|
||||
Project: phoenix
|
||||
register: vpc_peer
|
||||
|
||||
- name: Reject a cross account VPC peering Connection
|
||||
ec2_vpc_peer:
|
||||
region: ap-southeast-2
|
||||
peering_id: "{{ vpc_peer.peering_id }}"
|
||||
profile: bot03_profile_for_cross_account
|
||||
state: reject
|
||||
|
||||
'''
|
||||
RETURN = '''
|
||||
task:
|
||||
description: The result of the create, accept, reject or delete action.
|
||||
returned: success
|
||||
type: dictionary
|
||||
'''
|
||||
|
||||
try:
|
||||
import json
|
||||
import botocore
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
def tags_changed(pcx_id, client, module):
    """Reconcile the tags on a peering connection with the requested tags.

    Returns True (after rewriting the tags) when the current tags on the
    connection differ from module.params['tags']; False when they already
    match or the connection cannot be found.
    """
    desired = module.params.get('tags') or {}

    pcx = find_pcx_by_id(pcx_id, client, module)
    if not pcx['VpcPeeringConnections']:
        return False

    # Flatten both tag sets into [key, value, ...] lists so they can be
    # compared regardless of ordering.  Values are stringified because the
    # AWS tagging API stores strings only.  dict.items() (not the
    # Python-2-only iteritems(), which raises AttributeError on Python 3)
    # keeps this working on both interpreters.
    current = pcx['VpcPeeringConnections'][0]['Tags']
    current_flat = [item for tag in current for item in tag.values()]
    desired_flat = [item for key, value in desired.items()
                    for item in (key, str(value))]

    if sorted(current_flat) == sorted(desired_flat):
        return False

    delete_tags(pcx_id, client, module)
    create_tags(pcx_id, client, module)
    return True
|
||||
|
||||
|
||||
def describe_peering_connections(params, client):
    """Look up peering connections between the two VPCs in *params*.

    The requester/accepter roles are not known in advance, so if the first
    lookup finds nothing the query is retried with the two VPC ids swapped.
    """
    def _query(requester, accepter):
        return client.describe_vpc_peering_connections(Filters=[
            {'Name': 'requester-vpc-info.vpc-id', 'Values': [requester]},
            {'Name': 'accepter-vpc-info.vpc-id', 'Values': [accepter]}
        ])

    result = _query(params['VpcId'], params['PeerVpcId'])
    if not result['VpcPeeringConnections']:
        result = _query(params['PeerVpcId'], params['VpcId'])
    return result
|
||||
|
||||
|
||||
def is_active(peering_conn):
    """True when the peering connection has reached the 'active' state."""
    status_code = peering_conn['Status']['Code']
    return status_code == 'active'
|
||||
|
||||
|
||||
def is_pending(peering_conn):
    """True when the peering connection is waiting to be accepted."""
    status_code = peering_conn['Status']['Code']
    return status_code == 'pending-acceptance'
|
||||
|
||||
|
||||
def create_peer_connection(client, module):
    """Create (or reuse an existing) peering connection between the two VPCs.

    Returns a (changed, peering_connection_id) tuple.  If a connection
    between vpc_id and peer_vpc_id already exists in the 'active' or
    'pending-acceptance' state it is reused and only its tags are
    reconciled; otherwise a new connection is requested.
    """
    changed = False
    params = dict()
    params['VpcId'] = module.params.get('vpc_id')
    params['PeerVpcId'] = module.params.get('peer_vpc_id')
    if module.params.get('peer_owner_id'):
        # Cross-account peering: the API expects the owner id as a string.
        params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
    params['DryRun'] = module.check_mode
    peering_conns = describe_peering_connections(params, client)
    for peering_conn in peering_conns['VpcPeeringConnections']:
        pcx_id = peering_conn['VpcPeeringConnectionId']
        # Tag drift on an existing connection still counts as a change.
        if tags_changed(pcx_id, client, module):
            changed = True
        if is_active(peering_conn):
            return (changed, peering_conn['VpcPeeringConnectionId'])
        if is_pending(peering_conn):
            return (changed, peering_conn['VpcPeeringConnectionId'])
    try:
        peering_conn = client.create_vpc_peering_connection(**params)
        pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
        if module.params.get('tags'):
            create_tags(pcx_id, client, module)
        changed = True
        return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
def peer_status(client, module):
    """Return the status code of the peering connection named by 'peering_id'."""
    peering_id = module.params.get('peering_id')
    response = client.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[peering_id])
    return response['VpcPeeringConnections'][0]['Status']['Code']
|
||||
|
||||
|
||||
def accept_reject_delete(state, client, module):
    """Accept, reject, or delete the peering connection named by 'peering_id'.

    Returns a (changed, peering_connection_id) tuple.
    """
    changed = False
    params = dict()
    params['VpcPeeringConnectionId'] = module.params.get('peering_id')
    params['DryRun'] = module.check_mode
    # Map the requested state onto the corresponding boto3 client call.
    invocations = {
        'accept': client.accept_vpc_peering_connection,
        'reject': client.reject_vpc_peering_connection,
        'absent': client.delete_vpc_peering_connection
    }
    # Deletion is always attempted; accept/reject are only issued while the
    # connection is not already active.
    if state == 'absent' or peer_status(client, module) != 'active':
        try:
            invocations[state](**params)
            if module.params.get('tags'):
                create_tags(params['VpcPeeringConnectionId'], client, module)
            changed = True
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
    # Even when no state transition happened, tag drift counts as a change.
    if tags_changed(params['VpcPeeringConnectionId'], client, module):
        changed = True
    return changed, params['VpcPeeringConnectionId']
|
||||
|
||||
|
||||
def load_tags(module):
    """Convert module.params['tags'] into the AWS [{'Key':..,'Value':..}] form.

    Values are stringified because the EC2 tagging API accepts strings only.
    Returns an empty list when no tags were supplied.
    """
    tags = module.params.get('tags') or {}
    # dict.items() rather than the Python-2-only iteritems() (which raises
    # AttributeError on Python 3) keeps this working on both interpreters.
    return [{'Key': name, 'Value': str(value)} for name, value in tags.items()]
|
||||
|
||||
|
||||
def create_tags(pcx_id, client, module):
    """Replace the tags on *pcx_id* with the tags from the module params."""
    try:
        # Clear existing tags first so removed keys do not linger.
        delete_tags(pcx_id, client, module)
        client.create_tags(Resources=[pcx_id], Tags=load_tags(module))
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
|
||||
|
||||
|
||||
def delete_tags(pcx_id, client, module):
    """Strip every tag from the peering connection *pcx_id*."""
    try:
        client.delete_tags(Resources=[pcx_id])
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
|
||||
|
||||
|
||||
def find_pcx_by_id(pcx_id, client, module):
    """Return the describe_vpc_peering_connections payload for one pcx id."""
    try:
        return client.describe_vpc_peering_connections(
            VpcPeeringConnectionIds=[pcx_id])
    except botocore.exceptions.ClientError as err:
        module.fail_json(msg=str(err))
|
||||
|
||||
|
||||
def main():
    """Module entry point: dispatch to create or accept/reject/delete."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        vpc_id=dict(),
        peer_vpc_id=dict(),
        peering_id=dict(),
        peer_owner_id=dict(),
        tags=dict(required=False, type='dict'),
        profile=dict(),
        state=dict(default='present', choices=['present', 'absent', 'accept', 'reject'])
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')
    state = module.params.get('state').lower()
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - "+str(e))

    # 'present' creates/reuses a connection; every other state operates on
    # an existing connection identified by 'peering_id'.
    if state == 'present':
        (changed, results) = create_peer_connection(client, module)
        module.exit_json(changed=changed, peering_id=results)
    else:
        (changed, results) = accept_reject_delete(state, client, module)
        module.exit_json(changed=changed, peering_id=results)
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
637
lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py
Normal file
637
lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py
Normal file
@@ -0,0 +1,637 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vpc_route_table
|
||||
short_description: Manage route tables for AWS virtual private clouds
|
||||
description:
|
||||
- Manage route tables for AWS virtual private clouds
|
||||
version_added: "2.0"
|
||||
author: Robert Estelle (@erydo), Rob White (@wimnat)
|
||||
options:
|
||||
lookup:
|
||||
description:
|
||||
- "Look up route table by either tags or by route table ID. Non-unique tag lookup will fail. If no tags are specifed then no lookup for an existing route table is performed and a new route table will be created. To change tags of a route table, you must look up by id."
|
||||
required: false
|
||||
default: tag
|
||||
choices: [ 'tag', 'id' ]
|
||||
propagating_vgw_ids:
|
||||
description:
|
||||
- "Enable route propagation from virtual gateways specified by ID."
|
||||
default: None
|
||||
required: false
|
||||
route_table_id:
|
||||
description:
|
||||
- "The ID of the route table to update or delete."
|
||||
required: false
|
||||
default: null
|
||||
routes:
|
||||
description:
|
||||
- "List of routes in the route table.
|
||||
Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
|
||||
'instance_id', 'interface_id', or 'vpc_peering_connection_id'.
|
||||
If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. Routes are required for present states."
|
||||
required: false
|
||||
default: None
|
||||
state:
|
||||
description:
|
||||
- "Create or destroy the VPC route table"
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
subnets:
|
||||
description:
|
||||
- "An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'."
|
||||
required: true
|
||||
tags:
|
||||
description:
|
||||
- "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }. Tags are used to uniquely identify route tables within a VPC when the route_table_id is not supplied."
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ "resource_tags" ]
|
||||
vpc_id:
|
||||
description:
|
||||
- "VPC ID of the VPC in which to create the route table."
|
||||
required: true
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Basic creation example:
|
||||
- name: Set up public subnet route table
|
||||
ec2_vpc_route_table:
|
||||
vpc_id: vpc-1245678
|
||||
region: us-west-1
|
||||
tags:
|
||||
Name: Public
|
||||
subnets:
|
||||
- "{{ jumpbox_subnet.subnet.id }}"
|
||||
- "{{ frontend_subnet.subnet.id }}"
|
||||
- "{{ vpn_subnet.subnet_id }}"
|
||||
routes:
|
||||
- dest: 0.0.0.0/0
|
||||
gateway_id: "{{ igw.gateway_id }}"
|
||||
register: public_route_table
|
||||
|
||||
- name: Set up NAT-protected route table
|
||||
ec2_vpc_route_table:
|
||||
vpc_id: vpc-1245678
|
||||
region: us-west-1
|
||||
tags:
|
||||
Name: Internal
|
||||
subnets:
|
||||
- "{{ application_subnet.subnet.id }}"
|
||||
- 'Database Subnet'
|
||||
- '10.0.0.0/8'
|
||||
routes:
|
||||
- dest: 0.0.0.0/0
|
||||
instance_id: "{{ nat.instance_id }}"
|
||||
register: nat_route_table
|
||||
|
||||
'''
|
||||
|
||||
import re
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
import boto.vpc
|
||||
from boto.exception import EC2ResponseError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
if __name__ != '__main__':
|
||||
raise
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class AnsibleRouteTableException(Exception):
    # Base class for all errors raised by this module's helpers.
    pass


class AnsibleIgwSearchException(AnsibleRouteTableException):
    # Raised when an Internet gateway lookup finds zero or multiple matches.
    pass


class AnsibleTagCreationException(AnsibleRouteTableException):
    # Raised when tags cannot be created or updated on a resource.
    pass


class AnsibleSubnetSearchException(AnsibleRouteTableException):
    # Raised when a subnet identifier does not resolve to exactly one subnet.
    pass
|
||||
|
||||
CIDR_RE = re.compile('^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$')
|
||||
SUBNET_RE = re.compile('^subnet-[A-z0-9]+$')
|
||||
ROUTE_TABLE_RE = re.compile('^rtb-[A-z0-9]+$')
|
||||
|
||||
|
||||
def find_subnets(vpc_conn, vpc_id, identified_subnets):
    """
    Finds a list of subnets, each identified either by a raw ID, a unique
    'Name' tag, or a CIDR such as 10.0.0.0/8.

    Raises AnsibleSubnetSearchException when an identifier matches nothing
    (or, for Name tags, more than one subnet).

    Note that this function is duplicated in other ec2 modules, and should
    potentially be moved into a shared module_utils
    """
    # Partition the identifiers by the kind of lookup each needs.
    subnet_ids = []
    subnet_names = []
    subnet_cidrs = []
    for subnet in (identified_subnets or []):
        if re.match(SUBNET_RE, subnet):
            subnet_ids.append(subnet)
        elif re.match(CIDR_RE, subnet):
            subnet_cidrs.append(subnet)
        else:
            # Anything that is neither an ID nor a CIDR is treated as a Name tag.
            subnet_names.append(subnet)

    subnets_by_id = []
    if subnet_ids:
        subnets_by_id = vpc_conn.get_all_subnets(
            subnet_ids, filters={'vpc_id': vpc_id})

        for subnet_id in subnet_ids:
            if not any(s.id == subnet_id for s in subnets_by_id):
                raise AnsibleSubnetSearchException(
                    'Subnet ID "{0}" does not exist'.format(subnet_id))

    subnets_by_cidr = []
    if subnet_cidrs:
        subnets_by_cidr = vpc_conn.get_all_subnets(
            filters={'vpc_id': vpc_id, 'cidr': subnet_cidrs})

        for cidr in subnet_cidrs:
            if not any(s.cidr_block == cidr for s in subnets_by_cidr):
                raise AnsibleSubnetSearchException(
                    'Subnet CIDR "{0}" does not exist'.format(cidr))

    subnets_by_name = []
    if subnet_names:
        subnets_by_name = vpc_conn.get_all_subnets(
            filters={'vpc_id': vpc_id, 'tag:Name': subnet_names})

        for name in subnet_names:
            # A Name tag must match exactly one subnet to be unambiguous.
            matching_count = len([1 for s in subnets_by_name if s.tags.get('Name') == name])
            if matching_count == 0:
                raise AnsibleSubnetSearchException(
                    'Subnet named "{0}" does not exist'.format(name))
            elif matching_count > 1:
                raise AnsibleSubnetSearchException(
                    'Multiple subnets named "{0}"'.format(name))

    return subnets_by_id + subnets_by_cidr + subnets_by_name
|
||||
|
||||
|
||||
def find_igw(vpc_conn, vpc_id):
    """Return the ID of the single Internet gateway attached to *vpc_id*.

    Raises AnsibleIgwSearchException when zero, or more than one, IGW is
    attached to the VPC.

    Note that this function is duplicated in other ec2 modules, and should
    potentially be moved into a shared module_utils
    """
    gateways = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    if len(gateways) == 1:
        return gateways[0].id
    if not gateways:
        raise AnsibleIgwSearchException('No IGW found for VPC {0}'.format(vpc_id))
    raise AnsibleIgwSearchException('Multiple IGWs found for VPC {0}'.format(vpc_id))
|
||||
|
||||
|
||||
def get_resource_tags(vpc_conn, resource_id):
    """Return the tags on *resource_id* as a plain {name: value} dict."""
    matching = vpc_conn.get_all_tags(filters={'resource-id': resource_id})
    return dict((tag.name, tag.value) for tag in matching)
|
||||
|
||||
|
||||
def tags_match(match_tags, candidate_tags):
    """Return True when every key/value pair of match_tags is in candidate_tags.

    candidate_tags may carry extra tags; only the requested subset is checked.
    """
    # dict.items() rather than the Python-2-only iteritems() (which raises
    # AttributeError on Python 3) keeps this working on both interpreters.
    return all((k in candidate_tags and candidate_tags[k] == v
                for k, v in match_tags.items()))
|
||||
|
||||
|
||||
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
    """Make the tags on *resource_id* match *tags*.

    When add_only is True, existing tags absent from *tags* are kept;
    otherwise they are deleted.  Returns {'changed': bool, 'tags': dict}.

    NOTE(review): keys present in both sets but with DIFFERENT values are
    neither deleted nor re-created below, so value drift on an existing key
    is not reconciled -- confirm this is intended.
    """
    try:
        cur_tags = get_resource_tags(vpc_conn, resource_id)
        if tags == cur_tags:
            return {'changed': False, 'tags': cur_tags}

        # Tags on the resource that the caller no longer wants.
        to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
        if to_delete and not add_only:
            vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)

        # Requested tags not yet on the resource.
        to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
        if to_add:
            vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)

        # Re-read so the caller sees the post-update state.
        latest_tags = get_resource_tags(vpc_conn, resource_id)
        return {'changed': True, 'tags': latest_tags}
    except EC2ResponseError as e:
        raise AnsibleTagCreationException(
            'Unable to update tags for {0}, error: {1}'.format(resource_id, e))
|
||||
|
||||
|
||||
def get_route_table_by_id(vpc_conn, vpc_id, route_table_id):
    """Return the route table with *route_table_id* in *vpc_id*, or None."""
    matches = vpc_conn.get_all_route_tables(route_table_ids=[route_table_id],
                                            filters={'vpc_id': vpc_id})
    return matches[0] if matches else None
|
||||
|
||||
def get_route_table_by_tags(vpc_conn, vpc_id, tags):
    """Return the single route table in *vpc_id* whose tags include *tags*.

    Returns None when nothing matches; raises RuntimeError when the tags
    match more than one route table (ambiguous lookup).
    """
    count = 0
    route_table = None
    route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id})
    for table in route_tables:
        this_tags = get_resource_tags(vpc_conn, table.id)
        if tags_match(tags, this_tags):
            route_table = table
            count += 1

    if count > 1:
        raise RuntimeError("Tags provided do not identify a unique route table")
    else:
        return route_table
|
||||
|
||||
|
||||
def route_spec_matches_route(route_spec, route):
    """Return True when the desired ``route_spec`` describes the existing ``route``.

    Only the keys actually present in ``route_spec`` are compared against the
    same-named attributes of the boto route object.
    """
    comparable_keys = (
        'destination_cidr_block',
        'gateway_id',
        'instance_id',
        'interface_id',
        'vpc_peering_connection_id',
    )

    # This is a workaround to catch managed NAT gateways as they do not show
    # up in any of the returned values when describing route tables.
    # The caveat of doing it this way is that if there was an existing
    # route for another nat gateway in this route table there is not a way to
    # change to another nat gateway id. Long term solution would be to utilise
    # boto3 which is a very big task for this module or to update boto.
    spec_gateway = route_spec.get('gateway_id')
    if spec_gateway and 'nat-' in spec_gateway:
        if route.destination_cidr_block == route_spec['destination_cidr_block']:
            route_fields = (route.gateway_id, route.instance_id,
                            route.interface_id, route.vpc_peering_connection_id)
            if not any(route_fields):
                return True

    for key in comparable_keys:
        if key in route_spec and route_spec[key] != getattr(route, key):
            return False
    return True
|
||||
|
||||
|
||||
def rename_key(d, old_key, new_key):
    """Move the value stored under ``old_key`` to ``new_key``, removing ``old_key``."""
    d[new_key] = d.pop(old_key)
|
||||
|
||||
|
||||
def index_of_matching_route(route_spec, routes_to_match):
    """Return the index of the first route matching ``route_spec``, or None."""
    for position, candidate in enumerate(routes_to_match):
        if route_spec_matches_route(route_spec, candidate):
            return position
    return None
|
||||
|
||||
|
||||
def ensure_routes(vpc_conn, route_table, route_specs, propagating_vgw_ids,
                  check_mode):
    """Make the routes in ``route_table`` match the desired ``route_specs``.

    Routes present in the table but not described by any spec are deleted
    (except 'local', VPC-endpoint and propagating-VGW routes); specs with no
    matching route are created.

    :returns: dict with a 'changed' flag.
    """
    # Work on a copy: matched routes are removed so leftovers are deletions.
    routes_to_match = list(route_table.routes)
    route_specs_to_create = []
    for route_spec in route_specs:
        i = index_of_matching_route(route_spec, routes_to_match)
        if i is None:
            route_specs_to_create.append(route_spec)
        else:
            del routes_to_match[i]

    # NOTE: As of boto==2.38.0, the origin of a route is not available
    # (for example, whether it came from a gateway with route propagation
    # enabled). Testing for origin == 'EnableVgwRoutePropagation' is more
    # correct than checking whether the route uses a propagating VGW.
    # The current logic will leave non-propagated routes using propagating
    # VGWs in place.
    routes_to_delete = []
    for r in routes_to_match:
        if r.gateway_id:
            # Never delete the implicit 'local' route or VPC endpoint routes.
            if r.gateway_id != 'local' and not r.gateway_id.startswith('vpce-'):
                if not propagating_vgw_ids or r.gateway_id not in propagating_vgw_ids:
                    routes_to_delete.append(r)
        else:
            # Instance/interface/peering routes with no gateway are unmatched.
            routes_to_delete.append(r)

    changed = bool(routes_to_delete or route_specs_to_create)
    if changed:
        for route in routes_to_delete:
            try:
                vpc_conn.delete_route(route_table.id,
                                      route.destination_cidr_block,
                                      dry_run=check_mode)
            except EC2ResponseError as e:
                # In check mode boto raises DryRunOperation; that is expected.
                if e.error_code == 'DryRunOperation':
                    pass

        for route_spec in route_specs_to_create:
            try:
                vpc_conn.create_route(route_table.id,
                                      dry_run=check_mode,
                                      **route_spec)
            except EC2ResponseError as e:
                # Expected in check mode; other API errors propagate.
                if e.error_code == 'DryRunOperation':
                    pass

    return {'changed': bool(changed)}
|
||||
|
||||
|
||||
def ensure_subnet_association(vpc_conn, vpc_id, route_table_id, subnet_id,
                              check_mode):
    """Associate ``subnet_id`` with ``route_table_id``, replacing any other
    explicit association the subnet currently has.

    :returns: dict with 'changed' and (except in check mode when a change is
        needed) the resulting 'association_id'.
    """
    route_tables = vpc_conn.get_all_route_tables(
        filters={'association.subnet_id': subnet_id, 'vpc_id': vpc_id}
    )
    for route_table in route_tables:
        if route_table.id is None:
            continue
        for a in route_table.associations:
            if a.subnet_id == subnet_id:
                if route_table.id == route_table_id:
                    # Already associated with the desired table.
                    return {'changed': False, 'association_id': a.id}
                else:
                    # Associated elsewhere: drop that association first.
                    if check_mode:
                        return {'changed': True}
                    vpc_conn.disassociate_route_table(a.id)

    association_id = vpc_conn.associate_route_table(route_table_id, subnet_id)
    return {'changed': True, 'association_id': association_id}
|
||||
|
||||
|
||||
def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets,
                               check_mode):
    """Make ``route_table``'s subnet associations exactly the given ``subnets``.

    Associates every subnet in ``subnets`` and removes any pre-existing
    association not re-confirmed by that pass.

    :returns: dict with a 'changed' flag.
    """
    current_association_ids = [a.id for a in route_table.associations]
    new_association_ids = []
    changed = False
    for subnet in subnets:
        result = ensure_subnet_association(
            vpc_conn, vpc_id, route_table.id, subnet.id, check_mode)
        changed = changed or result['changed']
        # Bail out early in check mode: the check-mode result from
        # ensure_subnet_association carries no 'association_id'.
        if changed and check_mode:
            return {'changed': True}
        new_association_ids.append(result['association_id'])

    # Associations that existed before but were not re-confirmed above.
    to_delete = [a_id for a_id in current_association_ids
                 if a_id not in new_association_ids]

    for a_id in to_delete:
        changed = True
        vpc_conn.disassociate_route_table(a_id, dry_run=check_mode)

    return {'changed': changed}
|
||||
|
||||
|
||||
def ensure_propagation(vpc_conn, route_table, propagating_vgw_ids,
                       check_mode):
    """Enable VGW route propagation on ``route_table`` for each gateway ID.

    :returns: dict with a 'changed' flag.
    """
    # NOTE: As of boto==2.38.0, it is not yet possible to query the existing
    # propagating gateways. However, EC2 does support this as shown in its API
    # documentation. For now, a reasonable proxy for this is the presence of
    # propagated routes using the gateway in the route table. If such a route
    # is found, propagation is almost certainly enabled.
    changed = False
    for vgw_id in propagating_vgw_ids:
        for r in list(route_table.routes):
            if r.gateway_id == vgw_id:
                # NOTE(review): this returns as soon as ONE vgw already has a
                # route, skipping enable calls for any remaining vgw ids in
                # the list — looks unintended when multiple vgws are given;
                # confirm before relying on multi-vgw behavior.
                return {'changed': False}

        changed = True
        vpc_conn.enable_vgw_route_propagation(route_table.id,
                                              vgw_id,
                                              dry_run=check_mode)

    return {'changed': changed}
|
||||
|
||||
|
||||
def ensure_route_table_absent(connection, module):
    """Delete the route table identified by the module's lookup parameters.

    Looks the table up either by tags or by ID (per the 'lookup' param);
    a missing table is not an error.

    :returns: dict with a 'changed' flag.
    """
    lookup = module.params.get('lookup')
    route_table_id = module.params.get('route_table_id')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')

    if lookup == 'tag':
        if tags is not None:
            try:
                route_table = get_route_table_by_tags(connection, vpc_id, tags)
            except EC2ResponseError as e:
                module.fail_json(msg=e.message)
            except RuntimeError as e:
                # Raised when the tags match more than one table.
                module.fail_json(msg=e.args[0])
        else:
            route_table = None
    elif lookup == 'id':
        try:
            route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
        except EC2ResponseError as e:
            module.fail_json(msg=e.message)

    # Nothing to delete: report no change.
    if route_table is None:
        return {'changed': False}

    try:
        connection.delete_route_table(route_table.id, dry_run=module.check_mode)
    except EC2ResponseError as e:
        # DryRunOperation is the expected outcome in check mode.
        if e.error_code == 'DryRunOperation':
            pass
        else:
            module.fail_json(msg=e.message)

    return {'changed': True}
|
||||
|
||||
|
||||
def get_route_table_info(route_table):
    """Build a plain dict describing a boto route table for module output."""
    # Serialize each boto route object through its attribute dict.
    serialized_routes = [route.__dict__ for route in route_table.routes]

    return {
        'id': route_table.id,
        'routes': serialized_routes,
        'tags': route_table.tags,
        'vpc_id': route_table.vpc_id,
    }
|
||||
|
||||
|
||||
def create_route_spec(connection, module, vpc_id):
    """Normalize the module's 'routes' parameter into boto create_route kwargs.

    Renames each spec's 'dest' key to 'destination_cidr_block' and resolves
    the literal gateway value 'igw' to the VPC's internet gateway ID.

    :returns: the mutated list of route-spec dicts.
    """
    routes = module.params.get('routes')

    for route_spec in routes:
        rename_key(route_spec, 'dest', 'destination_cidr_block')

        if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
            # 'igw' shorthand: substitute the actual internet gateway ID.
            igw = find_igw(connection, vpc_id)
            route_spec['gateway_id'] = igw

    return routes
|
||||
|
||||
|
||||
def ensure_route_table_present(connection, module):
    """Create/update the route table described by the module parameters.

    Finds the table by tag or ID, creates it when missing, then reconciles
    routes, VGW propagation, tags and subnet associations.  Exits the module
    directly via module.exit_json on success.
    """
    lookup = module.params.get('lookup')
    propagating_vgw_ids = module.params.get('propagating_vgw_ids')
    route_table_id = module.params.get('route_table_id')
    subnets = module.params.get('subnets')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')
    try:
        routes = create_route_spec(connection, module, vpc_id)
    except AnsibleIgwSearchException as e:
        # e.args[0] — indexing the exception itself (e[0]) fails on Python 3,
        # and siblings in this file already use e.args[0].
        module.fail_json(msg=e.args[0])

    changed = False
    tags_valid = False

    if lookup == 'tag':
        if tags is not None:
            try:
                route_table = get_route_table_by_tags(connection, vpc_id, tags)
            except EC2ResponseError as e:
                module.fail_json(msg=e.message)
            except RuntimeError as e:
                module.fail_json(msg=e.args[0])
        else:
            route_table = None
    elif lookup == 'id':
        try:
            route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
        except EC2ResponseError as e:
            module.fail_json(msg=e.message)

    # If no route table returned then create new route table
    if route_table is None:
        try:
            # Second positional arg is boto's dry_run flag.
            route_table = connection.create_route_table(vpc_id, module.check_mode)
            changed = True
        except EC2ResponseError as e:
            if e.error_code == 'DryRunOperation':
                module.exit_json(changed=True)

            module.fail_json(msg=e.message)

    if routes is not None:
        try:
            result = ensure_routes(connection, route_table, routes, propagating_vgw_ids, module.check_mode)
            changed = changed or result['changed']
        except EC2ResponseError as e:
            module.fail_json(msg=e.message)

    if propagating_vgw_ids is not None:
        result = ensure_propagation(connection, route_table,
                                    propagating_vgw_ids,
                                    check_mode=module.check_mode)
        changed = changed or result['changed']

    # tags_valid is never set True above; kept for interface stability.
    if not tags_valid and tags is not None:
        result = ensure_tags(connection, route_table.id, tags,
                             add_only=True, check_mode=module.check_mode)
        changed = changed or result['changed']

    if subnets:
        associated_subnets = []
        try:
            associated_subnets = find_subnets(connection, vpc_id, subnets)
        except EC2ResponseError as e:
            raise AnsibleRouteTableException(
                'Unable to find subnets for route table {0}, error: {1}'
                .format(route_table, e)
            )

        try:
            result = ensure_subnet_associations(connection, vpc_id, route_table, associated_subnets, module.check_mode)
            changed = changed or result['changed']
        except EC2ResponseError as e:
            raise AnsibleRouteTableException(
                'Unable to associate subnets for route table {0}, error: {1}'
                .format(route_table, e)
            )

    module.exit_json(changed=changed, route_table=get_route_table_info(route_table))
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse parameters, connect to EC2 and dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            lookup=dict(default='tag', required=False, choices=['tag', 'id']),
            propagating_vgw_ids=dict(default=None, required=False, type='list'),
            route_table_id=dict(default=None, required=False),
            routes=dict(default=[], required=False, type='list'),
            state=dict(default='present', choices=['present', 'absent']),
            subnets=dict(default=None, required=False, type='list'),
            tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
            vpc_id=dict(default=None, required=True)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    lookup = module.params.get('lookup')
    route_table_id = module.params.get('route_table_id')
    state = module.params.get('state', 'present')

    if lookup == 'id' and route_table_id is None:
        # fail_json accepts keyword arguments only; the former positional call
        # raised TypeError instead of reporting this error message.
        module.fail_json(msg="You must specify route_table_id if lookup is set to id")

    try:
        if state == 'present':
            result = ensure_route_table_present(connection, module)
        elif state == 'absent':
            result = ensure_route_table_absent(connection, module)
    except AnsibleRouteTableException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
131
lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
Normal file
131
lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
Normal file
@@ -0,0 +1,131 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vpc_route_table_facts
|
||||
short_description: Gather facts about ec2 VPC route tables in AWS
|
||||
description:
|
||||
- Gather facts about ec2 VPC route tables in AWS
|
||||
version_added: "2.0"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
|
||||
required: false
|
||||
default: null
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all VPC route tables
|
||||
- ec2_vpc_route_table_facts:
|
||||
|
||||
# Gather facts about a particular VPC route table using route table ID
|
||||
- ec2_vpc_route_table_facts:
|
||||
filters:
|
||||
route-table-id: rtb-00112233
|
||||
|
||||
# Gather facts about any VPC route table with a tag key Name and value Example
|
||||
- ec2_vpc_route_table_facts:
|
||||
filters:
|
||||
"tag:Name": Example
|
||||
|
||||
# Gather facts about any VPC route table within VPC with ID vpc-abcdef00
|
||||
- ec2_vpc_route_table_facts:
|
||||
filters:
|
||||
vpc-id: vpc-abcdef00
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.vpc
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def get_route_table_info(route_table):
    """Build a plain dict describing a boto route table for facts output."""
    # Each boto route object is serialized through its attribute dict.
    serialized_routes = [route.__dict__ for route in route_table.routes]

    return {
        'id': route_table.id,
        'routes': serialized_routes,
        'tags': route_table.tags,
        'vpc_id': route_table.vpc_id,
    }
|
||||
|
||||
def list_ec2_vpc_route_tables(connection, module):
    """Describe all route tables matching the module's 'filters' parameter
    and exit the module with the serialized results.

    Exits via module.exit_json (never returns on success) or fail_json on
    an API error.
    """
    filters = module.params.get("filters")
    route_table_dict_array = []

    try:
        all_route_tables = connection.get_all_route_tables(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    for route_table in all_route_tables:
        route_table_dict_array.append(get_route_table_info(route_table))

    module.exit_json(route_tables=route_table_dict_array)
|
||||
|
||||
|
||||
def main():
    """Facts-module entry point: parse parameters, connect and list route tables."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters = dict(default=None, type='dict')
        )
    )

    # Facts modules never change state, so check mode support is irrelevant.
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    # Exits the module with the results via exit_json.
    list_ec2_vpc_route_tables(connection, module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
276
lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py
Normal file
276
lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py
Normal file
@@ -0,0 +1,276 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vpc_subnet
|
||||
short_description: Manage subnets in AWS virtual private clouds
|
||||
description:
|
||||
- Manage subnets in AWS virtual private clouds
|
||||
version_added: "2.0"
|
||||
author: Robert Estelle (@erydo)
|
||||
options:
|
||||
az:
|
||||
description:
|
||||
- "The availability zone for the subnet. Only required when state=present."
|
||||
required: false
|
||||
default: null
|
||||
cidr:
|
||||
description:
|
||||
- "The CIDR block for the subnet. E.g. 192.0.2.0/24. Only required when state=present."
|
||||
required: false
|
||||
default: null
|
||||
tags:
|
||||
description:
|
||||
- "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'resource_tags' ]
|
||||
state:
|
||||
description:
|
||||
- "Create or remove the subnet"
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
vpc_id:
|
||||
description:
|
||||
- "VPC ID of the VPC in which to create the subnet."
|
||||
required: false
|
||||
default: null
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
- name: Create subnet for database servers
|
||||
ec2_vpc_subnet:
|
||||
state: present
|
||||
vpc_id: vpc-123456
|
||||
cidr: 10.0.1.16/28
|
||||
resource_tags:
|
||||
Name: Database Subnet
|
||||
register: database_subnet
|
||||
|
||||
- name: Remove subnet for database servers
|
||||
ec2_vpc_subnet:
|
||||
state: absent
|
||||
vpc_id: vpc-123456
|
||||
cidr: 10.0.1.16/28
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
import boto.vpc
|
||||
from boto.exception import EC2ResponseError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
if __name__ != '__main__':
|
||||
raise
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class AnsibleVPCSubnetException(Exception):
    """Base class for all errors raised by this module."""
    pass


class AnsibleVPCSubnetCreationException(AnsibleVPCSubnetException):
    """Raised when a subnet cannot be created."""
    pass


class AnsibleVPCSubnetDeletionException(AnsibleVPCSubnetException):
    """Raised when a subnet cannot be deleted."""
    pass


class AnsibleTagCreationException(AnsibleVPCSubnetException):
    """Raised when tags cannot be created or updated on a subnet."""
    pass
|
||||
|
||||
|
||||
def get_subnet_info(subnet):
    """Build a plain dict describing a boto subnet for module output."""
    return {
        'id': subnet.id,
        'availability_zone': subnet.availability_zone,
        'available_ip_address_count': subnet.available_ip_address_count,
        'cidr_block': subnet.cidr_block,
        'default_for_az': subnet.defaultForAz,
        'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
        'state': subnet.state,
        'tags': subnet.tags,
        'vpc_id': subnet.vpc_id,
    }
|
||||
|
||||
def subnet_exists(vpc_conn, subnet_id):
    """Return the subnet object when ``subnet_id`` exists and is 'available'.

    Returns False when the subnet is missing or not yet available (boto may
    report a freshly created subnet as 'pending').
    """
    found = vpc_conn.get_all_subnets(filters={'subnet-id': subnet_id})
    if not found or found[0].state != "available":
        return False
    return found[0]
|
||||
|
||||
|
||||
def create_subnet(vpc_conn, vpc_id, cidr, az, check_mode):
    """Create a subnet and block until it is reported 'available'.

    :returns: the boto subnet object, or None when check_mode is True
        (boto raises DryRunOperation instead of creating).
    :raises AnsibleVPCSubnetCreationException: on any other EC2 API error.
    """
    try:
        new_subnet = vpc_conn.create_subnet(vpc_id, cidr, az, dry_run=check_mode)
        # Sometimes AWS takes its time to create a subnet and so using
        # new subnets's id to do things like create tags results in
        # exception. boto doesn't seem to refresh 'state' of the newly
        # created subnet, i.e.: it's always 'pending'.
        subnet = False
        while subnet is False:
            # Poll until subnet_exists reports the subnet as 'available'.
            subnet = subnet_exists(vpc_conn, new_subnet.id)
            time.sleep(0.1)
    except EC2ResponseError as e:
        if e.error_code == "DryRunOperation":
            subnet = None
        else:
            raise AnsibleVPCSubnetCreationException(
                'Unable to create subnet {0}, error: {1}'.format(cidr, e))

    return subnet
|
||||
|
||||
|
||||
def get_resource_tags(vpc_conn, resource_id):
    """Return the tags on the given resource as a plain {name: value} dict."""
    tag_objects = vpc_conn.get_all_tags(filters={'resource-id': resource_id})
    return dict((tag.name, tag.value) for tag in tag_objects)
|
||||
|
||||
|
||||
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
    """Reconcile the tags on an EC2 resource with the desired ``tags`` dict.

    :param add_only: when True, existing tags absent from ``tags`` are kept.
    :param check_mode: forwarded to boto as the ``dry_run`` flag.
    :returns: dict with a 'changed' flag and the resulting 'tags'.
    :raises AnsibleTagCreationException: on any EC2 API error.
    """
    try:
        cur_tags = get_resource_tags(vpc_conn, resource_id)
        if cur_tags == tags:
            return {'changed': False, 'tags': cur_tags}

        to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
        if to_delete and not add_only:
            vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)

        # Covers both brand-new keys and keys whose value changed.
        to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k])
        if to_add:
            vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)

        latest_tags = get_resource_tags(vpc_conn, resource_id)
        return {'changed': True, 'tags': latest_tags}
    except EC2ResponseError as e:
        raise AnsibleTagCreationException(
            'Unable to update tags for {0}, error: {1}'.format(resource_id, e))
|
||||
|
||||
|
||||
def get_matching_subnet(vpc_conn, vpc_id, cidr):
    """Return the subnet in ``vpc_id`` with the given CIDR block, or None."""
    for subnet in vpc_conn.get_all_subnets(filters={'vpc_id': vpc_id}):
        if subnet.cidr_block == cidr:
            return subnet
    return None
|
||||
|
||||
|
||||
def ensure_subnet_present(vpc_conn, vpc_id, cidr, az, tags, check_mode):
    """Create the subnet if missing and bring its tags in line with ``tags``.

    :returns: dict with 'changed' and a serialized 'subnet' description
        (empty dict in check mode when the subnet would be created).
    """
    subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
    changed = False
    if subnet is None:
        subnet = create_subnet(vpc_conn, vpc_id, cidr, az, check_mode)
        changed = True
        # Subnet will be None when check_mode is true
        if subnet is None:
            return {
                'changed': changed,
                'subnet': {}
            }

    if tags != subnet.tags:
        # add_only=False: tags not present in ``tags`` are removed.
        ensure_tags(vpc_conn, subnet.id, tags, False, check_mode)
        subnet.tags = tags
        changed = True

    subnet_info = get_subnet_info(subnet)

    return {
        'changed': changed,
        'subnet': subnet_info
    }
|
||||
|
||||
|
||||
def ensure_subnet_absent(vpc_conn, vpc_id, cidr, check_mode):
    """Delete the subnet in ``vpc_id`` matching ``cidr``, if it exists.

    :returns: dict with a 'changed' flag.
    :raises AnsibleVPCSubnetDeletionException: when the delete API call fails.
    """
    subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
    if subnet is None:
        return {'changed': False}

    try:
        vpc_conn.delete_subnet(subnet.id, dry_run=check_mode)
        return {'changed': True}
    except EC2ResponseError as e:
        raise AnsibleVPCSubnetDeletionException(
            'Unable to delete subnet {0}, error: {1}'
            .format(subnet.cidr_block, e))
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse parameters, connect to EC2 and dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            az = dict(default=None, required=False),
            # NOTE(review): default=None alongside required=True is redundant —
            # a required option never falls back to its default.
            cidr = dict(default=None, required=True),
            state = dict(default='present', choices=['present', 'absent']),
            tags = dict(default=None, required=False, type='dict', aliases=['resource_tags']),
            vpc_id = dict(default=None, required=True)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    vpc_id = module.params.get('vpc_id')
    tags = module.params.get('tags')
    cidr = module.params.get('cidr')
    az = module.params.get('az')
    state = module.params.get('state')

    try:
        if state == 'present':
            result = ensure_subnet_present(connection, vpc_id, cidr, az, tags,
                                           check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_subnet_absent(connection, vpc_id, cidr,
                                          check_mode=module.check_mode)
    except AnsibleVPCSubnetException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
147
lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
Normal file
147
lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
Normal file
@@ -0,0 +1,147 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_vpc_subnet_facts
|
||||
short_description: Gather facts about ec2 VPC subnets in AWS
|
||||
description:
|
||||
- Gather facts about ec2 VPC subnets in AWS
|
||||
version_added: "2.1"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
filters:
|
||||
description:
|
||||
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
|
||||
required: false
|
||||
default: null
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Gather facts about all VPC subnets
|
||||
- ec2_vpc_subnet_facts:
|
||||
|
||||
# Gather facts about a particular VPC subnet using ID
|
||||
- ec2_vpc_subnet_facts:
|
||||
filters:
|
||||
subnet-id: subnet-00112233
|
||||
|
||||
# Gather facts about any VPC subnet with a tag key Name and value Example
|
||||
- ec2_vpc_subnet_facts:
|
||||
filters:
|
||||
"tag:Name": Example
|
||||
|
||||
# Gather facts about any VPC subnet within VPC with ID vpc-abcdef00
|
||||
- ec2_vpc_subnet_facts:
|
||||
filters:
|
||||
vpc-id: vpc-abcdef00
|
||||
|
||||
# Gather facts about a set of VPC subnets, publicA, publicB and publicC within a
|
||||
# VPC with ID vpc-abcdef00 and then use the jinja map function to return the
|
||||
# subnet_ids as a list.
|
||||
|
||||
- ec2_vpc_subnet_facts:
|
||||
filters:
|
||||
vpc-id: vpc-abcdef00
|
||||
"tag:Name": "{{ item }}"
|
||||
with_items:
|
||||
- publicA
|
||||
- publicB
|
||||
- publicC
|
||||
register: subnet_facts
|
||||
|
||||
- set_fact:
|
||||
subnet_ids: "{{ subnet_facts.results|map(attribute='subnets.0.id')|list }}"
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.vpc
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def get_subnet_info(subnet):
    """Build a plain dict describing a boto subnet for facts output."""
    info = {'id': subnet.id,
            'availability_zone': subnet.availability_zone,
            'available_ip_address_count': subnet.available_ip_address_count,
            'cidr_block': subnet.cidr_block,
            'default_for_az': subnet.defaultForAz,
            'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
            'state': subnet.state,
            'tags': subnet.tags,
            'vpc_id': subnet.vpc_id}
    return info
|
||||
|
||||
def list_ec2_vpc_subnets(connection, module):
    """Describe all subnets matching the module's 'filters' parameter and
    exit the module with the serialized results.

    Exits via module.exit_json (never returns on success) or fail_json on
    an API error.
    """
    filters = module.params.get("filters")
    subnet_dict_array = []

    try:
        all_subnets = connection.get_all_subnets(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    for subnet in all_subnets:
        subnet_dict_array.append(get_subnet_info(subnet))

    module.exit_json(subnets=subnet_dict_array)
|
||||
|
||||
|
||||
def main():
    """Facts-module entry point: parse parameters, connect and list subnets."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters = dict(default=None, type='dict')
        )
    )

    # Facts modules never change state, so check mode support is irrelevant.
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    # Exits the module with the results via exit_json.
    list_ec2_vpc_subnets(connection, module)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
602
lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
Normal file
602
lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
Normal file
@@ -0,0 +1,602 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: ec2_vpc_vgw
|
||||
short_description: Create and delete AWS VPN Virtual Gateways.
|
||||
description:
|
||||
- Creates AWS VPN Virtual Gateways
|
||||
- Deletes AWS VPN Virtual Gateways
|
||||
- Attaches Virtual Gateways to VPCs
|
||||
- Detaches Virtual Gateways from VPCs
|
||||
version_added: "2.2"
|
||||
requirements: [ boto3 ]
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- present to ensure resource is created.
|
||||
- absent to remove resource
|
||||
required: false
|
||||
default: present
|
||||
choices: [ "present", "absent"]
|
||||
name:
|
||||
description:
|
||||
- name of the vgw to be created or deleted
|
||||
required: false
|
||||
type:
|
||||
description:
|
||||
- type of the virtual gateway to be created
|
||||
required: false
|
||||
choices: [ "ipsec.1" ]
|
||||
vpn_gateway_id:
|
||||
description:
|
||||
- vpn gateway id of an existing virtual gateway
|
||||
required: false
|
||||
vpc_id:
|
||||
description:
|
||||
- the vpc-id of a vpc to attach or detach
|
||||
required: false
|
||||
wait_timeout:
|
||||
description:
|
||||
- number of seconds to wait for status during vpc attach and detach
|
||||
required: false
|
||||
default: 320
|
||||
tags:
|
||||
description:
|
||||
- dictionary of resource tags
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ "resource_tags" ]
|
||||
author: Nick Aslanidis (@naslanidis)
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a new vgw attached to a specific VPC
|
||||
ec2_vpc_vgw:
|
||||
state: present
|
||||
region: ap-southeast-2
|
||||
profile: personal
|
||||
vpc_id: vpc-12345678
|
||||
name: personal-testing
|
||||
type: ipsec.1
|
||||
register: created_vgw
|
||||
|
||||
- name: Create a new unattached vgw
|
||||
ec2_vpc_vgw:
|
||||
state: present
|
||||
region: ap-southeast-2
|
||||
profile: personal
|
||||
name: personal-testing
|
||||
type: ipsec.1
|
||||
tags:
|
||||
environment: production
|
||||
owner: ABC
|
||||
register: created_vgw
|
||||
|
||||
- name: Remove a new vgw using the name
|
||||
ec2_vpc_vgw:
|
||||
state: absent
|
||||
region: ap-southeast-2
|
||||
profile: personal
|
||||
name: personal-testing
|
||||
type: ipsec.1
|
||||
register: deleted_vgw
|
||||
|
||||
- name: Remove a new vgw using the vpn_gateway_id
|
||||
ec2_vpc_vgw:
|
||||
state: absent
|
||||
region: ap-southeast-2
|
||||
profile: personal
|
||||
vpn_gateway_id: vgw-3a9aa123
|
||||
register: deleted_vgw
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
result:
|
||||
description: The result of the create, or delete action.
|
||||
returned: success
|
||||
type: dictionary
|
||||
'''
|
||||
|
||||
try:
|
||||
import json
|
||||
import time
|
||||
import botocore
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
def get_vgw_info(vgws):
    """Translate a ``describe_vpn_gateways`` result list into a flat fact dict.

    Note: when *vgws* contains several gateways only the last one's info is
    returned (callers in this module always pass a single-element list).

    :param vgws: list of gateway dicts from the EC2 API
    :returns: fact dict for the (last) gateway, or None for non-list /
              empty input
    """
    if not isinstance(vgws, list):
        return None

    # Fix: the original left vgw_info unbound (NameError) when vgws was an
    # empty list; initialize it so an empty list yields None instead.
    vgw_info = None
    for vgw in vgws:
        vgw_info = {
            'id': vgw['VpnGatewayId'],
            'type': vgw['Type'],
            'state': vgw['State'],
            'vpc_id': None,
            'tags': {tag['Key']: tag['Value'] for tag in vgw['Tags']},
        }

        attachments = vgw['VpcAttachments']
        if attachments and attachments[0]['State'] == 'attached':
            vgw_info['vpc_id'] = attachments[0]['VpcId']

    return vgw_info
|
||||
|
||||
def wait_for_status(client, module, vpn_gateway_id, status):
    """Poll the gateway until its first VPC attachment reaches *status*.

    Polls every 15 seconds up to the module's ``wait_timeout``.

    :param vpn_gateway_id: single-element list with the gateway id
    :param status: attachment state to wait for ('attached' / 'detached')
    :returns: tuple (status_achieved, last describe response or None)
    """
    polling_increment_secs = 15
    # Fix: use floor division — under Python 3, '/' yields a float and
    # range() would raise TypeError.
    max_retries = int(module.params.get('wait_timeout') // polling_increment_secs)
    status_achieved = False
    # Fix: pre-bind response so it is defined even when the loop body never
    # completes an iteration (e.g. max_retries == 0).
    response = None

    for _ in range(0, max_retries):
        try:
            response = find_vgw(client, module, vpn_gateway_id)
            if response[0]['VpcAttachments'][0]['State'] == status:
                status_achieved = True
                break
            else:
                time.sleep(polling_increment_secs)
        except botocore.exceptions.ClientError:
            e = get_exception()
            module.fail_json(msg=str(e))

    return status_achieved, response
|
||||
|
||||
|
||||
def attach_vgw(client, module, vpn_gateway_id):
    """Attach the gateway to the module's ``vpc_id`` and wait until 'attached'.

    Fails the module if the attach call errors or the wait times out.
    :returns: the attach_vpn_gateway API response
    """
    target_vpc = module.params.get('vpc_id')

    try:
        response = client.attach_vpn_gateway(VpnGatewayId=vpn_gateway_id,
                                             VpcId=target_vpc)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
    if not status_achieved:
        module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')

    return response
|
||||
|
||||
|
||||
def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
    """Detach a VPC from the gateway and wait until 'detached'.

    Uses *vpc_id* when given, otherwise falls back to the module's
    ``vpc_id`` parameter (same choice the original duplicated branches made).
    :returns: the detach_vpn_gateway API response
    """
    target_vpc = vpc_id if vpc_id else module.params.get('vpc_id')

    try:
        response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id,
                                             VpcId=target_vpc)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
    if not status_achieved:
        module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')

    return response
|
||||
|
||||
|
||||
def create_vgw(client, module):
    """Create a new virtual gateway of the module's ``type``.

    :returns: the create_vpn_gateway API response
    """
    gateway_type = module.params.get('type')

    try:
        response = client.create_vpn_gateway(Type=gateway_type)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
|
||||
|
||||
|
||||
def delete_vgw(client, module, vpn_gateway_id):
    """Delete the virtual gateway.

    :returns: *vpn_gateway_id* — the delete API response does not echo the
              id back, so we return it ourselves.
    """
    try:
        client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return vpn_gateway_id
|
||||
|
||||
|
||||
def create_tags(client, module, vpn_gateway_id):
    """Apply the module's tags (plus the mandatory Name tag) to the gateway.

    :returns: the create_tags API response
    """
    try:
        response = client.create_tags(Resources=[vpn_gateway_id],
                                      Tags=load_tags(module))
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
|
||||
|
||||
|
||||
def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None):
    """Delete tags from the gateway.

    When *tags_to_delete* is given only those tags are removed; otherwise the
    API call omits Tags, which deletes every tag on the resource.
    :returns: the delete_tags API response
    """
    if tags_to_delete:
        try:
            response = client.delete_tags(Resources=[vpn_gateway_id],
                                          Tags=tags_to_delete)
        except botocore.exceptions.ClientError:
            e = get_exception()
            module.fail_json(msg=str(e))
    else:
        try:
            response = client.delete_tags(Resources=[vpn_gateway_id])
        except botocore.exceptions.ClientError:
            e = get_exception()
            module.fail_json(msg=str(e))

    return response
|
||||
|
||||
|
||||
def load_tags(module):
    """Build the AWS ``[{'Key': .., 'Value': ..}]`` tag list from module params.

    A 'Name' tag derived from the ``name`` parameter is always appended.

    :returns: list of tag dicts
    """
    tags = []

    if module.params.get('tags'):
        # Fix: .items() — dict.iteritems() is Python-2-only and raises
        # AttributeError under Python 3.
        for name, value in module.params.get('tags').items():
            tags.append({'Key': name, 'Value': str(value)})
    # The Name tag is appended in both branches (as in the original).
    tags.append({'Key': "Name", 'Value': module.params.get('name')})
    return tags
|
||||
|
||||
|
||||
def find_tags(client, module, resource_id=None):
    """Describe all tags attached to *resource_id*.

    :returns: the describe_tags API response, or None when no resource id
              was supplied (guards the unbound-variable fall-through the
              original allowed).
    """
    if not resource_id:
        return None

    try:
        response = client.describe_tags(Filters=[
            {'Name': 'resource-id', 'Values': [resource_id]}
        ])
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
|
||||
|
||||
|
||||
def check_tags(client, module, existing_vgw, vpn_gateway_id):
    """Reconcile the gateway's tags with the module's ``tags`` parameter.

    The 'Name' tag is always preserved. When the requested tags differ from
    the current ones, all tags are deleted and recreated from the params;
    when no tags are requested, everything except 'Name' is removed.

    :param existing_vgw: single-element list with the current gateway dict
    :returns: tuple (refreshed gateway description, changed flag)
    """
    desired_tags = module.params.get('tags')
    vgw = existing_vgw
    changed = False

    # Current tags, minus the reserved 'Name' tag, as a dict for comparison.
    current_tags = {}
    for tag in existing_vgw[0]['Tags']:
        if tag['Key'] != 'Name':
            current_tags[tag['Key']] = tag['Value']

    # Fix: 'is not None' / 'is None' instead of '!= None' / '== None' (PEP 8).
    # If the existing tags don't match the requested ones, recreate the set.
    if desired_tags is not None and current_tags != desired_tags:
        delete_tags(client, module, vpn_gateway_id)
        create_tags(client, module, vpn_gateway_id)
        vgw = find_vgw(client, module)
        changed = True

    # No tags requested: delete everything except the Name tag.
    if desired_tags is None and current_tags != {}:
        tags_to_delete = [tag for tag in existing_vgw[0]['Tags'] if tag['Key'] != 'Name']
        delete_tags(client, module, vpn_gateway_id, tags_to_delete)
        vgw = find_vgw(client, module)
        changed = True

    return vgw, changed
|
||||
|
||||
|
||||
def find_vpc(client, module):
    """Verify the module's ``vpc_id`` exists by describing it.

    A nonexistent VPC makes describe_vpcs raise ClientError, which fails
    the module — callers rely on that as an existence check.

    :returns: the describe_vpcs API response, or None when no vpc_id was
              supplied (guards the unbound-variable fall-through the
              original allowed).
    """
    vpc_id = module.params.get('vpc_id')
    if not vpc_id:
        return None

    try:
        response = client.describe_vpcs(VpcIds=[vpc_id])
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
|
||||
|
||||
|
||||
def find_vgw(client, module, vpn_gateway_id=None):
    """Locate virtual gateways, by explicit id or by the module's name/type.

    An explicit *vpn_gateway_id* (a list) is honoured only when state is not
    'present'; in every other case the lookup filters on type and the
    tag:Name value — exactly as the original's three branches did (its
    'present' branch and no-id branch were identical, so they are merged).

    :returns: the 'VpnGateways' list from the API response
    """
    gateway_name = module.params.get('name')
    gateway_type = module.params.get('type')
    state = module.params.get('state')

    try:
        if state != 'present' and vpn_gateway_id:
            response = client.describe_vpn_gateways(VpnGatewayIds=vpn_gateway_id)
        else:
            response = client.describe_vpn_gateways(Filters=[
                {'Name': 'type', 'Values': [gateway_type]},
                {'Name': 'tag:Name', 'Values': [gateway_name]}
            ])
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response['VpnGateways']
|
||||
|
||||
|
||||
def ensure_vgw_present(client, module):
    """Create (or adopt) a virtual gateway and converge its tags and attachment.

    If a live gateway matching the module's name and type exists it is
    reused; otherwise a new one is created and tagged. When ``vpc_id`` is
    given the gateway ends up attached to that VPC; when it is absent any
    existing attachment is removed.

    :returns: tuple (changed, fact dict from get_vgw_info)
    """
    changed = False
    vpc_id = module.params.get('vpc_id')

    # A name is mandatory for state=present: it is used to find/tag the vgw.
    if not module.params.get('name'):
        module.fail_json(msg='A name is required when a status of \'present\' is suppled')

    # Look for a live gateway matching the module arguments.
    existing_vgw = find_vgw(client, module)

    if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
        vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
        vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)

        if vpc_id:
            # Verify the supplied VPC exists (a bad id raises inside find_vpc).
            find_vpc(client, module)
            current_vpc_attachments = existing_vgw[0]['VpcAttachments']

            if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
                if current_vpc_attachments[0]['VpcId'] == vpc_id:
                    # Already attached to the requested VPC — nothing to do.
                    changed = False
                else:
                    # Attached elsewhere: move the attachment over.
                    vpc_to_detach = current_vpc_attachments[0]['VpcId']
                    detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
                    time.sleep(5)
                    attach_vgw(client, module, vpn_gateway_id)
                    vgw = find_vgw(client, module, [vpn_gateway_id])
                    changed = True
            else:
                # Not attached anywhere yet: attach to the requested VPC.
                attach_vgw(client, module, vpn_gateway_id)
                vgw = find_vgw(client, module, [vpn_gateway_id])
                changed = True
        else:
            # No vpc_id supplied: ensure the gateway is detached.
            existing_vgw = find_vgw(client, module, [vpn_gateway_id])

            if existing_vgw[0]['VpcAttachments'] != []:
                if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
                    vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
                    detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
                    changed = True

            vgw = find_vgw(client, module, [vpn_gateway_id])
    else:
        # No usable gateway found: create, tag and (optionally) attach one.
        new_vgw = create_vgw(client, module)
        changed = True
        vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']

        create_tags(client, module, vpn_gateway_id)

        # Refresh the gateway description after tagging.
        vgw = find_vgw(client, module, [vpn_gateway_id])

        if vpc_id:
            attach_vgw(client, module, vpn_gateway_id)
            changed = True
            vgw = find_vgw(client, module, [vpn_gateway_id])

    return changed, get_vgw_info(vgw)
|
||||
|
||||
|
||||
def ensure_vgw_absent(client, module):
    """Delete a virtual gateway found by vpn_gateway_id or by name/type.

    Any attached VPC is detached first. When ``vpc_id`` is supplied it must
    match the currently attached VPC, otherwise the module fails.

    :returns: tuple (changed, deleted gateway id / "Nothing to do" / None)
    """
    changed = False
    vpc_id = module.params.get('vpc_id')
    supplied_id = module.params.get('vpn_gateway_id')

    if supplied_id:
        # Locate the gateway by its explicit id.
        existing_vgw = find_vgw(client, module, [supplied_id])

        if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
            if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
                if vpc_id:
                    if vpc_id != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
                        module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
                    else:
                        # Detach the supplied vpc, then delete the gateway.
                        detach_vgw(client, module, supplied_id, vpc_id)
                        deleted_vgw = delete_vgw(client, module, supplied_id)
                        changed = True
                else:
                    # No vpc_id given: detach whatever is attached, then delete.
                    vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
                    detach_vgw(client, module, supplied_id, vpc_to_detach)
                    deleted_vgw = delete_vgw(client, module, supplied_id)
                    changed = True
            else:
                # Nothing attached: just delete the gateway.
                deleted_vgw = delete_vgw(client, module, supplied_id)
                changed = True
        else:
            changed = False
            deleted_vgw = "Nothing to do"
    else:
        # No id supplied: name and type are required to find the gateway.
        if not module.params.get('name') or not module.params.get('type'):
            module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is suppled')

        existing_vgw = find_vgw(client, module)

        if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
            vpn_gateway_id = existing_vgw[0]['VpnGatewayId']

            if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
                if vpc_id:
                    if vpc_id != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
                        module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
                    else:
                        # Detach the supplied vpc, then delete the gateway.
                        detach_vgw(client, module, vpn_gateway_id, vpc_id)
                        deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
                        changed = True
                else:
                    # Detach whatever is attached, then delete the gateway.
                    vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
                    detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
                    changed = True
                    deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
            else:
                # Nothing attached: just delete the gateway.
                deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
                changed = True
        else:
            changed = False
            deleted_vgw = None

    return changed, deleted_vgw
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the spec, connect, and converge vgw state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        region=dict(required=True),
        name=dict(),
        vpn_gateway_id=dict(),
        vpc_id=dict(),
        wait_timeout=dict(type='int', default=320),
        type=dict(default='ipsec.1', choices=['ipsec.1']),
        tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json and boto3 is required.')

    state = module.params.get('state').lower()

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError:
        e = get_exception()
        module.fail_json(msg="Can't authorize connection - "+str(e))

    # dispatch to the handler for the requested state
    handler = ensure_vgw_present if state == 'present' else ensure_vgw_absent
    changed, results = handler(client, module)

    module.exit_json(changed=changed, vgw=results)
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
180
lib/ansible/modules/cloud/amazon/ec2_win_password.py
Normal file
180
lib/ansible/modules/cloud/amazon/ec2_win_password.py
Normal file
@@ -0,0 +1,180 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ec2_win_password
|
||||
short_description: gets the default administrator password for ec2 windows instances
|
||||
description:
|
||||
- Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
|
||||
version_added: "2.0"
|
||||
author: "Rick Mendes (@rickmendes)"
|
||||
options:
|
||||
instance_id:
|
||||
description:
|
||||
- The instance id to get the password data from.
|
||||
required: true
|
||||
key_file:
|
||||
description:
|
||||
- Path to the file containing the key pair used on the instance.
|
||||
required: true
|
||||
key_passphrase:
|
||||
version_added: "2.0"
|
||||
description:
|
||||
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
|
||||
required: false
|
||||
default: null
|
||||
wait:
|
||||
version_added: "2.0"
|
||||
description:
|
||||
- Whether or not to wait for the password to be available before returning.
|
||||
required: false
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
wait_timeout:
|
||||
version_added: "2.0"
|
||||
description:
|
||||
- Number of seconds to wait before giving up.
|
||||
required: false
|
||||
default: 120
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Example of getting a password
|
||||
tasks:
|
||||
- name: get the Administrator password
|
||||
ec2_win_password:
|
||||
profile: my-boto-profile
|
||||
instance_id: i-XXXXXX
|
||||
region: us-east-1
|
||||
key_file: "~/aws-creds/my_test_key.pem"
|
||||
|
||||
# Example of getting a password with a password protected key
|
||||
tasks:
|
||||
- name: get the Administrator password
|
||||
ec2_win_password:
|
||||
profile: my-boto-profile
|
||||
instance_id: i-XXXXXX
|
||||
region: us-east-1
|
||||
key_file: "~/aws-creds/my_protected_test_key.pem"
|
||||
key_passphrase: "secret"
|
||||
|
||||
# Example of waiting for a password
|
||||
tasks:
|
||||
- name: get the Administrator password
|
||||
ec2_win_password:
|
||||
profile: my-boto-profile
|
||||
instance_id: i-XXXXXX
|
||||
region: us-east-1
|
||||
key_file: "~/aws-creds/my_test_key.pem"
|
||||
wait: yes
|
||||
wait_timeout: 45
|
||||
'''
|
||||
|
||||
from base64 import b64decode
|
||||
from os.path import expanduser
|
||||
from Crypto.Cipher import PKCS1_v1_5
|
||||
from Crypto.PublicKey import RSA
|
||||
import datetime
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
def main():
    """Fetch and decrypt the Windows administrator password of an EC2 instance.

    Optionally polls until password data becomes available (``wait`` /
    ``wait_timeout``), then decrypts it with the instance's key pair.
    """
    # Fix: 'time' is never imported by this file's visible imports, so
    # time.sleep(5) in the wait loop risked a NameError; import it locally.
    import time

    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        instance_id=dict(required=True),
        key_file=dict(required=True),
        key_passphrase=dict(no_log=True, default=None, required=False),
        wait=dict(type='bool', default=False, required=False),
        wait_timeout=dict(default=120, required=False),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='Boto required for this module.')

    instance_id = module.params.get('instance_id')
    key_file = expanduser(module.params.get('key_file'))
    key_passphrase = module.params.get('key_passphrase')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    ec2 = ec2_connect(module)

    if wait:
        start = datetime.datetime.now()
        end = start + datetime.timedelta(seconds=wait_timeout)

        # poll every 5s until password data appears or the deadline passes
        while datetime.datetime.now() < end:
            data = ec2.get_password_data(instance_id)
            decoded = b64decode(data)
            if wait and not decoded:
                time.sleep(5)
            else:
                break
    else:
        data = ec2.get_password_data(instance_id)
        decoded = b64decode(data)

    if wait and datetime.datetime.now() >= end:
        module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)

    try:
        f = open(key_file, 'r')
    except IOError as e:
        module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
    else:
        try:
            with f:
                key = RSA.importKey(f.read(), key_passphrase)
        except (ValueError, IndexError, TypeError) as e:
            module.fail_json(msg="unable to parse key file")

    cipher = PKCS1_v1_5.new(key)
    sentinel = 'password decryption failed!!!'

    try:
        decrypted = cipher.decrypt(decoded, sentinel)
    except ValueError as e:
        decrypted = None

    # Fix: 'is None' instead of '== None' (PEP 8); behavior unchanged.
    if decrypted is None:
        module.exit_json(win_password='', changed=False)
    else:
        if wait:
            elapsed = datetime.datetime.now() - start
            module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
        else:
            module.exit_json(win_password=decrypted, changed=True)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
243
lib/ansible/modules/cloud/amazon/ecs_cluster.py
Normal file
243
lib/ansible/modules/cloud/amazon/ecs_cluster.py
Normal file
@@ -0,0 +1,243 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ecs_cluster
|
||||
short_description: create or terminate ecs clusters
|
||||
notes:
|
||||
- When deleting a cluster, the information returned is the state of the cluster prior to deletion.
|
||||
- It will also wait for a cluster to have instances registered to it.
|
||||
description:
|
||||
- Creates or terminates ecs clusters.
|
||||
version_added: "2.0"
|
||||
author: Mark Chance(@Java1Guy)
|
||||
requirements: [ boto, boto3 ]
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- The desired state of the cluster
|
||||
required: true
|
||||
choices: ['present', 'absent', 'has_instances']
|
||||
name:
|
||||
description:
|
||||
- The cluster name
|
||||
required: true
|
||||
delay:
|
||||
description:
|
||||
- Number of seconds to wait
|
||||
required: false
|
||||
repeat:
|
||||
description:
|
||||
- The number of times to wait for the cluster to have an instance
|
||||
required: false
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Cluster creation
|
||||
- ecs_cluster:
|
||||
name: default
|
||||
state: present
|
||||
|
||||
# Cluster deletion
|
||||
- ecs_cluster:
|
||||
name: default
|
||||
state: absent
|
||||
|
||||
- name: Wait for register
|
||||
ecs_cluster:
|
||||
name: "{{ new_cluster }}"
|
||||
state: has_instances
|
||||
delay: 10
|
||||
repeat: 10
|
||||
register: task_output
|
||||
|
||||
'''
|
||||
RETURN = '''
|
||||
activeServicesCount:
|
||||
description: how many services are active in this cluster
|
||||
returned: 0 if a new cluster
|
||||
type: int
|
||||
clusterArn:
|
||||
description: the ARN of the cluster just created
|
||||
type: string (ARN)
|
||||
sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
|
||||
clusterName:
|
||||
description: name of the cluster just created (should match the input argument)
|
||||
type: string
|
||||
sample: test-cluster-mfshcdok
|
||||
pendingTasksCount:
|
||||
description: how many tasks are waiting to run in this cluster
|
||||
returned: 0 if a new cluster
|
||||
type: int
|
||||
registeredContainerInstancesCount:
|
||||
description: how many container instances are available in this cluster
|
||||
returned: 0 if a new cluster
|
||||
type: int
|
||||
runningTasksCount:
|
||||
description: how many tasks are running in this cluster
|
||||
returned: 0 if a new cluster
|
||||
type: int
|
||||
status:
|
||||
description: the status of the new cluster
|
||||
returned: ACTIVE
|
||||
type: string
|
||||
'''
|
||||
import time
|
||||
|
||||
try:
|
||||
import boto
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class EcsClusterManager:
    """Thin wrapper around the boto3 ECS client for cluster operations."""

    def __init__(self, module):
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs',
                                  region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
        """Return the first entry whose *field_name* ends with *cluster_name*, else None."""
        for candidate in array_of_clusters:
            if candidate[field_name].endswith(cluster_name):
                return candidate
        return None

    def describe_cluster(self, cluster_name):
        """Return the cluster's description dict, None when it is missing.

        Raises a generic Exception for any response that neither reports
        the cluster missing nor contains a matching cluster entry.
        """
        response = self.ecs.describe_clusters(clusters=[cluster_name])

        failures = response['failures']
        if failures:
            failed = self.find_in_array(failures, cluster_name, 'arn')
            if failed and failed['reason'] == 'MISSING':
                return None
            # otherwise fall through and inspect any clusters that were found
        clusters = response['clusters']
        if clusters:
            found = self.find_in_array(clusters, cluster_name)
            if found:
                return found
        raise Exception("Unknown problem describing cluster %s." % cluster_name)

    def create_cluster(self, clusterName='default'):
        """Create the cluster and return its description dict."""
        return self.ecs.create_cluster(clusterName=clusterName)['cluster']

    def delete_cluster(self, clusterName):
        """Delete the cluster and return the raw API response."""
        return self.ecs.delete_cluster(cluster=clusterName)
|
||||
|
||||
def main():
    """Entry point: create, delete, or wait for instances in an ECS cluster.

    States:
      present       - create the cluster if not ACTIVE (idempotent).
      absent        - delete the cluster if it exists and is not INACTIVE.
      has_instances - poll until at least one container instance registers.
    """

    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'has_instances']),
        name=dict(required=True, type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10)
    ))
    # required_together must be a list of lists of parameter names. The
    # original passed a flat list, which AnsibleModule iterates element by
    # element (i.e. treating each string as a group) instead of as one group.
    required_together = [['state', 'name']]

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    cluster_mgr = EcsClusterManager(module)
    try:
        existing = cluster_mgr.describe_cluster(module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))

    results = dict(changed=False)
    if module.params['state'] == 'present':
        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            # Already exists and is active: report it unchanged.
            results['cluster'] = existing
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
            results['changed'] = True

    # delete the cluster
    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            results['cluster'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    cluster_mgr.delete_cluster(module.params['name'])
                results['changed'] = True
    elif module.params['state'] == 'has_instances':
        if not existing:
            module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
            return
        # Poll up to `repeat` times, sleeping `delay` seconds between polls,
        # until at least one container instance is registered in the cluster.
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        count = 0
        for i in range(repeat):
            existing = cluster_mgr.describe_cluster(module.params['name'])
            count = existing['registeredContainerInstancesCount']
            if count > 0:
                results['changed'] = True
                break
            time.sleep(delay)
        if count == 0:
            # Original checked "i is repeat-1" - an identity comparison on ints
            # that only worked via CPython small-int caching and raised
            # NameError when repeat == 0. Reaching here with count == 0 already
            # means every retry was exhausted.
            module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
            return

    module.exit_json(**results)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
433
lib/ansible/modules/cloud/amazon/ecs_service.py
Normal file
433
lib/ansible/modules/cloud/amazon/ecs_service.py
Normal file
@@ -0,0 +1,433 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ecs_service
|
||||
short_description: create, terminate, start or stop a service in ecs
|
||||
description:
|
||||
- Creates or terminates ecs services.
|
||||
notes:
|
||||
- the service role specified must be assumable (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com)
|
||||
- for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
|
||||
dependencies:
|
||||
- An IAM role must have been created
|
||||
version_added: "2.1"
|
||||
author:
|
||||
- "Mark Chance (@java1guy)"
|
||||
- "Darek Kaczynski (@kaczynskid)"
|
||||
requirements: [ json, boto, botocore, boto3 ]
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- The desired state of the service
|
||||
required: true
|
||||
choices: ["present", "absent", "deleting"]
|
||||
name:
|
||||
description:
|
||||
- The name of the service
|
||||
required: true
|
||||
cluster:
|
||||
description:
|
||||
- The name of the cluster in which the service exists
|
||||
required: false
|
||||
task_definition:
|
||||
description:
|
||||
- The task definition the service will run
|
||||
required: false
|
||||
load_balancers:
|
||||
description:
|
||||
- The list of ELBs defined for this service
|
||||
required: false
|
||||
|
||||
desired_count:
|
||||
description:
|
||||
- The count of how many instances of the service
|
||||
required: false
|
||||
client_token:
|
||||
description:
|
||||
- Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
|
||||
required: false
|
||||
role:
|
||||
description:
|
||||
- The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service.
|
||||
required: false
|
||||
delay:
|
||||
description:
|
||||
- The time to wait before checking that the service is available
|
||||
required: false
|
||||
default: 10
|
||||
repeat:
|
||||
description:
|
||||
- The number of times to check that the service is available
|
||||
required: false
|
||||
default: 10
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
- ecs_service:
|
||||
state: present
|
||||
name: console-test-service
|
||||
cluster: new_cluster
|
||||
task_definition: new_cluster-task:1"
|
||||
desired_count: 0
|
||||
|
||||
# Basic provisioning example
|
||||
- ecs_service:
|
||||
name: default
|
||||
state: present
|
||||
cluster: new_cluster
|
||||
|
||||
# Simple example to delete
|
||||
- ecs_service:
|
||||
name: default
|
||||
state: absent
|
||||
cluster: new_cluster
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
service:
|
||||
description: Details of created service.
|
||||
returned: when creating a service
|
||||
type: complex
|
||||
contains:
|
||||
clusterArn:
|
||||
description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service.
|
||||
returned: always
|
||||
type: string
|
||||
desiredCount:
|
||||
description: The desired number of instantiations of the task definition to keep running on the service.
|
||||
returned: always
|
||||
type: int
|
||||
loadBalancers:
|
||||
description: A list of load balancer objects
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
loadBalancerName:
|
||||
description: the name
|
||||
returned: always
|
||||
type: string
|
||||
containerName:
|
||||
description: The name of the container to associate with the load balancer.
|
||||
returned: always
|
||||
type: string
|
||||
containerPort:
|
||||
description: The port on the container to associate with the load balancer.
|
||||
returned: always
|
||||
type: int
|
||||
pendingCount:
|
||||
description: The number of tasks in the cluster that are in the PENDING state.
|
||||
returned: always
|
||||
type: int
|
||||
runningCount:
|
||||
description: The number of tasks in the cluster that are in the RUNNING state.
|
||||
returned: always
|
||||
type: int
|
||||
serviceArn:
|
||||
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
|
||||
returned: always
|
||||
type: string
|
||||
serviceName:
|
||||
description: A user-generated string used to identify the service
|
||||
returned: always
|
||||
type: string
|
||||
status:
|
||||
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
|
||||
returned: always
|
||||
type: string
|
||||
taskDefinition:
|
||||
description: The ARN of a task definition to use for tasks in the service.
|
||||
returned: always
|
||||
type: string
|
||||
deployments:
|
||||
description: list of service deployments
|
||||
returned: always
|
||||
type: list of complex
|
||||
events:
|
||||
            description: list of service events
|
||||
returned: always
|
||||
type: list of complex
|
||||
ansible_facts:
|
||||
description: Facts about deleted service.
|
||||
returned: when deleting a service
|
||||
type: complex
|
||||
contains:
|
||||
service:
|
||||
description: Details of deleted service in the same structure described above for service creation.
|
||||
returned: when service existed and was deleted
|
||||
type: complex
|
||||
'''
|
||||
import time
|
||||
|
||||
try:
|
||||
import boto
|
||||
import botocore
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class EcsServiceManager:
    """Handles ECS Services via the boto3 ECS client."""

    def __init__(self, module):
        self.module = module

        try:
            # Resolve region/credentials from module params, environment
            # variables or boto config, then build the boto3 ECS client.
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
        """Return the first entry whose field_name value ends with service_name, else None."""
        for c in array_of_services:
            if c[field_name].endswith(service_name):
                return c
        return None

    def describe_service(self, cluster_name, service_name):
        """Describe one service in a cluster.

        Returns the service dict, or None when the API reports it MISSING.
        Raises Exception when the response contains neither a usable failure
        entry nor a matching service.
        """
        response = self.ecs.describe_services(
            cluster=cluster_name,
            services=[
                service_name
            ])
        msg = ''
        if len(response['failures']) > 0:
            c = self.find_in_array(response['failures'], service_name, 'arn')
            # Guard against c being None before dereferencing it; the original
            # appended c['reason'] unconditionally and could raise TypeError.
            if c is not None:
                msg += ", failure reason is " + c['reason']
                if c['reason'] == 'MISSING':
                    return None
            # fall thru and look through found ones
        if len(response['services']) > 0:
            c = self.find_in_array(response['services'], service_name)
            if c:
                return c
        # StandardError does not exist on Python 3 (NameError there); use the
        # builtin Exception, and include the previously-dead failure detail.
        raise Exception("Unknown problem describing service %s.%s" % (service_name, msg))

    def is_matching_service(self, expected, existing):
        """Compare module params (expected) against an existing service description."""
        if expected['task_definition'] != existing['taskDefinition']:
            return False

        if (expected['load_balancers'] or []) != existing['loadBalancers']:
            return False

        if (expected['desired_count'] or 0) != existing['desiredCount']:
            return False

        return True

    def create_service(self, service_name, cluster_name, task_definition,
                       load_balancers, desired_count, client_token, role):
        """Create a service and return its JSON-safe description."""
        response = self.ecs.create_service(
            cluster=cluster_name,
            serviceName=service_name,
            taskDefinition=task_definition,
            loadBalancers=load_balancers,
            desiredCount=desired_count,
            clientToken=client_token,
            role=role)
        return self.jsonize(response['service'])

    def update_service(self, service_name, cluster_name, task_definition,
                       load_balancers, desired_count, client_token, role):
        """Update a service and return its JSON-safe description.

        NOTE(review): only taskDefinition and desiredCount are sent to the
        API; load_balancers/client_token/role are accepted but unused here -
        confirm against the intended update semantics.
        """
        response = self.ecs.update_service(
            cluster=cluster_name,
            service=service_name,
            taskDefinition=task_definition,
            desiredCount=desired_count)
        return self.jsonize(response['service'])

    def jsonize(self, service):
        """Stringify datetime fields in-place so the dict is JSON serializable."""
        if 'deployments' in service:
            for d in service['deployments']:
                if 'createdAt' in d:
                    d['createdAt'] = str(d['createdAt'])
                if 'updatedAt' in d:
                    d['updatedAt'] = str(d['updatedAt'])
        if 'events' in service:
            for e in service['events']:
                if 'createdAt' in e:
                    e['createdAt'] = str(e['createdAt'])
        return service

    def delete_service(self, service, cluster=None):
        """Delete the named service and return the raw API response."""
        return self.ecs.delete_service(cluster=cluster, service=service)
|
||||
|
||||
def main():
    """Entry point: create, update, delete, or wait for deletion of an ECS service.

    States:
      present  - create or update the service so it matches the params.
      absent   - delete the service if it exists and is not INACTIVE.
      deleting - poll until a pending deletion reaches INACTIVE.
    """

    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False, type='list'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, type='str'),
        role=dict(required=False, type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10)
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    if module.params['state'] == 'present':
        # Keys declared in argument_spec are always present in module.params,
        # so only a None check is meaningful. The original combined the
        # membership test with "and", which made both validations unreachable.
        if module.params['task_definition'] is None:
            module.fail_json(msg="To create a service, a task_definition must be specified")
        if module.params['desired_count'] is None:
            module.fail_json(msg="To create a service, a desired_count must be specified")

    service_mgr = EcsServiceManager(module)
    try:
        existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
    except Exception as e:
        # cluster is optional and may be None; str() it to keep the
        # concatenation from raising its own TypeError.
        module.fail_json(msg="Exception describing service '" + module.params['name'] + "' in cluster '" + str(module.params['cluster']) + "': " + str(e))

    results = dict(changed=False)
    if module.params['state'] == 'present':

        matching = False
        update = False
        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = service_mgr.jsonize(existing)
            else:
                update = True

        if not matching:
            if not module.check_mode:
                # Normalize optional params to the API's expected empty values.
                loadBalancers = module.params['load_balancers'] or []
                role = module.params['role'] or ''
                clientToken = module.params['client_token'] or ''

                if update:
                    # update required
                    response = service_mgr.update_service(module.params['name'],
                                                          module.params['cluster'],
                                                          module.params['task_definition'],
                                                          loadBalancers,
                                                          module.params['desired_count'],
                                                          clientToken,
                                                          role)
                else:
                    # doesn't exist. create it.
                    response = service_mgr.create_service(module.params['name'],
                                                          module.params['cluster'],
                                                          module.params['task_definition'],
                                                          loadBalancers,
                                                          module.params['desired_count'],
                                                          clientToken,
                                                          role)

                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the service deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(
                            module.params['name'],
                            module.params['cluster']
                        )
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(msg=e.message)
                    results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            module.fail_json(msg="Service '" + module.params['name'] + " not found.")
            return
        # Poll up to `repeat` times, sleeping `delay` seconds between polls,
        # until the service reaches INACTIVE.
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        deleted = False
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
            if existing['status'] == "INACTIVE":
                results['changed'] = True
                deleted = True
                break
            time.sleep(delay)
        if not deleted:
            # Original checked "i is repeat-1" - an identity comparison on ints
            # that only worked via CPython small-int caching and raised
            # NameError when repeat == 0. A flag states the intent directly.
            module.fail_json(msg="Service still not deleted after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
            return

    module.exit_json(**results)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
240
lib/ansible/modules/cloud/amazon/ecs_service_facts.py
Normal file
240
lib/ansible/modules/cloud/amazon/ecs_service_facts.py
Normal file
@@ -0,0 +1,240 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ecs_service_facts
|
||||
short_description: list or describe services in ecs
|
||||
notes:
|
||||
- for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
|
||||
description:
|
||||
- Lists or describes services in ecs.
|
||||
version_added: "2.1"
|
||||
author:
|
||||
- "Mark Chance (@java1guy)"
|
||||
- "Darek Kaczynski (@kaczynskid)"
|
||||
requirements: [ json, boto, botocore, boto3 ]
|
||||
options:
|
||||
details:
|
||||
description:
|
||||
- Set this to true if you want detailed information about the services.
|
||||
required: false
|
||||
default: 'false'
|
||||
choices: ['true', 'false']
|
||||
cluster:
|
||||
description:
|
||||
- The cluster ARNS in which to list the services.
|
||||
required: false
|
||||
default: 'default'
|
||||
service:
|
||||
description:
|
||||
- The service to get details for (required if details is true)
|
||||
required: false
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Basic listing example
|
||||
- ecs_service_facts:
|
||||
cluster: test-cluster
|
||||
service: console-test-service
|
||||
details: true
|
||||
|
||||
# Basic listing example
|
||||
- ecs_service_facts:
|
||||
cluster: test-cluster
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
services:
|
||||
description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
|
||||
returned: success
|
||||
type: list of complex
|
||||
contains:
|
||||
clusterArn:
|
||||
description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service.
|
||||
returned: always
|
||||
type: string
|
||||
desiredCount:
|
||||
description: The desired number of instantiations of the task definition to keep running on the service.
|
||||
returned: always
|
||||
type: int
|
||||
loadBalancers:
|
||||
description: A list of load balancer objects
|
||||
returned: always
|
||||
type: complex
|
||||
contains:
|
||||
loadBalancerName:
|
||||
description: the name
|
||||
returned: always
|
||||
type: string
|
||||
containerName:
|
||||
description: The name of the container to associate with the load balancer.
|
||||
returned: always
|
||||
type: string
|
||||
containerPort:
|
||||
description: The port on the container to associate with the load balancer.
|
||||
returned: always
|
||||
type: int
|
||||
pendingCount:
|
||||
description: The number of tasks in the cluster that are in the PENDING state.
|
||||
returned: always
|
||||
type: int
|
||||
runningCount:
|
||||
description: The number of tasks in the cluster that are in the RUNNING state.
|
||||
returned: always
|
||||
type: int
|
||||
serviceArn:
|
||||
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
|
||||
returned: always
|
||||
type: string
|
||||
serviceName:
|
||||
description: A user-generated string used to identify the service
|
||||
returned: always
|
||||
type: string
|
||||
status:
|
||||
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
|
||||
returned: always
|
||||
type: string
|
||||
taskDefinition:
|
||||
description: The ARN of a task definition to use for tasks in the service.
|
||||
returned: always
|
||||
type: string
|
||||
deployments:
|
||||
description: list of service deployments
|
||||
returned: always
|
||||
type: list of complex
|
||||
events:
|
||||
            description: list of service events
|
||||
returned: always
|
||||
type: list of complex
|
||||
'''
|
||||
try:
|
||||
import boto
|
||||
import botocore
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class EcsServiceManager:
    """Handles ECS service listing and description via the boto3 ECS client."""

    def __init__(self, module):
        self.module = module

        try:
            # Resolve region/credentials from module params, environment
            # variables or boto config, then build the boto3 ECS client.
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def list_services(self, cluster):
        """Return dict(services=<list of service ARNs>) for the given cluster.

        When cluster is falsy the API default cluster is used.
        """
        fn_args = dict()
        if cluster:
            fn_args['cluster'] = cluster
        response = self.ecs.list_services(**fn_args)
        relevant_response = dict(services=response['serviceArns'])
        return relevant_response

    def describe_services(self, cluster, services):
        """Describe a comma-separated list of service names.

        Returns dict(services=[...]) plus services_not_running when the API
        reported failures.
        """
        fn_args = dict()
        if cluster:
            fn_args['cluster'] = cluster
        fn_args['services'] = services.split(",")
        response = self.ecs.describe_services(**fn_args)
        # Wrap map() in list(): on Python 3 map() is a lazy iterator, which
        # exit_json cannot JSON-serialize.
        relevant_response = dict(services=list(map(self.extract_service_from, response['services'])))
        if 'failures' in response and len(response['failures']) > 0:
            relevant_response['services_not_running'] = response['failures']
        return relevant_response

    def extract_service_from(self, service):
        """Stringify datetime fields in-place so the service dict is JSON serializable."""
        if 'deployments' in service:
            for d in service['deployments']:
                if 'createdAt' in d:
                    d['createdAt'] = str(d['createdAt'])
                if 'updatedAt' in d:
                    d['updatedAt'] = str(d['updatedAt'])
        if 'events' in service:
            for e in service['events']:
                if 'createdAt' in e:
                    e['createdAt'] = str(e['createdAt'])
        return service
|
||||
|
||||
def main():
    """Entry point: list service ARNs or, with details, describe named services."""

    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        details=dict(required=False, type='bool', default=False),
        cluster=dict(required=False, type='str'),
        service=dict(required=False, type='str')
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    show_details = module.params.get('details', False)

    task_mgr = EcsServiceManager(module)
    if show_details:
        # Detailed output needs to know which service(s) to describe.
        service_param = module.params.get('service')
        if not service_param:
            module.fail_json(msg="service must be specified for ecs_service_facts")
        ecs_facts = task_mgr.describe_services(module.params['cluster'], service_param)
    else:
        ecs_facts = task_mgr.list_services(module.params['cluster'])

    module.exit_json(changed=False, ansible_facts=ecs_facts)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
329
lib/ansible/modules/cloud/amazon/ecs_task.py
Normal file
329
lib/ansible/modules/cloud/amazon/ecs_task.py
Normal file
@@ -0,0 +1,329 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ecs_task
|
||||
short_description: run, start or stop a task in ecs
|
||||
description:
|
||||
- Creates or deletes instances of task definitions.
|
||||
version_added: "2.0"
|
||||
author: Mark Chance(@Java1Guy)
|
||||
requirements: [ json, boto, botocore, boto3 ]
|
||||
options:
|
||||
operation:
|
||||
description:
|
||||
- Which task operation to execute
|
||||
required: True
|
||||
choices: ['run', 'start', 'stop']
|
||||
cluster:
|
||||
description:
|
||||
- The name of the cluster to run the task on
|
||||
required: False
|
||||
task_definition:
|
||||
description:
|
||||
- The task definition to start or run
|
||||
required: False
|
||||
overrides:
|
||||
description:
|
||||
- A dictionary of values to pass to the new instances
|
||||
required: False
|
||||
count:
|
||||
description:
|
||||
- How many new instances to start
|
||||
required: False
|
||||
task:
|
||||
description:
|
||||
- The task to stop
|
||||
required: False
|
||||
container_instances:
|
||||
description:
|
||||
- The list of container instances on which to deploy the task
|
||||
required: False
|
||||
started_by:
|
||||
description:
|
||||
- A value showing who or what started the task (for informational purposes)
|
||||
required: False
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Simple example of run task
|
||||
- name: Run task
|
||||
ecs_task:
|
||||
operation: run
|
||||
cluster: console-sample-app-static-cluster
|
||||
task_definition: console-sample-app-static-taskdef
|
||||
count: 1
|
||||
started_by: ansible_user
|
||||
register: task_output
|
||||
|
||||
# Simple example of start task
|
||||
|
||||
- name: Start a task
|
||||
ecs_task:
|
||||
operation: start
|
||||
cluster: console-sample-app-static-cluster
|
||||
task_definition: console-sample-app-static-taskdef
|
||||
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
|
||||
container_instances:
|
||||
- arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
|
||||
started_by: ansible_user
|
||||
register: task_output
|
||||
|
||||
- name: Stop a task
|
||||
ecs_task:
|
||||
operation: stop
|
||||
cluster: console-sample-app-static-cluster
|
||||
task_definition: console-sample-app-static-taskdef
|
||||
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
|
||||
'''
|
||||
RETURN = '''
|
||||
task:
|
||||
description: details about the task that was started
|
||||
returned: success
|
||||
type: complex
|
||||
contains:
|
||||
taskArn:
|
||||
description: The Amazon Resource Name (ARN) that identifies the task.
|
||||
returned: always
|
||||
type: string
|
||||
clusterArn:
|
||||
description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
taskDefinitionArn:
|
||||
description: The Amazon Resource Name (ARN) of the task definition.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
containerInstanceArn:
|
||||
description: The Amazon Resource Name (ARN) of the container running the task.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
overrides:
|
||||
description: The container overrides set for this task.
|
||||
returned: only when details is true
|
||||
type: list of complex
|
||||
lastStatus:
|
||||
description: The last recorded status of the task.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
desiredStatus:
|
||||
description: The desired status of the task.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
containers:
|
||||
description: The container details.
|
||||
returned: only when details is true
|
||||
type: list of complex
|
||||
startedBy:
|
||||
description: The user who started the task.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
stoppedReason:
|
||||
description: The reason why the task was stopped.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
createdAt:
|
||||
description: The timestamp of when the task was created.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
startedAt:
|
||||
description: The timestamp of when the task was started.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
stoppedAt:
|
||||
description: The timestamp of when the task was stopped.
|
||||
returned: only when details is true
|
||||
type: string
|
||||
'''
|
||||
try:
|
||||
import boto
|
||||
import botocore
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class EcsExecManager:
    """Thin wrapper around the boto3 ECS client for task operations."""

    def __init__(self, module):
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg="Can't authorize connection - %s " % str(e))

    def list_tasks(self, cluster_name, service_name, status):
        """Return the first task ARN in *cluster_name* whose ARN ends with
        *service_name* and matches the desired *status*, or None."""
        listed = self.ecs.list_tasks(
            cluster=cluster_name,
            family=service_name,
            desiredStatus=status
        )
        for task_arn in listed['taskArns']:
            if task_arn.endswith(service_name):
                return task_arn
        return None

    def run_task(self, cluster, task_definition, overrides, count, startedBy):
        """Run *count* instances of *task_definition* and return the tasks."""
        response = self.ecs.run_task(
            cluster=cluster,
            taskDefinition=task_definition,
            overrides=overrides if overrides is not None else dict(),
            count=count,
            startedBy=startedBy)
        # response also carries 'failures'; callers only need the tasks
        return response['tasks']

    def start_task(self, cluster, task_definition, overrides, container_instances, startedBy):
        """Start a task on specific container instances; return started tasks."""
        # Only forward the arguments the caller actually supplied.
        args = dict()
        for key, value in (('cluster', cluster),
                           ('taskDefinition', task_definition),
                           ('overrides', overrides),
                           ('containerInstances', container_instances),
                           ('startedBy', startedBy)):
            if value:
                args[key] = value
        response = self.ecs.start_task(**args)
        # response also carries 'failures'; callers only need the tasks
        return response['tasks']

    def stop_task(self, cluster, task):
        """Stop *task* in *cluster* and return the stopped task description."""
        return self.ecs.stop_task(cluster=cluster, task=task)['task']
|
||||
|
||||
def main():
    """Module entry point: run, start or stop an ECS task."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=False, type='str'),               # R S P
        task_definition=dict(required=False, type='str'),       # R* S*
        overrides=dict(required=False, type='dict'),            # R S
        count=dict(required=False, type='int'),                 # R
        task=dict(required=False, type='str'),                  # P*
        container_instances=dict(required=False, type='list'),  # S*
        started_by=dict(required=False, type='str')             # R S
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Validate Requirements
    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    # Validate Inputs
    # BUGFIX: the original conditions read
    #   `not 'x' in module.params and module.params['x'] is None`
    # but every key declared in argument_spec is always present in
    # module.params, so the first operand was always False and the
    # validation could never fire.  Test the parameter value directly.
    operation = module.params['operation']

    if operation == 'run':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To run a task, a task_definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if operation == 'start':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To start a task, a task_definition must be specified")
        if module.params['container_instances'] is None:
            module.fail_json(msg="To start a task, container instances must be specified")
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if operation == 'stop':
        if module.params['task'] is None:
            module.fail_json(msg="To stop a task, a task must be specified")
        if module.params['task_definition'] is None:
            module.fail_json(msg="To stop a task, a task definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)
    # If a matching task already exists in the desired state there is
    # nothing to change.
    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)

    results = dict(changed=False)
    if operation == 'run':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'])
            results['changed'] = True

    elif operation == 'start':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by']
                )
            results['changed'] = True

    elif operation == 'stop':
        if existing:
            results['task'] = existing
        else:
            if not module.check_mode:
                # it exists, so we should delete it and mark changed.
                # return info about the cluster deleted
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'],
                    module.params['task']
                )
            results['changed'] = True

    module.exit_json(**results)


if __name__ == '__main__':
    main()
|
||||
344
lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
Normal file
344
lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
Normal file
@@ -0,0 +1,344 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: ecs_taskdefinition
|
||||
short_description: register a task definition in ecs
|
||||
description:
|
||||
- Creates or terminates task definitions
|
||||
version_added: "2.0"
|
||||
author: Mark Chance(@Java1Guy)
|
||||
requirements: [ json, boto, botocore, boto3 ]
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- State whether the task definition should exist or be deleted
|
||||
required: true
|
||||
choices: ['present', 'absent']
|
||||
arn:
|
||||
description:
|
||||
- The arn of the task description to delete
|
||||
required: false
|
||||
family:
|
||||
description:
|
||||
- A Name that would be given to the task definition
|
||||
required: false
|
||||
revision:
|
||||
description:
|
||||
- A revision number for the task definition
|
||||
required: False
|
||||
type: int
|
||||
containers:
|
||||
description:
|
||||
- A list of containers definitions
|
||||
required: False
|
||||
type: list of dicts with container definitions
|
||||
volumes:
|
||||
description:
|
||||
- A list of names of volumes to be attached
|
||||
required: False
|
||||
type: list of name
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: "Create task definition"
|
||||
ecs_taskdefinition:
|
||||
containers:
|
||||
- name: simple-app
|
||||
cpu: 10
|
||||
essential: true
|
||||
image: "httpd:2.4"
|
||||
memory: 300
|
||||
mountPoints:
|
||||
- containerPath: /usr/local/apache2/htdocs
|
||||
sourceVolume: my-vol
|
||||
portMappings:
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
- name: busybox
|
||||
command:
|
||||
- "/bin/sh -c \"while true; do echo '<html> <head> <title>Amazon ECS Sample App</title> <style>body {margin-top: 40px; background-color: #333;} </style> </head><body> <div style=color:white;text-align:center> <h1>Amazon ECS Sample App</h1> <h2>Congratulations!</h2> <p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom; cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done\""
|
||||
cpu: 10
|
||||
entryPoint:
|
||||
- sh
|
||||
- "-c"
|
||||
essential: false
|
||||
image: busybox
|
||||
memory: 200
|
||||
volumesFrom:
|
||||
- sourceContainer: simple-app
|
||||
volumes:
|
||||
- name: my-vol
|
||||
family: test-cluster-taskdef
|
||||
state: present
|
||||
register: task_output
|
||||
'''
|
||||
RETURN = '''
|
||||
taskdefinition:
|
||||
description: a reflection of the input parameters
|
||||
type: dict inputs plus revision, status, taskDefinitionArn
|
||||
'''
|
||||
try:
|
||||
import boto
|
||||
import botocore
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class EcsTaskManager:
    """Handles ECS task-definition lookup, registration and deregistration."""

    def __init__(self, module):
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            # BUGFIX: the original message was "Can't authorize connection - "
            # % str(e) with no %s placeholder, which raises
            # "TypeError: not all arguments converted during string formatting"
            # instead of reporting the real authentication error.
            module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def describe_task(self, task_name):
        """Return the task definition named *task_name*, or None if not found."""
        try:
            response = self.ecs.describe_task_definition(taskDefinition=task_name)
            return response['taskDefinition']
        except botocore.exceptions.ClientError:
            # describe raises on unknown task definitions; treat as absent
            return None

    def register_task(self, family, container_definitions, volumes):
        """Register a new revision of *family* and return its description."""
        response = self.ecs.register_task_definition(
            family=family,
            containerDefinitions=container_definitions,
            volumes=volumes)
        return response['taskDefinition']

    def describe_task_definitions(self, family):
        """Return the full descriptions of every task definition in *family*,
        sorted ascending by revision."""
        data = {
            "taskDefinitionArns": [],
            "nextToken": None
        }

        def fetch():
            # Boto3 is weird about params passed, so only pass nextToken if we have a value
            params = {
                'familyPrefix': family
            }

            if data['nextToken']:
                params['nextToken'] = data['nextToken']

            result = self.ecs.list_task_definitions(**params)
            data['taskDefinitionArns'] += result['taskDefinitionArns']
            data['nextToken'] = result.get('nextToken', None)
            return data['nextToken'] is not None

        # Fetch all the arns, possibly across multiple pages
        while fetch():
            pass

        # Return the full descriptions of the task definitions, sorted ascending by revision
        return list(sorted(
            [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition']
             for arn in data['taskDefinitionArns']],
            key=lambda td: td['revision']))

    def deregister_task(self, taskArn):
        """Deregister *taskArn* and return its (now INACTIVE) description."""
        response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
        return response['taskDefinition']
|
||||
|
||||
def main():
    """Module entry point: register or deregister an ECS task definition."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        arn=dict(required=False, type='str'),
        family=dict(required=False, type='str'),
        revision=dict(required=False, type='int'),
        containers=dict(required=False, type='list'),
        volumes=dict(required=False, type='list')
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    task_to_describe = None
    task_mgr = EcsTaskManager(module)
    results = dict(changed=False)

    if module.params['state'] == 'present':
        if 'containers' not in module.params or not module.params['containers']:
            module.fail_json(msg="To use task definitions, a list of containers must be specified")

        if 'family' not in module.params or not module.params['family']:
            module.fail_json(msg="To use task definitions, a family must be specified")

        family = module.params['family']
        existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])

        if 'revision' in module.params and module.params['revision']:
            # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
            revision = int(module.params['revision'])

            # A revision has been explicitly specified. Attempt to locate a matching revision
            tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
            existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None

            if existing and existing['status'] != "ACTIVE":
                # We cannot reactivate an inactive revision
                module.fail_json(msg="A task in family '%s' already exists for revsion %d, but it is inactive" % (family, revision))
            elif not existing:
                if len(existing_definitions_in_family) == 0 and revision != 1:
                    module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
                # BUGFIX: guard the [-1] index -- when the family is empty and
                # revision == 1 the original code raised IndexError here.
                elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
                    module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % (revision, existing_definitions_in_family[-1]['revision'] + 1))
        else:
            existing = None

            def _right_has_values_of_left(left, right):
                # Make sure the values are equivalent for everything left has
                # BUGFIX: use .items() -- .iteritems() does not exist on Python 3.
                for k, v in left.items():
                    if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
                        # We don't care about list ordering because ECS can change things
                        if isinstance(v, list) and k in right:
                            left_list = v
                            right_list = right[k] or []

                            if len(left_list) != len(right_list):
                                return False

                            for list_val in left_list:
                                if list_val not in right_list:
                                    return False
                        else:
                            return False

                # Make sure right doesn't have anything that left doesn't
                for k, v in right.items():
                    if v and k not in left:
                        return False

                return True

            def _task_definition_matches(requested_volumes, requested_containers, existing_task_definition):
                # BUGFIX: the original body referenced the enclosing loop
                # variable `td` instead of this parameter; it only worked by
                # accident of closure scoping.  Use the parameter explicitly.
                if existing_task_definition['status'] != "ACTIVE":
                    return None

                existing_volumes = existing_task_definition.get('volumes', []) or []

                if len(requested_volumes) != len(existing_volumes):
                    # Nope.
                    return None

                if len(requested_volumes) > 0:
                    for requested_vol in requested_volumes:
                        found = False

                        for actual_vol in existing_volumes:
                            if _right_has_values_of_left(requested_vol, actual_vol):
                                found = True
                                break

                        if not found:
                            return None

                existing_containers = existing_task_definition.get('containerDefinitions', []) or []

                if len(requested_containers) != len(existing_containers):
                    # Nope.
                    return None

                for requested_container in requested_containers:
                    found = False

                    for actual_container in existing_containers:
                        if _right_has_values_of_left(requested_container, actual_container):
                            found = True
                            break

                    if not found:
                        return None

                return existing_task_definition

            # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
            for td in existing_definitions_in_family:
                requested_volumes = module.params.get('volumes', []) or []
                requested_containers = module.params.get('containers', []) or []
                existing = _task_definition_matches(requested_volumes, requested_containers, td)

                if existing:
                    break

        if existing:
            # Awesome. Have an existing one. Nothing to do.
            results['taskdefinition'] = existing
        else:
            if not module.check_mode:
                # Doesn't exist. create it.
                volumes = module.params.get('volumes', []) or []
                results['taskdefinition'] = task_mgr.register_task(module.params['family'],
                                                                   module.params['containers'], volumes)
            results['changed'] = True

    elif module.params['state'] == 'absent':
        # When de-registering a task definition, we can specify the ARN OR the family and revision.
        if 'arn' in module.params and module.params['arn'] is not None:
            task_to_describe = module.params['arn']
        elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
                module.params['revision'] is not None:
            task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
        else:
            module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")

        existing = task_mgr.describe_task(task_to_describe)

        if not existing:
            pass
        else:
            # It exists, so we should delete it and mark changed. Return info about the task definition deleted
            results['taskdefinition'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    task_mgr.deregister_task(task_to_describe)
                results['changed'] = True

    module.exit_json(**results)


if __name__ == '__main__':
    main()
|
||||
630
lib/ansible/modules/cloud/amazon/efs.py
Normal file
630
lib/ansible/modules/cloud/amazon/efs.py
Normal file
@@ -0,0 +1,630 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: efs
|
||||
short_description: create and maintain EFS file systems
|
||||
description:
|
||||
- Module allows create, search and destroy Amazon EFS file systems
|
||||
version_added: "2.2"
|
||||
requirements: [ boto3 ]
|
||||
author:
|
||||
- "Ryan Sydnor (@ryansydnor)"
|
||||
- "Artem Kazakov (@akazakov)"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Allows to create, search and destroy Amazon EFS file system
|
||||
required: false
|
||||
default: 'present'
|
||||
choices: ['present', 'absent']
|
||||
name:
|
||||
description:
|
||||
- Creation Token of Amazon EFS file system. Required for create. Either name or ID required for delete.
|
||||
required: false
|
||||
default: None
|
||||
id:
|
||||
description:
|
||||
- ID of Amazon EFS. Either name or ID required for delete.
|
||||
required: false
|
||||
default: None
|
||||
performance_mode:
|
||||
description:
|
||||
- File system's performance mode to use. Only takes effect during creation.
|
||||
required: false
|
||||
default: 'general_purpose'
|
||||
choices: ['general_purpose', 'max_io']
|
||||
tags:
|
||||
description:
|
||||
- "List of tags of Amazon EFS. Should be defined as dictionary
|
||||
In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
|
||||
required: false
|
||||
default: None
|
||||
targets:
|
||||
description:
|
||||
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
|
||||
- subnet_id - Mandatory. The ID of the subnet to add the mount target in.
|
||||
- ip_address - Optional. A valid IPv4 address within the address range of the specified subnet.
|
||||
- security_groups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified
|
||||
This data may be modified for existing EFS using state 'present' and new list of mount targets."
|
||||
required: false
|
||||
default: None
|
||||
wait:
|
||||
description:
|
||||
- "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
|
||||
In case of 'absent' state should wait for EFS 'deleted' life cycle state"
|
||||
required: false
|
||||
default: "no"
|
||||
choices: ["yes", "no"]
|
||||
wait_timeout:
|
||||
description:
|
||||
- How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
|
||||
required: false
|
||||
default: 0
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# EFS provisioning
|
||||
- efs:
|
||||
state: present
|
||||
name: myTestEFS
|
||||
tags:
|
||||
name: myTestNameTag
|
||||
purpose: file-storage
|
||||
targets:
|
||||
- subnet_id: subnet-748c5d03
|
||||
security_groups: [ "sg-1a2b3c4d" ]
|
||||
|
||||
# Modifying EFS data
|
||||
- efs:
|
||||
state: present
|
||||
name: myTestEFS
|
||||
tags:
|
||||
name: myAnotherTestTag
|
||||
targets:
|
||||
- subnet_id: subnet-7654fdca
|
||||
security_groups: [ "sg-4c5d6f7a" ]
|
||||
|
||||
# Deleting EFS
|
||||
- efs:
|
||||
state: absent
|
||||
name: myTestEFS
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
creation_time:
|
||||
description: timestamp of creation date
|
||||
returned:
|
||||
type: datetime
|
||||
sample: 2015-11-16 07:30:57-05:00
|
||||
creation_token:
|
||||
description: EFS creation token
|
||||
returned:
|
||||
type: UUID
|
||||
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
|
||||
file_system_id:
|
||||
description: ID of the file system
|
||||
returned:
|
||||
type: unique ID
|
||||
sample: fs-xxxxxxxx
|
||||
life_cycle_state:
|
||||
description: state of the EFS file system
|
||||
returned:
|
||||
type: str
|
||||
sample: creating, available, deleting, deleted
|
||||
mount_point:
|
||||
description: url of file system
|
||||
returned:
|
||||
type: str
|
||||
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
|
||||
mount_targets:
|
||||
description: list of mount targets
|
||||
returned:
|
||||
type: list of dicts
|
||||
sample:
|
||||
[
|
||||
{
|
||||
"file_system_id": "fs-a7ad440e",
|
||||
"ip_address": "172.31.17.173",
|
||||
"life_cycle_state": "available",
|
||||
"mount_target_id": "fsmt-d8907871",
|
||||
"network_interface_id": "eni-6e387e26",
|
||||
"owner_id": "740748460359",
|
||||
"security_groups": [
|
||||
"sg-a30b22c6"
|
||||
],
|
||||
"subnet_id": "subnet-e265c895"
|
||||
},
|
||||
...
|
||||
]
|
||||
name:
|
||||
description: name of the file system
|
||||
returned:
|
||||
type: str
|
||||
sample: my-efs
|
||||
number_of_mount_targets:
|
||||
description: the number of targets mounted
|
||||
returned:
|
||||
type: int
|
||||
sample: 3
|
||||
owner_id:
|
||||
description: AWS account ID of EFS owner
|
||||
returned:
|
||||
type: str
|
||||
sample: XXXXXXXXXXXX
|
||||
size_in_bytes:
|
||||
description: size of the file system in bytes as of a timestamp
|
||||
returned:
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"timestamp": "2015-12-21 13:59:59-05:00",
|
||||
"value": 12288
|
||||
}
|
||||
performance_mode:
|
||||
description: performance mode of the file system
|
||||
returned:
|
||||
type: str
|
||||
sample: "generalPurpose"
|
||||
tags:
|
||||
description: tags on the efs instance
|
||||
returned:
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"name": "my-efs",
|
||||
"key": "Value"
|
||||
}
|
||||
|
||||
'''
|
||||
|
||||
import sys
|
||||
from time import sleep
|
||||
from time import time as timestamp
|
||||
from collections import defaultdict
|
||||
|
||||
try:
|
||||
from botocore.exceptions import ClientError
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError as e:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
class EFSConnection(object):
    """
    Thin wrapper around a boto3 EFS client implementing the create,
    converge (tags + mount targets) and delete operations of this module.
    """

    # Default for wait_for(); 0 means "no timeout" (wait indefinitely).
    DEFAULT_WAIT_TIMEOUT_SECONDS = 0

    # Life-cycle states reported by the EFS API.
    STATE_CREATING = 'creating'
    STATE_AVAILABLE = 'available'
    STATE_DELETING = 'deleting'
    STATE_DELETED = 'deleted'

    def __init__(self, module, region, **aws_connect_params):
        """
        Open a boto3 'efs' client and cache the module's wait settings.
        Fails the module run on any connection error.
        """
        try:
            self.connection = boto3_conn(module, conn_type='client',
                                         resource='efs', region=region,
                                         **aws_connect_params)
        except Exception as e:
            module.fail_json(msg="Failed to connect to AWS: %s" % str(e))

        self.region = region
        self.wait = module.params.get('wait')
        self.wait_timeout = module.params.get('wait_timeout')

    def get_file_systems(self, **kwargs):
        """
        Returns generator of file systems including all attributes of FS
        """
        items = iterate_all(
            'FileSystems',
            self.connection.describe_file_systems,
            **kwargs
        )
        for item in items:
            # Stringify datetimes so the result is JSON-serializable.
            item['CreationTime'] = str(item['CreationTime'])
            # Suffix of network path to be used as NFS device for mount. More detail here:
            # http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
            item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
            if 'Timestamp' in item['SizeInBytes']:
                item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
            if item['LifeCycleState'] == self.STATE_AVAILABLE:
                item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
                item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
            else:
                # Tags and mount targets cannot be queried while the FS is not available.
                item['Tags'] = {}
                item['MountTargets'] = []
            yield item

    def get_tags(self, **kwargs):
        """
        Returns tag list for selected instance of EFS
        """
        tags = iterate_all(
            'Tags',
            self.connection.describe_tags,
            **kwargs
        )
        return dict((tag['Key'], tag['Value']) for tag in tags)

    def get_mount_targets(self, **kwargs):
        """
        Returns mount targets for selected instance of EFS
        """
        targets = iterate_all(
            'MountTargets',
            self.connection.describe_mount_targets,
            **kwargs
        )
        for target in targets:
            if target['LifeCycleState'] == self.STATE_AVAILABLE:
                target['SecurityGroups'] = list(self.get_security_groups(
                    MountTargetId=target['MountTargetId']
                ))
            else:
                # Security groups of a non-available target cannot be queried.
                target['SecurityGroups'] = []
            yield target

    def get_security_groups(self, **kwargs):
        """
        Returns security groups for selected instance of EFS
        """
        return iterate_all(
            'SecurityGroups',
            self.connection.describe_mount_target_security_groups,
            **kwargs
        )

    def get_file_system_id(self, name):
        """
        Returns ID of instance by instance name (creation token), or None.
        """
        info = first_or_default(iterate_all(
            'FileSystems',
            self.connection.describe_file_systems,
            CreationToken=name
        ))
        return info and info['FileSystemId'] or None

    def get_file_system_state(self, name, file_system_id=None):
        """
        Returns state of filesystem by EFS id/name; 'deleted' when not found.
        """
        info = first_or_default(iterate_all(
            'FileSystems',
            self.connection.describe_file_systems,
            CreationToken=name,
            FileSystemId=file_system_id
        ))
        return info and info['LifeCycleState'] or self.STATE_DELETED

    def get_mount_targets_in_state(self, file_system_id, states=None):
        """
        Returns mount targets of selected EFS, optionally restricted to the
        given life-cycle state(s) (a single state or a list of states).
        """
        targets = iterate_all(
            'MountTargets',
            self.connection.describe_mount_targets,
            FileSystemId=file_system_id
        )

        if states:
            if not isinstance(states, list):
                states = [states]
            targets = filter(lambda target: target['LifeCycleState'] in states, targets)

        return list(targets)

    def create_file_system(self, name, performance_mode):
        """
        Creates new filesystem with selected name (idempotent: a no-op when
        the file system already exists). Returns True when a creation happened.
        """
        changed = False
        state = self.get_file_system_state(name)
        if state in [self.STATE_DELETING, self.STATE_DELETED]:
            # Let any in-flight deletion finish before reusing the name.
            wait_for(
                lambda: self.get_file_system_state(name),
                self.STATE_DELETED
            )
            self.connection.create_file_system(CreationToken=name, PerformanceMode=performance_mode)
            changed = True

        # we always wait for the state to be available when creating.
        # if we try to take any actions on the file system before it's available
        # we'll throw errors
        wait_for(
            lambda: self.get_file_system_state(name),
            self.STATE_AVAILABLE,
            self.wait_timeout
        )

        return changed

    def converge_file_system(self, name, tags, targets):
        """
        Change attributes (mount targets and tags) of filesystem by name.
        Returns True when anything was modified.
        """
        result = False
        fs_id = self.get_file_system_id(name)

        if tags is not None:
            tags_to_create, _, tags_to_delete = dict_diff(self.get_tags(FileSystemId=fs_id), tags)

            if tags_to_delete:
                self.connection.delete_tags(
                    FileSystemId=fs_id,
                    TagKeys=[item[0] for item in tags_to_delete]
                )
                result = True

            if tags_to_create:
                self.connection.create_tags(
                    FileSystemId=fs_id,
                    Tags=[{'Key': item[0], 'Value': item[1]} for item in tags_to_create]
                )
                result = True

        if targets is not None:
            # Wait until no target is mid-transition before diffing.
            incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
            wait_for(
                lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
                0
            )

            def index_by_subnet_id(items):
                # Key mount targets by subnet id, the attribute that identifies them.
                return dict((item['SubnetId'], item) for item in items)

            current_targets = index_by_subnet_id(self.get_mount_targets(FileSystemId=fs_id))
            targets = index_by_subnet_id(targets)

            targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
                                                                           targets, True)

            # To modify a mount target it has to be deleted and created again.
            # BUG FIX: materialize filter() with list() -- on Python 3 filter()
            # returns a one-shot iterator, so the two '+' concatenations below
            # raised TypeError and would consume the iterator twice.
            changed = list(filter(
                lambda sid: not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
                                              current_targets[sid], targets[sid]), intersection))
            targets_to_delete = list(targets_to_delete) + changed
            targets_to_create = list(targets_to_create) + changed

            if targets_to_delete:
                for sid in targets_to_delete:
                    self.connection.delete_mount_target(
                        MountTargetId=current_targets[sid]['MountTargetId']
                    )
                wait_for(
                    lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
                    0
                )
                result = True

            if targets_to_create:
                for sid in targets_to_create:
                    self.connection.create_mount_target(
                        FileSystemId=fs_id,
                        **targets[sid]
                    )
                wait_for(
                    lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
                    0,
                    self.wait_timeout
                )
                result = True

            # BUG FIX: materialize the filter so the truthiness test below is
            # meaningful on Python 3 (an empty filter object is still truthy).
            security_groups_to_update = list(filter(
                lambda sid: 'SecurityGroups' in targets[sid] and
                current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups'],
                intersection
            ))

            if security_groups_to_update:
                for sid in security_groups_to_update:
                    self.connection.modify_mount_target_security_groups(
                        MountTargetId=current_targets[sid]['MountTargetId'],
                        SecurityGroups=targets[sid]['SecurityGroups']
                    )
                result = True

        return result

    def delete_file_system(self, name, file_system_id=None):
        """
        Removes EFS instance by id/name. Returns True when a deletion happened.
        """
        result = False
        state = self.get_file_system_state(name, file_system_id)
        if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
            # A file system can only be deleted once fully available.
            wait_for(
                lambda: self.get_file_system_state(name),
                self.STATE_AVAILABLE
            )
            if not file_system_id:
                file_system_id = self.get_file_system_id(name)
            # Mount targets must be removed before the file system itself.
            self.delete_mount_targets(file_system_id)
            self.connection.delete_file_system(FileSystemId=file_system_id)
            result = True

        if self.wait:
            wait_for(
                lambda: self.get_file_system_state(name),
                self.STATE_DELETED,
                self.wait_timeout
            )

        return result

    def delete_mount_targets(self, file_system_id):
        """
        Removes all mount targets by EFS id. Returns True when any existed.
        """
        # Let any in-flight creations finish before deleting.
        wait_for(
            lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
            0
        )

        targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
        for target in targets:
            self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])

        wait_for(
            lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
            0
        )

        return len(targets) > 0
|
||||
|
||||
|
||||
def iterate_all(attr, map_method, **kwargs):
    """
    Iterate over a paginated boto3 result set, yielding each element of the
    *attr* list from every page. kwargs whose value is None are dropped.
    Throttling errors are retried with exponential backoff (1s doubling up
    to <600s); any other ClientError is re-raised.
    """
    args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
    wait = 1
    while True:
        try:
            data = map_method(**args)
            for elm in data[attr]:
                yield elm
            if 'NextMarker' in data:
                # BUG FIX: was data['Nextmarker'] (wrong case), which raised
                # KeyError as soon as a result set spanned more than one page.
                args['Marker'] = data['NextMarker']
                continue
            break
        except ClientError as e:
            if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
                sleep(wait)
                wait = wait * 2
                continue
            # BUG FIX: previously any other error fell through and the loop
            # retried forever; propagate it instead.
            raise
|
||||
|
||||
def targets_equal(keys, a, b):
    """
    Compare two mount-target dicts on every attribute in *keys* that *b*
    actually defines; attributes missing from *b* are ignored.
    """
    return all(a[key] == b[key] for key in keys if key in b)
|
||||
|
||||
|
||||
def dict_diff(dict1, dict2, by_key=False):
    """
    Compute the difference between two dicts.

    Compares (key, value) pairs by default, or keys only when *by_key* is
    true. Returns a 3-tuple of sets: (only-in-dict2, in-both, only-in-dict1).
    """
    left = set(dict1 if by_key else dict1.items())
    right = set(dict2 if by_key else dict2.items())

    common = left & right

    return right - common, common, left - common
|
||||
|
||||
|
||||
def first_or_default(items, default=None):
    """
    Return the first element of the iterable *items*, or *default* when it
    yields nothing.
    """
    return next(iter(items), default)
|
||||
|
||||
|
||||
def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
    """
    Poll *callback* every 5 seconds until it returns *value*.

    A *timeout* of 0 waits indefinitely; otherwise a RuntimeError is raised
    once more than *timeout* seconds have elapsed.
    """
    started_at = timestamp()
    while callback() != value:
        if timeout != 0 and (timestamp() - started_at > timeout):
            raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
        sleep(5)
|
||||
|
||||
|
||||
def main():
    """
    Module entry point: ensures the requested EFS file system is present
    (created and converged to the requested tags/targets) or absent, then
    exits with the changed flag and the resulting facts.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
        id=dict(required=False, type='str', default=None),
        name=dict(required=False, type='str', default=None),
        tags=dict(required=False, type="dict", default={}),
        targets=dict(required=False, type="list", default=[]),
        performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
        wait=dict(required=False, type="bool", default=False),
        wait_timeout=dict(required=False, type="int", default=0)
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, _, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    connection = EFSConnection(module, region, **aws_connect_kwargs)

    name = module.params.get('name')
    fs_id = module.params.get('id')
    tags = module.params.get('tags')

    # Translate snake_case playbook keys into the CamelCase keys boto3 expects.
    camel_target_keys = {
        'ip_address': 'IpAddress',
        'security_groups': 'SecurityGroups',
        'subnet_id': 'SubnetId'
    }
    targets = [
        dict((camel_target_keys[key], value) for (key, value) in target.items())
        for target in module.params.get('targets')
    ]

    perf_mode_map = {
        'general_purpose': 'generalPurpose',
        'max_io': 'maxIO'
    }
    performance_mode = perf_mode_map[module.params.get('performance_mode')]

    state = str(module.params.get('state')).lower()
    changed = False

    if state == 'present':
        if not name:
            module.fail_json(msg='Name parameter is required for create')

        changed = connection.create_file_system(name, performance_mode)
        changed = connection.converge_file_system(name=name, tags=tags, targets=targets) or changed
        result = first_or_default(connection.get_file_systems(CreationToken=name))

    elif state == 'absent':
        if not name and not fs_id:
            module.fail_json(msg='Either name or id parameter is required for delete')

        changed = connection.delete_file_system(name, fs_id)
        result = None

    if result:
        result = camel_dict_to_snake_dict(result)
    module.exit_json(changed=changed, efs=result)
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
379
lib/ansible/modules/cloud/amazon/efs_facts.py
Normal file
379
lib/ansible/modules/cloud/amazon/efs_facts.py
Normal file
@@ -0,0 +1,379 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: efs_facts
|
||||
short_description: Get information about Amazon EFS file systems
|
||||
description:
|
||||
- Module searches Amazon EFS file systems
|
||||
version_added: "2.2"
|
||||
requirements: [ boto3 ]
|
||||
author:
|
||||
- "Ryan Sydnor (@ryansydnor)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Creation Token of Amazon EFS file system.
|
||||
required: false
|
||||
default: None
|
||||
id:
|
||||
description:
|
||||
- ID of Amazon EFS.
|
||||
required: false
|
||||
default: None
|
||||
tags:
|
||||
description:
|
||||
- List of tags of Amazon EFS. Should be defined as dictionary
|
||||
required: false
|
||||
default: None
|
||||
targets:
|
||||
description:
|
||||
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
|
||||
- SubnetId - Mandatory. The ID of the subnet to add the mount target in.
|
||||
- IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
|
||||
- SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified."
|
||||
required: false
|
||||
default: None
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# find all existing efs
|
||||
- efs_facts:
|
||||
register: result
|
||||
|
||||
- efs_facts:
|
||||
name: myTestNameTag
|
||||
|
||||
- efs_facts:
|
||||
id: fs-1234abcd
|
||||
|
||||
# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
|
||||
- efs_facts:
|
||||
tags:
|
||||
name: myTestNameTag
|
||||
targets:
|
||||
- subnet-1a2b3c4d
|
||||
- sg-4d3c2b1a
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
creation_time:
|
||||
description: timestamp of creation date
|
||||
returned:
|
||||
type: datetime
|
||||
sample: 2015-11-16 07:30:57-05:00
|
||||
creation_token:
|
||||
description: EFS creation token
|
||||
returned:
|
||||
type: UUID
|
||||
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
|
||||
file_system_id:
|
||||
description: ID of the file system
|
||||
returned:
|
||||
type: unique ID
|
||||
sample: fs-xxxxxxxx
|
||||
life_cycle_state:
|
||||
description: state of the EFS file system
|
||||
returned:
|
||||
type: str
|
||||
sample: creating, available, deleting, deleted
|
||||
mount_point:
|
||||
description: url of file system
|
||||
returned:
|
||||
type: str
|
||||
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
|
||||
mount_targets:
|
||||
description: list of mount targets
|
||||
returned:
|
||||
type: list of dicts
|
||||
sample:
|
||||
[
|
||||
{
|
||||
"file_system_id": "fs-a7ad440e",
|
||||
"ip_address": "172.31.17.173",
|
||||
"life_cycle_state": "available",
|
||||
"mount_target_id": "fsmt-d8907871",
|
||||
"network_interface_id": "eni-6e387e26",
|
||||
"owner_id": "740748460359",
|
||||
"security_groups": [
|
||||
"sg-a30b22c6"
|
||||
],
|
||||
"subnet_id": "subnet-e265c895"
|
||||
},
|
||||
...
|
||||
]
|
||||
name:
|
||||
description: name of the file system
|
||||
returned:
|
||||
type: str
|
||||
sample: my-efs
|
||||
number_of_mount_targets:
|
||||
description: the number of targets mounted
|
||||
returned:
|
||||
type: int
|
||||
sample: 3
|
||||
owner_id:
|
||||
description: AWS account ID of EFS owner
|
||||
returned:
|
||||
type: str
|
||||
sample: XXXXXXXXXXXX
|
||||
size_in_bytes:
|
||||
description: size of the file system in bytes as of a timestamp
|
||||
returned:
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"timestamp": "2015-12-21 13:59:59-05:00",
|
||||
"value": 12288
|
||||
}
|
||||
performance_mode:
|
||||
description: performance mode of the file system
|
||||
returned:
|
||||
type: str
|
||||
sample: "generalPurpose"
|
||||
tags:
|
||||
description: tags on the efs instance
|
||||
returned:
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"name": "my-efs",
|
||||
"key": "Value"
|
||||
}
|
||||
|
||||
'''
|
||||
|
||||
|
||||
from time import sleep
|
||||
from collections import defaultdict
|
||||
|
||||
try:
|
||||
from botocore.exceptions import ClientError
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError as e:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
class EFSConnection(object):
    """
    Read-only wrapper around a boto3 EFS client, used to gather facts about
    file systems, their tags, mount targets and security groups.
    """

    # Life-cycle states reported by the EFS API.
    STATE_CREATING = 'creating'
    STATE_AVAILABLE = 'available'
    STATE_DELETING = 'deleting'
    STATE_DELETED = 'deleted'

    def __init__(self, module, region, **aws_connect_params):
        """
        Open a boto3 'efs' client; any connection failure aborts the module.
        """
        try:
            self.connection = boto3_conn(module, conn_type='client',
                                         resource='efs', region=region,
                                         **aws_connect_params)
        except Exception as e:
            module.fail_json(msg="Failed to connect to AWS: %s" % str(e))

        # Region is kept to build the NFS mount point hostname below.
        self.region = region

    def get_file_systems(self, **kwargs):
        """
        Returns generator of file systems including all attributes of FS
        """
        items = iterate_all(
            'FileSystems',
            self.connection.describe_file_systems,
            **kwargs
        )
        for item in items:
            # Stringify datetimes so the result is JSON-serializable.
            item['CreationTime'] = str(item['CreationTime'])
            """
            Suffix of network path to be used as NFS device for mount. More detail here:
            http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
            """
            item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
            if 'Timestamp' in item['SizeInBytes']:
                item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
            if item['LifeCycleState'] == self.STATE_AVAILABLE:
                item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
                item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
            else:
                # Tags and mount targets cannot be queried while the FS is not available.
                item['Tags'] = {}
                item['MountTargets'] = []
            yield item

    def get_tags(self, **kwargs):
        """
        Returns tag list for selected instance of EFS
        """
        tags = iterate_all(
            'Tags',
            self.connection.describe_tags,
            **kwargs
        )
        return dict((tag['Key'], tag['Value']) for tag in tags)

    def get_mount_targets(self, **kwargs):
        """
        Returns mount targets for selected instance of EFS
        """
        targets = iterate_all(
            'MountTargets',
            self.connection.describe_mount_targets,
            **kwargs
        )
        for target in targets:
            if target['LifeCycleState'] == self.STATE_AVAILABLE:
                target['SecurityGroups'] = list(self.get_security_groups(
                    MountTargetId=target['MountTargetId']
                ))
            else:
                # Security groups of a non-available target cannot be queried.
                target['SecurityGroups'] = []
            yield target

    def get_security_groups(self, **kwargs):
        """
        Returns security groups for selected instance of EFS
        """
        return iterate_all(
            'SecurityGroups',
            self.connection.describe_mount_target_security_groups,
            **kwargs
        )
|
||||
|
||||
|
||||
def iterate_all(attr, map_method, **kwargs):
    """
    Iterate over a paginated boto3 result set, yielding each element of the
    *attr* list from every page. kwargs whose value is None are dropped.
    Throttling errors are retried with exponential backoff (1s doubling up
    to <600s); any other ClientError is re-raised.
    """
    args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
    wait = 1
    while True:
        try:
            data = map_method(**args)
            for elm in data[attr]:
                yield elm
            if 'NextMarker' in data:
                # BUG FIX: was data['Nextmarker'] (wrong case), which raised
                # KeyError as soon as a result set spanned more than one page.
                args['Marker'] = data['NextMarker']
                continue
            break
        except ClientError as e:
            if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
                sleep(wait)
                wait = wait * 2
                continue
            # BUG FIX: previously any other error fell through and the loop
            # retried forever; propagate it instead.
            raise
|
||||
|
||||
|
||||
def prefix_to_attr(attr_id):
    """
    Map an AWS resource-ID prefix to the corresponding mount-target
    attribute name. Anything without a known prefix is treated as an
    IP address.
    """
    attr_by_prefix = {
        'fsmt-': 'MountTargetId',
        'subnet-': 'SubnetId',
        'eni-': 'NetworkInterfaceId',
        'sg-': 'SecurityGroups'
    }
    for prefix, attr in attr_by_prefix.items():
        if str(attr_id).startswith(prefix):
            return attr
    return 'IpAddress'
|
||||
|
||||
def first_or_default(items, default=None):
    """
    Return the first element of the iterable *items*, or *default* when it
    yields nothing.
    """
    return next(iter(items), default)
|
||||
|
||||
def has_tags(available, required):
    """
    Return True when every tag in *required* is present in *available*
    with exactly the same value.
    """
    return all(
        key in available and available[key] == value
        for key, value in required.items()
    )
|
||||
|
||||
def has_targets(available, required):
    """
    Return True when every (value, field) pair in *required* can be found
    among the mount-target dicts in *available*.
    """
    # Collect, per field, every value seen across all mount targets
    # (list values are flattened, scalars appended).
    grouped = defaultdict(list)
    for target in available:
        for field, value in target.items():
            grouped[field] += value if isinstance(value, list) else [value]
    return all(
        field in grouped and value in grouped[field]
        for (value, field) in required
    )
|
||||
|
||||
def group_list_of_dict(array):
    """
    Merge a list of dicts into a single mapping from each key to the list
    of all values observed for it; list values are extended, scalars
    appended.
    """
    merged = defaultdict(list)
    for entry in array:
        for key, value in entry.items():
            if isinstance(value, list):
                merged[key].extend(value)
            else:
                merged[key].append(value)
    return merged
|
||||
|
||||
|
||||
def main():
    """
    Module entry point: gathers facts about EFS file systems, optionally
    narrowed down by name/id, required tags, and required mount-target
    attributes, and exits with the matching facts.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        id=dict(required=False, type='str', default=None),
        name=dict(required=False, type='str', default=None),
        tags=dict(required=False, type="dict", default={}),
        targets=dict(required=False, type="list", default=[])
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, _, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    connection = EFSConnection(module, region, **aws_connect_kwargs)

    name = module.params.get('name')
    fs_id = module.params.get('id')
    tags = module.params.get('tags')
    targets = module.params.get('targets')

    matched = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name)

    if tags:
        matched = (fs for fs in matched if has_tags(fs['Tags'], tags))

    if targets:
        # Each requested target ID is classified by its prefix into the
        # mount-target attribute it must match.
        required = [(item, prefix_to_attr(item)) for item in targets]
        matched = (fs for fs in matched if has_targets(fs['MountTargets'], required))

    file_systems_info = [camel_dict_to_snake_dict(fs) for fs in matched]
    module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
287
lib/ansible/modules/cloud/amazon/execute_lambda.py
Normal file
287
lib/ansible/modules/cloud/amazon/execute_lambda.py
Normal file
@@ -0,0 +1,287 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: execute_lambda
|
||||
short_description: Execute an AWS Lambda function
|
||||
description:
|
||||
- This module executes AWS Lambda functions, allowing synchronous and asynchronous
|
||||
invocation.
|
||||
version_added: "2.2"
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
author: "Ryan Scott Brown (@ryansb) <ryansb@redhat.com>"
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- boto3
|
||||
notes:
|
||||
- Async invocation will always return an empty C(output) key.
|
||||
- Synchronous invocation may result in a function timeout, resulting in an
|
||||
empty C(output) key.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the function to be invoked. This can only be used for
|
||||
invocations within the calling account. To invoke a function in another
|
||||
account, use I(function_arn) to specify the full ARN.
|
||||
required: false
|
||||
default: None
|
||||
function_arn:
|
||||
description:
|
||||
- The name of the function to be invoked
|
||||
required: false
|
||||
default: None
|
||||
tail_log:
|
||||
description:
|
||||
- If C(tail_log=true), the result of the task will include the last 4 KB
|
||||
of the CloudWatch log for the function execution. Log tailing only
|
||||
works if you use synchronous invocation C(wait=true). This is usually
|
||||
used for development or testing Lambdas.
|
||||
required: false
|
||||
default: false
|
||||
wait:
|
||||
description:
|
||||
- Whether to wait for the function results or not. If I(wait) is false,
|
||||
the task will not return any results. To wait for the Lambda function
|
||||
to complete, set C(wait=true) and the result will be available in the
|
||||
I(output) key.
|
||||
required: false
|
||||
default: true
|
||||
dry_run:
|
||||
description:
|
||||
- Do not *actually* invoke the function. A C(DryRun) call will check that
|
||||
the caller has permissions to call the function, especially for
|
||||
checking cross-account permissions.
|
||||
required: false
|
||||
default: False
|
||||
version_qualifier:
|
||||
description:
|
||||
- Which version/alias of the function to run. This defaults to the
|
||||
C(LATEST) revision, but can be set to any existing version or alias.
|
||||
See https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html
|
||||
for details.
|
||||
required: false
|
||||
default: LATEST
|
||||
payload:
|
||||
description:
|
||||
- A dictionary in any form to be provided as input to the Lambda function.
|
||||
required: false
|
||||
default: {}
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- execute_lambda:
|
||||
name: test-function
|
||||
# the payload is automatically serialized and sent to the function
|
||||
payload:
|
||||
foo: bar
|
||||
value: 8
|
||||
register: response
|
||||
|
||||
# Test that you have sufficient permissions to execute a Lambda function in
|
||||
# another account
|
||||
- execute_lambda:
|
||||
function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
|
||||
dry_run: true
|
||||
|
||||
- execute_lambda:
|
||||
name: test-function
|
||||
payload:
|
||||
foo: bar
|
||||
value: 8
|
||||
wait: true
|
||||
tail_log: true
|
||||
register: response
|
||||
# the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda.
|
||||
|
||||
- execute_lambda:
|
||||
name: test-function
|
||||
version_qualifier: PRODUCTION
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
output:
|
||||
description: Function output if wait=true and the function returns a value
|
||||
returned: success
|
||||
type: dict
|
||||
sample: "{ 'output': 'something' }"
|
||||
logs:
|
||||
description: The last 4KB of the function logs. Only provided if I(tail_log) is true
|
||||
type: string
|
||||
status:
|
||||
description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
|
||||
type: int
|
||||
sample: 200
|
||||
'''
|
||||
|
||||
import base64
|
||||
import json
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
def main():
    """Invoke an AWS Lambda function and return its result.

    The function is identified by ``name`` or ``function_arn`` (mutually
    exclusive). ``payload`` is serialized to JSON and sent with the invoke.
    ``wait=true`` invokes synchronously (RequestResponse) and captures the
    return value; otherwise the invoke is fire-and-forget (Event).
    ``dry_run`` — or Ansible check mode — only verifies invoke permissions.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(),
        function_arn=dict(),
        # `choices=BOOLEANS` dropped: type='bool' already handles truthy-string
        # coercion, and BOOLEANS as `choices` is deprecated.
        wait=dict(default=True, type='bool'),
        tail_log=dict(default=False, type='bool'),
        dry_run=dict(default=False, type='bool'),
        version_qualifier=dict(),
        payload=dict(default={}, type='dict'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['name', 'function_arn'],
        ]
    )

    # Single boto3 availability check (the original checked twice).
    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')

    name = module.params.get('name')
    function_arn = module.params.get('function_arn')
    await_return = module.params.get('wait')
    dry_run = module.params.get('dry_run')
    tail_log = module.params.get('tail_log')
    version_qualifier = module.params.get('version_qualifier')
    payload = module.params.get('payload')

    if not (name or function_arn):
        module.fail_json(msg="Must provide either a function_arn or a name to invoke.")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3)
    if not region:
        module.fail_json(msg="The AWS region must be specified as an "
                             "environment variable or in the AWS credentials "
                             "profile.")

    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        # BUGFIX: traceback.format_exc() takes no exception argument; passing
        # one (as the original did) is invalid on Python 3.
        module.fail_json(msg="Failure connecting boto3 to AWS", exception=traceback.format_exc())

    invoke_params = {}

    if await_return:
        # Wait for the function's response.
        invoke_params['InvocationType'] = 'RequestResponse'
    else:
        # Fire and forget.
        invoke_params['InvocationType'] = 'Event'
    if dry_run or module.check_mode:
        # dry_run (and check mode) overrides the invocation type.
        invoke_params['InvocationType'] = 'DryRun'

    if tail_log and await_return:
        invoke_params['LogType'] = 'Tail'
    elif tail_log and not await_return:
        module.fail_json(msg="The `tail_log` parameter is only available if "
                             "the invocation waits for the function to complete. "
                             "Set `wait` to true or turn off `tail_log`.")
    else:
        invoke_params['LogType'] = 'None'

    if version_qualifier:
        invoke_params['Qualifier'] = version_qualifier

    if payload:
        invoke_params['Payload'] = json.dumps(payload)

    if function_arn:
        invoke_params['FunctionName'] = function_arn
    elif name:
        invoke_params['FunctionName'] = name

    try:
        response = client.invoke(**invoke_params)
    except botocore.exceptions.ClientError as ce:
        if ce.response['Error']['Code'] == 'ResourceNotFoundException':
            module.fail_json(msg="Could not find Lambda to execute. Make sure "
                                 "the ARN is correct and your profile has "
                                 "permissions to execute this function.",
                             exception=traceback.format_exc())
        # BUGFIX: fail_json() accepts only keyword arguments; the original
        # passed the message positionally, which raised TypeError.
        module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error",
                         exception=traceback.format_exc())
    except botocore.exceptions.ParamValidationError:
        module.fail_json(msg="Parameters to `invoke` failed to validate",
                         exception=traceback.format_exc())
    except Exception:
        module.fail_json(msg="Unexpected failure while invoking Lambda function",
                         exception=traceback.format_exc())

    results = {
        'logs': '',
        'status': response['StatusCode'],
        'output': '',
    }

    if response.get('LogResult'):
        try:
            # Logs are base64 encoded in the API response.
            # NOTE(review): b64decode returns bytes on Python 3; confirm whether
            # callers expect text before decoding to str here.
            results['logs'] = base64.b64decode(response.get('LogResult', ''))
        except Exception:
            module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc())

    if invoke_params['InvocationType'] == 'RequestResponse':
        try:
            results['output'] = json.loads(response['Payload'].read())
        except Exception:
            module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())

        if isinstance(results.get('output'), dict) and any(
                [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
            # AWS sends back stack traces and error messages when a function
            # failed in a RequestResponse (synchronous) context.
            template = ("Function executed, but there was an error in the Lambda function. "
                        "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
            error_data = {
                # Format the stacktrace sent back as an array into a multiline string.
                'trace': '\n'.join(
                    [' '.join([
                        str(x) for x in line  # cast line numbers to strings
                    ]) for line in results.get('output', {}).get('stackTrace', [])]
                ),
                'errmsg': results['output'].get('errorMessage'),
                'type': results['output'].get('errorType')
            }
            module.fail_json(msg=template.format(**error_data), result=results)

    module.exit_json(changed=True, result=results)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
122
lib/ansible/modules/cloud/amazon/iam_mfa_device_facts.py
Normal file
122
lib/ansible/modules/cloud/amazon/iam_mfa_device_facts.py
Normal file
@@ -0,0 +1,122 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: iam_mfa_device_facts
|
||||
short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
|
||||
description:
|
||||
- List the MFA (Multi-Factor Authentication) devices registered for a user
|
||||
version_added: "2.2"
|
||||
author: Victor Costan (@pwnall)
|
||||
options:
|
||||
user_name:
|
||||
description:
|
||||
- The name of the user whose MFA devices will be listed
|
||||
required: false
|
||||
default: null
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
requirements:
|
||||
- boto3
|
||||
- botocore
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
mfa_devices:
|
||||
description: The MFA devices registered for the given user
|
||||
returned: always
|
||||
type: list
|
||||
sample:
|
||||
- enable_date: "2016-03-11T23:25:36+00:00"
|
||||
serial_number: arn:aws:iam::085120003701:mfa/pwnall
|
||||
user_name: pwnall
|
||||
- enable_date: "2016-03-11T23:25:37+00:00"
|
||||
serial_number: arn:aws:iam::085120003702:mfa/pwnall
|
||||
user_name: pwnall
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# List MFA devices (more details: http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html)
|
||||
iam_mfa_device_facts:
|
||||
register: mfa_devices
|
||||
|
||||
# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
|
||||
sts_assume_role:
|
||||
mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
|
||||
role_arn: "arn:aws:iam::123456789012:role/someRole"
|
||||
role_session_name: "someRoleSession"
|
||||
register: assumed_role
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
def list_mfa_devices(connection, module):
    """List a user's MFA devices via IAM and exit the module with the result.

    Calls the ``ListMFADevices`` API (optionally scoped to ``user_name``)
    and exits with the response keys converted to snake_case. The operation
    is read-only, so ``changed`` is always False.
    """
    user_name = module.params.get('user_name')
    changed = False

    args = {}
    if user_name is not None:
        args['UserName'] = user_name
    try:
        response = connection.list_mfa_devices(**args)
    except ClientError as e:
        # BUGFIX: exceptions have no `.message` attribute on Python 3; fall
        # back to str(e) while keeping the Python 2 message unchanged.
        module.fail_json(msg=getattr(e, 'message', str(e)), **camel_dict_to_snake_dict(e.response))

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
|
||||
|
||||
|
||||
def main():
    """Module entry point: connect to IAM and list the user's MFA devices."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            user_name=dict(required=False, default=None)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    # Guard clauses: bail out early when prerequisites are missing.
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")

    connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    list_mfa_devices(connection, module)
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
176
lib/ansible/modules/cloud/amazon/iam_server_certificate_facts.py
Normal file
176
lib/ansible/modules/cloud/amazon/iam_server_certificate_facts.py
Normal file
@@ -0,0 +1,176 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: iam_server_certificate_facts
|
||||
short_description: Retrieve the facts of a server certificate
|
||||
description:
|
||||
- Retrieve the attributes of a server certificate
|
||||
version_added: "2.2"
|
||||
author: "Allen Sanabria (@linuxdynasty)"
|
||||
requirements: [boto3, botocore]
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the server certificate you are retrieving attributes for.
|
||||
required: true
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Retrieve server certificate
|
||||
- iam_server_certificate_facts:
|
||||
name: production-cert
|
||||
register: server_cert
|
||||
|
||||
# Fail if the server certificate name was not found
|
||||
- iam_server_certificate_facts:
|
||||
name: production-cert
|
||||
register: server_cert
|
||||
failed_when: "{{ server_cert.results | length == 0 }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
server_certificate_id:
|
||||
description: The 21 character certificate id
|
||||
returned: success
|
||||
type: str
|
||||
sample: "ADWAJXWTZAXIPIMQHMJPO"
|
||||
certificate_body:
|
||||
description: The asn1der encoded PEM string
|
||||
returned: success
|
||||
type: str
|
||||
sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
|
||||
server_certificate_name:
|
||||
description: The name of the server certificate
|
||||
returned: success
|
||||
type: str
|
||||
sample: "server-cert-name"
|
||||
arn:
|
||||
description: The Amazon resource name of the server certificate
|
||||
returned: success
|
||||
type: str
|
||||
sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name"
|
||||
path:
|
||||
description: The path of the server certificate
|
||||
returned: success
|
||||
type: str
|
||||
sample: "/"
|
||||
expiration:
|
||||
description: The date and time this server certificate will expire, in ISO 8601 format.
|
||||
returned: success
|
||||
type: str
|
||||
sample: "2017-06-15T12:00:00+00:00"
|
||||
upload_date:
|
||||
description: The date and time this server certificate was uploaded, in ISO 8601 format.
|
||||
returned: success
|
||||
type: str
|
||||
sample: "2015-04-25T00:36:40+00:00"
|
||||
'''
|
||||
|
||||
|
||||
try:
|
||||
import boto3
|
||||
import botocore.exceptions
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
def get_server_certs(iam, name=None):
    """Return attribute dicts for one named server certificate, or for all.

    Args:
        iam (botocore.client.IAM): The boto3 IAM client.

    Kwargs:
        name (str): Restrict the lookup to this certificate name.

    Returns:
        dict: Mapping of certificate name to an attribute dict holding the
        certificate body, id, name, ARN, path, and ISO-8601 expiration and
        upload timestamps. Empty when the certificate does not exist or an
        API error occurs.
    """
    results = dict()
    try:
        if name:
            candidates = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
        else:
            # The metadata listing lacks the certificate body, so each entry
            # is re-fetched individually inside the loop below.
            candidates = iam.list_server_certificates()['ServerCertificateMetadataList']

        for cert in candidates:
            if not name:
                cert = iam.get_server_certificate(ServerCertificateName=cert['ServerCertificateName'])['ServerCertificate']
            meta = cert['ServerCertificateMetadata']
            results[meta['ServerCertificateName']] = {
                'certificate_body': cert['CertificateBody'],
                'server_certificate_id': meta['ServerCertificateId'],
                'server_certificate_name': meta['ServerCertificateName'],
                'arn': meta['Arn'],
                'path': meta['Path'],
                'expiration': meta['Expiration'].isoformat(),
                'upload_date': meta['UploadDate'].isoformat(),
            }

    except botocore.exceptions.ClientError:
        # Deliberately best-effort: on an API error (e.g. cert not found)
        # return whatever was collected so far.
        pass

    return results
|
||||
|
||||
def main():
    """Module entry point: gather facts for one or all IAM server certificates."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str'),
    ))

    module = AnsibleModule(argument_spec=argument_spec,)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        iam = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.ClientError as e:
        # BUGFIX: botocore's ClientError has no `.msg` attribute, so the
        # original `str(e.msg)` raised AttributeError inside this handler.
        module.fail_json(msg="Boto3 Client Error - " + str(e))

    cert_name = module.params.get('name')
    results = get_server_certs(iam, cert_name)
    module.exit_json(results=results)
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
1102
lib/ansible/modules/cloud/amazon/kinesis_stream.py
Normal file
1102
lib/ansible/modules/cloud/amazon/kinesis_stream.py
Normal file
File diff suppressed because it is too large
Load Diff
473
lib/ansible/modules/cloud/amazon/lambda.py
Normal file
473
lib/ansible/modules/cloud/amazon/lambda.py
Normal file
@@ -0,0 +1,473 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lambda
|
||||
short_description: Manage AWS Lambda functions
|
||||
description:
|
||||
- Allows for the management of Lambda functions.
|
||||
version_added: '2.2'
|
||||
requirements: [ boto3 ]
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name you want to assign to the function you are uploading. Cannot be changed.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Create or delete Lambda function
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
runtime:
|
||||
description:
|
||||
- The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs. Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
|
||||
required: true
|
||||
role:
|
||||
description:
|
||||
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources. You may use the bare ARN if the role belongs to the same AWS account.
|
||||
default: null
|
||||
handler:
|
||||
description:
|
||||
- The function within your code that Lambda calls to begin execution
|
||||
default: null
|
||||
zip_file:
|
||||
description:
|
||||
- A .zip file containing your deployment package
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'src' ]
|
||||
s3_bucket:
|
||||
description:
|
||||
- Amazon S3 bucket name where the .zip file containing your deployment package is stored
|
||||
required: false
|
||||
default: null
|
||||
s3_key:
|
||||
description:
|
||||
- The Amazon S3 object (the deployment package) key name you want to upload
|
||||
required: false
|
||||
default: null
|
||||
s3_object_version:
|
||||
description:
|
||||
- The Amazon S3 object (the deployment package) version you want to upload.
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
description:
|
||||
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
|
||||
required: false
|
||||
default: null
|
||||
timeout:
|
||||
description:
|
||||
- The function execution time at which Lambda should terminate the function.
|
||||
required: false
|
||||
default: 3
|
||||
memory_size:
|
||||
description:
|
||||
- The amount of memory, in MB, your Lambda function is given
|
||||
required: false
|
||||
default: 128
|
||||
vpc_subnet_ids:
|
||||
description:
|
||||
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC.
|
||||
required: false
|
||||
default: None
|
||||
vpc_security_group_ids:
|
||||
description:
|
||||
- List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
|
||||
required: false
|
||||
default: None
|
||||
notes:
|
||||
- 'Currently this module only supports uploaded code via S3'
|
||||
author:
|
||||
- 'Steyn Huizinga (@steynovich)'
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create Lambda functions
|
||||
tasks:
|
||||
- name: looped creation
|
||||
lambda:
|
||||
name: '{{ item.name }}'
|
||||
state: present
|
||||
zip_file: '{{ item.zip_file }}'
|
||||
runtime: 'python2.7'
|
||||
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
|
||||
handler: 'hello_python.my_handler'
|
||||
vpc_subnet_ids:
|
||||
- subnet-123abcde
|
||||
- subnet-edcba321
|
||||
vpc_security_group_ids:
|
||||
- sg-123abcde
|
||||
- sg-edcba321
|
||||
with_items:
|
||||
- name: HelloWorld
|
||||
zip_file: hello-code.zip
|
||||
- name: ByeBye
|
||||
zip_file: bye-code.zip
|
||||
|
||||
# Basic Lambda function deletion
|
||||
tasks:
|
||||
- name: Delete Lambda functions HelloWorld and ByeBye
|
||||
lambda:
|
||||
name: '{{ item }}'
|
||||
state: absent
|
||||
with_items:
|
||||
- HelloWorld
|
||||
- ByeBye
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
output:
|
||||
description: the data returned by create_function in boto3
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
'code':
|
||||
{
|
||||
'location': 'an S3 URL',
|
||||
'repository_type': 'S3',
|
||||
}
|
||||
'configuration':
|
||||
{
|
||||
'function_name': 'string',
|
||||
'function_arn': 'string',
|
||||
'runtime': 'nodejs',
|
||||
'role': 'string',
|
||||
'handler': 'string',
|
||||
'code_size': 123,
|
||||
'description': 'string',
|
||||
'timeout': 123,
|
||||
'memory_size': 123,
|
||||
'last_modified': 'string',
|
||||
'code_sha256': 'string',
|
||||
'version': 'string',
|
||||
}
|
||||
'''
|
||||
|
||||
# Import from Python standard library
|
||||
import base64
|
||||
import hashlib
|
||||
|
||||
try:
|
||||
import botocore
|
||||
HAS_BOTOCORE = True
|
||||
except ImportError:
|
||||
HAS_BOTOCORE = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
def get_current_function(connection, function_name, qualifier=None):
    """Fetch a Lambda function's code and configuration, or None on failure.

    Args:
        connection: boto3 Lambda client.
        function_name (str): Name or ARN of the function.
        qualifier (str): Optional version or alias to fetch.

    Returns:
        The ``get_function`` API response dict, or None when the lookup
        raises a ClientError (e.g. the function does not exist).
    """
    params = {'FunctionName': function_name}
    if qualifier is not None:
        params['Qualifier'] = qualifier
    try:
        return connection.get_function(**params)
    except botocore.exceptions.ClientError:
        return None
|
||||
|
||||
|
||||
def sha256sum(filename):
    """Return the base64-encoded SHA-256 digest of *filename*'s contents.

    This is the same format AWS Lambda reports as ``CodeSha256``, so the
    result can be compared directly against a deployed function's checksum.
    """
    with open(filename, 'rb') as fh:
        raw_digest = hashlib.sha256(fh.read()).digest()
    return base64.b64encode(raw_digest).decode('utf-8')
|
||||
|
||||
|
||||
def main():
    """Entry point for the ``lambda`` module.

    Creates, updates, or deletes an AWS Lambda function depending on
    ``state``. Function code may come from a local zip file or from S3;
    configuration (role, handler, timeout, memory, VPC) is reconciled
    against the currently deployed function. Supports check mode.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        runtime=dict(type='str', required=True),
        role=dict(type='str', default=None),
        handler=dict(type='str', default=None),
        zip_file=dict(type='str', default=None, aliases=['src']),
        s3_bucket=dict(type='str'),
        s3_key=dict(type='str'),
        s3_object_version=dict(type='str', default=None),
        description=dict(type='str', default=''),
        timeout=dict(type='int', default=3),
        memory_size=dict(type='int', default=128),
        vpc_subnet_ids=dict(type='list', default=None),
        vpc_security_group_ids=dict(type='list', default=None),
    )
    )

    mutually_exclusive = [['zip_file', 's3_key'],
                          ['zip_file', 's3_bucket'],
                          ['zip_file', 's3_object_version']]

    required_together = [['s3_key', 's3_bucket', 's3_object_version'],
                         ['vpc_subnet_ids', 'vpc_security_group_ids']]

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           mutually_exclusive=mutually_exclusive,
                           required_together=required_together)

    name = module.params.get('name')
    state = module.params.get('state').lower()
    runtime = module.params.get('runtime')
    role = module.params.get('role')
    handler = module.params.get('handler')
    s3_bucket = module.params.get('s3_bucket')
    s3_key = module.params.get('s3_key')
    s3_object_version = module.params.get('s3_object_version')
    zip_file = module.params.get('zip_file')
    description = module.params.get('description')
    timeout = module.params.get('timeout')
    memory_size = module.params.get('memory_size')
    vpc_subnet_ids = module.params.get('vpc_subnet_ids')
    vpc_security_group_ids = module.params.get('vpc_security_group_ids')

    check_mode = module.check_mode
    changed = False

    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        module.fail_json(msg=str(e))

    # BUGFIX: `role` defaults to None; the original called role.startswith()
    # unconditionally, which raised AttributeError whenever the role was
    # omitted (e.g. state=absent). Only resolve the ARN when a role is given.
    if role is None:
        role_arn = None
    elif role.startswith('arn:aws:iam'):
        role_arn = role
    else:
        # Bare role name: look up the account id and assemble the full ARN.
        try:
            iam_client = boto3_conn(module, conn_type='client', resource='iam',
                                    region=region, endpoint=ec2_url, **aws_connect_kwargs)
            account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
            role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)
        except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
            module.fail_json(msg=str(e))

    # Get function configuration if present, None otherwise.
    current_function = get_current_function(client, name)

    # Update existing Lambda function.
    if state == 'present' and current_function:

        # Get current state.
        current_config = current_function['Configuration']
        current_version = None

        # Base kwargs for update_function_configuration; only deltas are added.
        func_kwargs = {'FunctionName': name}

        # Update configuration if needed.
        if role_arn and current_config['Role'] != role_arn:
            func_kwargs.update({'Role': role_arn})
        if handler and current_config['Handler'] != handler:
            func_kwargs.update({'Handler': handler})
        if description and current_config['Description'] != description:
            func_kwargs.update({'Description': description})
        if timeout and current_config['Timeout'] != timeout:
            func_kwargs.update({'Timeout': timeout})
        if memory_size and current_config['MemorySize'] != memory_size:
            func_kwargs.update({'MemorySize': memory_size})

        # Check for unsupported mutation: the API cannot change the runtime.
        if current_config['Runtime'] != runtime:
            module.fail_json(msg='Cannot change runtime. Please recreate the function')

        # If VPC configuration is desired.
        if vpc_subnet_ids or vpc_security_group_ids:
            if len(vpc_subnet_ids) < 1:
                module.fail_json(msg='At least 1 subnet is required')

            if len(vpc_security_group_ids) < 1:
                module.fail_json(msg='At least 1 security group is required')

            # NOTE(review): when the deployed function has no 'VpcConfig' key
            # at all, the requested VPC settings are never applied — confirm
            # whether a non-VPC function can be moved into a VPC here.
            if 'VpcConfig' in current_config:
                # Compare requested VPC config with the deployed one.
                current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
                current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']

                subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
                vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)

                if any((subnet_net_id_changed, vpc_security_group_ids_changed)):
                    func_kwargs.update({'VpcConfig':
                                        {'SubnetIds': vpc_subnet_ids, 'SecurityGroupIds': vpc_security_group_ids}})
        else:
            # No VPC configuration is desired; clear any VPC config currently set.
            if ('VpcConfig' in current_config and
                    'VpcId' in current_config['VpcConfig'] and
                    current_config['VpcConfig']['VpcId'] != ''):
                func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})

        # Upload new configuration if configuration has changed.
        # BUGFIX: func_kwargs starts with a single base key ('FunctionName'),
        # so the original `> 2` threshold skipped updates when exactly one
        # setting changed. `> 1` means "any delta was recorded".
        if len(func_kwargs) > 1:
            try:
                if not check_mode:
                    response = client.update_function_configuration(**func_kwargs)
                    current_version = response['Version']
                # BUGFIX: report the pending change in check mode too.
                changed = True
            except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
                module.fail_json(msg=str(e))

        # Update code configuration; 'Publish' makes a new version on change.
        code_kwargs = {'FunctionName': name, 'Publish': True}

        # Update S3 location.
        if s3_bucket and s3_key:
            # If function is stored on S3 always update.
            code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})

            # If S3 Object Version is given.
            if s3_object_version:
                code_kwargs.update({'S3ObjectVersion': s3_object_version})

        # Compare local checksum, update remote code when different.
        elif zip_file:
            local_checksum = sha256sum(zip_file)
            remote_checksum = current_config['CodeSha256']

            # Only upload new code when local code differs from the remote code.
            if local_checksum != remote_checksum:
                try:
                    with open(zip_file, 'rb') as f:
                        encoded_zip = f.read()
                    code_kwargs.update({'ZipFile': encoded_zip})
                except IOError as e:
                    module.fail_json(msg=str(e))

        # Upload new code if needed (e.g. code checksum has changed).
        if len(code_kwargs) > 2:
            try:
                if not check_mode:
                    response = client.update_function_code(**code_kwargs)
                    current_version = response['Version']
                # BUGFIX: report the pending change in check mode too.
                changed = True
            except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
                module.fail_json(msg=str(e))

        # Describe function code and configuration.
        response = get_current_function(client, name, qualifier=current_version)
        if not response:
            module.fail_json(msg='Unable to get function information after updating')

        # We're done.
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Function doesn't exist, create new Lambda function.
    elif state == 'present':
        if s3_bucket and s3_key:
            # If function is stored on S3.
            code = {'S3Bucket': s3_bucket,
                    'S3Key': s3_key}
            if s3_object_version:
                code.update({'S3ObjectVersion': s3_object_version})
        elif zip_file:
            # If function is stored in a local zipfile.
            try:
                with open(zip_file, 'rb') as f:
                    zip_content = f.read()

                code = {'ZipFile': zip_content}
            except IOError as e:
                module.fail_json(msg=str(e))

        else:
            module.fail_json(msg='Either S3 object or path to zipfile required')

        func_kwargs = {'FunctionName': name,
                       'Description': description,
                       'Publish': True,
                       'Runtime': runtime,
                       'Role': role_arn,
                       'Handler': handler,
                       'Code': code,
                       'Timeout': timeout,
                       'MemorySize': memory_size,
                       }

        # If VPC configuration is given.
        if vpc_subnet_ids or vpc_security_group_ids:
            if len(vpc_subnet_ids) < 1:
                module.fail_json(msg='At least 1 subnet is required')

            if len(vpc_security_group_ids) < 1:
                module.fail_json(msg='At least 1 security group is required')

            func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
                                              'SecurityGroupIds': vpc_security_group_ids}})

        # BUGFIX: current_version must be defined even when check mode skips
        # the create call; the original raised NameError in check mode.
        current_version = None

        # Finally try to create function.
        try:
            if not check_mode:
                response = client.create_function(**func_kwargs)
                current_version = response['Version']
            # BUGFIX: report the pending creation in check mode too.
            changed = True
        except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
            module.fail_json(msg=str(e))

        response = get_current_function(client, name, qualifier=current_version)
        if not response:
            # In check mode nothing was created, so there is nothing to
            # describe; report the would-be change instead of failing.
            if check_mode:
                module.exit_json(changed=changed)
            module.fail_json(msg='Unable to get function information after creating')
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Delete existing Lambda function.
    if state == 'absent' and current_function:
        try:
            if not check_mode:
                client.delete_function(FunctionName=name)
            # BUGFIX: report the pending deletion in check mode too.
            changed = True
        except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
            module.fail_json(msg=str(e))

        module.exit_json(changed=changed)

    # Function already absent, do nothing.
    elif state == 'absent':
        module.exit_json(changed=changed)
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
389
lib/ansible/modules/cloud/amazon/lambda_alias.py
Normal file
389
lib/ansible/modules/cloud/amazon/lambda_alias.py
Normal file
@@ -0,0 +1,389 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lambda_alias
|
||||
short_description: Creates, updates or deletes AWS Lambda function aliases.
|
||||
description:
|
||||
- This module allows the management of AWS Lambda functions aliases via the Ansible
|
||||
framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
|
||||
itself and M(lambda_event) to manage event source mappings.
|
||||
|
||||
version_added: "2.2"
|
||||
|
||||
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
|
||||
options:
|
||||
function_name:
|
||||
description:
|
||||
- The name of the function alias.
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Describes the desired state.
|
||||
required: true
|
||||
default: "present"
|
||||
choices: ["present", "absent"]
|
||||
name:
|
||||
description:
|
||||
- Name of the function alias.
|
||||
required: true
|
||||
aliases: ['alias_name']
|
||||
description:
|
||||
description:
|
||||
- A short, user-defined function alias description.
|
||||
required: false
|
||||
version:
|
||||
description:
|
||||
- Version associated with the Lambda function alias.
|
||||
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
|
||||
required: false
|
||||
aliases: ['function_version']
|
||||
requirements:
|
||||
- boto3
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Simple example to create a lambda function and publish a version
|
||||
- hosts: localhost
|
||||
gather_facts: no
|
||||
vars:
|
||||
state: present
|
||||
project_folder: /path/to/deployment/package
|
||||
deployment_package: lambda.zip
|
||||
account: 123456789012
|
||||
production_version: 5
|
||||
tasks:
|
||||
- name: AWS Lambda Function
|
||||
lambda:
|
||||
state: "{{ state | default('present') }}"
|
||||
name: myLambdaFunction
|
||||
publish: True
|
||||
description: lambda function description
|
||||
code_s3_bucket: package-bucket
|
||||
code_s3_key: "lambda/{{ deployment_package }}"
|
||||
local_path: "{{ project_folder }}/{{ deployment_package }}"
|
||||
runtime: python2.7
|
||||
timeout: 5
|
||||
handler: lambda.handler
|
||||
memory_size: 128
|
||||
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
|
||||
|
||||
- name: show results
|
||||
debug:
|
||||
var: lambda_facts
|
||||
|
||||
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
|
||||
- name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
|
||||
lambda_alias:
|
||||
state: "{{ state | default('present') }}"
|
||||
function_name: "{{ lambda_facts.FunctionName }}"
|
||||
name: Dev
|
||||
description: Development is $LATEST version
|
||||
|
||||
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
|
||||
- name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
|
||||
lambda_alias:
|
||||
state: "{{ state | default('present') }}"
|
||||
function_name: "{{ lambda_facts.FunctionName }}"
|
||||
name: QA
|
||||
version: "{{ lambda_facts.Version }}"
|
||||
description: "QA is version {{ lambda_facts.Version }}"
|
||||
when: lambda_facts.Version != "$LATEST"
|
||||
|
||||
# The Prod alias will have a fixed version based on a variable
|
||||
- name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} "
|
||||
lambda_alias:
|
||||
state: "{{ state | default('present') }}"
|
||||
function_name: "{{ lambda_facts.FunctionName }}"
|
||||
name: Prod
|
||||
version: "{{ production_version }}"
|
||||
description: "Production is version {{ production_version }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
alias_arn:
|
||||
description: Full ARN of the function, including the alias
|
||||
returned: success
|
||||
type: string
|
||||
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
|
||||
description:
|
||||
description: A short description of the alias
|
||||
returned: success
|
||||
type: string
|
||||
sample: The development stage for my hot new app
|
||||
function_version:
|
||||
description: The qualifier that the alias refers to
|
||||
returned: success
|
||||
type: string
|
||||
sample: $LATEST
|
||||
name:
|
||||
description: The name of the alias assigned
|
||||
returned: success
|
||||
type: string
|
||||
sample: dev
|
||||
'''
|
||||
|
||||
|
||||
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, boto3=True):
        # NOTE(review): the parameter name 'boto3' shadows the boto3 module
        # inside this method; kept unchanged for backward compatibility with
        # keyword callers. The sibling lambda_event module uses 'use_boto3'.
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)

            self.resource_client = dict()
            if not resources:
                resources = ['lambda']

            # Work on a copy so the caller's list is not mutated: the original
            # code appended 'iam' to the caller-supplied list in place, which
            # leaks the side effect back to the caller.
            resources = list(resources) + ['iam']

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # Best-effort lookup of the AWS account id from the caller's IAM user
        # ARN; an empty string is used when it cannot be determined.
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        """Return the boto3 client for *resource* (default 'lambda')."""
        return self.resource_client[resource]
|
||||
|
||||
|
||||
def pc(key):
    """
    Convert a snake_case key into its PascalCase equivalent.

    For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case parameter name
    :return: PascalCase string
    """

    return "".join(part.capitalize() for part in key.split('_'))
|
||||
|
||||
|
||||
def set_api_params(module, module_params):
    """
    Build a dict of boto3 API parameters from the module's parameters.

    Each name in *module_params* whose module value is truthy is converted
    from snake_case to PascalCase and included in the result; falsy values
    are dropped.

    :param module: Ansible module reference
    :param module_params: iterable of snake_case parameter names
    :return: dict of PascalCase keys mapped to module parameter values
    """

    return dict(
        ("".join(token.capitalize() for token in param.split('_')), module.params.get(param))
        for param in module_params
        if module.params.get(param)
    )
|
||||
|
||||
|
||||
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Normalizes module.params in place: a function_version of 0 becomes the
    '$LATEST' qualifier, any other version is converted to a string.

    :param module: Ansible module reference
    :param aws: AWS client connection (unused here, kept for signature parity)
    :return: None
    """

    function_name = module.params['function_name']

    # validate function name: word characters, hyphens and colons only.
    # Raw string fixes the invalid '\w' escape sequence of the original
    # non-raw literal (a DeprecationWarning, later SyntaxWarning, in py3).
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, '
                'underscores, hyphens and colons.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
    if module.params['function_version'] == 0:
        module.params['function_version'] = '$LATEST'
    else:
        module.params['function_version'] = str(module.params['function_version'])

    return
|
||||
|
||||
|
||||
def get_lambda_alias(module, aws):
    """
    Returns the lambda function alias if it exists, or None if not found.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return: alias facts dict as returned by get_alias, or None
    """

    client = aws.client('lambda')

    # set API parameters
    api_params = set_api_params(module, ('function_name', 'name'))

    # check if alias exists and get facts
    try:
        results = client.get_alias(**api_params)
    except ClientError as e:
        # a missing alias is an expected condition, not an error
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            results = None
        else:
            module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
    except (ParamValidationError, MissingParametersError) as e:
        # BUGFIX: these exception types have no 'response' attribute, so the
        # original combined except clause raised AttributeError when checking
        # e.response; fail directly instead.
        module.fail_json(msg='Error retrieving function alias: {0}'.format(e))

    return results
|
||||
|
||||
|
||||
def lambda_alias(module, aws):
    """
    Adds, updates or deletes lambda function aliases.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return dict: alias facts from the API (or the pre-existing facts) plus
        a 'changed' flag
    """
    client = aws.client('lambda')
    results = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    # determine whether the alias already exists
    facts = get_lambda_alias(module, aws)
    if facts:
        current_state = 'present'

    if state == 'present':
        if current_state == 'present':

            # check if alias has changed -- only version and description can change
            alias_params = ('function_version', 'description')
            for param in alias_params:
                if module.params.get(param) != facts.get(pc(param)):
                    changed = True
                    break

            if changed:
                # identity params plus only the mutable attributes
                api_params = set_api_params(module, ('function_name', 'name'))
                api_params.update(set_api_params(module, alias_params))

                if not module.check_mode:
                    try:
                        results = client.update_alias(**api_params)
                    except (ClientError, ParamValidationError, MissingParametersError) as e:
                        module.fail_json(msg='Error updating function alias: {0}'.format(e))

        else:
            # create new function alias
            api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))

            try:
                # 'changed' is set even in check mode so the dry run reports
                # the change that would be made
                if not module.check_mode:
                    results = client.create_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating function alias: {0}'.format(e))

    else:  # state = 'absent'
        if current_state == 'present':
            # delete the function
            api_params = set_api_params(module, ('function_name', 'name'))

            try:
                if not module.check_mode:
                    results = client.delete_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error deleting function alias: {0}'.format(e))

    # NOTE(review): in check mode when creating a brand-new alias, 'results'
    # is an empty dict and 'facts' is None, so dict(results or facts) would
    # raise TypeError — confirm and guard with `or {}` if reproducible.
    return dict(changed=changed, **dict(results or facts))
|
||||
|
||||
|
||||
def main():
    """
    Main entry point: declare the argument spec, validate dependencies and
    parameters, then create/update/delete the alias and exit with facts.

    :return dict: ansible facts (via module.exit_json)
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            function_name=dict(required=True, default=None),
            name=dict(required=True, default=None, aliases=['alias_name']),
            # 0 (the default) means the $LATEST qualifier; see validate_params
            function_version=dict(type='int', required=False, default=0, aliases=['version']),
            description=dict(required=False, default=None),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    results = lambda_alias(module, aws)

    # convert boto3's CamelCase response keys to ansible's snake_case
    module.exit_json(**camel_dict_to_snake_dict(results))
|
||||
|
||||
|
||||
# ansible import module(s) kept at ~eof as recommended
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
427
lib/ansible/modules/cloud/amazon/lambda_event.py
Normal file
427
lib/ansible/modules/cloud/amazon/lambda_event.py
Normal file
@@ -0,0 +1,427 @@
|
||||
#!/usr/bin/python
|
||||
# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import sys
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lambda_event
|
||||
short_description: Creates, updates or deletes AWS Lambda function event mappings.
|
||||
description:
|
||||
- This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
|
||||
events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
|
||||
AWS Lambda invokes the function.
|
||||
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
|
||||
function itself and M(lambda_alias) to manage function aliases.
|
||||
|
||||
version_added: "2.2"
|
||||
|
||||
author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
|
||||
options:
|
||||
lambda_function_arn:
|
||||
description:
|
||||
- The name or ARN of the lambda function.
|
||||
required: true
|
||||
aliases: ['function_name', 'function_arn']
|
||||
state:
|
||||
description:
|
||||
- Describes the desired state.
|
||||
required: true
|
||||
default: "present"
|
||||
choices: ["present", "absent"]
|
||||
alias:
|
||||
description:
|
||||
- Name of the function alias. Mutually exclusive with C(version).
|
||||
required: true
|
||||
version:
|
||||
description:
|
||||
- Version of the Lambda function. Mutually exclusive with C(alias).
|
||||
required: false
|
||||
event_source:
|
||||
description:
|
||||
- Source of the event that triggers the lambda function.
|
||||
required: false
|
||||
default: stream
|
||||
choices: ['stream']
|
||||
source_params:
|
||||
description:
|
||||
- Sub-parameters required for event source.
|
||||
- I(== stream event source ==)
|
||||
- C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
|
||||
- C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
|
||||
- C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
|
||||
time of invoking your function. Default is 100.
|
||||
- C(starting_position) The position in the stream where AWS Lambda should start reading.
|
||||
Choices are TRIM_HORIZON or LATEST.
|
||||
required: true
|
||||
requirements:
|
||||
- boto3
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Example that creates a lambda event notification for a DynamoDB stream
|
||||
- hosts: localhost
|
||||
gather_facts: no
|
||||
vars:
|
||||
state: present
|
||||
tasks:
|
||||
- name: DynamoDB stream event mapping
|
||||
lambda_event:
|
||||
state: "{{ state | default('present') }}"
|
||||
event_source: stream
|
||||
function_name: "{{ function_name }}"
|
||||
alias: Dev
|
||||
source_params:
|
||||
source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
|
||||
enabled: True
|
||||
batch_size: 100
|
||||
starting_position: TRIM_HORIZON
|
||||
|
||||
- name: Show source event
|
||||
debug:
|
||||
var: lambda_stream_events
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
lambda_stream_events:
|
||||
description: list of dictionaries returned by the API describing stream event mappings
|
||||
returned: success
|
||||
type: list
|
||||
'''
|
||||
|
||||
# ---------------------------------------------------------------------------------------------------
|
||||
#
|
||||
# Helper Functions & classes
|
||||
#
|
||||
# ---------------------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, use_boto3=True):

        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)

            self.resource_client = dict()
            if not resources:
                resources = ['lambda']

            # Work on a copy so the caller's list is not mutated: the original
            # code appended 'iam' to the caller-supplied list in place, which
            # leaks the side effect back to the caller.
            resources = list(resources) + ['iam']

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # set account ID (best effort; empty string when it cannot be derived
        # from the caller's IAM user ARN)
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        """Return the boto3 client for *resource* (default 'lambda')."""
        return self.resource_client[resource]
|
||||
|
||||
|
||||
def pc(key):
    """
    Convert a snake_case key into its PascalCase equivalent.

    For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case parameter name
    :return: PascalCase string
    """

    return "".join(map(str.capitalize, key.split('_')))
|
||||
|
||||
|
||||
def ordered_obj(obj):
    """
    Recursively normalize *obj* into a canonically ordered structure so two
    objects can be compared regardless of dict key order or list order.

    Dicts become sorted lists of (key, value) pairs; lists are sorted after
    their elements are normalized; scalars are returned unchanged.

    :param obj: arbitrary nested dict/list/scalar structure
    :return: order-normalized equivalent of *obj*
    """

    if isinstance(obj, dict):
        return sorted([(key, ordered_obj(value)) for key, value in obj.items()])
    elif isinstance(obj, list):
        return sorted([ordered_obj(item) for item in obj])
    return obj
|
||||
|
||||
|
||||
def set_api_sub_params(params):
    """
    Convert module sub-parameters to the form expected by the boto3 API.

    Keys with truthy values are renamed from snake_case to PascalCase;
    keys with falsy values are dropped.

    :param params: dict of snake_case sub-parameters
    :return: dict of PascalCase API parameters
    """

    return dict(
        ("".join(word.capitalize() for word in key.split('_')), value)
        for key, value in params.items()
        if value
    )
|
||||
|
||||
|
||||
def validate_params(module, aws):
    """
    Performs basic parameter validation and normalizes the function ARN.

    module.params['lambda_function_arn'] is rewritten in place: a bare
    function name is expanded to a fully qualified ARN using the connection's
    region and account id, and the version/alias qualifier is appended when
    one is supplied.

    :param module: Ansible module reference
    :param aws: AWS client connection (provides region and account_id)
    :return: None
    """

    function_name = module.params['lambda_function_arn']

    # validate function name: word characters, hyphens and colons only.
    # Raw string fixes the invalid '\w' escape sequence of the original
    # non-raw literal (a DeprecationWarning, later SyntaxWarning, in py3).
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, '
                'underscores, hyphens and colons.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # check if 'function_name' needs to be expanded in full ARN format
    if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
        function_name = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)

    # append the version or alias qualifier, if any
    qualifier = get_qualifier(module)
    if qualifier:
        function_arn = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)

    return
|
||||
|
||||
|
||||
def get_qualifier(module):
    """
    Returns the function qualifier as a version or alias or None.

    A positive 'version' parameter takes precedence over 'alias'; when
    neither is set, None is returned (meaning the unqualified function).

    :param module: Ansible module reference
    :return: qualifier string or None
    """

    version = module.params['version']
    if version > 0:
        return str(version)

    alias = module.params['alias']
    if alias:
        return str(alias)

    return None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------------------------------
|
||||
#
|
||||
# Lambda Event Handlers
|
||||
#
|
||||
# This section defines a lambda_event_X function where X is an AWS service capable of initiating
|
||||
# the execution of a Lambda function (pull only).
|
||||
#
|
||||
# ---------------------------------------------------------------------------------------------------
|
||||
|
||||
def lambda_event_stream(module, aws):
    """
    Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return: dict with a 'changed' flag and the (snake_cased) event mapping
        facts under 'events'
    """

    client = aws.client('lambda')
    facts = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    api_params = dict(FunctionName=module.params['lambda_function_arn'])

    # check if required sub-parameters are present and valid
    source_params = module.params['source_params']

    source_arn = source_params.get('source_arn')
    if source_arn:
        api_params.update(EventSourceArn=source_arn)
    else:
        module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")

    # check if optional sub-parameters are valid, if present
    batch_size = source_params.get('batch_size')
    if batch_size:
        try:
            source_params['batch_size'] = int(batch_size)
        except ValueError:
            module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))

    # optional boolean value needs special treatment as not present does not imply False
    source_param_enabled = module.boolean(source_params.get('enabled', 'True'))

    # check if event mapping exist
    try:
        facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
        if facts:
            current_state = 'present'
    except ClientError as e:
        module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))

    if state == 'present':
        if current_state == 'absent':

            # a new mapping requires a starting position in the stream
            starting_position = source_params.get('starting_position')
            if starting_position:
                api_params.update(StartingPosition=starting_position)
            else:
                module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")

            if source_arn:
                api_params.update(Enabled=source_param_enabled)
            if source_params.get('batch_size'):
                api_params.update(BatchSize=source_params.get('batch_size'))

            try:
                # 'changed' is set even in check mode so the dry run reports
                # the change that would be made
                if not module.check_mode:
                    facts = client.create_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))

        else:
            # current_state is 'present': update the first existing mapping
            # in place, identified by its UUID
            api_params = dict(FunctionName=module.params['lambda_function_arn'])
            current_mapping = facts[0]
            api_params.update(UUID=current_mapping['UUID'])
            mapping_changed = False

            # check if anything changed
            if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
                api_params.update(BatchSize=source_params['batch_size'])
                mapping_changed = True

            if source_param_enabled is not None:
                if source_param_enabled:
                    # transient AWS states ('Enabling'/'Disabling') count as
                    # already being in the target state
                    if current_mapping['State'] not in ('Enabled', 'Enabling'):
                        api_params.update(Enabled=True)
                        mapping_changed = True
                else:
                    if current_mapping['State'] not in ('Disabled', 'Disabling'):
                        api_params.update(Enabled=False)
                        mapping_changed = True

            if mapping_changed:
                try:
                    if not module.check_mode:
                        facts = client.update_event_source_mapping(**api_params)
                    changed = True
                except (ClientError, ParamValidationError, MissingParametersError) as e:
                    module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))

    else:
        if current_state == 'present':
            # remove the stream event mapping
            api_params = dict(UUID=facts[0]['UUID'])

            try:
                if not module.check_mode:
                    facts = client.delete_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))

    return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
|
||||
|
||||
|
||||
def main():
    """Produce a list of function suffixes which handle lambda events."""
    # used below to look up the lambda_event_<source> handler by name
    this_module = sys.modules[__name__]
    source_choices = ["stream"]

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']),
            event_source=dict(required=False, default="stream", choices=source_choices),
            source_params=dict(type='dict', required=True, default=None),
            alias=dict(required=False, default=None),
            # 0 (the default) means no version qualifier; see get_qualifier
            version=dict(type='int', required=False, default=0),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['alias', 'version']],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    # dispatch to the handler named lambda_event_<event_source>
    this_module_function = getattr(this_module, 'lambda_event_{}'.format(module.params['event_source'].lower()))

    results = this_module_function(module, aws)

    module.exit_json(**results)
|
||||
|
||||
|
||||
# ansible import module(s) kept at ~eof as recommended
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
413
lib/ansible/modules/cloud/amazon/lambda_facts.py
Normal file
413
lib/ansible/modules/cloud/amazon/lambda_facts.py
Normal file
@@ -0,0 +1,413 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import datetime
|
||||
import sys
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lambda_facts
|
||||
short_description: Gathers AWS Lambda function details as Ansible facts
|
||||
description:
|
||||
- Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
|
||||
Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and
|
||||
M(lambda_event) to manage lambda event source mappings.
|
||||
|
||||
version_added: "2.2"
|
||||
|
||||
options:
|
||||
query:
|
||||
description:
|
||||
- Specifies the resource type for which to gather facts. Leave blank to retrieve all facts.
|
||||
required: true
|
||||
choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ]
|
||||
default: "all"
|
||||
function_name:
|
||||
description:
|
||||
- The name of the lambda function for which facts are requested.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ "function", "name"]
|
||||
event_source_arn:
|
||||
description:
|
||||
- For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
|
||||
default: null
|
||||
required: false
|
||||
author: Pierre Jodouin (@pjodouin)
|
||||
requirements:
|
||||
- boto3
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
# Simple example of listing all info for a function
|
||||
- name: List all for a specific function
|
||||
lambda_facts:
|
||||
query: all
|
||||
function_name: myFunction
|
||||
register: my_function_details
|
||||
# List all versions of a function
|
||||
- name: List function versions
|
||||
lambda_facts:
|
||||
query: versions
|
||||
function_name: myFunction
|
||||
register: my_function_versions
|
||||
# List all lambda function versions
|
||||
- name: List all function
|
||||
lambda_facts:
|
||||
query: all
|
||||
max_items: 20
|
||||
- name: show Lambda facts
|
||||
debug:
|
||||
var: lambda_facts
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
lambda_facts:
|
||||
description: lambda facts
|
||||
returned: success
|
||||
type: dict
|
||||
lambda_facts.function:
|
||||
description: lambda function list
|
||||
returned: success
|
||||
type: dict
|
||||
lambda_facts.function.TheName:
|
||||
description: lambda function information, including event, mapping, and version information
|
||||
returned: success
|
||||
type: dict
|
||||
'''
|
||||
|
||||
|
||||
def fix_return(node):
    """Recursively make an API result JSON-serializable.

    Walks lists and dicts; every ``datetime.datetime`` leaf is replaced by
    its ``str()`` representation. All other values pass through unchanged.

    :param node: arbitrary nested structure of dicts, lists and scalars
    :return: converted structure of the same shape
    """
    if isinstance(node, datetime.datetime):
        return str(node)

    if isinstance(node, list):
        return [fix_return(entry) for entry in node]

    if isinstance(node, dict):
        return dict((key, fix_return(value)) for key, value in node.items())

    # Scalars (str, int, bool, None, ...) need no conversion.
    return node
|
||||
|
||||
|
||||
def alias_details(client, module):
    """Return the list of aliases for the named lambda function.

    Requires the ``function_name`` module parameter and honours the optional
    ``max_items`` / ``next_marker`` pagination parameters.

    :param client: boto3 Lambda client reference
    :param module: Ansible module reference
    :return dict: ``{function_name: {'aliases': [...]}}`` in snake_case
    """
    function_name = module.params.get('function_name')
    if not function_name:
        module.fail_json(msg='Parameter function_name required for query=aliases.')

    api_params = dict()
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')

    lambda_facts = dict()
    try:
        response = client.list_aliases(FunctionName=function_name, **api_params)
        lambda_facts.update(aliases=response['Aliases'])
    except ClientError as e:
        # A missing function is reported as an empty alias list, not a failure.
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts.update(aliases=[])
        else:
            module.fail_json(msg='Unable to get {0} aliases, error: {1}'.format(function_name, e))

    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
|
||||
|
||||
|
||||
def all_details(client, module):
    """Gather every category of lambda facts.

    With ``function_name`` set, merges config, alias, policy, version and
    mapping facts for that single function; otherwise returns the
    configuration of all functions. Pagination parameters are rejected.

    :param client: boto3 Lambda client reference
    :param module: Ansible module reference
    :return dict:
    """
    if module.params.get('max_items') or module.params.get('next_marker'):
        module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.')

    lambda_facts = dict()
    function_name = module.params.get('function_name')

    if function_name:
        merged = dict()
        # Each helper returns {function_name: {...}}; merge the inner dicts.
        for gather in (config_details, alias_details, policy_details,
                       version_details, mapping_details):
            merged.update(gather(client, module)[function_name])
        lambda_facts[function_name] = merged
    else:
        lambda_facts.update(config_details(client, module))

    return lambda_facts
|
||||
|
||||
|
||||
def config_details(client, module):
    """Return configuration details for one or all lambda functions.

    With ``function_name`` set, fetches that function's configuration;
    otherwise lists all functions (honouring ``max_items``/``next_marker``)
    keyed by function name.

    :param client: boto3 Lambda client reference
    :param module: Ansible module reference
    :return dict:
    """
    function_name = module.params.get('function_name')

    if function_name:
        lambda_facts = dict()
        try:
            lambda_facts.update(client.get_function_configuration(FunctionName=function_name))
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                lambda_facts.update(function={})
            else:
                module.fail_json(msg='Unable to get {0} configuration, error: {1}'.format(function_name, e))
        return {function_name: camel_dict_to_snake_dict(lambda_facts)}

    # No name given: enumerate every function, with optional pagination.
    api_params = dict()
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')

    function_list = []
    try:
        function_list = client.list_functions(**api_params)['Functions']
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            function_list = []
        else:
            module.fail_json(msg='Unable to get function list, error: {0}'.format(e))

    return dict((entry['FunctionName'], camel_dict_to_snake_dict(entry))
                for entry in function_list)
|
||||
|
||||
|
||||
def mapping_details(client, module):
    """Return lambda event source mappings, optionally filtered.

    Filters by ``function_name`` and/or ``event_source_arn`` when given;
    honours ``max_items``/``next_marker`` pagination.

    :param client: boto3 Lambda client reference
    :param module: Ansible module reference
    :return dict: keyed by function name when one was supplied
    """
    api_params = dict()
    function_name = module.params.get('function_name')

    if function_name:
        api_params['FunctionName'] = module.params.get('function_name')
    if module.params.get('event_source_arn'):
        api_params['EventSourceArn'] = module.params.get('event_source_arn')
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')

    lambda_facts = dict()
    try:
        response = client.list_event_source_mappings(**api_params)
        lambda_facts.update(mappings=response['EventSourceMappings'])
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts.update(mappings=[])
        else:
            module.fail_json(msg='Unable to get source event mappings, error: {0}'.format(e))

    if function_name:
        return {function_name: camel_dict_to_snake_dict(lambda_facts)}

    return camel_dict_to_snake_dict(lambda_facts)
|
||||
|
||||
|
||||
def policy_details(client, module):
    """Return the resource policy attached to the named lambda function.

    Requires ``function_name``; pagination parameters are rejected.

    :param client: boto3 Lambda client reference
    :param module: Ansible module reference
    :return dict: ``{function_name: {'policy': {...}}}`` in snake_case
    """
    if module.params.get('max_items') or module.params.get('next_marker'):
        module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.')

    function_name = module.params.get('function_name')
    if not function_name:
        module.fail_json(msg='Parameter function_name required for query=policy.')

    lambda_facts = dict()
    try:
        # get_policy returns a JSON string so must convert to dict before reassigning to its key
        policy_document = client.get_policy(FunctionName=function_name)['Policy']
        lambda_facts.update(policy=json.loads(policy_document))
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts.update(policy={})
        else:
            module.fail_json(msg='Unable to get {0} policy, error: {1}'.format(function_name, e))

    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
|
||||
|
||||
|
||||
def version_details(client, module):
    """Return every published version of the named lambda function.

    Requires ``function_name``; honours ``max_items``/``next_marker``
    pagination.

    :param client: boto3 Lambda client reference
    :param module: Ansible module reference
    :return dict: ``{function_name: {'versions': [...]}}`` in snake_case
    """
    function_name = module.params.get('function_name')
    if not function_name:
        module.fail_json(msg='Parameter function_name required for query=versions.')

    api_params = dict()
    if module.params.get('max_items'):
        api_params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        api_params['Marker'] = module.params.get('next_marker')

    lambda_facts = dict()
    try:
        response = client.list_versions_by_function(FunctionName=function_name, **api_params)
        lambda_facts.update(versions=response['Versions'])
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            lambda_facts.update(versions=[])
        else:
            module.fail_json(msg='Unable to get {0} versions, error: {1}'.format(function_name, e))

    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
|
||||
|
||||
|
||||
def main():
    """
    Main entry point.

    Builds the argument spec, validates parameters, connects to the AWS
    Lambda endpoint and dispatches to the requested *_details() helper.
    Results are returned as ansible facts under lambda_facts.function.

    :return dict: ansible facts
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            function_name=dict(required=False, default=None, aliases=['function', 'name']),
            query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
            event_source_arn=dict(required=False, default=None),
            # Declared so the pagination options read by every *_details()
            # helper (and shown in EXAMPLES) are actually settable by users.
            max_items=dict(required=False, type='int', default=None),
            next_marker=dict(required=False, default=None)
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    # validate function_name if present; raw string avoids the invalid
    # escape-sequence warning "\w" triggers in a plain string literal
    function_name = module.params['function_name']
    if function_name:
        if not re.search(r"^[\w\-:]+$", function_name):
            module.fail_json(
                msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
            )
        if len(function_name) > 64:
            module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    try:
        region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        aws_connect_kwargs.update(dict(region=region,
                                       endpoint=endpoint,
                                       conn_type='client',
                                       resource='lambda'
                                       ))
        client = boto3_conn(module, **aws_connect_kwargs)
    except ClientError as e:
        module.fail_json(msg="Can't authorize connection - {0}".format(e))

    this_module = sys.modules[__name__]

    # Map the query parameter to the helper that services it.
    invocations = dict(
        aliases='alias_details',
        all='all_details',
        config='config_details',
        mappings='mapping_details',
        policy='policy_details',
        versions='version_details',
    )

    this_module_function = getattr(this_module, invocations[module.params['query']])
    all_facts = fix_return(this_module_function(client, module))

    results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False)

    if module.check_mode:
        results['msg'] = 'Check mode set but ignored for fact gathering only.'

    module.exit_json(**results)
|
||||
|
||||
|
||||
# ansible import module(s) kept at ~eof as recommended; the wildcard imports
# supply AnsibleModule plus the EC2/boto3 helpers used above
# (ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, ...)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *


if __name__ == '__main__':
    main()
|
||||
479
lib/ansible/modules/cloud/amazon/redshift.py
Normal file
479
lib/ansible/modules/cloud/amazon/redshift.py
Normal file
@@ -0,0 +1,479 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2014 Jens Carl, Hothead Games Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author:
|
||||
- "Jens Carl (@j-carl), Hothead Games Inc."
|
||||
module: redshift
|
||||
version_added: "2.2"
|
||||
short_description: create, delete, or modify an Amazon Redshift instance
|
||||
description:
|
||||
- Creates, deletes, or modifies amazon Redshift cluster instances.
|
||||
options:
|
||||
command:
|
||||
description:
|
||||
- Specifies the action to take.
|
||||
required: true
|
||||
choices: [ 'create', 'facts', 'delete', 'modify' ]
|
||||
identifier:
|
||||
description:
|
||||
- Redshift cluster identifier.
|
||||
required: true
|
||||
node_type:
|
||||
description:
|
||||
- The node type of the cluster. Must be specified when command=create.
|
||||
choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
|
||||
username:
|
||||
description:
|
||||
- Master database username. Used only when command=create.
|
||||
password:
|
||||
description:
|
||||
- Master database password. Used only when command=create.
|
||||
cluster_type:
|
||||
description:
|
||||
- The type of cluster.
|
||||
choices: ['multi-node', 'single-node' ]
|
||||
default: 'single-node'
|
||||
db_name:
|
||||
description:
|
||||
- Name of the database.
|
||||
default: null
|
||||
availability_zone:
|
||||
description:
|
||||
- availability zone in which to launch cluster
|
||||
aliases: ['zone', 'aws_zone']
|
||||
number_of_nodes:
|
||||
description:
|
||||
- Number of nodes. Only used when cluster_type=multi-node.
|
||||
default: null
|
||||
cluster_subnet_group_name:
|
||||
description:
|
||||
- which subnet to place the cluster
|
||||
aliases: ['subnet']
|
||||
cluster_security_groups:
|
||||
description:
|
||||
- in which security group the cluster belongs
|
||||
default: null
|
||||
aliases: ['security_groups']
|
||||
vpc_security_group_ids:
|
||||
description:
|
||||
- VPC security group
|
||||
aliases: ['vpc_security_groups']
|
||||
default: null
|
||||
preferred_maintenance_window:
|
||||
description:
|
||||
- maintenance window
|
||||
aliases: ['maintance_window', 'maint_window']
|
||||
default: null
|
||||
cluster_parameter_group_name:
|
||||
description:
|
||||
- name of the cluster parameter group
|
||||
aliases: ['param_group_name']
|
||||
default: null
|
||||
automated_snapshot_retention_period:
|
||||
description:
|
||||
- period when the snapshot take place
|
||||
aliases: ['retention_period']
|
||||
default: null
|
||||
port:
|
||||
description:
|
||||
- which port the cluster is listening on
|
||||
default: null
|
||||
cluster_version:
|
||||
description:
|
||||
- which version the cluster should have
|
||||
aliases: ['version']
|
||||
choices: ['1.0']
|
||||
default: null
|
||||
allow_version_upgrade:
|
||||
description:
|
||||
- flag to determine whether a version upgrade is allowed
|
||||
aliases: ['version_upgrade']
|
||||
default: true
|
||||
publicly_accessible:
|
||||
description:
|
||||
- if the cluster is accessible publicly or not
|
||||
default: false
|
||||
encrypted:
|
||||
description:
|
||||
- if the cluster is encrypted or not
|
||||
default: false
|
||||
elastic_ip:
|
||||
description:
|
||||
- if the cluster has an elastic IP or not
|
||||
default: null
|
||||
new_cluster_identifier:
|
||||
description:
|
||||
- Only used when command=modify.
|
||||
aliases: ['new_identifier']
|
||||
default: null
|
||||
wait:
|
||||
description:
|
||||
- When command=create, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
|
||||
default: "no"
|
||||
choices: [ "yes", "no" ]
|
||||
wait_timeout:
|
||||
description:
|
||||
- how long before wait gives up, in seconds
|
||||
default: 300
|
||||
requirements: [ 'boto' ]
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Basic cluster provisioning example
|
||||
- redshift: >
|
||||
command=create
|
||||
node_type=ds1.xlarge
|
||||
identifier=new_cluster
|
||||
username=cluster_admin
|
||||
password=1nsecure
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
cluster:
|
||||
description: dictionary containing all the cluster information
|
||||
returned: success
|
||||
type: dictionary
|
||||
contains:
|
||||
identifier:
|
||||
description: Id of the cluster.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "new_redshift_cluster"
|
||||
create_time:
|
||||
description: Time of the cluster creation as timestamp.
|
||||
returned: success
|
||||
type: float
|
||||
sample: 1430158536.308
|
||||
status:
|
||||
description: Status of the cluster.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "available"
|
||||
db_name:
|
||||
description: Name of the database.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "new_db_name"
|
||||
availability_zone:
|
||||
description: Amazon availability zone where the cluster is located.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "us-east-1b"
|
||||
maintenance_window:
|
||||
description: Time frame when maintenance/upgrade are done.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "sun:09:30-sun:10:00"
|
||||
private_ip_address:
|
||||
description: Private IP address of the main node.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "10.10.10.10"
|
||||
public_ip_address:
|
||||
description: Public IP address of the main node.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "0.0.0.0"
|
||||
port:
|
||||
description: Port of the cluster.
|
||||
returned: success
|
||||
type: int
|
||||
sample: 5439
|
||||
url:
|
||||
description: FQDN of the main cluster node.
|
||||
returned: success
|
||||
type: string
|
||||
sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
|
||||
'''
|
||||
|
||||
import time
|
||||
|
||||
try:
|
||||
import boto
|
||||
from boto import redshift
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
|
||||
def _collect_facts(resource):
    """Transform a describe_clusters cluster record into a facts dict."""
    facts = dict(
        identifier=resource['ClusterIdentifier'],
        create_time=resource['ClusterCreateTime'],
        status=resource['ClusterStatus'],
        username=resource['MasterUsername'],
        db_name=resource['DBName'],
        availability_zone=resource['AvailabilityZone'],
        maintenance_window=resource['PreferredMaintenanceWindow'],
    )

    # The SHARED (single-node) or LEADER node carries the cluster's address.
    for node in resource['ClusterNodes']:
        if node['NodeRole'] in ('SHARED', 'LEADER'):
            facts['private_ip_address'] = node['PrivateIPAddress']
            break

    return facts
|
||||
|
||||
|
||||
def create_cluster(module, redshift):
    """
    Create a new cluster, or do nothing if it already exists.

    module: AnsibleModule object
    redshift: authenticated redshift connection object

    Returns:
        (changed, facts) tuple; changed is False when the cluster already
        existed, True when a create call was issued.
    """

    identifier = module.params.get('identifier')
    node_type = module.params.get('node_type')
    username = module.params.get('username')
    password = module.params.get('password')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    changed = True
    # Package up the optional parameters
    params = {}
    for p in ('db_name', 'cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port',
              'cluster_version', 'allow_version_upgrade',
              'number_of_nodes', 'publicly_accessible',
              'encrypted', 'elastic_ip'):
        if p in module.params:
            params[ p ] = module.params.get( p )

    # Idempotency: if describe succeeds the cluster already exists and no
    # create is issued; the exception path performs the actual create.
    try:
        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
        changed = False
    except boto.exception.JSONResponseError as e:
        try:
            redshift.create_cluster(identifier, node_type, username, password, **params)
        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    # Re-describe to capture the cluster's current state for fact collection.
    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            # Poll every 5 seconds until 'available' or the deadline passes.
            wait_timeout = time.time() + wait_timeout
            time.sleep(5)

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # NOTE(review): resource is a plain dict, so resource.id
                    # looks like it would raise AttributeError here — confirm
                    # before relying on this timeout message.
                    module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    return(changed, _collect_facts(resource))
|
||||
|
||||
|
||||
def describe_cluster(module, redshift):
    """
    Collect data about the cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """
    identifier = module.params.get('identifier')

    try:
        response = redshift.describe_clusters(identifier)
        resource = response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    return (True, _collect_facts(resource))
|
||||
|
||||
|
||||
def delete_cluster(module, redshift):
    """
    Delete a cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object

    Returns:
        (True, {}) tuple; facts are empty for a delete.
    """

    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    try:
        # BUGFIX: the boto API method is delete_cluster; the previous
        # delete_custer spelling raised AttributeError at runtime.
        redshift.delete_cluster(identifier)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            # Poll every 5 seconds until the cluster reports 'deleting'
            # or the deadline passes.
            wait_timeout = time.time() + wait_timeout
            resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # BUGFIX: resource is a plain dict; resource.id did not
                    # exist and would have raised AttributeError here.
                    module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    return(True, {})
|
||||
|
||||
|
||||
def modify_cluster(module, redshift):
    """
    Modify an existing cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object

    Returns:
        (True, facts) tuple for the modified cluster.
    """

    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Package up the optional parameters
    params = {}
    for p in ('cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port', 'cluster_version',
              'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
        if p in module.params:
            params[p] = module.params.get(p)

    # BUGFIX: modify_cluster used to be called inside the describe_clusters
    # exception handler, so an existing cluster (the normal case) was never
    # modified. Fail if the cluster is missing, then apply the modification.
    try:
        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    try:
        redshift.modify_cluster(identifier, **params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    # Re-describe to capture the cluster's current state for fact collection.
    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            # Poll every 5 seconds until 'available' or the deadline passes.
            wait_timeout = time.time() + wait_timeout
            time.sleep(5)

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # BUGFIX: resource is a plain dict; use its identifier key
                    # instead of the nonexistent .id attribute.
                    module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            # https://github.com/boto/boto/issues/2776 is fixed.
            module.fail_json(msg=str(e))

    return(True, _collect_facts(resource))
|
||||
|
||||
|
||||
def main():
    """
    Ansible entry point: create, delete, modify or gather facts for an
    Amazon Redshift cluster, dispatching on the ``command`` parameter.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
        identifier=dict(required=True),
        node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False),
        username=dict(required=False),
        password=dict(no_log=True, required=False),
        # BUGFIX: was require=False (typo); the unknown key was silently ignored
        db_name=dict(required=False),
        cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
        cluster_security_groups=dict(aliases=['security_groups'], type='list'),
        vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
        cluster_subnet_group_name=dict(aliases=['subnet']),
        availability_zone=dict(aliases=['aws_zone', 'zone']),
        preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
        cluster_parameter_group_name=dict(aliases=['param_group_name']),
        automated_snapshot_retention_period=dict(aliases=['retention_period']),
        port=dict(type='int'),
        cluster_version=dict(aliases=['version'], choices=['1.0']),
        allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
        number_of_nodes=dict(type='int'),
        publicly_accessible=dict(type='bool', default=False),
        encrypted=dict(type='bool', default=False),
        elastic_ip=dict(required=False),
        new_cluster_identifier=dict(aliases=['new_identifier']),
        wait=dict(type='bool', default=False),
        # BUGFIX: coerce to int so the wait loops can add it to time.time();
        # a user-supplied value arrives as a string without type='int'
        wait_timeout=dict(type='int', default=300),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')

    command = module.params.get('command')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))

    # connect to the redshift endpoint
    try:
        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    changed = True
    if command == 'create':
        (changed, cluster) = create_cluster(module, conn)

    elif command == 'facts':
        (changed, cluster) = describe_cluster(module, conn)

    elif command == 'delete':
        (changed, cluster) = delete_cluster(module, conn)

    elif command == 'modify':
        (changed, cluster) = modify_cluster(module, conn)

    module.exit_json(changed=changed, cluster=cluster)
|
||||
|
||||
# import module snippets; the wildcard imports supply AnsibleModule and the
# EC2 helpers used above (ec2_argument_spec, get_aws_connection_info,
# connect_to_aws, ...)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
|
||||
186
lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
Normal file
186
lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
Normal file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2014 Jens Carl, Hothead Games Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
author:
|
||||
- "Jens Carl (@j-carl), Hothead Games Inc."
|
||||
module: redshift_subnet_group
|
||||
version_added: "2.2"
|
||||
short_description: manage Redshift cluster subnet groups
|
||||
description:
|
||||
- Create, modifies, and deletes Redshift cluster subnet groups.
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Specifies whether the subnet should be present or absent.
|
||||
default: 'present'
|
||||
choices: ['present', 'absent' ]
|
||||
group_name:
|
||||
description:
|
||||
- Cluster subnet group name.
|
||||
required: true
|
||||
aliases: ['name']
|
||||
group_description:
|
||||
description:
|
||||
- Database subnet group description.
|
||||
required: false
|
||||
default: null
|
||||
aliases: ['description']
|
||||
group_subnets:
|
||||
description:
|
||||
- List of subnet IDs that make up the cluster subnet group.
|
||||
required: false
|
||||
default: null
|
||||
aliases: ['subnets']
|
||||
requirements: [ 'boto' ]
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a Redshift subnet group
|
||||
- local_action:
|
||||
module: redshift_subnet_group
|
||||
state: present
|
||||
group_name: redshift-subnet
|
||||
group_description: Redshift subnet
|
||||
group_subnets:
|
||||
- 'subnet-aaaaa'
|
||||
- 'subnet-bbbbb'
|
||||
|
||||
# Remove subnet group
|
||||
redshift_subnet_group: >
|
||||
state: absent
|
||||
group_name: redshift-subnet
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
group:
|
||||
description: dictionary containing all Redshift subnet group information
|
||||
returned: success
|
||||
type: dictionary
|
||||
contains:
|
||||
name:
|
||||
description: name of the Redshift subnet group
|
||||
returned: success
|
||||
type: string
|
||||
sample: "redshift_subnet_group_name"
|
||||
vpc_id:
|
||||
description: Id of the VPC where the subnet is located
|
||||
returned: success
|
||||
type: string
|
||||
sample: "vpc-aabb1122"
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto
|
||||
import boto.redshift
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
|
||||
def main():
    """Create or delete a Redshift cluster subnet group.

    Entry point for the redshift_subnet_group module: validates the
    parameter combination for the requested state, connects to the
    regional Redshift endpoint via boto, then creates, modifies or
    deletes the subnet group named by ``group_name``.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        group_name=dict(required=True, aliases=['name']),
        group_description=dict(required=False, aliases=['description']),
        group_subnets=dict(required=False, aliases=['subnets'], type='list'),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')

    state = module.params.get('state')
    group_name = module.params.get('group_name')
    group_description = module.params.get('group_description')
    group_subnets = module.params.get('group_subnets')

    # Cross-parameter validation: a full definition is needed to create,
    # while description/subnets make no sense when deleting.
    if state == 'present':
        for required in ('group_name', 'group_description', 'group_subnets'):
            if not module.params.get(required):
                module.fail_json(msg=str("parameter %s required for state='present'" % required))
    else:
        for not_allowed in ('group_description', 'group_subnets'):
            if module.params.get(not_allowed):
                module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))

    # Connect to the Redshift endpoint.
    try:
        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    try:
        changed = False
        exists = False
        group = None

        # Probe for an existing group; boto signals "not found" via a
        # JSONResponseError rather than an empty result set, so a
        # ClusterSubnetGroupNotFoundFault simply leaves exists=False.
        try:
            matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except boto.exception.JSONResponseError as e:
            if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
                module.fail_json(msg=str(e))

        if state == 'absent':
            if exists:
                conn.delete_cluster_subnet_group(group_name)
                changed = True

        else:
            if not exists:
                new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
                # Unwrap the nested boto JSON response envelope into the
                # flat dict documented in RETURN.
                group = {
                    'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
                            ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
                    'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
                              ['ClusterSubnetGroup']['VpcId'],
                }
            else:
                # NOTE(review): an existing group is always modified (no
                # diffing), so state=present on an unchanged group still
                # reports changed=True.
                changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
                group = {
                    'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
                            ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
                    'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
                              ['ClusterSubnetGroup']['VpcId'],
                }

            changed = True

    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, group=group)
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
440
lib/ansible/modules/cloud/amazon/route53_facts.py
Normal file
440
lib/ansible/modules/cloud/amazon/route53_facts.py
Normal file
@@ -0,0 +1,440 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: route53_facts
|
||||
short_description: Retrieves route53 details using AWS methods
|
||||
description:
|
||||
- Gets various details related to Route53 zone, record set or health check details
|
||||
version_added: "2.0"
|
||||
options:
|
||||
query:
|
||||
description:
|
||||
- specifies the query action to take
|
||||
required: True
|
||||
choices: [
|
||||
'change',
|
||||
'checker_ip_range',
|
||||
'health_check',
|
||||
'hosted_zone',
|
||||
'record_sets',
|
||||
'reusable_delegation_set',
|
||||
]
|
||||
change_id:
|
||||
description:
|
||||
- The ID of the change batch request.
|
||||
The value that you specify here is the value that
|
||||
ChangeResourceRecordSets returned in the Id element
|
||||
when you submitted the request.
|
||||
required: false
|
||||
hosted_zone_id:
|
||||
description:
|
||||
- The Hosted Zone ID of the DNS zone
|
||||
required: false
|
||||
max_items:
|
||||
description:
|
||||
- Maximum number of items to return for various get/list requests
|
||||
required: false
|
||||
next_marker:
|
||||
description:
|
||||
- "Some requests such as list_command: hosted_zones will return a maximum
|
||||
number of entries - EG 100. If the number of entries exceeds this maximum
|
||||
another request can be sent using the NextMarker entry from the first response
|
||||
to get the next page of results"
|
||||
required: false
|
||||
delegation_set_id:
|
||||
description:
|
||||
- The DNS Zone delegation set ID
|
||||
required: false
|
||||
start_record_name:
|
||||
description:
|
||||
- "The first name in the lexicographic ordering of domain names that you want
|
||||
the list_command: record_sets to start listing from"
|
||||
required: false
|
||||
type:
|
||||
description:
|
||||
- The type of DNS record
|
||||
required: false
|
||||
choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ]
|
||||
dns_name:
|
||||
description:
|
||||
- The first name in the lexicographic ordering of domain names that you want
|
||||
the list_command to start listing from
|
||||
required: false
|
||||
resource_id:
|
||||
description:
|
||||
- The ID/s of the specified resource/s
|
||||
required: false
|
||||
aliases: ['resource_ids']
|
||||
health_check_id:
|
||||
description:
|
||||
- The ID of the health check
|
||||
required: false
|
||||
hosted_zone_method:
|
||||
description:
|
||||
- "This is used in conjunction with query: hosted_zone.
|
||||
It allows for listing details, counts or tags of various
|
||||
hosted zone details."
|
||||
required: false
|
||||
choices: [
|
||||
'details',
|
||||
'list',
|
||||
'list_by_name',
|
||||
'count',
|
||||
'tags',
|
||||
]
|
||||
default: 'list'
|
||||
health_check_method:
|
||||
description:
|
||||
- "This is used in conjunction with query: health_check.
|
||||
It allows for listing details, counts or tags of various
|
||||
health check details."
|
||||
required: false
|
||||
choices: [
|
||||
'list',
|
||||
'details',
|
||||
'status',
|
||||
'failure_reason',
|
||||
'count',
|
||||
'tags',
|
||||
]
|
||||
default: 'list'
|
||||
author: Karen Cheng(@Etherdaemon)
|
||||
extends_documentation_fragment: aws
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Simple example of listing all hosted zones
|
||||
- name: List all hosted zones
|
||||
route53_facts:
|
||||
query: hosted_zone
|
||||
register: hosted_zones
|
||||
|
||||
# Getting a count of hosted zones
|
||||
- name: Return a count of all hosted zones
|
||||
route53_facts:
|
||||
query: hosted_zone
|
||||
hosted_zone_method: count
|
||||
register: hosted_zone_count
|
||||
|
||||
- name: List the first 20 resource record sets in a given hosted zone
|
||||
route53_facts:
|
||||
profile: account_name
|
||||
query: record_sets
|
||||
hosted_zone_id: ZZZ1111112222
|
||||
max_items: 20
|
||||
register: record_sets
|
||||
|
||||
- name: List first 20 health checks
|
||||
route53_facts:
|
||||
query: health_check
|
||||
health_check_method: list
|
||||
max_items: 20
|
||||
register: health_checks
|
||||
|
||||
- name: Get health check last failure_reason
|
||||
route53_facts:
|
||||
query: health_check
|
||||
health_check_method: failure_reason
|
||||
health_check_id: 00000000-1111-2222-3333-12345678abcd
|
||||
register: health_check_failure_reason
|
||||
|
||||
- name: Retrieve reusable delegation set details
|
||||
route53_facts:
|
||||
query: reusable_delegation_set
|
||||
delegation_set_id: delegation id
|
||||
register: delegation_sets
|
||||
|
||||
'''
|
||||
try:
|
||||
import boto
|
||||
import botocore
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def get_hosted_zone(client, module):
    """Return the details of the hosted zone named by ``hosted_zone_id``."""
    request = {}
    zone_id = module.params.get('hosted_zone_id')
    if zone_id:
        request['Id'] = zone_id
    else:
        module.fail_json(msg="Hosted Zone Id is required")

    return client.get_hosted_zone(**request)
|
||||
|
||||
|
||||
def reusable_delegation_set_details(client, module):
    """List reusable delegation sets, or fetch a single one when
    ``delegation_set_id`` is supplied (paging options apply to listing only)."""
    opts = module.params
    request = {}
    ds_id = opts.get('delegation_set_id')
    if ds_id:
        request['DelegationSetId'] = ds_id
        return client.get_reusable_delegation_set(**request)

    for param, key in (('max_items', 'MaxItems'), ('next_marker', 'Marker')):
        value = opts.get(param)
        if value:
            request[key] = value
    return client.list_reusable_delegation_sets(**request)
|
||||
|
||||
|
||||
def list_hosted_zones(client, module):
    """List hosted zones, honouring the optional paging and
    delegation-set filters."""
    request = {}
    option_map = (
        ('max_items', 'MaxItems'),
        ('next_marker', 'Marker'),
        ('delegation_set_id', 'DelegationSetId'),
    )
    for param, key in option_map:
        value = module.params.get(param)
        if value:
            request[key] = value

    return client.list_hosted_zones(**request)
|
||||
|
||||
|
||||
def list_hosted_zones_by_name(client, module):
    """List hosted zones ordered by name, optionally starting from a
    given zone ID and/or DNS name."""
    request = {}
    option_map = (
        ('hosted_zone_id', 'HostedZoneId'),
        ('dns_name', 'DNSName'),
        ('max_items', 'MaxItems'),
    )
    for param, key in option_map:
        value = module.params.get(param)
        if value:
            request[key] = value

    return client.list_hosted_zones_by_name(**request)
|
||||
|
||||
|
||||
def change_details(client, module):
    """Return the status of the change batch named by ``change_id``."""
    request = {}
    change_id = module.params.get('change_id')
    if change_id:
        request['Id'] = change_id
    else:
        module.fail_json(msg="change_id is required")

    return client.get_change(**request)
|
||||
|
||||
|
||||
def checker_ip_range_details(client, module):
    """Return the IP ranges used by the Route53 health checkers."""
    return client.get_checker_ip_ranges()
|
||||
|
||||
|
||||
def get_count(client, module):
    """Return the health-check count when the query is ``health_check``,
    otherwise the hosted-zone count."""
    counting_health_checks = module.params.get('query') == 'health_check'
    if counting_health_checks:
        return client.get_health_check_count()
    return client.get_hosted_zone_count()
|
||||
|
||||
|
||||
def get_health_check(client, module):
    """Fetch details, status or the last failure reason for one health check.

    Requires ``health_check_id``; the API call made is selected by
    ``health_check_method`` ('details', 'failure_reason' or 'status').
    Fails the module for any other method value.
    """
    health_check_id = module.params.get('health_check_id')
    if not health_check_id:
        module.fail_json(msg="health_check_id is required")
    params = {'HealthCheckId': health_check_id}

    method = module.params.get('health_check_method')
    if method == 'details':
        results = client.get_health_check(**params)
    elif method == 'failure_reason':
        results = client.get_health_check_last_failure_reason(**params)
    elif method == 'status':
        results = client.get_health_check_status(**params)
    else:
        # Previously an unexpected method fell through and raised
        # UnboundLocalError on 'results'; fail with a clear message instead.
        module.fail_json(msg="unsupported health_check_method: %s" % method)
        results = None

    return results
|
||||
|
||||
|
||||
def get_resource_tags(client, module):
    """List tags for the given health-check or hosted-zone resource IDs."""
    request = {}
    resource_ids = module.params.get('resource_id')
    if resource_ids:
        request['ResourceIds'] = resource_ids
    else:
        module.fail_json(msg="resource_id or resource_ids is required")

    # Route53 distinguishes tag namespaces by resource type.
    is_health_check = module.params.get('query') == 'health_check'
    request['ResourceType'] = 'healthcheck' if is_health_check else 'hostedzone'

    return client.list_tags_for_resources(**request)
|
||||
|
||||
|
||||
def list_health_checks(client, module):
    """List health checks, honouring the optional paging parameters."""
    request = {}
    for param, key in (('max_items', 'MaxItems'), ('next_marker', 'Marker')):
        value = module.params.get(param)
        if value:
            request[key] = value

    return client.list_health_checks(**request)
|
||||
|
||||
|
||||
def record_sets_details(client, module):
    """List resource record sets within one hosted zone.

    ``hosted_zone_id`` is mandatory; ``type`` may only be given together
    with ``start_record_name`` (the Route53 API pages by name first).
    """
    opts = module.params
    request = {}

    zone_id = opts.get('hosted_zone_id')
    if zone_id:
        request['HostedZoneId'] = zone_id
    else:
        module.fail_json(msg="Hosted Zone Id is required")

    if opts.get('max_items'):
        request['MaxItems'] = opts.get('max_items')
    if opts.get('start_record_name'):
        request['StartRecordName'] = opts.get('start_record_name')

    record_type = opts.get('type')
    if record_type and not opts.get('start_record_name'):
        module.fail_json(msg="start_record_name must be specified if type is set")
    elif record_type:
        request['StartRecordType'] = record_type

    return client.list_resource_record_sets(**request)
|
||||
|
||||
|
||||
def health_check_details(client, module):
    """Dispatch ``health_check_method`` to the matching query helper."""
    dispatch = {
        'list': list_health_checks,
        'details': get_health_check,
        'status': get_health_check,
        'failure_reason': get_health_check,
        'count': get_count,
        'tags': get_resource_tags,
    }
    handler = dispatch[module.params.get('health_check_method')]
    return handler(client, module)
|
||||
|
||||
|
||||
def hosted_zone_details(client, module):
    """Dispatch ``hosted_zone_method`` to the matching query helper."""
    dispatch = {
        'details': get_hosted_zone,
        'list': list_hosted_zones,
        'list_by_name': list_hosted_zones_by_name,
        'count': get_count,
        'tags': get_resource_tags,
    }
    handler = dispatch[module.params.get('hosted_zone_method')]
    return handler(client, module)
|
||||
|
||||
|
||||
def main():
    """Entry point for route53_facts: build the argument spec, connect to
    Route53 via boto3 and dispatch the requested ``query``.

    Exits via module.exit_json with the raw API response, or fail_json on
    missing libraries / authorization failure.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        query=dict(choices=[
            'change',
            'checker_ip_range',
            'health_check',
            'hosted_zone',
            'record_sets',
            'reusable_delegation_set',
        ], required=True),
        change_id=dict(),
        hosted_zone_id=dict(),
        max_items=dict(type='str'),
        next_marker=dict(),
        delegation_set_id=dict(),
        start_record_name=dict(),
        type=dict(choices=[
            'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'
        ]),
        dns_name=dict(),
        resource_id=dict(type='list', aliases=['resource_ids']),
        health_check_id=dict(),
        hosted_zone_method=dict(choices=[
            'details',
            'list',
            'list_by_name',
            'count',
            'tags',
        ], default='list'),
        health_check_method=dict(choices=[
            'list',
            'details',
            'status',
            'failure_reason',
            'count',
            'tags',
        ], default='list'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['hosted_zone_method', 'health_check_method'],
        ],
    )

    # Validate requirements. boto3 provides the client and boto provides the
    # exception class caught below, so BOTH must be importable. The previous
    # check accepted either one (HAS_BOTO or HAS_BOTO3) and then crashed with
    # a NameError later when the other library was missing.
    if not (HAS_BOTO and HAS_BOTO3):
        module.fail_json(msg='boto and boto3 are required for this module')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        route53 = boto3_conn(module, conn_type='client', resource='route53',
                             region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg="Can't authorize connection - %s " % str(e))

    # Map each supported query to its handler function.
    invocations = {
        'change': change_details,
        'checker_ip_range': checker_ip_range_details,
        'health_check': health_check_details,
        'hosted_zone': hosted_zone_details,
        'record_sets': record_sets_details,
        'reusable_delegation_set': reusable_delegation_set_details,
    }
    results = invocations[module.params.get('query')](route53, module)

    module.exit_json(**results)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
364
lib/ansible/modules/cloud/amazon/route53_health_check.py
Normal file
364
lib/ansible/modules/cloud/amazon/route53_health_check.py
Normal file
@@ -0,0 +1,364 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: route53_health_check
|
||||
short_description: add or delete health-checks in Amazons Route53 DNS service
|
||||
description:
|
||||
- Creates and deletes DNS Health checks in Amazons Route53 service
|
||||
- Only the port, resource_path, string_match and request_interval are
|
||||
considered when updating existing health-checks.
|
||||
version_added: "2.0"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Specifies the action to take.
|
||||
required: true
|
||||
choices: [ 'present', 'absent' ]
|
||||
ip_address:
|
||||
description:
|
||||
- IP address of the end-point to check. Either this or `fqdn` has to be
|
||||
provided.
|
||||
required: false
|
||||
default: null
|
||||
port:
|
||||
description:
|
||||
- The port on the endpoint on which you want Amazon Route 53 to perform
|
||||
health checks. Required for TCP checks.
|
||||
required: false
|
||||
default: null
|
||||
type:
|
||||
description:
|
||||
- The type of health check that you want to create, which indicates how
|
||||
Amazon Route 53 determines whether an endpoint is healthy.
|
||||
required: true
|
||||
choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
|
||||
resource_path:
|
||||
description:
|
||||
- The path that you want Amazon Route 53 to request when performing
|
||||
health checks. The path can be any value for which your endpoint will
|
||||
return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
|
||||
for example the file /docs/route53-health-check.html.
|
||||
- Required for all checks except TCP.
|
||||
- The path must begin with a /
|
||||
- Maximum 255 characters.
|
||||
required: false
|
||||
default: null
|
||||
fqdn:
|
||||
description:
|
||||
- Domain name of the endpoint to check. Either this or `ip_address` has
|
||||
to be provided. When both are given the `fqdn` is used in the `Host:`
|
||||
header of the HTTP request.
|
||||
required: false
|
||||
string_match:
|
||||
description:
|
||||
- If the check type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string
|
||||
that you want Amazon Route 53 to search for in the response body from
|
||||
the specified resource. If the string appears in the first 5120 bytes
|
||||
of the response body, Amazon Route 53 considers the resource healthy.
|
||||
required: false
|
||||
default: null
|
||||
request_interval:
|
||||
description:
|
||||
- The number of seconds between the time that Amazon Route 53 gets a
|
||||
response from your endpoint and the time that it sends the next
|
||||
health-check request.
|
||||
required: true
|
||||
default: 30
|
||||
choices: [ 10, 30 ]
|
||||
failure_threshold:
|
||||
description:
|
||||
- The number of consecutive health checks that an endpoint must pass or
|
||||
fail for Amazon Route 53 to change the current status of the endpoint
|
||||
from unhealthy to healthy or vice versa.
|
||||
required: true
|
||||
default: 3
|
||||
choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
|
||||
author: "zimbatm (@zimbatm)"
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a health-check for host1.example.com and use it in record
|
||||
- route53_health_check:
|
||||
state: present
|
||||
fqdn: host1.example.com
|
||||
type: HTTP_STR_MATCH
|
||||
resource_path: /
|
||||
string_match: "Hello"
|
||||
request_interval: 10
|
||||
failure_threshold: 2
|
||||
register: my_health_check
|
||||
|
||||
- route53:
|
||||
action: create
|
||||
zone: "example.com"
|
||||
type: CNAME
|
||||
record: "www.example.com"
|
||||
value: host1.example.com
|
||||
ttl: 30
|
||||
# Routing policy
|
||||
identifier: "host1@www"
|
||||
weight: 100
|
||||
health_check: "{{ my_health_check.health_check.id }}"
|
||||
|
||||
# Delete health-check
|
||||
- route53_health_check:
|
||||
state: absent
|
||||
fqdn: host1.example.com
|
||||
|
||||
'''
|
||||
|
||||
import uuid
|
||||
|
||||
try:
|
||||
import boto
|
||||
import boto.ec2
|
||||
from boto import route53
|
||||
from boto.route53 import Route53Connection, exception
|
||||
from boto.route53.healthcheck import HealthCheck
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
# Things that can't get changed:
|
||||
# protocol
|
||||
# ip_address or domain
|
||||
# request_interval
|
||||
# string_match if not previously enabled
|
||||
def find_health_check(conn, wanted):
    """Return an existing health check whose immutable fields (IP address,
    FQDN, type and request interval) all match *wanted*, or None."""
    for candidate in conn.get_list_health_checks().HealthChecks:
        config = candidate.HealthCheckConfig
        matches = (
            config.get('IPAddress') == wanted.ip_addr and
            config.get('FullyQualifiedDomainName') == wanted.fqdn and
            config.get('Type') == wanted.hc_type and
            config.get('RequestInterval') == str(wanted.request_interval)
        )
        if matches:
            return candidate
    return None
|
||||
|
||||
def to_health_check(config):
    """Build a boto HealthCheck object from a HealthCheckConfig mapping."""
    get = config.get
    return HealthCheck(
        get('IPAddress'),
        get('Port'),
        get('Type'),
        get('ResourcePath'),
        fqdn=get('FullyQualifiedDomainName'),
        string_match=get('SearchString'),
        request_interval=int(get('RequestInterval')),
        failure_threshold=int(get('FailureThreshold')),
    )
|
||||
|
||||
def health_check_diff(a, b):
    """Compare two objects attribute-by-attribute via their __dict__s.

    Returns {attr: value-from-b} for every attribute whose values differ
    (missing attributes compare as None); empty dict when identical.
    """
    left, right = a.__dict__, b.__dict__
    if left == right:
        return {}
    return {
        key: right.get(key)
        for key in set(left) | set(right)
        if left.get(key) != right.get(key)
    }
|
||||
|
||||
def to_template_params(health_check):
    """Expand a HealthCheck into the substitution dict consumed by the
    POST/UPDATE XML templates; optional XML fragments default to ''."""
    params = {
        'port': health_check.port,
        'type': health_check.hc_type,
        'request_interval': health_check.request_interval,
        'failure_threshold': health_check.failure_threshold,
    }
    # (params key, value, XML fragment template, placeholder name).
    # resource_path deliberately uses the module-level template, matching
    # the original implementation.
    optional_parts = (
        ('ip_addr_part', health_check.ip_addr, HealthCheck.XMLIpAddrPart, 'ip_addr'),
        ('resource_path_part', health_check.resource_path, XMLResourcePathPart, 'resource_path'),
        ('fqdn_part', health_check.fqdn, HealthCheck.XMLFQDNPart, 'fqdn'),
        ('string_match_part', health_check.string_match, HealthCheck.XMLStringMatchPart, 'string_match'),
    )
    for key, value, template, placeholder in optional_parts:
        params[key] = template % {placeholder: value} if value else ''
    return params
|
||||
|
||||
XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
|
||||
|
||||
POSTXMLBody = """
|
||||
<CreateHealthCheckRequest xmlns="%(xmlns)s">
|
||||
<CallerReference>%(caller_ref)s</CallerReference>
|
||||
<HealthCheckConfig>
|
||||
%(ip_addr_part)s
|
||||
<Port>%(port)s</Port>
|
||||
<Type>%(type)s</Type>
|
||||
%(resource_path_part)s
|
||||
%(fqdn_part)s
|
||||
%(string_match_part)s
|
||||
<RequestInterval>%(request_interval)s</RequestInterval>
|
||||
<FailureThreshold>%(failure_threshold)s</FailureThreshold>
|
||||
</HealthCheckConfig>
|
||||
</CreateHealthCheckRequest>
|
||||
"""
|
||||
|
||||
UPDATEHCXMLBody = """
|
||||
<UpdateHealthCheckRequest xmlns="%(xmlns)s">
|
||||
<HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
|
||||
%(ip_addr_part)s
|
||||
<Port>%(port)s</Port>
|
||||
%(resource_path_part)s
|
||||
%(fqdn_part)s
|
||||
%(string_match_part)s
|
||||
<FailureThreshold>%(failure_threshold)i</FailureThreshold>
|
||||
</UpdateHealthCheckRequest>
|
||||
"""
|
||||
|
||||
def create_health_check(conn, health_check, caller_ref=None):
    """POST a CreateHealthCheck request for *health_check*.

    A random caller reference is generated when none is supplied.
    Returns the parsed XML response element on HTTP 201; any other
    status raises exception.DNSServerError.
    """
    if caller_ref is None:
        caller_ref = str(uuid.uuid4())

    template_args = to_template_params(health_check)
    template_args.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)

    request_uri = '/%s/healthcheck' % conn.Version
    response = conn.make_request('POST', request_uri,
                                 {'Content-Type': 'text/xml'},
                                 POSTXMLBody % template_args)
    body = response.read()
    boto.log.debug(body)
    if response.status != 201:
        raise exception.DNSServerError(response.status, response.reason, body)

    element = boto.jsonresponse.Element()
    boto.jsonresponse.XmlHandler(element, None).parse(body)
    return element
|
||||
|
||||
def update_health_check(conn, health_check_id, health_check_version, health_check):
    """POST an UpdateHealthCheck request for an existing check.

    Returns the parsed XML response element; any status other than
    200/204 raises exception.DNSServerError.
    """
    template_args = to_template_params(health_check)
    template_args.update(
        xmlns=conn.XMLNameSpace,
        health_check_version=health_check_version,
    )

    request_uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
    response = conn.make_request('POST', request_uri,
                                 {'Content-Type': 'text/xml'},
                                 UPDATEHCXMLBody % template_args)
    body = response.read()
    boto.log.debug(body)
    if response.status not in (200, 204):
        raise exception.DNSServerError(response.status, response.reason, body)

    element = boto.jsonresponse.Element()
    boto.jsonresponse.XmlHandler(element, None).parse(body)
    return element
|
||||
|
||||
def main():
    """Create, update or delete a Route53 health check.

    Validates the parameter combination for the requested check type,
    looks up an existing check with the same immutable fields, and then
    creates/updates/deletes as required by ``state``.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(choices=['present', 'absent'], default='present'),
        ip_address=dict(),
        port=dict(type='int'),
        type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
        resource_path=dict(),
        fqdn=dict(),
        string_match=dict(),
        request_interval=dict(type='int', choices=[10, 30], default=30),
        failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto 2.27.0+ required for this module')

    state_in = module.params.get('state')
    ip_addr_in = module.params.get('ip_address')
    port_in = module.params.get('port')
    type_in = module.params.get('type')
    resource_path_in = module.params.get('resource_path')
    fqdn_in = module.params.get('fqdn')
    string_match_in = module.params.get('string_match')
    request_interval_in = module.params.get('request_interval')
    failure_threshold_in = module.params.get('failure_threshold')

    if ip_addr_in is None and fqdn_in is None:
        module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")

    # Default the port from the protocol; TCP checks must specify one.
    if port_in is None:
        if type_in in ['HTTP', 'HTTP_STR_MATCH']:
            port_in = 80
        elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
            port_in = 443
        else:
            module.fail_json(msg="parameter 'port' is required for 'type' TCP")

    # string_match is mandatory for *_STR_MATCH types and forbidden otherwise.
    if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
        if string_match_in is None:
            module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types")
        elif len(string_match_in) > 255:
            module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
    elif string_match_in:
        module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    # Connect to the route53 endpoint.
    try:
        conn = Route53Connection(**aws_connect_kwargs)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg=e.error_message)

    changed = False
    action = None
    check_id = None
    wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in,
                                string_match_in, request_interval_in, failure_threshold_in)
    existing_check = find_health_check(conn, wanted_config)
    if existing_check:
        check_id = existing_check.Id
        existing_config = to_health_check(existing_check.HealthCheckConfig)

    if state_in == 'present':
        if existing_check is None:
            action = "create"
            check_id = create_health_check(conn, wanted_config).HealthCheck.Id
            changed = True
        else:
            # Only push an update when the wanted config actually differs.
            # BUGFIX: the previous condition was inverted ("if not diff"),
            # which re-sent identical configs and skipped real changes.
            diff = health_check_diff(existing_config, wanted_config)
            if diff:
                action = "update"
                update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config)
                changed = True
    elif state_in == 'absent':
        if check_id:
            action = "delete"
            conn.delete_health_check(check_id)
            changed = True
    else:
        module.fail_json(msg="Logic Error: Unknown state")

    module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
236
lib/ansible/modules/cloud/amazon/route53_zone.py
Normal file
236
lib/ansible/modules/cloud/amazon/route53_zone.py
Normal file
@@ -0,0 +1,236 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: route53_zone
|
||||
short_description: add or delete Route53 zones
|
||||
description:
|
||||
- Creates and deletes Route53 private and public zones
|
||||
version_added: "2.0"
|
||||
options:
|
||||
zone:
|
||||
description:
|
||||
- "The DNS zone record (eg: foo.com.)"
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- whether the zone should exist
|
||||
required: false
|
||||
default: present
|
||||
choices: [ "present", "absent" ]
|
||||
vpc_id:
|
||||
description:
|
||||
- The VPC ID the zone should be a part of (if this is going to be a private zone)
|
||||
required: false
|
||||
default: null
|
||||
vpc_region:
|
||||
description:
|
||||
- The VPC Region the zone should be a part of (if this is going to be a private zone)
|
||||
required: false
|
||||
default: null
|
||||
comment:
|
||||
description:
|
||||
- Comment associated with the zone
|
||||
required: false
|
||||
default: ''
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
author: "Christopher Troup (@minichate)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# create a public zone
|
||||
- route53_zone:
|
||||
zone: example.com
|
||||
state: present
|
||||
comment: this is an example
|
||||
|
||||
# delete a public zone
|
||||
- route53_zone:
|
||||
zone: example.com
|
||||
state: absent
|
||||
|
||||
- name: private zone for devel
|
||||
route53_zone:
|
||||
zone: devel.example.com
|
||||
state: present
|
||||
vpc_id: '{{ myvpc_id }}'
|
||||
comment: developer domain
|
||||
|
||||
# more complex example
|
||||
- name: register output after creating zone in parameterized region
|
||||
route53_zone:
|
||||
vpc_id: '{{ vpc.vpc_id }}'
|
||||
vpc_region: '{{ ec2_region }}'
|
||||
zone: '{{ vpc_dns_zone }}'
|
||||
state: present
|
||||
register: zone_out
|
||||
|
||||
- debug:
|
||||
var: zone_out
|
||||
'''
|
||||
|
||||
RETURN='''
|
||||
comment:
|
||||
description: optional hosted zone comment
|
||||
returned: when hosted zone exists
|
||||
type: string
|
||||
sample: "Private zone"
|
||||
name:
|
||||
description: hosted zone name
|
||||
returned: when hosted zone exists
|
||||
type: string
|
||||
sample: "private.local."
|
||||
private_zone:
|
||||
description: whether hosted zone is private or public
|
||||
returned: when hosted zone exists
|
||||
type: bool
|
||||
sample: true
|
||||
vpc_id:
|
||||
description: id of vpc attached to private hosted zone
|
||||
returned: for private hosted zone
|
||||
type: string
|
||||
sample: "vpc-1d36c84f"
|
||||
vpc_region:
|
||||
description: region of vpc attached to private hosted zone
|
||||
returned: for private hosted zone
|
||||
type: string
|
||||
sample: "eu-west-1"
|
||||
zone_id:
|
||||
description: hosted zone id
|
||||
returned: when hosted zone exists
|
||||
type: string
|
||||
sample: "Z6JQG9820BEFMW"
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto
|
||||
import boto.ec2
|
||||
from boto import route53
|
||||
from boto.route53 import Route53Connection
|
||||
from boto.route53.zone import Zone
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def main():
    """Entry point for the route53_zone module.

    Ensures a Route53 hosted zone (public or private) exists or is absent.
    Always terminates through module.exit_json() / module.fail_json().
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        zone=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        vpc_id=dict(default=None),
        vpc_region=dict(default=None),
        comment=dict(default='')))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    zone_in = module.params.get('zone').lower()
    state = module.params.get('state').lower()
    vpc_id = module.params.get('vpc_id')
    vpc_region = module.params.get('vpc_region')
    comment = module.params.get('comment')

    # Route53 zone names are fully qualified; normalize to a trailing dot.
    if zone_in[-1:] != '.':
        zone_in += "."

    # A zone is treated as private only when BOTH vpc_id and vpc_region are given.
    private_zone = vpc_id is not None and vpc_region is not None

    # Route53 is a global service: region and endpoint URL are ignored here.
    _, _, aws_connect_kwargs = get_aws_connection_info(module)

    # connect to the route53 endpoint
    try:
        conn = Route53Connection(**aws_connect_kwargs)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg=e.error_message)

    results = conn.get_all_hosted_zones()
    # Map of zone name -> hosted zone id for the zones that match our filters.
    zones = {}

    for r53zone in results['ListHostedZonesResponse']['HostedZones']:
        zone_id = r53zone['Id'].replace('/hostedzone/', '')
        zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse']
        # When a vpc_id was requested, only consider zones attached to that VPC.
        if vpc_id and 'VPCs' in zone_details:
            # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
            if isinstance(zone_details['VPCs'], dict):
                if zone_details['VPCs']['VPC']['VPCId'] == vpc_id:
                    zones[r53zone['Name']] = zone_id
            else:  # Forward compatibility for when boto fixes that bug
                if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
                    zones[r53zone['Name']] = zone_id
        else:
            zones[r53zone['Name']] = zone_id

    # Keys double as kwargs for conn.create_hosted_zone() below and as the
    # returned 'set' facts.
    record = {
        'private_zone': private_zone,
        'vpc_id': vpc_id,
        'vpc_region': vpc_region,
        'comment': comment,
    }

    if state == 'present' and zone_in in zones:
        # Zone already exists; for private zones verify the VPC attachment
        # matches, since Route53 cannot change it after creation.
        if private_zone:
            details = conn.get_hosted_zone(zones[zone_in])

            if 'VPCs' not in details['GetHostedZoneResponse']:
                module.fail_json(
                    msg="Can't change VPC from public to private"
                )

            vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC']
            current_vpc_id = vpc_details['VPCId']
            current_vpc_region = vpc_details['VPCRegion']

            if current_vpc_id != vpc_id:
                module.fail_json(
                    msg="Can't change VPC ID once a zone has been created"
                )
            if current_vpc_region != vpc_region:
                module.fail_json(
                    msg="Can't change VPC Region once a zone has been created"
                )

        record['zone_id'] = zones[zone_in]
        record['name'] = zone_in
        # NOTE(review): an existing zone's comment is not compared/updated here.
        module.exit_json(changed=False, set=record)

    elif state == 'present':
        # Zone does not exist yet: create it.
        result = conn.create_hosted_zone(zone_in, **record)
        hosted_zone = result['CreateHostedZoneResponse']['HostedZone']
        zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
        record['zone_id'] = zone_id
        record['name'] = zone_in
        module.exit_json(changed=True, set=record)

    elif state == 'absent' and zone_in in zones:
        conn.delete_hosted_zone(zones[zone_in])
        module.exit_json(changed=True)

    elif state == 'absent':
        # Zone already gone; nothing to do.
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
|
||||
437
lib/ansible/modules/cloud/amazon/s3_bucket.py
Normal file
437
lib/ansible/modules/cloud/amazon/s3_bucket.py
Normal file
@@ -0,0 +1,437 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: s3_bucket
|
||||
short_description: Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
|
||||
description:
|
||||
- Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
|
||||
version_added: "2.0"
|
||||
author: "Rob White (@wimnat)"
|
||||
options:
|
||||
force:
|
||||
description:
|
||||
- When trying to delete a bucket, delete all keys in the bucket first (an s3 bucket must be empty for a successful deletion)
|
||||
required: false
|
||||
default: no
|
||||
choices: [ 'yes', 'no' ]
|
||||
name:
|
||||
description:
|
||||
- Name of the s3 bucket
|
||||
required: true
|
||||
default: null
|
||||
policy:
|
||||
description:
|
||||
- The JSON policy as a string.
|
||||
required: false
|
||||
default: null
|
||||
s3_url:
|
||||
description:
|
||||
- S3 URL endpoint for usage with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS
|
||||
default: null
|
||||
aliases: [ S3_URL ]
|
||||
ceph:
|
||||
description:
|
||||
- Enable API compatibility with Ceph. It takes into account the S3 API subset working with Ceph in order to provide the same module behaviour where possible.
|
||||
version_added: "2.2"
|
||||
requester_pays:
|
||||
description:
|
||||
- With Requester Pays buckets, the requester instead of the bucket owner pays the cost of the request and the data download from the bucket.
|
||||
required: false
|
||||
default: no
|
||||
choices: [ 'yes', 'no' ]
|
||||
state:
|
||||
description:
|
||||
- Create or remove the s3 bucket
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
tags:
|
||||
description:
|
||||
- tags dict to apply to bucket
|
||||
required: false
|
||||
default: null
|
||||
versioning:
|
||||
description:
|
||||
- Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
|
||||
required: false
|
||||
default: null
|
||||
choices: [ 'yes', 'no' ]
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Create a simple s3 bucket
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
|
||||
# Create a simple s3 bucket on Ceph Rados Gateway
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
s3_url: http://your-ceph-rados-gateway-server.xxx
|
||||
ceph: true
|
||||
|
||||
# Remove an s3 bucket and any keys it contains
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
|
||||
- s3_bucket:
|
||||
name: mys3bucket
|
||||
policy: "{{ lookup('file','policy.json') }}"
|
||||
requester_pays: yes
|
||||
versioning: yes
|
||||
tags:
|
||||
example: tag1
|
||||
another: tag2
|
||||
|
||||
'''
|
||||
|
||||
import os
|
||||
import xml.etree.ElementTree as ET
|
||||
import urlparse
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
from boto.s3.connection import OrdinaryCallingFormat, Location
|
||||
from boto.s3.tagging import Tags, TagSet
|
||||
from boto.exception import BotoServerError, S3CreateError, S3ResponseError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
|
||||
def get_request_payment_status(bucket):
    """Return True when the bucket is configured as Requester Pays.

    Fetches the bucket's request-payment XML and inspects the <Payer>
    element; any value other than "BucketOwner" means the requester pays.
    """
    payer_xpath = './/{http://s3.amazonaws.com/doc/2006-03-01/}Payer'
    doc = ET.fromstring(bucket.get_request_payment())
    for payer_element in doc.findall(payer_xpath):
        payer = payer_element.text

    return payer != "BucketOwner"
|
||||
|
||||
|
||||
def create_tags_container(tags):
    """Build a boto Tags container from a plain dict of tag key/values.

    :param tags: dict mapping tag keys to tag values
    :return: boto.s3.tagging.Tags object suitable for Bucket.set_tags()
    """
    tag_set = TagSet()
    tags_obj = Tags()
    # dict.items() works on both Python 2 and 3; the previous iteritems()
    # call was Python-2-only.
    for key, val in tags.items():
        tag_set.add_tag(key, val)

    tags_obj.add_tag_set(tag_set)
    return tags_obj
|
||||
|
||||
|
||||
def _create_or_update_bucket(connection, module, location):
    """Ensure the bucket exists with the requested versioning, requester-pays,
    policy and tag configuration, then exit the module.

    :param connection: boto S3 connection
    :param module: AnsibleModule instance (params: name, policy,
        requester_pays, tags, versioning)
    :param location: boto Location for bucket creation
    Exits via module.exit_json()/fail_json(); never returns.
    """
    policy = module.params.get("policy")
    name = module.params.get("name")
    requester_pays = module.params.get("requester_pays")
    tags = module.params.get("tags")
    versioning = module.params.get("versioning")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        # Bucket does not exist (or is inaccessible): try to create it.
        try:
            bucket = connection.create_bucket(name, location=location)
            changed = True
        except S3CreateError as e:
            module.fail_json(msg=e.message)

    # Versioning
    versioning_status = bucket.get_versioning_status()
    if versioning_status:
        if versioning is not None:
            if versioning and versioning_status['Versioning'] != "Enabled":
                try:
                    bucket.configure_versioning(versioning)
                    changed = True
                    versioning_status = bucket.get_versioning_status()
                except S3ResponseError as e:
                    module.fail_json(msg=e.message)
            # BUGFIX: only suspend versioning when it is currently enabled.
            # The previous condition (!= "Enabled") never suspended an enabled
            # bucket and reported a spurious change on never-versioned buckets.
            elif not versioning and versioning_status['Versioning'] == "Enabled":
                try:
                    bucket.configure_versioning(versioning)
                    changed = True
                    versioning_status = bucket.get_versioning_status()
                except S3ResponseError as e:
                    module.fail_json(msg=e.message)

    # Requester pays
    requester_pays_status = get_request_payment_status(bucket)
    if requester_pays_status != requester_pays:
        if requester_pays:
            payer = 'Requester'
        else:
            payer = 'BucketOwner'
        bucket.set_request_payment(payer=payer)
        changed = True
        requester_pays_status = get_request_payment_status(bucket)

    # Policy
    try:
        current_policy = json.loads(bucket.get_policy())
    except S3ResponseError as e:
        if e.error_code == "NoSuchBucketPolicy":
            # No policy attached yet.
            current_policy = {}
        else:
            module.fail_json(msg=e.message)
    if policy is not None:
        if isinstance(policy, basestring):
            policy = json.loads(policy)

        if not policy:
            bucket.delete_policy()
            # only show changed if there was already a policy.
            # BUGFIX: accumulate into changed instead of overwriting it, so a
            # bucket creation above is still reported as a change.
            changed = changed or bool(current_policy)

        elif current_policy != policy:
            try:
                bucket.set_policy(json.dumps(policy))
                changed = True
                current_policy = json.loads(bucket.get_policy())
            except S3ResponseError as e:
                module.fail_json(msg=e.message)

    # Tags
    try:
        current_tags = bucket.get_tags()
    except S3ResponseError as e:
        if e.error_code == "NoSuchTagSet":
            current_tags = None
        else:
            module.fail_json(msg=e.message)

    if current_tags is None:
        current_tags_dict = {}
    else:
        current_tags_dict = dict((t.key, t.value) for t in current_tags[0])

    if tags is not None:
        if current_tags_dict != tags:
            try:
                if tags:
                    bucket.set_tags(create_tags_container(tags))
                else:
                    bucket.delete_tags()
                current_tags_dict = tags
                changed = True
            except S3ResponseError as e:
                module.fail_json(msg=e.message)

    module.exit_json(changed=changed, name=bucket.name, versioning=versioning_status, requester_pays=requester_pays_status, policy=current_policy, tags=current_tags_dict)
|
||||
|
||||
|
||||
def _destroy_bucket(connection, module):
    """Delete the named bucket, optionally emptying it first, then exit.

    Exits with changed=False when the bucket is already absent; fails the
    module on any other S3 error.
    """
    force_delete = module.params.get("force")
    bucket_name = module.params.get("name")
    changed = False

    try:
        bucket = connection.get_bucket(bucket_name)
    except S3ResponseError as e:
        if e.error_code == "NoSuchBucket":
            # Bucket already absent
            module.exit_json(changed=changed)
        module.fail_json(msg=e.message)

    if force_delete:
        # An S3 bucket must be empty before it can be deleted.
        try:
            for obj in bucket.list():
                obj.delete()
        except BotoServerError as e:
            module.fail_json(msg=e.message)

    try:
        bucket = connection.delete_bucket(bucket_name)
        changed = True
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def _create_or_update_bucket_ceph(connection, module, location):
    """Ensure the bucket exists on a Ceph RGW endpoint, then exit.

    Only creation is handled; updating bucket settings is not implemented.
    """
    # TODO: add update
    bucket_name = module.params.get("name")
    changed = False

    try:
        bucket = connection.get_bucket(bucket_name)
    except S3ResponseError:
        try:
            bucket = connection.create_bucket(bucket_name, location=location)
            changed = True
        except S3CreateError as e:
            module.fail_json(msg=e.message)

    if not bucket:
        module.fail_json(msg='Unable to create bucket, no error from the API')
    module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def _destroy_bucket_ceph(connection, module):
    """Delete a bucket on a Ceph RGW endpoint (same path as AWS deletion)."""
    _destroy_bucket(connection, module)
|
||||
|
||||
|
||||
def create_or_update_bucket(connection, module, location, flavour='aws'):
    """Dispatch bucket creation/update to the Ceph or AWS implementation."""
    if flavour == 'ceph':
        handler = _create_or_update_bucket_ceph
    else:
        handler = _create_or_update_bucket
    handler(connection, module, location)
|
||||
|
||||
|
||||
def destroy_bucket(connection, module, flavour='aws'):
    """Dispatch bucket deletion to the Ceph or AWS implementation."""
    if flavour == 'ceph':
        handler = _destroy_bucket_ceph
    else:
        handler = _destroy_bucket
    handler(connection, module)
|
||||
|
||||
|
||||
def is_fakes3(s3_url):
    """Return True if s3_url has scheme fakes3:// or fakes3s://.

    :param s3_url: endpoint URL string or None
    """
    if s3_url is None:
        return False
    # Local compatibility import: the top-level 'urlparse' module exists only
    # on Python 2; fall back to urllib.parse on Python 3.
    try:
        from urlparse import urlparse  # Python 2
    except ImportError:
        from urllib.parse import urlparse  # Python 3
    return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
|
||||
|
||||
|
||||
def is_walrus(s3_url):
    """Return True if it's a Walrus endpoint, not S3.

    We assume anything other than *.amazonaws.com is Walrus.

    :param s3_url: endpoint URL string or None
    """
    if s3_url is None:
        return False
    # Local compatibility import: the top-level 'urlparse' module exists only
    # on Python 2; fall back to urllib.parse on Python 3.
    try:
        from urlparse import urlparse  # Python 2
    except ImportError:
        from urllib.parse import urlparse  # Python 3
    return not urlparse(s3_url).hostname.endswith('amazonaws.com')
|
||||
|
||||
def main():
    """Entry point for the s3_bucket module.

    Builds an S3 connection appropriate for the target endpoint (AWS, Ceph
    RGW, fakes3 or Walrus) and dispatches to the create/update or destroy
    path. Exits via module.exit_json()/fail_json().
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            force=dict(required=False, default='no', type='bool'),
            policy=dict(required=False, default=None, type='json'),
            name=dict(required=True, type='str'),
            requester_pays=dict(default='no', type='bool'),
            s3_url=dict(aliases=['S3_URL'], type='str'),
            state=dict(default='present', type='str', choices=['present', 'absent']),
            tags=dict(required=False, default=None, type='dict'),
            versioning=dict(default=None, type='bool'),
            ceph=dict(default='no', type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    s3_url = module.params.get('s3_url')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    ceph = module.params.get('ceph')

    if ceph and not s3_url:
        module.fail_json(msg='ceph flavour requires s3_url')

    flavour = 'aws'

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if s3_url and ceph:
            ceph = urlparse.urlparse(s3_url)
            connection = boto.connect_s3(
                host=ceph.hostname,
                port=ceph.port,
                is_secure=ceph.scheme == 'https',
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_params
            )
            flavour = 'ceph'
        elif is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            # BUGFIX: use boto.connect_s3 here. The previous code called
            # S3Connection directly, a name this module never imports, so the
            # fakes3 branch raised NameError. connect_s3 forwards these
            # keyword arguments to S3Connection.
            connection = boto.connect_s3(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_params
            )
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            connection = boto.connect_walrus(walrus, **aws_connect_params)
        else:
            connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if connection is None:
                connection = boto.connect_s3(**aws_connect_params)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
    except Exception as e:
        module.fail_json(msg='Failed to connect to S3: %s' % str(e))

    if connection is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    state = module.params.get("state")

    if state == 'present':
        # BUGFIX: pass the detected flavour so a Ceph endpoint takes the Ceph
        # create path; previously flavour was only honoured for deletion.
        create_or_update_bucket(connection, module, location, flavour=flavour)
    elif state == 'absent':
        destroy_bucket(connection, module, flavour=flavour)


if __name__ == '__main__':
    main()
|
||||
439
lib/ansible/modules/cloud/amazon/s3_lifecycle.py
Normal file
439
lib/ansible/modules/cloud/amazon/s3_lifecycle.py
Normal file
@@ -0,0 +1,439 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: s3_lifecycle
|
||||
short_description: Manage s3 bucket lifecycle rules in AWS
|
||||
description:
|
||||
- Manage s3 bucket lifecycle rules in AWS
|
||||
version_added: "2.0"
|
||||
author: "Rob White (@wimnat)"
|
||||
notes:
|
||||
- If specifying expiration time as days then transition time must also be specified in days
|
||||
- If specifying expiration time as a date then transition time must also be specified as a date
|
||||
requirements:
|
||||
- python-dateutil
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- "Name of the s3 bucket"
|
||||
required: true
|
||||
expiration_date:
|
||||
description:
|
||||
- "Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified."
|
||||
required: false
|
||||
default: null
|
||||
expiration_days:
|
||||
description:
|
||||
- "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
|
||||
required: false
|
||||
default: null
|
||||
prefix:
|
||||
description:
|
||||
- "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
|
||||
required: false
|
||||
default: null
|
||||
rule_id:
|
||||
description:
|
||||
- "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
|
||||
required: false
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- "Create or remove the lifecycle rule"
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
status:
|
||||
description:
|
||||
- "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
|
||||
required: false
|
||||
default: enabled
|
||||
choices: [ 'enabled', 'disabled' ]
|
||||
storage_class:
|
||||
description:
|
||||
- "The storage class to transition to. Currently there are two supported values - 'glacier' or 'standard_ia'."
|
||||
- "The 'standard_ia' class is only being available from Ansible version 2.2."
|
||||
required: false
|
||||
default: glacier
|
||||
choices: [ 'glacier', 'standard_ia']
|
||||
transition_date:
|
||||
description:
|
||||
- "Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified, this parameter is required."
|
||||
required: false
|
||||
default: null
|
||||
transition_days:
|
||||
description:
|
||||
- "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
|
||||
required: false
|
||||
default: null
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
|
||||
- s3_lifecycle:
|
||||
name: mybucket
|
||||
expiration_days: 30
|
||||
prefix: /logs/
|
||||
status: enabled
|
||||
state: present
|
||||
|
||||
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
|
||||
- s3_lifecycle:
|
||||
name: mybucket
|
||||
transition_days: 7
|
||||
expiration_days: 90
|
||||
prefix: /logs/
|
||||
status: enabled
|
||||
state: present
|
||||
|
||||
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030. Note that midnight GMT must be specified.
|
||||
# Be sure to quote your date strings
|
||||
- s3_lifecycle:
|
||||
name: mybucket
|
||||
transition_date: "2020-12-30T00:00:00.000Z"
|
||||
expiration_date: "2030-12-30T00:00:00.000Z"
|
||||
prefix: /logs/
|
||||
status: enabled
|
||||
state: present
|
||||
|
||||
# Disable the rule created above
|
||||
- s3_lifecycle:
|
||||
name: mybucket
|
||||
prefix: /logs/
|
||||
status: disabled
|
||||
state: present
|
||||
|
||||
# Delete the lifecycle rule created above
|
||||
- s3_lifecycle:
|
||||
name: mybucket
|
||||
prefix: /logs/
|
||||
state: absent
|
||||
|
||||
# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
|
||||
- s3_lifecycle:
|
||||
name: mybucket
|
||||
prefix: /backups/
|
||||
storage_class: standard_ia
|
||||
transition_days: 31
|
||||
state: present
|
||||
status: enabled
|
||||
|
||||
'''
|
||||
|
||||
import xml.etree.ElementTree as ET
|
||||
import copy
|
||||
import datetime
|
||||
|
||||
try:
|
||||
import dateutil.parser
|
||||
HAS_DATEUTIL = True
|
||||
except ImportError:
|
||||
HAS_DATEUTIL = False
|
||||
|
||||
try:
|
||||
import boto
|
||||
import boto.ec2
|
||||
from boto.s3.connection import OrdinaryCallingFormat, Location
|
||||
from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition
|
||||
from boto.exception import BotoServerError, S3CreateError, S3ResponseError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
def create_lifecycle_rule(connection, module):
    """Ensure the requested lifecycle rule is present on the bucket.

    Builds a boto Rule from the module parameters, merges it with the
    bucket's existing lifecycle rules (matching by rule id when set,
    otherwise by prefix), writes the merged configuration back, and exits
    via module.exit_json()/fail_json().
    """
    name = module.params.get("name")
    expiration_date = module.params.get("expiration_date")
    expiration_days = module.params.get("expiration_days")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    status = module.params.get("status")
    storage_class = module.params.get("storage_class")
    transition_date = module.params.get("transition_date")
    transition_days = module.params.get("transition_days")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            # No rules yet: start from an empty configuration.
            current_lifecycle_obj = Lifecycle()
        else:
            module.fail_json(msg=e.message)

    # Create expiration (days takes precedence over date when both are set)
    if expiration_days is not None:
        expiration_obj = Expiration(days=expiration_days)
    elif expiration_date is not None:
        expiration_obj = Expiration(date=expiration_date)
    else:
        expiration_obj = None

    # Create transition (days takes precedence over date when both are set)
    if transition_days is not None:
        transition_obj = Transition(days=transition_days, storage_class=storage_class.upper())
    elif transition_date is not None:
        transition_obj = Transition(date=transition_date, storage_class=storage_class.upper())
    else:
        transition_obj = None

    # Create rule
    # boto expects the status capitalized ('Enabled'/'Disabled').
    rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    appended = False
    # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
    if current_lifecycle_obj:
        # If rule ID exists, use that for comparison otherwise compare based on prefix
        for existing_rule in current_lifecycle_obj:
            if rule.id == existing_rule.id:
                # Same id: keep our rule; report a change only if it differs.
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            elif rule.prefix == existing_rule.prefix:
                # Same prefix but different id: treat as the same rule, so
                # drop the existing id before comparing contents.
                existing_rule.id = None
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            else:
                # Unrelated rule: preserve it unchanged.
                lifecycle_obj.append(existing_rule)
        # If nothing appended then append now as the rule must not exist
        if not appended:
            lifecycle_obj.append(rule)
            changed = True
    else:
        lifecycle_obj.append(rule)
        changed = True

    # Write lifecycle to bucket
    try:
        bucket.configure_lifecycle(lifecycle_obj)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
|
||||
|
||||
def compare_rule(rule_a, rule_b):
    """Return True if two boto lifecycle Rule objects are equivalent.

    Equivalence ignores any nested 'Rule' attribute boto may attach, and
    compares the base rule attributes plus the expiration and transition
    sub-objects (treating a missing sub-object like an empty one).
    The caller's objects are never mutated.
    """
    # Work on deep copies so attribute deletion below cannot leak out.
    rule1 = copy.deepcopy(rule_a)
    rule2 = copy.deepcopy(rule_b)

    expirations = []
    transitions = []
    for rule in (rule1, rule2):
        # boto sometimes attaches a nested 'Rule' attribute; it is not part
        # of the comparison, so drop it when present.
        try:
            del rule.Rule
        except AttributeError:
            pass
        # Detach expiration/transition so the remaining rule __dict__s can be
        # compared directly.  Substitute empty Expiration()/Transition()
        # objects for None so the sub-object comparison is always defined.
        expirations.append(rule.expiration if rule.expiration is not None else Expiration())
        transitions.append(rule.transition if rule.transition is not None else Transition())
        del rule.expiration
        del rule.transition

    return (rule1.__dict__ == rule2.__dict__ and
            expirations[0].__dict__ == expirations[1].__dict__ and
            transitions[0].__dict__ == transitions[1].__dict__)
|
||||
|
||||
|
||||
def destroy_lifecycle_rule(connection, module):
    """Remove one lifecycle rule from a bucket and report via exit_json.

    The rule to delete is matched by rule_id when one was supplied,
    otherwise by prefix.  All non-matching rules are preserved; if no rules
    remain, the whole lifecycle configuration is deleted.
    """
    name = module.params.get("name")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    changed = False

    if prefix is None:
        prefix = ""

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules; nothing to delete if the
    # bucket has no lifecycle configuration at all.
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            module.exit_json(changed=changed)
        else:
            module.fail_json(msg=e.message)

    # Rebuild the lifecycle keeping every rule that does NOT match; a
    # dropped (i.e. deleted) rule marks the module as changed.
    # (Consolidates the two previously-duplicated id/prefix loops.)
    lifecycle_obj = Lifecycle()
    for existing_rule in current_lifecycle_obj:
        if rule_id is not None:
            matched = (rule_id == existing_rule.id)
        else:
            matched = (prefix == existing_rule.prefix)
        if matched:
            changed = True
        else:
            lifecycle_obj.append(existing_rule)

    # Write lifecycle to bucket or, if there are no rules left, delete the
    # lifecycle configuration entirely.
    try:
        if lifecycle_obj:
            bucket.configure_lifecycle(lifecycle_obj)
        else:
            bucket.delete_lifecycle_configuration()
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def main():
    """Entry point for the s3_lifecycle module: parse args, connect, dispatch."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            expiration_days=dict(default=None, required=False, type='int'),
            expiration_date=dict(default=None, required=False, type='str'),
            prefix=dict(default=None, required=False),
            requester_pays=dict(default='no', type='bool'),
            rule_id=dict(required=False, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            status=dict(default='enabled', choices=['enabled', 'disabled']),
            storage_class=dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
            transition_days=dict(default=None, required=False, type='int'),
            transition_date=dict(default=None, required=False, type='str')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['expiration_days', 'expiration_date'],
                               ['expiration_days', 'transition_date'],
                               ['transition_days', 'transition_date'],
                               ['transition_days', 'expiration_date']
                           ])

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if not HAS_DATEUTIL:
        module.fail_json(msg='dateutil required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region
    try:
        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
        # use this as fallback because connect_to_region seems to fail in
        # boto + non 'classic' aws accounts in some cases
        if connection is None:
            connection = boto.connect_s3(**aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    expiration_date = module.params.get("expiration_date")
    transition_date = module.params.get("transition_date")
    state = module.params.get("state")
    storage_class = module.params.get("storage_class")

    # If expiration_date is set, check the string is a valid S3 lifecycle
    # timestamp (midnight, GMT).
    if expiration_date is not None:
        try:
            datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError:
            module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    if transition_date is not None:
        try:
            datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError:
            # BUGFIX: this error message previously said "expiration_date".
            module.fail_json(msg="transition_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    # The STANDARD_IA storage class only exists in boto >= 2.40.0.
    boto_required_version = (2, 40, 0)
    if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
        module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")

    if state == 'present':
        create_lifecycle_rule(connection, module)
    elif state == 'absent':
        destroy_lifecycle_rule(connection, module)
|
||||
|
||||
|
||||
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()
|
||||
184
lib/ansible/modules/cloud/amazon/s3_logging.py
Normal file
184
lib/ansible/modules/cloud/amazon/s3_logging.py
Normal file
@@ -0,0 +1,184 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: s3_logging
|
||||
short_description: Manage logging facility of an s3 bucket in AWS
|
||||
description:
|
||||
- Manage logging facility of an s3 bucket in AWS
|
||||
version_added: "2.0"
|
||||
author: Rob White (@wimnat)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- "Name of the s3 bucket."
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- "Enable or disable logging."
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
target_bucket:
|
||||
description:
|
||||
- "The bucket to log to. Required when state=present."
|
||||
required: false
|
||||
default: null
|
||||
target_prefix:
|
||||
description:
|
||||
- "The prefix that should be prepended to the generated log files written to the target_bucket."
|
||||
required: false
|
||||
default: ""
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
|
||||
s3_logging:
|
||||
name: mywebsite.com
|
||||
target_bucket: mylogs
|
||||
target_prefix: logs/mywebsite.com
|
||||
state: present
|
||||
|
||||
- name: Remove logging on an s3 bucket
|
||||
s3_logging:
|
||||
name: mywebsite.com
|
||||
state: absent
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.ec2
|
||||
from boto.s3.connection import OrdinaryCallingFormat, Location
|
||||
from boto.exception import BotoServerError, S3CreateError, S3ResponseError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def compare_bucket_logging(bucket, target_bucket, target_prefix):
    """Return True if the bucket's current logging status already matches
    the requested target bucket and prefix (used for idempotency checks).

    Disabled logging is represented by boto with target/prefix unset, so
    callers pass (None, None) to test for "logging off".
    """
    bucket_log_obj = bucket.get_logging_status()
    # Return the comparison directly instead of the former
    # if-/else-return-True/False construct.
    return (bucket_log_obj.target == target_bucket and
            bucket_log_obj.prefix == target_prefix)
|
||||
|
||||
|
||||
def enable_bucket_logging(connection, module):
    """Enable S3 server access logging on the named bucket.

    Idempotent: AWS is only touched when the current logging status differs
    from the requested target bucket/prefix.  Exits the module with
    exit_json(changed=...) on success, fail_json on any S3 error.
    """
    bucket_name = module.params.get("name")
    target_bucket = module.params.get("target_bucket")
    target_prefix = module.params.get("target_prefix")
    changed = False

    try:
        bucket = connection.get_bucket(bucket_name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    try:
        if not compare_bucket_logging(bucket, target_bucket, target_prefix):
            # Before we can enable logging we must give the log-delivery group
            # WRITE and READ_ACP permissions to the target bucket
            try:
                target_bucket_obj = connection.get_bucket(target_bucket)
            except S3ResponseError as e:
                # A 301 means boto was redirected: the target bucket lives in
                # a different region, which S3 logging does not allow.
                if e.status == 301:
                    module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
                else:
                    module.fail_json(msg=e.message)
            target_bucket_obj.set_as_logging_target()

            bucket.enable_logging(target_bucket, target_prefix)
            changed = True

    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def disable_bucket_logging(connection, module):
    """Turn off server access logging for the named bucket.

    Only calls disable_logging when logging is currently enabled, so the
    module stays idempotent.  Exits via exit_json/fail_json.
    """
    changed = False
    name = module.params.get("name")

    try:
        bucket = connection.get_bucket(name)
        # Logging is considered "off" when both target and prefix are unset.
        already_disabled = compare_bucket_logging(bucket, None, None)
        if not already_disabled:
            bucket.disable_logging()
            changed = True
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def main():
    """Entry point for the s3_logging module: parse args, connect, dispatch."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            target_bucket=dict(required=False, default=None),
            target_prefix=dict(required=False, default=""),
            state=dict(required=False, default='present', choices=['present', 'absent'])
        )
    )

    # target_bucket is documented as required when state=present; enforce it
    # up front so the user gets a clear argument error instead of an obscure
    # boto failure from get_bucket(None).
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=[['state', 'present', ['target_bucket']]])

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region
    try:
        # NOTE(review): this file only imports boto.ec2; confirm boto.s3 is
        # importable here (boto.s3.connect_to_region is referenced).
        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
        # use this as fallback because connect_to_region seems to fail in
        # boto + non 'classic' aws accounts in some cases
        if connection is None:
            connection = boto.connect_s3(**aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    state = module.params.get("state")

    if state == 'present':
        enable_bucket_logging(connection, module)
    elif state == 'absent':
        disable_bucket_logging(connection, module)
|
||||
|
||||
|
||||
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()
|
||||
297
lib/ansible/modules/cloud/amazon/s3_website.py
Normal file
297
lib/ansible/modules/cloud/amazon/s3_website.py
Normal file
@@ -0,0 +1,297 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: s3_website
|
||||
short_description: Configure an s3 bucket as a website
|
||||
description:
|
||||
- Configure an s3 bucket as a website
|
||||
version_added: "2.2"
|
||||
author: Rob White (@wimnat)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- "Name of the s3 bucket"
|
||||
required: true
|
||||
default: null
|
||||
error_key:
|
||||
description:
|
||||
- "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
|
||||
required: false
|
||||
default: null
|
||||
redirect_all_requests:
|
||||
description:
|
||||
- "Describes the redirect behavior for every request to this s3 bucket website endpoint"
|
||||
required: false
|
||||
default: null
|
||||
region:
|
||||
description:
|
||||
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
|
||||
required: false
|
||||
default: null
|
||||
state:
|
||||
description:
|
||||
- "Add or remove s3 website configuration"
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
suffix:
|
||||
description:
|
||||
- "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character."
|
||||
required: false
|
||||
default: index.html
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Configure an s3 bucket to redirect all requests to example.com
|
||||
- s3_website:
|
||||
name: mybucket.com
|
||||
redirect_all_requests: example.com
|
||||
state: present
|
||||
|
||||
# Remove website configuration from an s3 bucket
|
||||
- s3_website:
|
||||
name: mybucket.com
|
||||
state: absent
|
||||
|
||||
# Configure an s3 bucket as a website with index and error pages
|
||||
- s3_website:
|
||||
name: mybucket.com
|
||||
suffix: home.htm
|
||||
error_key: errors/404.htm
|
||||
state: present
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
index_document:
|
||||
suffix:
|
||||
description: suffix that is appended to a request that is for a directory on the website endpoint
|
||||
returned: success
|
||||
type: string
|
||||
sample: index.html
|
||||
error_document:
|
||||
key:
|
||||
description: object key name to use when a 4XX class error occurs
|
||||
returned: when error_document parameter set
|
||||
type: string
|
||||
sample: error.html
|
||||
redirect_all_requests_to:
|
||||
host_name:
|
||||
description: name of the host where requests will be redirected.
|
||||
returned: when redirect all requests parameter set
|
||||
type: string
|
||||
sample: ansible.com
|
||||
routing_rules:
|
||||
routing_rule:
|
||||
host_name:
|
||||
description: name of the host where requests will be redirected.
|
||||
returned: when host name set as part of redirect rule
|
||||
type: string
|
||||
sample: ansible.com
|
||||
condition:
|
||||
key_prefix_equals:
|
||||
description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html
|
||||
returned: when routing rule present
|
||||
type: string
|
||||
sample: docs/
|
||||
redirect:
|
||||
replace_key_prefix_with:
|
||||
description: object key prefix to use in the redirect request
|
||||
returned: when routing rule present
|
||||
type: string
|
||||
sample: documents/
|
||||
|
||||
'''
|
||||
|
||||
import time
|
||||
|
||||
try:
|
||||
from botocore.exceptions import ClientError, ParamValidationError, NoCredentialsError
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
def _create_redirect_dict(url):
    """Build the boto3 RedirectAllRequestsTo dict from a redirect URL.

    Accepts either a bare host name ("example.com") or a scheme-qualified
    URL ("https://example.com").  Anything that splits into more than two
    colon-separated pieces is rejected with ValueError.
    """
    parts = url.split(':')

    if len(parts) == 1:
        # No scheme given: the whole string is the host name.
        return {u'HostName': parts[0]}
    if len(parts) == 2:
        scheme, host = parts
        return {u'Protocol': scheme, u'HostName': host.replace('//', '')}
    raise ValueError('Redirect URL appears invalid')
|
||||
|
||||
|
||||
def _create_website_configuration(suffix, error_key, redirect_all_requests):
    """Assemble a boto3 WebsiteConfiguration dict, omitting unset pieces.

    Each of the three sections (ErrorDocument, IndexDocument,
    RedirectAllRequestsTo) is included only when its argument is not None.
    """
    config = {}

    if error_key is not None:
        config['ErrorDocument'] = {'Key': error_key}
    if suffix is not None:
        config['IndexDocument'] = {'Suffix': suffix}
    if redirect_all_requests is not None:
        # May raise ValueError for a malformed redirect URL.
        config['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)

    return config
|
||||
|
||||
|
||||
def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
    """Create or converge the website configuration on an s3 bucket.

    Reads name/suffix/error_key/redirect_all_requests from module params,
    PUTs a new website configuration when none exists or when any requested
    field differs from the current one, then re-reads the configuration and
    exits the module with exit_json(changed=..., **config).
    """
    bucket_name = module.params.get("name")
    redirect_all_requests = module.params.get("redirect_all_requests")
    # If redirect_all_requests is set then don't use the default suffix that
    # has been set
    if redirect_all_requests is not None:
        suffix = None
    else:
        suffix = module.params.get("suffix")
    error_key = module.params.get("error_key")
    changed = False

    try:
        bucket_website = resource_connection.BucketWebsite(bucket_name)
    except ClientError as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    # Fetch the current website configuration; a missing configuration is
    # expected (NoSuchWebsiteConfiguration) and handled as "create new".
    try:
        website_config = client_connection.get_bucket_website(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
            website_config = None
        else:
            module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    if website_config is None:
        # No existing configuration: create one from scratch.
        try:
            bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
            changed = True
        except (ClientError, ParamValidationError) as e:
            module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
        except ValueError as e:
            # Raised by _create_redirect_dict for a malformed redirect URL.
            module.fail_json(msg=str(e))
    else:
        # Existing configuration: PUT only when a requested field differs.
        try:
            if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
               (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
               (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):

                try:
                    bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
                    changed = True
                except (ClientError, ParamValidationError) as e:
                    module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
        except KeyError as e:
            # The comparison above KeyErrors when the existing configuration
            # lacks a section we want to set (e.g. no ErrorDocument yet);
            # that also counts as "differs", so write the new configuration.
            try:
                bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
                changed = True
            except (ClientError, ParamValidationError) as e:
                module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
        except ValueError as e:
            module.fail_json(msg=str(e))

    # Wait 5 secs before getting the website_config again to give it time
    # to update
    time.sleep(5)

    website_config = client_connection.get_bucket_website(Bucket=bucket_name)
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
|
||||
|
||||
|
||||
def disable_bucket_as_website(client_connection, module):
    """Delete the website configuration from an s3 bucket, if any.

    Exits with changed=False when no website configuration exists,
    changed=True after deleting one; any other S3 error fails the module.
    """
    changed = False
    bucket_name = module.params.get("name")

    # Probe for an existing configuration first so the module is idempotent.
    try:
        client_connection.get_bucket_website(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
            module.exit_json(changed=changed)
        else:
            module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    try:
        client_connection.delete_bucket_website(Bucket=bucket_name)
        changed = True
    except ClientError as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    module.exit_json(changed=changed)
|
||||
|
||||
|
||||
def main():
    """Entry point for the s3_website module: parse args, connect, dispatch."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['present', 'absent']),
            suffix=dict(type='str', required=False, default='index.html'),
            error_key=dict(type='str', required=False),
            redirect_all_requests=dict(type='str', required=False)
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['redirect_all_requests', 'suffix'],
            ['redirect_all_requests', 'error_key']
        ])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    # Guard clause: a region is mandatory before we can build connections.
    if not region:
        module.fail_json(msg="region must be specified")

    client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
    resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)

    state = module.params.get("state")

    if state == 'present':
        enable_or_update_bucket_as_website(client_connection, resource_connection, module)
    elif state == 'absent':
        disable_bucket_as_website(client_connection, module)
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()
|
||||
410
lib/ansible/modules/cloud/amazon/sns_topic.py
Normal file
410
lib/ansible/modules/cloud/amazon/sns_topic.py
Normal file
@@ -0,0 +1,410 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# This is a free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This Ansible library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = """
|
||||
module: sns_topic
|
||||
short_description: Manages AWS SNS topics and subscriptions
|
||||
description:
|
||||
- The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
|
||||
version_added: 2.0
|
||||
author:
|
||||
- "Joel Thompson (@joelthompson)"
|
||||
- "Fernando Jose Pando (@nand0p)"
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name or ARN of the SNS topic to converge
|
||||
required: True
|
||||
state:
|
||||
description:
|
||||
- Whether to create or destroy an SNS topic
|
||||
required: False
|
||||
default: present
|
||||
choices: ["absent", "present"]
|
||||
display_name:
|
||||
description:
|
||||
- Display name of the topic
|
||||
required: False
|
||||
default: None
|
||||
policy:
|
||||
description:
|
||||
- Policy to apply to the SNS topic
|
||||
required: False
|
||||
default: None
|
||||
delivery_policy:
|
||||
description:
|
||||
- Delivery policy to apply to the SNS topic
|
||||
required: False
|
||||
default: None
|
||||
subscriptions:
|
||||
description:
|
||||
- List of subscriptions to apply to the topic. Note that AWS requires
|
||||
subscriptions to be confirmed, so you will need to confirm any new
|
||||
subscriptions.
|
||||
required: False
|
||||
default: []
|
||||
purge_subscriptions:
|
||||
description:
|
||||
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
|
||||
allow you to purge any PendingConfirmation subscriptions, so if any
|
||||
exist and would be purged, they are silently skipped. This means that
|
||||
somebody could come back later and confirm the subscription. Sorry.
|
||||
Blame Amazon."
|
||||
required: False
|
||||
default: True
|
||||
extends_documentation_fragment: aws
|
||||
requirements: [ "boto" ]
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
|
||||
- name: Create alarm SNS topic
|
||||
sns_topic:
|
||||
name: "alarms"
|
||||
state: present
|
||||
display_name: "alarm SNS topic"
|
||||
delivery_policy:
|
||||
http:
|
||||
defaultHealthyRetryPolicy:
|
||||
minDelayTarget: 2
|
||||
maxDelayTarget: 4
|
||||
numRetries: 3
|
||||
numMaxDelayRetries: 5
|
||||
backoffFunction: "<linear|arithmetic|geometric|exponential>"
|
||||
disableSubscriptionOverrides: True
|
||||
defaultThrottlePolicy:
|
||||
maxReceivesPerSecond: 10
|
||||
subscriptions:
|
||||
- endpoint: "my_email_address@example.com"
|
||||
protocol: "email"
|
||||
- endpoint: "my_mobile_number"
|
||||
protocol: "sms"
|
||||
|
||||
"""
|
||||
|
||||
RETURN = '''
|
||||
sns_arn:
|
||||
description: The ARN of the topic you are modifying
|
||||
type: string
|
||||
sample: "arn:aws:sns:us-east-1:123456789012:my_topic_name"
|
||||
|
||||
sns_topic:
|
||||
description: Dict of sns topic details
|
||||
type: dict
|
||||
sample:
|
||||
name: sns-topic-name
|
||||
state: present
|
||||
display_name: default
|
||||
policy: {}
|
||||
delivery_policy: {}
|
||||
subscriptions_new: []
|
||||
subscriptions_existing: []
|
||||
subscriptions_deleted: []
|
||||
subscriptions_added: []
|
||||
subscriptions_purge: false
|
||||
check_mode: false
|
||||
topic_created: false
|
||||
topic_deleted: false
|
||||
attributes_set: []
|
||||
'''
|
||||
|
||||
import time
|
||||
import json
|
||||
import re
|
||||
|
||||
try:
|
||||
import boto.sns
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
class SnsTopicManager(object):
|
||||
""" Handles SNS Topic creation and destruction """
|
||||
|
||||
    def __init__(self,
                 module,
                 name,
                 state,
                 display_name,
                 policy,
                 delivery_policy,
                 subscriptions,
                 purge_subscriptions,
                 check_mode,
                 region,
                 **aws_connect_params):
        """Capture the desired topic state and open a boto SNS connection.

        All arguments mirror the module parameters; aws_connect_params is
        forwarded verbatim to connect_to_aws.
        """
        self.region = region
        self.aws_connect_params = aws_connect_params
        # Opened immediately; _get_boto_connection fail_json's on error, so
        # the attributes below are only set after a successful connection.
        self.connection = self._get_boto_connection()
        self.changed = False
        self.module = module
        self.name = name
        self.state = state
        self.display_name = display_name
        self.policy = policy
        self.delivery_policy = delivery_policy
        self.subscriptions = subscriptions
        # Bookkeeping lists reported back to the user in the module result.
        self.subscriptions_existing = []
        self.subscriptions_deleted = []
        self.subscriptions_added = []
        self.purge_subscriptions = purge_subscriptions
        self.check_mode = check_mode
        self.topic_created = False
        self.topic_deleted = False
        # ARN of the managed topic; resolved lazily after create/lookup.
        self.arn_topic = None
        self.attributes_set = []
|
||||
|
||||
    def _get_boto_connection(self):
        """Return a boto SNS connection for self.region, failing the module on error."""
        try:
            return connect_to_aws(boto.sns, self.region,
                                  **self.aws_connect_params)
        except BotoServerError as err:
            self.module.fail_json(msg=err.message)
|
||||
|
||||
def _get_all_topics(self):
|
||||
next_token = None
|
||||
topics = []
|
||||
while True:
|
||||
try:
|
||||
response = self.connection.get_all_topics(next_token)
|
||||
except BotoServerError as err:
|
||||
self.module.fail_json(msg=err.message)
|
||||
topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics'])
|
||||
next_token = response['ListTopicsResponse']['ListTopicsResult']['NextToken']
|
||||
if not next_token:
|
||||
break
|
||||
return [t['TopicArn'] for t in topics]
|
||||
|
||||
|
||||
def _arn_topic_lookup(self):
|
||||
# topic names cannot have colons, so this captures the full topic name
|
||||
all_topics = self._get_all_topics()
|
||||
lookup_topic = ':%s' % self.name
|
||||
for topic in all_topics:
|
||||
if topic.endswith(lookup_topic):
|
||||
return topic
|
||||
|
||||
|
||||
    def _create_topic(self):
        """Create the SNS topic and wait until its ARN becomes visible."""
        self.changed = True
        self.topic_created = True
        if not self.check_mode:
            self.connection.create_topic(self.name)
            # Topic listing is eventually consistent; poll until the new
            # topic's ARN shows up in the account's topic list.
            self.arn_topic = self._arn_topic_lookup()
            while not self.arn_topic:
                time.sleep(3)
                self.arn_topic = self._arn_topic_lookup()
|
||||
|
||||
|
||||
def _set_topic_attrs(self):
    """Sync display name, policy and delivery policy onto the existing topic.

    Records each applied change in self.attributes_set and flips self.changed.
    In check mode the comparisons run but nothing is written.
    """
    response = self.connection.get_topic_attributes(self.arn_topic)
    current = response['GetTopicAttributesResponse']['GetTopicAttributesResult']['Attributes']

    if self.display_name and self.display_name != current['DisplayName']:
        self.changed = True
        self.attributes_set.append('display_name')
        if not self.check_mode:
            self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
                                                 self.display_name)

    # stored policies are JSON strings; parse before comparing with the dicts
    if self.policy and self.policy != json.loads(current['Policy']):
        self.changed = True
        self.attributes_set.append('policy')
        if not self.check_mode:
            self.connection.set_topic_attributes(self.arn_topic, 'Policy',
                                                 json.dumps(self.policy))

    # DeliveryPolicy may be entirely absent on a fresh topic
    if self.delivery_policy and ('DeliveryPolicy' not in current or
                                 self.delivery_policy != json.loads(current['DeliveryPolicy'])):
        self.changed = True
        self.attributes_set.append('delivery_policy')
        if not self.check_mode:
            self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
                                                 json.dumps(self.delivery_policy))
||||
def _canonicalize_endpoint(self, protocol, endpoint):
    """Normalise an endpoint for comparison: SMS numbers are reduced to digits,
    every other protocol's endpoint is returned unchanged."""
    if protocol != 'sms':
        return endpoint
    return re.sub('[^0-9]*', '', endpoint)
||||
def _get_topic_subs(self):
    """Page through the topic's subscriptions, accumulating them into
    self.subscriptions_existing."""
    token = None
    while True:
        page = self.connection.get_all_subscriptions_by_topic(self.arn_topic, token)
        listing = page['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']
        self.subscriptions_existing.extend(listing['Subscriptions'])
        token = listing['NextToken']
        if not token:
            break
||||
def _set_topic_subs(self):
    """Reconcile the topic's subscriptions with the requested ones.

    When purge_subscriptions is set, confirmed subscriptions that are not in
    the desired list are removed; any desired (protocol, endpoint) pair that
    does not exist yet is subscribed.  Honors check mode throughout.
    """
    subscriptions_existing_list = []
    desired_subscriptions = [(sub['protocol'],
                              self._canonicalize_endpoint(sub['protocol'], sub['endpoint']))
                             for sub in self.subscriptions]

    if self.subscriptions_existing:
        for sub in self.subscriptions_existing:
            sub_key = (sub['Protocol'], sub['Endpoint'])
            subscriptions_existing_list.append(sub_key)
            # pending subscriptions cannot be unsubscribed via the API
            if self.purge_subscriptions and sub_key not in desired_subscriptions and \
                    sub['SubscriptionArn'] != 'PendingConfirmation':
                self.changed = True
                self.subscriptions_deleted.append(sub_key)
                if not self.check_mode:
                    self.connection.unsubscribe(sub['SubscriptionArn'])

    for (protocol, endpoint) in desired_subscriptions:
        if (protocol, endpoint) not in subscriptions_existing_list:
            self.changed = True
            # BUGFIX: record the subscription actually being added; the old
            # code appended the stale loop variable `sub` from the purge loop
            # (and raised NameError when there were no existing subscriptions).
            self.subscriptions_added.append((protocol, endpoint))
            if not self.check_mode:
                self.connection.subscribe(self.arn_topic, protocol, endpoint)
||||
def _delete_subscriptions(self):
    """Unsubscribe every confirmed subscription on the topic.

    Subscriptions still in 'PendingConfirmation' cannot be removed through the
    API; they time out on their own after 3 days
    (https://forums.aws.amazon.com/thread.jspa?threadID=85993).
    """
    for sub in self.subscriptions_existing:
        if sub['SubscriptionArn'] == 'PendingConfirmation':
            continue
        self.subscriptions_deleted.append(sub['SubscriptionArn'])
        self.changed = True
        if not self.check_mode:
            self.connection.unsubscribe(sub['SubscriptionArn'])
||||
def _delete_topic(self):
    """Delete the topic itself (skipped in check mode); always marks changed."""
    self.topic_deleted = True
    self.changed = True
    if self.check_mode:
        return
    self.connection.delete_topic(self.arn_topic)
||||
def ensure_ok(self):
    """Ensure the topic exists, then sync its attributes and subscriptions."""
    arn = self._arn_topic_lookup()
    self.arn_topic = arn
    if not arn:
        self._create_topic()
    self._set_topic_attrs()
    self._get_topic_subs()
    self._set_topic_subs()
||||
def ensure_gone(self):
    """Remove the topic and, beforehand, any of its confirmed subscriptions."""
    arn = self._arn_topic_lookup()
    self.arn_topic = arn
    if arn:
        self._get_topic_subs()
        if self.subscriptions_existing:
            self._delete_subscriptions()
        self._delete_topic()
||||
def get_info(self):
    """Return a snapshot of the manager's state for the module's result payload."""
    return dict(
        name=self.name,
        state=self.state,
        display_name=self.display_name,
        policy=self.policy,
        delivery_policy=self.delivery_policy,
        subscriptions_new=self.subscriptions,
        subscriptions_existing=self.subscriptions_existing,
        subscriptions_deleted=self.subscriptions_deleted,
        subscriptions_added=self.subscriptions_added,
        subscriptions_purge=self.purge_subscriptions,
        check_mode=self.check_mode,
        topic_created=self.topic_created,
        topic_deleted=self.topic_deleted,
        attributes_set=self.attributes_set,
    )
||||
def main():
    """Entry point: create or delete an SNS topic according to module parameters."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        display_name=dict(type='str', required=False),
        policy=dict(type='dict', required=False),
        delivery_policy=dict(type='dict', required=False),
        subscriptions=dict(default=[], type='list', required=False),
        purge_subscriptions=dict(type='bool', default=True),
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    params = module.params
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    sns_topic = SnsTopicManager(module,
                                params.get('name'),
                                params.get('state'),
                                params.get('display_name'),
                                params.get('policy'),
                                params.get('delivery_policy'),
                                params.get('subscriptions'),
                                params.get('purge_subscriptions'),
                                module.check_mode,
                                region,
                                **aws_connect_params)

    if params.get('state') == 'present':
        sns_topic.ensure_ok()
    elif params.get('state') == 'absent':
        sns_topic.ensure_gone()

    module.exit_json(changed=sns_topic.changed,
                     sns_arn=sns_topic.arn_topic,
                     sns_topic=sns_topic.get_info())


if __name__ == '__main__':
    main()
|
||||
321
lib/ansible/modules/cloud/amazon/sqs_queue.py
Normal file
321
lib/ansible/modules/cloud/amazon/sqs_queue.py
Normal file
@@ -0,0 +1,321 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: sqs_queue
|
||||
short_description: Creates or deletes AWS SQS queues.
|
||||
description:
|
||||
- Create or delete AWS SQS queues.
|
||||
- Update attributes on existing queues.
|
||||
version_added: "2.0"
|
||||
author:
|
||||
- Alan Loi (@loia)
|
||||
- Fernando Jose Pando (@nand0p)
|
||||
- Nadir Lloret (@nadirollo)
|
||||
requirements:
|
||||
- "boto >= 2.33.0"
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Create or delete the queue
|
||||
required: false
|
||||
choices: ['present', 'absent']
|
||||
default: 'present'
|
||||
name:
|
||||
description:
|
||||
- Name of the queue.
|
||||
required: true
|
||||
default_visibility_timeout:
|
||||
description:
|
||||
- The default visibility timeout in seconds.
|
||||
required: false
|
||||
default: null
|
||||
message_retention_period:
|
||||
description:
|
||||
- The message retention period in seconds.
|
||||
required: false
|
||||
default: null
|
||||
maximum_message_size:
|
||||
description:
|
||||
- The maximum message size in bytes.
|
||||
required: false
|
||||
default: null
|
||||
delivery_delay:
|
||||
description:
|
||||
- The delivery delay in seconds.
|
||||
required: false
|
||||
default: null
|
||||
receive_message_wait_time:
|
||||
description:
|
||||
- The receive message wait time in seconds.
|
||||
required: false
|
||||
default: null
|
||||
policy:
|
||||
description:
|
||||
- The json dict policy to attach to queue
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.1"
|
||||
redrive_policy:
|
||||
description:
|
||||
- json dict with the redrive_policy (see example)
|
||||
required: false
|
||||
default: null
|
||||
version_added: "2.2"
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
"""
|
||||
|
||||
RETURN = '''
|
||||
default_visibility_timeout:
|
||||
description: The default visibility timeout in seconds.
|
||||
returned: always
|
||||
sample: 30
|
||||
delivery_delay:
|
||||
description: The delivery delay in seconds.
|
||||
returned: always
|
||||
sample: 0
|
||||
maximum_message_size:
|
||||
description: The maximum message size in bytes.
|
||||
returned: always
|
||||
sample: 262144
|
||||
message_retention_period:
|
||||
description: The message retention period in seconds.
|
||||
returned: always
|
||||
sample: 345600
|
||||
name:
|
||||
description: Name of the SQS Queue
|
||||
returned: always
|
||||
sample: "queuename-987d2de0"
|
||||
queue_arn:
|
||||
description: The queue's Amazon resource name (ARN).
|
||||
returned: on successful creation or update of the queue
|
||||
sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
|
||||
receive_message_wait_time:
|
||||
description: The receive message wait time in seconds.
|
||||
returned: always
|
||||
sample: 0
|
||||
region:
|
||||
description: Region that the queue was created within
|
||||
returned: always
|
||||
sample: 'us-east-1'
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create SQS queue with redrive policy
|
||||
- sqs_queue:
|
||||
name: my-queue
|
||||
region: ap-southeast-2
|
||||
default_visibility_timeout: 120
|
||||
message_retention_period: 86400
|
||||
maximum_message_size: 1024
|
||||
delivery_delay: 30
|
||||
receive_message_wait_time: 20
|
||||
policy: "{{ json_dict }}"
|
||||
redrive_policy:
|
||||
maxReceiveCount: 5
|
||||
deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
|
||||
|
||||
# Delete SQS queue
|
||||
- sqs_queue:
|
||||
name: my-queue
|
||||
region: ap-southeast-2
|
||||
state: absent
|
||||
'''
|
||||
|
||||
import json
|
||||
import traceback
|
||||
|
||||
try:
|
||||
import boto.sqs
|
||||
from boto.exception import BotoServerError, NoAuthHandlerFound
|
||||
HAS_BOTO = True
|
||||
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def create_or_update_sqs_queue(connection, module):
    """Create the named SQS queue if missing, otherwise sync its attributes,
    then exit the module with the resulting state.

    On any AWS API error the module fails with the traceback in 'msg'.
    """
    queue_name = module.params.get('name')

    queue_attributes = dict(
        default_visibility_timeout=module.params.get('default_visibility_timeout'),
        message_retention_period=module.params.get('message_retention_period'),
        maximum_message_size=module.params.get('maximum_message_size'),
        delivery_delay=module.params.get('delivery_delay'),
        receive_message_wait_time=module.params.get('receive_message_wait_time'),
        policy=module.params.get('policy'),
        redrive_policy=module.params.get('redrive_policy'),
    )

    result = dict(region=module.params.get('region'), name=queue_name)
    result.update(queue_attributes)

    try:
        queue = connection.get_queue(queue_name)
        if queue:
            # queue already exists: apply any differing attributes
            result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode,
                                                 **queue_attributes)
        else:
            # queue is missing: create it (outside check mode) and configure it
            if not module.check_mode:
                queue = connection.create_queue(queue_name)
                update_sqs_queue(queue, **queue_attributes)
            result['changed'] = True

        if not module.check_mode:
            # read the effective attributes back so the result reflects reality
            for result_key, attr in (('queue_arn', 'QueueArn'),
                                     ('default_visibility_timeout', 'VisibilityTimeout'),
                                     ('message_retention_period', 'MessageRetentionPeriod'),
                                     ('maximum_message_size', 'MaximumMessageSize'),
                                     ('delivery_delay', 'DelaySeconds'),
                                     ('receive_message_wait_time', 'ReceiveMessageWaitTimeSeconds')):
                result[result_key] = queue.get_attributes(attr)[attr]

    except BotoServerError:
        result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
||||
def update_sqs_queue(queue,
                     check_mode=False,
                     default_visibility_timeout=None,
                     message_retention_period=None,
                     maximum_message_size=None,
                     delivery_delay=None,
                     receive_message_wait_time=None,
                     policy=None,
                     redrive_policy=None):
    """Apply any supplied attribute values to *queue*.

    Returns True if at least one attribute was (or, in check mode, would be)
    changed.  None values are skipped by set_queue_attribute.
    """
    updates = (
        ('VisibilityTimeout', default_visibility_timeout),
        ('MessageRetentionPeriod', message_retention_period),
        ('MaximumMessageSize', maximum_message_size),
        ('DelaySeconds', delivery_delay),
        ('ReceiveMessageWaitTimeSeconds', receive_message_wait_time),
        ('Policy', policy),
        ('RedrivePolicy', redrive_policy),
    )
    changed = False
    for attribute, value in updates:
        changed = set_queue_attribute(queue, attribute, value,
                                      check_mode=check_mode) or changed
    return changed
||||
def set_queue_attribute(queue, attribute, value, check_mode=False):
    """Set *attribute* on *queue* to *value* when it differs from the current value.

    Returns True when a change was (or, in check mode, would be) made.  Falsy
    values (None, 0, '') are treated as "not specified" and skipped.
    """
    if not value:
        return False

    try:
        existing_value = queue.get_attributes(attributes=attribute)[attribute]
    except Exception:
        # the attribute may not exist yet (e.g. no policy set); treat as empty.
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        existing_value = ''

    # convert dict attributes to JSON strings (sort keys for comparing)
    if attribute in ['Policy', 'RedrivePolicy']:
        value = json.dumps(value, sort_keys=True)
        if existing_value:
            existing_value = json.dumps(json.loads(existing_value), sort_keys=True)

    if str(value) != existing_value:
        if not check_mode:
            queue.set_attribute(attribute, value)
        return True

    return False
||||
def delete_sqs_queue(connection, module):
    """Delete the named SQS queue if it exists; exit the module with the outcome.

    Reports changed=False when the queue is already absent; fails the module
    with the traceback on AWS API errors.
    """
    queue_name = module.params.get('name')
    result = dict(region=module.params.get('region'), name=queue_name)

    try:
        queue = connection.get_queue(queue_name)
        if queue:
            if not module.check_mode:
                connection.delete_queue(queue)
            result['changed'] = True
        else:
            result['changed'] = False
    except BotoServerError:
        result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
||||
def main():
    """Entry point: create/update or delete an SQS queue per module parameters."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        default_visibility_timeout=dict(type='int'),
        message_retention_period=dict(type='int'),
        maximum_message_size=dict(type='int'),
        delivery_delay=dict(type='int'),
        receive_message_wait_time=dict(type='int'),
        policy=dict(type='dict', required=False),
        redrive_policy=dict(type='dict', required=False),
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.sqs, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    if state == 'present':
        create_or_update_sqs_queue(connection, module)
    elif state == 'absent':
        delete_sqs_queue(connection, module)


if __name__ == '__main__':
    main()
|
||||
157
lib/ansible/modules/cloud/amazon/sts_assume_role.py
Normal file
157
lib/ansible/modules/cloud/amazon/sts_assume_role.py
Normal file
@@ -0,0 +1,157 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: sts_assume_role
|
||||
short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
|
||||
description:
|
||||
- Assume a role using AWS Security Token Service and obtain temporary credentials
|
||||
version_added: "2.0"
|
||||
author: Boris Ekelchik (@bekelchik)
|
||||
options:
|
||||
role_arn:
|
||||
description:
|
||||
- The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
|
||||
required: true
|
||||
role_session_name:
|
||||
description:
|
||||
- Name of the role's session - will be used by CloudTrail
|
||||
required: true
|
||||
policy:
|
||||
description:
|
||||
- Supplemental policy to use in addition to assumed role's policies.
|
||||
required: false
|
||||
default: null
|
||||
duration_seconds:
|
||||
description:
|
||||
- The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.
|
||||
required: false
|
||||
default: null
|
||||
external_id:
|
||||
description:
|
||||
- A unique identifier that is used by third parties to assume a role in their customers' accounts.
|
||||
required: false
|
||||
default: null
|
||||
mfa_serial_number:
|
||||
description:
|
||||
- The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
|
||||
required: false
|
||||
default: null
|
||||
mfa_token:
|
||||
description:
|
||||
- The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
|
||||
required: false
|
||||
default: null
|
||||
notes:
|
||||
- In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
|
||||
sts_assume_role:
|
||||
role_arn: "arn:aws:iam::123456789012:role/someRole"
|
||||
role_session_name: "someRoleSession"
|
||||
register: assumed_role
|
||||
|
||||
# Use the assumed role above to tag an instance in account 123456789012
|
||||
ec2_tag:
|
||||
aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
|
||||
aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
|
||||
security_token: "{{ assumed_role.sts_creds.session_token }}"
|
||||
resource: i-xyzxyz01
|
||||
state: present
|
||||
tags:
|
||||
MyNewTag: value
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto.sts
|
||||
from boto.exception import BotoServerError
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def assume_role_policy(connection, module):
    """Assume the requested role via STS and exit the module with the
    temporary credentials (sts_creds) and assumed-role user (sts_user).

    Fails the module on any STS API error.
    """
    params = module.params
    role_arn = params.get('role_arn')
    role_session_name = params.get('role_session_name')
    policy = params.get('policy')
    duration_seconds = params.get('duration_seconds')
    external_id = params.get('external_id')
    mfa_serial_number = params.get('mfa_serial_number')
    mfa_token = params.get('mfa_token')
    changed = False

    try:
        assumed_role = connection.assume_role(role_arn, role_session_name, policy,
                                              duration_seconds, external_id,
                                              mfa_serial_number, mfa_token)
        changed = True
    except BotoServerError as e:
        # BUGFIX: pass a serializable string; fail_json(msg=e) handed the raw
        # exception object to the JSON result
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed,
                     sts_creds=assumed_role.credentials.__dict__,
                     sts_user=assumed_role.user.__dict__)
||||
def main():
    """Entry point: assume an IAM role and return the temporary credentials."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        # required parameters should not also declare a default
        role_arn=dict(required=True),
        role_session_name=dict(required=True),
        duration_seconds=dict(required=False, default=None, type='int'),
        external_id=dict(required=False, default=None),
        policy=dict(required=False, default=None),
        mfa_serial_number=dict(required=False, default=None),
        mfa_token=dict(required=False, default=None),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.sts, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    try:
        assume_role_policy(connection, module)
    except BotoServerError as e:
        # BUGFIX: fail_json needs a serializable message, not the exception object
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
|
||||
164
lib/ansible/modules/cloud/amazon/sts_session_token.py
Normal file
164
lib/ansible/modules/cloud/amazon/sts_session_token.py
Normal file
@@ -0,0 +1,164 @@
|
||||
#!/usr/bin/python
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['stableinterface'],
|
||||
'supported_by': 'committer',
|
||||
'version': '1.0'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: sts_session_token
|
||||
short_description: Obtain a session token from the AWS Security Token Service
|
||||
description:
|
||||
- Obtain a session token from the AWS Security Token Service
|
||||
version_added: "2.2"
|
||||
author: Victor Costan (@pwnall)
|
||||
options:
|
||||
duration_seconds:
|
||||
description:
|
||||
- The duration, in seconds, of the session token. See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters for acceptable and default values.
|
||||
required: false
|
||||
default: null
|
||||
mfa_serial_number:
|
||||
description:
|
||||
- The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
|
||||
required: false
|
||||
default: null
|
||||
mfa_token:
|
||||
description:
|
||||
- The value provided by the MFA device, if the trust policy of the user requires MFA.
|
||||
required: false
|
||||
default: null
|
||||
notes:
|
||||
- In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token).
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
requirements:
|
||||
- boto3
|
||||
- botocore
|
||||
- python >= 2.6
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
sts_creds:
|
||||
description: The Credentials object returned by the AWS Security Token Service
|
||||
returned: always
|
||||
type: list
|
||||
sample:
|
||||
access_key: ASXXXXXXXXXXXXXXXXXX
|
||||
expiration: "2016-04-08T11:59:47+00:00"
|
||||
secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
changed:
|
||||
description: True if obtaining the credentials succeeds
|
||||
type: bool
|
||||
returned: always
|
||||
"""
|
||||
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note: These examples do not set authentication details, see the AWS Guide for details.
|
||||
|
||||
# Get a session token (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
|
||||
sts_session_token:
|
||||
duration: 3600
|
||||
register: session_credentials
|
||||
|
||||
# Use the session token obtained above to tag an instance in account 123456789012
|
||||
ec2_tag:
|
||||
aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
|
||||
aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
|
||||
security_token: "{{ session_credentials.sts_creds.session_token }}"
|
||||
resource: i-xyzxyz01
|
||||
state: present
|
||||
tags:
|
||||
MyNewTag: value
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
|
||||
|
||||
|
||||
def normalize_credentials(credentials):
    """Map an STS Credentials dict onto the module's lower-case result keys.

    Any key missing from *credentials* comes back as None.
    """
    return {
        'access_key': credentials.get('AccessKeyId', None),
        'secret_key': credentials.get('SecretAccessKey', None),
        'session_token': credentials.get('SessionToken', None),
        'expiration': credentials.get('Expiration', None),
    }
||||
def get_session_token(connection, module):
    """Request a session token from STS and exit the module with the
    normalized credentials; fails the module on API errors."""
    duration_seconds = module.params.get('duration_seconds')
    mfa_serial_number = module.params.get('mfa_serial_number')
    mfa_token = module.params.get('mfa_token')
    changed = False

    # only forward parameters the caller actually supplied
    args = {}
    if duration_seconds is not None:
        args['DurationSeconds'] = duration_seconds
    if mfa_serial_number is not None:
        args['SerialNumber'] = mfa_serial_number
    if mfa_token is not None:
        args['TokenCode'] = mfa_token

    try:
        response = connection.get_session_token(**args)
        changed = True
    except ClientError as e:
        # BUGFIX: fail_json needs a serializable message, not the exception object
        module.fail_json(msg=str(e))

    credentials = normalize_credentials(response.get('Credentials', {}))
    module.exit_json(changed=changed, sts_creds=credentials)
||||
def main():
    """Entry point: obtain temporary session credentials from AWS STS."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        duration_seconds=dict(required=False, default=None, type='int'),
        mfa_serial_number=dict(required=False, default=None),
        mfa_token=dict(required=False, default=None),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required.')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if region:
        connection = boto3_conn(module, conn_type='client', resource='sts',
                                region=region, endpoint=ec2_url, **aws_connect_kwargs)
    else:
        module.fail_json(msg="region must be specified")

    get_session_token(connection, module)


if __name__ == '__main__':
    main()
|
||||
Reference in New Issue
Block a user