Issue #1: Give more CPU and memory, move settings into ConfigMaps.

This commit is contained in:
Jeff Geerling
2019-11-08 16:20:47 -06:00
parent 2c527990fa
commit 16738e116b
8 changed files with 328 additions and 39 deletions

View File

@@ -6,13 +6,31 @@ An [Ansible Tower](https://www.ansible.com/products/tower) operator for Kubernet
## Testing
This Operator includes a molecule-based test framework, which can be executed standalone in Docker (e.g. in CI or in a single Docker container anywhere), or inside any kind of Kubernetes cluster (e.g. Minikube).
This Operator includes a [Molecule](https://molecule.readthedocs.io/en/stable/)-based test environment, which can be executed standalone in Docker (e.g. in CI or in a single Docker container anywhere), or inside any kind of Kubernetes cluster (e.g. Minikube).
You need to make sure you have Molecule installed before running the following commands. You can install Molecule with:
pip install 'molecule[docker]'
Running `molecule test` sets up a clean environment, builds the operator, runs all configured tests on an example operator instance, then tears down the environment (at least in the case of Docker).
If you want to actively develop the operator, use `molecule converge`, which does everything but tear down the environment at the end.
### Testing in Docker (standalone)
1. `molecule converge -s test-local`
molecule test -s test-local
This environment is meant for headless testing (e.g. in a CI environment, or when making smaller changes which don't need to be verified through a web interface). It is difficult to test things like Tower's web UI or to connect other applications on your local machine to the services running inside the cluster, since it is inside a Docker container with no static IP address.
### Testing in Minikube
1. `minikube start`
1. `molecule converge -s test-minikube`
minikube start --memory 6g --cpus 2
minikube addons enable ingress
molecule test -s test-minikube
[Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) is a more full-featured test environment running inside a full VM on your computer, with an assigned IP address. This makes it easier to test things like NodePort services and Ingress from outside the Kubernetes cluster (e.g. in a browser on your computer).
Once the operator is deployed, you can visit the Tower UI in your browser by following these steps:
1. Make sure you have an entry like `IP_ADDRESS example-tower.test` in your `/etc/hosts` file. (Get the IP address with `minikube ip`.)
2. Visit `http://example-tower.test/` in your browser.

View File

@@ -4,7 +4,10 @@ metadata:
name: example-tower
namespace: example-tower
spec:
tower_hostname: example-tower.test
tower_secret_key: aabbcc
tower_admin_user: test
tower_admin_password: changeme
tower_task_image: ansible/awx_task:9.0.1
tower_web_image: ansible/awx_web:9.0.1
tower_memcached_image: memcached:alpine

View File

@@ -25,6 +25,7 @@ rules:
- daemonsets
- replicasets
- statefulsets
- ingresses
verbs:
- '*'
- apiGroups:

View File

@@ -1,4 +1,7 @@
---
# TODO: For some reason prepare is not run after a destroy in the Minikube env.
- import_playbook: ../default/prepare.yml
- name: Build Operator in Minikube
hosts: localhost
connection: local

View File

@@ -1,5 +1,8 @@
---
tower_hostname: example-tower.test
tower_secret_key: aabbcc
tower_admin_user: test
tower_admin_password: changeme
tower_task_image: ansible/awx_task:9.0.1
tower_web_image: ansible/awx_web:9.0.1
tower_memcached_image: memcached:alpine

View File

@@ -6,5 +6,6 @@
- tower_memcached.yaml.j2
- tower_postgres.yaml.j2
- tower_rabbitmq.yaml.j2
- tower_config.yaml.j2
- tower.yaml.j2
- tower_task.yaml.j2

View File

@@ -4,33 +4,10 @@ apiVersion: v1
kind: Secret
metadata:
name: '{{ meta.name }}-tower-secret'
namespace: {{ meta.namespace }}
namespace: '{{ meta.namespace }}'
data:
SECRET_KEY: {{ tower_secret_key | b64encode }}
# Tower Web ConfigMap.
---
apiVersion: v1
kind: ConfigMap
metadata:
name: '{{ meta.name }}-tower-configmap'
namespace: {{ meta.namespace }}
labels:
app: tower
data:
# SECRET_KEY: {{ tower_secret_key }}
DATABASE_USER: awx
DATABASE_PASSWORD: awxpass
DATABASE_NAME: awx
DATABASE_PORT: '5432'
DATABASE_HOST: {{ meta.name }}-postgres.{{ meta.namespace }}.svc.cluster.local
RABBITMQ_USER: guest
RABBITMQ_PASSWORD: guest
RABBITMQ_HOST: {{ meta.name }}-rabbitmq.{{ meta.namespace }}.svc.cluster.local
RABBITMQ_PORT: '5672'
RABBITMQ_VHOST: awx
MEMCACHED_HOST: {{ meta.name }}-memcached.{{ meta.namespace }}.svc.cluster.local
MEMCACHED_PORT: '11211'
secret_key: '{{ tower_secret_key | b64encode }}'
admin_password: '{{ tower_admin_password | b64encode }}'
# Tower Web Deployment.
---
@@ -38,7 +15,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ meta.name }}-tower'
namespace: {{ meta.namespace }}
namespace: '{{ meta.namespace }}'
labels:
app: tower
spec:
@@ -54,20 +31,57 @@ spec:
containers:
- image: '{{ tower_web_image }}'
name: tower
envFrom:
- configMapRef:
name: '{{ meta.name }}-tower-configmap'
- secretRef:
name: '{{ meta.name }}-tower-secret'
ports:
- containerPort: 80
- containerPort: 8052
volumeMounts:
- name: secret-key
mountPath: /etc/tower/SECRET_KEY
subPath: SECRET_KEY
readOnly: true
- name: environment
mountPath: /etc/tower/conf.d/environment.sh
subPath: environment.sh
readOnly: true
- name: settings
mountPath: /etc/tower/settings.py
subPath: settings.py
readOnly: true
- name: nginx-conf
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
readOnly: true
volumes:
- name: secret-key
secret:
secretName: '{{ meta.name }}-tower-secret'
items:
- key: secret_key
path: SECRET_KEY
- name: environment
configMap:
name: '{{ meta.name }}-tower-configmap'
items:
- key: environment
path: environment.sh
- name: settings
configMap:
name: '{{ meta.name }}-tower-configmap'
items:
- key: settings
path: settings.py
- name: nginx-conf
configMap:
name: '{{ meta.name }}-tower-configmap'
items:
- key: nginx_conf
path: nginx.conf
# Tower Web Service.
---
apiVersion: v1
kind: Service
metadata:
name: '{{ meta.name }}-tower'
name: '{{ meta.name }}-service'
namespace: '{{ meta.namespace }}'
labels:
app: tower
@@ -75,6 +89,23 @@ spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
targetPort: 8052
selector:
app: tower
# Tower Web Ingress.
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: '{{ meta.name }}-ingress'
namespace: '{{ meta.namespace }}'
spec:
rules:
- host: '{{ tower_hostname }}'
http:
paths:
- path: /
backend:
serviceName: '{{ meta.name }}-service'
servicePort: 80

View File

@@ -0,0 +1,229 @@
# Tower Web ConfigMap.
---
apiVersion: v1
kind: ConfigMap
metadata:
name: '{{ meta.name }}-tower-configmap'
namespace: '{{ meta.namespace }}'
labels:
app: tower
data:
environment: |
DATABASE_USER=awx
DATABASE_NAME=awx
DATABASE_HOST='{{ meta.name }}-postgres.{{ meta.namespace }}.svc.cluster.local'
DATABASE_PORT='5432'
DATABASE_PASSWORD={{ tower_postgres_pass | quote }}
MEMCACHED_HOST='{{ meta.name }}-memcached.{{ meta.namespace }}.svc.cluster.local'
MEMCACHED_PORT='11211'
RABBITMQ_HOST='{{ meta.name }}-rabbitmq.{{ meta.namespace }}.svc.cluster.local'
RABBITMQ_PORT='5672'
AWX_ADMIN_USER={{ tower_admin_user }}
AWX_ADMIN_PASSWORD={{ tower_admin_password | quote }}
settings: |
import os
import socket
def get_secret():
if os.path.exists("/etc/tower/SECRET_KEY"):
return open('/etc/tower/SECRET_KEY', 'rb').read().strip()
ADMINS = ()
STATIC_ROOT = '/var/lib/awx/public/static'
PROJECTS_ROOT = '/var/lib/awx/projects'
JOBOUTPUT_ROOT = '/var/lib/awx/job_status'
SECRET_KEY = get_secret()
ALLOWED_HOSTS = ['*']
INTERNAL_API_URL = 'http://127.0.0.1:8052'
# Container environments don't like chroots
AWX_PROOT_ENABLED = False
# Automatically deprovision pods that go offline
AWX_AUTO_DEPROVISION_INSTANCES = True
CLUSTER_HOST_ID = socket.gethostname()
SYSTEM_UUID = '00000000-0000-0000-0000-000000000000'
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
SERVER_EMAIL = 'root@localhost'
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
EMAIL_SUBJECT_PREFIX = '[AWX] '
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
LOGGING['handlers']['console'] = {
'()': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'simple',
}
LOGGING['loggers']['django.request']['handlers'] = ['console']
LOGGING['loggers']['rest_framework.request']['handlers'] = ['console']
LOGGING['loggers']['awx']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['awx.main.commands.run_callback_receiver']['handlers'] = ['console']
LOGGING['loggers']['awx.main.tasks']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['awx.main.scheduler']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console']
LOGGING['loggers']['social']['handlers'] = ['console']
LOGGING['loggers']['system_tracking_migrations']['handlers'] = ['console']
LOGGING['loggers']['rbac_migrations']['handlers'] = ['console']
LOGGING['loggers']['awx.isolated.manager.playbooks']['handlers'] = ['console']
LOGGING['handlers']['callback_receiver'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['task_system'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['tower_warnings'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['rbac_migrations'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['system_tracking_migrations'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['management_playbooks'] = {'class': 'logging.NullHandler'}
DATABASES = {
'default': {
'ATOMIC_REQUESTS': True,
'ENGINE': 'awx.main.db.profiled_pg',
'NAME': 'awx',
'USER': 'awx',
'PASSWORD': '{{ tower_postgres_pass | quote }}',
'HOST': '{{ meta.name }}-postgres.{{ meta.namespace }}.svc.cluster.local',
'PORT': '5432',
}
}
if os.getenv("DATABASE_SSLMODE", False):
DATABASES['default']['OPTIONS'] = {'sslmode': os.getenv("DATABASE_SSLMODE")}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '{}:{}'.format("{{ meta.name }}-memcached.{{ meta.namespace }}.svc.cluster.local", "11211")
},
'ephemeral': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
BROKER_URL = 'amqp://{}:{}@{}:{}/{}'.format(
'guest',
'guest',
'{{ meta.name }}-rabbitmq.{{ meta.namespace }}.svc.cluster.local',
'5672',
'awx')
CHANNEL_LAYERS = {
'default': {'BACKEND': 'asgi_amqp.AMQPChannelLayer',
'ROUTING': 'awx.main.routing.channel_routing',
'CONFIG': {'url': BROKER_URL}}
}
USE_X_FORWARDED_PORT = True
nginx_conf: |
worker_processes 1;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server_tokens off;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
sendfile on;
#tcp_nopush on;
#gzip on;
upstream uwsgi {
server 127.0.0.1:8050;
}
upstream daphne {
server 127.0.0.1:8051;
}
server {
listen 8052 default_server;
# If you have a domain name, this is where to add it
server_name _;
keepalive_timeout 65;
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
add_header Strict-Transport-Security max-age=15768000;
add_header Content-Security-Policy "default-src 'self'; connect-src 'self' ws: wss:; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-inline' *.pendo.io; img-src 'self' *.pendo.io data:; report-uri /csp-violation/";
add_header X-Content-Security-Policy "default-src 'self'; connect-src 'self' ws: wss:; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-inline' *.pendo.io; img-src 'self' *.pendo.io data:; report-uri /csp-violation/";
# Protect against click-jacking https://www.owasp.org/index.php/Testing_for_Clickjacking_(OTG-CLIENT-009)
add_header X-Frame-Options "DENY";
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
location /static/ {
alias /var/lib/awx/public/static/;
}
location /favicon.ico {
alias /var/lib/awx/public/static/favicon.ico;
}
location /websocket {
# Pass request to the upstream alias
proxy_pass http://daphne;
# Require http version 1.1 to allow for upgrade requests
proxy_http_version 1.1;
# We want proxy_buffering off for proxying to websockets.
proxy_buffering off;
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# enable this if you use HTTPS:
proxy_set_header X-Forwarded-Proto https;
# pass the Host: header from the client for the sake of redirects
proxy_set_header Host $http_host;
# We've set the Host header, so we don't need Nginx to muddle
# about with redirects
proxy_redirect off;
# Depending on the request value, set the Upgrade and
# connection headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
location / {
# Add trailing / if missing
rewrite ^(.*)$http_host(.*[^/])$ $1$http_host$2/ permanent;
uwsgi_read_timeout 120s;
uwsgi_pass uwsgi;
include /etc/nginx/uwsgi_params;
{%- if extra_nginx_include is defined %}
include {{ extra_nginx_include }};
{%- endif %}
proxy_set_header X-Forwarded-Port 443;
}
}
}