Guide complet des meilleures pratiques Ansible pour l'automatisation infrastructure et déploiement applicatif
ansible-project/
├── ansible.cfg # Configuration Ansible
├── requirements.yml # Collections et rôles Galaxy
├── inventory/
│ ├── production/
│ │ ├── hosts.yml # Inventaire production
│ │ └── group_vars/
│ │ ├── all.yml
│ │ ├── webservers.yml
│ │ └── databases.yml
│ ├── staging/
│ │ ├── hosts.yml
│ │ └── group_vars/
│ └── development/
│ ├── hosts.yml
│ └── group_vars/
├── playbooks/
│ ├── site.yml # Playbook principal
│ ├── webservers.yml
│ ├── databases.yml
│ └── monitoring.yml
├── roles/
│ ├── common/
│ │ ├── tasks/
│ │ ├── handlers/
│ │ ├── templates/
│ │ ├── files/
│ │ ├── vars/
│ │ ├── defaults/
│ │ ├── meta/
│ │ └── README.md
│ ├── nginx/
│ ├── postgresql/
│ └── monitoring/
├── group_vars/
│ ├── all.yml # Variables globales
│ └── vault.yml # Secrets chiffrés
├── host_vars/
│ └── server01.yml
├── library/ # Modules personnalisés
├── filter_plugins/ # Filtres Jinja2 custom
├── callback_plugins/ # Plugins de callback
└── files/
└── scripts/
[defaults]
# Static inventory; point at a directory for dynamic inventory instead
inventory = ./inventory/production/hosts.yml
# inventory = ./inventory/dynamic/

# Role search path
roles_path = ./roles:~/.ansible/roles:/usr/share/ansible/roles

# Behaviour
# NOTE(review): disabling host key checking is convenient for labs but weakens
# SSH security -- prefer managed known_hosts entries in production.
host_key_checking = False
retry_files_enabled = False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp/ansible_facts
fact_caching_timeout = 86400

# Performance
forks = 20
poll_interval = 10
timeout = 30
internal_poll_interval = 0.01

# Output
stdout_callback = yaml
# stdout_callback = debug   # more verbose alternative
bin_ansible_callbacks = True
nocows = 1

# Logging
log_path = ./ansible.log

# Vault
vault_password_file = ~/.ansible/vault_pass.txt
# vault_identity_list = dev@~/.ansible/vault_pass_dev.txt, prod@~/.ansible/vault_pass_prod.txt

[privilege_escalation]
become = True
become_method = sudo
become_user = root
become_ask_pass = False

[ssh_connection]
# SSH tuning belongs here, not under [defaults]: pipelining, ssh_args and
# control_path are [ssh_connection] options and are ignored in [defaults].
# The duplicate/conflicting [defaults] copies were removed (600s kept).
pipelining = True
ssh_args = -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
control_path = %(directory)s/%%h-%%r

[persistent_connection]
connect_timeout = 30
command_timeout = 30
inventory/production/hosts.yml
# Production inventory: groups, per-host addresses and per-group variables.
all:
  vars:
    ansible_user: deploy
    ansible_python_interpreter: /usr/bin/python3
  children:
    webservers:
      hosts:
        web01:
          ansible_host: 192.168.1.10
        web02:
          ansible_host: 192.168.1.11
      vars:
        nginx_workers: 4
        app_port: 8080
    databases:
      hosts:
        db01:
          ansible_host: 192.168.1.20
          postgres_version: 14
        db02:
          ansible_host: 192.168.1.21
          postgres_version: 14
          postgres_role: replica
      vars:
        postgres_max_connections: 200
    loadbalancers:
      hosts:
        lb01:
          ansible_host: 192.168.1.30
          lb_vip: 192.168.1.100
        lb02:
          ansible_host: 192.168.1.31
          lb_vip: 192.168.1.100
    monitoring:
      hosts:
        mon01:
          ansible_host: 192.168.1.40
      vars:
        grafana_port: 3000
        prometheus_port: 9090
group_vars/all.yml
---
# Global variables shared by every host.
# NOTE(review): "environment" is a reserved play keyword in Ansible; using it
# as a variable name triggers warnings -- consider renaming to env_name.
environment: production
domain: example.com

# NTP
ntp_servers:
  - 0.pool.ntp.org
  - 1.pool.ntp.org

# DNS
dns_servers:
  - 8.8.8.8
  - 8.8.4.4

# System users (SSH key comes from the encrypted vault file)
admin_users:
  - name: admin
    groups: sudo,docker
    ssh_key: "{{ vault_admin_ssh_key }}"

# Monitoring
monitoring_enabled: true
log_aggregation: true
group_vars/webservers.yml
---
# Web tier configuration
nginx_version: latest
nginx_user: www-data
nginx_worker_processes: auto
nginx_worker_connections: 4096

# SSL
ssl_protocols: "TLSv1.2 TLSv1.3"
ssl_ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256"

# Application
app_name: myapp
# default('latest', true) also replaces the EMPTY STRING the env lookup
# returns when APP_VERSION is unset; plain default() only handles undefined
# and would silently keep "".
app_version: "{{ lookup('env', 'APP_VERSION') | default('latest', true) }}"
app_port: 8080
app_max_memory: 2G
---
# Main entry point: common base, then one play per tier.
- name: Configuration complète de l'infrastructure
  hosts: all
  become: true
  gather_facts: true
  pre_tasks:
    - name: Vérifier la connectivité
      ansible.builtin.ping:
      tags: always

    - name: Mettre à jour le cache APT
      ansible.builtin.apt:
        update_cache: true
        cache_valid_time: 3600
      when: ansible_os_family == "Debian"
      tags: always
  roles:
    - role: common
      tags: common

- name: Configuration des serveurs web
  hosts: webservers
  become: true
  roles:
    - role: nginx
      tags: nginx
    - role: application
      tags: app

- name: Configuration des bases de données
  hosts: databases
  become: true
  serial: 1  # One database server at a time
  roles:
    - role: postgresql
      tags: database

- name: Configuration du monitoring
  hosts: monitoring
  become: true
  roles:
    - role: prometheus
      tags: prometheus
    - role: grafana
      tags: grafana
roles/nginx/tasks/main.yml
---
# Main tasks for the nginx role: load OS vars, then delegate to
# focused task files, and finally ensure the service is up.
- name: Include OS-specific variables
  ansible.builtin.include_vars: "{{ ansible_os_family }}.yml"
  tags: always

- name: Include installation tasks
  ansible.builtin.include_tasks: install.yml
  tags: install

- name: Include configuration tasks
  ansible.builtin.include_tasks: configure.yml
  tags: configure

- name: Include SSL tasks
  ansible.builtin.include_tasks: ssl.yml
  when: nginx_ssl_enabled | bool
  tags: ssl

- name: Include security hardening tasks
  ansible.builtin.include_tasks: security.yml
  tags: security

- name: Ensure nginx is started and enabled
  ansible.builtin.service:
    name: nginx
    state: started
    enabled: true
  tags: service
roles/nginx/tasks/install.yml
---
- name: Install nginx repository
  ansible.builtin.apt_repository:
    repo: ppa:nginx/stable
    state: present
  when: ansible_distribution == "Ubuntu"

- name: Install nginx
  ansible.builtin.package:
    name: nginx
    state: present

- name: Install additional packages
  # Passing a list resolves everything in one package-manager transaction,
  # which is faster than looping one task invocation per package.
  ansible.builtin.package:
    name:
      - python3-certbot-nginx
      - apache2-utils
    state: present
roles/nginx/tasks/configure.yml
---
- name: Configure nginx.conf
  ansible.builtin.template:
    src: nginx.conf.j2
    dest: /etc/nginx/nginx.conf
    owner: root
    group: root
    mode: '0644'
    # Validate the rendered file before replacing the live config.
    validate: 'nginx -t -c %s'
  notify: reload nginx

- name: Create sites-available directory
  ansible.builtin.file:
    path: /etc/nginx/sites-available
    state: directory
    mode: '0755'

- name: Create sites-enabled directory
  ansible.builtin.file:
    path: /etc/nginx/sites-enabled
    state: directory
    mode: '0755'

- name: Deploy site configurations
  ansible.builtin.template:
    src: site.conf.j2
    dest: "/etc/nginx/sites-available/{{ item.name }}.conf"
    mode: '0644'
  loop: "{{ nginx_sites }}"
  notify: reload nginx

- name: Enable sites
  ansible.builtin.file:
    src: "/etc/nginx/sites-available/{{ item.name }}.conf"
    dest: "/etc/nginx/sites-enabled/{{ item.name }}.conf"
    state: link
  loop: "{{ nginx_sites }}"
  when: item.enabled | default(true)
  notify: reload nginx

- name: Remove default site
  ansible.builtin.file:
    path: /etc/nginx/sites-enabled/default
    state: absent
  notify: reload nginx
roles/nginx/handlers/main.yml
---
# Handlers for the nginx role (triggered via notify).
- name: reload nginx
  ansible.builtin.service:
    name: nginx
    state: reloaded

- name: restart nginx
  ansible.builtin.service:
    name: nginx
    state: restarted

- name: check nginx config
  ansible.builtin.command: nginx -t
  register: nginx_test
  changed_when: false
  failed_when: nginx_test.rc != 0
roles/nginx/templates/nginx.conf.j2
user {{ nginx_user }};
worker_processes {{ nginx_worker_processes }};
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections {{ nginx_worker_connections }};
    use epoll;
    multi_accept on;
}

http {
    # Basic settings
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    server_tokens off;

    # Buffer settings
    client_body_buffer_size 128k;
    client_max_body_size {{ nginx_client_max_body_size | default('20M') }};
    client_header_buffer_size 1k;
    large_client_header_buffers 4 32k;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # SSL settings
    ssl_protocols {{ ssl_protocols }};
    ssl_ciphers {{ ssl_ciphers }};
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;

    # Logging
    access_log /var/log/nginx/access.log combined;
    error_log /var/log/nginx/error.log warn;

    # Gzip settings
    gzip on;
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types text/plain text/css text/xml text/javascript application/json application/javascript application/xml+rss application/rss+xml;

    # Virtual host configs
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
roles/nginx/defaults/main.yml
---
# Default variables for nginx role (override per group/host/play as needed).
nginx_user: www-data
nginx_worker_processes: auto
nginx_worker_connections: 4096
nginx_client_max_body_size: 20M
nginx_ssl_enabled: false
ssl_protocols: "TLSv1.2 TLSv1.3"
ssl_ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256"
# Virtual hosts rendered by configure.yml; empty list deploys no sites.
nginx_sites: []
# Example:
# nginx_sites:
#   - name: myapp
#     server_name: myapp.example.com
#     root: /var/www/myapp
#     port: 80
#     ssl_enabled: false
#     enabled: true
# Error handling pattern: block/rescue/always with retried download.
- name: Deploy application with retries
  block:
    - name: Download application
      ansible.builtin.get_url:
        url: "{{ app_download_url }}"
        dest: "/tmp/{{ app_name }}.tar.gz"
      register: download_result
      retries: 3
      delay: 5
      until: download_result is succeeded

    - name: Extract application
      ansible.builtin.unarchive:
        src: "/tmp/{{ app_name }}.tar.gz"
        dest: "{{ app_install_dir }}"
        remote_src: true
  rescue:
    - name: Rollback on failure
      ansible.builtin.command: /usr/local/bin/rollback.sh
      changed_when: true  # the script mutates state; report it honestly

    - name: Notify admin
      community.general.mail:
        subject: "Deployment failed on {{ inventory_hostname }}"
        body: "{{ ansible_failed_result }}"
        to: admin@example.com
  always:
    - name: Cleanup temporary files
      ansible.builtin.file:
        path: "/tmp/{{ app_name }}.tar.gz"
        state: absent
# Delegation examples: run work on the controller with delegate_to.
- name: Generate SSL certificate locally
  # delegate_to is the modern form; local_action is legacy shorthand.
  ansible.builtin.command: >-
    openssl req -x509 -nodes -days 365 -newkey rsa:2048
    -keyout /tmp/{{ inventory_hostname }}.key
    -out /tmp/{{ inventory_hostname }}.crt
  delegate_to: localhost
  become: false
  run_once: true

- name: Copy certificate to remote
  ansible.builtin.copy:
    src: "/tmp/{{ inventory_hostname }}.crt"
    dest: /etc/ssl/certs/
    mode: '0644'

- name: Update load balancer
  ansible.builtin.uri:
    url: "http://{{ loadbalancer_host }}/api/pools/{{ pool_name }}/members"
    method: POST
    body_format: json
    body:
      ip: "{{ ansible_default_ipv4.address }}"
      port: 8080
  delegate_to: localhost
# Conditional loop: every entry in `when` must hold for each item.
- name: Create users with complex conditions
  ansible.builtin.user:
    name: "{{ item.name }}"
    groups: "{{ item.groups | default([]) | join(',') }}"
    shell: "{{ item.shell | default('/bin/bash') }}"
    state: "{{ item.state | default('present') }}"
  loop: "{{ users }}"
  when:
    - item.state | default('present') == 'present'
    - item.name not in system_users
    - environment in item.environments | default(['all'])
# Dynamic patterns: dict2items loops, OS-keyed includes, first_found vars.
- name: Configure services dynamically
  ansible.builtin.template:
    src: "service.conf.j2"
    dest: "/etc/{{ item.key }}/{{ item.key }}.conf"
  loop: "{{ services | dict2items }}"
  when: item.value.enabled
  notify: "restart {{ item.key }}"

- name: Include tasks based on OS
  ansible.builtin.include_tasks: "setup-{{ ansible_os_family }}.yml"

- name: Import role dynamically
  ansible.builtin.include_role:
    name: "{{ item }}"
  loop:
    - common
    - security
    - monitoring
  when: item in enabled_roles

- name: Load variables based on environment
  # Most-specific file wins; vars/default.yml is the fallback.
  ansible.builtin.include_vars:
    file: "{{ item }}"
  with_first_found:
    - "vars/{{ environment }}-{{ ansible_distribution }}.yml"
    - "vars/{{ environment }}.yml"
    - "vars/default.yml"
inventory/aws_ec2.yml
---
plugin: amazon.aws.aws_ec2
regions:
  - us-east-1
  - eu-west-1
filters:
  instance-state-name: running
  # Quoted: the key itself contains a colon and would confuse the parser.
  "tag:Managed": Ansible
keyed_groups:
  # By Environment tag
  - key: tags.Environment
    prefix: env
  - key: tags.Role
    prefix: role
  - key: tags.Application
    prefix: app
  # By instance type
  - key: instance_type
    prefix: instance_type
  # By availability zone
  - key: placement.availability_zone
    prefix: az
hostnames:
  - tag:Name
  - dns-name
  - private-ip-address
compose:
  ansible_host: public_ip_address
  ansible_user: "'ec2-user' if 'amazon' in image.name else 'ubuntu'"
# Cache API responses to speed up repeated runs
cache: true
cache_plugin: jsonfile
cache_connection: /tmp/aws_inventory
cache_timeout: 3600
Utilisation
# Lister l'inventaire
ansible-inventory -i inventory/aws_ec2.yml --list
ansible-inventory -i inventory/aws_ec2.yml --graph
# Exécuter un playbook
ansible-playbook -i inventory/aws_ec2.yml site.yml
# Limiter à un groupe
ansible-playbook -i inventory/aws_ec2.yml site.yml --limit env_production
# Limiter par tag
ansible-playbook -i inventory/aws_ec2.yml site.yml --limit 'tag_Environment_production'
inventory/gcp_compute.yml
---
plugin: google.cloud.gcp_compute
projects:
  - my-gcp-project
filters:
  - status = RUNNING
  - labels.managed = ansible
keyed_groups:
  - key: labels.environment
    prefix: env
  - key: labels.role
    prefix: role
  - key: zone
    prefix: zone
hostnames:
  - name
  - public_ip
  - private_ip
compose:
  ansible_host: networkInterfaces[0].accessConfigs[0].natIP
# Cache API responses to speed up repeated runs
cache: true
cache_plugin: jsonfile
cache_connection: /tmp/gcp_inventory
cache_timeout: 3600
inventory/custom_inventory.py
#!/usr/bin/env python3
"""Custom dynamic inventory script.

Builds an Ansible inventory from an external HTTP API, implementing the
dynamic-inventory protocol (--list / --host <hostname>).
"""
import json
import sys

import requests


def get_inventory():
    """Fetch servers from the API and shape them into inventory JSON.

    Returns:
        dict: group memberships plus per-host variables under
        ``_meta.hostvars`` (serving them in --list avoids one --host
        subprocess call per host).
    """
    # Timeout keeps the whole ansible run from hanging on a dead API;
    # raise_for_status surfaces HTTP errors instead of JSON-parsing an
    # error page.
    response = requests.get('https://api.example.com/inventory', timeout=10)
    response.raise_for_status()
    data = response.json()

    inventory = {
        '_meta': {
            'hostvars': {}
        },
        'all': {
            'children': ['webservers', 'databases']
        },
        'webservers': {
            'hosts': [],
            'vars': {
                'ansible_user': 'deploy'
            }
        },
        'databases': {
            'hosts': [],
            'vars': {
                'ansible_user': 'deploy'
            }
        }
    }

    # Build group membership and per-host variables.
    for server in data['servers']:
        if server['role'] == 'web':
            inventory['webservers']['hosts'].append(server['hostname'])
        elif server['role'] == 'db':
            inventory['databases']['hosts'].append(server['hostname'])
        # Host vars are recorded for every server, regardless of role.
        inventory['_meta']['hostvars'][server['hostname']] = {
            'ansible_host': server['ip'],
            'ansible_port': server.get('ssh_port', 22),
            'datacenter': server['datacenter']
        }
    return inventory


if __name__ == '__main__':
    if len(sys.argv) == 2 and sys.argv[1] == '--list':
        print(json.dumps(get_inventory(), indent=2))
    elif len(sys.argv) == 3 and sys.argv[1] == '--host':
        # Host vars are already served via _meta in --list, so --host
        # legitimately returns an empty mapping.
        print(json.dumps({}))
    else:
        print("Usage: {} --list or {} --host <hostname>".format(sys.argv[0], sys.argv[0]))
        sys.exit(1)
Rendre exécutable et utiliser
chmod +x inventory/custom_inventory.py
ansible-playbook -i inventory/custom_inventory.py site.yml
# Créer un nouveau fichier vault
ansible-vault create group_vars/vault.yml
# Éditer un fichier vault
ansible-vault edit group_vars/vault.yml
# Chiffrer un fichier existant
ansible-vault encrypt group_vars/secrets.yml
# Déchiffrer un fichier
ansible-vault decrypt group_vars/secrets.yml
# Voir le contenu sans déchiffrer
ansible-vault view group_vars/vault.yml
# Rechiffrer avec un nouveau mot de passe
ansible-vault rekey group_vars/vault.yml
# Chiffrer une string
ansible-vault encrypt_string 'my_secret_password' --name 'db_password'
# Chiffrer depuis stdin
echo -n 'my_secret' | ansible-vault encrypt_string --stdin-name 'api_key'
Configuration ansible.cfg
[defaults]
vault_identity_list = dev@~/.ansible/vault_pass_dev.txt, prod@~/.ansible/vault_pass_prod.txt
Utilisation
# Créer avec une identité spécifique
ansible-vault create --vault-id dev@prompt group_vars/dev/vault.yml
ansible-vault create --vault-id prod@prompt group_vars/prod/vault.yml
# Éditer
ansible-vault edit --vault-id dev@prompt group_vars/dev/vault.yml
# Exécuter playbook
ansible-playbook --vault-id dev@prompt site.yml
ansible-playbook --vault-id prod@prompt site.yml
group_vars/vault.yml
---
# Encrypted secrets: the ciphertext lines MUST be indented under the
# `!vault |` block scalar or the file fails to parse.
# Database credentials
vault_db_root_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  66386439653931653933666134333431626233633963...

vault_db_app_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  33383435366434646334313436353265383831...

# API Keys
vault_api_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  62353465653165663735616564333532...

# SSL Certificates
vault_ssl_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  34653031376239636263613265643863...
group_vars/all.yml (référencer les variables vault)
---
# Reference vault variables indirectly: playbooks and templates use the
# plain names while the encrypted values live only in vault.yml, which
# keeps `grep` usable on unencrypted files.
db_root_password: "{{ vault_db_root_password }}"
db_app_password: "{{ vault_db_app_password }}"
api_key: "{{ vault_api_key }}"
ssl_key: "{{ vault_ssl_key }}"
---
# Inline-encrypted values can live in any vars file, including inside
# list items; ciphertext must stay indented under its `!vault |` tag.
db_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  66386439653931653933666134333431626233633963...

users:
  - name: admin
    password: !vault |
      $ANSIBLE_VAULT;1.1;AES256
      33383435366434646334313436353265383831...
# Installation
pip install molecule molecule-docker ansible-lint
# Créer un nouveau rôle avec molecule
molecule init role my_role --driver-name docker
# Dans un rôle existant
cd roles/nginx
molecule init scenario --driver-name docker
roles/nginx/
└── molecule/
└── default/
├── molecule.yml # Configuration Molecule
├── converge.yml # Playbook de test
├── verify.yml # Tests de vérification
└── prepare.yml # Préparation (optionnel)
molecule/default/molecule.yml
---
dependency:
  name: galaxy
  options:
    requirements-file: requirements.yml
driver:
  name: docker
platforms:
  - name: ubuntu-20
    image: ubuntu:20.04
    pre_build_image: true
    command: /sbin/init
    tmpfs:
      - /run
      - /tmp
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    capabilities:
      - SYS_ADMIN
    privileged: true
  - name: centos-8
    # NOTE(review): centos:8 is EOL; consider a maintained image
    # (e.g. quay.io/centos/centos:stream9) for new scenarios.
    image: centos:8
    pre_build_image: true
    command: /sbin/init
    tmpfs:
      - /run
      - /tmp
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    capabilities:
      - SYS_ADMIN
    privileged: true
provisioner:
  name: ansible
  config_options:
    defaults:
      callbacks_enabled: profile_tasks,timer
      stdout_callback: yaml
  inventory:
    host_vars:
      ubuntu-20:
        ansible_python_interpreter: /usr/bin/python3
      centos-8:
        ansible_python_interpreter: /usr/bin/python3
lint: |
  set -e
  yamllint .
  ansible-lint .
verifier:
  name: ansible
scenario:
  test_sequence:
    - dependency
    - lint
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    - idempotence
    - side_effect
    - verify
    - cleanup
    - destroy
molecule/default/converge.yml
---
# Molecule converge: apply the role under test with sample variables.
- name: Converge
  hosts: all
  become: true
  vars:
    nginx_sites:
      - name: test
        server_name: test.local
        port: 80
        enabled: true
  roles:
    - role: nginx
molecule/default/verify.yml
---
# Molecule verify: check_mode tasks assert state without changing it --
# a task that WOULD change something means the role did not converge.
- name: Verify
  hosts: all
  gather_facts: true
  become: true
  tasks:
    - name: Check nginx is installed
      ansible.builtin.package:
        name: nginx
        state: present
      check_mode: true
      register: nginx_installed
      failed_when: nginx_installed is changed

    - name: Check nginx is running
      ansible.builtin.service:
        name: nginx
        state: started
      check_mode: true
      register: nginx_running
      failed_when: nginx_running is changed

    - name: Check nginx is listening on port 80
      ansible.builtin.wait_for:
        port: 80
        timeout: 5

    - name: Check nginx configuration is valid
      ansible.builtin.command: nginx -t
      changed_when: false

    - name: Test HTTP response
      ansible.builtin.uri:
        url: http://localhost
        return_content: true
      register: web_response
      failed_when: web_response.status != 200
# Cycle complet de test
molecule test
# Créer les instances
molecule create
# Appliquer le playbook
molecule converge
# Tester l'idempotence
molecule idempotence
# Vérifier
molecule verify
# Se connecter à une instance
molecule login
molecule login -h ubuntu-20
# Détruire les instances
molecule destroy
# Linter
molecule lint
# Liste des instances
molecule list
molecule/default/tests/test_default.py
"""Test nginx installation and configuration with testinfra."""
import os

import testinfra.utils.ansible_runner

# Run every test against each host in the Molecule-generated inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')


def test_nginx_installed(host):
    """Test nginx package is installed."""
    nginx = host.package("nginx")
    assert nginx.is_installed


def test_nginx_running(host):
    """Test nginx service is running and enabled at boot."""
    nginx = host.service("nginx")
    assert nginx.is_running
    assert nginx.is_enabled


def test_nginx_listening(host):
    """Test nginx is listening on port 80."""
    assert host.socket("tcp://0.0.0.0:80").is_listening


def test_nginx_config(host):
    """Test nginx configuration is valid."""
    cmd = host.run("nginx -t")
    assert cmd.rc == 0


def test_nginx_response(host):
    """Test HTTP response is 200."""
    cmd = host.run("curl -s -o /dev/null -w '%{http_code}' http://localhost")
    assert cmd.stdout == "200"


def test_nginx_conf_file(host):
    """Test nginx.conf exists with correct ownership and permissions."""
    conf = host.file("/etc/nginx/nginx.conf")
    assert conf.exists
    assert conf.is_file
    assert conf.user == "root"
    assert conf.group == "root"
    assert conf.mode == 0o644
# Rechercher des rôles
ansible-galaxy search nginx
ansible-galaxy search nginx --author geerlingguy
# Informations sur un rôle
ansible-galaxy info geerlingguy.nginx
# Installer un rôle
ansible-galaxy install geerlingguy.nginx
# Installer dans un path spécifique
ansible-galaxy install geerlingguy.nginx -p ./roles
# Installer depuis requirements
ansible-galaxy install -r requirements.yml
# Lister les rôles installés
ansible-galaxy list
# Supprimer un rôle
ansible-galaxy remove geerlingguy.nginx
requirements.yml
---
# Roles from Galaxy
roles:
  - name: geerlingguy.nginx
    version: 3.1.0
  - name: geerlingguy.postgresql
    version: 3.3.0
  - name: geerlingguy.docker
    version: 4.1.0
  # From Git
  - src: https://github.com/username/ansible-role-myapp.git
    version: main
    name: myapp
  - src: git+https://github.com/username/another-role.git
    version: v1.2.3
    name: another_role

# Collections
collections:
  - name: community.general
    version: 5.0.0
  - name: ansible.posix
    version: 1.4.0
  - name: community.docker
    version: 3.0.0
  - name: amazon.aws
    version: 5.0.0
  # From Git: for type=git the repository URL goes in `name`
  # (a `source:` key here points at a Galaxy server, not a repo).
  - name: https://github.com/company/ansible-collection-internal.git
    type: git
    version: main
Installation
# Installer roles et collections
ansible-galaxy install -r requirements.yml
# Seulement les rôles
ansible-galaxy role install -r requirements.yml
# Seulement les collections
ansible-galaxy collection install -r requirements.yml
# Forcer la réinstallation
ansible-galaxy install -r requirements.yml --force
# Initialiser un rôle
ansible-galaxy init my_role
# Structure créée
my_role/
├── README.md
├── defaults/
│ └── main.yml
├── files/
├── handlers/
│ └── main.yml
├── meta/
│ └── main.yml
├── tasks/
│ └── main.yml
├── templates/
├── tests/
│ ├── inventory
│ └── test.yml
└── vars/
└── main.yml
# Edit meta/main.yml
cat > my_role/meta/main.yml <<EOF
---
galaxy_info:
  author: Your Name
  description: Description of your role
  company: Your Company
  license: MIT
  # Quoted: unquoted 2.9 would be parsed as a float (2.9 == 2.90).
  min_ansible_version: "2.9"
  platforms:
    - name: Ubuntu
      versions:
        - focal
        - jammy
  galaxy_tags:
    - web
    - nginx
dependencies: []
EOF
# Publier sur Galaxy (nécessite un compte)
ansible-galaxy role import username role-name
# Créer une collection
ansible-galaxy collection init company.internal
# Structure
company/internal/
├── docs/
├── galaxy.yml
├── plugins/
│ ├── modules/
│ ├── inventory/
│ ├── filters/
│ └── lookup/
├── roles/
├── playbooks/
└── README.md
# Build
ansible-galaxy collection build
# Installer localement
ansible-galaxy collection install company-internal-1.0.0.tar.gz
# Publier
ansible-galaxy collection publish company-internal-1.0.0.tar.gz
.gitlab-ci.yml
---
stages:
  - lint
  - test
  - deploy-staging
  - deploy-production

variables:
  ANSIBLE_FORCE_COLOR: "true"
  ANSIBLE_HOST_KEY_CHECKING: "false"

# Linting
ansible-lint:
  stage: lint
  image: cytopia/ansible-lint:latest
  script:
    - ansible-lint playbooks/
    - ansible-lint roles/
  only:
    - merge_requests
    - main

yaml-lint:
  stage: lint
  image: cytopia/yamllint:latest
  script:
    - yamllint .
  only:
    - merge_requests
    - main

# Molecule tests
molecule-test:
  stage: test
  image: cytopia/ansible:latest-tools
  services:
    - docker:dind
  variables:
    DOCKER_HOST: tcp://docker:2375
    DOCKER_TLS_CERTDIR: ""
  before_script:
    - pip install molecule molecule-docker
  script:
    - cd roles/nginx
    - molecule test
  only:
    - merge_requests
    - main

# Staging deployment
deploy-staging:
  stage: deploy-staging
  image: cytopia/ansible:latest
  before_script:
    - mkdir -p ~/.ssh
    - echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa
    - chmod 600 ~/.ssh/id_rsa
    - echo "$ANSIBLE_VAULT_PASSWORD" > ~/.vault_pass.txt
  script:
    - ansible-playbook -i inventory/staging/hosts.yml playbooks/site.yml --vault-password-file ~/.vault_pass.txt
  after_script:
    # Always scrub credentials from the runner workspace.
    - rm -f ~/.ssh/id_rsa ~/.vault_pass.txt
  environment:
    name: staging
    url: https://staging.example.com
  only:
    - main

# Production deployment (manual gate)
deploy-production:
  stage: deploy-production
  image: cytopia/ansible:latest
  before_script:
    - mkdir -p ~/.ssh
    - echo "$SSH_PRIVATE_KEY_PROD" > ~/.ssh/id_rsa
    - chmod 600 ~/.ssh/id_rsa
    - echo "$ANSIBLE_VAULT_PASSWORD_PROD" > ~/.vault_pass.txt
  script:
    - ansible-playbook -i inventory/production/hosts.yml playbooks/site.yml --vault-password-file ~/.vault_pass.txt
  after_script:
    - rm -f ~/.ssh/id_rsa ~/.vault_pass.txt
  environment:
    name: production
    url: https://www.example.com
  when: manual
  only:
    - main
.github/workflows/ansible.yml
---
name: Ansible CI/CD

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          # Quoted: unquoted 3.10 would be parsed as the float 3.1.
          python-version: '3.10'
      - name: Install dependencies
        run: |
          pip install ansible ansible-lint yamllint
      - name: Run ansible-lint
        run: ansible-lint playbooks/ roles/
      - name: Run yamllint
        run: yamllint .

  molecule:
    name: Molecule Test
    runs-on: ubuntu-latest
    strategy:
      matrix:
        role: [nginx, postgresql, common]
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install dependencies
        run: |
          pip install molecule molecule-docker ansible-lint
      - name: Run Molecule
        run: |
          cd roles/${{ matrix.role }}
          molecule test

  deploy-staging:
    name: Deploy to Staging
    runs-on: ubuntu-latest
    needs: [lint, molecule]
    if: github.ref == 'refs/heads/main'
    environment:
      name: staging
      url: https://staging.example.com
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install Ansible
        run: pip install ansible
      - name: Setup SSH
        run: |
          mkdir -p ~/.ssh
          echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
          chmod 600 ~/.ssh/id_rsa
          ssh-keyscan -H ${{ secrets.STAGING_HOST }} >> ~/.ssh/known_hosts
      - name: Setup Vault
        run: echo "${{ secrets.VAULT_PASSWORD }}" > ~/.vault_pass.txt
      - name: Deploy
        run: |
          ansible-playbook -i inventory/staging/hosts.yml playbooks/site.yml \
            --vault-password-file ~/.vault_pass.txt

  deploy-production:
    name: Deploy to Production
    runs-on: ubuntu-latest
    needs: [deploy-staging]
    if: github.ref == 'refs/heads/main'
    environment:
      name: production
      url: https://www.example.com
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install Ansible
        run: pip install ansible
      - name: Setup SSH
        run: |
          mkdir -p ~/.ssh
          echo "${{ secrets.SSH_PRIVATE_KEY_PROD }}" > ~/.ssh/id_rsa
          chmod 600 ~/.ssh/id_rsa
          ssh-keyscan -H ${{ secrets.PRODUCTION_HOST }} >> ~/.ssh/known_hosts
      - name: Setup Vault
        run: echo "${{ secrets.VAULT_PASSWORD_PROD }}" > ~/.vault_pass.txt
      - name: Deploy
        run: |
          ansible-playbook -i inventory/production/hosts.yml playbooks/site.yml \
            --vault-password-file ~/.vault_pass.txt \
            --diff
playbooks/deploy-webapp.yml
---
# Rolling deployment with rollback. Structural fix vs. the original:
# rescue/always are only valid on a `block`, never at play level, so the
# deployment steps are grouped into one block inside `tasks`.
- name: Deploy Web Application
  hosts: webservers
  become: true
  serial: 2  # Rolling deployment, two hosts at a time
  max_fail_percentage: 50
  vars:
    # default('latest', true) also covers the empty string returned by the
    # env lookup when APP_VERSION is unset.
    app_version: "{{ lookup('env', 'APP_VERSION') | default('latest', true) }}"
    health_check_url: "http://localhost:{{ app_port }}/health"
    health_check_retries: 30
    health_check_delay: 10

  pre_tasks:
    - name: Notify deployment start
      ansible.builtin.uri:
        url: "{{ slack_webhook_url }}"
        method: POST
        body_format: json
        body:
          text: "🚀 Deploying {{ app_name }} v{{ app_version }} to {{ inventory_hostname }}"
      delegate_to: localhost

    - name: Remove from load balancer
      ansible.builtin.uri:
        url: "{{ lb_api_url }}/pools/{{ lb_pool }}/members/{{ inventory_hostname }}"
        method: DELETE
        status_code: [200, 204, 404]
      delegate_to: localhost
      ignore_errors: true

    - name: Wait for connections to drain
      ansible.builtin.wait_for:
        timeout: 30

  tasks:
    - name: Deploy with rollback on failure
      block:
        - name: Download application package
          ansible.builtin.get_url:
            url: "{{ app_download_url }}/{{ app_name }}-{{ app_version }}.tar.gz"
            dest: "/tmp/{{ app_name }}-{{ app_version }}.tar.gz"
            checksum: "sha256:{{ app_checksum }}"
          register: download
          retries: 3
          delay: 5
          until: download is succeeded

        - name: Stop application
          ansible.builtin.systemd:
            name: "{{ app_name }}"
            state: stopped

        - name: Backup current version
          community.general.archive:
            path: "{{ app_install_dir }}"
            dest: "/backup/{{ app_name }}-{{ ansible_date_time.epoch }}.tar.gz"
          ignore_errors: true

        - name: Extract new version
          ansible.builtin.unarchive:
            src: "/tmp/{{ app_name }}-{{ app_version }}.tar.gz"
            dest: "{{ app_install_dir }}"
            remote_src: true
            owner: "{{ app_user }}"
            group: "{{ app_group }}"

        - name: Update configuration
          ansible.builtin.template:
            src: app.conf.j2
            dest: "{{ app_install_dir }}/config/app.conf"
            owner: "{{ app_user }}"
            group: "{{ app_group }}"
            mode: '0640'

        - name: Run database migrations
          # Run once, on the first webserver only.
          ansible.builtin.command: "{{ app_install_dir }}/bin/migrate"
          become_user: "{{ app_user }}"
          run_once: true
          delegate_to: "{{ groups['webservers'][0] }}"
          changed_when: true

        - name: Start application
          ansible.builtin.systemd:
            name: "{{ app_name }}"
            state: started
            daemon_reload: true

        - name: Wait for application to be ready
          ansible.builtin.uri:
            url: "{{ health_check_url }}"
            status_code: 200
          register: health_check
          retries: "{{ health_check_retries }}"
          delay: "{{ health_check_delay }}"
          until: health_check is succeeded
      rescue:
        - name: Restore previous version
          # NOTE(review): latest_backup is never set in this play -- it must
          # be computed (e.g. find /backup + sort by mtime) before this works.
          ansible.builtin.unarchive:
            src: "{{ latest_backup.path }}"
            dest: "{{ app_install_dir }}"
            remote_src: true

        - name: Restart application
          ansible.builtin.systemd:
            name: "{{ app_name }}"
            state: restarted

        - name: Notify deployment failure
          ansible.builtin.uri:
            url: "{{ slack_webhook_url }}"
            method: POST
            body_format: json
            body:
              text: "❌ Deployment failed on {{ inventory_hostname }}. Rolled back."
          delegate_to: localhost

        - name: Fail the play after rollback
          ansible.builtin.fail:
            msg: "Deployment failed and rolled back"
      always:
        - name: Cleanup temp files
          ansible.builtin.file:
            path: "/tmp/{{ app_name }}-{{ app_version }}.tar.gz"
            state: absent

  post_tasks:
    - name: Add back to load balancer
      ansible.builtin.uri:
        url: "{{ lb_api_url }}/pools/{{ lb_pool }}/members"
        method: POST
        body_format: json
        body:
          ip: "{{ ansible_default_ipv4.address }}"
          port: "{{ app_port }}"
        status_code: [200, 201]
      delegate_to: localhost

    - name: Cleanup old versions
      ansible.builtin.find:
        paths: /backup
        patterns: "{{ app_name }}-*.tar.gz"
        age: 30d
      register: old_backups

    - name: Remove old backups
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ old_backups.files }}"
playbooks/security-hardening.yml
---
- name: Linux Security Hardening
  hosts: all
  become: true
  tasks:
    # SSH hardening
    - name: Configure SSH security
      ansible.builtin.lineinfile:
        path: /etc/ssh/sshd_config
        regexp: "{{ item.regexp }}"
        line: "{{ item.line }}"
        # Refuse to write a config sshd cannot parse.
        validate: 'sshd -t -f %s'
      loop:
        - { regexp: '^#?PermitRootLogin', line: 'PermitRootLogin no' }
        - { regexp: '^#?PasswordAuthentication', line: 'PasswordAuthentication no' }
        - { regexp: '^#?PubkeyAuthentication', line: 'PubkeyAuthentication yes' }
        - { regexp: '^#?X11Forwarding', line: 'X11Forwarding no' }
        - { regexp: '^#?MaxAuthTries', line: 'MaxAuthTries 3' }
        - { regexp: '^#?ClientAliveInterval', line: 'ClientAliveInterval 300' }
        - { regexp: '^#?ClientAliveCountMax', line: 'ClientAliveCountMax 2' }
        # NOTE(review): "Protocol" was removed in OpenSSH 7.6+; it is
        # obsolete on modern systems -- confirm before keeping it.
        - { regexp: '^#?Protocol', line: 'Protocol 2' }
      notify: restart sshd

    # Firewall
    - name: Install UFW
      ansible.builtin.package:
        name: ufw
        state: present

    - name: Configure UFW defaults
      community.general.ufw:
        direction: "{{ item.direction }}"
        policy: "{{ item.policy }}"
      loop:
        - { direction: 'incoming', policy: 'deny' }
        - { direction: 'outgoing', policy: 'allow' }

    - name: Allow SSH
      # `limit` rate-limits repeated connection attempts on port 22;
      # ordered before `state: enabled` so we never lock ourselves out.
      community.general.ufw:
        rule: limit
        port: '22'
        proto: tcp

    - name: Enable UFW
      community.general.ufw:
        state: enabled

    # Fail2ban
    - name: Install fail2ban
      ansible.builtin.package:
        name: fail2ban
        state: present

    - name: Configure fail2ban
      ansible.builtin.copy:
        dest: /etc/fail2ban/jail.local
        content: |
          [DEFAULT]
          bantime = 3600
          findtime = 600
          maxretry = 5

          [sshd]
          enabled = true
      notify: restart fail2ban

    # Automatic updates (Debian/Ubuntu only)
    - name: Install unattended-upgrades
      ansible.builtin.package:
        name: unattended-upgrades
        state: present
      when: ansible_os_family == "Debian"

    - name: Configure automatic security updates
      ansible.builtin.copy:
        dest: /etc/apt/apt.conf.d/50unattended-upgrades
        content: |
          Unattended-Upgrade::Allowed-Origins {
              "${distro_id}:${distro_codename}-security";
          };
          Unattended-Upgrade::AutoFixInterruptedDpkg "true";
          Unattended-Upgrade::Remove-Unused-Dependencies "true";
      when: ansible_os_family == "Debian"

    # Disable root account
    - name: Lock root account
      ansible.builtin.user:
        name: root
        password_lock: true

    # Audit
    - name: Install auditd
      ansible.builtin.package:
        name: auditd
        state: present

    - name: Start and enable auditd
      ansible.builtin.service:
        name: auditd
        state: started
        enabled: true

  handlers:
    - name: restart sshd
      ansible.builtin.service:
        name: sshd
        state: restarted

    - name: restart fail2ban
      ansible.builtin.service:
        name: fail2ban
        state: restarted
Points clés à retenir : utiliser changed_when pour contrôler les changements rapportés ; valider avec le mode --check (dry-run) ; préfixer les variables par rôle (ex. nginx_port) ; régler forks pour la parallélisation ; activer le pipelining SSH ; utiliser async pour les tâches longues ; utiliser serial pour les rolling deployments ; lancer --syntax-check avant tout déploiement.

Documentation créée le 2026-02-02 | Ansible Best Practices & Production Playbooks