Merge branch 'master' into framadate

certbot_on_virtu
Michaël Paulon 2020-06-02 23:12:34 +02:00
commit a4e09e92f9
91 changed files with 3192 additions and 351 deletions

View File

@ -2,6 +2,10 @@
[defaults]
# Explicitly redefine some defaults to make play execution work
roles_path = ./roles
vars_plugins = ./vars_plugins
# Do not create .retry files
retry_files_enabled = False
@ -21,6 +25,12 @@ forks = 15
# Some SSH connections will take time
timeout = 60
# Enable fact_caching
gathering = smart
fact_caching = jsonfile
fact_caching_connection = ~/.cache/ansible/json/
fact_caching_timeout = 86400
[privilege_escalation]
# Use sudo to get privileged access
@ -45,3 +55,18 @@ api_hostname = intranet.crans.org
# Whether or not to use vault_cranspasswords
use_cpasswords = True
# Specify the cache plugin for the re2o API. By default, nothing is cached
cache = jsonfile
# Only used for memcached plugin
# List of connection information for the memcached DBs
# Default is ['127.0.0.1:11211']
# memcached_connection = ['127.0.0.1:11211']
# Time in seconds before the cache expires. 0 means the cache never expires.
# Default is 24 hours.
timeout = 86400
# Default is 12 hours.
timeout_token = 43200
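
A minimal sketch of how the [re2o] keys above are consumed, assuming a plain
ConfigParser read of ansible.cfg from the repository root (the lookup plugin
further down does the equivalent through its _readconfig helper):

from configparser import ConfigParser

config = ConfigParser()
config.read("ansible.cfg")  # assumption: executed from the repository root

cache_plugin = config.get("re2o", "cache", fallback=None)               # "jsonfile"
timeout = config.getint("re2o", "timeout", fallback=86400)              # data cache TTL, 24 h
timeout_token = config.getint("re2o", "timeout_token", fallback=43200)  # token TTL, 12 h
print(cache_plugin, timeout, timeout_token)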

View File

@ -35,9 +35,6 @@
# Scripts will tell users to go there to manage their account
intranet_url: 'https://intranet.crans.org/'
# Backup password
backuppc_rsyncd_passwd: "{{ vault_backuppc_rsyncd_passwd }}"
# Will be in /usr/scripts/
crans_scripts_git: "http://gitlab.adm.crans.org/nounous/scripts.git"
@ -51,46 +48,13 @@
- ldap-client
- openssh
- sudo
- rsync-client
- ntp-client
- crans-scripts
# Deploy NFS only on campus
- hosts: crans_server
roles:
- nfs-common
# Deploy LDAP replica
- hosts: odlyd.adm.crans.org,soyouz.adm.crans.org,fy.adm.crans.org,thot.adm.crans.org
roles: [] # TODO
# Playbook to deploy autofs NFS
- hosts: crans_server,!odlyd.adm.crans.org,!zamok.adm.crans.org,!omnomnom.adm.crans.org,!owl.adm.crans.org,!owncloud-srv.adm.crans.org
roles:
- nfs-autofs
# Deploy home permanent
- hosts: zamok.adm.crans.org,omnomnom.adm.crans.org,owl.adm.crans.org,owncloud-srv.adm.crans.org
roles:
- home-permanent
# Redirect local mail to mailserver
- hosts: crans_server,test_vm,!redisdead.adm.crans.org
vars:
mail_root: root@crans.org
mail_snmp_server: smtp.adm.crans.org
mail_defaulthost: crans.org
roles:
- nullmailer
# Send logs to thot
- hosts: server,!thot.adm.crans.org
vars:
rsyslog:
server: thot.adm.crans.org
roles:
- rsyslog-client
- hosts: otis.adm.crans.org
roles:
- ansible
@ -99,3 +63,17 @@
- hosts: zamok.adm.crans.org
roles:
- zamok-tools
- import_playbook: plays/mail.yml
- import_playbook: plays/nfs.yml
- import_playbook: plays/logs.yml
- import_playbook: plays/backup.yml
- import_playbook: plays/network-interfaces.yml
- import_playbook: plays/monitoring.yml
# Services that only apply to a subset of servers
- import_playbook: plays/tv.yml
- import_playbook: plays/mailman.yml
- import_playbook: plays/dhcp.yml
- import_playbook: plays/dns.yml
- import_playbook: plays/wireguard.yml

View File

@ -9,6 +9,7 @@
apt:
state: absent
name:
- at
- arpwatch # old sniffing
- collectd
- collectd-utils # old monitoring
@ -28,6 +29,7 @@
- monitoring-plugins-standard
- monitoring-plugins-basic
- monitoring-plugins-common
- monit
- libmonitoring-plugin-perl
- snmp
- nagios-plugins-contrib
@ -64,6 +66,9 @@
path: "{{ item }}"
state: absent
loop:
- /etc/bcfg2.conf
- /etc/bcfg2.conf.ucf-dist
- /etc/crans
- /etc/cron.d/munin-crans
- /etc/cron.d/munin-node
- /etc/cron.d/munin-node.dpkg-dist
@ -76,15 +81,31 @@
- /etc/cron.d/autobcfg2
- /etc/cron.d/bcfg2-run
- /etc/cron.d/pull-repos-scripts
- /etc/default/bcfg2
- /etc/default/bcfg2.ucf-dist
- /etc/munin
- /etc/icinga2
- /etc/init.d/bcfg2
- /etc/nagios
- /etc/nagios-plugins
- /etc/nut
- /etc/nginx/sites-enabled/status
- /etc/nginx/sites-available/status
- /etc/pnp4nagios
- /var/local/aptdater
- /etc/apt-dater-host.conf
- /etc/sudoers.d/apt-dater-host
- /etc/apt/apt.conf.d/70debconf
- /etc/apt/apt.conf.d/01aptitude
- /etc/cron.weekly/git_dirty_repo
- /etc/cron.daily/git_dirty_repo
- /etc/cron.hourly/bcfg2
- /etc/cron.d/letsencrypt_check_cert
- /etc/nss-ldapd.conf
- /etc/cron.daily/bcfg2
- /etc/monit
- /etc/ldap/ldap.conf
- /etc/letsencrypt/conf.d/localhost.ini
# - name: Upgrade
# apt:
# upgrade: dist

View File

@ -1,4 +1,5 @@
---
# Custom header
dirty: "{{lookup('pipe', 'git diff --quiet || echo dirty')}}"
ansible_header: |
+++++++++++++++++++++++++++++++++++++++++++++++++++
@ -11,3 +12,6 @@ ansible_header: |
{% endif %}
+++++++++++++++++++++++++++++++++++++++++++++++++++
# Crans subnets
adm_subnet: 10.231.136.0/24

View File

@ -0,0 +1,8 @@
---
postfix:
primary: false
secondary: true
public: true
dkim: true
mailman: false
titanic: true

View File

@ -0,0 +1,8 @@
---
postfix:
primary: true
secondary: false
public: true
dkim: true
mailman: true
titanic: false

View File

@ -0,0 +1,8 @@
---
postfix:
primary: false
secondary: true
public: true
dkim: true
mailman: false
titanic: false

View File

@ -0,0 +1,8 @@
---
postfix:
primary: false
secondary: true
public: true
dkim: true
mailman: false
titanic: true

logos/crans.png 100644 (new binary image, 10 KiB)

View File

@ -7,6 +7,8 @@ For a detailed example look at https://github.com/ansible/ansible/blob/3dbf89e8a
The API Client has been adapted from https://gitlab.federez.net/re2o/re2oapi
"""
from ansible.plugins.loader import cache_loader
from pathlib import Path
import datetime
import requests
@ -28,38 +30,67 @@ from ansible.config.manager import ConfigManager
# Ansible Logger to stdout
display = Display()
# Number of seconds before expiration where renewing the token is done
TIME_FOR_RENEW = 120
# Default name of the file to store tokens. Path $HOME/{DEFAULT_TOKEN_FILENAME}
DEFAULT_TOKEN_FILENAME = '.re2o.token'
# If no cache plugin is used, then use this as the token timeout.
# Overridden by the timeout_token key from the Ansible configuration.
TIME_FOR_RENEW = 43200  # 12 hours
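# Illustration (not part of the plugin): the renewal rule used by the token
# property below. A token is renewed as soon as it would expire within
# TIME_FOR_RENEW seconds. Timestamps here are hypothetical.
#
#     import datetime
#     expiration = datetime.datetime(2020, 6, 3, 18, 0)
#     now = datetime.datetime(2020, 6, 3, 7, 0)
#     # 18:00 < 07:00 + 12 h (= 19:00) -> True: renew ahead of expiry.
#     expiration < now + datetime.timedelta(seconds=TIME_FOR_RENEW)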
class Client:
"""
Class based client to contact re2o API.
"""
def __init__(self, hostname, username, password, use_tls=True):
def __init__(self, hostname, username, password,
use_tls=True, cachetoken=None):
"""
:arg hostname: The hostname of the Re2o instance to use.
:arg username: The username to use.
:arg password: The password to use.
:arg use_tls: A boolean to specify whether the client should use a
TLS connection. Default is True. Please keep it.
:arg cachetoken: The cache to use to manage authentication token.
If it is None, then store the token in a file.
"""
self.use_tls = use_tls
self.hostname = hostname
self._username = username
self._password = password
self.token_file = Path.home() / DEFAULT_TOKEN_FILENAME
self._cachetoken = cachetoken
self.token_file = None
if self._cachetoken is None:
self.token_file = Path.home() / DEFAULT_TOKEN_FILENAME
display.vvv("Setting token file to {}".format(self.token_file))
else:
try:
display.vvv("Using {} as cache plugin"
.format(self._cachetoken.plugin_name))
except AttributeError:
# Happens when plugin_name is not implemented...
# For example with memcached
display.vvv("Using cache plugin specified in configuration.")
display.v("Connecting to {hostname} as user {user}".format(
hostname=to_native(self.hostname), user=to_native(self._username)))
try:
self.token = self._get_token_from_file()
except AnsibleFileNotFound:
display.vv("Force renew the token")
self._force_renew_token()
@property
def token(self):
if self._cachetoken:
display.vvv("Trying to get token from cache.")
if self._cachetoken.contains("auth_token"):
display.vvv("Found token in cache.")
return self._cachetoken.get("auth_token")
else:
display.vvv("Token not found. Forcing renew.")
return self._force_renew_token()
else:
try:
    token = self._get_token_from_file()
    if token['expiration'] < datetime.datetime.now() + \
            datetime.timedelta(seconds=TIME_FOR_RENEW):
        return self._force_renew_token()
    # The stored token is still valid: reuse it.
    return token['token']
except AnsibleError:
    return self._force_renew_token()
def _get_token_from_file(self):
display.vv("Trying to fetch token from {}".format(self.token_file))
@ -93,13 +124,18 @@ class Client:
)
)
else:
display.vv("""Token successfully retreived from
file {token}""".format(token=self.token_file))
display.vv("Token successfully retreived from "
"file {token}".format(token=self.token_file))
return ret
def _force_renew_token(self):
self.token = self._get_token_from_server()
self._save_token_to_file()
token = self._get_token_from_server()
if self._cachetoken:
display.vvv("Storing authentication token in cache")
self._cachetoken.set("auth_token", token.get('token'))
else:
self._save_token_to_file(token)
return token.get('token')
def _get_token_from_server(self):
display.vv("Requesting a new token for {user}@{host}".format(
@ -139,7 +175,7 @@ class Client:
def _parse_date(self, date, date_format="%Y-%m-%dT%H:%M:%S"):
return datetime.datetime.strptime(date.split('.')[0], date_format)
def _save_token_to_file(self):
def _save_token_to_file(self, token):
display.vv("Saving token to file {}".format(self.token_file))
try:
# Read previous data to avoid erasures
@ -153,8 +189,8 @@ class Client:
if self.hostname not in data.keys():
data[self.hostname] = {}
data[self.hostname][self._username] = {
'token': self.token['token'],
'expiration': self.token['expiration'].isoformat(),
'token': token['token'],
'expiration': token['expiration'].isoformat(),
}
try:
@ -169,22 +205,6 @@ class Client:
display.vv("Token successfully written to file {}"
.format(self.token_file))
def get_token(self):
"""
Retrieves the token to use for the current connection.
Automatically renewed if needed.
"""
if self.need_renew_token:
self._force_renew_token()
return self.token['token']
@property
def need_renew_token(self):
return self.token['expiration'] < \
datetime.datetime.now() + \
datetime.timedelta(seconds=TIME_FOR_RENEW)
def _request(self, method, url, headers={}, params={}, *args, **kwargs):
display.vv("Building the {method} request to {url}.".format(
method=method.upper(),
@ -192,9 +212,9 @@ class Client:
))
# Force the 'Authorization' field with the right token.
display.vvv("Forcing authentication token.")
display.vvv("Forcing authentication token in headers.")
headers.update({
'Authorization': 'Token {}'.format(self.get_token())
'Authorization': 'Token {}'.format(self.token)
})
# Use a json format unless the user already specified something
@ -213,10 +233,10 @@ class Client:
# Force re-login to the server (case of a wrong token but valid
# credentials) and then retry the request without catching errors.
display.vv("Token refused. Trying to refresh the token.")
self._force_renew_token()
token = self._force_renew_token()
headers.update({
'Authorization': 'Token {}'.format(self.get_token())
'Authorization': 'Token {}'.format(token)
})
display.vv("Re-performing the request {method} {url}".format(
method=method.upper(),
@ -320,6 +340,18 @@ class LookupModule(LookupBase):
Queries the re2o API and returns the list of
all machines whose role_type is role_name.
- cidrs, a list of subnet_names: Will get back the list of all cidrs
corresponding to this particular
subnet.
- prefixv6, a list of subnet_names: Will get back the list of all ipv6
prefixes corresponding to this
particular subnet.
- A simple endpoint: Will make a raw query to the API using this
endpoint.
If a term is not in the previous list, make a raw query to the API
with endpoint term.
@ -338,8 +370,135 @@ class LookupModule(LookupBase):
dnszones: "{{ lookup('re2oapi', 'dnszones') }}"
tasks:
- debug: var=dnszones
The following play will use the debug module to output
all the IPv6 prefixes corresponding to the adherents and adm networks
- hosts: sputnik.adm.crans.org
vars:
prefixv6: "{{ lookup('re2oapi', 'previxv6', 'adherents', 'adm') }}"
tasks:
- debug:
msg: "{{ prefixv6 | ipwrap }}"
The following will get the IP addresses of all servers with role
dns-authoritary-master on vlan 2.
- hosts: sputnik.adm.crans.org
vars:
bind:
masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}"
tasks:
- name: Display ipv6
debug:
ipv6: "{{ masters | json_query('servers[].interface[?vlan_id==`2`].ipv6[][].ipv6') }}"
- name: Display ipv4
debug:
ipv4: "{{ masters | json_query('servers[].interface[?vlan_id==`2`].ipv4[]') }}"
"""
def _readconfig(self, section="re2o", key=None, default=None,
boolean=False, integer=False):
config = self._config
if not config:
return default
else:
if config.has_option(section, key):
display.vvv("Found key {} in configuration file".format(key))
if boolean:
return config.getboolean(section, key)
elif integer:
return config.getint(section, key)
else:
return config.get(section, key)
else:
return default
def _manage_cachedir(self, cachedir=None, plugin=None):
try:
self._uri = cachedir / plugin
except Exception:
raise AnsibleError("Undefined specification for cache plugin")
display.vvv("Cache directory is {}".format(self._uri))
if not self._uri.exists():
# Creates Ansible cache directory with right permissions
# if it doesn't exist yet.
display.vvv("Cache directory doesn't exist. Creating it.")
try:
self._uri.mkdir(mode=0o700, parents=True)
except Exception as e:
raise AnsibleError("""Unable to create {dir}.
Original error was : {err}""".format(dir=self._uri,
err=to_native(e)))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
config_manager = ConfigManager()
config_file = config_manager.data.get_setting(name="CONFIG_FILE").value
self._config = ConfigParser()
self._config.read(config_file)
display.vvv("Using {} as configuration file.".format(config_file))
self._api_hostname = None
self._api_username = None
self._api_password = None
self._use_cpasswords = None
self._cache_plugin = None
self._cache = None
self._timeout = 86400 # 1 day
self._cachetoken = None
self._timeouttoken = TIME_FOR_RENEW # 12 hours
if self._config.has_section("re2o"):
display.vvv("Found section re2o in configuration file")
self._api_hostname = self._readconfig(key="api_hostname")
self._use_cpasswords = self._readconfig(key="use_cpasswords",
boolean=True)
self._cache_plugin = self._readconfig(key="cache")
self._timeout = self._readconfig(key="timeout", integer=True,
default=86400)
self._timeouttoken = self._readconfig(key="timeout_token",
integer=True,
default=TIME_FOR_RENEW)
if self._cache_plugin is not None:
display.vvv("Using {} as cache plugin".format(self._cache_plugin))
cachedir = Path.home() / ".cache/ansible/re2oapi"
if self._cache_plugin == 'jsonfile':
self._manage_cachedir(cachedir=cachedir, plugin='json')
elif self._cache_plugin == 'yaml':
self._manage_cachedir(cachedir=cachedir, plugin='yaml')
elif self._cache_plugin == 'pickle':
self._manage_cachedir(cachedir=cachedir, plugin='pickle')
elif self._cache_plugin == 'memcached':
# requires packages python3-memcache and memcached
display.vvvv("Please make sure you have installed packages"
"python3-memcache and memcached"
)
self._uri = self._readconfig(key='memcached_connection',
default=['127.0.0.1:11211'],
)
else:
raise AnsibleError("Cache plugin {} not supported"
.format(self._cache_plugin))
self._cache = cache_loader.get(self._cache_plugin,
_uri=self._uri,
_timeout=self._timeout,
)
self._cachetoken = cache_loader.get(self._cache_plugin,
_uri=self._uri,
_timeout=self._timeouttoken,
)
def run(self, terms, variables=None, api_hostname=None, api_username=None,
api_password=None, use_tls=True):
@ -354,33 +513,20 @@ class LookupModule(LookupBase):
:returns: A list of results to the specific queries.
"""
config_manager = ConfigManager()
config_file = config_manager.data.get_setting(name="CONFIG_FILE").value
config = ConfigParser()
config.read(config_file)
# Use the hostname specified by the user if it exists.
if api_hostname is not None:
display.vvv("Overriding api_hostname with {}".format(api_hostname))
else:
api_hostname = self._api_hostname
use_cpasswords = False
if config.has_section("re2o"):
display.vvv("Found section re2o in configuration file")
if config.has_option("re2o", "api_hostname"):
display.vvv("Found option api_hostname in config file")
api_hostname = config.get("re2o", "api_hostname")
display.vvv("Override api_hostname with {} from configuration"
.format(api_hostname))
if config.has_option("re2o", "use_cpasswords"):
display.vvv("Found option use_cpasswords in config file")
use_cpasswords = config.getboolean("re2o", "use_cpasswords")
display.vvv("Override api_hostname with {} from configuration"
.format(use_cpasswords))
if api_hostname is None:
if self._api_hostname is None:
raise AnsibleError(to_native(
'You must specify a hostname to contact re2oAPI'
))
if api_username is None and api_password is None and use_cpasswords:
display.vvv("Use cpasswords vault to get API credentials.")
if (api_username is None and api_password is None
and self._use_cpasswords):
display.vvv("Using cpasswords vault to get API credentials.")
api_username = variables.get('vault_re2o_service_user')
api_password = variables.get('vault_re2o_service_password')
@ -394,12 +540,12 @@ class LookupModule(LookupBase):
'You must specify a valid password to connect to re2oAPI'
))
api_client = Client(api_hostname, api_username,
api_password, use_tls=True)
api_client = Client(api_hostname, api_username, api_password,
use_tls=True, cachetoken=self._cachetoken)
res = []
dterms = collections.deque(terms)
machines_roles = None # TODO : Cache this.
display.vvv("Lookup terms are {}".format(terms))
while dterms:
term = dterms.popleft()
@ -411,14 +557,31 @@ class LookupModule(LookupBase):
elif term == 'get_role':
try:
role_name = dterms.popleft()
roles, machines_roles = self._get_role(api_client,
role_name,
machines_roles,
)
roles = self._get_role(api_client, role_name)
res.append(roles)
except IndexError:
display.v("Error in re2oapi : No role_name provided")
raise AnsibleError("role_name not found in arguments.")
elif term == 'prefixv6':
prefixes = []
while dterms:
subnet_name = dterms.popleft()
prefixes.append([self._get_prefix(api_client, subnet_name)])
if prefixes:
res.extend(prefixes)
else:
display.v("Error in re2oapi : No subnet_name provided")
raise AnsibleError("subnet_name not found in arguments.")
elif term == 'cidrs':
cidrs = []
while dterms:
subnet_name = dterms.popleft()
cidrs.append([self._get_cidrs(api_client, subnet_name)])
if cidrs:
res.extend(cidrs)
else:
display.v("Error in re2oapi : No subnet_name provided")
raise AnsibleError("subnet_name not found in arguments.")
else:
try:
res.append(self._rawquery(api_client, term))
@ -429,59 +592,185 @@ class LookupModule(LookupBase):
.format(to_native(e)))
return res
def _get_cache(self, key):
if self._cache:
return self._cache.get(key)
else:
return None
def _set_cache(self, key, value):
if self._cache:
return self._cache.set(key, value)
else:
return None
def _is_cached(self, key):
if self._cache:
return self._cache.contains(key)
else:
return False
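# Illustration (not part of the plugin): the Ansible cache plugin API that the
# three helpers above wrap; the path and values are hypothetical.
#
#     from ansible.plugins.loader import cache_loader
#     cache = cache_loader.get('jsonfile',
#                              _uri='~/.cache/ansible/re2oapi/json',
#                              _timeout=86400)
#     cache.set('dnszones', ['crans.org', 'adm.crans.org'])
#     cache.contains('dnszones')   # True until _timeout seconds have passed
#     cache.get('dnszones')        # ['crans.org', 'adm.crans.org']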
def _getzones(self, api_client):
display.v("Getting dns zone names")
zones = api_client.list('dns/zones')
zones_name = [zone["name"][1:] for zone in zones]
zones, zones_name = None, None
if self._is_cached('dnszones'):
zones_name = self._get_cache('dnszones')
if zones_name is not None:
display.vvv("Found dnszones in cache.")
else:
if self._is_cached('dns_zones'):
zones = self._get_cache('dns_zones')
if zones is not None:
display.vvv("Found dns/zones in cache.")
else:
display.vvv("Contacting the API, endpoint dns/zones...")
zones = api_client.list('dns/zones')
display.vvv("...Done")
zones_name = [zone["name"][1:] for zone in zones]
display.vvv("Storing dnszones in cache.")
self._set_cache('dnszones', zones_name)
display.vvv('\n')
return zones_name
def _getreverse(self, api_client):
display.v("Getting dns reverse zones")
display.vvv("Contacting the API, endpoint dns/reverse-zones...")
zones = api_client.list('dns/reverse-zones')
display.vvv("...Done")
res = []
for zone in zones:
if zone['ptr_records']:
display.vvv('Found PTR records')
subnets = []
for net in zone['cidrs']:
net = netaddr.IPNetwork(net)
if net.prefixlen > 24:
subnets.extend(net.subnet(32))
elif net.prefixlen > 16:
subnets.extend(net.subnet(24))
elif net.prefixlen > 8:
subnets.extend(net.subnet(16))
else:
subnets.extend(net.subnet(8))
for subnet in subnets:
_address = netaddr.IPAddress(subnet.first)
rev_dns_a = _address.reverse_dns.split('.')[:-1]
if subnet.prefixlen == 8:
zone_name = '.'.join(rev_dns_a[3:])
elif subnet.prefixlen == 16:
zone_name = '.'.join(rev_dns_a[2:])
elif subnet.prefixlen == 24:
zone_name = '.'.join(rev_dns_a[1:])
res.append(zone_name)
display.vvv("Found reverse zone {}".format(zone_name))
zones, res = None, None
if self._is_cached('dnsreverse'):
res = self._get_cache('dnsreverse')
if res is not None:
display.vvv("Found dnsreverse in cache.")
else:
if self._is_cached('dns_reverse-zones'):
zones = self._get_cache('dns_reverse-zones')
if zones is not None:
display.vvv("Found dns/reverse-zones in cache.")
else:
display.vvv("Contacting the API, endpoint dns/reverse-zones..")
zones = api_client.list('dns/reverse-zones')
display.vvv("...Done")
display.vvv("Trying to format dns reverse in a nice way.")
res = []
for zone in zones:
if zone['ptr_records']:
display.vvv('Found PTR records')
subnets = []
for net in zone['cidrs']:
net = netaddr.IPNetwork(net)
if net.prefixlen > 24:
subnets.extend(net.subnet(32))
elif net.prefixlen > 16:
subnets.extend(net.subnet(24))
elif net.prefixlen > 8:
subnets.extend(net.subnet(16))
else:
subnets.extend(net.subnet(8))
for subnet in subnets:
_address = netaddr.IPAddress(subnet.first)
rev_dns_a = _address.reverse_dns.split('.')[:-1]
if subnet.prefixlen == 8:
zone_name = '.'.join(rev_dns_a[3:])
elif subnet.prefixlen == 16:
zone_name = '.'.join(rev_dns_a[2:])
elif subnet.prefixlen == 24:
zone_name = '.'.join(rev_dns_a[1:])
res.append(zone_name)
display.vvv("Found reverse zone {}".format(zone_name))
if zone['ptr_v6_records']:
display.vvv("Found PTR v6 record")
net = netaddr.IPNetwork(zone['prefix_v6']+'/'+str(zone['prefix_v6_length']))
net_class = max(((net.prefixlen -1) // 4) +1, 1)
net = netaddr.IPNetwork(zone['prefix_v6']
+ '/'
+ str(zone['prefix_v6_length']))
net_class = max(((net.prefixlen - 1) // 4) + 1, 1)
zone6_name = ".".join(
netaddr.IPAddress(net.first).reverse_dns.split('.')[32 - net_class:])[:-1]
netaddr.IPAddress(net.first)
.reverse_dns.split('.')[32 - net_class:])[:-1]
res.append(zone6_name)
display.vvv("Found reverse zone {}".format(zone6_name))
return list(set(res))
display.vvv("Storing dns reverse zones in cache.")
self._set_cache('dnsreverse', list(set(res)))
display.vvv('\n')
return res
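# Worked example (illustration only) of the slicing performed above, with
# netaddr used exactly as in _getreverse; the addresses are hypothetical.
#
#     import netaddr
#     # IPv4: a /24 gives one zone named after the first three octets, reversed.
#     addr = netaddr.IPAddress(netaddr.IPNetwork('10.231.136.0/24').first)
#     addr.reverse_dns                                # '0.136.231.10.in-addr.arpa.'
#     '.'.join(addr.reverse_dns.split('.')[:-1][1:])  # '136.231.10.in-addr.arpa'
#
#     # IPv6: a /40 prefix spans 10 nibbles, since max(((40 - 1) // 4) + 1, 1) == 10.
#     net = netaddr.IPNetwork('2a0c:700::/40')
#     net_class = max(((net.prefixlen - 1) // 4) + 1, 1)
#     rev = netaddr.IPAddress(net.first).reverse_dns   # 32 nibbles + 'ip6.arpa.'
#     '.'.join(rev.split('.')[32 - net_class:])[:-1]   # '0.0.0.0.7.0.c.0.a.2.ip6.arpa'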
def _rawquery(self, api_client, endpoint):
display.v("Make a raw query to endpoint {}".format(endpoint))
return api_client.list(endpoint)
res = None
if self._is_cached(endpoint.replace('/', '_')):
res = self._get_cache(endpoint.replace('/', '_'))
if res is not None:
display.vvv("Found {} in cache.".format(endpoint))
else:
display.v("Making a raw query to {host}/api/{endpoint}"
.format(host=self._api_hostname, endpoint=endpoint))
res = api_client.list(endpoint)
display.vvv("Storing result in cache.")
self._set_cache(endpoint.replace('/', '_'), res)
def _get_role(self, api_client, role_name, machines_roles):
if machines_roles is None:
machines_roles = api_client.list("machines/role")
return list(filter(lambda machine: machine["role_type"] == role_name,
machines_roles)), machines_roles
display.vvv('\n')
return res
def _get_role(self, api_client, role_name):
res, machines_roles = None, None
if self._is_cached(role_name):
res = self._get_cache(role_name)
if res is not None:
display.vvv("Found {} in cache.".format(role_name))
else:
if self._is_cached("machines_role"):
machines_roles = self._get_cache("machines_role")
if machines_roles is not None:
display.vvv("Found machines/roles in cache.")
else:
machines_roles = api_client.list("machines/role")
display.vvv("Storing machines/role in cache.")
self._set_cache("machines_role", machines_roles)
res = list(filter(lambda m: m["role_type"] == role_name,
machines_roles))
display.vvv("Storing {} in cache.".format(role_name))
self._set_cache(role_name, res)
display.vvv('\n')
return res
def _get_prefix(self, api_client, subnet_name):
prefixv6 = None
if self._is_cached(subnet_name + '_v6'):
display.vvv("Found subnet {} in cache.".format(subnet_name))
prefixv6 = self._get_cache(subnet_name + '_v6')
else:
Mtypes = self._rawquery(api_client, 'machines/iptype')
iptype = list(filter(lambda x: x['type'] == subnet_name, Mtypes))
prefixv6 = iptype[0]['prefix_v6'] + '/64'
display.vvv("Storing subnet {} in cache".format(subnet_name))
self._set_cache(subnet_name + '_v6', prefixv6)
return prefixv6
def _get_cidrs(self, api_client, subnet_name):
cidrs = None
if self._is_cached(subnet_name):
display.vvv("Found subnet {} in cache.".format(subnet_name))
cidrs = self._get_cache(subnet_name)
else:
Mtypes = self._rawquery(api_client, 'machines/iptype')
iptype = list(filter(lambda x: x['type'] == subnet_name, Mtypes))[0]
ips = iptype['domaine_ip_start']
ipe = iptype['domaine_ip_stop']
cidrs = list(map(lambda a: str(a), netaddr.iprange_to_cidrs(ips, ipe)))
display.vvv("Storing subnet {} in cache".format(subnet_name))
self._set_cache(subnet_name, cidrs)
return cidrs
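# Illustration (not part of the plugin): netaddr.iprange_to_cidrs collapses a
# re2o start/stop range into CIDR strings, as _get_cidrs does above. The range
# is hypothetical.
#
#     import netaddr
#     ips, ipe = '10.231.136.1', '10.231.136.254'
#     [str(net) for net in netaddr.iprange_to_cidrs(ips, ipe)]
#     # ['10.231.136.1/32', '10.231.136.2/31', ..., '10.231.136.128/26',
#     #  '10.231.136.192/27', ..., '10.231.136.254/32']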

View File

@ -1,70 +0,0 @@
#!/usr/bin/env ansible-playbook
---
# Deploy Prometheus
- hosts: fyre.adm.crans.org
vars:
# Prometheus targets.json
prometheus_targets:
- targets: "{{ groups['server'] | list | sort }}"
prometheus_ups_snmp_targets:
- targets: [pulsar.adm.crans.org]
prometheus_unifi_snmp_targets:
- targets: "{{ groups['crans_unifi'] | list | sort }}"
prometheus_blackbox_targets:
- targets:
- https://crans.org
- https://www.crans.org
- https://grafana.crans.org
- https://wiki.crans.org
- https://pad.crans.org
prometheus_apache_targets:
- targets: [zamok.adm.crans.org]
snmp_unifi_password: "{{ vault_snmp_unifi_password }}"
roles:
- prometheus
- prometheus-alertmanager
- prometheus-snmp-exporter
- prometheus-blackbox-exporter
# Monitor all hosts
- hosts: server,test_vm
roles:
- prometheus-node-exporter
# Export apache metrics
- hosts: zamok.adm.crans.org
roles:
- prometheus-apache-exporter
# Configure HP RAID monitoring
# You can list SCSI drives with `lsscsi -g`
- hosts: fyre.adm.crans.org,gateau.adm.crans.org
roles:
- smartd-hp-smartarray
# Deploy grafana
- hosts: fyre.adm.crans.org
vars:
grafana_root_url: https://grafana.crans.org
ldap_base: 'dc=crans,dc=org'
ldap_master_ipv4: '10.231.136.19'
ldap_user_tree: "cn=Utilisateurs,{{ ldap_base }}"
ldap_grafana_bind_dn: "cn=grafana,ou=service-users,{{ ldap_base }}"
ldap_grafana_passwd: "{{ vault_ldap_grafana_passwd }}"
roles:
- grafana
# Deploy NinjaBot
- hosts: fyre.adm.crans.org
roles:
- ninjabot
# Monitor mailq with a special text exporter
- hosts: redisdead.adm.crans.org
roles:
- prometheus-node-exporter-postfix
# Monitor logs with mtail
- hosts: thot.adm.crans.org
roles:
- mtail

View File

@ -1,54 +1,5 @@
#!/usr/bin/env ansible-playbook
---
# Deploy tunnel
- hosts: sputnik.adm.crans.org
vars:
debian_mirror: http://mirror.crans.org/debian
wireguard:
sputnik: true
private_key: "{{ vault_wireguard_sputnik_private_key }}"
peer_public_key: "{{ vault_wireguard_boeing_public_key }}"
roles:
- wireguard
- hosts: boeing.adm.crans.org
vars:
# Debian mirror on adm
debian_mirror: http://mirror.adm.crans.org/debian
wireguard:
sputnik: false
if: ens20
private_key: "{{ vault_wireguard_boeing_private_key }}"
peer_public_key: "{{ vault_wireguard_sputnik_public_key }}"
roles:
- wireguard
# Deploy DHCP server
- hosts: dhcp.adm.crans.org
vars:
dhcp:
authoritative: true
roles:
- isc-dhcp-server
# Deploy recursive DNS cache server
- hosts: odlyd.adm.crans.org
roles:
- bind-recursive
# Deploy authoritative DNS server
- hosts: silice.adm.crans.org,sputnik.adm.crans.org,boeing.adm.crans.org
vars:
certbot_dns_secret: "{{ vault_certbot_dns_secret }}"
certbot_adm_dns_secret: "{{ vault_certbot_adm_dns_secret }}"
bind:
masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}"
slaves: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-slave')[0] }}"
zones: "{{ lookup('re2oapi', 'dnszones') }}"
reverse: "{{ lookup('re2oapi', 'dnsreverse') }}"
roles:
- bind-authoritative
# Deploy reverse proxy
- hosts: bakdaur.adm.crans.org,frontdaur.adm.crans.org
vars:
@ -75,7 +26,7 @@
- {from: lutim.crans.org, to: 10.231.136.69}
- {from: zero.crans.org, to: 10.231.136.76}
- {from: pad.crans.org, to: "10.231.136.76:9001"}
- {from: ethercalc.crans.org, to: 10.231.136.203}
- {from: ethercalc.crans.org, to: "10.231.136.203:8000"}
- {from: mediadrop.crans.org, to: 10.231.136.106}
- {from: videos.crans.org, to: 10.231.136.106}
- {from: video.crans.org, to: 10.231.136.106}
@ -190,28 +141,3 @@
remote_as: 8218
roles:
- quagga-ipv6
# Deploy postfix on mail servers
- hosts: titanic.adm.crans.org
vars:
postfix:
primary: false
secondary: true
public: true
dkim: true
mailman: false
titanic: true
roles:
- postfix
- hosts: sputnik.adm.crans.org
vars:
postfix:
primary: false
secondary: true
public: true
dkim: true
mailman: false
titanic: false
roles:
- postfix

plays/backup.yml 100755
View File

@ -0,0 +1,15 @@
#!/usr/bin/env ansible-playbook
---
# zephir backs up virtual machines.
# omnomnom backs up home dirs.
# Rsync client on all servers to allow backups
- hosts: server
vars:
# Backup password
backuppc_rsyncd_passwd: "{{ vault_backuppc_rsyncd_passwd }}"
roles: ["rsync-client"]
# Backuppc backup software
- hosts: zephir.adm.crans.org,omnomnom.adm.crans.org
roles: ["backuppc"]

plays/dhcp.yml 100755
View File

@ -0,0 +1,8 @@
#!/usr/bin/env ansible-playbook
---
# Deploy DHCP server
- hosts: dhcp.adm.crans.org
vars:
dhcp:
authoritative: true
roles: ["isc-dhcp-server"]

plays/dns.yml 100755
View File

@ -0,0 +1,17 @@
#!/usr/bin/env ansible-playbook
---
# Deploy recursive DNS cache server
- hosts: odlyd.adm.crans.org
roles: ["bind-recursive"]
# Deploy authoritative DNS server
- hosts: silice.adm.crans.org,sputnik.adm.crans.org,boeing.adm.crans.org
vars:
certbot_dns_secret: "{{ vault_certbot_dns_secret }}"
certbot_adm_dns_secret: "{{ vault_certbot_adm_dns_secret }}"
bind:
masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}"
slaves: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-slave')[0] }}"
zones: "{{ lookup('re2oapi', 'dnszones') }}"
reverse: "{{ lookup('re2oapi', 'dnsreverse') }}"
roles: ["bind-authoritative"]

plays/logs.yml 100755
View File

@ -0,0 +1,11 @@
#!/usr/bin/env ansible-playbook
---
# thot is the log server.
# Servers need to send their logs to thot.
# Send logs to thot
- hosts: server,!thot.adm.crans.org
vars:
rsyslog:
server: thot.adm.crans.org
roles: ["rsyslog-client"]

plays/mail.yml 100755
View File

@ -0,0 +1,14 @@
#!/usr/bin/env ansible-playbook
---
# Redisdead is the main MX.
# Soyouz and titanic are the old backup MX.
# Boeing and sputnik are the new MX (still being installed?).
# All other servers use nullmailer to send local mail to the Crans SMTP.
# Redirect local mail to mailserver
- hosts: crans_server,!redisdead.adm.crans.org,!soyouz.adm.crans.org,!titanic.adm.crans.org,!boeing.adm.crans.org,!sputnik.adm.crans.org,!zamok.adm.crans.org
vars:
mail_root: root@crans.org
mail_snmp_server: smtp.adm.crans.org
mail_defaulthost: crans.org
roles: ["nullmailer"]

plays/mailman.yml 100755
View File

@ -0,0 +1,23 @@
#!/usr/bin/env ansible-playbook
---
# Deploy Mailman
- hosts: redisdead.adm.crans.org
vars:
mailman:
site_list: "nounou"
default_url: "https://lists.crans.org/"
default_host: "lists.crans.org"
default_language: "fr"
auth_basic: |
"On n'aime pas les spambots, donc on a mis un mot de passe. Le login est Stop et le mot de passe est Spam.";
spamassassin: "SpamAssassin_crans"
smtphost: "smtp.adm.crans.org"
mynetworks: ['138.231.0.0/16', '185.230.76.0/22', '2a0c:700:0::/40']
nginx:
ssl:
cert: /etc/letsencrypt/live/crans.org/fullchain.pem
key: /etc/letsencrypt/live/crans.org/privkey.pem
trusted_cert: /etc/letsencrypt/live/crans.org/chain.pem
roles:
- mailman
- nginx-mailman

View File

@ -0,0 +1,62 @@
#!/usr/bin/env ansible-playbook
---
# Deploy Prometheus and Grafana on monitoring server
- hosts: fyre.adm.crans.org
vars:
# Prometheus targets.json
prometheus:
node_targets: "{{ groups['server'] | list | sort }}"
ups_snmp_targets:
- pulsar.adm.crans.org # 0B
- quasar.adm.crans.org # 4J
unifi_snmp_targets: "{{ groups['crans_unifi'] | list | sort }}"
blackbox_targets:
- https://crans.org
- https://www.crans.org
- https://grafana.crans.org
- https://wiki.crans.org
- https://pad.crans.org
apache_targets: [zamok.adm.crans.org]
snmp_unifi_password: "{{ vault_snmp_unifi_password }}"
grafana:
root_url: https://grafana.crans.org
ldap_bind_dn: "cn=grafana,ou=service-users,{{ ldap_base }}"
ldap_passwd: "{{ vault_ldap_grafana_passwd }}"
ldap_base: 'dc=crans,dc=org'
ldap_master_ipv4: '10.231.136.19'
ldap_user_tree: "cn=Utilisateurs,{{ ldap_base }}"
roles:
- prometheus
- prometheus-alertmanager
- prometheus-snmp-exporter
- prometheus-blackbox-exporter
- ninjabot
- grafana
# Monitor all hosts
- hosts: server,test_vm
vars:
adm_ipv4: "{{ ansible_all_ipv4_addresses | ipaddr(adm_subnet) | first }}"
roles: ["prometheus-node-exporter"]
# Export apache metrics
- hosts: zamok.adm.crans.org
vars:
adm_ipv4: "{{ ansible_all_ipv4_addresses | ipaddr(adm_subnet) | first }}"
roles: ["prometheus-apache-exporter"]
# Configure HP RAID monitoring
# You can list SCSI drives with `lsscsi -g`
- hosts: fyre.adm.crans.org,gateau.adm.crans.org
roles: ["smartd-hp-smartarray"]
# Monitor mailq with a special text exporter
- hosts: redisdead.adm.crans.org
roles: ["prometheus-node-exporter-postfix"]
# Monitor logs with mtail
- hosts: thot.adm.crans.org
roles: ["mtail"]

View File

@ -14,7 +14,7 @@
- switch
- fil
- hosts: boeing.adm.crans.org,cochon.adm.crans.org,tracker.adm.crans.org,voyager.adm.crans.org,lutim.adm.crans.org,gateau.adm.crans.org,owncloud-srv.adm.crans.org,charybde.adm.crans.org,cas-srv.adm.crans.org,fyre.adm.crans.org,silice.adm.crans.org,frontdaur.adm.crans.org,bakdaur.adm.crans.org
- hosts: boeing.adm.crans.org,cochon.adm.crans.org,tracker.adm.crans.org,voyager.adm.crans.org,lutim.adm.crans.org,gateau.adm.crans.org,owncloud-srv.adm.crans.org,charybde.adm.crans.org,cas-srv.adm.crans.org,fyre.adm.crans.org,silice.adm.crans.org,frontdaur.adm.crans.org,bakdaur.adm.crans.org,ethercalc-srv.adm.crans.org,alice.adm.crans.org
vars:
vlan:
- name: srv
@ -66,5 +66,4 @@
dns: 185.230.78.152 185.230.78.4
dns_search: crans.org
ifnames: "{{ ifaces | json_query('results[?item==`adh`].stdout') }}"
roles:
- interfaces
roles: ["interfaces"]

plays/nfs.yml 100755
View File

@ -0,0 +1,18 @@
#!/usr/bin/env ansible-playbook
---
# Odlyd does not use NFS as it is the master backup.
# Servers outside of campus do not use NFS.
# zamok, omnomnom, owl and owncloud-srv use permanently mounted home dirs.
# All other servers on campus use autofs to dynamically mount home dirs.
# Deploy NFS only on campus
- hosts: crans_server
roles: ["nfs-common"]
# Deploy autofs NFS
- hosts: crans_server,!odlyd.adm.crans.org,!zamok.adm.crans.org,!omnomnom.adm.crans.org,!owl.adm.crans.org,!owncloud-srv.adm.crans.org
roles: ["nfs-autofs"]
# Deploy home permanent
- hosts: zamok.adm.crans.org,omnomnom.adm.crans.org,owl.adm.crans.org,owncloud-srv.adm.crans.org
roles: ["home-permanent"]

plays/tv.yml 100755
View File

@ -0,0 +1,6 @@
#!/usr/bin/env ansible-playbook
---
# Cochon contains DVB cards
- hosts: cochon.adm.crans.org
roles: ["mumudvb"]

View File

@ -0,0 +1,22 @@
#!/usr/bin/env ansible-playbook
---
# Deploy tunnel
- hosts: sputnik.adm.crans.org
vars:
debian_mirror: http://mirror.crans.org/debian
wireguard:
sputnik: true
private_key: "{{ vault_wireguard_sputnik_private_key }}"
peer_public_key: "{{ vault_wireguard_boeing_public_key }}"
roles: ["wireguard"]
- hosts: boeing.adm.crans.org
vars:
# Debian mirror on adm
debian_mirror: http://mirror.adm.crans.org/debian
wireguard:
sputnik: false
if: ens20
private_key: "{{ vault_wireguard_boeing_private_key }}"
peer_public_key: "{{ vault_wireguard_sputnik_public_key }}"
roles: ["wireguard"]

postfix.yml 100755
View File

@ -0,0 +1,45 @@
#!/usr/bin/env ansible-playbook
# Postfix playbook
---
- hosts: sputnik.adm.crans.org, boeing.adm.crans.org, redisdead.adm.crans.org, titanic.adm.crans.org
vars:
certbot:
dns_rfc2136_name: certbot_challenge.
dns_rfc2136_secret: "{{ vault_certbot_dns_secret }}"
mail: root@crans.org
certname: crans.org
domains: "*.crans.org"
bind:
masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}"
opendkim:
private_key: "{{ vault_opendkim_private_key }}"
policyd:
mail: root@crans.org
exemptions: "{{ lookup('re2oapi', 'get_role', 'user-server')[0] }}"
mynetworks:
ipv4:
"{{ lookup('re2oapi', 'cidrs', 'serveurs',
'adherents',
'wifi-new-pub',
'fil-new-pub',
'fil-pub',
'wifi-new-serveurs',
'wifi-new-adherents',
'wifi-new-federez',
'fil-new-serveurs',
'fil-new-adherents')
| flatten }}"
ipv6:
"{{ lookup('re2oapi', 'prefixv6', 'adherents',
'fil-new-pub',
'wifi-new-pub')
| flatten }}"
roles:
- certbot
- postfix
- opendkim
- policyd
- hosts: redisdead.adm.crans.org
roles:
- sqlgrey

radius.yml 100755
View File

@ -0,0 +1,15 @@
#!/usr/bin/env ansible-playbook
---
- hosts: eap.adm.crans.org, odlyd.adm.crans.org, radius.adm.crans.org
vars:
certbot:
dns_rfc2136_name: certbot_challenge.
dns_rfc2136_secret: "{{ vault_certbot_dns_secret }}"
mail: root@crans.org
certname: crans.org
domains: "crans.org"
bind:
masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}"
roles:
- certbot
- freeradius

View File

@ -1,7 +1,7 @@
{{ ansible_header | comment(decoration='# ') }}
# To apply this conf and generate the renewal conf:
# certbot --config wildcard.ini certonly
# To generate the certificate, please use the following command
# certbot --config /etc/letsencrypt/conf.d/{{ certbot.certname }}.ini certonly
# Use a 4096 bit RSA key instead of 2048
rsa-key-size = 4096

View File

@ -4,8 +4,10 @@
update_cache: true
install_recommends: false
name:
- apt-file
- sudo
- molly-guard # prevent reboot
- debsums
- ntp # network time sync
- apt # better than apt-get
- nano # for vulcain

View File

@ -0,0 +1,38 @@
---
- name: Install Redis and NPM
apt:
update_cache: true
name:
- redis-server
- nodejs
- npm
register: apt_result
retries: 3
until: apt_result is succeeded
- name: Install EtherCalc
npm:
name: ethercalc
global: true
state: latest
register: npm_result
retries: 3
until: npm_result is succeeded
- name: Install EtherCalc systemd unit
template:
src: systemd/system/ethercalc.service.j2
dest: /etc/systemd/system/ethercalc.service
- name: Activate EtherCalc service
systemd:
daemon_reload: true
name: ethercalc
enabled: true
state: started
- name: Indicate role in motd
template:
src: update-motd.d/05-service.j2
dest: /etc/update-motd.d/05-ethercalc
mode: 0755

View File

@ -0,0 +1,17 @@
{{ ansible_header | comment }}
[Unit]
Description=Ethercalc
Requires=redis-server.service
After=redis-server.service
[Service]
Type=simple
Restart=on-failure
RestartSec=3
User=redis
Group=redis
PIDFile=/var/run/ethercalc.pid
ExecStart=/usr/bin/ethercalc --host 10.231.136.203 --port 8000
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,3 @@
#!/usr/bin/tail +14
{{ ansible_header | comment }}
> EtherCalc a été déployé sur cette machine. Voir /usr/lib/node_modules/ethercalc/.

View File

@ -0,0 +1,20 @@
---
- name: Symlink radius certificates
file:
src: /etc/letsencrypt/live/crans.org/{{ item }}
dest: /etc/freeradius/3.0/certs/letsencrypt/{{ item }}
state: link
force: yes
loop:
- fullchain.pem
- privkey.pem
- name: Set permissions on certificates
file:
path: /etc/letsencrypt/{{ item }}
group: freerad
mode: '0755'
recurse: yes
loop:
- live
- archive

View File

@ -43,7 +43,7 @@
loop:
- section: server
option: root_url
value: "{{ grafana_root_url }}"
value: "{{ grafana.root_url }}"
- section: session # This will break with HTTPS
option: cookie_secure
value: "true"

View File

@ -21,10 +21,10 @@ ssl_skip_verify = false
# client_key = "/path/to/client.key"
# Search user bind dn
bind_dn = "{{ ldap_grafana_bind_dn }}"
bind_dn = "{{ grafana.ldap_bind_dn }}"
# Search user bind password
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
bind_password = '{{ ldap_grafana_passwd }}'
bind_password = '{{ grafana.ldap_passwd }}'
# User search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)"
search_filter = "(cn=%s)"

View File

@ -36,7 +36,7 @@
# Disable passwd and chsh
- name: Copy passwd and chsh scripts
template:
src: bin/passwd.j2
src: "bin/{{ item }}.j2"
dest: "/usr/local/bin/{{ item }}"
mode: 0755
loop:

View File

@ -0,0 +1,4 @@
#!/bin/sh
{{ ansible_header | comment }}
echo "Pour changer votre shell,\nAllez sur l'intranet : {{intranet_url}}"

View File

@ -0,0 +1,4 @@
#!/bin/sh
{{ ansible_header | comment }}
echo "Pour changer votre shell,\nAllez sur l'intranet : {{intranet_url}}"
echo "De toutes façons la vraie commande aurait pas marché, on installe pas nslcd-utils sur les serveurs normalement."

View File

@ -0,0 +1,5 @@
---
- name: Reload mailman
systemd:
name: mailman
state: reloaded

View File

@ -0,0 +1,39 @@
---
- name: Install mailman and SpamAssassin
apt:
update_cache: true
name:
- mailman
- spamassassin
register: apt_result
retries: 3
until: apt_result is succeeded
- name: Deploy mailman config
template:
src: "mailman/{{ item }}.j2"
dest: "/etc/mailman/{{ item }}"
mode: 0755
loop:
- mm_cfg.py
- create.html
notify: Reload mailman
# Fanciness
- name: Deploy crans logo
copy:
src: ../../../logos/crans.png
dest: /usr/share/images/mailman/crans.png
- name: Deploy custom Mailman htmlformat
template:
src: usr/lib/mailman/Mailman/htmlformat.py.j2
dest: /usr/lib/mailman/Mailman/htmlformat.py
mode: 0755
notify: Reload mailman
- name: Indicate role in motd
template:
src: update-motd.d/05-mailman.j2
dest: /etc/update-motd.d/05-mailman
mode: 0755

View File

@ -0,0 +1,13 @@
{{ ansible_header | comment('xml') }}
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Creation de mailing list</title>
</head>
<body>
<h1>Creation de mailing list</h1>
Il faut s'adresser a nounou arobase crans point org.
</body>
</html>

View File

@ -0,0 +1,226 @@
{{ ansible_header | comment }}
# -*- python -*-
# Copyright (C) 1998,1999,2000 by the Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""This is the module which takes your site-specific settings.
From a raw distribution it should be copied to mm_cfg.py. If you
already have an mm_cfg.py, be careful to add in only the new settings
you want. The complete set of distributed defaults, with annotation,
are in ./Defaults. In mm_cfg, override only those you want to
change, after the
from Defaults import *
line (see below).
Note that these are just default settings - many can be overridden via the
admin and user interfaces on a per-list or per-user basis.
Note also that some of the settings are resolved against the active list
setting by using the value as a format string against the
list-instance-object's dictionary - see the distributed value of
DEFAULT_MSG_FOOTER for an example."""
#######################################################
# Here's where we get the distributed defaults. #
from Defaults import *
#####
# General system-wide defaults
#####
# Should image logos be used? Set this to 0 to disable image logos from "our
# sponsors" and just use textual links instead (this will also disable the
# shortcut "favicon"). Otherwise, this should contain the URL base path to
# the logo images (and must contain the trailing slash). If you want to
# disable Mailman's logo footer altogether, hack
# Mailman/htmlformat.py:MailmanLogo(), which also contains the hardcoded links
# and image names.
IMAGE_LOGOS = '/images/mailman/'
#-------------------------------------------------------------
# The name of the list Mailman uses to send password reminders
# and similar. Don't change if you want mailman-owner to be
# a valid local part.
MAILMAN_SITE_LIST = '{{ mailman.site_list }}'
DEFAULT_URL = '{{ mailman.default_url }}'
DEFAULT_URL_PATTERN = 'https://%s/'
#-------------------------------------------------------------
# Default domain for email addresses of newly created MLs
DEFAULT_EMAIL_HOST = '{{ mailman.default_host }}'
#-------------------------------------------------------------
# Default host for web interface of newly created MLs
DEFAULT_URL_HOST = '{{ mailman.default_host }}'
#-------------------------------------------------------------
# Required when setting any of its arguments.
add_virtualhost(DEFAULT_URL_HOST, DEFAULT_EMAIL_HOST)
#-------------------------------------------------------------
# Do we send monthly reminders?
DEFAULT_SEND_REMINDERS = No
# Normally when a site administrator authenticates to a web page with the site
# password, they get a cookie which authorizes them as the list admin. It
# makes me nervous to hand out site auth cookies because if this cookie is
# cracked or intercepted, the intruder will have access to every list on the
# site. OTOH, it's dang handy to not have to re-authenticate to every list on
# the site. Set this value to Yes to allow site admin cookies.
ALLOW_SITE_ADMIN_COOKIES = Yes
#####
# Archive defaults
#####
PUBLIC_ARCHIVE_URL = '{{ mailman.default_url }}archives/%(listname)s'
# Are archives on or off by default?
DEFAULT_ARCHIVE = Off
# Are archives public or private by default?
# 0=public, 1=private
DEFAULT_ARCHIVE_PRIVATE = 1
# Pipermail assumes that messages bodies contain US-ASCII text.
# Change this option to define a different character set to be used as
# the default character set for the archive. The term "character set"
# is used in MIME to refer to a method of converting a sequence of
# octets into a sequence of characters. If you change the default
# charset, you might need to add it to VERBATIM_ENCODING below.
DEFAULT_CHARSET = 'utf-8'
# Most character set encodings require special HTML entity characters to be
# quoted, otherwise they won't look right in the Pipermail archives. However
# some character sets must not quote these characters so that they can be
# rendered properly in the browsers. The primary issue is multi-byte
# encodings where the octet 0x26 does not always represent the & character.
# This variable contains a list of such characters sets which are not
# HTML-quoted in the archives.
VERBATIM_ENCODING = ['utf-8']
#####
# General defaults
#####
# The default language for this server. Whenever we can't figure out the list
# context or user context, we'll fall back to using this language. See
# LC_DESCRIPTIONS below for legal values.
DEFAULT_SERVER_LANGUAGE = '{{ mailman.default_language }}'
# How many members to display at a time on the admin cgi to unsubscribe them
# or change their options?
DEFAULT_ADMIN_MEMBER_CHUNKSIZE = 50
# set this variable to Yes to allow list owners to delete their own mailing
# lists. You may not want to give them this power, in which case, setting
# this variable to No instead requires list removal to be done by the site
# administrator, via the command line script bin/rmlist.
#OWNERS_CAN_DELETE_THEIR_OWN_LISTS = No
# Set this variable to Yes to allow list owners to set the "personalized"
# flags on their mailing lists. Turning these on tells Mailman to send
# separate email messages to each user instead of batching them together for
# delivery to the MTA. This gives each member a more personalized message,
# but can have a heavy impact on the performance of your system.
#OWNERS_CAN_ENABLE_PERSONALIZATION = No
#####
# List defaults. NOTE: Changing these values does NOT change the
# configuration of an existing list. It only defines the default for new
# lists you subsequently create.
#####
# Should a list, by default be advertised? What is the default maximum number
# of explicit recipients allowed? What is the default maximum message size
# allowed?
DEFAULT_LIST_ADVERTISED = Yes
# {header-name: regexp} spam filtering - we include some for example sake.
DEFAULT_BOUNCE_MATCHING_HEADERS = """
# Lines starting with # are comments.
#from: .*-owner@yahoogroups.com
#from: .*@uplinkpro.com
#from: .*@coolstats.comic.com
#from: .*@trafficmagnet.com
#from: .*@hotmail.com
#X-Reject: 450
#X-Reject: 554
"""
# Mailman can be configured to strip any existing Reply-To: header, or simply
# extend any existing Reply-To: with one based on the above setting.
DEFAULT_FIRST_STRIP_REPLY_TO = Yes
# SUBSCRIBE POLICY
# 0 - open list (only when ALLOW_OPEN_SUBSCRIBE is set to 1) **
# 1 - confirmation required for subscribes
# 2 - admin approval required for subscribes
# 3 - both confirmation and admin approval required
#
# ** please do not choose option 0 if you are not allowing open
# subscribes (next variable)
DEFAULT_SUBSCRIBE_POLICY = 3
# Is the list owner notified of subscribes/unsubscribes?
DEFAULT_ADMIN_NOTIFY_MCHANGES = Yes
# Do we send monthly reminders?
DEFAULT_SEND_REMINDERS = No
# What should happen to non-member posts which do not match explicit
# non-member actions?
# 0 = Accept
# 1 = Hold
# 2 = Reject
# 3 = Discard
DEFAULT_GENERIC_NONMEMBER_ACTION = 1
# Use spamassassin automatically
GLOBAL_PIPELINE.insert(5, '{{ spamassassin }}')
# Discard messages with score higher than ...
SPAMASSASSIN_DISCARD_SCORE = 8
# Hold in moderation messages with score higher than ...
SPAMASSASSIN_HOLD_SCORE = 2.1
# Add SpamAssassin administration interface on gui
# To make it work, you need to edit Gui/__init__.py
# with
# from SpamAssassin import SpamAssassin
ADMIN_CATEGORIES.append("spamassassin")
# Add header to keep
PLAIN_DIGEST_KEEP_HEADERS.append('X-Spam-Score')
# configure MTA
MTA = 'Postfix'
SMTPHOST = '{{ smtphost }}'
SMTP_MAX_RCPTS = 50
POSTFIX_STYLE_VIRTUAL_DOMAINS = ["{{ mailman.default_host }}"]
# Note - if you're looking for something that is imported from mm_cfg, but you
# didn't find it above, it's probably in /usr/lib/mailman/Mailman/Defaults.py.

View File

@ -0,0 +1,3 @@
#!/usr/bin/tail +14
{{ ansible_header | comment }}
> Mailman a été déployé sur cette machine. Voir /etc/mailman/ et /var/lib/mailman/.

View File

@ -0,0 +1,742 @@
{{ ansible_header | comment }}
# Copyright (C) 1998-2018 by the Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""Library for program-based construction of an HTML documents.
Encapsulate HTML formatting directives in classes that act as containers
for python and, recursively, for nested HTML formatting objects.
"""
# Eventually could abstract down to HtmlItem, which outputs an arbitrary html
# object given start / end tags, valid options, and a value. Ug, objects
# shouldn't be adding their own newlines. The next object should.
import types
from Mailman import mm_cfg
from Mailman import Utils
from Mailman.i18n import _, get_translation
from Mailman.CSRFcheck import csrf_token
SPACE = ' '
EMPTYSTRING = ''
NL = '\n'
# Format an arbitrary object.
def HTMLFormatObject(item, indent):
"Return a presentation of an object, invoking their Format method if any."
if type(item) == type(''):
return item
elif not hasattr(item, "Format"):
return `item`
else:
return item.Format(indent)
def CaseInsensitiveKeyedDict(d):
result = {}
for (k,v) in d.items():
result[k.lower()] = v
return result
# Given references to two dictionaries, copy the second dictionary into the
# first one.
def DictMerge(destination, fresh_dict):
for (key, value) in fresh_dict.items():
destination[key] = value
class Table:
def __init__(self, **table_opts):
self.cells = []
self.cell_info = {}
self.row_info = {}
self.opts = table_opts
def AddOptions(self, opts):
DictMerge(self.opts, opts)
# Sets all of the cells. It writes over whatever cells you had there
# previously.
def SetAllCells(self, cells):
self.cells = cells
# Add a new blank row at the end
def NewRow(self):
self.cells.append([])
# Add a new blank cell at the end
def NewCell(self):
self.cells[-1].append('')
def AddRow(self, row):
self.cells.append(row)
def AddCell(self, cell):
self.cells[-1].append(cell)
def AddCellInfo(self, row, col, **kws):
kws = CaseInsensitiveKeyedDict(kws)
if not self.cell_info.has_key(row):
self.cell_info[row] = { col : kws }
elif self.cell_info[row].has_key(col):
DictMerge(self.cell_info[row], kws)
else:
self.cell_info[row][col] = kws
def AddRowInfo(self, row, **kws):
kws = CaseInsensitiveKeyedDict(kws)
if not self.row_info.has_key(row):
self.row_info[row] = kws
else:
DictMerge(self.row_info[row], kws)
# What's the index for the row we just put in?
def GetCurrentRowIndex(self):
return len(self.cells)-1
# What's the index for the col we just put in?
def GetCurrentCellIndex(self):
return len(self.cells[-1])-1
def ExtractCellInfo(self, info):
valid_mods = ['align', 'valign', 'nowrap', 'rowspan', 'colspan',
'bgcolor']
output = ''
for (key, val) in info.items():
if not key in valid_mods:
continue
if key == 'nowrap':
output = output + ' NOWRAP'
continue
else:
output = output + ' %s="%s"' % (key.upper(), val)
return output
def ExtractRowInfo(self, info):
valid_mods = ['align', 'valign', 'bgcolor']
output = ''
for (key, val) in info.items():
if not key in valid_mods:
continue
output = output + ' %s="%s"' % (key.upper(), val)
return output
def ExtractTableInfo(self, info):
valid_mods = ['align', 'width', 'border', 'cellspacing', 'cellpadding',
'bgcolor']
output = ''
for (key, val) in info.items():
if not key in valid_mods:
continue
if key == 'border' and val == None:
output = output + ' BORDER'
continue
else:
output = output + ' %s="%s"' % (key.upper(), val)
return output
def FormatCell(self, row, col, indent):
try:
my_info = self.cell_info[row][col]
except:
my_info = None
output = '\n' + ' '*indent + '<td'
if my_info:
output = output + self.ExtractCellInfo(my_info)
item = self.cells[row][col]
item_format = HTMLFormatObject(item, indent+4)
output = '%s>%s</td>' % (output, item_format)
return output
def FormatRow(self, row, indent):
try:
my_info = self.row_info[row]
except:
my_info = None
output = '\n' + ' '*indent + '<tr'
if my_info:
output = output + self.ExtractRowInfo(my_info)
output = output + '>'
for i in range(len(self.cells[row])):
output = output + self.FormatCell(row, i, indent + 2)
output = output + '\n' + ' '*indent + '</tr>'
return output
def Format(self, indent=0):
output = '\n' + ' '*indent + '<table'
output = output + self.ExtractTableInfo(self.opts)
output = output + '>'
for i in range(len(self.cells)):
output = output + self.FormatRow(i, indent + 2)
output = output + '\n' + ' '*indent + '</table>\n'
return output
class Link:
def __init__(self, href, text, target=None):
self.href = href
self.text = text
self.target = target
def Format(self, indent=0):
texpr = ""
if self.target != None:
texpr = ' target="%s"' % self.target
return '<a href="%s"%s>%s</a>' % (HTMLFormatObject(self.href, indent),
texpr,
HTMLFormatObject(self.text, indent))
class FontSize:
"""FontSize is being deprecated - use FontAttr(..., size="...") instead."""
def __init__(self, size, *items):
self.items = list(items)
self.size = size
def Format(self, indent=0):
output = '<font size="%s">' % self.size
for item in self.items:
output = output + HTMLFormatObject(item, indent)
output = output + '</font>'
return output
class FontAttr:
"""Present arbitrary font attributes."""
def __init__(self, *items, **kw):
self.items = list(items)
self.attrs = kw
def Format(self, indent=0):
seq = []
for k, v in self.attrs.items():
seq.append('%s="%s"' % (k, v))
output = '<font %s>' % SPACE.join(seq)
for item in self.items:
output = output + HTMLFormatObject(item, indent)
output = output + '</font>'
return output
class Container:
def __init__(self, *items):
if not items:
self.items = []
else:
self.items = items
def AddItem(self, obj):
self.items.append(obj)
def Format(self, indent=0):
output = []
for item in self.items:
output.append(HTMLFormatObject(item, indent))
return EMPTYSTRING.join(output)
class Label(Container):
align = 'right'
def __init__(self, *items):
Container.__init__(self, *items)
def Format(self, indent=0):
return ('<div align="%s">' % self.align) + \
Container.Format(self, indent) + \
'</div>'
# My own standard document template. YMMV.
# something more abstract would be more work to use...
class Document(Container):
title = None
language = None
bgcolor = mm_cfg.WEB_BG_COLOR
suppress_head = 0
def set_language(self, lang=None):
self.language = lang
def set_bgcolor(self, color):
self.bgcolor = color
def SetTitle(self, title):
self.title = title
def Format(self, indent=0, **kws):
charset = 'us-ascii'
if self.language and Utils.IsLanguage(self.language):
charset = Utils.GetCharSet(self.language)
output = ['Content-Type: text/html; charset=%s' % charset]
output.append('Cache-control: no-cache\n')
if not self.suppress_head:
kws.setdefault('bgcolor', self.bgcolor)
tab = ' ' * indent
output.extend([tab,
'<HTML>',
'<HEAD>'
])
if mm_cfg.IMAGE_LOGOS:
output.append('<LINK REL="SHORTCUT ICON" HREF="%s">' %
(mm_cfg.IMAGE_LOGOS + mm_cfg.SHORTCUT_ICON))
# Hit all the bases
output.append('<META http-equiv="Content-Type" '
'content="text/html; charset=%s">' % charset)
if self.title:
output.append('%s<TITLE>%s</TITLE>' % (tab, self.title))
# Add CSS to visually hide some labeling text but allow screen
# readers to read it.
output.append("""\
<style type="text/css">
div.hidden
{position:absolute;
left:-10000px;
top:auto;
width:1px;
height:1px;
overflow:hidden;}
</style>
""")
if mm_cfg.WEB_HEAD_ADD:
output.append(mm_cfg.WEB_HEAD_ADD)
output.append('%s</HEAD>' % tab)
quals = []
# Default link colors
if mm_cfg.WEB_VLINK_COLOR:
kws.setdefault('vlink', mm_cfg.WEB_VLINK_COLOR)
if mm_cfg.WEB_ALINK_COLOR:
kws.setdefault('alink', mm_cfg.WEB_ALINK_COLOR)
if mm_cfg.WEB_LINK_COLOR:
kws.setdefault('link', mm_cfg.WEB_LINK_COLOR)
for k, v in kws.items():
quals.append('%s="%s"' % (k, v))
output.append('%s<BODY %s' % (tab, SPACE.join(quals)))
# Language direction
direction = Utils.GetDirection(self.language)
output.append('dir="%s">' % direction)
# Always do this...
output.append(Container.Format(self, indent))
if not self.suppress_head:
output.append('%s</BODY>' % tab)
output.append('%s</HTML>' % tab)
return NL.join(output)
def addError(self, errmsg, tag=None):
if tag is None:
tag = _('Error: ')
self.AddItem(Header(3, Bold(FontAttr(
_(tag), color=mm_cfg.WEB_ERROR_COLOR, size='+2')).Format() +
Italic(errmsg).Format()))
class HeadlessDocument(Document):
"""Document without head section, for templates that provide their own."""
suppress_head = 1
class StdContainer(Container):
def Format(self, indent=0):
# If I don't start a new line I ignore indent
output = '<%s>' % self.tag
output = output + Container.Format(self, indent)
output = '%s</%s>' % (output, self.tag)
return output
class QuotedContainer(Container):
def Format(self, indent=0):
# If I don't start a new line I ignore indent
output = '<%s>%s</%s>' % (
self.tag,
Utils.websafe(Container.Format(self, indent)),
self.tag)
return output
class Header(StdContainer):
def __init__(self, num, *items):
self.items = items
self.tag = 'h%d' % num
class Address(StdContainer):
tag = 'address'
class Underline(StdContainer):
tag = 'u'
class Bold(StdContainer):
tag = 'strong'
class Italic(StdContainer):
tag = 'em'
class Preformatted(QuotedContainer):
tag = 'pre'
class Subscript(StdContainer):
tag = 'sub'
class Superscript(StdContainer):
tag = 'sup'
class Strikeout(StdContainer):
tag = 'strike'
class Center(StdContainer):
tag = 'center'
class Form(Container):
def __init__(self, action='', method='POST', encoding=None,
mlist=None, contexts=None, user=None, *items):
apply(Container.__init__, (self,) + items)
self.action = action
self.method = method
self.encoding = encoding
self.mlist = mlist
self.contexts = contexts
self.user = user
def set_action(self, action):
self.action = action
def Format(self, indent=0):
spaces = ' ' * indent
encoding = ''
if self.encoding:
encoding = 'enctype="%s"' % self.encoding
output = '\n%s<FORM action="%s" method="%s" %s>\n' % (
spaces, self.action, self.method, encoding)
if self.mlist:
output = output + \
'<input type="hidden" name="csrf_token" value="%s">\n' \
% csrf_token(self.mlist, self.contexts, self.user)
output = output + Container.Format(self, indent+2)
output = '%s\n%s</FORM>\n' % (output, spaces)
return output
class InputObj:
def __init__(self, name, ty, value, checked, **kws):
self.name = name
self.type = ty
self.value = value
self.checked = checked
self.kws = kws
def Format(self, indent=0):
charset = get_translation().charset() or 'us-ascii'
output = ['<INPUT name="%s" type="%s" value="%s"' %
(self.name, self.type, self.value)]
for item in self.kws.items():
output.append('%s="%s"' % item)
if self.checked:
output.append('CHECKED')
output.append('>')
ret = SPACE.join(output)
if self.type == 'TEXT' and isinstance(ret, unicode):
ret = ret.encode(charset, 'xmlcharrefreplace')
return ret
class SubmitButton(InputObj):
def __init__(self, name, button_text):
InputObj.__init__(self, name, "SUBMIT", button_text, checked=0)
class PasswordBox(InputObj):
def __init__(self, name, value='', size=mm_cfg.TEXTFIELDWIDTH):
InputObj.__init__(self, name, "PASSWORD", value, checked=0, size=size)
class TextBox(InputObj):
def __init__(self, name, value='', size=mm_cfg.TEXTFIELDWIDTH):
if isinstance(value, str):
safevalue = Utils.websafe(value)
else:
safevalue = value
InputObj.__init__(self, name, "TEXT", safevalue, checked=0, size=size)
class Hidden(InputObj):
def __init__(self, name, value=''):
InputObj.__init__(self, name, 'HIDDEN', value, checked=0)
class TextArea:
def __init__(self, name, text='', rows=None, cols=None, wrap='soft',
readonly=0):
if isinstance(text, str):
# Double escape HTML entities in non-readonly areas.
doubleescape = not readonly
safetext = Utils.websafe(text, doubleescape)
else:
safetext = text
self.name = name
self.text = safetext
self.rows = rows
self.cols = cols
self.wrap = wrap
self.readonly = readonly
def Format(self, indent=0):
charset = get_translation().charset() or 'us-ascii'
output = '<TEXTAREA NAME=%s' % self.name
if self.rows:
output += ' ROWS=%s' % self.rows
if self.cols:
output += ' COLS=%s' % self.cols
if self.wrap:
output += ' WRAP=%s' % self.wrap
if self.readonly:
output += ' READONLY'
output += '>%s</TEXTAREA>' % self.text
if isinstance(output, unicode):
output = output.encode(charset, 'xmlcharrefreplace')
return output
class FileUpload(InputObj):
def __init__(self, name, rows=None, cols=None, **kws):
apply(InputObj.__init__, (self, name, 'FILE', '', 0), kws)
class RadioButton(InputObj):
def __init__(self, name, value, checked=0, **kws):
apply(InputObj.__init__, (self, name, 'RADIO', value, checked), kws)
class CheckBox(InputObj):
def __init__(self, name, value, checked=0, **kws):
apply(InputObj.__init__, (self, name, "CHECKBOX", value, checked), kws)
class VerticalSpacer:
def __init__(self, size=10):
self.size = size
def Format(self, indent=0):
output = '<spacer type="vertical" height="%d">' % self.size
return output
class WidgetArray:
Widget = None
def __init__(self, name, button_names, checked, horizontal, values):
self.name = name
self.button_names = button_names
self.checked = checked
self.horizontal = horizontal
self.values = values
assert len(values) == len(button_names)
# Don't assert `checked' because for RadioButtons it is a scalar while
# for CheckBoxes it is a vector. Subclasses will assert length.
def ischecked(self, i):
raise NotImplementedError
def Format(self, indent=0):
t = Table(cellspacing=5)
items = []
for i, name, value in zip(range(len(self.button_names)),
self.button_names,
self.values):
ischecked = (self.ischecked(i))
item = ('<label>' +
self.Widget(self.name, value, ischecked).Format() +
name + '</label>')
items.append(item)
if not self.horizontal:
t.AddRow(items)
items = []
if self.horizontal:
t.AddRow(items)
return t.Format(indent)
class RadioButtonArray(WidgetArray):
Widget = RadioButton
def __init__(self, name, button_names, checked=None, horizontal=1,
values=None):
if values is None:
values = range(len(button_names))
# BAW: assert checked is a scalar...
WidgetArray.__init__(self, name, button_names, checked, horizontal,
values)
def ischecked(self, i):
return self.checked == i
class CheckBoxArray(WidgetArray):
Widget = CheckBox
def __init__(self, name, button_names, checked=None, horizontal=0,
values=None):
if checked is None:
checked = [0] * len(button_names)
else:
assert len(checked) == len(button_names)
if values is None:
values = range(len(button_names))
WidgetArray.__init__(self, name, button_names, checked, horizontal,
values)
def ischecked(self, i):
return self.checked[i]
class UnorderedList(Container):
def Format(self, indent=0):
spaces = ' ' * indent
output = '\n%s<ul>\n' % spaces
for item in self.items:
output = output + '%s<li>%s\n' % \
(spaces, HTMLFormatObject(item, indent + 2))
output = output + '%s</ul>\n' % spaces
return output
class OrderedList(Container):
def Format(self, indent=0):
spaces = ' ' * indent
output = '\n%s<ol>\n' % spaces
for item in self.items:
output = output + '%s<li>%s\n' % \
(spaces, HTMLFormatObject(item, indent + 2))
output = output + '%s</ol>\n' % spaces
return output
class DefinitionList(Container):
def Format(self, indent=0):
spaces = ' ' * indent
output = '\n%s<dl>\n' % spaces
for dt, dd in self.items:
output = output + '%s<dt>%s\n<dd>%s\n' % \
(spaces, HTMLFormatObject(dt, indent+2),
HTMLFormatObject(dd, indent+2))
output = output + '%s</dl>\n' % spaces
return output
# Logo constants
#
# These are the URLs which the image logos link to. The Mailman home page now
# points at the gnu.org site instead of the www.list.org mirror.
#
from mm_cfg import MAILMAN_URL
PYTHON_URL = 'http://www.python.org/'
GNU_URL = 'http://www.gnu.org/'
CRANS_URL = 'http://www.crans.org/'
# The names of the image logo files. These are concatenated onto
# mm_cfg.IMAGE_LOGOS (not urljoined).
DELIVERED_BY = 'mailman.jpg'
PYTHON_POWERED = 'PythonPowered.png'
GNU_HEAD = 'gnu-head-tiny.jpg'
CRANS_LOGO = 'crans.png'
def MailmanLogo():
t = Table(border=0, width='100%')
version = mm_cfg.VERSION
mmlink = _("Delivered by Mailman")
pylink = _("Python Powered")
gnulink = _("GNU's Not Unix")
cranslink = _("CRANS")
if mm_cfg.SITE_LINK:
sitelink = mm_cfg.SITE_TEXT
if mm_cfg.IMAGE_LOGOS:
def logo(file, alt, base=mm_cfg.IMAGE_LOGOS):
return '<img src="%s" alt="%s" border="0" />' % \
(base + file, alt)
mmlink = logo(DELIVERED_BY, mmlink)
pylink = logo(PYTHON_POWERED, pylink)
gnulink = logo(GNU_HEAD, gnulink)
cranslink = logo(CRANS_LOGO, cranslink)
if mm_cfg.SITE_LINK:
sitelink = logo(mm_cfg.SITE_LOGO, sitelink, "")
mmlink = Link(MAILMAN_URL, mmlink + _('<br>version %(version)s'))
pylink = Link(PYTHON_URL, pylink)
gnulink = Link(GNU_URL, gnulink)
cranslink = Link(CRANS_URL, cranslink)
links = [mmlink, pylink, gnulink, cranslink]
if mm_cfg.SITE_LINK:
if mm_cfg.SITE_URL:
sitelink = Link(mm_cfg.SITE_URL, sitelink)
links.append(sitelink)
t.AddRow(links)
return t
class SelectOptions:
def __init__(self, varname, values, legend,
selected=0, size=1, multiple=None):
self.varname = varname
self.values = values
self.legend = legend
self.size = size
self.multiple = multiple
# we convert any type to tuple, commas are needed
if not multiple:
if type(selected) == types.IntType:
self.selected = (selected,)
elif type(selected) == types.TupleType:
self.selected = (selected[0],)
elif type(selected) == types.ListType:
self.selected = (selected[0],)
else:
self.selected = (0,)
def Format(self, indent=0):
spaces = " " * indent
items = min( len(self.values), len(self.legend) )
# jcrey: If there is no argument, we return nothing to avoid errors
if items == 0:
return ""
text = "\n" + spaces + "<Select name=\"%s\"" % self.varname
if self.size > 1:
text = text + " size=%d" % self.size
if self.multiple:
text = text + " multiple"
text = text + ">\n"
for i in range(items):
if i in self.selected:
checked = " Selected"
else:
checked = ""
opt = " <option value=\"%s\"%s> %s </option>" % (
self.values[i], checked, self.legend[i])
text = text + spaces + opt + "\n"
return text + spaces + '</Select>'
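# Usage sketch (illustrative, not part of this file): how these widget
# classes are meant to compose, assuming the module is importable as
# Mailman.htmlformat and mm_cfg is configured:
#
#     from Mailman.htmlformat import Document, Table, Link, Bold
#     doc = Document()
#     doc.SetTitle('Lists')
#     t = Table(border=0, cellspacing=5)
#     t.AddRow([Bold('List'), Link('https://lists.crans.org/', 'archives')])
#     t.AddCellInfo(t.GetCurrentRowIndex(), 0, bgcolor='#dddddd')
#     doc.AddItem(t)
#     print doc.Format()   # emits the Content-Type header, the <HTML>
#                          # boilerplate and the rendered <table>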

View File

@ -19,6 +19,7 @@
dest: "/etc/mtail/{{ item }}"
loop:
- dhcpd.mtail
- radiusd.mtail
notify: Restart mtail
- name: Indicate role in motd

View File

@ -0,0 +1,47 @@
{{ ansible_header | comment }}
# radiusd template by erdnaxe@crans.org
# Define the exported metric names. The `by' keyword indicates the metric has
# dimensions. For example, `request_total' counts the frequency of each
# request's "command". The name `command' will be exported as the label name
# for the metric. The command provided in the code below will be exported as
# the label value.
counter radiusd_access_ok
counter radiusd_access_refused by reason
# The `syslog' decorator defines a procedure. When a block of mtail code is
# "decorated", it is called before entering the block. The block is entered
# when the keyword `next' is reached.
def syslog {
/^(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
# If the legacy_date regexp matched, try this format.
len($legacy_date) > 0 {
strptime($2, "Jan _2 15:04:05")
}
# If the RFC3339 style matched, parse it this way.
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T15:04:05.999999999Z07:00")
}
# Call into the decorated block
next
}
}
# Define some pattern constants for reuse in the patterns below.
const IP /\d+(\.\d+){3}/
const MATCH_IP /(?P<ip>/ + IP + /)/
const MATCH_NETWORK /(?P<network>\d+(\.\d+){1,3}\/\d+)/
const MATCH_MAC /(?P<mac>([\da-f]{2}:){5}[\da-f]{2})/
@syslog {
# Access ok!
/Access ok/ {
radiusd_access_ok++
}
# Bouh!
/Adherent non cotisant/ {
radiusd_access_refused["Did not pay"]++
}
}
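# Worked example (illustrative, not part of the generated rules): a syslog
# line such as
#   Jan  2 15:04:05 radius1 radiusd[1234]: Adherent non cotisant
# is parsed by the @syslog decorator, matches /Adherent non cotisant/ and
# increments radiusd_access_refused{reason="Did not pay"}, which mtail then
# exposes for Prometheus to scrape.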

View File

@ -0,0 +1,5 @@
---
- name: Reload nginx
systemd:
name: nginx
state: reloaded

View File

@ -0,0 +1,43 @@
---
- name: Install NGINX
apt:
update_cache: true
name:
- nginx
register: apt_result
retries: 3
until: apt_result is succeeded
- name: Copy configuration files
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- src: nginx/sites-available/mailman.j2
dest: /etc/nginx/sites-available/mailman
- src: nginx/mailman_passwd.j2
dest: /etc/nginx/mailman_passwd
- src: nginx/snippets/fastcgi-mailman.conf.j2
dest: /etc/nginx/snippets/fastcgi-mailman.conf
- src: nginx/snippets/options-ssl.conf.j2
dest: /etc/nginx/snippets/options-ssl.conf
- src: var/www/robots.txt.j2
dest: /var/www/robots.txt
- src: var/www/custom_401.html.j2
dest: /var/www/custom_401.html
notify: Reload nginx
- name: Enable mailman
file:
src: /etc/nginx/sites-available/mailman
dest: /etc/nginx/sites-enabled/mailman
state: link
force: true
when: not ansible_check_mode
notify: Reload nginx
- name: Indicate role in motd
template:
src: update-motd.d/05-service.j2
dest: /etc/update-motd.d/05-nginx-mailman
mode: 0755

View File

@ -0,0 +1,2 @@
{{ ansible_header | comment }}
Stop:$apr1$NXaV5H7Q$J3ora3Jo5h775Y1nm93PN1

View File

@ -0,0 +1,94 @@
{{ ansible_header | comment }}
server {
listen 80 default;
listen [::]:80 default;
server_name _;
location / {
return 302 https://{{ mailman.default_host }}$request_uri;
}
}
# Redirect everybody to mailing lists
server {
listen 443 default_server ssl;
listen [::]:443 default_server ssl;
server_name _;
include "/etc/nginx/snippets/options-ssl.conf";
location / {
return 302 https://{{ mailman.default_host }}$request_uri;
}
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name {{ mailman.default_host }};
include "/etc/nginx/snippets/options-ssl.conf";
root /usr/lib/cgi-bin/mailman/;
index index.htm index.html;
location /error/ {
internal;
alias /var/www/;
}
location /create {
default_type text/html;
alias /etc/mailman/create.html;
}
location ~ ^/$ {
return 302 https://{{ mailman.default_host }}/listinfo;
}
location / {
include "/etc/nginx/snippets/fastcgi-mailman.conf";
}
location ~ ^/listinfo {
satisfy any;
include "/etc/nginx/snippets/fastcgi-mailman.conf";
{% for net in mynetworks -%}
allow {{ net }};
{% endfor -%}
deny all;
auth_basic {{ mailman.auth_basic }}
auth_basic_user_file /etc/nginx/mailman_passwd;
error_page 401 /error/custom_401.html;
}
location ~ ^/admin {
satisfy any;
include "/etc/nginx/snippets/fastcgi-mailman.conf";
{% for net in mynetworks -%}
allow {{ net }};
{% endfor -%}
deny all;
auth_basic {{ mailman.auth_basic }}
auth_basic_user_file /etc/nginx/mailman_passwd;
error_page 401 /error/custom_401.html;
}
location /images/mailman { alias /usr/share/images/mailman;}
location /robots.txt { alias /var/www/robots.txt;}
location /archives {
alias /var/lib/mailman/archives/public;
autoindex on;
}
}
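# Access model (illustrative summary): "satisfy any" lets clients from
# mynetworks through without credentials; everyone else must pass the basic
# auth defined in /etc/nginx/mailman_passwd (user Stop), and rejected
# attempts get the explanatory page at /error/custom_401.html.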

View File

@ -0,0 +1,18 @@
{{ ansible_header | comment }}
# regex to split $uri to $fastcgi_script_name and $fastcgi_path_info
fastcgi_split_path_info (^/[^/]*)(.*)$;
# check that the CGI script exists before passing it
try_files $fastcgi_script_name =404;
# Bypass the fact that try_files resets $fastcgi_path_info
# see: http://trac.nginx.org/nginx/ticket/321
set $path_info $fastcgi_path_info;
fastcgi_param PATH_INFO $path_info;
# Let NGINX handle errors
fastcgi_intercept_errors on;
include /etc/nginx/fastcgi.conf;
fastcgi_pass unix:/var/run/fcgiwrap.socket;
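# Worked example (illustrative): for $uri = /admin/mylist/members, the
# split regex yields $fastcgi_script_name = /admin (the CGI script checked
# by try_files) and $fastcgi_path_info = /mylist/members, which is saved
# into $path_info and re-exported as PATH_INFO for fcgiwrap.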

View File

@ -0,0 +1,18 @@
{{ ansible_header | comment }}
# regex to split $uri to $fastcgi_script_name and $fastcgi_path_info
fastcgi_split_path_info (^/[^/]*)(.*)$;
# check that the CGI script exists before passing it
try_files $fastcgi_script_name =404;
# Bypass the fact that try_files resets $fastcgi_path_info
# see: http://trac.nginx.org/nginx/ticket/321
set $path_info $fastcgi_path_info;
fastcgi_param PATH_INFO $path_info;
# Let NGINX handle errors
fastcgi_intercept_errors on;
include /etc/nginx/fastcgi.conf;
fastcgi_pass unix:/var/run/fcgiwrap.socket;

View File

@ -0,0 +1,17 @@
{{ ansible_header | comment }}
ssl_certificate {{ nginx.ssl.cert }};
ssl_certificate_key {{ nginx.ssl.key }};
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
ssl_session_tickets off;
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
# Enable OCSP Stapling, point to certificate chain
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate {{ nginx.ssl.trusted_cert }};
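# Note (assumption, not stated in this template): this cipher and session
# setup matches the Mozilla "intermediate" TLS profile, hence the
# shared:MozSSL session cache name from the Mozilla SSL config generator.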

View File

@ -0,0 +1,3 @@
#!/usr/bin/tail +14
{{ ansible_header | comment }}
> NGINX has been deployed on this machine. See /etc/nginx/.

View File

@ -0,0 +1,18 @@
{{ ansible_header | comment('xml') }}
<html>
<head>
<title>Access denied</title>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
</head>
<body>
<h1>Access denied</h1>
<p>
To keep robots from harvesting the mailing list addresses, this page asks for a username and password.
</p>
<ul>
<li>Username: <em>Stop</em></li>
<li>Password: <em>Spam</em></li>
</ul>
</body>
</html>

View File

@ -0,0 +1,4 @@
{{ ansible_header | comment }}
User-agent: *
Disallow: /

View File

@ -2,9 +2,7 @@
- name: Install NGINX
apt:
update_cache: true
name:
- nginx
- python3-certbot-nginx # for options-ssl-nginx.conf
name: nginx
register: apt_result
retries: 3
until: apt_result is succeeded
@ -17,10 +15,16 @@
- options-ssl.conf
- options-proxypass.conf
- name: Has dhparam been copied?
stat:
path: /etc/letsencrypt/dhparam
register: stat_result
- name: Copy dhparam
template:
src: letsencrypt/dhparam.j2
dest: /etc/letsencrypt/dhparam
when: not stat_result.stat.exists
- name: Copy reverse proxy sites
template:

View File

@ -15,3 +15,5 @@ proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# For Owncloud WebDav
client_max_body_size 10G;

View File

@ -0,0 +1,50 @@
---
- name: Install opendkim
apt:
update_cache: true
name:
- opendkim
- opendkim-tools
register: apt_result
retries: 3
until: apt_result is succeeded
- name: Ensure opendkim directories are here
file:
path: /etc/opendkim/keys/crans.org
state: directory
mode: 0750
owner: opendkim
group: opendkim
when: not ansible_check_mode
- name: Deploy opendkim configuration
template:
src: opendkim.conf.j2
dest: /etc/opendkim.conf
mode: 0644
owner: opendkim
group: opendkim
- name: Deploy opendkim configuration
template:
src: opendkim/{{ item }}.j2
dest: /etc/opendkim/{{ item }}
mode: 0644
owner: opendkim
group: opendkim
loop:
- KeyTable
- SigningTable
- TrustedHosts
- name: Deploy opendkim key
template:
src: opendkim/keys/crans.org/{{ item }}.j2
dest: /etc/opendkim/keys/crans.org/{{ item }}
mode: 0600
owner: opendkim
group: opendkim
loop:
- mail.private
- mail.txt

View File

@ -0,0 +1,110 @@
{{ ansible_header | comment }}
# This is a basic configuration that can easily be adapted to suit a standard
# installation. For more advanced options, see opendkim.conf(5) and/or
# /usr/share/doc/opendkim/examples/opendkim.conf.sample.
AutoRestart Yes
AutoRestartRate 10/1h
# Log to syslog
Syslog yes
SyslogSuccess Yes
LogWhy Yes
# Required to use local socket with MTAs that access the socket as a non-
# privileged user (e.g. Postfix)
UMask 002
# Sign for example.com with key in /etc/mail/dkim.key using
# selector '2007' (e.g. 2007._domainkey.example.com)
#Domain example.com
#KeyFile /etc/mail/dkim.key
#Selector 2007
# Commonly-used options; the commented-out versions show the defaults.
Canonicalization relaxed/simple
#mode sv
#subdomains no
# socket smtp://localhost
#
# ## socket socketspec
# ##
# ## names the socket where this filter should listen for milter connections
# ## from the mta. required. should be in one of these forms:
# ##
# ## inet:port@address to listen on a specific interface
# ## inet:port to listen on all interfaces
# ## local:/path/to/socket to listen on a unix domain socket
#
#socket inet:8892@localhost
socket inet:12301@localhost
## pidfile filename
### default (none)
###
### name of the file where the filter should write its pid before beginning
### normal operations.
#
pidfile /var/run/opendkim/opendkim.pid
# list domains to use for rfc 6541 dkim authorized third-party signatures
# (atps) (experimental)
#atpsdomains example.com
signaturealgorithm rsa-sha256
ExternalIgnoreList refile:/etc/opendkim/TrustedHosts
InternalHosts refile:/etc/opendkim/TrustedHosts
KeyTable refile:/etc/opendkim/KeyTable
SigningTable refile:/etc/opendkim/SigningTable
Mode sv
#SubDomains no
#ADSPDiscard no
# Always oversign From (sign using the actual From and a null From) to
# prevent malicious insertion of header fields (From and/or others) between
# the signer and the verifier. From is oversigned by default in the Debian
# package because it is often the identity key used by reputation systems
# and thus somewhat security sensitive.
OversignHeaders From
## resolverconfiguration filename
## default (none)
##
## specifies a configuration file to be passed to the unbound library that
## performs dns queries applying the dnssec protocol. see the unbound
## documentation at http://unbound.net for the expected content of this file.
## the results of using this and the trustanchorfile setting at the same
## time are undefined.
## in debian, /etc/unbound/unbound.conf is shipped as part of the suggested
## unbound package
# resolverconfiguration /etc/unbound/unbound.conf
## trustanchorfile filename
## default (none)
##
## specifies a file from which trust anchor data should be read when doing
## dns queries and applying the dnssec protocol. see the unbound documentation
## at http://unbound.net for the expected format of this file.
trustanchorfile /usr/share/dns/root.key
## userid userid
### default (none)
###
### change to user "userid" before starting normal operation? may include
### a group id as well, separated from the userid by a colon.
#
userid opendkim:opendkim
# Whether to decode non-UTF-8 and non-ASCII textual parts and recode
# them to UTF-8 before the text is given over to rules processing.
#
# normalize_charset 1
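# How the tables fit together (illustrative): SigningTable maps a sender
# pattern such as *@crans.org to the key name mail._domainkey.crans.org;
# KeyTable maps that name to crans.org:mail:/etc/opendkim/keys/crans.org/mail.private,
# i.e. domain "crans.org", selector "mail" and the private key path. A
# verifier then looks up the mail._domainkey.crans.org TXT record that is
# published from mail.txt.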

View File

@ -0,0 +1 @@
mail._domainkey.crans.org crans.org:mail:/etc/opendkim/keys/crans.org/mail.private

View File

@ -0,0 +1,2 @@
*@crans.org mail._domainkey.crans.org
*@crans.eu mail._domainkey.crans.org

View File

@ -0,0 +1,19 @@
127.0.0.1
localhost
::1
138.231.136.0/21
138.231.144.0/21
10.231.136.0/24
10.2.9.0/24
2a0c:700:0:1::/64
2a0c:700:0:2::/64
2a0c:700:0:21::/64
2a0c:700:0:22::/64
2a0c:700:0:23::/64
*.crans.org
*.crans.fr
*.crans.eu

View File

@ -0,0 +1 @@
{{ opendkim.private_key }}

View File

@ -0,0 +1 @@
mail._domainkey IN TXT "v=DKIM1; k=rsa; p=MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtwkNVd9Mmz8S4WcfuPk0X2drG39gS8+uxAv8igRILgzWeN8j2hjeZesl8pm/1UTVU87bYcdfUgXiGfQy9nR5p/Vmt2kS7sXk9nsJ/VYENgb3IJQ6paWupSTFMyeKycJ4ZHCEZB/bVvifoG6vLKqW5jpsfCiOcfdcgXATn0UPuVx9t93yRrhoEMntMv9TSodjqd3FKCtJUoh5cNQHo0T6dWKtxoIgNi/mvZ92D/IACwu/XOU+Rq9fnoEI8GukBQUR5AkP0B/JrvwWXWX/3EjY8X37ljEX0XUdq/ShzTl5iK+CM83stgkFUQh/rpww5mnxYEW3X4uirJ7VJHmY4KPoIU+2DPjLQj9Hz63CMWY3Ks2pXWzxD3V+GI1aJTMFOv2LeHnI3ScqFaKj9FR4ZKMb0OW2BEFBIY3J3aeo/paRwdbVCMM7twDtZY9uInR/NhVa1v9hlOxwp4/2pGSKQYoN2CkAZ1Alzwf8M3EONLKeiC43JLYwKH1uBB1oikSVhMnLjG0219XvfG/tphyoOqJR/bCc2rdv5pLwKUl4wVuygfpvOw12bcvnTfYuk/BXzVHg9t4H8k/DJR6GAoeNAapXIS8AfAScF8QdKfplhKLJyQGJ6lQ75YD9IwRAN0oV+8NTjl46lI/C+b7mpfXCew+p6YPwfNvV2shiR0Ez8ZGUQIcCAwEAAQ==" ; ----- DKIM key mail for crans.org

View File

@ -0,0 +1,29 @@
- name: Install policyd-rate-limit
apt:
update_cache: true
name:
- policyd-rate-limit
register: apt_result
retries: 3
until: apt_result is succeeded
when: postfix.primary
- name: Deploy policyd-rate-limit
vars:
exempt_v4: "{{ policyd.exemptions | json_query('servers[].interface[?vlan_id==`2`].ipv4[]') }}"
exempt_v6: "{{ policyd.exemptions | json_query('servers[].interface[?vlan_id==`2`].ipv6[][].ipv6') }}"
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: 0640
loop:
- { src: policyd/policyd-rate-limit.yaml.j2, dest: /etc/policyd-rate-limit.yaml }
- { src: policyd/policyd.py.j2, dest: /usr/lib/python3/dist-packages/policyd_rate_limit }
when: postfix.primary
- name: Indicate role in motd
template:
src: update-motd.d/05-policyd.j2
dest: /etc/update-motd.d/05-policyd
mode: 0755
when: postfix.primary

View File

@ -0,0 +1,107 @@
{{ ansible_header | comment }}
# Make policyd-rate-limit output logs to stderr
debug: False
# The user policyd-rate-limit will use to drop privileges.
user: "policyd-rate-limit"
# The group policyd-rate-limit will use to drop privileges.
group: "policyd-rate-limit"
# path where the program will try to write its pid to.
pidfile: "/var/run/policyd-rate-limit/policyd-rate-limit.pid"
# The config to connect to a mysql server.
mysql_config:
user: "username"
passwd: "*****"
db: "database"
host: "localhost"
charset: 'utf8'
# The config to connect to a sqlite3 database.
sqlite_config:
database: "/var/lib/policyd-rate-limit/db.sqlite3"
# The config to connect to a postgresql server.
pgsql_config:
database: "database"
user: "username"
password: "*****"
host: "localhost"
# Which data backend to use. Possible values are 0 for sqlite3, 1 for mysql and 2 for postgresql.
backend: 0
# The socket to bind to. Can be a path to a unix socket or a couple [ip, port].
# SOCKET: ["127.0.0.1", 8552]
SOCKET: "/var/spool/postfix/ratelimit/policy"
# Permissions on the unix socket (if unix socket used).
socket_permission: 0666
# A list of couples [number of emails, number of seconds]. If one of the
# elements of the list is exceeded (more than 'number of emails' in
# 'number of seconds' for an ip address or a sasl username), postfix will
# return a temporary failure.
limits:
- [75, 60] # limit to 75 mails per minute
- [200, 86400] # limit to 200 mails per day
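# Worked example (illustrative): with the limits above, a sender who has
# already sent 75 mails in the last 60 seconds (or 200 in the last 24
# hours) gets fail_action for the next delivery attempt; otherwise
# success_action is returned and the mail is recorded.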
# dict of id -> limit list. Used to override limits and use custom limits for
# a particular id. Use an empty list for no limits for a particular id.
# ids are sasl usernames or ip addresses
# limits_by_id:
# foo: []
# 192.168.0.254:
# - [1000, 86400] # limit to 1000 mails per day
# 2a06:e042:100:4:219:bbff:fe3c:4f76: []
limits_by_id:
{% for server in exempt_v4 %}
{{ server }} : []
{% endfor %}
{% for server in exempt_v6 %}
{{ server }} : []
{% endfor %}
# Apply limits by sasl usernames.
limit_by_sasl: True
# If no sasl username is found, apply limits by ip addresses.
limit_by_ip: True
# A list of ip networks in cidr notation on which limits are applied. An empty list is equal
# to limit_by_ip: False; put "0.0.0.0/0" and "::/0" to cover every ip address.
limited_networks: {{ policyd.mynetworks.ipv4 | union(policyd.mynetworks.ipv6) }}
# If no limit is reached, the action postfix should take.
# see http://www.postfix.org/access.5.html for a list of actions.
success_action: "dunno"
# If a limit is reached, the action postfix should take.
# see http://www.postfix.org/access.5.html for a list of actions.
fail_action: "defer_if_permit Rate limit reached, retry later"
# If we are unable to contact the database backend, the action postfix should take.
# see http://www.postfix.org/access.5.html for a list of actions.
db_error_action: "dunno"
# If True, send a report to report_to about users reaching limits each time --clean is called
report: True
# Address from which to send email reports. Must be defined if report: True
report_from: "{{ policyd.mail }}"
# Address to send email reports to. Must be defined if report: True
report_to: "{{ policyd.mail }}"
# Subject of the report email
report_subject: "policyd-rate-limit report"
# List of number of seconds from the limits list for which you want to be reported.
report_limits: [86400]
# Only send a report if some users have reached a reported limit.
# Otherwise, empty reports may be sent.
report_only_if_needed: True
# The smtp server to use to send emails [host, port]
smtp_server: ["localhost", 25]
# Should we use starttls (you should set this to True if you use smtp_credentials)
smtp_starttls: False
# Should we use credentials to connect to smtp_server ? if yes set ["user", "password"], else null
smtp_credentials: null
delay_to_close: 300

View File

@ -0,0 +1,285 @@
{{ ansible_header | comment }}
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License version 3 for
# more details.
#
# You should have received a copy of the GNU General Public License version 3
# along with this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# (c) 2015-2016 Valentin Samir
import os
import sys
import socket
import time
import select
import traceback
from policyd_rate_limit import utils
from policyd_rate_limit.utils import config
class PolicydError(Exception):
pass
class PolicydConnectionClosed(PolicydError):
pass
class Pass(Exception):
pass
class Policyd(object):
"""The policy server class"""
socket_data_read = {}
socket_data_write = {}
last_used = {}
def socket(self):
"""initialize the socket from the config parameters"""
# if socket is a string assume it is the path to a unix socket
if isinstance(config.SOCKET, str):
try:
os.remove(config.SOCKET)
except OSError:
if os.path.exists(config.SOCKET): # pragma: no cover (should not happen)
raise
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# else assume it is a tuple (bind_ip, bind_port)
elif ':' in config.SOCKET[0]: # assume ipv6 bind address
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
elif '.' in config.SOCKET[0]: # assume ipv4 bind address
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
raise ValueError("bad socket %s" % (config.SOCKET,))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock = sock
def close_socket(self):
"""close the socket depending of the config parameters"""
self.sock.close()
# if socket was an unix socket, delete it after closing.
if isinstance(config.SOCKET, str):
try:
os.remove(config.SOCKET)
except OSError as error: # pragma: no cover (should not happen)
sys.stderr.write("%s\n" % error)
sys.stderr.flush()
def close_connection(self, connection):
"""close a connection and clean read/write dict"""
# Clean up the connection
try:
del self.socket_data_read[connection]
except KeyError:
pass
try:
del self.socket_data_write[connection]
except KeyError:
pass
connection.close()
def close_write_conn(self, connection):
"""Removes a socket from the write dict"""
try:
del self.socket_data_write[connection]
except KeyError:
if config.debug:
sys.stderr.write(
(
"Hmmm, a socket actually used to write a little "
"time ago wasn\'t in socket_data_write. Weird.\n"
)
)
def run(self):
"""The main server loop"""
try:
sock = self.sock
sock.bind(config.SOCKET)
if isinstance(config.SOCKET, str):
os.chmod(config.SOCKET, config.socket_permission)
sock.listen(5)
self.socket_data_read[sock] = []
if config.debug:
sys.stderr.write('waiting for connections\n')
sys.stderr.flush()
while True:
# wait for a socket that is ready to read from or to write to
(rlist, wlist, _) = select.select(
self.socket_data_read.keys(), self.socket_data_write.keys(), []
)
for socket in rlist:
# if the socket is the main socket, there is a new connection to accept
if socket == sock:
connection, client_address = sock.accept()
if config.debug:
sys.stderr.write('connection from %s\n' % (client_address,))
sys.stderr.flush()
self.socket_data_read[connection] = []
# Update the last_used time for the socket.
self.last_used[connection] = time.time()
# else there is data to read on a client socket
else:
self.read(socket)
for socket in wlist:
try:
data = self.socket_data_write[socket]
sent = socket.send(data)
data_not_sent = data[sent:]
if data_not_sent:
self.socket_data_write[socket] = data_not_sent
else:
self.close_write_conn(socket)
# Socket has been used, let's update its last_used time.
self.last_used[socket] = time.time()
# the socket has been closed during read
except KeyError:
pass
# Close sockets that have been unused for too long.
__to_rm = []
for (socket, last_used) in self.last_used.items():
if socket == sock:
continue
if time.time() - last_used > config.delay_to_close:
self.close_connection(socket)
__to_rm.append(socket)
for socket in __to_rm:
self.last_used.pop(socket)
except (KeyboardInterrupt, utils.Exit):
for socket in list(self.socket_data_read.keys()):
if socket != self.sock:
self.close_connection(socket)
raise
def read(self, connection):
"""Called then a connection is ready for reads"""
try:
# get the current buffer of the connection
buffer = self.socket_data_read[connection]
# read data
data = connection.recv(1024).decode('UTF-8')
if not data:
#raise ValueError("connection closed")
raise PolicydConnectionClosed()
if config.debug:
sys.stderr.write(data)
sys.stderr.flush()
# accumulate it in buffer
buffer.append(data)
# if the data is too short to determine whether we are on an empty line,
# we concatenate the buffered data
if len(data) < 2:
data = u"".join(buffer)
buffer = [data]
# We reached an empty line, so the client has finished sending and waits for a response
if data[-2:] == "\n\n":
data = u"".join(buffer)
request = {}
# the data read contains one key=value per line
for line in data.split("\n"):
line = line.strip()
try:
key, value = line.split(u"=", 1)
if value:
request[key] = value
# if value is empty, ignore it
except ValueError:
pass
# process the collected data in the action method
self.action(connection, request)
else:
self.socket_data_read[connection] = buffer
# Socket has been used, let's update its last_used time.
self.last_used[connection] = time.time()
except (KeyboardInterrupt, utils.Exit):
self.close_connection(connection)
raise
except PolicydConnectionClosed:
if config.debug:
sys.stderr.write("Connection closed\n")
sys.stderr.flush()
self.close_connection(connection)
except Exception as error:
traceback.print_exc()
sys.stderr.flush()
self.close_connection(connection)
def action(self, connection, request):
"""Called then the client has sent an empty line"""
id = None
# By default, we do not block emails
action = config.success_action
try:
if not config.database_is_initialized:
utils.database_init()
with utils.cursor() as cur:
try:
# only care if the protocol state is RCPT. If the policy delegation in the
# postfix configuration is in smtpd_recipient_restrictions, as the doc
# says, the possible states are RCPT and VRFY.
if 'protocol_state' in request and request['protocol_state'].upper() != "RCPT":
raise Pass()
# if user is authenticated, we filter by sasl username
if config.limit_by_sasl and u'sasl_username' in request:
id = request[u'sasl_username']
# else, if activated, we filter by sender
elif config.limit_by_sender and u'sender' in request:
id = request[u'sender']
# else, if activated, we filter by source ip address
elif (
config.limit_by_ip and
u'client_address' in request and
utils.is_ip_limited(request[u'client_address'])
):
id = request[u'client_address']
# if the client sent us neither a client ip address nor a sasl username,
# jump to the next section
else:
raise Pass()
# Here we are limiting by sasl username, sender or source ip address.
# For each limit period, we count the number of mails already sent.
# If a limit is reached, we change action to fail (deny the mail).
for mail_nb, delta in config.limits_by_id.get(id, config.limits):
cur.execute(
(
"SELECT COUNT(*) FROM mail_count "
"WHERE id = %s AND date >= %s"
) % ((config.format_str,)*2),
(id, int(time.time() - delta))
)
nb = cur.fetchone()[0]
if config.debug:
sys.stderr.write("%03d/%03d hit since %ss\n" % (nb, mail_nb, delta))
sys.stderr.flush()
if nb >= mail_nb:
action = config.fail_action
if config.report and delta in config.report_limits:
utils.hit(cur, delta, id)
raise Pass()
except Pass:
pass
# If action is a success, record in the database that a new mail has just been sent
if action == config.success_action and id is not None:
if config.debug:
sys.stderr.write(u"insert id %s\n" % id)
sys.stderr.flush()
cur.execute(
"INSERT INTO mail_count VALUES (%s, %s)" % ((config.format_str,)*2),
(id, int(time.time()))
)
except utils.cursor.backend_module.Error as error:
utils.cursor.del_db()
action = config.db_error_action
sys.stderr.write("Database error: %r\n" % error)
data = u"action=%s\n\n" % action
if config.debug:
sys.stderr.write(data)
sys.stderr.flush()
# return the result to the client
self.socket_data_write[connection] = data.encode('UTF-8')
# Socket has been used, let's update its last_used time.
self.last_used[connection] = time.time()
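# Protocol sketch (illustrative): postfix's check_policy_service sends one
# attribute per line and terminates a request with an empty line:
#
#     request=smtpd_access_policy
#     protocol_state=RCPT
#     sasl_username=jdoe
#     client_address=10.231.136.4
#     <empty line>
#
# read() accumulates those lines, action() evaluates the limits, and the
# server answers "action=dunno\n\n" (success_action) or
# "action=defer_if_permit ...\n\n" (fail_action) on the same connection.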

View File

@ -0,0 +1,3 @@
#!/usr/bin/tail +14
{{ ansible_header | comment }}
> policyd-rate-limit has been deployed on this machine.

View File

@ -2,8 +2,8 @@
- name: generate postmaps
command: /usr/sbin/postmap {{ item }}
loop:
- /etc/postfix/canonical
- /etc/postfix/mime_header_checks
- /etc/postfix/recipient_access
- /etc/postfix/sender_login_maps
- /etc/postfix/transport
- /etc/postfix/client_checks

View File

@ -24,6 +24,24 @@
- sender_login_maps
- postscreen_access.cidr
- sasl/smtpd.conf
- canonical
- client_checks
notify:
- generate postmaps
- name: Make sure let's encrypt renewal-hooks exists
file:
path: /etc/letsencrypt/renewal-hooks/deploy
state: directory
when: not ansible_check_mode
- name: Reload postfix after certificate renewal
template:
src: letsencrypt/renewal-hooks/deploy/reload-postfix.sh.j2
dest: /etc/letsencrypt/renewal-hooks/deploy/reload-postfix.sh
mode: 0755
- name: Indicate role in motd
template:
src: update-motd.d/05-postfix.j2
dest: /etc/update-motd.d/05-postfix
mode: 0755

View File

@ -0,0 +1,3 @@
#!/bin/sh
{{ ansible_header | comment }}
systemctl reload postfix

View File

@ -1,6 +0,0 @@
{{ ansible_header | comment }}
# File providing rules to rewrite certain addresses
/^(.*)@localhost(\.crans\.org)?$/ ${1}@crans.org
/^(.*)@{{ ansible_hostname }}.adm.crans.org$/ ${1}@crans.org
/^(.*)@{{ ansible_hostname }}.crans.org$/ ${1}@crans.org

View File

@ -0,0 +1,3 @@
{{ ansible_header | comment }}
185.50.149.0/24 REJECT Spammers are not welcome here!

View File

@ -24,15 +24,12 @@ mydestination = {{ ansible_hostname }}, $myhostname, localhost, localhost.$mydom
{% endif %}
# Domains relayed by this MX
relay_domains = $mydestination
{% if postfix.mailman %}
{% if postfix.mailman or postfix.public %}
lists.$mydomain
{% endif %}
{% if postfix.secondary %}
$mydomain, crans.ens-cachan.fr, clubs.ens-cachan.fr, install-party.ens-cachan.fr, crans.fr, crans.eu
{% endif %}
{% if postfix.public %}
lists.$mydomain
{% endif %}
{% if postfix.mailman %}
relay_recipient_maps =
hash:/var/local/re2o-services/mail-server/generated/virtual
@ -86,8 +83,8 @@ virtual_alias_maps = hash:/var/local/re2o-services/mail-server/generated/virtual
# TLS pour la reception
smtpd_use_tls=yes
smtpd_tls_security_level=may
smtpd_tls_cert_file=/etc/ssl/certs/smtp.pem
smtpd_tls_key_file=/etc/ssl/private/smtp.pem
smtpd_tls_cert_file=/etc/letsencrypt/live/crans.org/fullchain.pem
smtpd_tls_key_file=/etc/letsencrypt/live/crans.org/privkey.pem
smtpd_tls_loglevel=0
smtpd_tls_received_header=yes
@ -120,6 +117,16 @@ smtpd_helo_required = yes
smtpd_helo_restrictions = permit_mynetworks
reject_invalid_helo_hostname
reject_non_fqdn_helo_hostname
# Check that the client is not in a blacklisted IP range
check_client_access cidr:/etc/postfix/client_checks
{% endif %}
{% if postfix.primary %}
submission_client_restrictions =
check_client_access cidr:/etc/postfix/client_checks
submission_relay_restrictions =
permit_sasl_authenticated
reject
{% endif %}
## Rate-limiting of messages sent per minute
# We only ignore messages coming from "protected" addresses
@ -154,7 +161,7 @@ smtpd_policy_service_request_limit = 1
smtpd_recipient_restrictions =
{% if postfix.primary %}
# Trial of policyd-rate-limit to limit the number of mails per SASL user
check_policy_service unix:ratelimit/policy
check_policy_service { unix:ratelimit/policy, default_action=DUNNO }
{% endif %}
# permit if the client is on the local network
permit_mynetworks

View File

@ -71,8 +71,8 @@
# DO NOT SHARE THE POSTFIX QUEUE BETWEEN MULTIPLE POSTFIX INSTANCES.
#
# ==========================================================================
# service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (50)
# service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (50)
# ==========================================================================
{% if postfix.primary or postfix.secondary %}
smtp inet n - - - 1 postscreen
@ -87,14 +87,17 @@ dnsblog unix - - - - 0 dnsblog
submission inet n - - - - smtpd
-o smtpd_tls_security_level=encrypt
-o smtpd_sasl_auth_enable=yes
-o smtpd_client_restrictions=permit_sasl_authenticated,reject
-o smtpd_delay_reject=no
-o smtpd_client_restrictions=$submission_client_restrictions
-o smtpd_relay_restrictions=$submission_relay_restrictions
-o milter_macro_daemon_name=ORIGINATING
smtps inet n - - - - smtpd
-o smtpd_tls_wrappermode=yes
-o smtpd_sasl_auth_enable=yes
-o smtpd_client_restrictions=permit_sasl_authenticated,reject
-o smtpd_delay_reject=no
-o smtpd_client_restrictions=$submission_client_restrictions
-o smtpd_relay_restrictions=$submission_relay_restrictions
{% endif %}
#628 inet n - - - - qmqpd
pickup fifo n - - 60 1 pickup
cleanup unix n - - - 0 cleanup
qmgr fifo n - - 300 1 qmgr

View File

@ -59,3 +59,6 @@
# No, we do not want to treat alcoholism without the patient's knowledge.
94.242.206.15 reject
91.188.222.33 reject
# And the Russians are kicked out too
185.50.149.0/24 reject

View File

@ -1,3 +1,5 @@
{{ ansible_header | comment }}
@crans.org root
@crans.fr root
@crans.eu root

View File

@ -0,0 +1,3 @@
#!/usr/bin/tail +14
{{ ansible_header | comment }}
> Postfix has been deployed on this machine. See /etc/postfix/.

View File

@ -12,5 +12,5 @@
path: /etc/default/prometheus-apache-exporter
regexp: '^ARGS='
line: |
ARGS="-telemetry.address={{ ansible_hostname }}.adm.crans.org:9117"
ARGS="-telemetry.address={{ adm_ipv4 }}:9117"
notify: Restart prometheus-apache-exporter

View File

@ -31,11 +31,9 @@
# Doesn't work on Debian Stretch with the old prometheus package
- name: Make Prometheus node-exporter listen on adm only
lineinfile:
path: /etc/default/prometheus-node-exporter
regexp: '^ARGS='
line: |
ARGS="--web.listen-address={{ ansible_hostname }}.adm.crans.org:9100"
template:
src: default/prometheus-node-exporter.j2
dest: /etc/default/prometheus-node-exporter
notify: Restart prometheus-node-exporter
tags: restart-node-exporter

View File

@ -0,0 +1,130 @@
{{ ansible_header | comment }}
# Set the command-line arguments to pass to the server.
# Due to shell escaping, to pass backslashes for regexes, you need to double
# them (\\d for \d). If running under systemd, you need to double them again
# (\\\\d to mean \d), and escape newlines too.
ARGS="--web.listen-address={{ adm_ipv4 }}:9100"
# Prometheus-node-exporter supports the following options:
#
# --collector.diskstats.ignored-devices="^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$"
# Regexp of devices to ignore for diskstats.
# --collector.filesystem.ignored-mount-points="^/(dev|proc|run|sys|mnt|media|var/lib/docker)($|/)"
# Regexp of mount points to ignore for filesystem
# collector.
# --collector.filesystem.ignored-fs-types="^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$"
# Regexp of filesystem types to ignore for
# filesystem collector.
# --collector.netdev.ignored-devices="^lo$"
# Regexp of net devices to ignore for netdev
# collector.
# --collector.netstat.fields="^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*)|Tcp_(ActiveOpens|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts))$"
# Regexp of fields to return for netstat
# collector.
# --collector.ntp.server="127.0.0.1"
# NTP server to use for ntp collector
# --collector.ntp.protocol-version=4
# NTP protocol version
# --collector.ntp.server-is-local
# Certify that collector.ntp.server address is the
# same local host as this collector.
# --collector.ntp.ip-ttl=1 IP TTL to use while sending NTP query
# --collector.ntp.max-distance=3.46608s
# Max accumulated distance to the root
# --collector.ntp.local-offset-tolerance=1ms
# Offset between local clock and local ntpd time
# to tolerate
# --path.procfs="/proc" procfs mountpoint.
# --path.sysfs="/sys" sysfs mountpoint.
# --collector.qdisc.fixtures=""
# test fixtures to use for qdisc collector
# end-to-end testing
# --collector.runit.servicedir="/etc/service"
# Path to runit service directory.
# --collector.supervisord.url="http://localhost:9001/RPC2"
# XML RPC endpoint.
# --collector.systemd.unit-whitelist=".+"
# Regexp of systemd units to whitelist. Units must
# both match whitelist and not match blacklist to
# be included.
# --collector.systemd.unit-blacklist=".+(\\.device|\\.scope|\\.slice|\\.target)"
# Regexp of systemd units to blacklist. Units must
# both match whitelist and not match blacklist to
# be included.
# --collector.systemd.private
# Establish a private, direct connection to
# systemd without dbus.
# --collector.textfile.directory="/var/lib/prometheus/node-exporter"
# Directory to read text files with metrics from.
# --collector.vmstat.fields="^(oom_kill|pgpg|pswp|pg.*fault).*"
# Regexp of fields to return for vmstat collector.
# --collector.wifi.fixtures=""
# test fixtures to use for wifi collector metrics
# --collector.arp Enable the arp collector (default: enabled).
# --collector.bcache Enable the bcache collector (default: enabled).
# --collector.bonding Enable the bonding collector (default: enabled).
# --collector.buddyinfo Enable the buddyinfo collector (default:
# disabled).
# --collector.conntrack Enable the conntrack collector (default:
# enabled).
# --collector.cpu Enable the cpu collector (default: enabled).
# --collector.diskstats Enable the diskstats collector (default:
# enabled).
# --collector.drbd Enable the drbd collector (default: disabled).
# --collector.edac Enable the edac collector (default: enabled).
# --collector.entropy Enable the entropy collector (default: enabled).
# --collector.filefd Enable the filefd collector (default: enabled).
# --collector.filesystem Enable the filesystem collector (default:
# enabled).
# --collector.hwmon Enable the hwmon collector (default: enabled).
# --collector.infiniband Enable the infiniband collector (default:
# enabled).
# --collector.interrupts Enable the interrupts collector (default:
# disabled).
# --collector.ipvs Enable the ipvs collector (default: enabled).
# --collector.ksmd Enable the ksmd collector (default: disabled).
# --collector.loadavg Enable the loadavg collector (default: enabled).
# --collector.logind Enable the logind collector (default: disabled).
# --collector.mdadm Enable the mdadm collector (default: enabled).
# --collector.meminfo Enable the meminfo collector (default: enabled).
# --collector.meminfo_numa Enable the meminfo_numa collector (default:
# disabled).
# --collector.mountstats Enable the mountstats collector (default:
# disabled).
# --collector.netdev Enable the netdev collector (default: enabled).
# --collector.netstat Enable the netstat collector (default: enabled).
# --collector.nfs Enable the nfs collector (default: enabled).
# --collector.nfsd Enable the nfsd collector (default: enabled).
# --collector.ntp Enable the ntp collector (default: disabled).
# --collector.qdisc Enable the qdisc collector (default: disabled).
# --collector.runit Enable the runit collector (default: disabled).
# --collector.sockstat Enable the sockstat collector (default:
# enabled).
# --collector.stat Enable the stat collector (default: enabled).
# --collector.supervisord Enable the supervisord collector (default:
# disabled).
# --collector.systemd Enable the systemd collector (default: enabled).
# --collector.tcpstat Enable the tcpstat collector (default:
# disabled).
# --collector.textfile Enable the textfile collector (default:
# enabled).
# --collector.time Enable the time collector (default: enabled).
# --collector.uname Enable the uname collector (default: enabled).
# --collector.vmstat Enable the vmstat collector (default: enabled).
# --collector.wifi Enable the wifi collector (default: enabled).
# --collector.xfs Enable the xfs collector (default: enabled).
# --collector.zfs Enable the zfs collector (default: enabled).
# --collector.timex Enable the timex collector (default: enabled).
# --web.listen-address=":9100"
# Address on which to expose metrics and web
# interface.
# --web.telemetry-path="/metrics"
# Path under which to expose metrics.
# --log.level="info" Only log messages with the given severity or
# above. Valid levels: [debug, info, warn, error,
# fatal]
# --log.format="logger:stderr"
# Set the log target and format. Example:
# "logger:syslog?appname=bob&local=7" or
# "logger:stdout?json=true"

View File

@ -25,31 +25,31 @@
# We don't need to restart Prometheus when updating nodes
- name: Configure Prometheus nodes
copy:
content: "{{ prometheus_targets | to_nice_json }}"
content: "{{ [{'targets': prometheus.node_targets}] | to_nice_json }}"
dest: /etc/prometheus/targets.json
# We don't need to restart Prometheus when updating nodes
- name: Configure Prometheus UPS SNMP devices
copy:
content: "{{ prometheus_ups_snmp_targets | to_nice_json }}"
content: "{{ [{'targets': prometheus.ups_snmp_targets}] | to_nice_json }}"
dest: /etc/prometheus/targets_ups_snmp.json
# We don't need to restart Prometheus when updating nodes
- name: Configure Prometheus Ubiquity Unifi SNMP devices
copy:
content: "{{ prometheus_unifi_snmp_targets | to_nice_json }}"
content: "{{ [{'targets': prometheus.unifi_snmp_targets}] | to_nice_json }}"
dest: /etc/prometheus/targets_unifi_snmp.json
# We don't need to restart Prometheus when updating nodes
- name: Configure Prometheus Apache targets
copy:
content: "{{ prometheus_apache_targets | to_nice_json }}"
content: "{{ [{'targets': prometheus.apache_targets}] | to_nice_json }}"
dest: /etc/prometheus/targets_apache.json
# We don't need to restart Prometheus when updating nodes
- name: Configure Prometheus Blackbox targets
copy:
content: "{{ prometheus_blackbox_targets | to_nice_json }}"
content: "{{ [{'targets': prometheus.blackbox_targets}] | to_nice_json }}"
dest: /etc/prometheus/targets_blackbox.json
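# Example of a generated file (illustrative): with
# prometheus.node_targets = ["zamok.adm.crans.org:9100"], targets.json
# becomes [{"targets": ["zamok.adm.crans.org:9100"]}], the file_sd format
# Prometheus reads without needing a restart.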
- name: Activate prometheus service

View File

@ -44,7 +44,7 @@ groups:
# Alert for high CPU usage
- alert: CpuBusy
expr: node_load5 > 5
expr: node_load5{instance="zbee.adm.crans.org"} > 7 or node_load5{instance!="zbee.adm.crans.org"} > 5
for: 10m
labels:
severity: warning
@ -89,7 +89,7 @@ groups:
description: "https://grafana.crans.org/d/qtbg59mZz/alimentation"
- alert: UpsTemperatureWarning
expr: (xupsEnvRemoteTemp < 10) or (xupsEnvRemoteTemp > 24)
expr: (xupsEnvRemoteTemp < 10) or (xupsEnvRemoteTemp > 26)
for: 5m
labels:
severity: warning

@ -1 +1 @@
Subproject commit 3fa31a218d75835aa196ebd174906f3656ef22bd
Subproject commit 1869e9e08e926da376c2f7a6db69a6a5dc126b86

View File

@ -62,6 +62,16 @@ hosts allow = *
read only = yes
{% endif %}
{# we want to back up /var/lib/mailman on redisdead #}
{% if ansible_hostname == "redisdead" %}
[mailman]
path = /var/lib/mailman
auth users = backupcrans
secrets file = /etc/rsyncd.secrets
hosts allow = zephir.adm.crans.org 10.231.136.6
{% endif %}
{# TODO: implement the real system as in BCFG2 #}
{# TODO: implement the special case for cpasswords-main and wiki #}

View File

@ -0,0 +1,19 @@
---
- name: Install sqlgrey
apt:
update_cache: true
name:
- sqlgrey
register: apt_result
retries: 3
until: apt_result is succeeded
- name: Deploy sqlgrey configuration
template:
src: sqlgrey/{{ item }}.j2
dest: /etc/sqlgrey/{{ item }}
mode: 0644
loop:
- sqlgrey.conf
- clients_fqdn_whitelist.local
- clients_ip_whitelist.local

View File

@ -0,0 +1,4 @@
{{ ansible_header | comment }}
# Gandi
*.mail.gandi.net

View File

@ -0,0 +1,4 @@
{{ ansible_header | comment }}
# Bouygues Télécom... their MX servers do not retry mail delivery.
62.201.140

View File

@ -0,0 +1,189 @@
{{ ansible_header | comment }}
#########################
## SQLgrey config file ##
#########################
# Notes:
# - Unless specified otherwise commented settings are SQLgrey's defaults
# - SQLgrey uses a specific config file when called with -f <conf_file>
## Configuration files
# conf_dir = /etc/sqlgrey
## Log level
# Uncomment to change the log level (default is normal: 2)
# nothing: O, errors only: 0, warnings: 1, normal: 2, verbose: 3, debug: 4
loglevel = 2
## log categories can be fine-tuned,
# here are the log messages sorted by types and levels,
# (anything over the loglevel is discarded):
#
# grey : (0) internal errors,
# (2) initial connections, early reconnections,
# awl matches, successful reconnections, AWL additions,
# (3) smart decision process debug,
# whitelist: (2) whitelisted connections,
# (3) actual whitelist hit,
# (4) whitelists reloads,
# optin: (3) optin/optout global result
# (4) optin/optout SQL query results
# spam : (2) attempts never retried,
# mail : (1) error sending mails,
# (4) rate-limiter debug,
# dbaccess : (0) DB errors,
# (1) DB upgrade,
# (2) DB upgrade details,
# martians : (2) invalid e-mail addresses,
# perf : (2) cleanup time,
# system : (0) error forking,
# (3) forked children PIDs, children exits,
# conf : (0) errors in config files, missing required file,
# (1) warnings in config files,
# missing optional configuration files,
# (2) reloading configuration files,
# other : (4) Startup cleanup
# you can set a level to O (capital o) to disable logs completely,
# but be aware that then SQLgrey can come back to haunt you...
# Provide a comma-separated "logtype:loglevel" string
# For example if you set the loglevel to 3 (verbose) but want SQLgrey to be:
# . quiet for whitelists
# . normal for greylisting
# uncomment the following line.
# log_override = whitelist:1,grey:2
# By default, log_override is empty
## Log identification
# by default this is the process name. If you define the following variable
# SQLgrey will use whatever you set it to
# log_ident =
## username and groupname the daemon runs as
user = sqlgrey
group = nogroup
## Socket
# On which socket do SQLgrey wait for queries
# use the following if you need to bind on a public IP address
# inet = <public ip>:port
# default :
# inet = 2501 # bind to localhost:2501
## PID
# where to store the process PID
# pidfile = /var/run/sqlgrey.pid
## Config directory
# where to look for other configuration files (whitelists)
# confdir = /etc/sqlgrey
## Greylisting delays
# If you want to be really strict (RFC-wise) use these
# This is *not* recommended, you'll have false positives
# reconnect_delay = 15 # don't allow a reconnection before 15 minutes
# max_connect_age = 2 # don't allow a reconnection after 2 hours
# default: (based on real-life experience)
reconnect_delay = 6
max_connect_age = 24
## Throttling too many new entries from new host
# Setting this optional parameter will refuse an excessive number of
# new entries in the connect table from the same host, in the following
# manner:
# - If there are already "connect_src_throttle" entries in the connect
# table from the same host (e-mails which have not been retried yet)
# - And there is NO entry for this host in domain_awl
# - And there are LESS than "connect_src_throttle" entries in the
# from_awl table for this host
# THEN further incoming connections from this host will be (temporarily)
# refused without new entries being created in the connect table (until
# some already waiting entries have been successfully retried).
# This feature may prevent the connect table from growing too big and
# being polluted by spambots, viruses, zombie machines and the like.
# If set to "0" (default), this feature won't be used.
connect_src_throttle = 5
## Auto whitelists settings
# default is tailored for small sites
# awl_age = 60
# group_domain_level = 2
# For bigger sites you may want
# a smaller awl_age and a bigger group_domain_level
# AWL must be renewed at least once a month
# 32 > 31 (max delay between monthly newsletters)
awl_age = 33
# wait for 10 validated addresses before adding a whole
# domain to the AWL
group_domain_level = 10
## Database settings
# instead of Pg below use "mysql" for MySQL, "SQLite" for SQLite
# any DBD driver is allowed, but only the previous 3 have been tested
db_type = Pg
db_name = sqlgrey
# Note: the following are not used with SQLite
# We keep pgsql even for ovh; sqlgrey can detect when it loses
# the link to the database.
db_host = pgsql.adm.crans.org
db_user = sqlgrey
# db_pass = spaces_are_not_supported
# db_cleandelay = 1800 # in seconds, how much time between database cleanups
# clean_method = sync # sync : cleanup is done in the main process,
# delaying other operations
# async: cleanup is done in a forked process,
# it won't delay mail processing
# BEWARE: lockups have been reported
# and are still investigated
## X-Greylist header added?
# This adds delay, whitelist and autowhitelist information in the headers
prepend = 1
## Greylisting method:
# - full : greylist by IP address
# - classc : greylist by class C network. eg:
# 2.3.4.6 connection accepted if 2.3.4.145 did connect earlier
# - smart : greylist by class C network unless there is no reverse lookup
# or it looks like a home-user address
# Default is smart
greymethod = smart
## Optin/Optout (see README.OPTINOUT for details)
# - none : everyone is greylisted (default)
# - optin : one must opt in to have one's (incoming) messages greylisted
# - optout : one must opt out to avoid having one's messages greylisted
optmethod = optout
## SQLgrey return value.
# SQLgrey can tell Postfix to:
# - immediately reject a message with a temporary reject code
# - only do so if following rules would allow the message to pass
# The first choice will prevent Postfix from spending time evaluating
# potentially expensive rules.
# In some cases you may want the following rules to still see the
# connection; delayed rejection allows this.
#
# We can specify a different rejection strategy for the first connection
# attempt, and for early reconnections. 'immed' chooses immediate rejection
# 'delay' chooses delayed rejection
#
# By default we use delay on first attempt
# reject_first_attempt = delay
# Default for early reconnection is the value assigned to reject_first_attempt
# reject_early_reconnect = delay
## Update server
# where to get updates for whitelists
# whitelists_host = sqlgrey.bouton.name
## Postmaster address
# who gets urgent notifications (DB is down for example)
# default or empty: don't send mail notifications
admin_mail = roots@crans.org
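sqlgrey only takes effect once Postfix queries it as a policy service; a hedged sketch of that wiring (hypothetical task, assuming the daemon listens on the default localhost:2501 noted above and that a `Restart postfix` handler exists):

# Hypothetical sketch, not part of this diff:
- name: Point Postfix at the sqlgrey policy daemon
  lineinfile:
    path: /etc/postfix/main.cf
    regexp: '^smtpd_recipient_restrictions'
    line: >-
      smtpd_recipient_restrictions = permit_mynetworks,
      reject_unauth_destination,
      check_policy_service inet:127.0.0.1:2501
  notify: Restart postfix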

View File

@ -17,20 +17,15 @@
roles:
- framadate
# Deploy CAS
- hosts: cas-srv.adm.crans.org
roles:
- django-cas
roles: ["django-cas"]
# Deploy Gitlab CI
- hosts: gateau.adm.crans.org
roles:
- docker
roles: ["docker"]
# Deploy TV
- hosts: cochon.adm.crans.org
roles:
- mumudvb
- hosts: ethercalc-srv.adm.crans.org
roles: ["ethercalc"]
# Deploy OwnCloud
- hosts: owncloud-srv.adm.crans.org
@ -113,7 +108,3 @@
- ftpsync
- rsync-mirror
- nginx-pubftp
- hosts: zephir.adm.crans.org,omnomnom.adm.crans.org
roles:
- backuppc