diff --git a/ansible.cfg b/ansible.cfg index ec5d521e..7a2e7b37 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -2,6 +2,10 @@ [defaults] +# Explicitely redefined some defaults to make play execution work +roles_path = ./roles +vars_plugins = ./vars_plugins + # Do not create .retry files retry_files_enabled = False @@ -21,6 +25,12 @@ forks = 15 # Some SSH connection will take time timeout = 60 +# Enable fact_caching +gathering = smart +fact_caching = jsonfile +fact_caching_connection = ~/.cache/ansible/json/ +fact_caching_timeout = 86400 + [privilege_escalation] # Use sudo to get priviledge access @@ -45,3 +55,18 @@ api_hostname = intranet.crans.org # Whether or not using vault_cranspasswords use_cpasswords = True + +# Specify cache plugin for re2o API. By default, cache nothing +cache = jsonfile + +# Only used for memcached plugin +# List of connection information for the memcached DBs +# Default is ['127.0.0.1:11211'] +# memcached_connection = ['127.0.0.1:11211'] + +# Time in second before the cache expired. 0 means never expire cache. +# Default is 24 hours. +timeout = 86400 + +# Default is 12 hours. +timeout_token = 43200 diff --git a/base.yml b/base.yml index 1f3d6506..a30279a9 100755 --- a/base.yml +++ b/base.yml @@ -35,9 +35,6 @@ # Scripts will tell users to go there to manage their account intranet_url: 'https://intranet.crans.org/' - # Backup password - backuppc_rsyncd_passwd: "{{ vault_backuppc_rsyncd_passwd }}" - # Will be in /usr/scripts/ crans_scripts_git: "http://gitlab.adm.crans.org/nounous/scripts.git" @@ -51,46 +48,13 @@ - ldap-client - openssh - sudo - - rsync-client - ntp-client - crans-scripts -# Deploy NFS only on campus -- hosts: crans_server - roles: - - nfs-common - # Deploy LDAP replica - hosts: odlyd.adm.crans.org,soyouz.adm.crans.org,fy.adm.crans.org,thot.adm.crans.org roles: [] # TODO -# Playbook to deploy autofs NFS -- hosts: crans_server,!odlyd.adm.crans.org,!zamok.adm.crans.org,!omnomnom.adm.crans.org,!owl.adm.crans.org,!owncloud-srv.adm.crans.org - roles: - - nfs-autofs - -# Deploy home permanent -- hosts: zamok.adm.crans.org,omnomnom.adm.crans.org,owl.adm.crans.org,owncloud-srv.adm.crans.org - roles: - - home-permanent - -# Redirect local mail to mailserver -- hosts: crans_server,test_vm,!redisdead.adm.crans.org - vars: - mail_root: root@crans.org - mail_snmp_server: smtp.adm.crans.org - mail_defaulthost: crans.org - roles: - - nullmailer - -# Send logs to thot -- hosts: server,!thot.adm.crans.org - vars: - rsyslog: - server: thot.adm.crans.org - roles: - - rsyslog-client - - hosts: otis.adm.crans.org roles: - ansible @@ -99,3 +63,17 @@ - hosts: zamok.adm.crans.org roles: - zamok-tools + +- import_playbook: plays/mail.yml +- import_playbook: plays/nfs.yml +- import_playbook: plays/logs.yml +- import_playbook: plays/backup.yml +- import_playbook: plays/network-interfaces.yml +- import_playbook: plays/monitoring.yml + +# Services that only apply to a subset of server +- import_playbook: plays/tv.yml +- import_playbook: plays/mailman.yml +- import_playbook: plays/dhcp.yml +- import_playbook: plays/dns.yml +- import_playbook: plays/wireguard.yml diff --git a/clean_servers.yml b/clean_servers.yml index 0f68d4cc..824f52e8 100755 --- a/clean_servers.yml +++ b/clean_servers.yml @@ -9,6 +9,7 @@ apt: state: absent name: + - at - arpwatch # old sniffing - collectd - collectd-utils # old monitoring @@ -28,6 +29,7 @@ - monitoring-plugins-standard - monitoring-plugins-basic - monitoring-plugins-common + - monit - libmonitoring-plugin-perl - snmp - nagios-plugins-contrib @@ 
-64,6 +66,9 @@ path: "{{ item }}" state: absent loop: + - /etc/bcfg2.conf + - /etc/bcfg2.conf.ucf-dist + - /etc/crans - /etc/cron.d/munin-crans - /etc/cron.d/munin-node - /etc/cron.d/munin-node.dpkg-dist @@ -76,15 +81,31 @@ - /etc/cron.d/autobcfg2 - /etc/cron.d/bcfg2-run - /etc/cron.d/pull-repos-scripts + - /etc/default/bcfg2 + - /etc/default/bcfg2.ucf-dist - /etc/munin - /etc/icinga2 + - /etc/init.d/bcfg2 + - /etc/nagios + - /etc/nagios-plugins - /etc/nut - /etc/nginx/sites-enabled/status - /etc/nginx/sites-available/status + - /etc/pnp4nagios - /var/local/aptdater - /etc/apt-dater-host.conf - /etc/sudoers.d/apt-dater-host - + - /etc/apt/apt.conf.d/70debconf + - /etc/apt/apt.conf.d/01aptitude + - /etc/cron.weekly/git_dirty_repo + - /etc/cron.daily/git_dirty_repo + - /etc/cron.hourly/bcfg2 + - /etc/cron.d/letsencrypt_check_cert + - /etc/nss-ldapd.conf + - /etc/cron.daily/bcfg2 + - /etc/monit + - /etc/ldap/ldap.conf + - /etc/letsencrypt/conf.d/localhost.ini # - name: Upgrade # apt: # upgrade: dist diff --git a/group_vars/all/vars.yaml b/group_vars/all/vars.yaml index a7a7c2a9..d0766e03 100644 --- a/group_vars/all/vars.yaml +++ b/group_vars/all/vars.yaml @@ -1,4 +1,5 @@ --- +# Custom header dirty: "{{lookup('pipe', 'git diff --quiet || echo dirty')}}" ansible_header: | +++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -11,3 +12,6 @@ ansible_header: | {% endif %} +++++++++++++++++++++++++++++++++++++++++++++++++++ + +# Crans subnets +adm_subnet: 10.231.136.0/24 diff --git a/host_vars/boeing.adm.crans.org.yml b/host_vars/boeing.adm.crans.org.yml new file mode 100644 index 00000000..1f2c6005 --- /dev/null +++ b/host_vars/boeing.adm.crans.org.yml @@ -0,0 +1,8 @@ +--- +postfix: + primary: false + secondary: true + public: true + dkim: true + mailman: false + titanic: true diff --git a/host_vars/redisdead.adm.crans.org.yml b/host_vars/redisdead.adm.crans.org.yml new file mode 100644 index 00000000..d42f5d22 --- /dev/null +++ b/host_vars/redisdead.adm.crans.org.yml @@ -0,0 +1,8 @@ +--- +postfix: + primary: true + secondary: false + public: true + dkim: true + mailman: true + titanic: false diff --git a/host_vars/sputnik.adm.crans.org.yml b/host_vars/sputnik.adm.crans.org.yml new file mode 100644 index 00000000..22d6f2e7 --- /dev/null +++ b/host_vars/sputnik.adm.crans.org.yml @@ -0,0 +1,8 @@ +--- +postfix: + primary: false + secondary: true + public: true + dkim: true + mailman: false + titanic: false diff --git a/host_vars/titanic.adm.crans.org.yml b/host_vars/titanic.adm.crans.org.yml new file mode 100644 index 00000000..1f2c6005 --- /dev/null +++ b/host_vars/titanic.adm.crans.org.yml @@ -0,0 +1,8 @@ +--- +postfix: + primary: false + secondary: true + public: true + dkim: true + mailman: false + titanic: true diff --git a/logos/crans.png b/logos/crans.png new file mode 100644 index 00000000..9c5e281a Binary files /dev/null and b/logos/crans.png differ diff --git a/lookup_plugins/re2oapi.py b/lookup_plugins/re2oapi.py index 9099c9e3..7202b30a 100644 --- a/lookup_plugins/re2oapi.py +++ b/lookup_plugins/re2oapi.py @@ -7,6 +7,8 @@ For a detailed example look at https://github.com/ansible/ansible/blob/3dbf89e8a The API Client has been adapted from https://gitlab.federez.net/re2o/re2oapi """ +from ansible.plugins.loader import cache_loader + from pathlib import Path import datetime import requests @@ -28,38 +30,67 @@ from ansible.config.manager import ConfigManager # Ansible Logger to stdout display = Display() -# Number of seconds before expiration where renewing the token is done -TIME_FOR_RENEW 
= 120 # Default name of the file to store tokens. Path $HOME/{DEFAUlt_TOKEN_FILENAME} DEFAULT_TOKEN_FILENAME = '.re2o.token' +# If no plugin is used, then use this as token timeout. +# Overriden by key timeout_token from ansible configuration. +TIME_FOR_RENEW = 43200 # 12 jours class Client: """ Class based client to contact re2o API. """ - def __init__(self, hostname, username, password, use_tls=True): + def __init__(self, hostname, username, password, + use_tls=True, cachetoken=None): """ :arg hostname: The hostname of the Re2o instance to use. :arg username: The username to use. :arg password: The password to use. :arg use_tls: A boolean to specify whether the client should use a a TLS connection. Default is True. Please, keep it. + :arg cachetoken: The cache to use to manage authentication token. + If it is None, then store the token in a file. """ self.use_tls = use_tls self.hostname = hostname self._username = username self._password = password - - self.token_file = Path.home() / DEFAULT_TOKEN_FILENAME + self._cachetoken = cachetoken + self.token_file = None + if self._cachetoken is None: + self.token_file = Path.home() / DEFAULT_TOKEN_FILENAME + display.vvv("Setting token file to {}".format(self.token_file)) + else: + try: + display.vvv("Using {} as cache plugin" + .format(self._cachetoken.plugin_name)) + except AttributeError: + # Happens when plugin_name is not implemented... + # For example with memcached + display.vvv("Using cache plugin specified in configuration.") display.v("Connecting to {hostname} as user {user}".format( hostname=to_native(self.hostname), user=to_native(self._username))) - try: - self.token = self._get_token_from_file() - except AnsibleFileNotFound: - display.vv("Force renew the token") - self._force_renew_token() + + @property + def token(self): + if self._cachetoken: + display.vvv("Trying to get token from cache.") + if self._cachetoken.contains("auth_token"): + display.vvv("Found token in cache.") + return self._cachetoken.get("auth_token") + else: + display.vvv("Token not found. 
Forcing renew.") + return self._force_renew_token() + else: + try: + token = self._get_token_from_file() + if token['expiration'] < datetime.datetime.now() + \ + datetime.timedelta(seconds=TIME_FOR_RENEW): + return self._force_renew_token() + except AnsibleError: + return self._force_renew_token() def _get_token_from_file(self): display.vv("Trying to fetch token from {}".format(self.token_file)) @@ -93,13 +124,18 @@ class Client: ) ) else: - display.vv("""Token successfully retreived from - file {token}""".format(token=self.token_file)) + display.vv("Token successfully retreived from " + "file {token}".format(token=self.token_file)) return ret def _force_renew_token(self): - self.token = self._get_token_from_server() - self._save_token_to_file() + token = self._get_token_from_server() + if self._cachetoken: + display.vvv("Storing authentication token in cache") + self._cachetoken.set("auth_token", token.get('token')) + else: + self._save_token_to_file(token) + return token.get('token') def _get_token_from_server(self): display.vv("Requesting a new token for {user}@{host}".format( @@ -139,7 +175,7 @@ class Client: def _parse_date(self, date, date_format="%Y-%m-%dT%H:%M:%S"): return datetime.datetime.strptime(date.split('.')[0], date_format) - def _save_token_to_file(self): + def _save_token_to_file(self, token): display.vv("Saving token to file {}".format(self.token_file)) try: # Read previous data to avoid erasures @@ -153,8 +189,8 @@ class Client: if self.hostname not in data.keys(): data[self.hostname] = {} data[self.hostname][self._username] = { - 'token': self.token['token'], - 'expiration': self.token['expiration'].isoformat(), + 'token': token['token'], + 'expiration': token['expiration'].isoformat(), } try: @@ -169,22 +205,6 @@ class Client: display.vv("Token successfully written to file {}" .format(self.token_file)) - def get_token(self): - """ - Retrieves the token to use for the current connection. - Automatically renewed if needed. - """ - if self.need_renew_token: - self._force_renew_token() - - return self.token['token'] - - @property - def need_renew_token(self): - return self.token['expiration'] < \ - datetime.datetime.now() + \ - datetime.timedelta(seconds=TIME_FOR_RENEW) - def _request(self, method, url, headers={}, params={}, *args, **kwargs): display.vv("Building the {method} request to {url}.".format( method=method.upper(), @@ -192,9 +212,9 @@ class Client: )) # Force the 'Authorization' field with the right token. - display.vvv("Forcing authentication token.") + display.vvv("Forcing authentication token in headers.") headers.update({ - 'Authorization': 'Token {}'.format(self.get_token()) + 'Authorization': 'Token {}'.format(self.token) }) # Use a json format unless the user already specified something @@ -213,10 +233,10 @@ class Client: # Force re-login to the server (case of a wrong token but valid # credentials) and then retry the request without catching errors. display.vv("Token refused. Trying to refresh the token.") - self._force_renew_token() + token = self._force_renew_token() headers.update({ - 'Authorization': 'Token {}'.format(self.get_token()) + 'Authorization': 'Token {}'.format(token) }) display.vv("Re-performing the request {method} {url}".format( method=method.upper(), @@ -320,6 +340,18 @@ class LookupModule(LookupBase): Queries the re2o API and returns the list of all machines whose role_type is role_name. + - cidrs, a list of subnet_names: Will get back the list of all cidrs + corresponding to this particular + subnet. 
+ + - prefixv6, a list of subnet_names: Will get back the list of all ipv6 + prefixes corresponding to this + particular subnet. + + - A simple endpoint: Will make a raw query to the API using this + this endpoint. + + If a term is not in the previous list, make a raw query to the API with endpoint term. @@ -338,8 +370,135 @@ class LookupModule(LookupBase): dnszones: "{{ lookup('re2oapi', 'dnszones') }}" tasks: - debug: var=dnszones + + The following play will use the debug module to output + all the ipv6 corresponding to adherents and adm networks + + - hosts: sputnik.adm.crans.org + vars: + prefixv6: "{{ lookup('re2oapi', 'previxv6', 'adherents', 'adm') }}" + tasks: + - debug: + msg: "{{ prefixv6 | ipwrap }}" + + The following will get the ip addresses of all servers with role + dns-authorithary-master on vlan 2. + + - hosts: sputnik.adm.crans.org + vars: + bind: + masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}" + tasks: + - name: Display ipv6 + debug: + ipv6: "{{ masters | json_query('servers[].interface[?vlan_id==`2`].ipv6[][].ipv6') }}" + + - name: Display ipv4 + debug: + ipv4: "{{ masters | json_query('servers[].interface[?vlan_id==`2`].ipv4[]') }}" """ + def _readconfig(self, section="re2o", key=None, default=None, + boolean=False, integer=False): + config = self._config + if not config: + return default + else: + if config.has_option(section, key): + display.vvv("Found key {} in configuration file".format(key)) + if boolean: + return config.getboolean(section, key) + elif integer: + return config.getint(section, key) + else: + return config.get(section, key) + else: + return default + + def _manage_cachedir(self, cachedir=None, plugin=None): + try: + self._uri = cachedir / plugin + except Exception: + raise AnsibleError("Undefined specification for cache plugin") + + display.vvv("Cache directory is {}".format(self._uri)) + if not self._uri.exists(): + # Creates Ansible cache directory with right permissions + # if it doesn't exist yet. + display.vvv("Cache directory doesn't exist. Creating it.") + try: + self._uri.mkdir(mode=0o700, parents=True) + except Exception as e: + raise AnsibleError("""Unable to create {dir}. 
+ Original error was : {err}""".format(dir=self._uri, + err=to_native(e))) + + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + config_manager = ConfigManager() + config_file = config_manager.data.get_setting(name="CONFIG_FILE").value + self._config = ConfigParser() + self._config.read(config_file) + + display.vvv("Using {} as configuration file.".format(config_file)) + + self._api_hostname = None + self._api_username = None + self._api_password = None + self._use_cpasswords = None + self._cache_plugin = None + self._cache = None + self._timeout = 86400 # 1 day + self._cachetoken = None + self._timeouttoken = TIME_FOR_RENEW # 12 hours + + if self._config.has_section("re2o"): + display.vvv("Found section re2o in configuration file") + + self._api_hostname = self._readconfig(key="api_hostname") + self._use_cpasswords = self._readconfig(key="use_cpasswords", + boolean=True) + self._cache_plugin = self._readconfig(key="cache") + self._timeout = self._readconfig(key="timeout", integer=True, + default=86400) + self._timeouttoken = self._readconfig(key="timeout_token", + integer=True, + default=TIME_FOR_RENEW) + + if self._cache_plugin is not None: + display.vvv("Using {} as cache plugin".format(self._cache_plugin)) + cachedir = Path.home() / ".cache/ansible/re2oapi" + + if self._cache_plugin == 'jsonfile': + self._manage_cachedir(cachedir=cachedir, plugin='json') + elif self._cache_plugin == 'yaml': + self._manage_cachedir(cachedir=cachedir, plugin='yaml') + elif self._cache_plugin == 'pickle': + self._manage_cachedir(cachedir=cachedir, plugin='pickle') + elif self._cache_plugin == 'memcached': + # requires packages python3-memcache and memcached + display.vvvv("Please make sure you have installed packages" + "python3-memcache and memcached" + ) + self._uri = self._readconfig(key='memcached_connection', + default=['127.0.0.1:11211'], + ) + else: + raise AnsibleError("Cache plugin {} not supported" + .format(self._cache_plugin)) + + self._cache = cache_loader.get(self._cache_plugin, + _uri=self._uri, + _timeout=self._timeout, + ) + self._cachetoken = cache_loader.get(self._cache_plugin, + _uri=self._uri, + _timeout=self._timeouttoken, + ) + + def run(self, terms, variables=None, api_hostname=None, api_username=None, api_password=None, use_tls=True): @@ -354,33 +513,20 @@ class LookupModule(LookupBase): :returns: A list of results to the specific queries. """ - config_manager = ConfigManager() - config_file = config_manager.data.get_setting(name="CONFIG_FILE").value - config = ConfigParser() - config.read(config_file) + # Use the hostname specified by the user if it exists. 
+ if api_hostname is not None: + display.vvv("Overriding api_hostname with {}".format(api_hostname)) + else: + api_hostname = self._api_hostname - use_cpasswords = False - - if config.has_section("re2o"): - display.vvv("Found section re2o in configuration file") - if config.has_option("re2o", "api_hostname"): - display.vvv("Found option api_hostname in config file") - api_hostname = config.get("re2o", "api_hostname") - display.vvv("Override api_hostname with {} from configuration" - .format(api_hostname)) - if config.has_option("re2o", "use_cpasswords"): - display.vvv("Found option use_cpasswords in config file") - use_cpasswords = config.getboolean("re2o", "use_cpasswords") - display.vvv("Override api_hostname with {} from configuration" - .format(use_cpasswords)) - - if api_hostname is None: + if self._api_hostname is None: raise AnsibleError(to_native( 'You must specify a hostname to contact re2oAPI' )) - if api_username is None and api_password is None and use_cpasswords: - display.vvv("Use cpasswords vault to get API credentials.") + if (api_username is None and api_password is None + and self._use_cpasswords): + display.vvv("Using cpasswords vault to get API credentials.") api_username = variables.get('vault_re2o_service_user') api_password = variables.get('vault_re2o_service_password') @@ -394,12 +540,12 @@ class LookupModule(LookupBase): 'You must specify a valid password to connect to re2oAPI' )) - api_client = Client(api_hostname, api_username, - api_password, use_tls=True) + api_client = Client(api_hostname, api_username, api_password, + use_tls=True, cachetoken=self._cachetoken) res = [] dterms = collections.deque(terms) - machines_roles = None # TODO : Cache this. + display.vvv("Lookup terms are {}".format(terms)) while dterms: term = dterms.popleft() @@ -411,14 +557,31 @@ class LookupModule(LookupBase): elif term == 'get_role': try: role_name = dterms.popleft() - roles, machines_roles = self._get_role(api_client, - role_name, - machines_roles, - ) + roles = self._get_role(api_client, role_name) res.append(roles) except IndexError: display.v("Error in re2oapi : No role_name provided") raise AnsibleError("role_name not found in arguments.") + elif term == 'prefixv6': + prefixes = [] + while dterms: + subnet_name = dterms.popleft() + prefixes.append([self._get_prefix(api_client, subnet_name)]) + if prefixes: + res.extend(prefixes) + else: + display.v("Error in re2oapi : No subnet_name provided") + raise AnsibleError("subnet_name not found in arguments.") + elif term == 'cidrs': + cidrs = [] + while dterms: + subnet_name = dterms.popleft() + cidrs.append([self._get_cidrs(api_client, subnet_name)]) + if cidrs: + res.extend(cidrs) + else: + display.v("Error in re2oapi : No subnet_name provided") + raise AnsibleError("subnet_name not found in arguments.") else: try: res.append(self._rawquery(api_client, term)) @@ -429,59 +592,185 @@ class LookupModule(LookupBase): .format(to_native(e))) return res + def _get_cache(self, key): + if self._cache: + return self._cache.get(key) + else: + return None + + def _set_cache(self, key, value): + if self._cache: + return self._cache.set(key, value) + else: + return None + + def _is_cached(self, key): + if self._cache: + return self._cache.contains(key) + else: + return False + def _getzones(self, api_client): display.v("Getting dns zone names") - zones = api_client.list('dns/zones') - zones_name = [zone["name"][1:] for zone in zones] + zones, zones_name = None, None + + if self._is_cached('dnszones'): + zones_name = self._get_cache('dnszones') + 
+ if zones_name is not None: + display.vvv("Found dnszones in cache.") + + else: + if self._is_cached('dns_zones'): + zones = self._get_cache('dns_zones') + if zones is not None: + display.vvv("Found dns/zones in cache.") + else: + display.vvv("Contacting the API, endpoint dns/zones...") + zones = api_client.list('dns/zones') + display.vvv("...Done") + zones_name = [zone["name"][1:] for zone in zones] + display.vvv("Storing dnszones in cache.") + self._set_cache('dnszones', zones_name) + display.vvv('\n') return zones_name def _getreverse(self, api_client): display.v("Getting dns reverse zones") - display.vvv("Contacting the API, endpoint dns/reverse-zones...") - zones = api_client.list('dns/reverse-zones') - display.vvv("...Done") - res = [] - for zone in zones: - if zone['ptr_records']: - display.vvv('Found PTR records') - subnets = [] - for net in zone['cidrs']: - net = netaddr.IPNetwork(net) - if net.prefixlen > 24: - subnets.extend(net.subnet(32)) - elif net.prefixlen > 16: - subnets.extend(net.subnet(24)) - elif net.prefixlen > 8: - subnets.extend(net.subnet(16)) - else: - subnets.extend(net.subnet(8)) - for subnet in subnets: - _address = netaddr.IPAddress(subnet.first) - rev_dns_a = _address.reverse_dns.split('.')[:-1] - if subnet.prefixlen == 8: - zone_name = '.'.join(rev_dns_a[3:]) - elif subnet.prefixlen == 16: - zone_name = '.'.join(rev_dns_a[2:]) - elif subnet.prefixlen == 24: - zone_name = '.'.join(rev_dns_a[1:]) - res.append(zone_name) - display.vvv("Found reverse zone {}".format(zone_name)) + + zones, res = None, None + + if self._is_cached('dnsreverse'): + res = self._get_cache('dnsreverse') + + if res is not None: + display.vvv("Found dnsreverse in cache.") + + else: + if self._is_cached('dns_reverse-zones'): + zones = self._get_cache('dns_reverse-zones') + + if zones is not None: + display.vvv("Found dns/reverse-zones in cache.") + else: + display.vvv("Contacting the API, endpoint dns/reverse-zones..") + zones = api_client.list('dns/reverse-zones') + display.vvv("...Done") + + display.vvv("Trying to format dns reverse in a nice way.") + res = [] + for zone in zones: + if zone['ptr_records']: + display.vvv('Found PTR records') + subnets = [] + for net in zone['cidrs']: + net = netaddr.IPNetwork(net) + if net.prefixlen > 24: + subnets.extend(net.subnet(32)) + elif net.prefixlen > 16: + subnets.extend(net.subnet(24)) + elif net.prefixlen > 8: + subnets.extend(net.subnet(16)) + else: + subnets.extend(net.subnet(8)) + + for subnet in subnets: + _address = netaddr.IPAddress(subnet.first) + rev_dns_a = _address.reverse_dns.split('.')[:-1] + if subnet.prefixlen == 8: + zone_name = '.'.join(rev_dns_a[3:]) + elif subnet.prefixlen == 16: + zone_name = '.'.join(rev_dns_a[2:]) + elif subnet.prefixlen == 24: + zone_name = '.'.join(rev_dns_a[1:]) + res.append(zone_name) + display.vvv("Found reverse zone {}".format(zone_name)) + if zone['ptr_v6_records']: display.vvv("Found PTR v6 record") - net = netaddr.IPNetwork(zone['prefix_v6']+'/'+str(zone['prefix_v6_length'])) - net_class = max(((net.prefixlen -1) // 4) +1, 1) + net = netaddr.IPNetwork(zone['prefix_v6'] + + '/' + + str(zone['prefix_v6_length'])) + net_class = max(((net.prefixlen - 1) // 4) + 1, 1) zone6_name = ".".join( - netaddr.IPAddress(net.first).reverse_dns.split('.')[32 - net_class:])[:-1] + netaddr.IPAddress(net.first) + .reverse_dns.split('.')[32 - net_class:])[:-1] res.append(zone6_name) display.vvv("Found reverse zone {}".format(zone6_name)) - return list(set(res)) + + display.vvv("Storing dns reverse zones in cache.") 
+ self._set_cache('dnsreverse', list(set(res))) + + display.vvv('\n') + return res def _rawquery(self, api_client, endpoint): - display.v("Make a raw query to endpoint {}".format(endpoint)) - return api_client.list(endpoint) + res = None + if self._is_cached(endpoint.replace('/', '_')): + res = self._get_cache(endpoint.replace('/', '_')) + if res is not None: + display.vvv("Found {} in cache.".format(endpoint)) + else: + display.v("Making a raw query to {host}/api/{endpoint}" + .format(host=self._api_hostname, endpoint=endpoint)) + res = api_client.list(endpoint) + display.vvv("Storing result in cache.") + self._set_cache(endpoint.replace('/', '_'), res) - def _get_role(self, api_client, role_name, machines_roles): - if machines_roles is None: - machines_roles = api_client.list("machines/role") - return list(filter(lambda machine: machine["role_type"] == role_name, - machines_roles)), machines_roles + display.vvv('\n') + return res + + def _get_role(self, api_client, role_name): + res, machines_roles = None, None + + if self._is_cached(role_name): + res = self._get_cache(role_name) + + if res is not None: + display.vvv("Found {} in cache.".format(role_name)) + else: + if self._is_cached("machines_role"): + machines_roles = self._get_cache("machines_role") + + if machines_roles is not None: + display.vvv("Found machines/roles in cache.") + else: + machines_roles = api_client.list("machines/role") + display.vvv("Storing machines/role in cache.") + self._set_cache("machines_role", machines_roles) + + res = list(filter(lambda m: m["role_type"] == role_name, + machines_roles)) + display.vvv("Storing {} in cache.".format(role_name)) + self._set_cache(role_name, res) + + display.vvv('\n') + return res + + def _get_prefix(self, api_client, subnet_name): + prefixv6 = None + if self._is_cached(subnet_name + '_v6'): + display.vvv("Found subnet {} in cache.".format(subnet_name)) + prefixv6 = self._get_cache(subnet_name + '_v6') + else: + Mtypes = self._rawquery(api_client, 'machines/iptype') + iptype = list(filter(lambda x: x['type'] == subnet_name, Mtypes)) + prefixv6 = iptype[0]['prefix_v6'] + '/64' + display.vvv("Storing subnet {} in cache".format(subnet_name)) + self._set_cache(subnet_name + '_v6', prefixv6) + return prefixv6 + + def _get_cidrs(self, api_client, subnet_name): + cidrs = None + if self._is_cached(subnet_name): + display.vvv("Found subnet {} in cache.".format(subnet_name)) + cidrs = self._get_cache(subnet_name) + else: + Mtypes = self._rawquery(api_client, 'machines/iptype') + iptype = list(filter(lambda x: x['type'] == subnet_name, Mtypes))[0] + ips = iptype['domaine_ip_start'] + ipe = iptype['domaine_ip_stop'] + cidrs = list(map(lambda a: str(a), netaddr.iprange_to_cidrs(ips, ipe))) + display.vvv("Storing subnet {} in cache".format(subnet_name)) + self._set_cache(subnet_name, cidrs) + return cidrs diff --git a/monitoring.yml b/monitoring.yml deleted file mode 100755 index f2084bda..00000000 --- a/monitoring.yml +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env ansible-playbook ---- -# Deploy Prometheus -- hosts: fyre.adm.crans.org - vars: - # Prometheus targets.json - prometheus_targets: - - targets: "{{ groups['server'] | list | sort }}" - prometheus_ups_snmp_targets: - - targets: [pulsar.adm.crans.org] - prometheus_unifi_snmp_targets: - - targets: "{{ groups['crans_unifi'] | list | sort }}" - prometheus_blackbox_targets: - - targets: - - https://crans.org - - https://www.crans.org - - https://grafana.crans.org - - https://wiki.crans.org - - https://pad.crans.org - 
prometheus_apache_targets: - - targets: [zamok.adm.crans.org] - snmp_unifi_password: "{{ vault_snmp_unifi_password }}" - roles: - - prometheus - - prometheus-alertmanager - - prometheus-snmp-exporter - - prometheus-blackbox-exporter - -# Monitor all hosts -- hosts: server,test_vm - roles: - - prometheus-node-exporter - -# Export apache metrics -- hosts: zamok.adm.crans.org - roles: - - prometheus-apache-exporter - -# Configure HP RAID monitoring -# You can list SCSI drives with `lsscsi -g` -- hosts: fyre.adm.crans.org,gateau.adm.crans.org - roles: - - smartd-hp-smartarray - -# Deploy grafana -- hosts: fyre.adm.crans.org - vars: - grafana_root_url: https://grafana.crans.org - ldap_base: 'dc=crans,dc=org' - ldap_master_ipv4: '10.231.136.19' - ldap_user_tree: "cn=Utilisateurs,{{ ldap_base }}" - ldap_grafana_bind_dn: "cn=grafana,ou=service-users,{{ ldap_base }}" - ldap_grafana_passwd: "{{ vault_ldap_grafana_passwd }}" - roles: - - grafana - -# Deploy NinjaBot -- hosts: fyre.adm.crans.org - roles: - - ninjabot - -# Monitor mailq with a special text exporter -- hosts: redisdead.adm.crans.org - roles: - - prometheus-node-exporter-postfix - -# Monitor logs with mtail -- hosts: thot.adm.crans.org - roles: - - mtail diff --git a/network.yml b/network.yml index 23160615..b033433a 100755 --- a/network.yml +++ b/network.yml @@ -1,54 +1,5 @@ #!/usr/bin/env ansible-playbook --- -# Deploy tunnel -- hosts: sputnik.adm.crans.org - vars: - debian_mirror: http://mirror.crans.org/debian - wireguard: - sputnik: true - private_key: "{{ vault_wireguard_sputnik_private_key }}" - peer_public_key: "{{ vault_wireguard_boeing_public_key }}" - roles: - - wireguard - -- hosts: boeing.adm.crans.org - vars: - # Debian mirror on adm - debian_mirror: http://mirror.adm.crans.org/debian - wireguard: - sputnik: false - if: ens20 - private_key: "{{ vault_wireguard_boeing_private_key }}" - peer_public_key: "{{ vault_wireguard_sputnik_public_key }}" - roles: - - wireguard - -# Deploy DHCP server -- hosts: dhcp.adm.crans.org - vars: - dhcp: - authoritative: true - roles: - - isc-dhcp-server - -# Deploy recursive DNS cache server -- hosts: odlyd.adm.crans.org - roles: - - bind-recursive - -# Deploy authoritative DNS server -- hosts: silice.adm.crans.org,sputnik.adm.crans.org,boeing.adm.crans.org - vars: - certbot_dns_secret: "{{ vault_certbot_dns_secret }}" - certbot_adm_dns_secret: "{{ vault_certbot_adm_dns_secret }}" - bind: - masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}" - slaves: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-slave')[0] }}" - zones: "{{ lookup('re2oapi', 'dnszones') }}" - reverse: "{{ lookup('re2oapi', 'dnsreverse') }}" - roles: - - bind-authoritative - # Deploy reverse proxy - hosts: bakdaur.adm.crans.org,frontdaur.adm.crans.org vars: @@ -75,7 +26,7 @@ - {from: lutim.crans.org, to: 10.231.136.69} - {from: zero.crans.org, to: 10.231.136.76} - {from: pad.crans.org, to: "10.231.136.76:9001"} - - {from: ethercalc.crans.org, to: 10.231.136.203} + - {from: ethercalc.crans.org, to: "10.231.136.203:8000"} - {from: mediadrop.crans.org, to: 10.231.136.106} - {from: videos.crans.org, to: 10.231.136.106} - {from: video.crans.org, to: 10.231.136.106} @@ -190,28 +141,3 @@ remote_as: 8218 roles: - quagga-ipv6 - -# Deploy postfix on mail servers -- hosts: titanic.adm.crans.org - vars: - postfix: - primary: false - secondary: true - public: true - dkim: true - mailman: false - titanic: true - roles: - - postfix - -- hosts: sputnik.adm.crans.org - vars: - postfix: - primary: false - secondary: 
true - public: true - dkim: true - mailman: false - titanic: false - roles: - - postfix diff --git a/plays/backup.yml b/plays/backup.yml new file mode 100755 index 00000000..a85202e0 --- /dev/null +++ b/plays/backup.yml @@ -0,0 +1,15 @@ +#!/usr/bin/env ansible-playbook +--- +# zephir backs up virtual machines. +# omnomnom backs up home dirs. + +# Rsync client on all servers to allow backup +- hosts: server + vars: + # Backup password + backuppc_rsyncd_passwd: "{{ vault_backuppc_rsyncd_passwd }}" + roles: ["rsync-client"] + +# Backuppc backup software +- hosts: zephir.adm.crans.org,omnomnom.adm.crans.org + roles: ["backuppc"] diff --git a/plays/dhcp.yml b/plays/dhcp.yml new file mode 100755 index 00000000..07cd132b --- /dev/null +++ b/plays/dhcp.yml @@ -0,0 +1,8 @@ +#!/usr/bin/env ansible-playbook +--- +# Deploy DHCP server +- hosts: dhcp.adm.crans.org + vars: + dhcp: + authoritative: true + roles: ["isc-dhcp-server"] diff --git a/plays/dns.yml b/plays/dns.yml new file mode 100755 index 00000000..7f133c1a --- /dev/null +++ b/plays/dns.yml @@ -0,0 +1,17 @@ +#!/usr/bin/env ansible-playbook +--- +# Deploy recursive DNS cache server +- hosts: odlyd.adm.crans.org + roles: ["bind-recursive"] + +# Deploy authoritative DNS server +- hosts: silice.adm.crans.org,sputnik.adm.crans.org,boeing.adm.crans.org + vars: + certbot_dns_secret: "{{ vault_certbot_dns_secret }}" + certbot_adm_dns_secret: "{{ vault_certbot_adm_dns_secret }}" + bind: + masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}" + slaves: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-slave')[0] }}" + zones: "{{ lookup('re2oapi', 'dnszones') }}" + reverse: "{{ lookup('re2oapi', 'dnsreverse') }}" + roles: ["bind-authoritative"] diff --git a/plays/logs.yml b/plays/logs.yml new file mode 100755 index 00000000..77cc27b0 --- /dev/null +++ b/plays/logs.yml @@ -0,0 +1,11 @@ +#!/usr/bin/env ansible-playbook +--- +# thot is the log server. +# Servers need to send their logs to thot. + +# Send logs to thot +- hosts: server,!thot.adm.crans.org + vars: + rsyslog: + server: thot.adm.crans.org + roles: ["rsyslog-client"] diff --git a/plays/mail.yml b/plays/mail.yml new file mode 100755 index 00000000..b9efe607 --- /dev/null +++ b/plays/mail.yml @@ -0,0 +1,14 @@ +#!/usr/bin/env ansible-playbook +--- +# Redisdead is the main MX. +# Soyouz and titanic are the old backup MX. +# Boeing and sputnik are the new MX (still being installed?). +# All other servers use nullmailer to send local mail to the Crans SMTP server. + +# Redirect local mail to mailserver +- hosts: crans_server,!redisdead.adm.crans.org,!soyouz.adm.crans.org,!titanic.adm.crans.org,!boeing.adm.crans.org,!sputnik.adm.crans.org,!zamok.adm.crans.org + vars: + mail_root: root@crans.org + mail_snmp_server: smtp.adm.crans.org + mail_defaulthost: crans.org + roles: ["nullmailer"] diff --git a/plays/mailman.yml b/plays/mailman.yml new file mode 100755 index 00000000..0a51655e --- /dev/null +++ b/plays/mailman.yml @@ -0,0 +1,23 @@ +#!/usr/bin/env ansible-playbook +--- +# Deploy Mailman +- hosts: redisdead.adm.crans.org + vars: + mailman: + site_list: "nounou" + default_url: "https://lists.crans.org/" + default_host: "lists.crans.org" + default_language: "fr" + auth_basic: | + "On n'aime pas les spambots, donc on a mis un mot de passe. 
Le login est Stop et le mot de passe est Spam."; + spamassassin: "SpamAssassin_crans" + smtphost: "smtp.adm.crans.org" + mynetworks: ['138.231.0.0/16', '185.230.76.0/22', '2a0c:700:0::/40'] + nginx: + ssl: + cert: /etc/letsencrypt/live/crans.org/fullchain.pem + key: /etc/letsencrypt/live/crans.org/privkey.pem + trusted_cert: /etc/letsencrypt/live/crans.org/chain.pem + roles: + - mailman + - nginx-mailman diff --git a/plays/monitoring.yml b/plays/monitoring.yml new file mode 100755 index 00000000..03493c7d --- /dev/null +++ b/plays/monitoring.yml @@ -0,0 +1,62 @@ +#!/usr/bin/env ansible-playbook +--- +# Deploy Prometheus and Grafana on monitoring server +- hosts: fyre.adm.crans.org + vars: + # Prometheus targets.json + prometheus: + node_targets: "{{ groups['server'] | list | sort }}" + ups_snmp_targets: + - pulsar.adm.crans.org # 0B + - quasar.adm.crans.org # 4J + unifi_snmp_targets: "{{ groups['crans_unifi'] | list | sort }}" + blackbox_targets: + - https://crans.org + - https://www.crans.org + - https://grafana.crans.org + - https://wiki.crans.org + - https://pad.crans.org + apache_targets: [zamok.adm.crans.org] + + snmp_unifi_password: "{{ vault_snmp_unifi_password }}" + + grafana: + root_url: https://grafana.crans.org + ldap_bind_dn: "cn=grafana,ou=service-users,{{ ldap_base }}" + ldap_passwd: "{{ vault_ldap_grafana_passwd }}" + + ldap_base: 'dc=crans,dc=org' + ldap_master_ipv4: '10.231.136.19' + ldap_user_tree: "cn=Utilisateurs,{{ ldap_base }}" + roles: + - prometheus + - prometheus-alertmanager + - prometheus-snmp-exporter + - prometheus-blackbox-exporter + - ninjabot + - grafana + +# Monitor all hosts +- hosts: server,test_vm + vars: + adm_ipv4: "{{ ansible_all_ipv4_addresses | ipaddr(adm_subnet) | first }}" + roles: ["prometheus-node-exporter"] + +# Export apache metrics +- hosts: zamok.adm.crans.org + vars: + adm_ipv4: "{{ ansible_all_ipv4_addresses | ipaddr(adm_subnet) | first }}" + roles: ["prometheus-apache-exporter"] + +# Configure HP RAID monitoring +# You can list SCSI drives with `lsscsi -g` +- hosts: fyre.adm.crans.org,gateau.adm.crans.org + roles: ["smartd-hp-smartarray"] + +# Monitor mailq with a special text exporter +- hosts: redisdead.adm.crans.org + roles: ["prometheus-node-exporter-postfix"] + +# Monitor logs with mtail +- hosts: thot.adm.crans.org + roles: ["mtail"] diff --git a/interfaces.yml b/plays/network-interfaces.yml similarity index 95% rename from interfaces.yml rename to plays/network-interfaces.yml index 04b2d828..930d315c 100755 --- a/interfaces.yml +++ b/plays/network-interfaces.yml @@ -14,7 +14,7 @@ - switch - fil -- hosts: boeing.adm.crans.org,cochon.adm.crans.org,tracker.adm.crans.org,voyager.adm.crans.org,lutim.adm.crans.org,gateau.adm.crans.org,owncloud-srv.adm.crans.org,charybde.adm.crans.org,cas-srv.adm.crans.org,fyre.adm.crans.org,silice.adm.crans.org,frontdaur.adm.crans.org,bakdaur.adm.crans.org +- hosts: boeing.adm.crans.org,cochon.adm.crans.org,tracker.adm.crans.org,voyager.adm.crans.org,lutim.adm.crans.org,gateau.adm.crans.org,owncloud-srv.adm.crans.org,charybde.adm.crans.org,cas-srv.adm.crans.org,fyre.adm.crans.org,silice.adm.crans.org,frontdaur.adm.crans.org,bakdaur.adm.crans.org,ethercalc-srv.adm.crans.org,alice.adm.crans.org vars: vlan: - name: srv @@ -66,5 +66,4 @@ dns: 185.230.78.152 185.230.78.4 dns_search: crans.org ifnames: "{{ ifaces | json_query('results[?item==`adh`].stdout') }}" - roles: - - interfaces + roles: ["interfaces"] diff --git a/plays/nfs.yml b/plays/nfs.yml new file mode 100755 index 00000000..61ccb4da --- /dev/null 
+++ b/plays/nfs.yml @@ -0,0 +1,18 @@ +#!/usr/bin/env ansible-playbook +--- +# Odlyd do not use NFS as it is the master backup. +# Servers outside of campus do not use NFS. +# zamok, omnomnom, owl and owncloud-srv uses permanently mounted home dirs. +# all other servers on campus uses autofs to dynamically mount home dirs. + +# Deploy NFS only on campus +- hosts: crans_server + roles: ["nfs-common"] + +# Deploy autofs NFS +- hosts: crans_server,!odlyd.adm.crans.org,!zamok.adm.crans.org,!omnomnom.adm.crans.org,!owl.adm.crans.org,!owncloud-srv.adm.crans.org + roles: ["nfs-autofs"] + +# Deploy home permanent +- hosts: zamok.adm.crans.org,omnomnom.adm.crans.org,owl.adm.crans.org,owncloud-srv.adm.crans.org + roles: ["home-permanent"] diff --git a/plays/tv.yml b/plays/tv.yml new file mode 100755 index 00000000..2410f772 --- /dev/null +++ b/plays/tv.yml @@ -0,0 +1,6 @@ +#!/usr/bin/env ansible-playbook +--- +# Cochon contains DVB cards + +- hosts: cochon.adm.crans.org + roles: ["mumudvb"] diff --git a/plays/wireguard.yml b/plays/wireguard.yml new file mode 100755 index 00000000..2de147e1 --- /dev/null +++ b/plays/wireguard.yml @@ -0,0 +1,22 @@ +#!/usr/bin/env ansible-playbook +--- +# Deploy tunnel +- hosts: sputnik.adm.crans.org + vars: + debian_mirror: http://mirror.crans.org/debian + wireguard: + sputnik: true + private_key: "{{ vault_wireguard_sputnik_private_key }}" + peer_public_key: "{{ vault_wireguard_boeing_public_key }}" + roles: ["wireguard"] + +- hosts: boeing.adm.crans.org + vars: + # Debian mirror on adm + debian_mirror: http://mirror.adm.crans.org/debian + wireguard: + sputnik: false + if: ens20 + private_key: "{{ vault_wireguard_boeing_private_key }}" + peer_public_key: "{{ vault_wireguard_sputnik_public_key }}" + roles: ["wireguard"] diff --git a/postfix.yml b/postfix.yml new file mode 100755 index 00000000..8f66e28b --- /dev/null +++ b/postfix.yml @@ -0,0 +1,45 @@ +#!/usr/bin/env ansible-playbook +# Postfix playbook +--- +- hosts: sputnik.adm.crans.org, boeing.adm.crans.org, redisdead.adm.crans.org, titanic.adm.crans.org + vars: + certbot: + dns_rfc2136_name: certbot_challenge. + dns_rfc2136_secret: "{{ vault_certbot_dns_secret }}" + mail: root@crans.org + certname: crans.org + domains: "*.crans.org" + bind: + masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}" + opendkim: + private_key: "{{ vault_opendkim_private_key }}" + policyd: + mail: root@crans.org + exemptions: "{{ lookup('re2oapi', 'get_role', 'user-server')[0] }}" + mynetworks: + ipv4: + "{{ lookup('re2oapi', 'cidrs', 'serveurs', + 'adherents', + 'wifi-new-pub', + 'fil-new-pub', + 'fil-pub', + 'wifi-new-serveurs', + 'wifi-new-adherents', + 'wifi-new-federez', + 'fil-new-serveurs', + 'fil-new-adherents') + | flatten }}" + ipv6: + "{{ lookup('re2oapi', 'prefixv6', 'adherents', + 'fil-new-pub', + 'wifi-new-pub') + | flatten }}" + roles: + - certbot + - postfix + - opendkim + - policyd + +- hosts: redisdead.adm.crans.org + roles: + - sqlgrey diff --git a/radius.yml b/radius.yml new file mode 100755 index 00000000..2727fa78 --- /dev/null +++ b/radius.yml @@ -0,0 +1,15 @@ +#!/usr/bin/env ansible-playbook +--- +- hosts: eap.adm.crans.org, odlyd.adm.crans.org, radius.adm.crans.org + vars: + certbot: + dns_rfc2136_name: certbot_challenge. 
+ dns_rfc2136_secret: "{{ vault_certbot_dns_secret }}" + mail: root@crans.org + certname: crans.org + domains: "crans.org" + bind: + masters: "{{ lookup('re2oapi', 'get_role', 'dns-authoritary-master')[0] }}" + roles: + - certbot + - freeradius diff --git a/roles/certbot/templates/letsencrypt/conf.d/certname.ini.j2 b/roles/certbot/templates/letsencrypt/conf.d/certname.ini.j2 index 837a60a9..1f8350b7 100644 --- a/roles/certbot/templates/letsencrypt/conf.d/certname.ini.j2 +++ b/roles/certbot/templates/letsencrypt/conf.d/certname.ini.j2 @@ -1,7 +1,7 @@ {{ ansible_header | comment(decoration='# ') }} -# Pour appliquer cette conf et générer la conf de renewal : -# certbot --config wildcard.ini certonly +# To generate the certificate, please use the following command +# certbot --config /etc/letsencrypt/conf.d/{{ certbot.certname }}.ini certonly # Use a 4096 bit RSA key instead of 2048 rsa-key-size = 4096 diff --git a/roles/common-tools/tasks/main.yml b/roles/common-tools/tasks/main.yml index 70488e80..7189b872 100644 --- a/roles/common-tools/tasks/main.yml +++ b/roles/common-tools/tasks/main.yml @@ -4,8 +4,10 @@ update_cache: true install_recommends: false name: + - apt-file - sudo - molly-guard # prevent reboot + - debsums - ntp # network time sync - apt # better than apt-get - nano # for vulcain diff --git a/roles/ethercalc/tasks/main.yml b/roles/ethercalc/tasks/main.yml new file mode 100644 index 00000000..e5e04bfa --- /dev/null +++ b/roles/ethercalc/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: Install Redis and NPM + apt: + update_cache: true + name: + - redis-server + - nodejs + - npm + register: apt_result + retries: 3 + until: apt_result is succeeded + +- name: Install EtherCalc + npm: + name: ethercalc + global: true + state: latest + register: npm_result + retries: 3 + until: npm_result is succeeded + +- name: Install EtherCalc systemd unit + template: + src: systemd/system/ethercalc.service.j2 + dest: /etc/systemd/system/ethercalc.service + +- name: Activate EtherCalc service + systemd: + daemon_reload: true + name: ethercalc + enabled: true + state: started + +- name: Indicate role in motd + template: + src: update-motd.d/05-service.j2 + dest: /etc/update-motd.d/05-ethercalc + mode: 0755 diff --git a/roles/ethercalc/templates/systemd/system/ethercalc.service.j2 b/roles/ethercalc/templates/systemd/system/ethercalc.service.j2 new file mode 100644 index 00000000..22fb27e5 --- /dev/null +++ b/roles/ethercalc/templates/systemd/system/ethercalc.service.j2 @@ -0,0 +1,17 @@ +{{ ansible_header | comment }} + +[Unit] +Description=Ethercalc +Requires=redis-server.service + +[Service] +Type=simple +Restart=on-failure +RestartSec=3 +User=redis +Group=redis +PIDFile=/var/run/ethercalc.pid +ExecStart=/usr/bin/ethercalc --host 10.231.136.203 --port 8000 + +[Install] +WantedBy=multi-user.target diff --git a/roles/ethercalc/templates/update-motd.d/05-service.j2 b/roles/ethercalc/templates/update-motd.d/05-service.j2 new file mode 100755 index 00000000..00b76513 --- /dev/null +++ b/roles/ethercalc/templates/update-motd.d/05-service.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/tail +14 +{{ ansible_header | comment }} +> EtherCalc a été déployé sur cette machine. Voir /usr/lib/node_modules/ethercalc/. 
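The ethercalc role above starts the service on 10.231.136.203:8000, the address the reverse proxy in network.yml now forwards ethercalc.crans.org to. A minimal smoke-test sketch for that deployment, assuming the host and port from the systemd template and the requests library; the script itself is illustrative and not shipped by the role:

    #!/usr/bin/env python3
    """Minimal smoke test for the EtherCalc deployment (illustrative, not part of the role)."""
    import sys

    import requests

    # Host and port taken from roles/ethercalc/templates/systemd/system/ethercalc.service.j2
    ETHERCALC_URL = "http://10.231.136.203:8000/"

    def main():
        try:
            # Any HTTP answer means the node service behind the reverse proxy target is up.
            resp = requests.get(ETHERCALC_URL, timeout=5)
        except requests.RequestException as err:
            print("EtherCalc unreachable: {}".format(err))
            return 1
        print("EtherCalc answered with HTTP {}".format(resp.status_code))
        return 0 if resp.ok else 1

    if __name__ == "__main__":
        sys.exit(main())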
diff --git a/roles/freeradius/tasks/main.yml b/roles/freeradius/tasks/main.yml new file mode 100644 index 00000000..36df1917 --- /dev/null +++ b/roles/freeradius/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Symlink radius certificates + file: + src: /etc/letsencrypt/live/crans.org/{{ item }} + dest: /etc/freeradius/3.0/certs/letsencrypt/{{ item }} + state: link + force: yes + loop: + - fullchain.pem + - privkey.pem + +- name: Set permissions on certificates + file: + path: /etc/letsencrypt/{{ item }} + group: freerad + mode: '0755' + recurse: yes + loop: + - live + - archive diff --git a/roles/grafana/tasks/main.yml b/roles/grafana/tasks/main.yml index 1d472f15..6b290178 100644 --- a/roles/grafana/tasks/main.yml +++ b/roles/grafana/tasks/main.yml @@ -43,7 +43,7 @@ loop: - section: server option: root_url - value: "{{ grafana_root_url }}" + value: "{{ grafana.root_url }}" - section: session # This will break with HTTPS option: cookie_secure value: "true" diff --git a/roles/grafana/templates/ldap.toml.j2 b/roles/grafana/templates/ldap.toml.j2 index 8fee2473..1fd96e12 100644 --- a/roles/grafana/templates/ldap.toml.j2 +++ b/roles/grafana/templates/ldap.toml.j2 @@ -21,10 +21,10 @@ ssl_skip_verify = false # client_key = "/path/to/client.key" # Search user bind dn -bind_dn = "{{ ldap_grafana_bind_dn }}" +bind_dn = "{{ grafana.ldap_bind_dn }}" # Search user bind password # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;""" -bind_password = '{{ ldap_grafana_passwd }}' +bind_password = '{{ grafana.ldap_passwd }}' # User search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)" search_filter = "(cn=%s)" diff --git a/roles/ldap-client/tasks/main.yml b/roles/ldap-client/tasks/main.yml index 3912f981..8195e6f1 100644 --- a/roles/ldap-client/tasks/main.yml +++ b/roles/ldap-client/tasks/main.yml @@ -36,7 +36,7 @@ # Disable passwd and chsh - name: Copy passwd and chsh scripts template: - src: bin/passwd.j2 + src: "bin/{{ item }}.j2" dest: "/usr/local/bin/{{ item }}" mode: 0755 loop: diff --git a/roles/ldap-client/templates/bin/chsh.j2 b/roles/ldap-client/templates/bin/chsh.j2 new file mode 100644 index 00000000..37462f78 --- /dev/null +++ b/roles/ldap-client/templates/bin/chsh.j2 @@ -0,0 +1,4 @@ +#!/bin/sh +{{ ansible_header | comment }} +echo "Pour changer votre shell,\nAllez sur l'intranet : {{intranet_url}}" + diff --git a/roles/ldap-client/templates/bin/chsh.ldap.j2 b/roles/ldap-client/templates/bin/chsh.ldap.j2 new file mode 100644 index 00000000..175fdfc1 --- /dev/null +++ b/roles/ldap-client/templates/bin/chsh.ldap.j2 @@ -0,0 +1,4 @@ +#!/bin/sh +{{ ansible_header | comment }} +echo "Pour changer votre shell,\nAllez sur l'intranet : {{intranet_url}}" +echo "De toutes façons la vraie commande aurait pas marché, on installe pas nslcd-utils sur les serveurs normalement." 
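The ldap-client change above templates the source path per loop item (src: "bin/{{ item }}.j2") instead of always rendering bin/passwd.j2. A small sketch of how that source string expands for each loop value, using the jinja2 library directly as a stand-in for Ansible's templating; the item values are the ones the task loops over:

    #!/usr/bin/env python3
    """Shows how a templated src expands per loop item (illustrative sketch)."""
    from jinja2 import Template

    SRC_TEMPLATE = "bin/{{ item }}.j2"

    # The task installs two wrapper scripts into /usr/local/bin.
    for item in ("passwd", "chsh"):
        src = Template(SRC_TEMPLATE).render(item=item)
        dest = "/usr/local/bin/{}".format(item)
        # With the templated src, chsh is rendered from bin/chsh.j2 rather than bin/passwd.j2.
        print("{} -> {}".format(src, dest))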
diff --git a/roles/mailman/handlers/main.yml b/roles/mailman/handlers/main.yml new file mode 100644 index 00000000..77550456 --- /dev/null +++ b/roles/mailman/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Reload mailman + systemd: + name: mailman + state: reloaded diff --git a/roles/mailman/tasks/main.yml b/roles/mailman/tasks/main.yml new file mode 100644 index 00000000..53ae09de --- /dev/null +++ b/roles/mailman/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Install mailman and SpamAssassin + apt: + update_cache: true + name: + - mailman + - spamassassin + register: apt_result + retries: 3 + until: apt_result is succeeded + +- name: Deploy mailman config + template: + src: "mailman/{{ item }}.j2" + dest: "/etc/mailman/{{ item }}" + mode: 0755 + loop: + - mm_cfg.py + - create.html + notify: Reload mailman + +# Fanciness +- name: Deploy crans logo + copy: + src: ../../../logos/crans.png + dest: /usr/share/images/mailman/crans.png + +- name: Deploy crans logo + template: + src: usr/lib/mailman/Mailman/htmlformat.py.j2 + dest: /usr/lib/mailman/Mailman/htmlformat.py + mode: 0755 + notify: Reload mailman + +- name: Indicate role in motd + template: + src: update-motd.d/05-mailman.j2 + dest: /etc/update-motd.d/05-mailman + mode: 0755 diff --git a/roles/mailman/templates/mailman/create.html.j2 b/roles/mailman/templates/mailman/create.html.j2 new file mode 100644 index 00000000..68236402 --- /dev/null +++ b/roles/mailman/templates/mailman/create.html.j2 @@ -0,0 +1,13 @@ +{{ ansible_header | comment('xml') }} + + + + +Creation de mailing list + + + +

Creation de mailing list

+Il faut s'adresser a nounou arobase crans point org. + + diff --git a/roles/mailman/templates/mailman/mm_cfg.py.j2 b/roles/mailman/templates/mailman/mm_cfg.py.j2 new file mode 100644 index 00000000..25f82461 --- /dev/null +++ b/roles/mailman/templates/mailman/mm_cfg.py.j2 @@ -0,0 +1,226 @@ +{{ ansible_header | comment }} +# -*- python -*- + +# Copyright (C) 1998,1999,2000 by the Free Software Foundation, Inc. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301 USA + + +"""This is the module which takes your site-specific settings. + +From a raw distribution it should be copied to mm_cfg.py. If you +already have an mm_cfg.py, be careful to add in only the new settings +you want. The complete set of distributed defaults, with annotation, +are in ./Defaults. In mm_cfg, override only those you want to +change, after the + + from Defaults import * + +line (see below). + +Note that these are just default settings - many can be overridden via the +admin and user interfaces on a per-list or per-user basis. + +Note also that some of the settings are resolved against the active list +setting by using the value as a format string against the +list-instance-object's dictionary - see the distributed value of +DEFAULT_MSG_FOOTER for an example.""" + + +####################################################### +# Here's where we get the distributed defaults. # + +from Defaults import * + + +##### +# General system-wide defaults +##### + +# Should image logos be used? Set this to 0 to disable image logos from "our +# sponsors" and just use textual links instead (this will also disable the +# shortcut "favicon"). Otherwise, this should contain the URL base path to +# the logo images (and must contain the trailing slash).. If you want to +# disable Mailman's logo footer altogther, hack +# Mailman/htmlformat.py:MailmanLogo(), which also contains the hardcoded links +# and image names. +IMAGE_LOGOS = '/images/mailman/' + +#------------------------------------------------------------- +# The name of the list Mailman uses to send password reminders +# and similar. Don't change if you want mailman-owner to be +# a valid local part. +MAILMAN_SITE_LIST = '{{ mailman.site_list }}' + +DEFAULT_URL= '{{ mailman.default_url }}' +DEFAULT_URL_PATTERN = 'https://%s/' +add_virtualhost(DEFAULT_URL_HOST, DEFAULT_EMAIL_HOST) + +#------------------------------------------------------------- +# Default domain for email addresses of newly created MLs +DEFAULT_EMAIL_HOST = '{{ mailman.default_host }}' +#------------------------------------------------------------- +# Default host for web interface of newly created MLs +DEFAULT_URL_HOST = '{{ mailman.default_host }}' +#------------------------------------------------------------- +# Required when setting any of its arguments. 
+add_virtualhost(DEFAULT_URL_HOST, DEFAULT_EMAIL_HOST) + +#------------------------------------------------------------- +# Do we send monthly reminders? +DEFAULT_SEND_REMINDERS = No + +# Normally when a site administrator authenticates to a web page with the site +# password, they get a cookie which authorizes them as the list admin. It +# makes me nervous to hand out site auth cookies because if this cookie is +# cracked or intercepted, the intruder will have access to every list on the +# site. OTOH, it's dang handy to not have to re-authenticate to every list on +# the site. Set this value to Yes to allow site admin cookies. +ALLOW_SITE_ADMIN_COOKIES = Yes + +##### +# Archive defaults +##### + +PUBLIC_ARCHIVE_URL = '{{ mailman.default_url }}archives/%(listname)s' + +# Are archives on or off by default? +DEFAULT_ARCHIVE = Off + +# Are archives public or private by default? +# 0=public, 1=private +DEFAULT_ARCHIVE_PRIVATE = 1 + +# Pipermail assumes that messages bodies contain US-ASCII text. +# Change this option to define a different character set to be used as +# the default character set for the archive. The term "character set" +# is used in MIME to refer to a method of converting a sequence of +# octets into a sequence of characters. If you change the default +# charset, you might need to add it to VERBATIM_ENCODING below. +DEFAULT_CHARSET = 'utf-8' + +# Most character set encodings require special HTML entity characters to be +# quoted, otherwise they won't look right in the Pipermail archives. However +# some character sets must not quote these characters so that they can be +# rendered properly in the browsers. The primary issue is multi-byte +# encodings where the octet 0x26 does not always represent the & character. +# This variable contains a list of such characters sets which are not +# HTML-quoted in the archives. +VERBATIM_ENCODING = ['utf-8'] + +##### +# General defaults +##### + +# The default language for this server. Whenever we can't figure out the list +# context or user context, we'll fall back to using this language. See +# LC_DESCRIPTIONS below for legal values. +DEFAULT_SERVER_LANGUAGE = '{{ mailman.default_language }}' + +# How many members to display at a time on the admin cgi to unsubscribe them +# or change their options? +DEFAULT_ADMIN_MEMBER_CHUNKSIZE = 50 + +# set this variable to Yes to allow list owners to delete their own mailing +# lists. You may not want to give them this power, in which case, setting +# this variable to No instead requires list removal to be done by the site +# administrator, via the command line script bin/rmlist. +#OWNERS_CAN_DELETE_THEIR_OWN_LISTS = No + +# Set this variable to Yes to allow list owners to set the "personalized" +# flags on their mailing lists. Turning these on tells Mailman to send +# separate email messages to each user instead of batching them together for +# delivery to the MTA. This gives each member a more personalized message, +# but can have a heavy impact on the performance of your system. +#OWNERS_CAN_ENABLE_PERSONALIZATION = No + +##### +# List defaults. NOTE: Changing these values does NOT change the +# configuration of an existing list. It only defines the default for new +# lists you subsequently create. +##### + +# Should a list, by default be advertised? What is the default maximum number +# of explicit recipients allowed? What is the default maximum message size +# allowed? +DEFAULT_LIST_ADVERTISED = Yes + +# {header-name: regexp} spam filtering - we include some for example sake. 
+DEFAULT_BOUNCE_MATCHING_HEADERS = """ +# Les lignes commencant par # sont des commentairtes. +#from: .*-owner@yahoogroups.com +#from: .*@uplinkpro.com +#from: .*@coolstats.comic.com +#from: .*@trafficmagnet.com +#from: .*@hotmail.com +#X-Reject: 450 +#X-Reject: 554 +""" + +# Mailman can be configured to strip any existing Reply-To: header, or simply +# extend any existing Reply-To: with one based on the above setting. +DEFAULT_FIRST_STRIP_REPLY_TO = Yes + +# SUBSCRIBE POLICY +# 0 - open list (only when ALLOW_OPEN_SUBSCRIBE is set to 1) ** +# 1 - confirmation required for subscribes +# 2 - admin approval required for subscribes +# 3 - both confirmation and admin approval required +# +# ** please do not choose option 0 if you are not allowing open +# subscribes (next variable) +DEFAULT_SUBSCRIBE_POLICY = 3 + +# Is the list owner notified of subscribes/unsubscribes? +DEFAULT_ADMIN_NOTIFY_MCHANGES = Yes + +# Do we send monthly reminders? +DEFAULT_SEND_REMINDERS = No + +# What should happen to non-member posts which do not match explicit +# non-member actions? +# 0 = Accept +# 1 = Hold +# 2 = Reject +# 3 = Discard +DEFAULT_GENERIC_NONMEMBER_ACTION = 1 + +# Use spamassassin automatically +GLOBAL_PIPELINE.insert(5, '{{ spamassassin }}') +# Discard messages with score higher than ... +SPAMASSASSIN_DISCARD_SCORE = 8 +# Hold in moderation messages with score higher than ... +SPAMASSASSIN_HOLD_SCORE = 2.1 + +# Add SpamAssassin administration interface on gui +# To make it work, you need to edit Gui/__init__.py +# with +# from SpamAssassin import SpamAssassin +ADMIN_CATEGORIES.append("spamassassin") + +# Add header to keep +PLAIN_DIGEST_KEEP_HEADERS.append('X-Spam-Score') + +# configure MTA +MTA = 'Postfix' +SMTPHOST = '{{ smtphost }}' +SMTP_MAX_RCPTS = 50 + + +POSTFIX_STYLE_VIRTUAL_DOMAINS = ["{{ mailman.default_host }}"] + +# Note - if you're looking for something that is imported from mm_cfg, but you +# didn't find it above, it's probably in /usr/lib/mailman/Mailman/Defaults.py. diff --git a/roles/mailman/templates/update-motd.d/05-mailman.j2 b/roles/mailman/templates/update-motd.d/05-mailman.j2 new file mode 100755 index 00000000..d3fee0db --- /dev/null +++ b/roles/mailman/templates/update-motd.d/05-mailman.j2 @@ -0,0 +1,3 @@ +#!/usr/bin/tail +14 +{{ ansible_header | comment }} +> Mailman a été déployé sur cette machine. Voir /etc/mailman/ et /var/lib/mailman/. diff --git a/roles/mailman/templates/usr/lib/mailman/Mailman/htmlformat.py.j2 b/roles/mailman/templates/usr/lib/mailman/Mailman/htmlformat.py.j2 new file mode 100644 index 00000000..146f9576 --- /dev/null +++ b/roles/mailman/templates/usr/lib/mailman/Mailman/htmlformat.py.j2 @@ -0,0 +1,742 @@ +{{ ansible_header | comment }} +# Copyright (C) 1998-2018 by the Free Software Foundation, Inc. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +# USA. 
+ + +"""Library for program-based construction of an HTML documents. + +Encapsulate HTML formatting directives in classes that act as containers +for python and, recursively, for nested HTML formatting objects. +""" + + +# Eventually could abstract down to HtmlItem, which outputs an arbitrary html +# object given start / end tags, valid options, and a value. Ug, objects +# shouldn't be adding their own newlines. The next object should. + + +import types + +from Mailman import mm_cfg +from Mailman import Utils +from Mailman.i18n import _, get_translation + +from Mailman.CSRFcheck import csrf_token + +SPACE = ' ' +EMPTYSTRING = '' +NL = '\n' + + + +# Format an arbitrary object. +def HTMLFormatObject(item, indent): + "Return a presentation of an object, invoking their Format method if any." + if type(item) == type(''): + return item + elif not hasattr(item, "Format"): + return `item` + else: + return item.Format(indent) + +def CaseInsensitiveKeyedDict(d): + result = {} + for (k,v) in d.items(): + result[k.lower()] = v + return result + +# Given references to two dictionaries, copy the second dictionary into the +# first one. +def DictMerge(destination, fresh_dict): + for (key, value) in fresh_dict.items(): + destination[key] = value + +class Table: + def __init__(self, **table_opts): + self.cells = [] + self.cell_info = {} + self.row_info = {} + self.opts = table_opts + + def AddOptions(self, opts): + DictMerge(self.opts, opts) + + # Sets all of the cells. It writes over whatever cells you had there + # previously. + + def SetAllCells(self, cells): + self.cells = cells + + # Add a new blank row at the end + def NewRow(self): + self.cells.append([]) + + # Add a new blank cell at the end + def NewCell(self): + self.cells[-1].append('') + + def AddRow(self, row): + self.cells.append(row) + + def AddCell(self, cell): + self.cells[-1].append(cell) + + def AddCellInfo(self, row, col, **kws): + kws = CaseInsensitiveKeyedDict(kws) + if not self.cell_info.has_key(row): + self.cell_info[row] = { col : kws } + elif self.cell_info[row].has_key(col): + DictMerge(self.cell_info[row], kws) + else: + self.cell_info[row][col] = kws + + def AddRowInfo(self, row, **kws): + kws = CaseInsensitiveKeyedDict(kws) + if not self.row_info.has_key(row): + self.row_info[row] = kws + else: + DictMerge(self.row_info[row], kws) + + # What's the index for the row we just put in? + def GetCurrentRowIndex(self): + return len(self.cells)-1 + + # What's the index for the col we just put in? 
+    def GetCurrentCellIndex(self):
+        return len(self.cells[-1])-1
+
+    def ExtractCellInfo(self, info):
+        valid_mods = ['align', 'valign', 'nowrap', 'rowspan', 'colspan',
+                      'bgcolor']
+        output = ''
+
+        for (key, val) in info.items():
+            if not key in valid_mods:
+                continue
+            if key == 'nowrap':
+                output = output + ' NOWRAP'
+                continue
+            else:
+                output = output + ' %s="%s"' % (key.upper(), val)
+
+        return output
+
+    def ExtractRowInfo(self, info):
+        valid_mods = ['align', 'valign', 'bgcolor']
+        output = ''
+
+        for (key, val) in info.items():
+            if not key in valid_mods:
+                continue
+            output = output + ' %s="%s"' % (key.upper(), val)
+
+        return output
+
+    def ExtractTableInfo(self, info):
+        valid_mods = ['align', 'width', 'border', 'cellspacing', 'cellpadding',
+                      'bgcolor']
+
+        output = ''
+
+        for (key, val) in info.items():
+            if not key in valid_mods:
+                continue
+            if key == 'border' and val == None:
+                output = output + ' BORDER'
+                continue
+            else:
+                output = output + ' %s="%s"' % (key.upper(), val)
+
+        return output
+
+    def FormatCell(self, row, col, indent):
+        try:
+            my_info = self.cell_info[row][col]
+        except:
+            my_info = None
+
+        output = '\n' + ' '*indent + '<td'
+        if my_info:
+            output = output + self.ExtractCellInfo(my_info)
+        output = output + '>'
+
+        item = self.cells[row][col]
+        item_format = HTMLFormatObject(item, indent+4)
+        output = '%s%s</td>' % (output, item_format)
+        return output
+
+    def FormatRow(self, row, indent):
+        try:
+            my_info = self.row_info[row]
+        except:
+            my_info = None
+
+        output = '\n' + ' '*indent + '<tr'
+        if my_info:
+            output = output + self.ExtractRowInfo(my_info)
+        output = output + '>'
+
+        for i in range(len(self.cells[row])):
+            output = output + self.FormatCell(row, i, indent + 2)
+
+        output = output + '\n' + ' '*indent + '</tr>'
+
+        return output
+
+    def Format(self, indent=0):
+        output = '\n' + ' '*indent + '<table'
+        output = output + self.ExtractTableInfo(self.opts)
+        output = output + '>'
+
+        for i in range(len(self.cells)):
+            output = output + self.FormatRow(i, indent + 2)
+
+        output = output + '\n' + ' '*indent + '</table>\n'
+
+        return output
+
+
+class Link:
+    def __init__(self, href, text, target=None):
+        self.href = href
+        self.text = text
+        self.target = target
+
+    def Format(self, indent=0):
+        texpr = ""
+        if self.target != None:
+            texpr = ' target="%s"' % self.target
+        return '<a href="%s"%s>%s</a>' % (HTMLFormatObject(self.href, indent),
+                                          texpr,
+                                          HTMLFormatObject(self.text, indent))
+
+class FontSize:
+    """FontSize is being deprecated - use FontAttr(..., size="...") instead."""
+    def __init__(self, size, *items):
+        self.items = list(items)
+        self.size = size
+
+    def Format(self, indent=0):
+        output = '<font size="%s">' % self.size
+        for item in self.items:
+            output = output + HTMLFormatObject(item, indent)
+        output = output + '</font>'
+        return output
+
+class FontAttr:
+    """Present arbitrary font attributes."""
+    def __init__(self, *items, **kw):
+        self.items = list(items)
+        self.attrs = kw
+
+    def Format(self, indent=0):
+        seq = []
+        for k, v in self.attrs.items():
+            seq.append('%s="%s"' % (k, v))
+        output = '<font %s>' % SPACE.join(seq)
+        for item in self.items:
+            output = output + HTMLFormatObject(item, indent)
+        output = output + '</font>'
+        return output
+
+
+class Container:
+    def __init__(self, *items):
+        if not items:
+            self.items = []
+        else:
+            self.items = items
+
+    def AddItem(self, obj):
+        self.items.append(obj)
+
+    def Format(self, indent=0):
+        output = []
+        for item in self.items:
+            output.append(HTMLFormatObject(item, indent))
+        return EMPTYSTRING.join(output)
+
+
+class Label(Container):
+    align = 'right'
+
+    def __init__(self, *items):
+        Container.__init__(self, *items)
+
+    def Format(self, indent=0):
+        return ('<div align="%s">' % self.align) + \
+               Container.Format(self, indent) + \
+               '</div>'
+
+
+# My own standard document template.  YMMV.
+# something more abstract would be more work to use...
+
+class Document(Container):
+    title = None
+    language = None
+    bgcolor = mm_cfg.WEB_BG_COLOR
+    suppress_head = 0
+
+    def set_language(self, lang=None):
+        self.language = lang
+
+    def set_bgcolor(self, color):
+        self.bgcolor = color
+
+    def SetTitle(self, title):
+        self.title = title
+
+    def Format(self, indent=0, **kws):
+        charset = 'us-ascii'
+        if self.language and Utils.IsLanguage(self.language):
+            charset = Utils.GetCharSet(self.language)
+        output = ['Content-Type: text/html; charset=%s' % charset]
+        output.append('Cache-control: no-cache\n')
+        if not self.suppress_head:
+            kws.setdefault('bgcolor', self.bgcolor)
+            tab = ' ' * indent
+            output.extend([tab,
+                           '<HTML>',
+                           '<HEAD>'
+                           ])
+            if mm_cfg.IMAGE_LOGOS:
+                output.append('<LINK REL="SHORTCUT ICON" HREF="%s">' %
+                              (mm_cfg.IMAGE_LOGOS + mm_cfg.SHORTCUT_ICON))
+            # Hit all the bases
+            output.append('<META http-equiv="Content-Type" '
+                          'content="text/html; charset=%s">' % charset)
+            if self.title:
+                output.append('%s<TITLE>%s</TITLE>' % (tab, self.title))
+            # Add CSS to visually hide some labeling text but allow screen
+            # readers to read it.
+            output.append("""\
+<style type="text/css">
+    div.hidden
+        {position:absolute;
+        left:-10000px;
+        top:auto;
+        width:1px;
+        height:1px;
+        overflow:hidden;}
+</style>
+""")
+            if mm_cfg.WEB_HEAD_ADD:
+                output.append(mm_cfg.WEB_HEAD_ADD)
+            output.append('%s</HEAD>' % tab)
+            quals = []
+            # Default link colors
+            if mm_cfg.WEB_VLINK_COLOR:
+                kws.setdefault('vlink', mm_cfg.WEB_VLINK_COLOR)
+            if mm_cfg.WEB_ALINK_COLOR:
+                kws.setdefault('alink', mm_cfg.WEB_ALINK_COLOR)
+            if mm_cfg.WEB_LINK_COLOR:
+                kws.setdefault('link', mm_cfg.WEB_LINK_COLOR)
+            for k, v in kws.items():
+                quals.append('%s="%s"' % (k, v))
+            # Language direction
+            direction = Utils.GetDirection(self.language)
+            output.append('%s<BODY %s dir="%s">' %
+                          (tab, SPACE.join(quals), direction))
+        # Always do this...
+        output.append(Container.Format(self, indent))
+        if not self.suppress_head:
+            output.append('%s</BODY>' % tab)
+            output.append('%s</HTML>' % tab)
+        return NL.join(output)
+
+    def addError(self, errmsg, tag=None):
+        if tag is None:
+            tag = _('Error: ')
+        self.AddItem(Header(3, Bold(FontAttr(
+            _(tag), color=mm_cfg.WEB_ERROR_COLOR, size='+2')).Format() +
+            Italic(errmsg).Format()))
+
+
+class HeadlessDocument(Document):
+    """Document without head section, for templates that provide their own."""
+    suppress_head = 1
+
+
+class StdContainer(Container):
+    def Format(self, indent=0):
+        # If I don't start a new I ignore indent
+        output = '<%s>' % self.tag
+        output = output + Container.Format(self, indent)
+        output = '%s</%s>' % (output, self.tag)
+        return output
+
+
+class QuotedContainer(Container):
+    def Format(self, indent=0):
+        # If I don't start a new I ignore indent
+        output = '<%s>%s</%s>' % (
+            self.tag,
+            Utils.websafe(Container.Format(self, indent)),
+            self.tag)
+        return output
+
+class Header(StdContainer):
+    def __init__(self, num, *items):
+        self.items = items
+        self.tag = 'h%d' % num
+
+class Address(StdContainer):
+    tag = 'address'
+
+class Underline(StdContainer):
+    tag = 'u'
+
+class Bold(StdContainer):
+    tag = 'strong'
+
+class Italic(StdContainer):
+    tag = 'em'
+
+class Preformatted(QuotedContainer):
+    tag = 'pre'
+
+class Subscript(StdContainer):
+    tag = 'sub'
+
+class Superscript(StdContainer):
+    tag = 'sup'
+
+class Strikeout(StdContainer):
+    tag = 'strike'
+
+class Center(StdContainer):
+    tag = 'center'
+
+class Form(Container):
+    def __init__(self, action='', method='POST', encoding=None,
+                 mlist=None, contexts=None, user=None, *items):
+        apply(Container.__init__, (self,) + items)
+        self.action = action
+        self.method = method
+        self.encoding = encoding
+        self.mlist = mlist
+        self.contexts = contexts
+        self.user = user
+
+    def set_action(self, action):
+        self.action = action
+
+    def Format(self, indent=0):
+        spaces = ' ' * indent
+        encoding = ''
+        if self.encoding:
+            encoding = 'enctype="%s"' % self.encoding
+        output = '\n%s<FORM action="%s" method="%s" %s>\n' % (
+            spaces, self.action, self.method, encoding)
+        if self.mlist:
+            output = output + \
+                '<input type="hidden" name="csrf_token" value="%s">\n' \
+                % csrf_token(self.mlist, self.contexts, self.user)
+        output = output + Container.Format(self, indent+2)
+        output = '%s\n%s</FORM>\n' % (output, spaces)
+        return output
+
+
+class InputObj:
+    def __init__(self, name, ty, value, checked, **kws):
+        self.name = name
+        self.type = ty
+        self.value = value
+        self.checked = checked
+        self.kws = kws
+
+    def Format(self, indent=0):
+        charset = get_translation().charset() or 'us-ascii'
+        output = ['<INPUT name="%s" type="%s" value="%s"' %
+                  (self.name, self.type, self.value)]
+        for item in self.kws.items():
+            output.append('%s="%s"' % item)
+        if self.checked:
+            output.append('CHECKED')
+        output.append('>')
+        ret = SPACE.join(output)
+        if self.type == 'TEXT' and isinstance(ret, unicode):
+            ret = ret.encode(charset, 'xmlcharrefreplace')
+        return ret
+
+
+class SubmitButton(InputObj):
+    def __init__(self, name, button_text):
+        InputObj.__init__(self, name, "SUBMIT", button_text, checked=0)
+
+class PasswordBox(InputObj):
+    def __init__(self, name, value='', size=mm_cfg.TEXTFIELDWIDTH):
+        InputObj.__init__(self, name, "PASSWORD", value, checked=0, size=size)
+
+class TextBox(InputObj):
+    def __init__(self, name, value='', size=mm_cfg.TEXTFIELDWIDTH):
+        if isinstance(value, str):
+            safevalue = Utils.websafe(value)
+        else:
+            safevalue = value
+        InputObj.__init__(self, name, "TEXT", safevalue, checked=0, size=size)
+
+class Hidden(InputObj):
+    def __init__(self, name, value=''):
+        InputObj.__init__(self, name, 'HIDDEN', value, checked=0)
+
+class TextArea:
+    def __init__(self, name, text='', rows=None, cols=None, wrap='soft',
+                 readonly=0):
+        if isinstance(text, str):
+            # Double escape HTML entities in non-readonly areas.
+            doubleescape = not readonly
+            safetext = Utils.websafe(text, doubleescape)
+        else:
+            safetext = text
+        self.name = name
+        self.text = safetext
+        self.rows = rows
+        self.cols = cols
+        self.wrap = wrap
+        self.readonly = readonly
+
+    def Format(self, indent=0):
+        charset = get_translation().charset() or 'us-ascii'
+        output = '