Merge branch 'newinfra' into 'nano'

# Conflicts:
#   roles/root-config/templates/nanorc.j2
certbot_on_virtu
vulcain 2020-08-09 02:09:20 +02:00
commit 26ae5d595a
47 changed files with 754 additions and 487 deletions

View File

@ -1,52 +1,40 @@
#!/usr/bin/env ansible-playbook #!/usr/bin/env ansible-playbook
--- ---
# Set variable adm_iface for all servers # Set variable adm_iface for all servers
- import_playbook: plays/get_adm_iface.yml # - hosts: server
# tasks:
# - name: Register adm interface in adm_iface variable
# shell: set -o pipefail && grep adm /sys/class/net/*/ifalias | sed "s|/sys/class/net/||" | sed "s|/ifalias:.*||"
# register: adm_iface
# check_mode: false
# changed_when: true
# args:
# executable: /bin/bash
# Common CRANS configuration for all servers
- hosts: server
vars:
# Debian mirror on adm
debian_mirror: http://mirror.adm.crans.org/debian
debian_components: main non-free
# LDAP binding - hosts: otis.adm.crans.org
ldap_base: 'dc=crans,dc=org'
ldap_master_ipv4: '10.231.136.19'
ldap_local_replica_uri:
- "ldap://10.231.136.38"
- "ldap://10.231.136.4"
ldap_master_uri: "ldap://{{ ldap_master_ipv4 }}"
ldap_user_tree: "cn=Utilisateurs,{{ ldap_base }}"
ldap_nslcd_bind_dn: "cn=nslcd,ou=service-users,{{ ldap_base }}"
ldap_nslcd_passwd: "{{ vault_ldap_nslcd_passwd }}"
# Group permissions
ssh_allow_groups: ssh nounou apprenti cableur root
# Scripts will tell users to go there to manage their account
intranet_url: 'https://intranet.crans.org/'
# Will be in /usr/scripts/
crans_scripts_git: "http://gitlab.adm.crans.org/nounous/scripts.git"
# NTP servers
ntp_servers:
- charybde.adm.crans.org
- silice.adm.crans.org
roles: roles:
- common-tools - ansible
- debian-apt-sources
- ldap-client
- openssh
- sudo
- ntp-client
- crans-scripts
- root-config
- import_playbook: plays/mail.yml # Tools for members
- hosts: zamok.adm.crans.org
roles:
# - zamok-tools
# - import_playbook: plays/mail.yml
- import_playbook: plays/nfs.yml - import_playbook: plays/nfs.yml
- import_playbook: plays/logs.yml # - import_playbook: plays/logs.yml
- import_playbook: plays/backup.yml # - import_playbook: plays/backup.yml
- import_playbook: plays/network-interfaces.yml # - import_playbook: plays/network-interfaces.yml
- import_playbook: plays/monitoring.yml # - import_playbook: plays/monitoring.yml
# - import_playbook: plays/generate_documentation.yml
# Services that only apply to a subset of server
# - import_playbook: plays/tv.yml
# - import_playbook: plays/mailman.yml
# - import_playbook: plays/dhcp.yml
# - import_playbook: plays/dns.yml
# - import_playbook: plays/wireguard.yml
# - import_playbook: plays/mirror.yml
# - import_playbook: plays/owncloud.yml
# - import_playbook: plays/reverse-proxy.yml

View File

@ -16,28 +16,39 @@ ansible_header: |
# Crans subnets # Crans subnets
adm_subnet: 10.231.136.0/24 adm_subnet: 10.231.136.0/24
# Role rsync-client # # Role rsync-client
to_backup: # to_backup:
- { # - {
name: "var", # name: "var",
path: "/var", # path: "/var",
auth_users: "backupcrans", # auth_users: "backupcrans",
secrets_file: "/etc/rsyncd.secrets", # secrets_file: "/etc/rsyncd.secrets",
hosts_allow: ["zephir.adm.crans.org", "10.231.136.6"], # hosts_allow: ["zephir.adm.crans.org", "10.231.136.6"],
} # }
- { # - {
name: "slash", # name: "slash",
path: "/", # path: "/",
auth_users: "backupcrans", # auth_users: "backupcrans",
secrets_file: "/etc/rsyncd.secrets", # secrets_file: "/etc/rsyncd.secrets",
hosts_allow: ["zephir.adm.crans.org", "10.231.136.6"], # hosts_allow: ["zephir.adm.crans.org", "10.231.136.6"],
} # }
#
# re2o:
# server: re2o.adm.crans.org
# service_user: "{{ vault_re2o_service_user }}"
# service_password: "{{ vault_re2o_service_password }}"
#
#
# # global server definitions
# mail_server: smtp.adm.crans.org
glob_ldap:
servers:
- 172.16.10.1
- 172.16.10.11
- 172.16.10.12
- 172.16.10.13
base: 'dc=crans,dc=org'
local: false # local configuration but default value
re2o: home_nounous:
server: re2o.adm.crans.org ip: 172.16.10.1
service_user: "{{ vault_re2o_service_user }}"
service_password: "{{ vault_re2o_service_password }}"
# global server definitions
mail_server: smtp.adm.crans.org

View File

@ -0,0 +1,9 @@
ldap:
local: False
servers: ["172.16.1.1"]
base: "dc=crans,dc=org"
# Parameters for debian mirror
debian_mirror: http://mirror.adm.crans.org/debian
debian_components: main non-free

View File

@ -3,80 +3,26 @@
dhcp: dhcp:
authoritative: True authoritative: True
global_options: global_options:
- { key: "interface-mtu", value: "1496" } - { key: "interface-mtu", value: "1500" }
global_parameters: [] global_parameters: []
subnets: subnets:
- network: "10.51.0.0/16" - network: "100.64.0.0/16"
deny_unknown: False deny_unknown: True
vlan: "accueil" vlan: "adh-nat"
default_lease_time: "600" default_lease_time: "600"
max_lease_time: "7200" max_lease_time: "7200"
routers: "10.51.0.10" routers: "100.64.0.99"
dns: ["10.51.0.152", "10.51.0.4"] dns: ["100.64.0.101", "100.64.0.102"]
domain_name: "accueil.crans.org" domain_name: "adh-nat.crans.org"
domain_search: "accueil.crans.org" domain_search: "adh-nat.crans.org"
options: options: []
- { key: "time-servers", value: "10.51.0.10" } lease_file: "/tmp/dhcp.list"
- { key: "ntp-servers", value: "10.51.0.10" }
- { key: "ip-forwarding", value: "off" }
range: ["10.51.1.0", "10.51.255.255"]
- network: "10.231.148.0/24" re2o:
deny_unknown: False server: re2o.adm.crans.org
vlan: "bornes" service_user: "ploptotoisverysecure"
default_lease_time: "8600" service_password: "ploptotoisverysecure"
routers: "10.231.148.254" dhcp:
dns: ["10.231.148.152", "10.231.148.4"] uri: "/tmp/re2o-dhcp.git"
domain_name: "borne.crans.org"
domain_search: "borne.crans.org"
options:
- { key: "time-servers", value: "10.231.148.98" }
- { key: "ntp-servers", value: "10.231.148.98" }
- { key: "ip-forwarding", value: "off" }
lease_file: "/var/local/re2o-services/dhcp/generated/dhcp.borne.crans.org.list"
- network: "185.230.78.0/24" mail_server: smtp.new-infra.adm.crans.org
deny_unknown: True
vlan: "fil_pub"
default_lease_time: "86400"
routers: "185.230.78.254"
dns: ["185.230.78.152", "185.230.78.4"]
domain_name: "adh.crans.org"
domain_search: "adh.crans.org"
options:
- { key: "time-servers", value: "185.230.79.98" }
- { key: "ntp-servers", value: "185.230.79.98" }
- { key: "ip-forwarding", value: "off" }
- { key: "smtp-server", value: "185.230.79.39" }
lease_file: "/var/local/re2o-services/dhcp/generated/dhcp.adh.crans.org.list"
- network: "10.54.0.0/19"
deny_unknown: True
vlan: "fil_new"
default_lease_time: "86400"
routers: "10.54.0.254"
dns: ["10.54.0.152", "10.54.0.4"]
domain_name: "fil.crans.org"
domain_search: "fil.crans.org"
options:
- { key: "time-servers", value: "185.230.79.98" }
- { key: "ntp-servers", value: "185.230.79.98" }
- { key: "ip-forwarding", value: "off" }
- { key: "smtp-server", value: "185.230.79.39" }
lease_file: "/var/local/re2o-services/dhcp/generated/dhcp.fil.crans.org.list"
- network: "10.53.0.0/19"
deny_unknown: False # For Federez
vlan: "wifi_new"
default_lease_time: "86400"
routers: "10.53.0.254"
dns: ["10.53.0.152", "10.53.0.4"]
domain_name: "wifi.crans.org"
domain_search: "wifi.crans.org"
options:
- { key: "time-servers", value: "185.230.79.98" }
- { key: "ntp-servers", value: "185.230.79.98" }
- { key: "ip-forwarding", value: "off" }
- { key: "smtp-server", value: "185.230.79.39" }
lease_file: "/var/local/re2o-services/dhcp/generated/dhcp.wifi.crans.org.list"
range: ["10.53.21.0", "10.53.25.254"]

View File

@ -1,52 +1,16 @@
--- ---
keepalived: glob_keepalived:
radius: mail_source: keepalived@crans.org
password: "{{ vault_keepalived_radius_password }}" mail_destination: root@crans.org
id: 52 smtp_server: smtp.adm.crans.org
ipv6: yes pool:
zones: dhcp:
- vlan: adm password: "plopisverysecure"
ipv4: 10.231.136.11/24 id: 60
brd: 10.231.136.255
ipv6: 2a0c:700:0:2:ad:adff:fef0:f002/64
- vlan: bornes
ipv4: 10.231.148.11/24
brd: 10.231.148.255
ipv6: fd01:240:fe3d:3:ad:adff:fef0:f003/64
- vlan: switches
ipv4: 10.231.100.11/24
brd: 10.231.100.255
ipv6: fd01:240:fe3d:c804:ad:adff:fef0:f004/64
router:
password: "{{ vault_keepalived_router_password }}"
id: 53
ipv6: no ipv6: no
notify: /usr/scripts/notify-dhcp
zones: zones:
- vlan: adm - vlan: adh-nat
ipv4: 10.231.136.254/24 ipv4: 100.64.0.99/16
brd: 10.231.136.255 brd: 100.64.255.255
- vlan: fil_pub
ipv4: 185.230.78.254/24
brd: 185.230.78.255
- vlan: srv
ipv4: 185.230.79.254/24
brd: 185.230.79.255
- vlan: fil_new # Nat filaire
ipv4: 10.54.0.254/16
brd: 10.54.255.255
- vlan: wifi_new
ipv4: 10.53.0.254/16
brd: 10.53.255.255
- vlan: zayo
ipv4: 158.255.113.73/31
proxy:
password: "{{ vault_keepalived_proxy_password }}"
id: 51
ipv6: yes
zones:
- vlan: srv
ipv4: 185.230.79.194/32
brd: 185.230.79.255
ipv6: 2a0c:700:0:24:ba:ccff:feda:aa00/64

View File

@ -0,0 +1,6 @@
---
glob_slapd:
master_ip: 172.16.10.1
replication_credentials: "{{ vault_ldap_replication_credentials }}"

View File

@ -3,7 +3,8 @@ interfaces:
adm: eth0 adm: eth0
srv: eth1 srv: eth1
keepalived_instances: loc_keepalived:
instances:
- name: proxy - name: proxy
tag: VI_DAUR tag: VI_DAUR
state: MASTER state: MASTER

View File

@ -0,0 +1,5 @@
---
loc_slapd:
ip: 172.16.10.12
replica: true
replica_rid: 2

View File

@ -5,7 +5,8 @@ interfaces:
bornes: eth1 bornes: eth1
switches: eth2 switches: eth2
keepalived_instances: loc_keepalived:
instances:
- name: radius - name: radius
tag: VI_RAD tag: VI_RAD
state: BACKUP state: BACKUP

View File

@ -3,7 +3,8 @@ interfaces:
adm: eth1 adm: eth1
srv: eth0 srv: eth0
keepalived_instances: loc_keepalived:
instances:
- name: proxy - name: proxy
tag: VI_DAUR tag: VI_DAUR
state: BACKUP state: BACKUP

View File

@ -7,7 +7,8 @@ interfaces:
wifi_new: ens1f0.22 wifi_new: ens1f0.22
zayo: ens1f0.26 zayo: ens1f0.26
keepalived_instances: loc_keepalived:
instances:
- name: router - name: router
tag: VI_ROUT tag: VI_ROUT
state: MASTER state: MASTER

View File

@ -0,0 +1,5 @@
---
loc_slapd:
ip: 172.16.10.13
replica: true
replica_rid: 3

View File

@ -10,7 +10,8 @@ interfaces:
srv: ens1f0.24 srv: ens1f0.24
zayo: ens1f0.26 zayo: ens1f0.26
keepalived_instances: loc_keepalived:
instances:
- name: radius - name: radius
tag: VI_RAD tag: VI_RAD
state: BACKUP state: BACKUP

View File

@ -5,7 +5,8 @@ interfaces:
bornes: eth1 bornes: eth1
switches: eth2 switches: eth2
keepalived_instances: loc_keepalived:
instances:
- name: radius - name: radius
tag: VI_RAD tag: VI_RAD
state: MASTER state: MASTER

View File

@ -0,0 +1,16 @@
---
interfaces:
adm: ens18
srv: ens19
srv-nat: ens20
infra: ens21
adh: ens22
adh-nat: ens23
loc_keepalived:
instances:
- name: dhcp
tag: VI_DHCP
state: BACKUP
priority: 100

View File

@ -0,0 +1,16 @@
---
interfaces:
adm: ens18
srv: ens19
srv-nat: ens20
infra: ens21
adh: ens22
adh-nat: ens23
loc_keepalived:
instances:
- name: dhcp
tag: VI_DHCP
state: MASTER
priority: 150

View File

@ -0,0 +1,5 @@
---
loc_slapd:
ip: 172.16.10.11
replica: true
replica_rid: 1

hosts
View File

@ -4,220 +4,74 @@
# > We name servers according to location, then type. # > We name servers according to location, then type.
# > Then we regroup everything in global geographic and type groups. # > Then we regroup everything in global geographic and type groups.
[horde] # [horde]
horde-srv.adm.crans.org # horde-srv.adm.crans.org
#
# [framadate]
# voyager.adm.crans.org
#
# [dhcp]
# dhcp.adm.crans.org
# odlyd.adm.crans.org
#
# [keepalived]
# gulp.adm.crans.org
# odlyd.adm.crans.org
# eap.adm.crans.org
# radius.adm.crans.org
# frontdaur.adm.crans.org
# bakdaur.adm.crans.org
#
# [test_vm]
# re2o-test.adm.crans.org
[framadate] [virtu]
voyager.adm.crans.org sam.adm.crans.org
daniel.adm.crans.org
jack.adm.crans.org
[dhcp] [slapd]
dhcp.adm.crans.org tealc.adm.crans.org
odlyd.adm.crans.org sam.adm.crans.org
daniel.adm.crans.org
jack.adm.crans.org
[keepalived] [keepalived]
gulp.adm.crans.org routeur-sam.adm.crans.org
odlyd.adm.crans.org routeur-daniel.adm.crans.org
eap.adm.crans.org
radius.adm.crans.org
frontdaur.adm.crans.org
bakdaur.adm.crans.org
[test_vm] [dhcp]
re2o-test.adm.crans.org routeur-sam.adm.crans.org
routeur-daniel.adm.crans.org
[crans_routeurs:children]
dhcp
keepalived
[crans_physical] [crans_physical]
charybde.adm.crans.org tealc.adm.crans.org
cochon.adm.crans.org sam.adm.crans.org
ft.adm.crans.org daniel.adm.crans.org
fyre.adm.crans.org jack.adm.crans.org
fz.adm.crans.org
gateau.adm.crans.org
gulp.adm.crans.org
odlyd.adm.crans.org
omnomnom.adm.crans.org
stitch.adm.crans.org
thot.adm.crans.org
vo.adm.crans.org
zamok.adm.crans.org
zbee.adm.crans.org
zephir.adm.crans.org
[crans_vm] [crans_vm]
alice.adm.crans.org routeur-sam.adm.crans.org
bakdaur.adm.crans.org routeur-daniel.adm.crans.org
boeing.adm.crans.org belenios # on changera plus tard
cas-srv.adm.crans.org
#civet.adm.crans.org
#cups.adm.crans.org
dhcp.adm.crans.org
eap.adm.crans.org
ethercalc-srv.adm.crans.org
frontdaur.adm.crans.org
gitzly.adm.crans.org
horde-srv.adm.crans.org
ipv6-zayo.adm.crans.org
irc.adm.crans.org
jitsi.adm.crans.org
kenobi.adm.crans.org
kiwi.adm.crans.org
lutim.adm.crans.org
#mediadrop-srv.adm.crans.org
mailman.adm.crans.org
nem.adm.crans.org
#news.adm.crans.org
otis.adm.crans.org
owl.adm.crans.org
owncloud-srv.adm.crans.org
radius.adm.crans.org
re2o-bcfg2.adm.crans.org
re2o-ldap.adm.crans.org
re2o-srv.adm.crans.org
redisdead.adm.crans.org
roundcube-srv.adm.crans.org
routeur.adm.crans.org
silice.adm.crans.org
titanic.adm.crans.org
tracker.adm.crans.org
unifi.adm.crans.org
voyager.adm.crans.org
xmpp.adm.crans.org
ytrap-llatsni.adm.crans.org
sitesweb.adm.crans.org
[crans_unifi]
0g-2.borne.crans.org
0g-3.borne.crans.org
0g-4.borne.crans.org
0h-2.borne.crans.org
0h-3.borne.crans.org
0m-2.borne.crans.org
1g-1.borne.crans.org
1g-3.borne.crans.org
1g-4.borne.crans.org
1g-5.borne.crans.org
1h-2.borne.crans.org
1h-3.borne.crans.org
1i-2.borne.crans.org
1i-3.borne.crans.org
1j-2.borne.crans.org
1j-3.borne.crans.org
1m-1.borne.crans.org
1m-2.borne.crans.org
1m-5.borne.crans.org
2a-1.borne.crans.org
2b-3.borne.crans.org
2c-2.borne.crans.org
2c-3.borne.crans.org
2g-1.borne.crans.org
2g-3.borne.crans.org
2g-5.borne.crans.org
2h-2.borne.crans.org
2h-3.borne.crans.org
2i-2.borne.crans.org
2i-3.borne.crans.org
2j-2.borne.crans.org
2j-3.borne.crans.org
2m-2.borne.crans.org
3a-2.borne.crans.org
3b-3.borne.crans.org
3c-2.borne.crans.org
3c-3.borne.crans.org
3g-1.borne.crans.org
3g-5.borne.crans.org
3h-2.borne.crans.org
3h-3.borne.crans.org
3i-2.borne.crans.org
3i-3.borne.crans.org
3j-2.borne.crans.org
3m-2.borne.crans.org
3m-4.borne.crans.org
3m-5.borne.crans.org
4a-1.borne.crans.org
4a-2.borne.crans.org
4a-3.borne.crans.org
4b-1.borne.crans.org
4c-2.borne.crans.org
4c-3.borne.crans.org
4g-1.borne.crans.org
4g-3.borne.crans.org
4g-5.borne.crans.org
4h-2.borne.crans.org
4h-3.borne.crans.org
4i-2.borne.crans.org
4i-3.borne.crans.org
4j-1.borne.crans.org
4j-2.borne.crans.org
4j-3.borne.crans.org
4m-2.borne.crans.org
4m-4.borne.crans.org
5a-1.borne.crans.org
5b-1.borne.crans.org
5c-1.borne.crans.org
5g-1.borne.crans.org
5g-3.borne.crans.org
5m-4.borne.crans.org
6a-1.borne.crans.org
6a-2.borne.crans.org
6c-1.borne.crans.org
adonis.borne.crans.org # 5a
atlas.borne.crans.org # 1a
baba-au-rhum.borne.crans.org # 3b
bacchus.borne.crans.org # 1b
baucis.borne.crans.org # 2b
bellerophon.borne.crans.org # 2b
benedict-cumberbatch.borne.crans.org # 1b
benthesicyme.borne.crans.org # 4b
boree.borne.crans.org # 6b
branchos.borne.crans.org # 3b
calypso.borne.crans.org # 4c
chaos.borne.crans.org # 1c
chronos.borne.crans.org # 2c
crios.borne.crans.org # 3c
gaia.borne.crans.org # 0g
hades.borne.crans.org # 4h
hephaistos.borne.crans.org # 1h
hermes.borne.crans.org # 3h
hypnos.borne.crans.org # 2h
iaso.borne.crans.org # 1i
idothee.borne.crans.org # 3i
idyie.borne.crans.org # 0i
ino.borne.crans.org # 2i
ioke.borne.crans.org # 4i
jaipudidees.borne.crans.org # 2j
jaipudpapier.borne.crans.org # 3j
japavolonte.borne.crans.org # 1j
jesuischarlie.borne.crans.org # 0j
jveuxduwifi.borne.crans.org # 0j
mania.borne.crans.org # 2m
marquis.borne.crans.org # manoir
mercure.borne.crans.org # 3m
#5m-5.borne.crans.org Déplacée au 2b
# TODO Récupérer ces bornes
#kakia.borne.crans.org # kfet
#koios.borne.crans.org # kfet
#gym-1.borne.crans.org # gymnase
#gym-2.borne.crans.org # gymnase
#0d-1.borne.crans.org
# TODO La fibre vers le resto U est coupée.
#rhea.borne.crans.org # resto-univ
#romulus.borne.crans.org # resto-univ
[ovh_physical] [ovh_physical]
soyouz.adm.crans.org
sputnik.adm.crans.org sputnik.adm.crans.org
# every server at crans # every server at crans
[crans_server:children] [crans_server:children]
crans_physical crans_physical
crans_vm crans_vm
crans_routeurs
# everything at crans # everything at crans
[crans:children] [crans:children]
crans_physical crans_server
crans_vm
crans_unifi
# everything at ovh # everything at ovh
[ovh:children] [ovh:children]
@ -231,6 +85,7 @@ ovh_physical
# every virtual machine # every virtual machine
[vm:children] [vm:children]
crans_vm crans_vm
crans_routeurs
# every server # every server
[server:children] [server:children]

ldap.yml 100755
View File

@ -0,0 +1,5 @@
#!/usr/bin/env ansible-playbook
---
- hosts: daniel
roles:
- slapd

View File

@ -1,5 +1,7 @@
#!/usr/bin/env ansible-playbook #!/usr/bin/env ansible-playbook
--- ---
- hosts: keepalived - hosts: keepalived
vars:
keepalived: "{{ glob_keepalived | combine(loc_keepalived) }}"
roles: roles:
- keepalived - keepalived

View File

@ -7,12 +7,4 @@
# Deploy NFS only on campus # Deploy NFS only on campus
- hosts: crans_server - hosts: crans_server
roles: ["nfs-common"] roles: ["home-nounous"]
# Deploy autofs NFS
- hosts: crans_server,!odlyd.adm.crans.org,!zamok.adm.crans.org,!omnomnom.adm.crans.org,!owl.adm.crans.org,!owncloud-srv.adm.crans.org
roles: ["nfs-autofs"]
# Deploy home permanent
- hosts: zamok.adm.crans.org,omnomnom.adm.crans.org,owl.adm.crans.org,owncloud-srv.adm.crans.org
roles: ["home-permanent"]

plays/root.yml 100755
View File

@ -0,0 +1,42 @@
#!/usr/bin/env ansible-playbook
---
- hosts: virtu
roles:
- proxmox-apt-sources
- hosts: server
vars:
# # Will be in /usr/scripts/
# crans_scripts_git: "http://gitlab.adm.crans.org/nounous/scripts.git"
# NTP servers
ntp_servers:
- charybde.adm.crans.org
# - silice.adm.crans.org
roles:
- debian-apt-sources
- common-tools
- sudo
- ntp-client
# - crans-scripts
- root-config
- hosts: crans_vm
roles:
- qemu-guest-agent
- hosts: slapd
vars:
slapd: '{{ glob_slapd | combine(loc_slapd | default({})) }}'
ldap:
private_key: "{{ vault_ldap_private_key }}"
certificate: "{{ vault_ldap_certificate }}"
roles:
- slapd
- hosts: server
vars:
ldap: '{{ glob_ldap | combine(loc_ldap | default({})) }}'
roles:
- ldap-client
- home-nounous

View File

@ -53,6 +53,7 @@
owner: root owner: root
group: utmp group: utmp
mode: '4755' mode: '4755'
check_mode: no
- name: Deploy screen tmpfile - name: Deploy screen tmpfile
template: template:

View File

@ -0,0 +1,8 @@
# HOME-NOUNOUS
This role exports the home directories to the various servers (they are mounted over NFS).
## VARS
home_nounous:
  ip: the IP address of the NFS server
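For reference, a minimal sketch of the variable as defined in group_vars in this merge; the home.mount template mounts {{ home_nounous.ip }}:/pool/home on /home:

home_nounous:
  ip: 172.16.10.1   # NFS server exporting /pool/home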

View File

@ -0,0 +1,23 @@
---
- name: Install NFS client
apt:
update_cache: true
name:
- nfs-common
state: present
register: apt_result
retries: 3
until: apt_result is succeeded
- name: Deploy nfs systemd mount
template:
src: systemd/system/home.mount.j2
dest: /etc/systemd/system/home.mount
mode: 0755
- name: Load and activate nfs systemd mount
systemd:
name: home.mount
daemon_reload: true
enabled: true
state: started

View File

@ -0,0 +1,14 @@
{{ ansible_header | comment }}
[Unit]
Description=Mount home
Wants=network-online.target
After=network-online.target
[Mount]
What={{ home_nounous.ip }}:/pool/home
Where=/home
Type=nfs
Options=rw,nosuid
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,38 @@
# KEEPALIVED
This role installs keepalived to provide redundancy for some services across
several servers.
/!\ This role deploys a script that automatically restarts the DHCP server /!\
## VARS
keepalived:
  - mail_destination: who receives the notification mails on failover
  - mail_source: the sender address of those mails
  - smtp_server: the SMTP server used to send them
  - pool: the set of instances that can be installed on the machine. Each
    instance is a dictionary with the following fields:
    - name: the name of the instance
    - password: the password the machines of a given instance use to
      synchronise with each other
    - id: the identifier they use to talk to each other
    - ipv6: whether an additional instance must be configured for IPv6
    - notify: the script to run on failover (if unset, no script is used)
    - administration: the administration VLAN on which the machines of a
      given instance talk to each other
    - zones: a list of zones announced by the keepalived instances. Each zone
      is a dictionary with the following fields:
      - vlan: the VLAN the zone lives on
      - ipv4: the IPv4 address, in CIDR notation, shared by the machines
      - brd: whether the broadcast address must be set
      - ipv6: an IPv6 address (optional; if present but the instance does not
        enable ipv6, it is ignored)
  - instances: the list of instances to deploy on this machine. Each instance
    is a dictionary with the following fields:
    - name: the name of the instance to deploy
    - tag: the short tag to give it
    - state: its state (BACKUP or MASTER)
    - priority: its priority (150 by default for a MASTER, then decrease in
      steps of 50)
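As a concrete illustration, a sketch of how the pieces fit together, taken from the group_vars/host_vars values introduced in this merge; plays/keepalived.yml builds the final keepalived variable with glob_keepalived | combine(loc_keepalived):

# group_vars: shared settings and the pool of available instances
glob_keepalived:
  mail_source: keepalived@crans.org
  mail_destination: root@crans.org
  smtp_server: smtp.adm.crans.org
  pool:
    dhcp:
      password: "plopisverysecure"   # as committed here; presumably a placeholder for a vault value
      id: 60
      ipv6: no
      notify: /usr/scripts/notify-dhcp
      zones:
        - vlan: adh-nat
          ipv4: 100.64.0.99/16
          brd: 100.64.255.255

# host_vars: the instances actually deployed on this machine
loc_keepalived:
  instances:
    - name: dhcp
      tag: VI_DHCP
      state: MASTER
      priority: 150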

View File

@ -13,3 +13,16 @@
dest: /etc/keepalived/keepalived.conf dest: /etc/keepalived/keepalived.conf
mode: 0644 mode: 0644
notify: Reload keepalived.service notify: Reload keepalived.service
- name: Create scripts directory
file:
path: /usr/scripts
state: directory
- name: Deploy keepalived dhcp scripts
template:
src: bin/notify-dhcp
dest: /usr/scripts/notify-dhcp
mode: 0744
when: not ansible_check_mode
notify: Reload keepalived.service

View File

@ -0,0 +1,24 @@
#!/bin/bash
# keepalived notify script: invoked with <TYPE> <NAME> <STATE>.
# Starts isc-dhcp-server when this node becomes MASTER, stops it otherwise.
TYPE=$1
NAME=$2
STATE=$3
case $STATE in
"MASTER")
logger -s '[DHCP-NOTIFY] Entering state MASTER, starting isc-dhcp-server.service'
systemctl start isc-dhcp-server.service
exit 0;;
"BACKUP")
logger -s '[DHCP-NOTIFY] Entering state BACKUP, stopping isc-dhcp-server.service'
systemctl stop isc-dhcp-server.service
exit 0;;
"FAULT")
logger -s '[DHCP-NOTIFY] Entering state FAULT, stopping isc-dhcp-server.service'
systemctl stop isc-dhcp-server.service
exit 0;;
*)
logger -s '[DHCP-NOTIFY] Entering UNKNOWN state, doing nothing'
exit 1;;
esac

View File

@ -1,31 +1,33 @@
{{ ansible_header | comment }} {{ ansible_header | comment }}
global_defs { global_defs {
notification_email { notification_email { {{ keepalived.mail_destination }} }
root@crans.org notification_email_from {{ keepalived.mail_source }}
} smtp_server {{ keepalived.smtp_server }}
notification_email_from keepalived@crans.org
smtp_server smtp.adm.crans.org
} }
{% for instance in keepalived_instances %} {% for instance in keepalived.instances %}
vrrp_instance {{ instance.tag }}4 { vrrp_instance {{ instance.tag }}4 {
state {{ instance.state }} state {{ instance.state }}
priority {{ instance.priority }} priority {{ instance.priority }}
smtp_alert smtp_alert
interface {{ interfaces.adm }} interface {{ interfaces.adm }}
virtual_router_id {{ keepalived[instance.name].id }} virtual_router_id {{ keepalived.pool[instance.name].id }}
advert_int 2 advert_int 2
authentication { authentication {
auth_type PASS auth_type PASS
auth_pass {{ keepalived[instance.name].password }} auth_pass {{ keepalived.pool[instance.name].password }}
} }
{% if keepalived.pool[instance.name].notify is defined %}
notify {{ keepalived.pool[instance.name].notify }}
{% endif %}
virtual_ipaddress { virtual_ipaddress {
{% for zone in keepalived[instance.name].zones %} {% for zone in keepalived.pool[instance.name].zones %}
{% if zone.brd is defined %} {% if zone.brd %}
{{ zone.ipv4 }} brd {{ zone.brd }} dev {{ interfaces[zone.vlan] }} scope global {{ zone.ipv4 }} brd {{ zone.ipv4 | ipaddr('broadcast') }} dev {{ interfaces[zone.vlan] }} scope global
{% else %} {% else %}
{{ zone.ipv4 }} dev {{ interfaces[zone.vlan] }} scope global {{ zone.ipv4 }} dev {{ interfaces[zone.vlan] }} scope global
{% endif %} {% endif %}
@ -33,23 +35,25 @@ vrrp_instance {{ instance.tag }}4 {
} }
} }
{% if keepalived[instance.name].ipv6 %} {% if keepalived.pool[instance.name].ipv6 %}
vrrp_instance {{ instance.tag }}6 { vrrp_instance {{ instance.tag }}6 {
state {{ instance.state }} state {{ instance.state }}
priority {{ instance.priority }} priority {{ instance.priority }}
smtp_alert smtp_alert
interface {{ interfaces.adm }} interface {{ keepalived.pool[instance.name].administration }}
virtual_router_id {{ keepalived[instance.name].id }} virtual_router_id {{ keepalived.pool[instance.name].id }}
advert_int 2 advert_int 2
authentication { authentication {
auth_type PASS auth_type PASS
auth_pass {{ keepalived[instance.name].password }} auth_pass {{ keepalived.pool[instance.name].password }}
} }
virtual_ipaddress { virtual_ipaddress {
{% for zone in keepalived[instance.name].zones %} {% for zone in keepalived.pool[instance.name].zones %}
{% if zone.ipv6 is defined %}
{{ zone.ipv6 }} dev {{ interfaces[zone.vlan] }} scope global {{ zone.ipv6 }} dev {{ interfaces[zone.vlan] }} scope global
{% endif %}
{% endfor %} {% endfor %}
} }
} }

View File

@ -0,0 +1,10 @@
# LDAP-CLIENT
Configures an LDAP client for user accounts.
## VARS
ldap:
  - local: whether the LDAP server is installed locally
  - servers: the list of LDAP servers to contact
  - base: the LDAP search base
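A minimal sketch, mirroring (abridged) the glob_ldap definition added to group_vars; plays/root.yml merges it with an optional loc_ldap through combine:

ldap:
  local: false                 # no local replica: query the remote servers over ldaps
  servers:
    - 172.16.10.1
    - 172.16.10.11
  base: "dc=crans,dc=org"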

View File

@ -4,17 +4,14 @@
apt: apt:
update_cache: true update_cache: true
name: name:
- nslcd
- libnss-ldapd - libnss-ldapd
- libpam-ldapd
- nscd # local cache
state: present state: present
register: apt_result register: apt_result
retries: 3 retries: 3
until: apt_result is succeeded until: apt_result is succeeded
# Configure /etc/nslcd.conf # Configure /etc/nslcd.conf
- name: Configure nslcd LDAP credentials - name: Configure nslcd
template: template:
src: nslcd.conf.j2 src: nslcd.conf.j2
dest: /etc/nslcd.conf dest: /etc/nslcd.conf
@ -26,20 +23,27 @@
lineinfile: lineinfile:
dest: /etc/nsswitch.conf dest: /etc/nsswitch.conf
regexp: "^{{ item }}:" regexp: "^{{ item }}:"
line: "{{ item }}: files ldap" line: "{{ item }}: files systemd ldap"
loop: loop:
- passwd - passwd
- group - group
- shadow
notify: Restart nslcd service notify: Restart nslcd service
# Disable passwd and chsh - name: Configure NSS to use LDAP
- name: Copy passwd and chsh scripts lineinfile:
template: dest: /etc/nsswitch.conf
src: "bin/{{ item }}.j2" regexp: "^{{ item }}:"
dest: "/usr/local/bin/{{ item }}" line: "{{ item }}: files ldap"
mode: 0755
loop: loop:
- chsh - shadow
- chsh.ldap - networks
- passwd notify: Restart nslcd service
- name: Configure NSS to use LDAP
lineinfile:
dest: /etc/nsswitch.conf
regexp: "^{{ item }}:"
line: "{{ item }}: files ldap dns"
loop:
- hosts
notify: Restart nslcd service

View File

@ -1,4 +0,0 @@
#!/bin/sh
{{ ansible_header | comment }}
echo "Pour changer votre shell,\nAllez sur l'intranet : {{intranet_url}}"

View File

@ -1,4 +0,0 @@
#!/bin/sh
{{ ansible_header | comment }}
echo "Pour changer votre shell,\nAllez sur l'intranet : {{intranet_url}}"
echo "De toutes façons la vraie commande aurait pas marché, on installe pas nslcd-utils sur les serveurs normalement."

View File

@ -1,3 +0,0 @@
#!/bin/sh
{{ ansible_header | comment }}
echo "Pour changer votre mot de passe,\nAllez sur l'intranet : {{intranet_url}}"

View File

@ -1,35 +1,30 @@
{{ ansible_header | comment }} {{ ansible_header | comment }}
# /etc/nslcd.conf
# nslcd configuration file. See nslcd.conf(5)
# for details.
# The user and group nslcd should run as. # The user and group nslcd should run as.
uid nslcd uid nslcd
gid nslcd gid nslcd
# The location at which the LDAP server(s) should be reachable. # The location at which the LDAP server(s) should be reachable.
{% if ldap_local_replica_uri is defined %} {% if ldap.local %}
{% for uri in ldap_local_replica_uri %} uri ldapi:///
uri {{ uri }} {% else %}
{% for server in ldap.servers %}
uri ldaps://{{ server }}/
{% endfor %} {% endfor %}
{% endif %} {% endif %}
uri {{ ldap_master_uri }}
# The search base that will be used for all queries. # The search base that will be used for all queries.
base {{ ldap_base }} base {{ ldap.base }}
base passwd {{ ldap_user_tree }}
base shadow {{ ldap_user_tree }}
base group ou=posix,ou=groups,{{ ldap_base }}
# The LDAP protocol version to use. # The LDAP protocol version to use.
ldap_version 3 #ldap_version 3
# Time limit to wait for an answer
timelimit 5
# Time limit to wait for a bind
bind_timelimit 5
# The DN to bind with for normal lookups. # The DN to bind with for normal lookups.
binddn {{ ldap_nslcd_bind_dn }} #binddn cn=annonymous,dc=example,dc=net
bindpw {{ ldap_nslcd_passwd }} #bindpw secret
# The DN used for password modifications by root. # The DN used for password modifications by root.
#rootpwmoddn cn=admin,dc=example,dc=com #rootpwmoddn cn=admin,dc=example,dc=com
@ -41,4 +36,3 @@ tls_cacertfile /etc/ssl/certs/ca-certificates.crt
# The search scope. # The search scope.
#scope sub #scope sub

View File

@ -12,6 +12,7 @@
path: /etc/default/ntp path: /etc/default/ntp
regexp: '^NTPD_OPTS' regexp: '^NTPD_OPTS'
line: NTPD_OPTS='-g -x' line: NTPD_OPTS='-g -x'
check_mode: no
- name: Configure NTP - name: Configure NTP
template: template:

View File

@ -0,0 +1,5 @@
---
- name: Configure Proxmox repositories
template:
src: apt/sources.list.d/pve-enterprise.list.j2
dest: /etc/apt/sources.list.d/pve-enterprise.list

View File

@ -0,0 +1,2 @@
{{ ansible_header | comment }}
deb http://download.proxmox.com/debian/pve {{ ansible_lsb.codename }} pve-no-subscription

View File

@ -0,0 +1,10 @@
---
- name: Install qemu guest agent
apt:
update_cache: true
install_recommends: false
name:
- qemu-guest-agent
register: apt_result
retries: 3
until: apt_result is succeeded

View File

@ -15,10 +15,11 @@
etype: group etype: group
permissions: rwx permissions: rwx
state: query state: query
when: not ansible_check_mode
- name: Clone re2o-dhcp repository - name: Clone re2o-dhcp repository
git: git:
repo: 'http://gitlab.adm.crans.org/nounous/re2o-dhcp.git' repo: "{{ re2o.dhcp.uri }}"
dest: /var/local/re2o-services/dhcp dest: /var/local/re2o-services/dhcp
version: crans version: crans
umask: '002' umask: '002'
@ -30,6 +31,7 @@
owner: root owner: root
group: root group: root
state: link state: link
force: yes
- name: Create generated directory - name: Create generated directory
file: file:

View File

@ -0,0 +1,13 @@
# SLAPD
Deploys an LDAP server, either master or replica.
## VARS
slapd:
  - ip: the IP address the LDAP server listens on
  - replica: whether this server is a replica or the master
  - replica_rid: the replica id of this server
  - master_ip: the IP address of the master
  - replication_credentials: the credentials used by the replicas to
    authenticate against the master
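A minimal sketch for one replica, following the glob_slapd/loc_slapd split used in this merge; plays/root.yml builds the slapd variable with glob_slapd | combine(loc_slapd | default({})):

# group_vars: shared by master and replicas
glob_slapd:
  master_ip: 172.16.10.1
  replication_credentials: "{{ vault_ldap_replication_credentials }}"

# host_vars of a replica
loc_slapd:
  ip: 172.16.10.12
  replica: true
  replica_rid: 2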

View File

@ -0,0 +1,6 @@
---
- name: Restart slapd
service:
name: slapd.service
state: restarted

View File

@ -0,0 +1,35 @@
---
- name: Install slapd
apt:
update_cache: true
name:
- slapd
register: apt_result
retries: 3
until: apt_result is succeeded
- name: Remove slapd config directory
file:
path: /etc/ldap/slapd.d/
state: absent
- name: Deploy slapd configuration
template:
src: "ldap/{{ item.dest }}.j2"
dest: "/etc/ldap/{{ item.dest }}"
mode: "{{ item.mode }}"
owner: openldap
group: openldap
loop:
- { dest: slapd.conf, mode: "0600" }
- { dest: ldap.key, mode: "0600" }
- { dest: ldap.pem, mode: "0644" }
notify: Restart slapd
- name: Deploy ldap services
lineinfile:
path: /etc/default/slapd
regexp: '^SLAPD_SERVICES='
line: 'SLAPD_SERVICES="ldaps://{{ slapd.ip }}/ ldapi:///"'
notify: Restart slapd
check_mode: no

View File

@ -0,0 +1 @@
{{ ldap.private_key }}

View File

@ -0,0 +1 @@
{{ ldap.certificate }}

View File

@ -0,0 +1,196 @@
# This is the main slapd configuration file. See slapd.conf(5) for more
# info on the configuration options.
#######################################################################
# Global Directives:
# Schema and objectClass definitions
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/nis.schema
include /etc/ldap/schema/inetorgperson.schema
# Where the pid file is put. The init.d script
# will not stop the server if you change this.
pidfile /var/run/slapd/slapd.pid
# List of arguments that were passed to the server
argsfile /var/run/slapd/slapd.args
# Read slapd.conf(5) for possible values
loglevel none
# Where the dynamically loaded modules are stored
modulepath /usr/lib/ldap
moduleload back_mdb
{% if not slapd.replica %}
moduleload auditlog
overlay auditlog
auditlog /var/log/openldap/auditlog.log
moduleload syncprov
{% endif %}
# TODO: SET UP THE CERTIFICATES
# TLS Certificates
#TLSCipherSuite HIGH:MEDIUM:-SSLv2:-SSLv3
TLSCertificateFile /etc/ldap/ldap.pem
TLSCertificateKeyFile /etc/ldap/ldap.key
# The maximum number of entries that is returned for a search operation
sizelimit 500
# The tool-threads parameter sets the actual number of CPUs that are used
# for indexing.
tool-threads 1
#######################################################################
# Specific Backend Directives for mdb:
# Backend specific directives apply to this backend until another
# 'backend' directive occurs
backend mdb
#######################################################################
# Specific Backend Directives for 'other':
# Backend specific directives apply to this backend until another
# 'backend' directive occurs
#backend <other>
#######################################################################
# Specific Directives for database #1, of type mdb:
# Database specific directives apply to this database until another
# 'database' directive occurs
database mdb
# The base of your directory in database #1
suffix "dc=crans,dc=org"
# rootdn directive for specifying a superuser on the database. This is needed
# for syncrepl.
rootdn "cn=admin,dc=crans,dc=org"
# Where the database file are physically stored for database #1
directory "/var/lib/ldap"
# The dbconfig settings are used to generate a DB_CONFIG file the first
# time slapd starts. They do NOT override an existing DB_CONFIG
# file. You should therefore change these settings in DB_CONFIG directly
# or remove DB_CONFIG and restart slapd for changes to take effect.
# For the Debian package we use 2MB as default but be sure to update this
# value if you have plenty of RAM
#dbconfig set_cachesize 0 2097152 0
# Sven Hartge reported that he had to set this value incredibly high
# to get slapd running at all. See http://bugs.debian.org/303057 for more
# information.
# Number of objects that can be locked at the same time.
#dbconfig set_lk_max_objects 1500
# Number of locks (both requested and granted)
#dbconfig set_lk_max_locks 1500
# Number of lockers
#dbconfig set_lk_max_lockers 1500
# Indexing options for database #1
index objectClass eq
# Save the time that the entry gets modified, for database #1
lastmod on
# Checkpoint the BerkeleyDB database periodically in case of system
# failure and to speed slapd shutdown.
checkpoint 512 30
{% if slapd.replica %}
syncrepl
rid={{ slapd.replica_rid }}
provider=ldaps://{{ slapd.master_ip }}:636
bindmethod=simple
binddn="cn=replicator,dc=crans,dc=org"
credentials={{ slapd.replication_credentials }}
searchbase="dc=crans,dc=org"
scope=sub
schemachecking=on
type=refreshAndPersist
timeout=0
network-timeout=0
retry="30 20 300 +"
tls_reqcert=allow
{% endif %}
{% if slapd.replica %}
# The userPassword by default can be changed
# by the entry owning it if they are authenticated.
# Others should not be able to see it, except the
# admin entry below
# These access lines apply to database #1 only
access to attrs=userPassword,shadowLastChange
by anonymous auth
by * none
# Ensure read access to the base for things like
# supportedSASLMechanisms. Without this you may
# have problems with SASL not knowing what
# mechanisms are available and the like.
# Note that this is covered by the 'access to *'
# ACL below too but if you change that as people
# are wont to do you'll still need this if you
# want SASL (and possible other things) to work
# happily.
access to dn.base="" by * read
# The admin dn has full write access, everyone else
# can read everything.
access to *
by * read
{% else %}
overlay syncprov
# The userPassword by default can be changed
# by the entry owning it if they are authenticated.
# Others should not be able to see it, except the
# admin entry below
# These access lines apply to database #1 only
access to attrs=userPassword,shadowLastChange
by anonymous auth
by self write
by set="[cn=nounou,ou=group,dc=crans,dc=org]/memberUid & user/uid" write
by dn="cn=replicator,dc=crans,dc=org" read
by * none
access to attrs=loginShell,mail,telephoneNumber
by self write
by set="[cn=nounou,ou=group,dc=crans,dc=org]/memberUid & user/uid" write
by dn="cn=replicator,dc=crans,dc=org" read
by * read
# Ensure read access to the base for things like
# supportedSASLMechanisms. Without this you may
# have problems with SASL not knowing what
# mechanisms are available and the like.
# Note that this is covered by the 'access to *'
# ACL below too but if you change that as people
# are wont to do you'll still need this if you
# want SASL (and possible other things) to work
# happily.
access to dn.base="" by * read
# The admin dn has full write access, everyone else
# can read everything.
access to *
by set="[cn=nounou,ou=group,dc=crans,dc=org]/memberUid & user/uid" write
by dn="cn=replicator,dc=crans,dc=org" read
by * read
{% endif %}
#######################################################################
# Specific Directives for database #2, of type 'other' (can be mdb too):
# Database specific directives apply to this database until another
# 'database' directive occurs
#database <other>
# The base of your directory for database #2
#suffix "dc=debian,dc=org"