[defaults]
inventory = ./hosts.ini
+roles_path = ./roles:../roles
log_path = ./log
nocows=1
vault_password_file = ./gpg/get-vault-pass.sh
--- /dev/null
+---
+- name: 'Basic Setup for server: testvm'
+  hosts: testvm
+ roles:
+ - role: base
+ - role: vm-grub
+ - role: vm-network
--- /dev/null
+---
+vm_host:
+ installer:
+ net_if: br-mgmt
+ preseed_path: /srv/preseed
+ path: /srv/installer
+ distros:
+ - distro: debian
+ codename: stretch
+ arch:
+ - amd64
+ - i386
+ - distro: ubuntu
+ codename: xenial
+ arch:
+ - amd64
+ - i386
+ network:
+ interface: br-mgmt
+ ip: 192.168.33.65
+ mask: 255.255.255.0
+ gateway: 192.168.33.1
+ nameservers:
+ - 192.168.33.1
+ - 10.12.0.10
+ indices:
+ testvm: 200
--- /dev/null
+---
+vm_install_host: alfred.mgmt
+
+vm_install:
+ host: "{{ vm_install_host }}"
+ mem: 1024
+ numcpu: 2
+ disks:
+ primary: vda
+ virtio:
+ vda:
+ vg: storage
+ lv: "{{ inventory_hostname }}"
+ size: 10g
+ interfaces:
+ - idx: 1
+ bridge: "{{ hostvars[vm_install_host].vm_host.network.interface }}"
+ name: primary0
+ autostart: True
+
+vm_network:
+ systemd_link:
+ interfaces: "{{ vm_install.interfaces }}"
+ primary:
+ interface: primary0
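+    # the address is taken from the VM host's network plus this VM's index in
+    # vm_host.network.indices (e.g. 192.168.33.0/24 with index 200 -> 192.168.33.200)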
+ ip: "{{ (hostvars[vm_install_host].vm_host.network.ip+'/'+hostvars[vm_install_host].vm_host.network.mask) | ipaddr(hostvars[vm_install_host].vm_host.network.indices[inventory_hostname]) | ipaddr('address') }}"
+ mask: "{{ hostvars[vm_install_host].vm_host.network.mask }}"
+ gateway: "{{ hostvars[vm_install_host].vm_host.network.gateway | default(hostvars[vm_install_host].vm_host.network.ip) }}"
+ nameservers: "{{ hostvars[vm_install_host].vm_host.network.nameservers }}"
+ domain: realraum.at
--- /dev/null
+#!/bin/bash
+
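+# remove stale SSH host keys for a host that is about to be reinstalled,
+# covering both its short name and the hostname ssh actually resolves it to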
+if [ -z "$1" ]; then
+ echo "$0 <host>"
+ exit 1
+fi
+
+SHORT="$1"
+SSH_HOST=$(ssh -G "$1" | grep "^hostname " | awk '{ print($2) }' )
+
+for name in $SHORT $SSH_HOST; do
+ ssh-keygen -f "$HOME/.ssh/known_hosts" -R "$name"
+done
+
+exit 0
--- /dev/null
+---
+reboot_delay: 60
+reboot_timeout: 300
--- /dev/null
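+# trigger the reboot asynchronously so the task is not killed along with the
+# dying SSH connection, then poll until the host answers again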
+- name: reboot machine
+ shell: sleep 2 && shutdown -r now
+ async: 1
+ poll: 0
+ ignore_errors: true
+
+- name: waiting for host to come back
+ wait_for_connection:
+ delay: "{{ reboot_delay }}"
+ timeout: "{{ reboot_timeout }}"
--- /dev/null
+---
+- name: update grub
+ command: /usr/sbin/update-grub
--- /dev/null
+---
+- name: enable serial console in grub and for kernel
+ with_items:
+ - regexp: '^GRUB_TIMEOUT='
+ line: 'GRUB_TIMEOUT=2'
+ - regexp: '^GRUB_CMDLINE_LINUX='
+ line: 'GRUB_CMDLINE_LINUX="console=ttyS0,115200n8"'
+ - regexp: '^GRUB_TERMINAL='
+ line: 'GRUB_TERMINAL=serial'
+ - regexp: '^GRUB_SERIAL_COMMAND='
+ line: 'GRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"'
+ lineinfile:
+ dest: /etc/default/grub
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ notify: update grub
--- /dev/null
+---
+vm_host_force_download_installer: False
+vm_host_installer_url:
+ debian: "http://debian.mur.at/debian"
+ ubuntu: "http://ubuntu.uni-klu.ac.at/ubuntu"
--- /dev/null
+---
+- name: restart inetd
+ service:
+ name: openbsd-inetd
+ state: restarted
--- /dev/null
+---
+- name: install tftpd, qemu/kvm, libvirt and python-libvirt
+ apt:
+ name:
+ - atftpd
+ - openbsd-inetd
+ - qemu-kvm
+ - libvirt-bin
+ - python-libvirt
+ state: present
+
+- name: configure tftpd via inetd
+ lineinfile:
+ regexp: "^#?({{ vm_host.network.ip }}:)?tftp"
+ line: "{{ vm_host.network.ip }}:tftp dgram udp4 wait nobody /usr/sbin/tcpd /usr/sbin/in.tftpd --tftpd-timeout 300 --retry-timeout 5 --maxthread 10 --verbose=5 {{ vm_host.installer.preseed_path }}"
+ path: /etc/inetd.conf
+ notify: restart inetd
+
+- name: make sure installer directories exist
+ with_items:
+ - "{{ vm_host.installer.path }}"
+ - "{{ vm_host.installer.preseed_path }}"
+ file:
+ name: "{{ item }}"
+ state: directory
+
+- name: prepare directories for installer images
+ with_subelements:
+ - "{{ vm_host.installer.distros }}"
+ - arch
+ file:
+ name: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}"
+ state: directory
+
+- name: download installer kernel images
+ with_subelements:
+ - "{{ vm_host.installer.distros }}"
+ - arch
+ get_url:
+ url: "{{ vm_host_installer_url[item.0.distro] }}/dists/{{ item.0.codename }}/main/installer-{{ item.1 }}/current/images/netboot/{{ item.0.distro }}-installer/{{ item.1 }}/linux"
+ dest: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}/linux"
+ mode: 0644
+ force: "{{ vm_host_force_download_installer }}"
+
+- name: download installer initrd.gz
+ with_subelements:
+ - "{{ vm_host.installer.distros }}"
+ - arch
+ get_url:
+ url: "{{ vm_host_installer_url[item.0.distro] }}/dists/{{ item.0.codename }}/main/installer-{{ item.1 }}/current/images/netboot/{{ item.0.distro }}-installer/{{ item.1 }}/initrd.gz"
+ dest: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}/initrd.gz"
+ mode: 0644
+ force: "{{ vm_host_force_download_installer }}"
--- /dev/null
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
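+# wait_for_virt: poll a libvirt guest until it reaches one of the requested
+# states (running, paused, shutdown or crashed) or the timeout expires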
+
+
+import traceback
+import time
+
+try:
+ import libvirt
+except ImportError:
+ HAS_VIRT = False
+else:
+ HAS_VIRT = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE = 2
+
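+# map libvirt's numeric domain states (nostate/running/blocked, paused,
+# shutdown/shutoff, crashed) onto the coarse names accepted by 'states'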
+VIRT_STATE_NAME_MAP = {
+ 0: "running",
+ 1: "running",
+ 2: "running",
+ 3: "paused",
+ 4: "shutdown",
+ 5: "shutdown",
+ 6: "crashed"
+}
+
+
+class VMNotFound(Exception):
+ pass
+
+
+class LibvirtConnection(object):
+
+ def __init__(self, uri, module):
+
+ self.module = module
+
+ cmd = "uname -r"
+ rc, stdout, stderr = self.module.run_command(cmd)
+
+ if "xen" in stdout:
+ conn = libvirt.open(None)
+ elif "esx" in uri:
+ auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
+ conn = libvirt.openAuth(uri, auth)
+ else:
+ conn = libvirt.open(uri)
+
+ if not conn:
+ raise Exception("hypervisor connection failure")
+
+ self.conn = conn
+
+ def find_vm(self, vmid):
+ """
+ Extra bonus feature: vmid = -1 returns a list of everything
+ """
+ conn = self.conn
+
+ vms = []
+
+ # this block of code borrowed from virt-manager:
+ # get working domain's name
+ ids = conn.listDomainsID()
+ for id in ids:
+ vm = conn.lookupByID(id)
+ vms.append(vm)
+ # get defined domain
+ names = conn.listDefinedDomains()
+ for name in names:
+ vm = conn.lookupByName(name)
+ vms.append(vm)
+
+ if vmid == -1:
+ return vms
+
+ for vm in vms:
+ if vm.name() == vmid:
+ return vm
+
+ raise VMNotFound("virtual machine %s not found" % vmid)
+
+ def get_status(self, vmid):
+ state = self.find_vm(vmid).info()[0]
+ return VIRT_STATE_NAME_MAP.get(state, "unknown")
+
+
+class Virt(object):
+
+ def __init__(self, uri, module):
+ self.module = module
+ self.uri = uri
+
+ def __get_conn(self):
+ self.conn = LibvirtConnection(self.uri, self.module)
+ return self.conn
+
+ def status(self, vmid):
+ """
+        Return a state suitable for server consumption, i.e. one of the VIRT_STATE_NAME_MAP values rather than raw hypervisor output.
+ """
+ self.__get_conn()
+ return self.conn.get_status(vmid)
+
+
+def core(module):
+
+ states = module.params.get('states', None)
+ guest = module.params.get('name', None)
+ uri = module.params.get('uri', None)
+ delay = module.params.get('delay', None)
+ sleep = module.params.get('sleep', None)
+ timeout = module.params.get('timeout', None)
+
+ v = Virt(uri, module)
+ res = {'changed': False, 'failed': True}
+
+ if delay > 0:
+ time.sleep(delay)
+
+ for _ in range(0, timeout, sleep):
+ state = v.status(guest)
+ if state in states:
+ res['state'] = state
+ res['failed'] = False
+ res['msg'] = "guest '%s' has reached state: %s" % (guest, state)
+ return VIRT_SUCCESS, res
+
+ time.sleep(sleep)
+
+ res['msg'] = "timeout waiting for guest '%s' to reach one of states: %s" % (guest, ', '.join(states))
+ return VIRT_FAILED, res
+
+
+def main():
+
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(aliases=['guest'], required=True),
+ states=dict(type='list', required=True),
+ uri=dict(default='qemu:///system'),
+ delay=dict(type='int', default=0),
+ sleep=dict(type='int', default=1),
+ timeout=dict(type='int', default=300),
+ ))
+
+ if not HAS_VIRT:
+ module.fail_json(
+ msg='The `libvirt` module is not importable. Check the requirements.'
+ )
+
+ for state in module.params.get('states', None):
+ if state not in set(VIRT_STATE_NAME_MAP.values()):
+ module.fail_json(
+ msg="states contains invalid state '%s', must be one of %s" % (state, ', '.join(set(VIRT_STATE_NAME_MAP.values())))
+ )
+
+ rc = VIRT_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    if rc != 0:  # something went wrong, emit the msg
+ module.fail_json(rc=rc, msg=result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+---
+- name: generate preseed file
+ template:
+ src: "preseed_{{ vmdistro }}-{{ vmdistcodename }}.cfg.j2"
+ dest: "{{ hostvars[vm_install.host].vm_host.installer.preseed_path }}/vm-{{ inventory_hostname }}-{{ vmdistro }}-{{ vmdistcodename }}.cfg"
+ delegate_to: "{{ vm_install.host }}"
+
+- name: create disks for vm
+ with_dict: "{{ vm_install.disks.virtio | default({}) | combine(vm_install.disks.scsi | default({})) }}"
+ lvol:
+ vg: "{{ item.value.vg }}"
+ lv: "{{ item.value.lv }}"
+ size: "{{ item.value.size }}"
+ delegate_to: "{{ vm_install.host }}"
+
+- name: check if vm already exists
+ virt:
+ name: "{{ inventory_hostname }}"
+ command: info
+ delegate_to: "{{ vm_install.host }}"
+ register: vmhost_info
+
+- name: destroy existing vm
+ virt:
+ name: "{{ inventory_hostname }}"
+ state: destroyed
+ delegate_to: "{{ vm_install.host }}"
+ when: inventory_hostname in vmhost_info
+
+- name: wait for vm to be destroyed
+ wait_for_virt:
+ name: "{{ inventory_hostname }}"
+ states: shutdown,crashed
+ timeout: 5
+ delegate_to: "{{ vm_install.host }}"
+ when: inventory_hostname in vmhost_info
+
+- name: undefine existing vm
+ virt:
+ name: "{{ inventory_hostname }}"
+ command: undefine
+ delegate_to: "{{ vm_install.host }}"
+ when: inventory_hostname in vmhost_info
+
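+## the domain is defined twice: first with the netboot kernel/initrd and
+## preseed cmdline (run_installer=True), then again with the normal boot
+## configuration once the installer has shut the VM down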
+- name: enable installer in VM config
+ set_fact:
+ run_installer: True
+
+- name: define new installer vm
+ virt:
+ name: "{{ inventory_hostname }}"
+ command: define
+ xml: "{{ lookup('template', 'libvirt-domain.xml.j2') }}"
+ delegate_to: "{{ vm_install.host }}"
+
+- name: start vm
+ virt:
+ name: "{{ inventory_hostname }}"
+ state: running
+ delegate_to: "{{ vm_install.host }}"
+
+- name: wait for installer to start
+ wait_for_virt:
+ name: "{{ inventory_hostname }}"
+ states: running
+ timeout: 10
+ delegate_to: "{{ vm_install.host }}"
+
+- debug:
+    msg: "you can check on the status of the installer by running 'virsh console {{ inventory_hostname }}' on host {{ vm_install.host }}."
+
+- name: wait for installer to finish or crash
+ wait_for_virt:
+ name: "{{ inventory_hostname }}"
+ states: shutdown,crashed
+ timeout: 900
+ delegate_to: "{{ vm_install.host }}"
+ register: installer_result
+ failed_when: installer_result.failed or installer_result.state == "crashed"
+
+- name: undefine installer vm
+ virt:
+ name: "{{ inventory_hostname }}"
+ command: undefine
+ delegate_to: "{{ vm_install.host }}"
+
+- name: disable installer in VM config
+ set_fact:
+ run_installer: False
+
+- name: define new production vm
+ virt:
+ name: "{{ inventory_hostname }}"
+ command: define
+ xml: "{{ lookup('template', 'libvirt-domain.xml.j2') }}"
+ delegate_to: "{{ vm_install.host }}"
+
+- name: start vm
+ virt:
+ name: "{{ inventory_hostname }}"
+ state: running
+ delegate_to: "{{ vm_install.host }}"
+
+- name: mark vm as autostarted
+ virt:
+ name: "{{ inventory_hostname }}"
+ autostart: "{{ vm_install.autostart }}"
+ command: info ## virt module needs either command or state
+ delegate_to: "{{ vm_install.host }}"
+ when: vm_install.autostart is defined
+
+ ## TODO: find a better way to fetch host key of new VMs
+- name: disable ssh StrictHostKeyChecking for the next step
+ set_fact:
+ ansible_ssh_extra_args: -o StrictHostKeyChecking=no
+
+- name: wait for vm to start up
+ wait_for_connection:
+ delay: 5
+ timeout: 120
+
+- name: reenable StrictHostKeyChecking
+ set_fact:
+ ansible_ssh_extra_args: ""
--- /dev/null
+<domain type='kvm'>
+ <name>{{ inventory_hostname }}</name>
+ <memory>{{ vm_install.mem * 1024 }}</memory>
+ <currentMemory>{{ vm_install.mem * 1024 }}</currentMemory>
+ <vcpu>{{ vm_install.numcpu }}</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-0.12'>hvm</type>
+{% if run_installer %}
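+  <!-- installer pass: boot the distribution netboot kernel/initrd directly and
+       point debian-installer at the generated preseed file served via TFTP -->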
+ <kernel>{{ hostvars[vm_install.host].vm_host.installer.path }}/{{ vmdistro }}-{{ vmdistcodename }}/{{ vm_install.arch | default('amd64') }}/linux</kernel>
+ <initrd>{{ hostvars[vm_install.host].vm_host.installer.path }}/{{ vmdistro }}-{{ vmdistcodename }}/{{ vm_install.arch | default('amd64') }}/initrd.gz</initrd>
+ <cmdline>console=ttyS0,115200n8 auto=true interface=auto url=tftp://{{ hostvars[vm_install.host]['ansible_' + hostvars[vm_install.host].vm_host.installer.net_if].ipv4.address }}/vm-{{ inventory_hostname }}-{{ vmdistro }}-{{ vmdistcodename }}.cfg netcfg/choose_interface=enp1s1 netcfg/disable_autoconfig=true netcfg/get_ipaddress={{ vm_network.primary.ip }} netcfg/get_netmask={{ vm_network.primary.mask }} netcfg/get_gateway={{ vm_network.primary.gateway }} netcfg/get_nameservers="{{ vm_network.primary.nameservers | join(' ') }}" netcfg/confirm_static=true netcfg/get_hostname={{ inventory_hostname }} netcfg/get_domain={{ vm_network.primary.domain }}</cmdline>
+{% endif %}
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+{% if run_installer %}
+ <on_reboot>destroy</on_reboot>
+ <on_crash>destroy</on_crash>
+{% else %}
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+{% endif %}
+ <devices>
+ <emulator>/usr/bin/kvm</emulator>
+
+{% if 'virtio' in vm_install.disks %}
+{% for device, lv in vm_install.disks.virtio.items() %}
+ <disk type='block' device='disk'>
+ <driver name='qemu' type='raw' cache='none' discard='unmap'/>
+ <source dev='/dev/mapper/{{ lv.vg | replace('-', '--') }}-{{ lv.lv | replace('-', '--') }}'/>
+ <target dev='{{ device }}' bus='virtio'/>
+ </disk>
+{% endfor %}
+{% endif %}
+
+{% if 'scsi' in vm_install.disks %}
+ <controller type='scsi' index='0' model='virtio-scsi'/>
+{% for device, lv in vm_install.disks.scsi.items() %}
+ <disk type='block' device='disk'>
+ <driver name='qemu' type='raw' cache='none' discard='unmap'/>
+ <source dev='/dev/mapper/{{ lv.vg | replace('-', '--') }}-{{ lv.lv | replace('-', '--') }}'/>
+ <target dev='{{ device }}' bus='scsi'/>
+ </disk>
+{% endfor %}
+{% endif %}
+
+{% if vm_install.interfaces %}
+{% for iface in vm_install.interfaces %}
+  <interface type='bridge'>
+    <source bridge='{{ iface.bridge }}'/>
+    <model type='virtio'/>
+    <address type='pci' domain='0x0000' bus='0x01' slot='0x0{{ iface.idx }}' function='0x0'/>
+  </interface>
+{% endfor %}
+{% endif %}
+
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ </devices>
+</domain>
--- /dev/null
+#########################################################################
+# realraum preseed file for Debian stretch based VMs
+#########################################################################
+
+d-i debian-installer/language string en
+d-i debian-installer/country string AT
+d-i debian-installer/locale string de_AT.UTF-8
+d-i keyboard-configuration/xkb-keymap select de
+
+
+#d-i netcfg/choose_interface select enp1s1
+#d-i netcfg/disable_autoconfig boolean false
+#d-i netcfg/get_ipaddress string {{ vm_network.primary.ip }}
+#d-i netcfg/get_netmask string {{ vm_network.primary.mask }}
+#d-i netcfg/get_gateway string {{ vm_network.primary.gateway }}
+#d-i netcfg/get_nameservers string {{ vm_network.primary.nameservers | join(' ') }}
+#d-i netcfg/confirm_static boolean true
+
+d-i netcfg/get_hostname string {{ inventory_hostname }}
+d-i netcfg/get_domain string {{ vm_network.primary.domain }}
+d-i netcfg/wireless_wep string
+
+
+d-i mirror/country string manual
+d-i mirror/http/hostname string debian.ffgraz.net
+d-i mirror/http/directory string /debian
+d-i mirror/http/proxy string
+
+
+d-i passwd/make-user boolean false
+d-i passwd/root-password password this-very-very-secure-password-will-be-removed-by-latecommand
+d-i passwd/root-password-again password this-very-very-secure-password-will-be-removed-by-latecommand
+
+
+d-i clock-setup/utc boolean true
+d-i time/zone string Europe/Vienna
+d-i clock-setup/ntp boolean false
+
+
+d-i partman-auto/disk string /dev/{{ vm_install.disks.primary }}
+d-i partman-auto/method string lvm
+d-i partman-lvm/device_remove_lvm boolean true
+d-i partman-md/device_remove_md boolean true
+
+d-i partman-lvm/confirm boolean true
+d-i partman-lvm/confirm_nooverwrite boolean true
+
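+# the final "dummy" LV is only there to keep partman from allocating the whole
+# volume group; it is removed again in preseed/late_command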
+d-i partman-auto/expert_recipe string \
+ boot-root :: \
+ 1000 10000 -1 ext4 \
+ $defaultignore{ } $primary{ } $bootable{ } \
+ method{ lvm } vg_name{ {{ inventory_hostname }} } \
+ . \
+ 2048 10000 2560 ext4 \
+ $lvmok{ } in_vg{ {{ inventory_hostname }} } \
+ method{ format } format{ } \
+ use_filesystem{ } filesystem{ ext4 } \
+ mountpoint{ / } \
+ . \
+ 1024 11000 1280 ext4 \
+ $lvmok{ } in_vg{ {{ inventory_hostname }} } \
+ method{ format } format{ } \
+ use_filesystem{ } filesystem{ ext4 } \
+ mountpoint{ /var } \
+ . \
+ 768 10000 768 ext4 \
+ $lvmok{ } in_vg{ {{ inventory_hostname }} } \
+ method{ format } format{ } \
+ use_filesystem{ } filesystem{ ext4 } \
+ mountpoint{ /var/log } \
+ options/nodev{ nodev } options/noatime{ noatime } \
+ options/noexec{ noexec } \
+ . \
+ 16 20000 -1 ext4 \
+ $lvmok{ } in_vg{ {{ inventory_hostname }} } \
+                method{ keep } lv_name{ dummy } \
+ .
+
+d-i partman-auto-lvm/no_boot boolean true
+d-i partman-basicfilesystems/no_swap boolean false
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+
+d-i base-installer/install-recommends boolean false
+d-i apt-setup/security_host string debian.ffgraz.net
+
+tasksel tasksel/first multiselect
+d-i pkgsel/include string openssh-server python
+d-i pkgsel/upgrade select safe-upgrade
+popularity-contest popularity-contest/participate boolean false
+
+d-i grub-installer/choose_bootdev string /dev/{{ vm_install.disks.primary }}
+d-i grub-installer/only_debian boolean true
+d-i grub-installer/with_other_os boolean false
+
+d-i finish-install/reboot_in_progress note
+
+
+d-i preseed/late_command string \
+ lvremove -f {{ inventory_hostname }}/dummy; \
+ in-target bash -c "apt-get update -q && apt-get full-upgrade -y -q"; \
+ in-target bash -c "passwd -d root; passwd -l root; umask 077; mkdir -p /root/.ssh/; echo -e '{{ sshserver_root_keys }}' > /root/.ssh/authorized_keys"
--- /dev/null
+---
+- name: rebuild initramfs
+ command: update-initramfs -u
--- /dev/null
+---
+- block:
+ - name: remove legacy systemd.link units
+ with_items:
+ - 50-virtio-kernel-names.link
+ - 99-default.link
+ file:
+ name: "/etc/systemd/network/{{ item }}"
+ state: absent
+
+ - name: install systemd network link units
+ template:
+ src: systemd.link.j2
+ dest: "/etc/systemd/network/{{ '%02d' | format(item.idx + 10) }}-{{ item.name }}.link"
+ with_items: "{{ vm_network.systemd_link.interfaces }}"
+ notify: rebuild initramfs
+
+ when: vm_network.systemd_link is defined
+
+- name: install basic interface config
+ template:
+ src: interfaces.j2
+ dest: /etc/network/interfaces
+ mode: 0644
--- /dev/null
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+source /etc/network/interfaces.d/*
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto {{ vm_network.primary.interface }}
+iface {{ vm_network.primary.interface }} inet static
+    address {{ vm_network.primary.ip }}
+    netmask {{ vm_network.primary.mask }}
+    gateway {{ vm_network.primary.gateway }}
+ pre-up echo 0 > /proc/sys/net/ipv6/conf/$IFACE/accept_ra
+ pre-up echo 0 > /proc/sys/net/ipv6/conf/$IFACE/autoconf
--- /dev/null
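+# rename the interface based on its PCI slot; the slot number matches the
+# 'idx' assigned to the interface in the libvirt domain definition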
+[Match]
+Path=pci-0000:01:{{ "%02d" | format(item.idx) }}.0
+
+[Link]
+Name={{ item.name }}
--- /dev/null
+#!/bin/bash
+
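+# wrapper around the vm-install playbook: clears any stale SSH host keys for
+# the VM, then runs the installation with the given distro and codename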
+if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
+ echo "$0 <vm> <distro> <codename>"
+ exit 1
+fi
+
+echo "installing vm: $1 with $2/$3"
+echo ""
+
+echo "########## clearing old ssh host keys #########"
+./remove-known-host.sh "$1"
+echo ""
+
+echo "######## running the install playbook ########"
+exec ansible-playbook -e "vmname=$1" -e "vmdistro=$2" -e "vmdistcodename=$3" vm-install.yml
--- /dev/null
+---
+- name: Basic Installation
+ hosts: "{{ vmname }}"
+ gather_facts: no
+ pre_tasks:
+ - name: Gather facts of vm host
+ setup:
+ delegate_to: "{{ vm_install.host }}"
+ delegate_facts: yes
+ roles:
+ - role: vm-install
+
+- import_playbook: "host_playbooks/{{ vmname }}.yml"
+
+- name: Reboot and wait for VM to come back
+ hosts: "{{ vmname }}"
+ gather_facts: no
+ roles:
+ - role: reboot-and-wait
+ reboot_delay: 10
+ reboot_timeout: 120