From 320cd6f207345a4101fe4f353d86d3a66a6eabd3 Mon Sep 17 00:00:00 2001
From: nicoo
Date: Sun, 22 Apr 2018 17:43:15 +0200
Subject: [PATCH] roles/vm-*: Move to roles/vm/*

---
 ansible/host_playbooks/alfred.mgmt.yml              |   2 +-
 ansible/host_playbooks/testvm.mgmt.yml              |   4 +-
 ansible/roles/vm-grub/handlers/main.yml             |   3 -
 ansible/roles/vm-grub/tasks/main.yml                |  16 --
 ansible/roles/vm-host/defaults/main.yml             |   5 -
 ansible/roles/vm-host/handlers/main.yml             |   5 -
 ansible/roles/vm-host/tasks/main.yml                |  53 ------
 ansible/roles/vm-install/library/wait_for_virt.py   | 179 --------------------
 ansible/roles/vm-install/tasks/main.yml             |  96 -----------
 .../vm-install/templates/libvirt-domain.xml.j2      |  70 --------
 .../templates/preseed_debian-stretch.cfg.j2         | 106 ------------
 ansible/roles/vm-network/handlers/main.yml          |   3 -
 ansible/roles/vm-network/tasks/main.yml             |  24 ---
 ansible/roles/vm-network/templates/interfaces.j2    |  17 --
 ansible/roles/vm-network/templates/systemd.link.j2  |   5 -
 ansible/roles/vm/grub/handlers/main.yml             |   3 +
 ansible/roles/vm/grub/tasks/main.yml                |  16 ++
 ansible/roles/vm/host/defaults/main.yml             |   5 +
 ansible/roles/vm/host/handlers/main.yml             |   5 +
 ansible/roles/vm/host/tasks/main.yml                |  53 ++++++
 ansible/roles/vm/install/library/wait_for_virt.py   | 179 ++++++++++++++++++++
 ansible/roles/vm/install/tasks/main.yml             |  96 +++++++++++
 .../vm/install/templates/libvirt-domain.xml.j2      |  70 ++++++++
 .../templates/preseed_debian-stretch.cfg.j2         | 106 ++++++++++++
 ansible/roles/vm/network/handlers/main.yml          |   3 +
 ansible/roles/vm/network/tasks/main.yml             |  24 +++
 ansible/roles/vm/network/templates/interfaces.j2    |  17 ++
 ansible/roles/vm/network/templates/systemd.link.j2  |   5 +
 ansible/vm-install.yml                              |   2 +-
 29 files changed, 586 insertions(+), 586 deletions(-)
 delete mode 100644 ansible/roles/vm-grub/handlers/main.yml
 delete mode 100644 ansible/roles/vm-grub/tasks/main.yml
 delete mode 100644 ansible/roles/vm-host/defaults/main.yml
 delete mode 100644 ansible/roles/vm-host/handlers/main.yml
 delete mode 100644 ansible/roles/vm-host/tasks/main.yml
 delete mode 100644 ansible/roles/vm-install/library/wait_for_virt.py
 delete mode 100644 ansible/roles/vm-install/tasks/main.yml
 delete mode 100644 ansible/roles/vm-install/templates/libvirt-domain.xml.j2
 delete mode 100644 ansible/roles/vm-install/templates/preseed_debian-stretch.cfg.j2
 delete mode 100644 ansible/roles/vm-network/handlers/main.yml
 delete mode 100644 ansible/roles/vm-network/tasks/main.yml
 delete mode 100644 ansible/roles/vm-network/templates/interfaces.j2
 delete mode 100644 ansible/roles/vm-network/templates/systemd.link.j2
 create mode 100644 ansible/roles/vm/grub/handlers/main.yml
 create mode 100644 ansible/roles/vm/grub/tasks/main.yml
 create mode 100644 ansible/roles/vm/host/defaults/main.yml
 create mode 100644 ansible/roles/vm/host/handlers/main.yml
 create mode 100644 ansible/roles/vm/host/tasks/main.yml
 create mode 100644 ansible/roles/vm/install/library/wait_for_virt.py
 create mode 100644 ansible/roles/vm/install/tasks/main.yml
 create mode 100644 ansible/roles/vm/install/templates/libvirt-domain.xml.j2
 create mode 100644 ansible/roles/vm/install/templates/preseed_debian-stretch.cfg.j2
 create mode 100644 ansible/roles/vm/network/handlers/main.yml
 create mode 100644 ansible/roles/vm/network/tasks/main.yml
 create mode 100644 ansible/roles/vm/network/templates/interfaces.j2
 create mode 100644 ansible/roles/vm/network/templates/systemd.link.j2

diff --git a/ansible/host_playbooks/alfred.mgmt.yml b/ansible/host_playbooks/alfred.mgmt.yml
index 95d9371..d5c93e3 100644
--- a/ansible/host_playbooks/alfred.mgmt.yml
+++ b/ansible/host_playbooks/alfred.mgmt.yml
@@ -3,4 +3,4 @@
   hosts: alfred.mgmt
   roles:
     - role: base
-    - role: vm-host
+    - role: vm/host
diff --git a/ansible/host_playbooks/testvm.mgmt.yml b/ansible/host_playbooks/testvm.mgmt.yml
index 0640da7..80fbc6b 100644
--- a/ansible/host_playbooks/testvm.mgmt.yml
+++ b/ansible/host_playbooks/testvm.mgmt.yml
@@ -3,5 +3,5 @@
   hosts: testvm.mgmt
   roles:
     - role: base
-    - role: vm-grub
-    - role: vm-network
+    - role: vm/grub
+    - role: vm/network
diff --git a/ansible/roles/vm-grub/handlers/main.yml b/ansible/roles/vm-grub/handlers/main.yml
deleted file mode 100644
index 4bddbb1..0000000
--- a/ansible/roles/vm-grub/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: update grub
-  command: /usr/sbin/update-grub
diff --git a/ansible/roles/vm-grub/tasks/main.yml b/ansible/roles/vm-grub/tasks/main.yml
deleted file mode 100644
index f751243..0000000
--- a/ansible/roles/vm-grub/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: enable serial console in grub and for kernel
-  with_items:
-    - regexp: '^GRUB_TIMEOUT='
-      line: 'GRUB_TIMEOUT=2'
-    - regexp: '^GRUB_CMDLINE_LINUX='
-      line: 'GRUB_CMDLINE_LINUX="console=ttyS0,115200n8"'
-    - regexp: '^GRUB_TERMINAL='
-      line: 'GRUB_TERMINAL=serial'
-    - regexp: '^GRUB_SERIAL_COMMAND='
-      line: 'GRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"'
-  lineinfile:
-    dest: /etc/default/grub
-    regexp: "{{ item.regexp }}"
-    line: "{{ item.line }}"
-  notify: update grub
diff --git a/ansible/roles/vm-host/defaults/main.yml b/ansible/roles/vm-host/defaults/main.yml
deleted file mode 100644
index deaa50a..0000000
--- a/ansible/roles/vm-host/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-vm_host_force_download_installer: False
-vm_host_installer_url:
-  debian: "http://debian.mur.at/debian"
-  ubuntu: "http://ubuntu.uni-klu.ac.at/ubuntu"
diff --git a/ansible/roles/vm-host/handlers/main.yml b/ansible/roles/vm-host/handlers/main.yml
deleted file mode 100644
index 158f4dc..0000000
--- a/ansible/roles/vm-host/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: restart inetd
-  service:
-    name: openbsd-inetd
-    state: restarted
diff --git a/ansible/roles/vm-host/tasks/main.yml b/ansible/roles/vm-host/tasks/main.yml
deleted file mode 100644
index 248f855..0000000
--- a/ansible/roles/vm-host/tasks/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: install tftpd and python-libvirt
-  apt:
-    name:
-      - atftpd
-      - openbsd-inetd
-      - qemu-kvm
-      - libvirt-bin
-      - python-libvirt
-    state: present
-
-- name: configure tftpd via inetd
-  lineinfile:
-    regexp: "^#?({{ vm_host.network.ip }}:)?tftp"
-    line: "{{ vm_host.network.ip }}:tftp dgram udp4 wait nobody /usr/sbin/tcpd /usr/sbin/in.tftpd --tftpd-timeout 300 --retry-timeout 5 --maxthread 10 --verbose=5 {{ vm_host.installer.preseed_path }}"
-    path: /etc/inetd.conf
-  notify: restart inetd
-
-- name: make sure installer directories exists
-  with_items:
-    - "{{ vm_host.installer.path }}"
-    - "{{ vm_host.installer.preseed_path }}"
-  file:
-    name: "{{ item }}"
-    state: directory
-
-- name: prepare directories for installer images
-  with_subelements:
-    - "{{ vm_host.installer.distros }}"
-    - arch
-  file:
-    name: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}"
-    state: directory
-
-- name: download installer kernel images
-  with_subelements:
-    - "{{ vm_host.installer.distros }}"
-    - arch
-  get_url:
-    url: "{{ vm_host_installer_url[item.0.distro] }}/dists/{{ item.0.codename }}/main/installer-{{ item.1 }}/current/images/netboot/{{ item.0.distro }}-installer/{{ item.1 }}/linux"
-    dest: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}/linux"
-    mode: 0644
-    force: "{{ vm_host_force_download_installer }}"
-
-- name: download installer initrd.gz
-  with_subelements:
-    - "{{ vm_host.installer.distros }}"
-    - arch
-  get_url:
-    url: "{{ vm_host_installer_url[item.0.distro] }}/dists/{{ item.0.codename }}/main/installer-{{ item.1 }}/current/images/netboot/{{ item.0.distro }}-installer/{{ item.1 }}/initrd.gz"
-    dest: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}/initrd.gz"
-    mode: 0644
-    force: "{{ vm_host_force_download_installer }}"
diff --git a/ansible/roles/vm-install/library/wait_for_virt.py b/ansible/roles/vm-install/library/wait_for_virt.py
deleted file mode 100644
index 6c49fae..0000000
--- a/ansible/roles/vm-install/library/wait_for_virt.py
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-import traceback
-import time
-
-try:
-    import libvirt
-except ImportError:
-    HAS_VIRT = False
-else:
-    HAS_VIRT = True
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-
-
-VIRT_FAILED = 1
-VIRT_SUCCESS = 0
-VIRT_UNAVAILABLE = 2
-
-VIRT_STATE_NAME_MAP = {
-    0: "running",
-    1: "running",
-    2: "running",
-    3: "paused",
-    4: "shutdown",
-    5: "shutdown",
-    6: "crashed"
-}
-
-
-class VMNotFound(Exception):
-    pass
-
-
-class LibvirtConnection(object):
-
-    def __init__(self, uri, module):
-
-        self.module = module
-
-        cmd = "uname -r"
-        rc, stdout, stderr = self.module.run_command(cmd)
-
-        if "xen" in stdout:
-            conn = libvirt.open(None)
-        elif "esx" in uri:
-            auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
-            conn = libvirt.openAuth(uri, auth)
-        else:
-            conn = libvirt.open(uri)
-
-        if not conn:
-            raise Exception("hypervisor connection failure")
-
-        self.conn = conn
-
-    def find_vm(self, vmid):
-        """
-        Extra bonus feature: vmid = -1 returns a list of everything
-        """
-        conn = self.conn
-
-        vms = []
-
-        # this block of code borrowed from virt-manager:
-        # get working domain's name
-        ids = conn.listDomainsID()
-        for id in ids:
-            vm = conn.lookupByID(id)
-            vms.append(vm)
-        # get defined domain
-        names = conn.listDefinedDomains()
-        for name in names:
-            vm = conn.lookupByName(name)
-            vms.append(vm)
-
-        if vmid == -1:
-            return vms
-
-        for vm in vms:
-            if vm.name() == vmid:
-                return vm
-
-        raise VMNotFound("virtual machine %s not found" % vmid)
-
-    def get_status(self, vmid):
-        state = self.find_vm(vmid).info()[0]
-        return VIRT_STATE_NAME_MAP.get(state, "unknown")
-
-
-class Virt(object):
-
-    def __init__(self, uri, module):
-        self.module = module
-        self.uri = uri
-
-    def __get_conn(self):
-        self.conn = LibvirtConnection(self.uri, self.module)
-        return self.conn
-
-    def status(self, vmid):
-        """
-        Return a state suitable for server consumption. Aka, codes.py values, not XM output.
-        """
-        self.__get_conn()
-        return self.conn.get_status(vmid)
-
-
-def core(module):
-
-    states = module.params.get('states', None)
-    guest = module.params.get('name', None)
-    uri = module.params.get('uri', None)
-    delay = module.params.get('delay', None)
-    sleep = module.params.get('sleep', None)
-    timeout = module.params.get('timeout', None)
-
-    v = Virt(uri, module)
-    res = {'changed': False, 'failed': True}
-
-    if delay > 0:
-        time.sleep(delay)
-
-    for _ in range(0, timeout, sleep):
-        state = v.status(guest)
-        if state in states:
-            res['state'] = state
-            res['failed'] = False
-            res['msg'] = "guest '%s' has reached state: %s" % (guest, state)
-            return VIRT_SUCCESS, res
-
-        time.sleep(sleep)
-
-    res['msg'] = "timeout waiting for guest '%s' to reach one of states: %s" % (guest, ', '.join(states))
-    return VIRT_FAILED, res
-
-
-def main():
-
-    module = AnsibleModule(argument_spec=dict(
-        name=dict(aliases=['guest'], required=True),
-        states=dict(type='list', required=True),
-        uri=dict(default='qemu:///system'),
-        delay=dict(type='int', default=0),
-        sleep=dict(type='int', default=1),
-        timeout=dict(type='int', default=300),
-    ))
-
-    if not HAS_VIRT:
-        module.fail_json(
-            msg='The `libvirt` module is not importable. Check the requirements.'
-        )
-
-    for state in module.params.get('states', None):
-        if state not in set(VIRT_STATE_NAME_MAP.values()):
-            module.fail_json(
-                msg="states contains invalid state '%s', must be one of %s" % (state, ', '.join(set(VIRT_STATE_NAME_MAP.values())))
-            )
-
-    rc = VIRT_SUCCESS
-    try:
-        rc, result = core(module)
-    except Exception as e:
-        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
-    if rc != 0:  # something went wrong emit the msg
-        module.fail_json(rc=rc, msg=result)
-    else:
-        module.exit_json(**result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/ansible/roles/vm-install/tasks/main.yml b/ansible/roles/vm-install/tasks/main.yml
deleted file mode 100644
index 1603483..0000000
--- a/ansible/roles/vm-install/tasks/main.yml
+++ /dev/null
@@ -1,96 +0,0 @@
----
-- name: generate preseed file
-  template:
-    src: "preseed_{{ vmdistro }}-{{ vmdistcodename }}.cfg.j2"
-    dest: "{{ vm_host.installer.preseed_path }}/vm-{{ vmname }}-{{ vmdistro }}-{{ vmdistcodename }}.cfg"
-
-- name: create disks for vm
-  with_dict: "{{ hostvars[vmname].vm_install_cooked.disks.virtio | default({}) | combine(hostvars[vmname].vm_install_cooked.disks.scsi | default({})) }}"
-  lvol:
-    vg: "{{ item.value.vg }}"
-    lv: "{{ item.value.lv }}"
-    size: "{{ item.value.size }}"
-
-- name: check if vm already exists
-  virt:
-    name: "{{ vmname }}"
-    command: info
-  register: vmhost_info
-
-- name: destroy exisiting vm
-  virt:
-    name: "{{ vmname }}"
-    state: destroyed
-  when: vmname in vmhost_info
-
-- name: wait for vm to be destroyed
-  wait_for_virt:
-    name: "{{ vmname }}"
-    states: shutdown,crashed
-    timeout: 5
-  when: vmname in vmhost_info
-
-- name: undefining exisiting vm
-  virt:
-    name: "{{ vmname }}"
-    command: undefine
-  when: vmname in vmhost_info
-
-- name: enable installer in VM config
-  set_fact:
-    run_installer: True
-
-- name: define new installer vm
-  virt:
-    name: "{{ vmname }}"
-    command: define
-    xml: "{{ lookup('template', 'libvirt-domain.xml.j2') }}"
-
-- name: start vm
-  virt:
-    name: "{{ vmname }}"
-    state: running
-
-- name: wait for installer to start
-  wait_for_virt:
-    name: "{{ vmname }}"
-    states: running
-    timeout: 10
-
-- debug:
-    msg: "you can check on the status of the installer running this command 'virsh console {{ vmname }}' on host {{ inventory_hostname }}."
-
-- name: wait for installer to finish or crash
-  wait_for_virt:
-    name: "{{ vmname }}"
-    states: shutdown,crashed
-    timeout: 900
-  register: installer_result
-  failed_when: installer_result.failed or installer_result.state == "crashed"
-
-- name: undefining installer vm
-  virt:
-    name: "{{ vmname }}"
-    command: undefine
-
-- name: disable installer in VM config
-  set_fact:
-    run_installer: False
-
-- name: define new production vm
-  virt:
-    name: "{{ vmname }}"
-    command: define
-    xml: "{{ lookup('template', 'libvirt-domain.xml.j2') }}"
-
-- name: start vm
-  virt:
-    name: "{{ vmname }}"
-    state: running
-
-- name: mark vm as autostarted
-  virt:
-    name: "{{ vmname }}"
-    autostart: "{{ hostvars[vmname].vm_install_cooked.autostart }}"
-    command: info  ## virt module needs either command or state
-  when: hostvars[vmname].vm_install_cooked.autostart is defined
diff --git a/ansible/roles/vm-install/templates/libvirt-domain.xml.j2 b/ansible/roles/vm-install/templates/libvirt-domain.xml.j2
deleted file mode 100644
index 9364a7d..0000000
--- a/ansible/roles/vm-install/templates/libvirt-domain.xml.j2
+++ /dev/null
@@ -1,70 +0,0 @@
-
-  {{ vmname }}
-  {{ hostvars[vmname].vm_install_cooked.mem * 1024 }}
-  {{ hostvars[vmname].vm_install_cooked.mem * 1024 }}
-  {{ hostvars[vmname].vm_install_cooked.numcpu }}
-
-    hvm
-{% if run_installer %}
-    {{ vm_host.installer.path }}/{{ vmdistro }}-{{ vmdistcodename }}/{{ hostvars[vmname].vm_install_cooked.arch | default('amd64') }}/linux
-    {{ vm_host.installer.path }}/{{ vmdistro }}-{{ vmdistcodename }}/{{ hostvars[vmname].vm_install_cooked.arch | default('amd64') }}/initrd.gz
-    console=ttyS0,115200n8 auto=true interface=auto url=tftp://{{ hostvars[inventory_hostname]['ansible_' + (vm_host.installer.net_if | replace('-', '_'))].ipv4.address }}/vm-{{ vmname }}-{{ vmdistro }}-{{ vmdistcodename }}.cfg netcfg/choose_interface=enp1s1 netcfg/disable_autoconfig=true netcfg/get_ipaddress={{ hostvars[vmname].vm_network_cooked.primary.ip }} netcfg/get_netmask={{ hostvars[vmname].vm_network_cooked.primary.mask }} netcfg/get_gateway={{ hostvars[vmname].vm_network_cooked.primary.gateway }} netcfg/get_nameservers="{{ hostvars[vmname].vm_network_cooked.primary.nameservers | join(' ') }}" netcfg/confirm_static=true netcfg/get_hostname={{ vmname }} netcfg/get_domain={{ hostvars[vmname].vm_network_cooked.primary.domain }}
-{% endif %}
-
-
-
-
-
-
-
-
-  destroy
-{% if run_installer %}
-  destroy
-  destroy
-{% else %}
-  restart
-  restart
-{% endif %}
-
-    /usr/bin/kvm
-
-{% if 'virtio' in hostvars[vmname].vm_install_cooked.disks %}
-{% for device, lv in hostvars[vmname].vm_install_cooked.disks.virtio.items() %}
-
-
-
-
-
-{% endfor %}
-{% endif %}
-
-{% if 'scsi' in hostvars[vmname].vm_install_cooked.disks %}
-
-{% for device, lv in hostvars[vmname].vm_install_cooked.disks.scsi.items() %}
-
-
-
-
-
-{% endfor %}
-{% endif %}
-
-{% if hostvars[vmname].vm_install_cooked.interfaces %}
-{% for if in hostvars[vmname].vm_install_cooked.interfaces %}
-
-
-
-
-{% endfor %}
-{% endif %}
-
-
-
-
-
-
-
-
-
diff --git a/ansible/roles/vm-install/templates/preseed_debian-stretch.cfg.j2 b/ansible/roles/vm-install/templates/preseed_debian-stretch.cfg.j2
deleted file mode 100644
index e8694ec..0000000
--- a/ansible/roles/vm-install/templates/preseed_debian-stretch.cfg.j2
+++ /dev/null
@@ -1,106 +0,0 @@
-#########################################################################
-# realraum preseed file for Debian stretch based VMs
-#########################################################################
-
-d-i debian-installer/language string en
-d-i debian-installer/country string AT
-d-i debian-installer/locale string de_AT.UTF-8
-d-i keyboard-configuration/xkb-keymap select de
-
-
-#d-i netcfg/choose_interface select enp1s1
-#d-i netcfg/disable_autoconfig boolean false
-#d-i netcfg/get_ipaddress string {{ hostvars[vmname].vm_network_cooked.primary.ip }}
-#d-i netcfg/get_netmask string {{ hostvars[vmname].vm_network_cooked.primary.mask }}
-#d-i netcfg/get_gateway string {{ hostvars[vmname].vm_network_cooked.primary.gateway }}
-#d-i netcfg/get_nameservers string {{ hostvars[vmname].vm_network_cooked.primary.nameservers | join(' ') }}
-#d-i netcfg/confirm_static boolean true
-
-d-i netcfg/get_hostname string {{ vmname }}
-d-i netcfg/get_domain string {{ hostvars[vmname].vm_network_cooked.primary.domain }}
-d-i netcfg/wireless_wep string
-
-
-d-i mirror/country string manual
-d-i mirror/http/hostname string debian.ffgraz.net
-d-i mirror/http/directory string /debian
-d-i mirror/http/proxy string
-
-
-d-i passwd/make-user boolean false
-d-i passwd/root-password password this-very-very-secure-password-will-be-removed-by-latecommand
-d-i passwd/root-password-again password this-very-very-secure-password-will-be-removed-by-latecommand
-
-
-d-i clock-setup/utc boolean true
-d-i time/zone string Europe/Vienna
-d-i clock-setup/ntp boolean false
-
-
-d-i partman-auto/disk string /dev/{{ hostvars[vmname].vm_install_cooked.disks.primary }}
-d-i partman-auto/method string lvm
-d-i partman-lvm/device_remove_lvm boolean true
-d-i partman-md/device_remove_md boolean true
-
-d-i partman-lvm/confirm boolean true
-d-i partman-lvm/confirm_nooverwrite boolean true
-
-d-i partman-auto/expert_recipe string \
-        boot-root :: \
-                1000 10000 -1 ext4 \
-                        $defaultignore{ } $primary{ } $bootable{ } \
-                        method{ lvm } vg_name{ {{ vmname }} } \
-                . \
-                2048 10000 2560 ext4 \
-                        $lvmok{ } in_vg{ {{ vmname }} } \
-                        method{ format } format{ } \
-                        use_filesystem{ } filesystem{ ext4 } \
-                        mountpoint{ / } \
-                . \
-                1024 11000 1280 ext4 \
-                        $lvmok{ } in_vg{ {{ vmname }} } \
-                        method{ format } format{ } \
-                        use_filesystem{ } filesystem{ ext4 } \
-                        mountpoint{ /var } \
-                . \
-                768 10000 768 ext4 \
-                        $lvmok{ } in_vg{ {{ vmname }} } \
-                        method{ format } format{ } \
-                        use_filesystem{ } filesystem{ ext4 } \
-                        mountpoint{ /var/log } \
-                        options/nodev{ nodev } options/noatime{ noatime } \
-                        options/noexec{ noexec } \
-                . \
-                16 20000 -1 ext4 \
-                        $lvmok{ } in_vg{ {{ vmname }} } \
-                        method( keep } lv_name{ dummy } \
-                .
-
-d-i partman-auto-lvm/no_boot boolean true
-d-i partman-basicfilesystems/no_swap true
-d-i partman-partitioning/confirm_write_new_label boolean true
-d-i partman/choose_partition select finish
-d-i partman/confirm boolean true
-d-i partman/confirm_nooverwrite boolean true
-
-
-d-i base-installer/install-recommends boolean false
-d-i apt-setup/security_host string debian.ffgraz.net
-
-tasksel tasksel/first multiselect
-d-i pkgsel/include string openssh-server python
-d-i pkgsel/upgrade select safe-upgrade
-popularity-contest popularity-contest/participate boolean false
-
-d-i grub-installer/choose_bootdev string /dev/{{ hostvars[vmname].vm_install_cooked.disks.primary }}
-d-i grub-installer/only_debian boolean true
-d-i grub-installer/with_other_os boolean false
-
-d-i finish-install/reboot_in_progress note
-
-
-d-i preseed/late_command string \
-    lvremove -f {{ vmname }}/dummy; \
-    in-target bash -c "apt-get update -q && apt-get full-upgrade -y -q"; \
-    in-target bash -c "passwd -d root; passwd -l root; umask 077; mkdir -p /root/.ssh/; echo -e '{{ lookup('pipe','cat ssh/noc/*.pub') | replace('\n', '\\n') }}' > /root/.ssh/authorized_keys"; \
-    in-target bash -c "sed 's/^\(\s*#\s*Port.*\)/Port 22000/' -i /etc/ssh/sshd_config"
diff --git a/ansible/roles/vm-network/handlers/main.yml b/ansible/roles/vm-network/handlers/main.yml
deleted file mode 100644
index f967fa8..0000000
--- a/ansible/roles/vm-network/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: rebuild initramfs
-  command: update-initramfs -u
diff --git a/ansible/roles/vm-network/tasks/main.yml b/ansible/roles/vm-network/tasks/main.yml
deleted file mode 100644
index 6668a4c..0000000
--- a/ansible/roles/vm-network/tasks/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- block:
-    - name: remove legacy systemd.link units
-      with_items:
-        - 50-virtio-kernel-names.link
-        - 99-default.link
-      file:
-        name: "/etc/systemd/network/{{ item }}"
-        state: absent
-
-    - name: install systemd network link units
-      template:
-        src: systemd.link.j2
-        dest: "/etc/systemd/network/{{ '%02d' | format(item.idx + 10) }}-{{ item.name }}.link"
-      with_items: "{{ vm_network.systemd_link.interfaces }}"
-      notify: rebuild initramfs
-
-  when: vm_network.systemd_link is defined
-
-- name: install basic interface config
-  template:
-    src: interfaces.j2
-    dest: /etc/network/interfaces
-    mode: 0644
diff --git a/ansible/roles/vm-network/templates/interfaces.j2 b/ansible/roles/vm-network/templates/interfaces.j2
deleted file mode 100644
index 542e18d..0000000
--- a/ansible/roles/vm-network/templates/interfaces.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-source /etc/network/interfaces.d/*
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# The primary network interface
-auto {{ vm_network.primary.interface }}
-iface {{ vm_network.primary.interface }} inet static
-    address {{ vm_network.primary.ip }}
-    netmask {{ vm_network.primary.mask }}
-    gateway {{ vm_network.primary.gateway }}
-    pre-up echo 0 > /proc/sys/net/ipv6/conf/$IFACE/accept_ra
-    pre-up echo 0 > /proc/sys/net/ipv6/conf/$IFACE/autoconf
diff --git a/ansible/roles/vm-network/templates/systemd.link.j2 b/ansible/roles/vm-network/templates/systemd.link.j2
deleted file mode 100644
index 753fd58..0000000
--- a/ansible/roles/vm-network/templates/systemd.link.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-[Match]
-Path=pci-0000:01:{{ "%02d" | format(item.idx) }}.0
-
-[Link]
-Name={{ item.name }}
diff --git a/ansible/roles/vm/grub/handlers/main.yml b/ansible/roles/vm/grub/handlers/main.yml
new file mode 100644
index 0000000..4bddbb1
--- /dev/null
+++ b/ansible/roles/vm/grub/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: update grub
+  command: /usr/sbin/update-grub
diff --git a/ansible/roles/vm/grub/tasks/main.yml b/ansible/roles/vm/grub/tasks/main.yml
new file mode 100644
index 0000000..f751243
--- /dev/null
+++ b/ansible/roles/vm/grub/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+- name: enable serial console in grub and for kernel
+  with_items:
+    - regexp: '^GRUB_TIMEOUT='
+      line: 'GRUB_TIMEOUT=2'
+    - regexp: '^GRUB_CMDLINE_LINUX='
+      line: 'GRUB_CMDLINE_LINUX="console=ttyS0,115200n8"'
+    - regexp: '^GRUB_TERMINAL='
+      line: 'GRUB_TERMINAL=serial'
+    - regexp: '^GRUB_SERIAL_COMMAND='
+      line: 'GRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"'
+  lineinfile:
+    dest: /etc/default/grub
+    regexp: "{{ item.regexp }}"
+    line: "{{ item.line }}"
+  notify: update grub
diff --git a/ansible/roles/vm/host/defaults/main.yml b/ansible/roles/vm/host/defaults/main.yml
new file mode 100644
index 0000000..deaa50a
--- /dev/null
+++ b/ansible/roles/vm/host/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+vm_host_force_download_installer: False
+vm_host_installer_url:
+  debian: "http://debian.mur.at/debian"
+  ubuntu: "http://ubuntu.uni-klu.ac.at/ubuntu"
diff --git a/ansible/roles/vm/host/handlers/main.yml b/ansible/roles/vm/host/handlers/main.yml
new file mode 100644
index 0000000..158f4dc
--- /dev/null
+++ b/ansible/roles/vm/host/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart inetd
+  service:
+    name: openbsd-inetd
+    state: restarted
diff --git a/ansible/roles/vm/host/tasks/main.yml b/ansible/roles/vm/host/tasks/main.yml
new file mode 100644
index 0000000..248f855
--- /dev/null
+++ b/ansible/roles/vm/host/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+- name: install tftpd and python-libvirt
+  apt:
+    name:
+      - atftpd
+      - openbsd-inetd
+      - qemu-kvm
+      - libvirt-bin
+      - python-libvirt
+    state: present
+
+- name: configure tftpd via inetd
+  lineinfile:
+    regexp: "^#?({{ vm_host.network.ip }}:)?tftp"
+    line: "{{ vm_host.network.ip }}:tftp dgram udp4 wait nobody /usr/sbin/tcpd /usr/sbin/in.tftpd --tftpd-timeout 300 --retry-timeout 5 --maxthread 10 --verbose=5 {{ vm_host.installer.preseed_path }}"
+    path: /etc/inetd.conf
+  notify: restart inetd
+
+- name: make sure installer directories exists
+  with_items:
+    - "{{ vm_host.installer.path }}"
+    - "{{ vm_host.installer.preseed_path }}"
+  file:
+    name: "{{ item }}"
+    state: directory
+
+- name: prepare directories for installer images
+  with_subelements:
+    - "{{ vm_host.installer.distros }}"
+    - arch
+  file:
+    name: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}"
+    state: directory
+
+- name: download installer kernel images
+  with_subelements:
+    - "{{ vm_host.installer.distros }}"
+    - arch
+  get_url:
+    url: "{{ vm_host_installer_url[item.0.distro] }}/dists/{{ item.0.codename }}/main/installer-{{ item.1 }}/current/images/netboot/{{ item.0.distro }}-installer/{{ item.1 }}/linux"
+    dest: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}/linux"
+    mode: 0644
+    force: "{{ vm_host_force_download_installer }}"
+
+- name: download installer initrd.gz
+  with_subelements:
+    - "{{ vm_host.installer.distros }}"
+    - arch
+  get_url:
+    url: "{{ vm_host_installer_url[item.0.distro] }}/dists/{{ item.0.codename }}/main/installer-{{ item.1 }}/current/images/netboot/{{ item.0.distro }}-installer/{{ item.1 }}/initrd.gz"
+    dest: "{{ vm_host.installer.path }}/{{ item.0.distro }}-{{ item.0.codename }}/{{ item.1 }}/initrd.gz"
+    mode: 0644
+    force: "{{ vm_host_force_download_installer }}"
diff --git a/ansible/roles/vm/install/library/wait_for_virt.py b/ansible/roles/vm/install/library/wait_for_virt.py
new file mode 100644
index 0000000..6c49fae
--- /dev/null
+++ b/ansible/roles/vm/install/library/wait_for_virt.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import traceback
+import time
+
+try:
+    import libvirt
+except ImportError:
+    HAS_VIRT = False
+else:
+    HAS_VIRT = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE = 2
+
+VIRT_STATE_NAME_MAP = {
+    0: "running",
+    1: "running",
+    2: "running",
+    3: "paused",
+    4: "shutdown",
+    5: "shutdown",
+    6: "crashed"
+}
+
+
+class VMNotFound(Exception):
+    pass
+
+
+class LibvirtConnection(object):
+
+    def __init__(self, uri, module):
+
+        self.module = module
+
+        cmd = "uname -r"
+        rc, stdout, stderr = self.module.run_command(cmd)
+
+        if "xen" in stdout:
+            conn = libvirt.open(None)
+        elif "esx" in uri:
+            auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
+            conn = libvirt.openAuth(uri, auth)
+        else:
+            conn = libvirt.open(uri)
+
+        if not conn:
+            raise Exception("hypervisor connection failure")
+
+        self.conn = conn
+
+    def find_vm(self, vmid):
+        """
+        Extra bonus feature: vmid = -1 returns a list of everything
+        """
+        conn = self.conn
+
+        vms = []
+
+        # this block of code borrowed from virt-manager:
+        # get working domain's name
+        ids = conn.listDomainsID()
+        for id in ids:
+            vm = conn.lookupByID(id)
+            vms.append(vm)
+        # get defined domain
+        names = conn.listDefinedDomains()
+        for name in names:
+            vm = conn.lookupByName(name)
+            vms.append(vm)
+
+        if vmid == -1:
+            return vms
+
+        for vm in vms:
+            if vm.name() == vmid:
+                return vm
+
+        raise VMNotFound("virtual machine %s not found" % vmid)
+
+    def get_status(self, vmid):
+        state = self.find_vm(vmid).info()[0]
+        return VIRT_STATE_NAME_MAP.get(state, "unknown")
+
+
+class Virt(object):
+
+    def __init__(self, uri, module):
+        self.module = module
+        self.uri = uri
+
+    def __get_conn(self):
+        self.conn = LibvirtConnection(self.uri, self.module)
+        return self.conn
+
+    def status(self, vmid):
+        """
+        Return a state suitable for server consumption. Aka, codes.py values, not XM output.
+        """
+        self.__get_conn()
+        return self.conn.get_status(vmid)
+
+
+def core(module):
+
+    states = module.params.get('states', None)
+    guest = module.params.get('name', None)
+    uri = module.params.get('uri', None)
+    delay = module.params.get('delay', None)
+    sleep = module.params.get('sleep', None)
+    timeout = module.params.get('timeout', None)
+
+    v = Virt(uri, module)
+    res = {'changed': False, 'failed': True}
+
+    if delay > 0:
+        time.sleep(delay)
+
+    for _ in range(0, timeout, sleep):
+        state = v.status(guest)
+        if state in states:
+            res['state'] = state
+            res['failed'] = False
+            res['msg'] = "guest '%s' has reached state: %s" % (guest, state)
+            return VIRT_SUCCESS, res
+
+        time.sleep(sleep)
+
+    res['msg'] = "timeout waiting for guest '%s' to reach one of states: %s" % (guest, ', '.join(states))
+    return VIRT_FAILED, res
+
+
+def main():
+
+    module = AnsibleModule(argument_spec=dict(
+        name=dict(aliases=['guest'], required=True),
+        states=dict(type='list', required=True),
+        uri=dict(default='qemu:///system'),
+        delay=dict(type='int', default=0),
+        sleep=dict(type='int', default=1),
+        timeout=dict(type='int', default=300),
+    ))
+
+    if not HAS_VIRT:
+        module.fail_json(
+            msg='The `libvirt` module is not importable. Check the requirements.'
+        )
+
+    for state in module.params.get('states', None):
+        if state not in set(VIRT_STATE_NAME_MAP.values()):
+            module.fail_json(
+                msg="states contains invalid state '%s', must be one of %s" % (state, ', '.join(set(VIRT_STATE_NAME_MAP.values())))
+            )
+
+    rc = VIRT_SUCCESS
+    try:
+        rc, result = core(module)
+    except Exception as e:
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    if rc != 0:  # something went wrong emit the msg
+        module.fail_json(rc=rc, msg=result)
+    else:
+        module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible/roles/vm/install/tasks/main.yml b/ansible/roles/vm/install/tasks/main.yml
new file mode 100644
index 0000000..1603483
--- /dev/null
+++ b/ansible/roles/vm/install/tasks/main.yml
@@ -0,0 +1,96 @@
+---
+- name: generate preseed file
+  template:
+    src: "preseed_{{ vmdistro }}-{{ vmdistcodename }}.cfg.j2"
+    dest: "{{ vm_host.installer.preseed_path }}/vm-{{ vmname }}-{{ vmdistro }}-{{ vmdistcodename }}.cfg"
+
+- name: create disks for vm
+  with_dict: "{{ hostvars[vmname].vm_install_cooked.disks.virtio | default({}) | combine(hostvars[vmname].vm_install_cooked.disks.scsi | default({})) }}"
+  lvol:
+    vg: "{{ item.value.vg }}"
+    lv: "{{ item.value.lv }}"
+    size: "{{ item.value.size }}"
+
+- name: check if vm already exists
+  virt:
+    name: "{{ vmname }}"
+    command: info
+  register: vmhost_info
+
+- name: destroy exisiting vm
+  virt:
+    name: "{{ vmname }}"
+    state: destroyed
+  when: vmname in vmhost_info
+
+- name: wait for vm to be destroyed
+  wait_for_virt:
+    name: "{{ vmname }}"
+    states: shutdown,crashed
+    timeout: 5
+  when: vmname in vmhost_info
+
+- name: undefining exisiting vm
+  virt:
+    name: "{{ vmname }}"
+    command: undefine
+  when: vmname in vmhost_info
+
+- name: enable installer in VM config
+  set_fact:
+    run_installer: True
+
+- name: define new installer vm
+  virt:
+    name: "{{ vmname }}"
+    command: define
+    xml: "{{ lookup('template', 'libvirt-domain.xml.j2') }}"
+
+- name: start vm
+  virt:
+    name: "{{ vmname }}"
+    state: running
+
+- name: wait for installer to start
+  wait_for_virt:
+    name: "{{ vmname }}"
+    states: running
+    timeout: 10
+
+- debug:
+    msg: "you can check on the status of the installer running this command 'virsh console {{ vmname }}' on host {{ inventory_hostname }}."
+
+- name: wait for installer to finish or crash
+  wait_for_virt:
+    name: "{{ vmname }}"
+    states: shutdown,crashed
+    timeout: 900
+  register: installer_result
+  failed_when: installer_result.failed or installer_result.state == "crashed"
+
+- name: undefining installer vm
+  virt:
+    name: "{{ vmname }}"
+    command: undefine
+
+- name: disable installer in VM config
+  set_fact:
+    run_installer: False
+
+- name: define new production vm
+  virt:
+    name: "{{ vmname }}"
+    command: define
+    xml: "{{ lookup('template', 'libvirt-domain.xml.j2') }}"
+
+- name: start vm
+  virt:
+    name: "{{ vmname }}"
+    state: running
+
+- name: mark vm as autostarted
+  virt:
+    name: "{{ vmname }}"
+    autostart: "{{ hostvars[vmname].vm_install_cooked.autostart }}"
+    command: info  ## virt module needs either command or state
+  when: hostvars[vmname].vm_install_cooked.autostart is defined
diff --git a/ansible/roles/vm/install/templates/libvirt-domain.xml.j2 b/ansible/roles/vm/install/templates/libvirt-domain.xml.j2
new file mode 100644
index 0000000..9364a7d
--- /dev/null
+++ b/ansible/roles/vm/install/templates/libvirt-domain.xml.j2
@@ -0,0 +1,70 @@
+
+  {{ vmname }}
+  {{ hostvars[vmname].vm_install_cooked.mem * 1024 }}
+  {{ hostvars[vmname].vm_install_cooked.mem * 1024 }}
+  {{ hostvars[vmname].vm_install_cooked.numcpu }}
+
+    hvm
+{% if run_installer %}
+    {{ vm_host.installer.path }}/{{ vmdistro }}-{{ vmdistcodename }}/{{ hostvars[vmname].vm_install_cooked.arch | default('amd64') }}/linux
+    {{ vm_host.installer.path }}/{{ vmdistro }}-{{ vmdistcodename }}/{{ hostvars[vmname].vm_install_cooked.arch | default('amd64') }}/initrd.gz
+    console=ttyS0,115200n8 auto=true interface=auto url=tftp://{{ hostvars[inventory_hostname]['ansible_' + (vm_host.installer.net_if | replace('-', '_'))].ipv4.address }}/vm-{{ vmname }}-{{ vmdistro }}-{{ vmdistcodename }}.cfg netcfg/choose_interface=enp1s1 netcfg/disable_autoconfig=true netcfg/get_ipaddress={{ hostvars[vmname].vm_network_cooked.primary.ip }} netcfg/get_netmask={{ hostvars[vmname].vm_network_cooked.primary.mask }} netcfg/get_gateway={{ hostvars[vmname].vm_network_cooked.primary.gateway }} netcfg/get_nameservers="{{ hostvars[vmname].vm_network_cooked.primary.nameservers | join(' ') }}" netcfg/confirm_static=true netcfg/get_hostname={{ vmname }} netcfg/get_domain={{ hostvars[vmname].vm_network_cooked.primary.domain }}
+{% endif %}
+
+
+
+
+
+
+
+
+  destroy
+{% if run_installer %}
+  destroy
+  destroy
+{% else %}
+  restart
+  restart
+{% endif %}
+
+    /usr/bin/kvm
+
+{% if 'virtio' in hostvars[vmname].vm_install_cooked.disks %}
+{% for device, lv in hostvars[vmname].vm_install_cooked.disks.virtio.items() %}
+
+
+
+
+
+{% endfor %}
+{% endif %}
+
+{% if 'scsi' in hostvars[vmname].vm_install_cooked.disks %}
+
+{% for device, lv in hostvars[vmname].vm_install_cooked.disks.scsi.items() %}
+
+
+
+
+
+{% endfor %}
+{% endif %}
+
+{% if hostvars[vmname].vm_install_cooked.interfaces %}
+{% for if in hostvars[vmname].vm_install_cooked.interfaces %}
+
+
+
+
+{% endfor %}
+{% endif %}
+
+
+
+
+
+
+
+
+
diff --git a/ansible/roles/vm/install/templates/preseed_debian-stretch.cfg.j2 b/ansible/roles/vm/install/templates/preseed_debian-stretch.cfg.j2
new file mode 100644
index 0000000..e8694ec
--- /dev/null
+++ b/ansible/roles/vm/install/templates/preseed_debian-stretch.cfg.j2
@@ -0,0 +1,106 @@
+#########################################################################
+# realraum preseed file for Debian stretch based VMs
+#########################################################################
+
+d-i debian-installer/language string en
+d-i debian-installer/country string AT
+d-i debian-installer/locale string de_AT.UTF-8
+d-i keyboard-configuration/xkb-keymap select de
+
+
+#d-i netcfg/choose_interface select enp1s1
+#d-i netcfg/disable_autoconfig boolean false
+#d-i netcfg/get_ipaddress string {{ hostvars[vmname].vm_network_cooked.primary.ip }}
+#d-i netcfg/get_netmask string {{ hostvars[vmname].vm_network_cooked.primary.mask }}
+#d-i netcfg/get_gateway string {{ hostvars[vmname].vm_network_cooked.primary.gateway }}
+#d-i netcfg/get_nameservers string {{ hostvars[vmname].vm_network_cooked.primary.nameservers | join(' ') }}
+#d-i netcfg/confirm_static boolean true
+
+d-i netcfg/get_hostname string {{ vmname }}
+d-i netcfg/get_domain string {{ hostvars[vmname].vm_network_cooked.primary.domain }}
+d-i netcfg/wireless_wep string
+
+
+d-i mirror/country string manual
+d-i mirror/http/hostname string debian.ffgraz.net
+d-i mirror/http/directory string /debian
+d-i mirror/http/proxy string
+
+
+d-i passwd/make-user boolean false
+d-i passwd/root-password password this-very-very-secure-password-will-be-removed-by-latecommand
+d-i passwd/root-password-again password this-very-very-secure-password-will-be-removed-by-latecommand
+
+
+d-i clock-setup/utc boolean true
+d-i time/zone string Europe/Vienna
+d-i clock-setup/ntp boolean false
+
+
+d-i partman-auto/disk string /dev/{{ hostvars[vmname].vm_install_cooked.disks.primary }}
+d-i partman-auto/method string lvm
+d-i partman-lvm/device_remove_lvm boolean true
+d-i partman-md/device_remove_md boolean true
+
+d-i partman-lvm/confirm boolean true
+d-i partman-lvm/confirm_nooverwrite boolean true
+
+d-i partman-auto/expert_recipe string \
+        boot-root :: \
+                1000 10000 -1 ext4 \
+                        $defaultignore{ } $primary{ } $bootable{ } \
+                        method{ lvm } vg_name{ {{ vmname }} } \
+                . \
+                2048 10000 2560 ext4 \
+                        $lvmok{ } in_vg{ {{ vmname }} } \
+                        method{ format } format{ } \
+                        use_filesystem{ } filesystem{ ext4 } \
+                        mountpoint{ / } \
+                . \
+                1024 11000 1280 ext4 \
+                        $lvmok{ } in_vg{ {{ vmname }} } \
+                        method{ format } format{ } \
+                        use_filesystem{ } filesystem{ ext4 } \
+                        mountpoint{ /var } \
+                . \
+                768 10000 768 ext4 \
+                        $lvmok{ } in_vg{ {{ vmname }} } \
+                        method{ format } format{ } \
+                        use_filesystem{ } filesystem{ ext4 } \
+                        mountpoint{ /var/log } \
+                        options/nodev{ nodev } options/noatime{ noatime } \
+                        options/noexec{ noexec } \
+                . \
+                16 20000 -1 ext4 \
+                        $lvmok{ } in_vg{ {{ vmname }} } \
+                        method( keep } lv_name{ dummy } \
+                .
+
+d-i partman-auto-lvm/no_boot boolean true
+d-i partman-basicfilesystems/no_swap true
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+
+d-i base-installer/install-recommends boolean false
+d-i apt-setup/security_host string debian.ffgraz.net
+
+tasksel tasksel/first multiselect
+d-i pkgsel/include string openssh-server python
+d-i pkgsel/upgrade select safe-upgrade
+popularity-contest popularity-contest/participate boolean false
+
+d-i grub-installer/choose_bootdev string /dev/{{ hostvars[vmname].vm_install_cooked.disks.primary }}
+d-i grub-installer/only_debian boolean true
+d-i grub-installer/with_other_os boolean false
+
+d-i finish-install/reboot_in_progress note
+
+
+d-i preseed/late_command string \
+    lvremove -f {{ vmname }}/dummy; \
+    in-target bash -c "apt-get update -q && apt-get full-upgrade -y -q"; \
+    in-target bash -c "passwd -d root; passwd -l root; umask 077; mkdir -p /root/.ssh/; echo -e '{{ lookup('pipe','cat ssh/noc/*.pub') | replace('\n', '\\n') }}' > /root/.ssh/authorized_keys"; \
+    in-target bash -c "sed 's/^\(\s*#\s*Port.*\)/Port 22000/' -i /etc/ssh/sshd_config"
diff --git a/ansible/roles/vm/network/handlers/main.yml b/ansible/roles/vm/network/handlers/main.yml
new file mode 100644
index 0000000..f967fa8
--- /dev/null
+++ b/ansible/roles/vm/network/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: rebuild initramfs
+  command: update-initramfs -u
diff --git a/ansible/roles/vm/network/tasks/main.yml b/ansible/roles/vm/network/tasks/main.yml
new file mode 100644
index 0000000..6668a4c
--- /dev/null
+++ b/ansible/roles/vm/network/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- block:
+    - name: remove legacy systemd.link units
+      with_items:
+        - 50-virtio-kernel-names.link
+        - 99-default.link
+      file:
+        name: "/etc/systemd/network/{{ item }}"
+        state: absent
+
+    - name: install systemd network link units
+      template:
+        src: systemd.link.j2
+        dest: "/etc/systemd/network/{{ '%02d' | format(item.idx + 10) }}-{{ item.name }}.link"
+      with_items: "{{ vm_network.systemd_link.interfaces }}"
+      notify: rebuild initramfs
+
+  when: vm_network.systemd_link is defined
+
+- name: install basic interface config
+  template:
+    src: interfaces.j2
+    dest: /etc/network/interfaces
+    mode: 0644
diff --git a/ansible/roles/vm/network/templates/interfaces.j2 b/ansible/roles/vm/network/templates/interfaces.j2
new file mode 100644
index 0000000..542e18d
--- /dev/null
+++ b/ansible/roles/vm/network/templates/interfaces.j2
@@ -0,0 +1,17 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+source /etc/network/interfaces.d/*
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto {{ vm_network.primary.interface }}
+iface {{ vm_network.primary.interface }} inet static
+    address {{ vm_network.primary.ip }}
+    netmask {{ vm_network.primary.mask }}
+    gateway {{ vm_network.primary.gateway }}
+    pre-up echo 0 > /proc/sys/net/ipv6/conf/$IFACE/accept_ra
+    pre-up echo 0 > /proc/sys/net/ipv6/conf/$IFACE/autoconf
diff --git a/ansible/roles/vm/network/templates/systemd.link.j2 b/ansible/roles/vm/network/templates/systemd.link.j2
new file mode 100644
index 0000000..753fd58
--- /dev/null
+++ b/ansible/roles/vm/network/templates/systemd.link.j2
@@ -0,0 +1,5 @@
+[Match]
+Path=pci-0000:01:{{ "%02d" | format(item.idx) }}.0
+
+[Link]
+Name={{ item.name }}
diff --git a/ansible/vm-install.yml b/ansible/vm-install.yml
index 198b26f..e0685f9 100644
--- a/ansible/vm-install.yml
+++ b/ansible/vm-install.yml
@@ -17,7 +17,7 @@
 - name: basic installation
   hosts: _vmhost_
   roles:
-    - role: vm-install
+    - role: vm/install
 
 - name: wait for new vm to start up
   hosts: "{{ vmname }}"
-- 
1.7.10.4