+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2020 by Frank Brehm, Berlin
-@summary: A handler module for creating the VMWare template
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import logging
-import re
-import random
-import time
-import datetime
-import socket
-import textwrap
-import signal
-import tempfile
-import os
-
-from pathlib import Path
-
-# Third party modules
-import pytz
-import paramiko
-
-from pyVmomi import vim
-
-import ldap3
-
-# Own modules
-
-from fb_tools.common import pp, to_str, is_sequence
-from fb_tools.errors import HandlerError, ExpectedHandlerError
-from fb_tools.handler import BaseHandler
-from fb_tools.xlate import format_list
-
-from fb_vmware.errors import VSphereExpectedError
-from fb_vmware.errors import VSphereDatacenterNotFoundError
-
-# from fb_tools.vsphere.server import VsphereServer
-from fb_vmware.connect import VsphereConnection
-
-from fb_vmware.iface import VsphereVmInterface
-from fb_vmware.datastore import VsphereDatastore
-
-from . import print_section_start, print_section_end
-from . import DEFAULT_PORT_LDAP, DEFAULT_PORT_LDAPS
-
-from .config import CrTplConfiguration
-
-from .cobbler import Cobbler
-
-from .errors import MSG_NO_CLUSTER, TempVmExistsError, NoDatastoreFoundError
-
-from .xlate import XLATOR
-
-__version__ = '2.3.4'
-
-LOG = logging.getLogger(__name__)
-TZ = pytz.timezone('Europe/Berlin')
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-# =============================================================================
-class CrTplHandler(BaseHandler):
- """
- A handler class for creating a vSphere template.
- """
-
- max_depth = 10
- vm_boot_delay_secs = 5
- re_local_ds = re.compile(r'^local[_-]', re.IGNORECASE)
- re_share_nfs_ds = re.compile(r'(?:share[_-]*nfs|nfs[_-]*share)', re.IGNORECASE)
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__version__, base_dir=None,
- cfg=None, terminal_has_colors=False, simulate=None, force=None, initialized=False):
-
- super(CrTplHandler, self).__init__(
- appname=appname, verbose=verbose, version=version, base_dir=base_dir,
- terminal_has_colors=terminal_has_colors, simulate=simulate,
- force=force, initialized=False,
- )
-
- if not isinstance(cfg, CrTplConfiguration):
- msg = _("{w} is not an instance of {c}, but an instance of {i} instead.").format(
- w='Parameter cfg', c='CrTplConfiguration', i=cfg.__class__.__name__)
- raise HandlerError(msg)
-
- self.cfg = cfg
- self.service_instance = None
- self.tpl_vm_folder = None
- self.tpl_data_store = None
- self.tpl_network = None
- self.tpl_vm = None
- self.tpl_vm_hostname = None
- self.tpl_macaddress = None
- self.tpl_ip = None
- self.tpl_ips = []
- self.ts_start_install = None
- self.ts_finish_install = None
- self.initial_sleep = 60
- self.interval_poll = 0.5
- self.interval_dot = 2
- self.private_ssh_key = str(self.base_dir.joinpath('keys', 'id_rsa_cr_vmw_tpl'))
- self.ssh_port = 22
- self.ssh_user = 'root'
- self.ssh_timeout = 30
- self.rotate_only = False
- self.abort = False
- self.postinstall_errors = None
- self.cobbler = None
- self.ldap = None
- self.ldap_server = None
- self.auth_keys_file = None
-
- self.vsphere = VsphereConnection(
- self.cfg.vsphere_info, cluster=self.cfg.vsphere_cluster,
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- auto_close=True, simulate=self.simulate, force=self.force, tz=TZ,
- terminal_has_colors=self.terminal_has_colors, initialized=False)
-
- self.cluster = None
-
- self.cobbler = Cobbler(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- cfg=cfg, simulate=self.simulate, force=self.force,
- terminal_has_colors=self.terminal_has_colors, initialized=False)
- self.cobbler.initialized = True
-
- if not self.cfg.os_id:
- msg = _("No ID for Operating system defined, please check the configuration.")
- raise HandlerError(msg)
-
- distro_info = self.cfg.current_distro
-
- cur_ts = datetime.datetime.now()
- cur_ts_str = cur_ts.strftime('%Y-%m-%d-%H-%M-%S')
- self.tpl_vm_hostname = distro_info.shortname + '-' + cur_ts_str
-
- if initialized:
- self.initialized = True
- self.vsphere.initialized = True
-
- # -------------------------------------------------------------------------
- @property
- def tpl_vm_fqdn(self):
- """The FQDN of the template VM."""
- if not self.tpl_vm_hostname:
- return None
- if not self.cfg:
- return self.tpl_vm_hostname
- if not self.cfg.tpl_vm_domain:
- return self.tpl_vm_hostname
- return self.tpl_vm_hostname + '.' + self.cfg.tpl_vm_domain
-
- # -------------------------------------------------------------------------
- @property
- def tpl_name(self):
- """The final name of the VSphere template."""
- if not self.cfg:
- return None
- if not self.cfg.os_id:
- return None
- return 'template-' + self.cfg.os_id
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True):
- """
- Transforms the elements of the object into a dict
-
- @param short: don't include local properties in resulting dict.
- @type short: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(CrTplHandler, self).as_dict(short=short)
- res['tpl_name'] = self.tpl_name
- res['tpl_vm_fqdn'] = self.tpl_vm_fqdn
-
- return res
-
- # -------------------------------------------------------------------------
- def connect_ldap(self):
-
- ldap_config = self.cfg.ldap_connection['default']
-
- server_opts = {}
- if ldap_config.use_ldaps:
- server_opts['use_ssl'] = True
- if ldap_config.port != DEFAULT_PORT_LDAPS:
- server_opts['port'] = ldap_config.port
- else:
- server_opts['use_ssl'] = False
- if ldap_config.port != DEFAULT_PORT_LDAP:
- server_opts['port'] = ldap_config.port
-
- server_opts['get_info'] = ldap3.DSA
- server_opts['mode'] = ldap3.IP_V4_PREFERRED
- server_opts['connect_timeout'] = self.cfg.ldap_timeout
-
- LOG.info(_("Connecting to LDAP server {!r} ...").format(ldap_config.url))
-
- if self.verbose > 1:
- msg = _("Connect options to LDAP server {!r}:").format(ldap_config.url)
- msg += '\n' + pp(server_opts)
- LOG.debug(msg)
-
- self.ldap_server = ldap3.Server(ldap_config.host, **server_opts)
-
- if self.verbose > 2:
- LOG.debug("LDAP server {s}: {re}".format(
- s=ldap_config.host, re=repr(self.ldap_server)))
-
- self.ldap = ldap3.Connection(
- self.ldap_server, ldap_config.bind_dn, ldap_config.bind_pw,
- client_strategy=ldap3.SAFE_SYNC, auto_bind=True)
-
- if self.verbose > 2:
- msg = _("Info about LDAP server {}:").format(ldap_config.url)
- msg += '\n' + repr(self.ldap_server.info)
- LOG.debug(msg)
-
- # -------------------------------------------------------------------------
- def disconnect_ldap(self):
-
- if 'default' in self.cfg.ldap_connection:
- ldap_config = self.cfg.ldap_connection['default']
- ldap_server = ldap_config.url
- else:
- ldap_server = 'unknown'
-
- if self.ldap:
- LOG.info(_("Unbinding from LDAP server {} ...").format(ldap_server))
- self.ldap.unbind()
- self.ldap = None
-
- if self.ldap_server:
- LOG.info(_("Disconnecting from LDAP server {} ...").format(ldap_server))
- self.ldap_server = None
-
- # -------------------------------------------------------------------------
- def __del__(self):
- """Destructor."""
-
- self.disconnect_ldap()
-
- if self.auth_keys_file:
- if self.auth_keys_file.exists():
- LOG.debug(_("Removing {!r} ...").format(str(self.auth_keys_file)))
- self.auth_keys_file.unlink()
-
- # -------------------------------------------------------------------------
- def __call__(self):
- """Executing the underlying action."""
-
- if not self.initialized:
- raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
-
- if not isinstance(self.cfg, CrTplConfiguration):
- msg = _("{w} is not an instance of {c}, but an instance of {i} instead.").format(
- w='self.cfg', c='CrTplConfiguration', i=self.cfg.__class__.__name__)
- raise HandlerError(msg)
-
- retval = 0
- try:
-
- signal.signal(signal.SIGHUP, self.signal_handler)
- signal.signal(signal.SIGINT, self.signal_handler)
- signal.signal(signal.SIGABRT, self.signal_handler)
- signal.signal(signal.SIGTERM, self.signal_handler)
- signal.signal(signal.SIGUSR1, self.signal_handler)
- signal.signal(signal.SIGUSR2, self.signal_handler)
-
- retval = self.run()
-
- except VSphereExpectedError as e:
- msg = _("Got a {n}: {e}").format(n=e.__class__.__name__, e=e)
- LOG.error(msg)
- retval = 9
-
- finally:
- # Aufräumen ...
- self.cluster = None
- LOG.debug(_("Closing ..."))
- self.vsphere.disconnect()
- self.vsphere = None
- self.disconnect_ldap()
-
- if self.auth_keys_file:
- if self.auth_keys_file.exists():
- LOG.debug(_("Removing {!r} ...").format(str(self.auth_keys_file)))
- self.auth_keys_file.unlink()
-
- return retval
-
- # -------------------------------------------------------------------------
- def run(self):
-
- if self.verbose > 2:
- LOG.debug(_("Current configuration:") + '\n' + pp(self.cfg.as_dict()))
- elif self.verbose > 1:
- LOG.debug(_("Current distribution:") + '\n' + pp(self.cfg.current_distro.as_dict()))
-
- LOG.debug(_("Starting handling ..."))
- self.cobbler.get_cobbler_version()
- self.check_for_cobbler_distro()
- self.cobbler.ensure_profile_ks()
- self.create_root_authkeys()
- self.cobbler.ensure_profile()
- return 0
-
- self.cobbler.ensure_webroot()
- # self.cobbler.ensure_root_authkeys(self.auth_keys_file)
- self.cobbler.ensure_rsyslog_cfg_files()
- self.cobbler.ensure_snippets()
-
- self.cobbler.ensure_keys(self.auth_keys_file)
- if self.auth_keys_file:
- if self.auth_keys_file.exists():
- LOG.debug(_("Removing {!r} ...").format(str(self.auth_keys_file)))
- self.auth_keys_file.unlink()
- self.auth_keys_file = None
-
- # self.cobbler.ensure_system_ks()
- self.cobbler.ensure_repo_files()
- self.cobbler.ensure_bashrc()
- self.cobbler.ensure_vimrc()
- self.cobbler.ensure_create_motd()
- self.cobbler.ensure_postfix_files()
- self.cobbler.ensure_logrotate_files()
-
- print_section_start('vmw_info', 'Collecting VMWare info', collapsed=True)
- self.vsphere.get_about()
- self.vsphere.get_clusters()
- self.cluster = self.vsphere.get_cluster_by_name(self.cfg.vsphere_cluster)
- print_section_end('vmw_info')
- if self.cluster:
- LOG.debug(_("Found VSphere cluster {!r}.").format(self.cluster.name))
- else:
- LOG.error(_("Could not find VSphere cluster {!r}.").format(
- self.cfg.vsphere_cluster))
- return 6
-
- if self.cfg.network not in self.cluster.networks:
- LOG.error(_("Network {n!r} not available in cluster {c!r}.").format(
- n=self.cfg.network, c=self.cluster.name))
- return 6
-
- print_section_start('vmw_networks', 'Collecting VSPhere networks ...', collapsed=True)
- self.vsphere.get_networks()
- print_section_end('vmw_networks')
-
- print_section_start('vmw_folder', 'Ensuring VSPhere folder ...', collapsed=True)
- self.vsphere.ensure_vm_folder(self.cfg.folder)
- print_section_end('vmw_folder')
-
- self.check_for_temp_tpl_vm(no_error=True)
- self.select_data_store()
-
- if self.rotate_only:
- LOG.warn(_("Only executing of template rotating."))
- else:
- self.create_vm()
- self.tpl_vm = self.get_temp_tpl_vm()
- if self.tpl_vm:
- LOG.debug(_("Created VM as {cls}: {vm!r}").format(
- cls=self.tpl_vm.__class__.__name__, vm=self.tpl_vm))
- for device in self.tpl_vm.config.hardware.device:
- if isinstance(device, vim.vm.device.VirtualEthernetCard):
- self.tpl_macaddress = device.macAddress
- LOG.debug(_("Found Ethernet card as {}.").format(
- device.__class__.__name__))
- if self.verbose > 2:
- LOG.debug(_("Found Ethernet card:") + "\n{}".format(device))
- break
- if not self.tpl_macaddress:
- msg = _("Did not found MAC address of ethernet card.")
- raise HandlerError(msg)
- else:
- if self.simulate:
- LOG.warn(_("Simulation mode - VM not created in real."))
- self.tpl_macaddress = self.cfg.default_mac_address
- else:
- raise HandlerError(_("Could not find VM after creating."))
-
- LOG.info(_("Using MAC address of template VM: {!r}").format(self.tpl_macaddress))
-
- tpl_sysname = 'template-' + self.tpl_vm_hostname
- self.cobbler.add_system(
- name=tpl_sysname, fqdn=self.tpl_vm_fqdn, mac_address=self.tpl_macaddress)
-
- self.vsphere.poweron_vm(self.tpl_vm, max_wait=self.cfg.max_wait_for_poweron_vm)
- self.ts_start_install = time.time()
- self.eval_tpl_ips()
- self.wait_for_finish_install()
-
- self.show_install_log()
- self.get_postinstall_error()
- if self.abort:
- LOG.warn(_("Aborting after creation of template VM."))
- LOG.warn(_("You are responsible yourself to cleaning up the VM!!!"))
- else:
- self.post_install_tasks_ssh()
- if self.postinstall_errors:
- self.vsphere.purge_vm(self.tpl_vm, max_wait=self.cfg.max_wait_for_purge_vm)
- return 10
- else:
- self.poweroff_vm()
- self.cobbler.remove_system(tpl_sysname)
-
- if not self.abort and not self.postinstall_errors:
- self.rotate_templates()
- if not self.rotate_only:
- self.rename_and_change_vm()
-
- return 0
-
- # -------------------------------------------------------------------------
- def check_for_cobbler_distro(self):
- LOG.debug(_(
- "Checking, whether distro {!r} is available "
- "on the cobbler host.").format(self.cfg.cobbler_distro))
-
- distro_list = self.cobbler.get_distro_list()
- if self.cfg.cobbler_distro not in distro_list:
- msg = _("Did not found distro {!r} on the cobbler host.").format(
- self.cfg.cobbler_distro)
- raise ExpectedHandlerError(msg)
- if self.verbose > 1:
- msg = _("Distro {!r} is available on the cobbler host.").format(
- self.cfg.cobbler_distro)
- LOG.debug(msg)
-
- if not self.cobbler.verify_distro_repos(self.cfg.current_distro):
- msg = _("Not all repos for distro {!r} were found on Cobbler server.").format(
- self.cfg.current_distro.name)
- raise ExpectedHandlerError(msg)
-
- # -------------------------------------------------------------------------
- def check_for_temp_tpl_vm(self, no_error=False):
-
- LOG.info(_("First checking, whether {!r} exists ...").format(self.tpl_vm_fqdn))
- print_section_start(
- 'check_existing_template', "Checking for existence of template ...",
- collapsed=True)
- vm = self.vsphere.get_vm(self.tpl_vm_fqdn, no_error=no_error)
-
- if vm:
- if self.verbose > 1:
- LOG.debug(_("Temporary VM {n!r} exists, raising {e}.").format(
- n=self.tpl_vm_fqdn, e='TempVmExistsError'))
- if self.verbose > 2:
- msg = "Info about Temporary VM {!r}:".format(self.tpl_vm_fqdn)
- msg += '\n' + pp(vm.config)
- LOG.debug(msg)
- print_section_end('check_existing_template')
- raise TempVmExistsError(self.tpl_vm_fqdn)
-
- LOG.debug(_("Temporary VM {!r} does not exists, will be created.").format(
- self.tpl_vm_fqdn))
- print_section_end('check_existing_template')
-
- # -------------------------------------------------------------------------
- def get_temp_tpl_vm(self):
-
- print_section_start('get_temp_tpl_vm', "Get created template VM ...", collapsed=True)
- vm = self.vsphere.get_vm(self.tpl_vm_fqdn, as_vmw_obj=True)
- print_section_end('get_temp_tpl_vm')
-
- return vm
-
- # -------------------------------------------------------------------------
- def select_data_store(self):
-
- LOG.info(_(
- "Selecting a SAN based datastore with at least {:0.1f} GiB available "
- "space.").format(self.cfg.data_size_gb))
- print_section_start('select_data_store', "Selecting data store ...", collapsed=True)
-
- self.vsphere.get_ds_clusters()
- self.vsphere.get_datastores()
-
- ds_to_use = None
- if self.cfg.storage_cluster:
- ds_to_use = self.select_data_store_from_cluster()
- if ds_to_use:
- msg = _(
- "Got datastore {n!r} as a member of datastore cluster {c!r}.").format(
- n=ds_to_use.name, c=self.cfg.storage_cluster)
- LOG.info(msg)
- else:
- msg = MSG_NO_CLUSTER.format(
- size=self.cfg.data_size_gb, c_name=self.cfg.storage_cluster)
- LOG.warn(msg)
- if not ds_to_use:
- ds_to_use = self.select_simple_data_store()
-
- if not ds_to_use:
- print_section_end('select_data_store')
- raise NoDatastoreFoundError(self.cfg.data_size_gb)
-
- self.tpl_data_store = ds_to_use
- LOG.info(_("Using datastore {!r} for volume of temporary VM to create.").format(
- ds_to_use.name))
- print_section_end('select_data_store')
- return
-
- # -------------------------------------------------------------------------
- def select_data_store_from_cluster(self):
-
- # Searching for the right storage cluster
- c_name = self.cfg.storage_cluster
- used_c_name = None
- for cluster_name in self.vsphere.ds_clusters.keys():
- if cluster_name.lower() == c_name.lower():
- msg = _("Found storage cluster {!r}.").format(cluster_name)
- used_c_name = cluster_name
- break
- if not used_c_name:
- return None
-
- cluster = self.vsphere.ds_clusters[used_c_name]
- if cluster.free_space_gb <= self.cfg.data_size_gb:
- msg = _(
- "Cannot use datastore cluster {n!r}, free space "
- "{free:0.1f} GiB is less than {min:0.1f} GiB.").format(
- n=used_c_name, free=cluster.free_space_gb, min=self.cfg.data_size_gb)
- LOG.warn(msg)
- return None
-
- pod = self._get_storage_pod_obj(used_c_name)
- if not pod:
- msg = _("Could not get {c} object with name {n!r}.").format(
- c="vim.StoragePod", n=used_c_name)
- raise HandlerError(msg)
-
- vmconf = vim.vm.ConfigSpec()
- podsel = vim.storageDrs.PodSelectionSpec()
- podsel.storagePod = pod
-
- folder_obj = self.vsphere.get_vm_folder(self.cfg.folder)
-
- storagespec = vim.storageDrs.StoragePlacementSpec()
- storagespec.podSelectionSpec = podsel
- storagespec.type = 'create'
- storagespec.folder = folder_obj
- storagespec.resourcePool = self.cluster.resource_pool
- storagespec.configSpec = vmconf
-
- LOG.debug(_(
- "Trying to get a recommendation for a datastore from "
- "VSphere storageResourceManager ..."))
- if self.verbose > 2:
- msg = "storagespec:\n" + pp(storagespec)
- LOG.debug(msg)
- content = self.vsphere.service_instance.RetrieveContent()
- try:
- rec = content.storageResourceManager.RecommendDatastores(storageSpec=storagespec)
- rec_action = rec.recommendations[0].action[0]
- real_datastore_name = rec_action.destination.name
- except Exception as e:
- msg = _(
- "Got no recommendation for a datastore from VSphere storageResourceManager: "
- "{c} - {e}").format(c=e.__class__.__name__, e=e)
- LOG.warn(msg)
- return None
-
- datastore = self.vsphere.get_obj(content, [vim.Datastore], real_datastore_name)
- ds = VsphereDatastore.from_summary(
- datastore, appname=self.appname, verbose=self.verbose, base_dir=self.base_dir)
- return ds
-
- # -------------------------------------------------------------------------
- def _get_storage_pod_obj(self, used_c_name):
-
- content = self.vsphere.service_instance.RetrieveContent()
- dc = self.vsphere.get_obj(content, [vim.Datacenter], self.cfg.vsphere_info.dc)
- if not dc:
- raise VSphereDatacenterNotFoundError(self.cfg.vsphere_info.dc)
-
- for child in dc.datastoreFolder.childEntity:
- pod = self._get_storage_pod_obj_rec(child, used_c_name)
- if pod:
- return pod
-
- return pod
-
- # -------------------------------------------------------------------------
- def _get_storage_pod_obj_rec(self, child, used_c_name, depth=1):
-
- if hasattr(child, 'childEntity'):
- if depth > self.vsphere.max_search_depth:
- return None
- for sub_child in child.childEntity:
- pod = self._get_storage_pod_obj_rec(sub_child, used_c_name, depth + 1)
- if pod:
- return pod
-
- if isinstance(child, vim.StoragePod):
- if child.summary.name == used_c_name:
- return child
-
- return None
-
- # -------------------------------------------------------------------------
- def select_simple_data_store(self):
-
- usable_ds = []
- for ds in self.vsphere.datastores.values():
- if not ds.accessible:
- if self.verbose > 1:
- LOG.debug(_("Cannot use datastore {n!r} - not accessible.").format(n=ds.name))
- continue
- if ds.name not in self.cluster.datastores:
- if self.verbose > 1:
- LOG.debug(_("Cannot use datastore {n!r}, not in cluster {c!r}.").format(
- n=ds.name, c=self.cluster.name))
- continue
- if self.verbose > 3:
- LOG.debug(_("Checking datastore:") + '\n' + pp(ds.as_dict()))
- if ds.storage_type not in ('SAS', 'SSD', 'SATA'):
- if self.verbose > 1:
- LOG.debug(_("Cannot use datastore {n!r}, is of type {t!r}.").format(
- n=ds.name, t=ds.storage_type))
- continue
- if ds.free_space_gb <= self.cfg.data_size_gb:
- if self.verbose > 1:
- LOG.debug(_(
- "Cannot use datastore {n!r}, free space "
- "{free:0.1f} GiB is less than {min:0.1f} GiB.").format(
- n=ds.name, free=ds.free_space_gb, min=self.cfg.data_size_gb))
- continue
-
- usable_ds.append(ds)
-
- LOG.debug(_("Found {} usable datastores.").format(len(usable_ds)))
- if len(usable_ds) < 1:
- msg = _("Did not found an usable datastore.")
- raise ExpectedHandlerError(msg)
-
- for st_type in ('SATA', 'SAS', 'SSD'):
-
- ds_list = []
- for ds in usable_ds:
- if ds.storage_type == st_type:
- ds_list.append(ds)
- if not len(ds_list):
- continue
-
- return random.choice(ds_list)
-
- return None
-
- # -------------------------------------------------------------------------
- def create_vm(self):
-
- disk_size = self.cfg.data_size_gb
-
- iface = VsphereVmInterface(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- name='eth', network=self.tpl_network, network_name=self.cfg.network,
- summary='Primary network device')
-
- if self.verbose > 1:
- msg = _("Defined interface to create:") + "\n{}".format(pp(iface.as_dict()))
- LOG.debug(msg)
-
- vm_spec = self.vsphere.generate_vm_create_spec(
- name=self.tpl_vm_fqdn, datastore=self.tpl_data_store.name,
- disks=[disk_size], nw_interfaces=[iface], graphic_ram_mb=256,
- videao_ram_mb=32, boot_delay_secs=self.vm_boot_delay_secs, ram_mb=self.cfg.ram_mb,
- num_cpus=self.cfg.num_cpus, ds_with_timestamp=True,
- os_version=self.cfg.os_version, cfg_version=self.cfg.vmware_cfg_version)
-
- tpl_vm_folder = self.vsphere.get_vm_folder(self.cfg.folder)
- if self.verbose > 1:
- msg = _("VM-Folder object for template VM: {c} - {n!r}").format(
- c=tpl_vm_folder, n=tpl_vm_folder.name)
- msg += '\n' + pp(tpl_vm_folder.childType)
- LOG.debug(msg)
-
- self.vsphere.create_vm(
- name=self.tpl_vm_fqdn, vm_folder=tpl_vm_folder, vm_config_spec=vm_spec,
- pool=self.cluster.resource_pool, max_wait=self.cfg.max_wait_for_create_vm)
-
- # -------------------------------------------------------------------------
- def eval_tpl_ips(self):
-
- LOG.info(_("Trying to evaluate the IP address of the template VM ..."))
-
- initial_delay = (2 * self.vm_boot_delay_secs) + 120
-
- LOG.debug(_("Waiting initially for {} seconds:").format(initial_delay))
- print(' ==> ', end='', flush=True)
-
- start_time = time.time()
- cur_time = start_time
- cur_duration = 0
-
- while cur_duration <= initial_delay:
- time.sleep(1)
- cur_time = time.time()
- print('.', end='', flush=True)
- cur_duration = cur_time - start_time
- print('', flush=True)
-
- self.tpl_ips = self.cobbler.get_dhcp_ips(self.tpl_macaddress)
- if not self.tpl_ips:
- msg = _(
- "Did not got the IP address of MAC address {mac!r} after "
- "{delay} seconds.").format(mac=self.tpl_macaddress, delay=initial_delay)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Got IP addresses for template VM:") + ' ' + format_list(self.tpl_ips))
-
- # -------------------------------------------------------------------------
- def wait_for_finish_install(self):
-
- LOG.info(_("Waiting for finishing installation ..."))
-
- LOG.debug(_("Waiting initially for {} seconds:").format(self.initial_sleep))
- print(' ==> ', end='', flush=True)
-
- cur_time = time.time()
- cur_duration = cur_time - self.ts_start_install
- last_dot = cur_time
-
- while cur_duration <= self.initial_sleep:
- time.sleep(self.interval_poll)
- cur_time = time.time()
- if (cur_time - last_dot) >= self.interval_dot:
- print('.', end='', flush=True)
- last_dot = cur_time
- cur_duration = cur_time - self.ts_start_install
- print('', flush=True)
-
- LOG.debug(_("Waiting for SSH available ..."))
-
- addr_infos = {}
- for ip in self.tpl_ips:
- ai = socket.getaddrinfo(ip, 22, socket.AF_INET, socket.SOCK_STREAM)
- if self.verbose > 1:
- msg = _("Got following address_infos for {h!r}, IPv4 TCP port {p}:").format(
- h=ip, p=22)
- msg += '\n' + pp(ai)
- LOG.debug(msg)
- if not ai:
- raise HandlerError(_(
- "Did not get address infos for {h!r}, IPv4 TCP port {p}.").format(
- h=ip, p=22))
-
- addr_info = random.choice(ai)
- LOG.debug(_("Using address info: {}").format(pp(addr_info)))
- addr_infos[ip] = addr_info
-
- if self.verbose <= 3:
- print(' ==> ', end='', flush=True)
-
- ssh_available = False
- cur_duration = cur_time - self.ts_start_install
- cur_time = time.time()
- last_dot = cur_time
- i = 0
- first = True
- while not ssh_available and cur_duration <= self.cfg.max_wait_for_finish_install:
-
- if not first:
- time.sleep(self.interval_poll)
- else:
- first = False
-
- cur_time = time.time()
- cur_duration = cur_time - self.ts_start_install
- if (self.verbose <= 3) and ((cur_time - last_dot) >= self.interval_dot):
- print('.', end='', flush=True)
- i += 1
- if i >= 60:
- print('', flush=True)
- print(' ', end='', flush=True)
- i = 0
- last_dot = cur_time
-
- for ip in addr_infos.keys():
-
- addr_info = addr_infos[ip]
- if self.check_ssh_available(addr_info):
- ssh_available = True
- self.tpl_ip = ip
- break
-
- if not ssh_available:
- continue
-
- if self.verbose <= 3:
- print('', flush=True)
- self.ts_finish_install = time.time()
-
- self.ts_finish_install = time.time()
- duration = self.ts_finish_install - self.ts_start_install
- minutes = int(int(duration) / 60)
- seconds = duration - float(minutes * 60)
-
- LOG.info(_("Needed {m} minutes and {s:0.1f} seconds.").format(
- m=minutes, s=seconds))
-
- if not ssh_available:
- raise ExpectedHandlerError(
- _("SSH not available after {:0.1f} seconds, giving up.").format(duration))
-
- # -------------------------------------------------------------------------
- def check_ssh_available(self, addr_info):
-
- family, socktype, proto, canonname, sockaddr = addr_info
-
- if self.verbose > 3:
- LOG.debug(_("Trying to connect to {a} via TCP port {p} ...").format(
- a=sockaddr[0], p=sockaddr[1]))
-
- try:
- sock = socket.socket(family, socktype, proto)
- except socket.error as e:
- sock = None
- LOG.warn(_("Error creating socket: {}").format(e))
- return False
-
- try:
- sock.connect(sockaddr)
- except socket.error as e:
- sock.close()
- sock = None
- if self.verbose > 3:
- LOG.debug(_("Could not connect: {}").format(e))
- return False
-
- LOG.info(_("Connected to {a} via TCP port {p}.").format(
- a=sockaddr[0], p=sockaddr[1]))
-
- data = sock.recv(4096)
- if data:
- msg = to_str(data).strip()
- LOG.info(_("Got SSHD banner: {}").format(msg))
-
- sock.close()
- sock = None
-
- return True
-
- # -------------------------------------------------------------------------
- def exec_remote(self, cmd, strict_host_key_checking=False):
-
- ssh = None
- result = {'out': None, 'err': None}
- if strict_host_key_checking:
- policy = paramiko.client.AutoAddPolicy()
- else:
- policy = paramiko.client.MissingHostKeyPolicy()
-
- try:
-
- LOG.debug(_("Initializing {} ...").format('paramiko SSHClient'))
- ssh = paramiko.SSHClient()
- LOG.debug(_("Loading SSH system host keys."))
- ssh.load_system_host_keys()
- LOG.debug(_("Setting SSH missing host key policy to {}.").format(
- policy.__class__.__name__))
- ssh.set_missing_host_key_policy(policy)
-
- LOG.debug(_("Connecting to {h!r}, port {p} as {u!r} per SSH ...").format(
- h=self.tpl_ip, p=self.ssh_port, u=self.ssh_user))
- ssh.connect(
- self.tpl_ip, port=self.ssh_port, timeout=self.ssh_timeout,
- username=self.ssh_user, key_filename=self.private_ssh_key)
-
- if self.verbose > 1:
- LOG.debug(_("Commands to execute:") + '\n' + cmd)
-
- stdin, stdout, stderr = ssh.exec_command(
- cmd, timeout=self.ssh_timeout)
-
- result['out'] = to_str(stdout.read()).strip()
- result['err'] = to_str(stderr.read()).strip()
-
- LOG.debug(_("Output on {}:").format('STDERR') + '\n' + result['err'])
-
- finally:
- if ssh:
- if self.verbose > 2:
- LOG.debug(_("Closing SSH connection."))
- ssh.close()
-
- return result
-
- # -------------------------------------------------------------------------
- def show_install_log(self):
-
- LOG.info(_("Showing post install log ..."))
- install_logfile = '/var/log/anaconda/post-install.log'
-
- cmd = textwrap.dedent("""\
- if [ -f {log} ] ; then
- echo "-----------------------------------------------------------"
- cat {log}
- echo "-----------------------------------------------------------"
- echo
- else
- echo "Post install log {log} not found!" >&2
- fi
-
- """).format(log=install_logfile)
-
- result = self.exec_remote(cmd)
-
- if result['err']:
- LOG.error(result['err'])
- if self.postinstall_errors:
- self.postinstall_errors += result['err']
- else:
- self.postinstall_errors = result['err']
- else:
- LOG.debug("Post install log:\n\n" + result['out'])
-
- # -------------------------------------------------------------------------
- def get_postinstall_error(self):
-
- LOG.info(_("Trying to get possible post-installation errors ..."))
-
- cmd = textwrap.dedent("""\
- if [ -f /root/postinst-error.txt ] ; then
- cat /root/postinst-error.txt
- fi
- """)
-
- result = self.exec_remote(cmd)
- if result['out']:
- if self.postinstall_errors:
- self.postinstall_errors += result['out']
- else:
- self.postinstall_errors = result['out']
- LOG.error(_("Got postinstall errors:") + '\n' + result['out'])
- else:
- LOG.info(_("No postinstall errors found."))
-
- if self.postinstall_errors:
- LOG.warn(_("Template VM {!r} has to be removed.").format(self.tpl_ip))
-
- # -------------------------------------------------------------------------
- def post_install_tasks_ssh(self):
-
- LOG.info(_("Executing tasks per SSH after installation ..."))
- print_section_start('post_install_tasks', 'Exec post install tasks ...', collapsed=True)
-
- logfiles = (
- '/var/log/boot.log',
- '/var/log/cron',
- '/var/log/dnf*.log',
- '/var/log/hawkey.log',
- '/var/log/messages',
- '/var/log/secure',
- '/var/log/wtmp',
- '/var/log/vmware-*.log*',
- '/var/log/yum*.log',
- )
-
- cmd = textwrap.dedent("""\
- printf "Current host FQDN: "
- hostname -f
-
- for ks_cfg in "/root/original-ks.cfg" "/root/anaconda-ks.cfg" ; do
- echo
- echo "-----------------------------------------------------------"
- if [ -f "${ks_cfg}" ] ; then
- echo "Moving ${ks_cfg} => /var/log/anaconda/ ..."
- mv -v "${ks_cfg}" /var/log/anaconda/
- else
- echo "File ${ks_cfg} not found." >&2
- fi
- done
-
- for f in @@@LOGFILES@@@ ; do
- if [ -f "${f}" ] ; then
- echo "Truncating ${f} ..."
- cp /dev/null "${f}"
- fi
- done
-
- echo
- echo "-----------------------------------------------------------"
- echo "Current network configuration:"
- echo
- /usr/sbin/ip address show
- echo
- echo "Current routing configuration:"
- echo
- /usr/sbin/ip route show
- echo
-
- """).replace('@@@LOGFILES@@@', ' '.join(logfiles))
-
- result = self.exec_remote(cmd)
- LOG.debug(_("Output on {}:").format('STDOUT') + '\n' + result['out'])
- if not result['err']:
- LOG.debug(_("No output on {}.").format('STDERR'))
-
- print_section_end('post_install_tasks')
-
- if result['err']:
- LOG.warn(_("Output on {}:").format('STDERR') + '\n' + result['err'])
-
- # -------------------------------------------------------------------------
- def poweroff_vm(self):
-
- wait_for_shutdown = 15
-
- LOG.info(_("Waiting for {} seconds before shutting down:").format(wait_for_shutdown))
- print(' ==> ', end='', flush=True)
-
- start_waiting = time.time()
- cur_time = start_waiting
- cur_duration = 0
- last_dot = cur_time
-
- i = 0
- while cur_duration <= wait_for_shutdown:
- time.sleep(0.1)
- cur_time = time.time()
- if (cur_time - last_dot) >= 1:
- print('.', end='', flush=True)
- last_dot = cur_time
- i += 1
- if not i % 60:
- print('\n ', end='', flush=True)
- cur_duration = cur_time - start_waiting
- print('', flush=True)
-
- LOG.info(_("Last actions before powering off VM {!r} ...").format(self.tpl_ip))
-
- cmd = textwrap.dedent("""\
- echo
- echo "-----------------------------------------------------------"
- echo "Cleaning up /root/.ssh/authorized_keys ..."
- afile="/root/.ssh/authorized_keys"
- tfile="/tmp/authorized_keys"
- pattern="create-vmware-tpl@pixelpark.com"
-
- cat "${afile}" | grep -v "${pattern}" > "${tfile}"
- mv -v "${tfile}" "${afile}"
-
- echo
- echo "-----------------------------------------------------------"
- echo "Removing SSH host keys ..."
- rm -v /etc/ssh/ssh_host_*
-
- echo
- echo "Sleeping some seconds ..."
- sleep 3
-
- echo
- echo "-----------------------------------------------------------"
- echo "Powering off ..."
- poweroff && logout
-
- """)
-
- vm = self.get_temp_tpl_vm()
- power_state = vm.runtime.powerState
- LOG.debug(_("Current state of template VM is {!r}").format(power_state))
- if power_state.strip().lower() == "poweredoff":
- LOG.info(_("Template VM is already shut off."))
- return
-
- if power_state.strip().lower() != "poweredon":
- raise ExpectedHandlerError(
- _("Cannot shut down VM {h!r}, is currently in state {s!r}.").format(
- h=self.tpl_ip, s=power_state))
-
- LOG.info(_("Powering off VM {!r} per SSH ...").format(self.tpl_ip))
- print_section_start('poweroff', 'Powering off VM ...', collapsed=True)
- result = self.exec_remote(cmd)
-
- LOG.debug(_("Output on {}:").format('STDOUT') + '\n' + result['out'])
- if not result['err']:
- LOG.debug(_("No output on {}.").format('STDERR'))
-
- print_section_end('poweroff')
-
- if result['err']:
- LOG.warn(_("Output on {}:").format('STDERR') + '\n' + result['err'])
-
- cur_diff = 0
- start_shutdown = time.time()
- cur_time = start_shutdown
- last_dot = cur_time
- i = 0
-
- LOG.debug(_("Waiting for successful shut down of VM ..."))
- if self.verbose <= 3:
- print(' ==> ', end='', flush=True)
- if self.verbose > 3:
- LOG.debug(_("Current state of template VM is {!r}").format(power_state))
-
- while power_state.strip().lower() != "poweredoff":
-
- time.sleep(0.2)
-
- dot = '.'
- if power_state.lower().strip() != 'poweredon':
- dot = 'S'
-
- cur_time = time.time()
- cur_diff = cur_time - start_shutdown
- if (self.verbose <= 3) and ((cur_time - last_dot) >= self.interval_dot):
- print(dot, end='', flush=True)
- i += 1
- if i >= 60:
- print('', flush=True)
- print(' ', end='', flush=True)
- i = 0
- last_dot = cur_time
-
- vm = self.get_temp_tpl_vm()
- power_state = vm.runtime.powerState
- if self.verbose > 3:
- LOG.debug(_(
- "Still waiting for completing shutdown, current state is {!r}.").format(
- power_state))
- if power_state.strip().lower() == "poweredoff":
- print('', flush=True)
- LOG.info(_(
- "Template VM {h!r} was shutting down in {t:0.1f} seconds.").format(
- h=self.tpl_ip, t=cur_diff))
- return
- if cur_diff >= self.cfg.max_wait_for_shutdown_vm:
- break
-
- print('', flush=True)
- raise ExpectedHandlerError(_(
- "VM {h!r} was not shut down after {t:0.1f} seconds, current state is {s!r}.").format(
- h=self.tpl_ip, t=cur_diff, s=power_state))
-
- # -------------------------------------------------------------------------
- def change_mac_address(self):
-
- LOG.info(_("Setting a new, randomized MAC address for template VM ..."))
-
- last_tuple1 = random.randint(1, 254)
- last_tuple2 = random.randint(1, 254)
- new_mac = self.cfg.mac_address_template.format(last_tuple1, last_tuple2)
- LOG.debug(_("New MAC address: {!r}.").format(new_mac))
-
- vm = self.get_temp_tpl_vm()
- self.vsphere.set_mac_of_nic(vm, new_mac, nic_nr=0)
-
- # -------------------------------------------------------------------------
- def rotate_templates(self):
-
- LOG.info(_("Searching for existing templates and rotate them ..."))
- print_section_start('rotate_templates', "Rotating templates ...", collapsed=True)
- re_is_numeric = re.compile(r'^\s*(\d+)\s*$')
-
- pattern_tpl = r'^' + re.escape(self.cfg.template_name)
- re_tpl = re.compile(pattern_tpl, re.IGNORECASE)
-
- templates = self.vsphere.get_vms(re_tpl, is_template=True, as_vmw_obj=True)
- if not templates:
- LOG.info(_("Did not found any existing templates."))
- return
- msg = ngettext(
- "Found one existing template.", "Found {} existing templates.",
- len(templates)).format(len(templates))
- LOG.debug(msg)
-
- templates_ts = {}
- templates_sorted = []
- new_template_names = {}
-
- for template in templates:
- tpl_name = template.summary.config.name
- val_map = {}
- for extra_cfg in template.config.extraConfig:
- key = extra_cfg.key
- value = extra_cfg.value
- val_map[key] = value
- created = time.time()
- if 'created' in val_map:
- if val_map['created'] and re_is_numeric.match(val_map['created']):
- created = float(val_map['created'])
- ts_created = datetime.datetime.fromtimestamp(created, tz=TZ)
- LOG.debug(_("Found template {n!r}, created: {ts}.").format(
- n=tpl_name, ts=ts_created.isoformat(' ')))
- if self.verbose > 2:
- LOG.debug("Template Summary Config:\n{}".format(template.summary.config))
- LOG.debug("Template Extra Config:\n{}".format(pp(val_map)))
-
- templates_ts[tpl_name] = created
-
- for tpl_name in sorted(templates_ts.keys(), key=lambda tpl: templates_ts[tpl]):
- templates_sorted.append(tpl_name)
-
- LOG.debug(_("Templates sorted by creation date:") + '\n' + pp(templates_sorted))
- templates_sorted.reverse()
- templates_to_remove = []
- i = 0
- for tpl_name in templates_sorted:
- if i > self.cfg.max_nr_templates_stay - 2:
- templates_to_remove.append(tpl_name)
- i += 1
- templates_to_remove.reverse()
- if templates_to_remove:
- LOG.debug(_("Templates to remove:") + '\n' + pp(templates_to_remove))
- else:
- LOG.debug(_("There are no templates to remove."))
-
- for template in templates:
- tpl_name = template.summary.config.name
- if tpl_name in templates_to_remove:
- LOG.info(_("Removing template {!r} ...").format(tpl_name))
- self.vsphere.purge_vm(template)
- LOG.debug(_("Successful removed template {!r}.").format(tpl_name))
- continue
- if tpl_name.strip().lower() == self.cfg.template_name.strip().lower():
- created = templates_ts[tpl_name]
- ts_created = datetime.datetime.fromtimestamp(created, tz=TZ)
- i = 0
- dt = ts_created.strftime('%Y-%m-%d_%H-%M-%S')
- new_name = "{t}.{d}".format(t=tpl_name, d=dt)
- tname = new_name.strip().lower()
- while tname in new_template_names:
- new_name = "{t}.{d}-{i}".format(t=tpl_name, d=dt, i=i)
- tname = new_name.strip().lower()
- i += 1
- new_template_names[tname] = 1
- LOG.info(_("Renaming template {o!r} => {n!r} ...").format(o=tpl_name, n=new_name))
- task = template.Rename_Task(new_name)
- self.vsphere.wait_for_tasks([task])
- LOG.debug(_("Successful renamed template into {!r}.").format(new_name))
- else:
- tname = tpl_name.strip().lower()
- new_template_names[tname] = 1
-
- print_section_end('rotate_templates')
-
- # -------------------------------------------------------------------------
- def rename_and_change_vm(self):
-
- LOG.info(_("Renaming VM {o!r} => {n!r} ...").format(
- o=self.tpl_vm_fqdn, n=self.cfg.template_name))
- print_section_start(
- 'rename_and_change_vm', "Renaming VM and mark as template ...", collapsed=True)
-
- vm = self.get_temp_tpl_vm()
- task = vm.Rename_Task(self.cfg.template_name)
- self.vsphere.wait_for_tasks([task])
- LOG.debug(_("Successful renamed VM into {!r}.").format(self.cfg.template_name))
-
- LOG.info(_("Changing VM {!r} into a VMWare template ...").format(
- self.cfg.template_name))
- vm.MarkAsTemplate()
- LOG.debug(_("Object {!r} is now a VMWare template.").format(self.cfg.template_name))
- print_section_end('rename_and_change_vm')
-
- # -------------------------------------------------------------------------
- def create_root_authkeys(self):
-
- LOG.info(_("Creating authorized keys of root from LDAP ..."))
-
- print_section_start(
- 'create_root_authkeys', "Generating authorized_keys of root.", collapsed=True)
- prefix = 'tmp.authorized_keys.root.'
- (fh, tmp_keys_file) = tempfile.mkstemp(prefix=prefix, text=True)
- self.auth_keys_file = Path(tmp_keys_file)
- os.close(fh)
- LOG.debug(_("Using temporary file {!r} for authorized keys of root.").format(
- tmp_keys_file))
-
- try:
- self.connect_ldap()
-
- line = ('#' * 60) + '\n'
- auth_keys = line
-
- admins = self.get_ldap_admins()
-
- for uid in sorted(admins.keys(), key=str.lower):
-
- admin = admins[uid]
-
- for ssh_key in sorted(admin['keys']):
-
- parts = ssh_key.split()
- used_key = parts[0] + ' ' + parts[1] + ' '
- used_key += admin['cn'] + ' <' + admin['mail'] + '>'
- auth_keys += used_key + '\n'
- auth_keys += line
-
- msg = _("Generated authorized keys for root:") + '\n' + auth_keys
- LOG.debug(msg)
-
- finally:
- self.disconnect_ldap()
-
- self.auth_keys_file.write_text(auth_keys)
- print_section_end('create_root_authkeys')
-
- # -------------------------------------------------------------------------
- def get_ldap_admins(self):
-
- if not self.ldap:
- msg = _("No LDAP connection initialized.")
- raise HandlerError(msg)
-
- admins = {}
-
- attrs = ['cn', 'dn', 'mail', 'sshPublicKey', 'uid']
- ldap_config = self.cfg.ldap_connection['default']
- fltr = ldap_config.admin_filter
-
- msg = _("Trying to get a list of all DPX admins with their public SSH keys ...")
- LOG.debug(msg)
-
- msg = _("LDAP search starting in {!r} with filter:").format(ldap_config.base_dn)
- msg += '\n' + fltr
- LOG.debug(msg)
-
- status, result, response, request = self.ldap.search(
- search_base=ldap_config.base_dn, search_scope=ldap3.SUBTREE, search_filter=fltr,
- attributes=attrs, time_limit=self.cfg.ldap_timeout)
-
- if not status:
- msg = _("Error retrieving DPX admin list from LDAP:")
- msg += ' ' + result
- raise HandlerError(msg)
-
- for entry in response:
-
- uid = None
- admin = {
- 'cn': None,
- 'dn': None,
- 'mail': None,
- 'keys': [],
- 'uid': None,
- }
-
- admin['dn'] = entry['dn']
-
- for attr in entry['attributes']:
-
- val = entry['attributes'][attr]
-
- if attr.lower() == 'uid':
- if is_sequence(val):
- uid = val[0]
- else:
- uid = val
- admin['uid'] = uid
-
- if attr.lower() == 'cn':
- if is_sequence(val):
- admin['cn'] = val[0]
- else:
- admin['cn'] = val
-
- if attr.lower() == 'mail':
- if is_sequence(val):
- admin['mail'] = val[0]
- else:
- admin['mail'] = val
-
- if attr.lower() == 'sshpublickey':
- if is_sequence(val):
- for key in val:
- admin['keys'].append(key)
- else:
- admin['keys'].append(val)
-
- if self.verbose == 2:
- msg = _("Got an admin {cn} <{mail}>.").format(cn=admin['cn'], mail=admin['mail'])
- LOG.debug(msg)
- elif self.verbose > 2:
- msg = _("Got an admin:") + '\n' + pp(admin)
- LOG.debug(msg)
-
- admins[uid] = admin
-
- if not admins:
- msg = _("Did not found any admins below base DN {!r} with filter:")
- msg = msg.format(self.cfg.ldap_connection['default'].base_dn)
- msg += '\n' + fltr
- raise HandlerError(msg)
-
- return admins
-
-
-# =============================================================================
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2020 by Frank Brehm, Berlin
+@summary: A handler module for creating the VMWare template
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import logging
+import re
+import random
+import time
+import datetime
+import socket
+import textwrap
+import signal
+import tempfile
+import os
+
+from pathlib import Path
+
+# Third party modules
+import pytz
+import paramiko
+
+from pyVmomi import vim
+
+import ldap3
+
+# Own modules
+
+from fb_tools.common import pp, to_str, is_sequence
+from fb_tools.errors import HandlerError, ExpectedHandlerError
+from fb_tools.handler import BaseHandler
+from fb_tools.xlate import format_list
+
+from fb_vmware.errors import VSphereExpectedError
+from fb_vmware.errors import VSphereDatacenterNotFoundError
+
+# from fb_tools.vsphere.server import VsphereServer
+from fb_vmware.connect import VsphereConnection
+
+from fb_vmware.iface import VsphereVmInterface
+from fb_vmware.datastore import VsphereDatastore
+
+from .. import print_section_start, print_section_end
+from .. import DEFAULT_PORT_LDAP, DEFAULT_PORT_LDAPS
+
+from ..config import CrTplConfiguration
+
+from ..cobbler import Cobbler
+
+from ..errors import MSG_NO_CLUSTER, TempVmExistsError, NoDatastoreFoundError
+
+from ..xlate import XLATOR
+
+__version__ = '2.4.0'
+
+LOG = logging.getLogger(__name__)
+TZ = pytz.timezone('Europe/Berlin')
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+# =============================================================================
+class CrTplHandler(BaseHandler):
+ """
+ A handler class for creating a vSphere template.
+ """
+
+ max_depth = 10
+ vm_boot_delay_secs = 5
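+    # Datastore-name patterns used when filtering datastores: re_local_ds
+    # matches host-local stores (e.g. 'local_ds01' or 'local-ds01'),
+    # re_share_nfs_ds matches NFS shares (e.g. 'share_nfs_01' or 'nfs-share-01').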
+ re_local_ds = re.compile(r'^local[_-]', re.IGNORECASE)
+ re_share_nfs_ds = re.compile(r'(?:share[_-]*nfs|nfs[_-]*share)', re.IGNORECASE)
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, appname=None, verbose=0, version=__version__, base_dir=None,
+ cfg=None, terminal_has_colors=False, simulate=None, force=None, initialized=False):
+
+ super(CrTplHandler, self).__init__(
+ appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+ terminal_has_colors=terminal_has_colors, simulate=simulate,
+ force=force, initialized=False,
+ )
+
+ if not isinstance(cfg, CrTplConfiguration):
+ msg = _("{w} is not an instance of {c}, but an instance of {i} instead.").format(
+ w='Parameter cfg', c='CrTplConfiguration', i=cfg.__class__.__name__)
+ raise HandlerError(msg)
+
+ self.cfg = cfg
+ self.service_instance = None
+ self.tpl_vm_folder = None
+ self.tpl_data_store = None
+ self.tpl_network = None
+ self.tpl_vm = None
+ self.tpl_vm_hostname = None
+ self.tpl_macaddress = None
+ self.tpl_ip = None
+ self.tpl_ips = []
+ self.ts_start_install = None
+ self.ts_finish_install = None
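+        # Initial sleep and polling intervals (in seconds) used while waiting
+        # for the installation of the template VM to finish.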
+ self.initial_sleep = 60
+ self.interval_poll = 0.5
+ self.interval_dot = 2
+ self.private_ssh_key = str(self.base_dir.joinpath('keys', 'id_rsa_cr_vmw_tpl'))
+ self.ssh_port = 22
+ self.ssh_user = 'root'
+ self.ssh_timeout = 30
+ self.rotate_only = False
+ self.abort = False
+ self.postinstall_errors = None
+ self.cobbler = None
+ self.ldap = None
+ self.ldap_server = None
+ self.auth_keys_file = None
+
+ self.vsphere = VsphereConnection(
+ self.cfg.vsphere_info, cluster=self.cfg.vsphere_cluster,
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ auto_close=True, simulate=self.simulate, force=self.force, tz=TZ,
+ terminal_has_colors=self.terminal_has_colors, initialized=False)
+
+ self.cluster = None
+
+ self.cobbler = Cobbler(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ cfg=cfg, simulate=self.simulate, force=self.force,
+ terminal_has_colors=self.terminal_has_colors, initialized=False)
+ self.cobbler.initialized = True
+
+ if not self.cfg.os_id:
+ msg = _("No ID for Operating system defined, please check the configuration.")
+ raise HandlerError(msg)
+
+ distro_info = self.cfg.current_distro
+
+ cur_ts = datetime.datetime.now()
+ cur_ts_str = cur_ts.strftime('%Y-%m-%d-%H-%M-%S')
+ self.tpl_vm_hostname = distro_info.shortname + '-' + cur_ts_str
+
+ if initialized:
+ self.initialized = True
+ self.vsphere.initialized = True
+
+ # -------------------------------------------------------------------------
+ @property
+ def tpl_vm_fqdn(self):
+ """The FQDN of the template VM."""
+ if not self.tpl_vm_hostname:
+ return None
+ if not self.cfg:
+ return self.tpl_vm_hostname
+ if not self.cfg.tpl_vm_domain:
+ return self.tpl_vm_hostname
+ return self.tpl_vm_hostname + '.' + self.cfg.tpl_vm_domain
+
+ # -------------------------------------------------------------------------
+ @property
+ def tpl_name(self):
+ """The final name of the VSphere template."""
+ if not self.cfg:
+ return None
+ if not self.cfg.os_id:
+ return None
+ return 'template-' + self.cfg.os_id
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True):
+ """
+        Transform the elements of the object into a dict.
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(CrTplHandler, self).as_dict(short=short)
+ res['tpl_name'] = self.tpl_name
+ res['tpl_vm_fqdn'] = self.tpl_vm_fqdn
+
+ return res
+
+ # -------------------------------------------------------------------------
+ def connect_ldap(self):
+
+ ldap_config = self.cfg.ldap_connection['default']
+
+ server_opts = {}
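+        # Pass an explicit port to ldap3 only if it differs from the default
+        # port of the chosen scheme (LDAPS or plain LDAP).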
+ if ldap_config.use_ldaps:
+ server_opts['use_ssl'] = True
+ if ldap_config.port != DEFAULT_PORT_LDAPS:
+ server_opts['port'] = ldap_config.port
+ else:
+ server_opts['use_ssl'] = False
+ if ldap_config.port != DEFAULT_PORT_LDAP:
+ server_opts['port'] = ldap_config.port
+
+ server_opts['get_info'] = ldap3.DSA
+ server_opts['mode'] = ldap3.IP_V4_PREFERRED
+ server_opts['connect_timeout'] = self.cfg.ldap_timeout
+
+ LOG.info(_("Connecting to LDAP server {!r} ...").format(ldap_config.url))
+
+ if self.verbose > 1:
+ msg = _("Connect options to LDAP server {!r}:").format(ldap_config.url)
+ msg += '\n' + pp(server_opts)
+ LOG.debug(msg)
+
+ self.ldap_server = ldap3.Server(ldap_config.host, **server_opts)
+
+ if self.verbose > 2:
+ LOG.debug("LDAP server {s}: {re}".format(
+ s=ldap_config.host, re=repr(self.ldap_server)))
+
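+        # With the SAFE_SYNC client strategy, ldap3 operations return
+        # (status, result, response, request) tuples instead of a plain
+        # boolean - see the search() call in get_ldap_admins().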
+ self.ldap = ldap3.Connection(
+ self.ldap_server, ldap_config.bind_dn, ldap_config.bind_pw,
+ client_strategy=ldap3.SAFE_SYNC, auto_bind=True)
+
+ if self.verbose > 2:
+ msg = _("Info about LDAP server {}:").format(ldap_config.url)
+ msg += '\n' + repr(self.ldap_server.info)
+ LOG.debug(msg)
+
+ # -------------------------------------------------------------------------
+ def disconnect_ldap(self):
+
+ if 'default' in self.cfg.ldap_connection:
+ ldap_config = self.cfg.ldap_connection['default']
+ ldap_server = ldap_config.url
+ else:
+ ldap_server = 'unknown'
+
+ if self.ldap:
+ LOG.info(_("Unbinding from LDAP server {} ...").format(ldap_server))
+ self.ldap.unbind()
+ self.ldap = None
+
+ if self.ldap_server:
+ LOG.info(_("Disconnecting from LDAP server {} ...").format(ldap_server))
+ self.ldap_server = None
+
+ # -------------------------------------------------------------------------
+ def __del__(self):
+ """Destructor."""
+
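+        # Best-effort cleanup: close the LDAP connection and remove the
+        # temporary authorized_keys file if it still exists.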
+ self.disconnect_ldap()
+
+ if self.auth_keys_file:
+ if self.auth_keys_file.exists():
+ LOG.debug(_("Removing {!r} ...").format(str(self.auth_keys_file)))
+ self.auth_keys_file.unlink()
+
+ # -------------------------------------------------------------------------
+ def __call__(self):
+ """Executing the underlying action."""
+
+ if not self.initialized:
+ raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
+
+ if not isinstance(self.cfg, CrTplConfiguration):
+ msg = _("{w} is not an instance of {c}, but an instance of {i} instead.").format(
+ w='self.cfg', c='CrTplConfiguration', i=self.cfg.__class__.__name__)
+ raise HandlerError(msg)
+
+ retval = 0
+ try:
+
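+            # Route terminating signals through self.signal_handler (inherited
+            # from BaseHandler), presumably so the cleanup in the finally
+            # block below still runs on SIGTERM and friends.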
+ signal.signal(signal.SIGHUP, self.signal_handler)
+ signal.signal(signal.SIGINT, self.signal_handler)
+ signal.signal(signal.SIGABRT, self.signal_handler)
+ signal.signal(signal.SIGTERM, self.signal_handler)
+ signal.signal(signal.SIGUSR1, self.signal_handler)
+ signal.signal(signal.SIGUSR2, self.signal_handler)
+
+ retval = self.run()
+
+ except VSphereExpectedError as e:
+ msg = _("Got a {n}: {e}").format(n=e.__class__.__name__, e=e)
+ LOG.error(msg)
+ retval = 9
+
+ finally:
+            # Cleaning up ...
+ self.cluster = None
+ LOG.debug(_("Closing ..."))
+ self.vsphere.disconnect()
+ self.vsphere = None
+ self.disconnect_ldap()
+
+ if self.auth_keys_file:
+ if self.auth_keys_file.exists():
+ LOG.debug(_("Removing {!r} ...").format(str(self.auth_keys_file)))
+ self.auth_keys_file.unlink()
+
+ return retval
+
+ # -------------------------------------------------------------------------
+ def run(self):
+
+ if self.verbose > 2:
+ LOG.debug(_("Current configuration:") + '\n' + pp(self.cfg.as_dict()))
+ elif self.verbose > 1:
+ LOG.debug(_("Current distribution:") + '\n' + pp(self.cfg.current_distro.as_dict()))
+
+ LOG.debug(_("Starting handling ..."))
+ self.cobbler.get_cobbler_version()
+ self.check_for_cobbler_distro()
+ self.cobbler.ensure_profile_ks()
+ self.create_root_authkeys()
+ self.cobbler.ensure_profile()
+
+ self.cobbler.ensure_webroot()
+ # self.cobbler.ensure_root_authkeys(self.auth_keys_file)
+ self.cobbler.ensure_rsyslog_cfg_files()
+ self.cobbler.ensure_snippets()
+
+ self.cobbler.ensure_keys(self.auth_keys_file)
+ if self.auth_keys_file:
+ if self.auth_keys_file.exists():
+ LOG.debug(_("Removing {!r} ...").format(str(self.auth_keys_file)))
+ self.auth_keys_file.unlink()
+ self.auth_keys_file = None
+
+ # self.cobbler.ensure_system_ks()
+ self.cobbler.ensure_repo_files()
+ self.cobbler.ensure_bashrc()
+ self.cobbler.ensure_vimrc()
+ self.cobbler.ensure_create_motd()
+ self.cobbler.ensure_postfix_files()
+ self.cobbler.ensure_logrotate_files()
+
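+        # print_section_start()/print_section_end() delimit named, collapsible
+        # sections in the job output (presumably for the CI log viewer).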
+ print_section_start('vmw_info', 'Collecting VMWare info', collapsed=True)
+ self.vsphere.get_about()
+ self.vsphere.get_clusters()
+ self.cluster = self.vsphere.get_cluster_by_name(self.cfg.vsphere_cluster)
+ print_section_end('vmw_info')
+ if self.cluster:
+ LOG.debug(_("Found VSphere cluster {!r}.").format(self.cluster.name))
+ else:
+ LOG.error(_("Could not find VSphere cluster {!r}.").format(
+ self.cfg.vsphere_cluster))
+ return 6
+
+ if self.cfg.network not in self.cluster.networks:
+ LOG.error(_("Network {n!r} not available in cluster {c!r}.").format(
+ n=self.cfg.network, c=self.cluster.name))
+ return 6
+
+        print_section_start('vmw_networks', 'Collecting VSphere networks ...', collapsed=True)
+ self.vsphere.get_networks()
+ print_section_end('vmw_networks')
+
+        print_section_start('vmw_folder', 'Ensuring VSphere folder ...', collapsed=True)
+ self.vsphere.ensure_vm_folder(self.cfg.folder)
+ print_section_end('vmw_folder')
+
+ self.check_for_temp_tpl_vm(no_error=True)
+ self.select_data_store()
+
+ if self.rotate_only:
+ LOG.warn(_("Only executing of template rotating."))
+ else:
+ self.create_vm()
+ self.tpl_vm = self.get_temp_tpl_vm()
+ if self.tpl_vm:
+ LOG.debug(_("Created VM as {cls}: {vm!r}").format(
+ cls=self.tpl_vm.__class__.__name__, vm=self.tpl_vm))
+ for device in self.tpl_vm.config.hardware.device:
+ if isinstance(device, vim.vm.device.VirtualEthernetCard):
+ self.tpl_macaddress = device.macAddress
+ LOG.debug(_("Found Ethernet card as {}.").format(
+ device.__class__.__name__))
+ if self.verbose > 2:
+ LOG.debug(_("Found Ethernet card:") + "\n{}".format(device))
+ break
+ if not self.tpl_macaddress:
+ msg = _("Did not found MAC address of ethernet card.")
+ raise HandlerError(msg)
+ else:
+ if self.simulate:
+ LOG.warn(_("Simulation mode - VM not created in real."))
+ self.tpl_macaddress = self.cfg.default_mac_address
+ else:
+ raise HandlerError(_("Could not find VM after creating."))
+
+ LOG.info(_("Using MAC address of template VM: {!r}").format(self.tpl_macaddress))
+
+ tpl_sysname = 'template-' + self.tpl_vm_hostname
+ self.cobbler.add_system(
+ name=tpl_sysname, fqdn=self.tpl_vm_fqdn, mac_address=self.tpl_macaddress)
+
+ self.vsphere.poweron_vm(self.tpl_vm, max_wait=self.cfg.max_wait_for_poweron_vm)
+ self.ts_start_install = time.time()
+ self.eval_tpl_ips()
+ self.wait_for_finish_install()
+
+ self.show_install_log()
+ self.get_postinstall_error()
+ if self.abort:
+ LOG.warn(_("Aborting after creation of template VM."))
+ LOG.warn(_("You are responsible yourself to cleaning up the VM!!!"))
+ else:
+ self.post_install_tasks_ssh()
+ if self.postinstall_errors:
+ self.vsphere.purge_vm(self.tpl_vm, max_wait=self.cfg.max_wait_for_purge_vm)
+ return 10
+ else:
+ self.poweroff_vm()
+ self.cobbler.remove_system(tpl_sysname)
+
+ if not self.abort and not self.postinstall_errors:
+ self.rotate_templates()
+ if not self.rotate_only:
+ self.rename_and_change_vm()
+
+ return 0
+
+ # -------------------------------------------------------------------------
+ def check_for_cobbler_distro(self):
+ LOG.debug(_(
+ "Checking, whether distro {!r} is available "
+ "on the cobbler host.").format(self.cfg.cobbler_distro))
+
+ distro_list = self.cobbler.get_distro_list()
+ if self.cfg.cobbler_distro not in distro_list:
+ msg = _("Did not found distro {!r} on the cobbler host.").format(
+ self.cfg.cobbler_distro)
+ raise ExpectedHandlerError(msg)
+ if self.verbose > 1:
+ msg = _("Distro {!r} is available on the cobbler host.").format(
+ self.cfg.cobbler_distro)
+ LOG.debug(msg)
+
+ if not self.cobbler.verify_distro_repos(self.cfg.current_distro):
+ msg = _("Not all repos for distro {!r} were found on Cobbler server.").format(
+ self.cfg.current_distro.name)
+ raise ExpectedHandlerError(msg)
+
+ # -------------------------------------------------------------------------
+ def check_for_temp_tpl_vm(self, no_error=False):
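+        """Raise a TempVmExistsError if the temporary template VM already exists."""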
+
+ LOG.info(_("First checking, whether {!r} exists ...").format(self.tpl_vm_fqdn))
+ print_section_start(
+ 'check_existing_template', "Checking for existence of template ...",
+ collapsed=True)
+ vm = self.vsphere.get_vm(self.tpl_vm_fqdn, no_error=no_error)
+
+ if vm:
+ if self.verbose > 1:
+ LOG.debug(_("Temporary VM {n!r} exists, raising {e}.").format(
+ n=self.tpl_vm_fqdn, e='TempVmExistsError'))
+ if self.verbose > 2:
+ msg = "Info about Temporary VM {!r}:".format(self.tpl_vm_fqdn)
+ msg += '\n' + pp(vm.config)
+ LOG.debug(msg)
+ print_section_end('check_existing_template')
+ raise TempVmExistsError(self.tpl_vm_fqdn)
+
+ LOG.debug(_("Temporary VM {!r} does not exists, will be created.").format(
+ self.tpl_vm_fqdn))
+ print_section_end('check_existing_template')
+
+ # -------------------------------------------------------------------------
+ def get_temp_tpl_vm(self):
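+        """Return the temporary template VM as a pyVmomi VM object."""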
+
+        print_section_start('get_temp_tpl_vm', "Getting created template VM ...", collapsed=True)
+ vm = self.vsphere.get_vm(self.tpl_vm_fqdn, as_vmw_obj=True)
+ print_section_end('get_temp_tpl_vm')
+
+ return vm
+
+ # -------------------------------------------------------------------------
+ def select_data_store(self):
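+        """Select a datastore with enough free space for the temporary VM."""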
+
+        LOG.info(_(
+            "Selecting a SAN-based datastore with at least {:0.1f} GiB of available "
+            "space.").format(self.cfg.data_size_gb))
+ print_section_start('select_data_store', "Selecting data store ...", collapsed=True)
+
+ self.vsphere.get_ds_clusters()
+ self.vsphere.get_datastores()
+
+ ds_to_use = None
+ if self.cfg.storage_cluster:
+ ds_to_use = self.select_data_store_from_cluster()
+ if ds_to_use:
+ msg = _(
+ "Got datastore {n!r} as a member of datastore cluster {c!r}.").format(
+ n=ds_to_use.name, c=self.cfg.storage_cluster)
+ LOG.info(msg)
+ else:
+ msg = MSG_NO_CLUSTER.format(
+ size=self.cfg.data_size_gb, c_name=self.cfg.storage_cluster)
+                LOG.warning(msg)
+ if not ds_to_use:
+ ds_to_use = self.select_simple_data_store()
+
+ if not ds_to_use:
+ print_section_end('select_data_store')
+ raise NoDatastoreFoundError(self.cfg.data_size_gb)
+
+ self.tpl_data_store = ds_to_use
+ LOG.info(_("Using datastore {!r} for volume of temporary VM to create.").format(
+ ds_to_use.name))
+ print_section_end('select_data_store')
+ return
+
+ # -------------------------------------------------------------------------
+ def select_data_store_from_cluster(self):
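+        """Get a datastore recommendation from the configured datastore cluster."""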
+
+ # Searching for the right storage cluster
+ c_name = self.cfg.storage_cluster
+ used_c_name = None
+ for cluster_name in self.vsphere.ds_clusters.keys():
+            if cluster_name.lower() == c_name.lower():
+                msg = _("Found storage cluster {!r}.").format(cluster_name)
+                LOG.debug(msg)
+                used_c_name = cluster_name
+                break
+ if not used_c_name:
+ return None
+
+ cluster = self.vsphere.ds_clusters[used_c_name]
+ if cluster.free_space_gb <= self.cfg.data_size_gb:
+ msg = _(
+ "Cannot use datastore cluster {n!r}, free space "
+ "{free:0.1f} GiB is less than {min:0.1f} GiB.").format(
+ n=used_c_name, free=cluster.free_space_gb, min=self.cfg.data_size_gb)
+            LOG.warning(msg)
+ return None
+
+ pod = self._get_storage_pod_obj(used_c_name)
+ if not pod:
+ msg = _("Could not get {c} object with name {n!r}.").format(
+ c="vim.StoragePod", n=used_c_name)
+ raise HandlerError(msg)
+
+ vmconf = vim.vm.ConfigSpec()
+ podsel = vim.storageDrs.PodSelectionSpec()
+ podsel.storagePod = pod
+
+ folder_obj = self.vsphere.get_vm_folder(self.cfg.folder)
+
+ storagespec = vim.storageDrs.StoragePlacementSpec()
+ storagespec.podSelectionSpec = podsel
+ storagespec.type = 'create'
+ storagespec.folder = folder_obj
+ storagespec.resourcePool = self.cluster.resource_pool
+ storagespec.configSpec = vmconf
+
+ LOG.debug(_(
+ "Trying to get a recommendation for a datastore from "
+ "VSphere storageResourceManager ..."))
+ if self.verbose > 2:
+ msg = "storagespec:\n" + pp(storagespec)
+ LOG.debug(msg)
+ content = self.vsphere.service_instance.RetrieveContent()
+ try:
+ rec = content.storageResourceManager.RecommendDatastores(storageSpec=storagespec)
+ rec_action = rec.recommendations[0].action[0]
+ real_datastore_name = rec_action.destination.name
+ except Exception as e:
+ msg = _(
+ "Got no recommendation for a datastore from VSphere storageResourceManager: "
+ "{c} - {e}").format(c=e.__class__.__name__, e=e)
+            LOG.warning(msg)
+ return None
+
+ datastore = self.vsphere.get_obj(content, [vim.Datastore], real_datastore_name)
+ ds = VsphereDatastore.from_summary(
+ datastore, appname=self.appname, verbose=self.verbose, base_dir=self.base_dir)
+ return ds
+
+ # -------------------------------------------------------------------------
+ def _get_storage_pod_obj(self, used_c_name):
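+        """Search the datacenter datastore folder for the named StoragePod."""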
+
+ content = self.vsphere.service_instance.RetrieveContent()
+ dc = self.vsphere.get_obj(content, [vim.Datacenter], self.cfg.vsphere_info.dc)
+ if not dc:
+ raise VSphereDatacenterNotFoundError(self.cfg.vsphere_info.dc)
+
+ for child in dc.datastoreFolder.childEntity:
+ pod = self._get_storage_pod_obj_rec(child, used_c_name)
+ if pod:
+ return pod
+
+        return None
+
+ # -------------------------------------------------------------------------
+ def _get_storage_pod_obj_rec(self, child, used_c_name, depth=1):
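+        """Search a folder tree recursively for the named StoragePod."""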
+
+ if hasattr(child, 'childEntity'):
+ if depth > self.vsphere.max_search_depth:
+ return None
+ for sub_child in child.childEntity:
+ pod = self._get_storage_pod_obj_rec(sub_child, used_c_name, depth + 1)
+ if pod:
+ return pod
+
+ if isinstance(child, vim.StoragePod):
+ if child.summary.name == used_c_name:
+ return child
+
+ return None
+
+ # -------------------------------------------------------------------------
+ def select_simple_data_store(self):
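+        """Select a random usable datastore of the cluster by storage type."""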
+
+ usable_ds = []
+ for ds in self.vsphere.datastores.values():
+ if not ds.accessible:
+ if self.verbose > 1:
+ LOG.debug(_("Cannot use datastore {n!r} - not accessible.").format(n=ds.name))
+ continue
+ if ds.name not in self.cluster.datastores:
+ if self.verbose > 1:
+ LOG.debug(_("Cannot use datastore {n!r}, not in cluster {c!r}.").format(
+ n=ds.name, c=self.cluster.name))
+ continue
+ if self.verbose > 3:
+ LOG.debug(_("Checking datastore:") + '\n' + pp(ds.as_dict()))
+ if ds.storage_type not in ('SAS', 'SSD', 'SATA'):
+ if self.verbose > 1:
+ LOG.debug(_("Cannot use datastore {n!r}, is of type {t!r}.").format(
+ n=ds.name, t=ds.storage_type))
+ continue
+ if ds.free_space_gb <= self.cfg.data_size_gb:
+ if self.verbose > 1:
+ LOG.debug(_(
+ "Cannot use datastore {n!r}, free space "
+ "{free:0.1f} GiB is less than {min:0.1f} GiB.").format(
+ n=ds.name, free=ds.free_space_gb, min=self.cfg.data_size_gb))
+ continue
+
+ usable_ds.append(ds)
+
+ LOG.debug(_("Found {} usable datastores.").format(len(usable_ds)))
+ if len(usable_ds) < 1:
+ msg = _("Did not found an usable datastore.")
+ raise ExpectedHandlerError(msg)
+
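+        # Prefer the storage types in this fixed order and take a random
+        # datastore of the first type that has any usable candidates.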
+ for st_type in ('SATA', 'SAS', 'SSD'):
+
+ ds_list = []
+ for ds in usable_ds:
+ if ds.storage_type == st_type:
+ ds_list.append(ds)
+            if not ds_list:
+ continue
+
+ return random.choice(ds_list)
+
+ return None
+
+ # -------------------------------------------------------------------------
+ def create_vm(self):
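+        """Create the temporary template VM in VSphere."""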
+
+ disk_size = self.cfg.data_size_gb
+
+ iface = VsphereVmInterface(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ name='eth', network=self.tpl_network, network_name=self.cfg.network,
+ summary='Primary network device')
+
+ if self.verbose > 1:
+ msg = _("Defined interface to create:") + "\n{}".format(pp(iface.as_dict()))
+ LOG.debug(msg)
+
+ vm_spec = self.vsphere.generate_vm_create_spec(
+ name=self.tpl_vm_fqdn, datastore=self.tpl_data_store.name,
+ disks=[disk_size], nw_interfaces=[iface], graphic_ram_mb=256,
+ videao_ram_mb=32, boot_delay_secs=self.vm_boot_delay_secs, ram_mb=self.cfg.ram_mb,
+ num_cpus=self.cfg.num_cpus, ds_with_timestamp=True,
+ os_version=self.cfg.os_version, cfg_version=self.cfg.vmware_cfg_version)
+
+ tpl_vm_folder = self.vsphere.get_vm_folder(self.cfg.folder)
+ if self.verbose > 1:
+ msg = _("VM-Folder object for template VM: {c} - {n!r}").format(
+ c=tpl_vm_folder, n=tpl_vm_folder.name)
+ msg += '\n' + pp(tpl_vm_folder.childType)
+ LOG.debug(msg)
+
+ self.vsphere.create_vm(
+ name=self.tpl_vm_fqdn, vm_folder=tpl_vm_folder, vm_config_spec=vm_spec,
+ pool=self.cluster.resource_pool, max_wait=self.cfg.max_wait_for_create_vm)
+
+ # -------------------------------------------------------------------------
+ def eval_tpl_ips(self):
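+        """Get the IP addresses of the template VM from the Cobbler DHCP leases."""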
+
+ LOG.info(_("Trying to evaluate the IP address of the template VM ..."))
+
+ initial_delay = (2 * self.vm_boot_delay_secs) + 120
+
+ LOG.debug(_("Waiting initially for {} seconds:").format(initial_delay))
+ print(' ==> ', end='', flush=True)
+
+ start_time = time.time()
+ cur_time = start_time
+ cur_duration = 0
+
+ while cur_duration <= initial_delay:
+ time.sleep(1)
+ cur_time = time.time()
+ print('.', end='', flush=True)
+ cur_duration = cur_time - start_time
+ print('', flush=True)
+
+ self.tpl_ips = self.cobbler.get_dhcp_ips(self.tpl_macaddress)
+ if not self.tpl_ips:
+            msg = _(
+                "Did not get an IP address for MAC address {mac!r} after "
+                "{delay} seconds.").format(mac=self.tpl_macaddress, delay=initial_delay)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Got IP addresses for template VM:") + ' ' + format_list(self.tpl_ips))
+
+ # -------------------------------------------------------------------------
+ def wait_for_finish_install(self):
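+        """Wait until installation has finished and SSH is reachable on the VM."""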
+
+ LOG.info(_("Waiting for finishing installation ..."))
+
+ LOG.debug(_("Waiting initially for {} seconds:").format(self.initial_sleep))
+ print(' ==> ', end='', flush=True)
+
+ cur_time = time.time()
+ cur_duration = cur_time - self.ts_start_install
+ last_dot = cur_time
+
+ while cur_duration <= self.initial_sleep:
+ time.sleep(self.interval_poll)
+ cur_time = time.time()
+ if (cur_time - last_dot) >= self.interval_dot:
+ print('.', end='', flush=True)
+ last_dot = cur_time
+ cur_duration = cur_time - self.ts_start_install
+ print('', flush=True)
+
+ LOG.debug(_("Waiting for SSH available ..."))
+
+ addr_infos = {}
+ for ip in self.tpl_ips:
+ ai = socket.getaddrinfo(ip, 22, socket.AF_INET, socket.SOCK_STREAM)
+ if self.verbose > 1:
+ msg = _("Got following address_infos for {h!r}, IPv4 TCP port {p}:").format(
+ h=ip, p=22)
+ msg += '\n' + pp(ai)
+ LOG.debug(msg)
+ if not ai:
+ raise HandlerError(_(
+ "Did not get address infos for {h!r}, IPv4 TCP port {p}.").format(
+ h=ip, p=22))
+
+ addr_info = random.choice(ai)
+ LOG.debug(_("Using address info: {}").format(pp(addr_info)))
+ addr_infos[ip] = addr_info
+
+ if self.verbose <= 3:
+ print(' ==> ', end='', flush=True)
+
+ ssh_available = False
+        cur_time = time.time()
+        cur_duration = cur_time - self.ts_start_install
+ last_dot = cur_time
+ i = 0
+ first = True
+ while not ssh_available and cur_duration <= self.cfg.max_wait_for_finish_install:
+
+ if not first:
+ time.sleep(self.interval_poll)
+ else:
+ first = False
+
+ cur_time = time.time()
+ cur_duration = cur_time - self.ts_start_install
+ if (self.verbose <= 3) and ((cur_time - last_dot) >= self.interval_dot):
+ print('.', end='', flush=True)
+ i += 1
+ if i >= 60:
+ print('', flush=True)
+ print(' ', end='', flush=True)
+ i = 0
+ last_dot = cur_time
+
+ for ip in addr_infos.keys():
+
+ addr_info = addr_infos[ip]
+ if self.check_ssh_available(addr_info):
+ ssh_available = True
+ self.tpl_ip = ip
+ break
+
+ if not ssh_available:
+ continue
+
+ if self.verbose <= 3:
+ print('', flush=True)
+ self.ts_finish_install = time.time()
+
+ self.ts_finish_install = time.time()
+ duration = self.ts_finish_install - self.ts_start_install
+ minutes = int(int(duration) / 60)
+ seconds = duration - float(minutes * 60)
+
+ LOG.info(_("Needed {m} minutes and {s:0.1f} seconds.").format(
+ m=minutes, s=seconds))
+
+ if not ssh_available:
+ raise ExpectedHandlerError(
+ _("SSH not available after {:0.1f} seconds, giving up.").format(duration))
+
+ # -------------------------------------------------------------------------
+ def check_ssh_available(self, addr_info):
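+        """Check by a plain TCP connect whether the SSH port is reachable."""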
+
+ family, socktype, proto, canonname, sockaddr = addr_info
+
+ if self.verbose > 3:
+ LOG.debug(_("Trying to connect to {a} via TCP port {p} ...").format(
+ a=sockaddr[0], p=sockaddr[1]))
+
+ try:
+ sock = socket.socket(family, socktype, proto)
+ except socket.error as e:
+ sock = None
+ LOG.warn(_("Error creating socket: {}").format(e))
+ return False
+
+ try:
+ sock.connect(sockaddr)
+ except socket.error as e:
+ sock.close()
+ sock = None
+ if self.verbose > 3:
+ LOG.debug(_("Could not connect: {}").format(e))
+ return False
+
+ LOG.info(_("Connected to {a} via TCP port {p}.").format(
+ a=sockaddr[0], p=sockaddr[1]))
+
+ data = sock.recv(4096)
+ if data:
+ msg = to_str(data).strip()
+ LOG.info(_("Got SSHD banner: {}").format(msg))
+
+ sock.close()
+ sock = None
+
+ return True
+
+ # -------------------------------------------------------------------------
+ def exec_remote(self, cmd, strict_host_key_checking=False):
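+        """Execute a command on the template VM via SSH, return stdout and stderr."""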
+
+ ssh = None
+ result = {'out': None, 'err': None}
+        if strict_host_key_checking:
+            # RejectPolicy refuses to connect to hosts with an unknown host key.
+            policy = paramiko.client.RejectPolicy()
+        else:
+            # AutoAddPolicy silently accepts and stores unknown host keys.
+            policy = paramiko.client.AutoAddPolicy()
+
+ try:
+
+ LOG.debug(_("Initializing {} ...").format('paramiko SSHClient'))
+ ssh = paramiko.SSHClient()
+ LOG.debug(_("Loading SSH system host keys."))
+ ssh.load_system_host_keys()
+ LOG.debug(_("Setting SSH missing host key policy to {}.").format(
+ policy.__class__.__name__))
+ ssh.set_missing_host_key_policy(policy)
+
+ LOG.debug(_("Connecting to {h!r}, port {p} as {u!r} per SSH ...").format(
+ h=self.tpl_ip, p=self.ssh_port, u=self.ssh_user))
+ ssh.connect(
+ self.tpl_ip, port=self.ssh_port, timeout=self.ssh_timeout,
+ username=self.ssh_user, key_filename=self.private_ssh_key)
+
+ if self.verbose > 1:
+ LOG.debug(_("Commands to execute:") + '\n' + cmd)
+
+ stdin, stdout, stderr = ssh.exec_command(
+ cmd, timeout=self.ssh_timeout)
+
+ result['out'] = to_str(stdout.read()).strip()
+ result['err'] = to_str(stderr.read()).strip()
+
+ LOG.debug(_("Output on {}:").format('STDERR') + '\n' + result['err'])
+
+ finally:
+ if ssh:
+ if self.verbose > 2:
+ LOG.debug(_("Closing SSH connection."))
+ ssh.close()
+
+ return result
+
+ # -------------------------------------------------------------------------
+ def show_install_log(self):
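+        """Show the Anaconda post-install log of the template VM."""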
+
+ LOG.info(_("Showing post install log ..."))
+ install_logfile = '/var/log/anaconda/post-install.log'
+
+ cmd = textwrap.dedent("""\
+ if [ -f {log} ] ; then
+ echo "-----------------------------------------------------------"
+ cat {log}
+ echo "-----------------------------------------------------------"
+ echo
+ else
+ echo "Post install log {log} not found!" >&2
+ fi
+
+ """).format(log=install_logfile)
+
+ result = self.exec_remote(cmd)
+
+ if result['err']:
+ LOG.error(result['err'])
+ if self.postinstall_errors:
+ self.postinstall_errors += result['err']
+ else:
+ self.postinstall_errors = result['err']
+ else:
+ LOG.debug("Post install log:\n\n" + result['out'])
+
+ # -------------------------------------------------------------------------
+ def get_postinstall_error(self):
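+        """Collect possible post-installation errors from the template VM."""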
+
+ LOG.info(_("Trying to get possible post-installation errors ..."))
+
+ cmd = textwrap.dedent("""\
+ if [ -f /root/postinst-error.txt ] ; then
+ cat /root/postinst-error.txt
+ fi
+ """)
+
+ result = self.exec_remote(cmd)
+ if result['out']:
+ if self.postinstall_errors:
+ self.postinstall_errors += result['out']
+ else:
+ self.postinstall_errors = result['out']
+ LOG.error(_("Got postinstall errors:") + '\n' + result['out'])
+ else:
+ LOG.info(_("No postinstall errors found."))
+
+ if self.postinstall_errors:
+ LOG.warn(_("Template VM {!r} has to be removed.").format(self.tpl_ip))
+
+ # -------------------------------------------------------------------------
+ def post_install_tasks_ssh(self):
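+        """Clean up kickstart and log files on the template VM via SSH."""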
+
+ LOG.info(_("Executing tasks per SSH after installation ..."))
+ print_section_start('post_install_tasks', 'Exec post install tasks ...', collapsed=True)
+
+ logfiles = (
+ '/var/log/boot.log',
+ '/var/log/cron',
+ '/var/log/dnf*.log',
+ '/var/log/hawkey.log',
+ '/var/log/messages',
+ '/var/log/secure',
+ '/var/log/wtmp',
+ '/var/log/vmware-*.log*',
+ '/var/log/yum*.log',
+ )
+
+ cmd = textwrap.dedent("""\
+ printf "Current host FQDN: "
+ hostname -f
+
+ for ks_cfg in "/root/original-ks.cfg" "/root/anaconda-ks.cfg" ; do
+ echo
+ echo "-----------------------------------------------------------"
+ if [ -f "${ks_cfg}" ] ; then
+ echo "Moving ${ks_cfg} => /var/log/anaconda/ ..."
+ mv -v "${ks_cfg}" /var/log/anaconda/
+ else
+ echo "File ${ks_cfg} not found." >&2
+ fi
+ done
+
+ for f in @@@LOGFILES@@@ ; do
+ if [ -f "${f}" ] ; then
+ echo "Truncating ${f} ..."
+ cp /dev/null "${f}"
+ fi
+ done
+
+ echo
+ echo "-----------------------------------------------------------"
+ echo "Current network configuration:"
+ echo
+ /usr/sbin/ip address show
+ echo
+ echo "Current routing configuration:"
+ echo
+ /usr/sbin/ip route show
+ echo
+
+ """).replace('@@@LOGFILES@@@', ' '.join(logfiles))
+
+ result = self.exec_remote(cmd)
+ LOG.debug(_("Output on {}:").format('STDOUT') + '\n' + result['out'])
+ if not result['err']:
+ LOG.debug(_("No output on {}.").format('STDERR'))
+
+ print_section_end('post_install_tasks')
+
+ if result['err']:
+ LOG.warn(_("Output on {}:").format('STDERR') + '\n' + result['err'])
+
+ # -------------------------------------------------------------------------
+ def poweroff_vm(self):
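+        """Clean up SSH artifacts on the template VM and power it off."""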
+
+ wait_for_shutdown = 15
+
+ LOG.info(_("Waiting for {} seconds before shutting down:").format(wait_for_shutdown))
+ print(' ==> ', end='', flush=True)
+
+ start_waiting = time.time()
+ cur_time = start_waiting
+ cur_duration = 0
+ last_dot = cur_time
+
+ i = 0
+ while cur_duration <= wait_for_shutdown:
+ time.sleep(0.1)
+ cur_time = time.time()
+ if (cur_time - last_dot) >= 1:
+ print('.', end='', flush=True)
+ last_dot = cur_time
+ i += 1
+ if not i % 60:
+ print('\n ', end='', flush=True)
+ cur_duration = cur_time - start_waiting
+ print('', flush=True)
+
+ LOG.info(_("Last actions before powering off VM {!r} ...").format(self.tpl_ip))
+
+ cmd = textwrap.dedent("""\
+ echo
+ echo "-----------------------------------------------------------"
+ echo "Cleaning up /root/.ssh/authorized_keys ..."
+ afile="/root/.ssh/authorized_keys"
+ tfile="/tmp/authorized_keys"
+ pattern="create-vmware-tpl@pixelpark.com"
+
+ cat "${afile}" | grep -v "${pattern}" > "${tfile}"
+ mv -v "${tfile}" "${afile}"
+
+ echo
+ echo "-----------------------------------------------------------"
+ echo "Removing SSH host keys ..."
+ rm -v /etc/ssh/ssh_host_*
+
+ echo
+ echo "Sleeping some seconds ..."
+ sleep 3
+
+ echo
+ echo "-----------------------------------------------------------"
+ echo "Powering off ..."
+ poweroff && logout
+
+ """)
+
+ vm = self.get_temp_tpl_vm()
+ power_state = vm.runtime.powerState
+ LOG.debug(_("Current state of template VM is {!r}").format(power_state))
+ if power_state.strip().lower() == "poweredoff":
+ LOG.info(_("Template VM is already shut off."))
+ return
+
+ if power_state.strip().lower() != "poweredon":
+ raise ExpectedHandlerError(
+ _("Cannot shut down VM {h!r}, is currently in state {s!r}.").format(
+ h=self.tpl_ip, s=power_state))
+
+ LOG.info(_("Powering off VM {!r} per SSH ...").format(self.tpl_ip))
+ print_section_start('poweroff', 'Powering off VM ...', collapsed=True)
+ result = self.exec_remote(cmd)
+
+ LOG.debug(_("Output on {}:").format('STDOUT') + '\n' + result['out'])
+ if not result['err']:
+ LOG.debug(_("No output on {}.").format('STDERR'))
+
+ print_section_end('poweroff')
+
+ if result['err']:
+ LOG.warn(_("Output on {}:").format('STDERR') + '\n' + result['err'])
+
+ cur_diff = 0
+ start_shutdown = time.time()
+ cur_time = start_shutdown
+ last_dot = cur_time
+ i = 0
+
+ LOG.debug(_("Waiting for successful shut down of VM ..."))
+ if self.verbose <= 3:
+ print(' ==> ', end='', flush=True)
+ if self.verbose > 3:
+ LOG.debug(_("Current state of template VM is {!r}").format(power_state))
+
+ while power_state.strip().lower() != "poweredoff":
+
+ time.sleep(0.2)
+
+ dot = '.'
+ if power_state.lower().strip() != 'poweredon':
+ dot = 'S'
+
+ cur_time = time.time()
+ cur_diff = cur_time - start_shutdown
+ if (self.verbose <= 3) and ((cur_time - last_dot) >= self.interval_dot):
+ print(dot, end='', flush=True)
+ i += 1
+ if i >= 60:
+ print('', flush=True)
+ print(' ', end='', flush=True)
+ i = 0
+ last_dot = cur_time
+
+ vm = self.get_temp_tpl_vm()
+ power_state = vm.runtime.powerState
+ if self.verbose > 3:
+ LOG.debug(_(
+ "Still waiting for completing shutdown, current state is {!r}.").format(
+ power_state))
+ if power_state.strip().lower() == "poweredoff":
+ print('', flush=True)
+ LOG.info(_(
+ "Template VM {h!r} was shutting down in {t:0.1f} seconds.").format(
+ h=self.tpl_ip, t=cur_diff))
+ return
+ if cur_diff >= self.cfg.max_wait_for_shutdown_vm:
+ break
+
+ print('', flush=True)
+ raise ExpectedHandlerError(_(
+ "VM {h!r} was not shut down after {t:0.1f} seconds, current state is {s!r}.").format(
+ h=self.tpl_ip, t=cur_diff, s=power_state))
+
+ # -------------------------------------------------------------------------
+ def change_mac_address(self):
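+        """Set a new, randomized MAC address on the NIC of the template VM."""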
+
+ LOG.info(_("Setting a new, randomized MAC address for template VM ..."))
+
+ last_tuple1 = random.randint(1, 254)
+ last_tuple2 = random.randint(1, 254)
+ new_mac = self.cfg.mac_address_template.format(last_tuple1, last_tuple2)
+ LOG.debug(_("New MAC address: {!r}.").format(new_mac))
+
+ vm = self.get_temp_tpl_vm()
+ self.vsphere.set_mac_of_nic(vm, new_mac, nic_nr=0)
+
+ # -------------------------------------------------------------------------
+ def rotate_templates(self):
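+        """Rename the current template with a timestamp and purge old templates."""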
+
+ LOG.info(_("Searching for existing templates and rotate them ..."))
+ print_section_start('rotate_templates', "Rotating templates ...", collapsed=True)
+ re_is_numeric = re.compile(r'^\s*(\d+)\s*$')
+
+ pattern_tpl = r'^' + re.escape(self.cfg.template_name)
+ re_tpl = re.compile(pattern_tpl, re.IGNORECASE)
+
+ templates = self.vsphere.get_vms(re_tpl, is_template=True, as_vmw_obj=True)
+        if not templates:
+            LOG.info(_("Did not find any existing templates."))
+            print_section_end('rotate_templates')
+            return
+ msg = ngettext(
+ "Found one existing template.", "Found {} existing templates.",
+ len(templates)).format(len(templates))
+ LOG.debug(msg)
+
+ templates_ts = {}
+ templates_sorted = []
+ new_template_names = {}
+
+ for template in templates:
+ tpl_name = template.summary.config.name
+ val_map = {}
+ for extra_cfg in template.config.extraConfig:
+ key = extra_cfg.key
+ value = extra_cfg.value
+ val_map[key] = value
+ created = time.time()
+ if 'created' in val_map:
+ if val_map['created'] and re_is_numeric.match(val_map['created']):
+ created = float(val_map['created'])
+ ts_created = datetime.datetime.fromtimestamp(created, tz=TZ)
+ LOG.debug(_("Found template {n!r}, created: {ts}.").format(
+ n=tpl_name, ts=ts_created.isoformat(' ')))
+ if self.verbose > 2:
+ LOG.debug("Template Summary Config:\n{}".format(template.summary.config))
+ LOG.debug("Template Extra Config:\n{}".format(pp(val_map)))
+
+ templates_ts[tpl_name] = created
+
+ for tpl_name in sorted(templates_ts.keys(), key=lambda tpl: templates_ts[tpl]):
+ templates_sorted.append(tpl_name)
+
+ LOG.debug(_("Templates sorted by creation date:") + '\n' + pp(templates_sorted))
+ templates_sorted.reverse()
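+        # The list is now ordered newest first. Keep the newest
+        # (max_nr_templates_stay - 1) existing templates, so that together with
+        # the just created template at most max_nr_templates_stay remain.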
+ templates_to_remove = []
+ i = 0
+ for tpl_name in templates_sorted:
+ if i > self.cfg.max_nr_templates_stay - 2:
+ templates_to_remove.append(tpl_name)
+ i += 1
+ templates_to_remove.reverse()
+ if templates_to_remove:
+ LOG.debug(_("Templates to remove:") + '\n' + pp(templates_to_remove))
+ else:
+ LOG.debug(_("There are no templates to remove."))
+
+ for template in templates:
+ tpl_name = template.summary.config.name
+ if tpl_name in templates_to_remove:
+ LOG.info(_("Removing template {!r} ...").format(tpl_name))
+ self.vsphere.purge_vm(template)
+ LOG.debug(_("Successful removed template {!r}.").format(tpl_name))
+ continue
+ if tpl_name.strip().lower() == self.cfg.template_name.strip().lower():
+ created = templates_ts[tpl_name]
+ ts_created = datetime.datetime.fromtimestamp(created, tz=TZ)
+ i = 0
+ dt = ts_created.strftime('%Y-%m-%d_%H-%M-%S')
+ new_name = "{t}.{d}".format(t=tpl_name, d=dt)
+ tname = new_name.strip().lower()
+ while tname in new_template_names:
+ new_name = "{t}.{d}-{i}".format(t=tpl_name, d=dt, i=i)
+ tname = new_name.strip().lower()
+ i += 1
+ new_template_names[tname] = 1
+ LOG.info(_("Renaming template {o!r} => {n!r} ...").format(o=tpl_name, n=new_name))
+ task = template.Rename_Task(new_name)
+ self.vsphere.wait_for_tasks([task])
+ LOG.debug(_("Successful renamed template into {!r}.").format(new_name))
+ else:
+ tname = tpl_name.strip().lower()
+ new_template_names[tname] = 1
+
+ print_section_end('rotate_templates')
+
+ # -------------------------------------------------------------------------
+ def rename_and_change_vm(self):
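+        """Rename the temporary VM and mark it as a VMWare template."""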
+
+ LOG.info(_("Renaming VM {o!r} => {n!r} ...").format(
+ o=self.tpl_vm_fqdn, n=self.cfg.template_name))
+ print_section_start(
+ 'rename_and_change_vm', "Renaming VM and mark as template ...", collapsed=True)
+
+ vm = self.get_temp_tpl_vm()
+ task = vm.Rename_Task(self.cfg.template_name)
+ self.vsphere.wait_for_tasks([task])
+ LOG.debug(_("Successful renamed VM into {!r}.").format(self.cfg.template_name))
+
+ LOG.info(_("Changing VM {!r} into a VMWare template ...").format(
+ self.cfg.template_name))
+ vm.MarkAsTemplate()
+ LOG.debug(_("Object {!r} is now a VMWare template.").format(self.cfg.template_name))
+ print_section_end('rename_and_change_vm')
+
+ # -------------------------------------------------------------------------
+ def create_root_authkeys(self):
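+        """Generate an authorized_keys file for root from SSH keys in LDAP."""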
+
+ LOG.info(_("Creating authorized keys of root from LDAP ..."))
+
+ print_section_start(
+ 'create_root_authkeys', "Generating authorized_keys of root.", collapsed=True)
+ prefix = 'tmp.authorized_keys.root.'
+ (fh, tmp_keys_file) = tempfile.mkstemp(prefix=prefix, text=True)
+ self.auth_keys_file = Path(tmp_keys_file)
+ os.close(fh)
+ LOG.debug(_("Using temporary file {!r} for authorized keys of root.").format(
+ tmp_keys_file))
+
+ try:
+ self.connect_ldap()
+
+ line = ('#' * 60) + '\n'
+ auth_keys = line
+
+ admins = self.get_ldap_admins()
+
+ for uid in sorted(admins.keys(), key=str.lower):
+
+ admin = admins[uid]
+
+ for ssh_key in sorted(admin['keys']):
+
+ parts = ssh_key.split()
+ used_key = parts[0] + ' ' + parts[1] + ' '
+ used_key += admin['cn'] + ' <' + admin['mail'] + '>'
+ auth_keys += used_key + '\n'
+ auth_keys += line
+
+ msg = _("Generated authorized keys for root:") + '\n' + auth_keys
+ LOG.debug(msg)
+
+ finally:
+ self.disconnect_ldap()
+
+ self.auth_keys_file.write_text(auth_keys)
+ print_section_end('create_root_authkeys')
+
+ # -------------------------------------------------------------------------
+ def get_ldap_admins(self):
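+        """Return all admins from LDAP with their CN, mail and public SSH keys."""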
+
+ if not self.ldap:
+ msg = _("No LDAP connection initialized.")
+ raise HandlerError(msg)
+
+ admins = {}
+
+ attrs = ['cn', 'dn', 'mail', 'sshPublicKey', 'uid']
+ ldap_config = self.cfg.ldap_connection['default']
+ fltr = ldap_config.admin_filter
+
+ msg = _("Trying to get a list of all DPX admins with their public SSH keys ...")
+ LOG.debug(msg)
+
+ msg = _("LDAP search starting in {!r} with filter:").format(ldap_config.base_dn)
+ msg += '\n' + fltr
+ LOG.debug(msg)
+
+ status, result, response, request = self.ldap.search(
+ search_base=ldap_config.base_dn, search_scope=ldap3.SUBTREE, search_filter=fltr,
+ attributes=attrs, time_limit=self.cfg.ldap_timeout)
+
+ if not status:
+ msg = _("Error retrieving DPX admin list from LDAP:")
+ msg += ' ' + result
+ raise HandlerError(msg)
+
+ for entry in response:
+
+ uid = None
+ admin = {
+ 'cn': None,
+ 'dn': None,
+ 'mail': None,
+ 'keys': [],
+ 'uid': None,
+ }
+
+ admin['dn'] = entry['dn']
+
+ for attr in entry['attributes']:
+
+ val = entry['attributes'][attr]
+
+ if attr.lower() == 'uid':
+ if is_sequence(val):
+ uid = val[0]
+ else:
+ uid = val
+ admin['uid'] = uid
+
+ if attr.lower() == 'cn':
+ if is_sequence(val):
+ admin['cn'] = val[0]
+ else:
+ admin['cn'] = val
+
+ if attr.lower() == 'mail':
+ if is_sequence(val):
+ admin['mail'] = val[0]
+ else:
+ admin['mail'] = val
+
+ if attr.lower() == 'sshpublickey':
+ if is_sequence(val):
+ for key in val:
+ admin['keys'].append(key)
+ else:
+ admin['keys'].append(val)
+
+ if self.verbose == 2:
+ msg = _("Got an admin {cn} <{mail}>.").format(cn=admin['cn'], mail=admin['mail'])
+ LOG.debug(msg)
+ elif self.verbose > 2:
+ msg = _("Got an admin:") + '\n' + pp(admin)
+ LOG.debug(msg)
+
+ admins[uid] = admin
+
+ if not admins:
+ msg = _("Did not found any admins below base DN {!r} with filter:")
+ msg = msg.format(self.cfg.ldap_connection['default'].base_dn)
+ msg += '\n' + fltr
+ raise HandlerError(msg)
+
+ return admins
+
+
+# =============================================================================
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list