# Source code for apama.docker.framework

#!/usr/bin/env python
# Copyright (c) 2015-2023 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or its subsidiaries and/or its affiliates and/or their licensors. 
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG 

"""
Support for using Docker, Docker Compose, or Docker Swarm from PySys tests. 

.. autosummary::
  DockerImage
  DockerContainer
  DockerService
  DockerSecret
  DockerSwarm
  DockerComposition
"""

import binascii
import collections
import io
import os
import re
import shutil
import subprocess
import tempfile
import threading
import time

from pysys.constants import *
from pysys.utils import filecopy
from pysys.utils import filereplace

# Result triple returned by DockerHelper._executeDockerCommand: the started
# process handle plus the stdout/stderr file paths (either may be None).
DOCKER_COMMAND_RETURN = collections.namedtuple('Docker', ('process', 'stdOut', 'stdErr'))

try:
	import getpass
	_THIS_USER = getpass.getuser()
except:
	_THIS_USER = 'unknownuser'

# Lightweight container similar to Enum but with associated data; not public API
class Config:
	"""Describes how stdout/stderr filenames are allocated for docker commands.

	Instances compare equal when their configuration number matches; any associated
	value (e.g. a custom base filename) does not take part in equality. Use the
	classmethod factories rather than calling the constructor directly.
	"""

	# Private class variables identifying each configuration. Don't use directly
	manual_config = 0
	display_name_without_count = 1
	display_name_with_count = 2
	custom_name_without_count = 3
	custom_name_with_count = 4

	def __init__(self, number, val=None):
		# number selects the configuration; val carries optional associated data
		# such as a custom base filename
		self.val = val
		self.number = number

	# Associated value does not take part in equals
	def __eq__(self, other):
		if not isinstance(other, Config):
			return False
		return self.number == other.number

	# Defining __eq__ alone makes instances unhashable in Python 3; hash on the
	# same field used for equality so (a == b) implies (hash(a) == hash(b)).
	def __hash__(self):
		return hash(self.number)

	@classmethod
	def MANUAL_CONFIG(cls):
		"""
		User specified stdout and stderr. Will not provide any default stdout, stderr
		"""
		return cls(Config.manual_config)

	@classmethod
	def DISPLAY_NAME_WITHOUT_COUNT(cls):
		"""
		Uses <DisplayName> as base stdout, stderr filename
		Do not specify stdout, stderr or stdouterr with this configuration
		"""
		return cls(Config.display_name_without_count)

	@classmethod
	def DISPLAY_NAME_WITH_COUNT(cls):
		"""
		Uses <DisplayName> and instanceCount to generate stdout, stderr filename. Generated filenames may differ
		when using LEGACY_STD_OUT_ERR_FILENAMES
		Do not specify stdout, stderr or stdouterr with this configuration
		"""
		return cls(Config.display_name_with_count)

	@classmethod
	def CUSTOM_NAME_WITHOUT_COUNT(cls, str=None):  # param name kept for backwards compatibility despite shadowing the builtin
		"""
		Uses user specified str as base stdout, stderr filename
		Do not specify stdout, stderr or stdouterr with this configuration
		"""
		return cls(Config.custom_name_without_count, str)

	@classmethod
	def CUSTOM_NAME_WITH_COUNT(cls, str=None):  # param name kept for backwards compatibility despite shadowing the builtin
		"""
		Uses user specified str and instanceCount to generate stdout, stderr filename. Generated filenames may differ
		when using LEGACY_STD_OUT_ERR_FILENAMES
		Do not specify stdout, stderr or stdouterr with this configuration
		"""
		return cls(Config.custom_name_with_count, str)

class DockerHelper(object):
	"""Base class which provides common functionality needed by both Docker and Kubernetes.

	This class is not intended to be used directly, but is invoked or subclassed by other
	classes in the Apama docker framework.
	"""

	def __init__(self, parent, displayName):
		self.__parent = parent
		self.__displayName = displayName

	def __str__(self):
		return self.__displayName

	def _safelyOpenProcessOutputFile(self, path, **kwargs):
		"""
		This is for internal use only and not part of public supported API - do not use.
		:meta private:

		Opens a file which contains process stdout/err for reading.

		Uses unicode character string mode using the OS default encoding (or UTF-8 if in
		7 bit ASCII/LANG=C locale) with errors='replace' so that we downgrade gracefully
		if unexpected characters are found.

		:param path: Path of file, relative to output dir or absolute path.
		:return: The open file handle, in character string mode.
		"""
		fullPath = os.path.join(self.__parent.output, path)
		encoding = PREFERRED_ENCODING
		# docker executables have sometimes been seen to return UTF-8 chars
		# even in LANG=C/ascii encoding, so since utf-8 is a superset of ascii,
		# permit any utf-8 char when reading output in ascii locales
		if encoding in ('ANSI_X3.4-1968', 'ascii', 'us-ascii'):
			encoding = 'utf-8'
		return io.open(fullPath, encoding=encoding, errors='replace')
[docs] def getAllContainers(self, stdout='__getcontainers.out', **kwargs): """ Returns all the service containers for a running environment :return: tuple containing (process, stdout, stderr) """ assert (self.isUp) return DockerHelper._executeDockerCommand(self.parent, arguments=["ps", "-a"], stdouterrConfig=Config.MANUAL_CONFIG(), stdout=stdout, **kwargs)
[docs] def getVolumes(self, name=None, **kwargs): """ Returns all the volumes for a running environment :return: A map from volume name to tuple of (driver type, full name) """ if not name: name = self.name _, stdout, _ = DockerHelper._executeDockerCommand(self.parent, arguments=["volume", "ls"], stdouterrConfig=Config.MANUAL_CONFIG(), stdout='__getvolumes.out', **kwargs) with self._safelyOpenProcessOutputFile(stdout) as f: lines = f.readlines() ret = {} for line in lines: m = re.match("(\w*)\s*" + name + "_(.*)", line) if m: volumeDriver = m.group(1) volumeName = m.group(2) m2 = re.match("(\w*)\s*(\w*)", line) ret[volumeName] = (volumeDriver, m2.group(2)) return ret
[docs] def getNetworks(self, name=None, **kwargs): """ Returns all the networks for a running environment :return: A map from network name to tuple of (driver type, full name, id) """ if not name: name = self.name _, stdout, _ = DockerHelper._executeDockerCommand(self.parent, arguments=["network", "ls"], stdouterrConfig=Config.MANUAL_CONFIG(), stdout='__getnetworks.out', **kwargs) with self._safelyOpenProcessOutputFile(stdout) as f: lines = f.readlines() ret = {} for line in lines: m = re.match("(\w*)\s*" + name + "_(\w*)\s*(\w*)", line) if m: networkId = m.group(1) networkName = m.group(2) networkDriver = m.group(3) m2 = re.match("(\w*)\s*(\w*)", line) ret[networkName] = (networkDriver, m2.group(2), networkId) return ret
[docs] def _gcImp(self, name): """ Garbage-collects volumes and networks for a specific name """ volumes = self.getVolumes(name) volumeGC = [] for vol in volumes: volType, volName = volumes[vol] volumeGC.append(volName) networks = self.getNetworks(name) networkGC = [] for net in networks: netType, netName, netId = networks[net] networkGC.append(netName) if len(volumeGC) > 0: self.parent.log.info("GC-ing Docker volumes " + str(volumeGC)) DockerHelper._executeDockerCommand(self.parent, arguments=["volume", "rm"] + volumeGC, displayNamePostfix='rm', stdouterrConfig=Config.MANUAL_CONFIG(), stdout='__gcVolumes.out', ignoreExitStatus=True, abortOnError=False) if len(networkGC) > 0: self.parent.log.info("GC-ing Docker networks " + str(networkGC)) DockerHelper._executeDockerCommand(self.parent, arguments=["network", "rm"] + networkGC, displayName='docker volume rm', stdouterrConfig=Config.MANUAL_CONFIG(), stdout='__gcNetworks.out', ignoreExitStatus=True, abortOnError=False)
	# This method is made static so that it can be called without creating an object of DockerHelper. Useful for a few cases
	# where we need to run docker commands prior to creating a DockerHelper instance
[docs] @staticmethod def _executeDockerCommand(parent, arguments, command=None, environs=None, state=FOREGROUND, timeout=TIMEOUTS['WaitForProcess'], stdout=None, stderr=None, displayName=None, stdouterrConfig=Config.DISPLAY_NAME_WITH_COUNT(), abortOnError=None, ignoreExitStatus=None, displayNamePostfix=None, stdOutErr=None): """ Executes docker commands with default values :param parent: Reference to the parent PySys testcase :param arguments: the arguments to docker command :param command: the docker command to run. If not provided, defaults to parent.project.DOCKER_EXE. Added this field to execute docker-compose commands in a similar fashion :param environs: A dictionary of the environment to run the process in. If "DOCKER_HOST" key is absent, default value is added :param state: Run the process either in the C{FOREGROUND} or C{BACKGROUND} (defaults to C{FOREGROUND}) :param timeout: The timeout period after which to terminate processes running in the C{FOREGROUND}. :param stdout: User specified stdout. To be used with Config.MANUAL_CONFIG() :param stderr: User specified stderr. To be used with Config.MANUAL_CONFIG() :param stdOutErr: User specified stdouterr. To be used with Config.MANUAL_CONFIG() :param displayName: Logical name of the process used for display. Defaults to 'docker <arguments[0]>' :param stdouterrConfig: Specifies the way to allocate stdout, stderr files :param abortOnError: If true abort the test on any error outcome :param ignoreExitStatus: If False, a BLOCKED outcome is added if the process terminates with non-zero exit code :param displayNamePostfix: Appended to display name before calling startProcess. 
Used to make displayName more verbose without affecting stdout, stderr filename generation :return: tuple containing (process, stdout, stderr) """ if not environs: environs = {'DOCKER_HOST': parent.project.DOCKER_HOST, 'PATH':os.environ['PATH'], 'SSH_AUTH_SOCK':os.environ.get('SSH_AUTH_SOCK', ''), 'HOME': os.environ.get('HOME', '')} elif 'DOCKER_HOST' not in environs.keys(): environs['DOCKER_HOST'] = parent.project.DOCKER_HOST if not 'PATH' in environs: environs['PATH']=os.environ['PATH'] if not 'SSH_AUTH_SOCK' in environs and 'SSH_AUTH_SOCK' in os.environ: environs['SSH_AUTH_SOCK']=os.environ['SSH_AUTH_SOCK'] if not 'HOME' in environs and 'HOME' in os.environ: environs['HOME']=os.environ['HOME'] if not abortOnError: abortOnError = parent.defaultAbortOnError if not ignoreExitStatus: ignoreExitStatus = parent.project.getProperty('defaultApamaIgnoreExitStatus', False) if hasattr(parent.project, 'defaultApamaIgnoreExitStatus') else parent.defaultIgnoreExitStatus if not displayName: displayName = 'docker %s' % arguments[0] if not command: command = parent.project.DOCKER_EXE if stdouterrConfig == Config.MANUAL_CONFIG(): pass elif stdouterrConfig == Config.DISPLAY_NAME_WITHOUT_COUNT(): stdOutErr = displayName.replace(' ', '_').replace('-', '_') elif stdouterrConfig == Config.CUSTOM_NAME_WITHOUT_COUNT(): stdOutErr = stdouterrConfig.val elif (not hasattr(parent.project, 'LEGACY_STD_OUT_ERR_FILENAMES')) or parent.project.LEGACY_STD_OUT_ERR_FILENAMES.lower() == 'true': if stdouterrConfig == Config.DISPLAY_NAME_WITH_COUNT(): instanceCount = parent.getInstanceCount(displayName) name = displayName elif stdouterrConfig == Config.CUSTOM_NAME_WITH_COUNT(): instanceCount = parent.getInstanceCount(stdouterrConfig.val) name = stdouterrConfig.val stdOutErr = ('%s %s' % (name, instanceCount)).replace(' ', '_').replace('-', '_') else: stdout, stderr = parent.allocateUniqueStdOutErr(displayName.replace(' ', '_').replace('-', '_')) if displayNamePostfix: displayName = '%s %s' % 
(displayName, displayNamePostfix) process = parent.startProcess(command=command, arguments=arguments, environs=environs, state=state, displayName=displayName, timeout=timeout, stdout=stdout, stderr=stderr, abortOnError=abortOnError, ignoreExitStatus=ignoreExitStatus, stdouterr=stdOutErr, onError=lambda process: parent.grepOrNone(stderr, '.+')) return DOCKER_COMMAND_RETURN(process, stdout, stderr)
class DockerImage(DockerHelper):
	"""Helper class for working with Docker images.

	Sort-of manages the lifetime of the images it wraps; in most cases, it will keep them
	around for a long time so that the Docker cache can speed up other tests on the same
	machine, but it knows how to GC them at a later date due to deterministic naming that
	encodes the ownership of the image.
	"""

	def __init__(self, parent, name, own=True):
		""" Create from a named image and parent testcase

		:param parent: Reference to the parent PySys testcase
		:param name: The docker image name
		:param own: Whether this object owns the image, i.e. removes it on cleanup
		"""
		DockerHelper.__init__(self, parent=parent, displayName=name)
		self.name = name
		self.parent = parent
		self.tag = None
		self.own = own
		# run the image garbage-collector at most once per pysys invocation
		if not DockerImage.__doneGC:
			self.__gcImages()
		self.parent.addResource(self)
[docs] @staticmethod def generateUniqueName(): """ Return a unique name, used as docker image name. The Name starts with string pysys followed by username, process id, localtime and random number. :return: Docker Image name """ # regex used for valid names is [a-z0-9]([-a-z0-9]*[a-z0-9])? return "pysys-{}{:06d}{:011d}{}".format(_THIS_USER.lower(), os.getpid(), int(time.mktime(time.localtime())), binascii.b2a_hex(os.urandom(2)).decode('utf-8'))
[docs] @classmethod def fromBlank(cls, parent): """ For when you are generating a docker image by some external means, but want the test framework to manage its lifetime. Contains an auto-generated name (use getName()) for you to tag your image with. :param parent: Reference to the parent PySys testcase :return: a DockerImage """ name = DockerImage.__makeName() return DockerImage(parent, name, True)
[docs] @classmethod def fromExisting(cls, parent, name, own=False, pull=False, **kwargs): """ Wrap an existing image :param parent: Reference to the parent PySys testcase :param name: Name of the existing image :param own: Whether or not this object owns the image i.e. deletes it later :param pull: Whether to pull this tag before running """ if pull: with DockerImage.__buildMutex: DockerHelper._executeDockerCommand(parent, arguments=['pull', name], displayNamePostfix=name, **kwargs) return DockerImage(parent, name, own)
""" Mutex to cover invocations of docker build. Empirical testing and https://github.com/docker/docker/issues/9656 suggest that this is the right thing for performance. Further, two tests kicking off 'docker build' at the same time for the same content will fail to take advantage of the cache. """ __buildMutex = threading.Lock()
[docs] @classmethod def fromDockerfile(cls, parent, file, context=None, otherfiles=None, imageSubst=None, abortOnError=True, noCache=False, buildArgs=None, timeout=1200): """ Construct an image from a Dockerfile :param parent: Reference to the parent PySys testcase :param file: Absolute location of the Dockerfile :param context: Directory containing the build context. If it's a totally context-free Dockerfile, it'll just run in a blank directory. :param otherfiles: Other files to copy into the root of the context before build (e.g. a wrapper that we're packaging). If the path is relative, it's taken relative to the directory the Dockerfile is taken from. If the path is absolute, it's treated as absolute. :param imageSubst: Maps a nice friendly image name in the FROM of a Dockerfile to a DockerImage that should actually be used (so auto-testing doesn't have namespacing problems). It's a tuple (name, image). :param abortOnError: Abort the test if the build fails :param noCache: Whether or not to apply --no-cache to docker build :param timeout: The timeout in seconds. """ parent.log.info('Creating image from dockerfile: %s', file) # Generate a unique name/tag for the image name = DockerImage.__makeName() dockerImage = DockerImage(parent, name, True) # Copy the context into our output directory; using cp -a, because shutil.copytree changes stats on symlinks only (!?) 
which breaks # Docker's build caching context_tmp = os.path.join(parent.output, name + "_buildcontext") if context: parent.log.info("Copying context from %s to %s ", context, context_tmp) parent.startProcess(command='/bin/cp', arguments=["-a", context, context_tmp], stdout="__cp.out", stderr="__cp.err", timeout=timeout) else: os.mkdir(context_tmp) # Copy & Preprocess the Dockerfile into the copied context dst_dockerfile = os.path.join(context_tmp, 'Dockerfile') substs = {} if imageSubst: substs["FROM " + imageSubst[0]] ="FROM " + imageSubst[1].getName() substs["ARG " + imageSubst[0].upper() + "_IMAGE=" + imageSubst[0]] = "ARG " + imageSubst[0].upper() + "_IMAGE=" + imageSubst[1].getName() substs["FROM ${APAMA_IMAGE}"] = "FROM " + imageSubst[1].getName() filereplace.replace(file,dst_dockerfile, substs) if otherfiles: for otherfile in otherfiles: if os.path.isabs(otherfile): shutil.copy2(otherfile, context_tmp) else: shutil.copy2(os.path.dirname(file) + '/' + otherfile, context_tmp) # copy the docker file to the test output dir so we can debug any problems (though not if someone has already created one) if not os.path.exists(parent.output+'/Dockerfile'): shutil.copy2(dst_dockerfile, parent.output+'/Dockerfile') reqArgs = ["build", "--force-rm", "--no-cache=" + str(noCache).lower()] # additional --build-args lines will originate from the param buildArgs if buildArgs: for kv in buildArgs: reqArgs.append( "--build-arg" ) reqArgs.append( kv ) reqArgs.append( "-t" ) reqArgs.append( name ) reqArgs.append( context_tmp ) # Build the image try: DockerImage.__buildMutex.acquire() build, stdout, stderr = DockerHelper._executeDockerCommand(parent, arguments=reqArgs, ignoreExitStatus=True, abortOnError=abortOnError) finally: DockerImage.__buildMutex.release() # Remove our copy of the context shutil.rmtree(context_tmp) if(build.exitStatus != 0 and abortOnError): parent.logFileContents(stderr, tail=True) or parent.logFileContents(stdout, tail=True, maxLines=30) 
parent.log.info('docker build failed - arguments: %s', reqArgs) parent.abort(BLOCKED, "docker build failed: %s" % (parent.grepOrNone(stderr, '.*') or build.exitStatus)) return dockerImage
[docs] def getName(self): """ The name/tag of this image """ return self.tag if self.tag else self.name
	def addTag(self, tag, **kwargs):
		""" Tag this image with an additional name; subsequent getName() calls return the new tag. """
		with DockerImage.__buildMutex:
			DockerHelper._executeDockerCommand(self.parent, arguments=['tag', self.getName(), tag], displayNamePostfix=tag, **kwargs)
		self.tag = tag

	def push(self, **kwargs):
		""" Push this image to the registry, using the project's DOCKER_CONFIG for credentials. """
		with DockerImage.__buildMutex:
			try:
				_, stdout, stderr = DockerHelper._executeDockerCommand(self.parent, arguments=["--config", self.parent.project.DOCKER_CONFIG, "push", self.getName()], displayName='docker push', **kwargs)
			except Exception:
				# best-effort: log whichever output file the caller configured before re-raising
				self.parent.logFileContents(kwargs.get('stdOutErr','')+'.err', tail=True) or self.parent.logFileContents(kwargs.get('stderr'), tail=True) or self.parent.logFileContents(kwargs.get('stdout'), tail=True, maxLines=30)
				raise

	""" Class variable used to confine use of __gcImages to a single invocation of pysys, rather than once per test """
	__doneGC = False

	def __gcImages(self):
		"""
		Garbage-collects images that we don't need any more. Approximately, this gets rid of
		pysys-created images from pysys processes that are not currently running, as long as
		they are fairly old (aids in speeding up everybody's test runs, so that they can share
		images that are likely to be identical within the space of a day's development)

		Since this function is called only once in a single invocation of pysys and is not
		directly related to any testcases, we are not setting 'ignoreExitCode' and
		'abortOnError' explicitly. It will use default values specified in pysysproject
		"""
		DockerImage.__doneGC = True
		DockerHelper._executeDockerCommand(self.parent, arguments=["images", "--no-trunc"], stdouterrConfig=Config.MANUAL_CONFIG(), stdout='__gcImages.out', ignoreExitStatus=True, abortOnError=False)
		# skip the column-header line of the "docker images" output
		with self._safelyOpenProcessOutputFile("__gcImages.out") as f:
			image_lines = f.readlines()[1:]
		pendingGC = []
		for line in image_lines:
			# image names embed "<6-digit pid><11-digit creation timestamp>" after "pysys";
			# NOTE(review): this pattern has no '-' after "pysys", whereas generateUniqueName()
			# produces "pysys-<user>..." - confirm which naming scheme the GC'd images use
			match = re.match("^pysys(\d\d\d\d\d\d)(\d\d\d\d\d\d\d\d\d\d\d)[^ ]+", line)
			if match:
				(name, pid, timestamp) = ( match.group(0), int(match.group(1)), int(match.group(2)) )
				ageSeconds = time.time() - timestamp
				# only GC images created by other (presumed finished) pysys processes
				if (pid != os.getpid() and ageSeconds > 2*60*60): # 2 hours
					pendingGC.append(name)
		if len(pendingGC) > 0:
			self.parent.log.info("GC-ing Docker images " + str(pendingGC))
			# Ignore images running in containers
			DockerHelper._executeDockerCommand(self.parent, arguments=['rmi'] + pendingGC, stdouterrConfig=Config.MANUAL_CONFIG(), abortOnError=False, ignoreExitStatus=True)
			self.parent.log.info('Done GC-ing images, commencing test now.')
			self.parent.log.info('')
		else:
			self.parent.log.debug('No docker images that meet GC criteria')

	@classmethod
	def __makeName(cls):
		""" Generate a unique name for an image """
		return DockerImage.generateUniqueName()

	def __del__(self):
		""" Make tests fail if unable to remove/untag a docker image"""
		# remove the image (or its tag, if one was added) only if this object owns it
		arg = None
		if self.own:
			arg = self.name
		if self.tag:
			arg = self.tag
		if arg:
			DockerHelper._executeDockerCommand(self.parent, arguments=['rmi', arg], stdouterrConfig=Config.MANUAL_CONFIG(), abortOnError=False, ignoreExitStatus=True)
class DockerService(DockerHelper):
	"""Helper class for working with Docker services (via "docker service ..." commands).

	Owns the service that it wraps, and will garbage collect it when this object is deleted
	(which will only happen when the parent testcase finishes).

	Non-returning methods are Fluent - that is, they return 'self' to allow method-chaining
	"""
[docs] @classmethod def fromImage(cls, image): """ Prepare to create and run a service from an image :param image: a DockerImage to create from """ ret = DockerService(displayName='DockerService %s'%image,parent=image.parent) assert image.__class__.__name__=='DockerImage', type(image) ret.image = image ret.parent = image.parent return ret
[docs] @classmethod def fromExisting(cls, parent, name, own=True): """ Wrap an existing running service :param parent: Reference to the parent PySys testcase :param name: Name of the existing image :param own: Whether or not this object owns the service i.e. deletes it later """ ret = DockerService(displayName='DockerService %s'%name,parent=parent) ret.image = None ret.parent = parent ret.name = name ret.own = own ret.started = True ret.__acquirePorts() return ret
def __init__(self, displayName=None, parent=None): DockerHelper.__init__(self, parent=parent, displayName=displayName) self.parent=parent self.started = False self.extraArgs = [] self.own = True self.totalStarts = 0 # Used to keep track of total number of times a Docker instance has been started # which helps when grepping logfiles etc.
[docs] def run(self, cmd=None, extraArgs=None, foreground=False, abortOnError=True, environs=None, **kwargs): """ Run the service. The default UID for processes inside the service will be the current uid to aid with host-volume sharing :param cmd: Optional command + arguments list to run in the service :param extraArgs: list of extra arguments to pass to "docker run" :param foreground: True if this method should only return after the command has finished executing :param abortOnError: Abort the test if starting the service fails :param environs: A dictionary of the environment to run the process in (defaults to clean environment) """ assert(not self.started) self.started = True # Generate a unique name for the image self.name = DockerImage.generateUniqueName() # Create the service from the image args = ["service", "create", "--name", self.name] if not foreground: args.append('-d') args.extend(self.extraArgs) if extraArgs: args.extend(extraArgs) args.append(self.image.getName()) if cmd: args.extend(cmd) run, _, _ = DockerHelper._executeDockerCommand(self.parent, arguments=args, environs=environs, ignoreExitStatus=True, abortOnError=abortOnError, **kwargs) if run.exitStatus != 0 and abortOnError: self.parent.abort(BLOCKED, "docker service failed") self.parent.addResource(self) if not DockerService.__doneGC: self.__gcServices() self.totalStarts = 1 return self
[docs] def log(self, logfile, **kwargs): """ Stream the docker logs (i.e. the stdout and stderr) for a service that has been run. Sends it live to two files, one for stdout, another for stderr. :param logfile: A named file prefix in the output directory. Suffixed with '.out' and '.err' """ assert(self.started) args = ["service", "logs", "-f", self.name] # Send stdout and stderr to different files as rit and jamc have both separately # observed logging going missing when we log to the same file. DockerHelper._executeDockerCommand(self.parent, arguments=args, displayNamePostfix='logs', state=BACKGROUND, stdouterrConfig=Config.CUSTOM_NAME_WITHOUT_COUNT(logfile), **kwargs) return self
	def __del__(self):
		""" Make tests fail if unable to remove/untag a docker image"""
		# remove the service on cleanup if we own it; a failed removal is reported as a
		# BLOCKED outcome rather than silently ignored
		if self.started and self.own:
			rm, _, _ = DockerHelper._executeDockerCommand(self.parent, arguments=["service", "rm", self.name], displayNamePostfix='rm', stdouterrConfig=Config.MANUAL_CONFIG(), ignoreExitStatus=True, abortOnError=False)
			if rm.exitStatus != 0:
				self.parent.addOutcome(BLOCKED, 'docker service rm %s failed'%self.name)

	""" Class variable used to confine use of __gcServices to a single invocation of pysys, rather than once per test """
	__doneGC = False

	def __gcServices(self):
		"""
		Garbage-collects old running services; the test framework is supposed to remove
		services on shutdown, but sometimes a serious fault bypasses this, so we take a look
		at them occasionally

		Since this function is called only once in a single invocation of pysys and is not
		directly related to any testcases, we are not setting 'ignoreExitCode' and
		'abortOnError' explicitly. It will use default values specified in pysysproject
		"""
		DockerService.__doneGC = True
		DockerHelper._executeDockerCommand(self.parent, arguments=["service", "ls"], stdout='__gcServices.out', displayNamePostfix='ls', stdouterrConfig=Config.MANUAL_CONFIG(), ignoreExitStatus=True, abortOnError=False)
		# skip the column-header line of the "docker service ls" output
		with self._safelyOpenProcessOutputFile("__gcServices.out") as f:
			service_lines = f.readlines()[1:]
		pendingGC = []
		for line in service_lines:
			line = line.rstrip()
			# service names embed "<6-digit pid><11-digit start timestamp><random hex>";
			# NOTE(review): generateUniqueName() inserts "-<user>" after "pysys", which this
			# pattern does not allow for - confirm which naming scheme services actually use
			match = re.match(".* pysys(\d\d\d\d\d\d)(\d\d\d\d\d\d\d\d\d\d\d)([a-z0-9_]*)( )*$", line)
			if match:
				name = "pysys%s%s%s" % (match.group(1), match.group(2), match.group(3))
				pid = int(match.group(1))
				# only GC services from other (presumed dead) pysys processes, older than 2 hours
				age = int(time.mktime(time.localtime())) - int(match.group(2))
				if(pid != os.getpid() and age > 7200):
					pendingGC.append(name)
		if len(pendingGC) > 0:
			self.parent.log.info("GC-ing Docker services " + str(pendingGC))
			DockerHelper._executeDockerCommand(self.parent, arguments=["service", "rm",] + pendingGC, displayNamePostfix='rm', stdouterrConfig=Config.MANUAL_CONFIG(), ignoreExitStatus=True, abortOnError=False)

	def __waitLogs(self, logfile_base, startCount = 0):
		""" Wait until the streamed service log shows the expected number of server startups,
		aborting the test with BLOCKED if the count is not reached.

		:param logfile_base: Prefix of the log files to stream to (suffixed .out/.err).
		:param startCount: If non-zero, overrides the expected total number of starts.
		"""
		# Check the logfile to be sure the server is ready
		self.log(logfile_base)
		if startCount != 0:
			self.totalStarts = startCount
		else:
			self.totalStarts = self.totalStarts + 1
		# waitForGrep condition ">N-1" means: at least totalStarts occurrences seen
		condition = ">%d" % (self.totalStarts-1)
		matches=self.parent.waitForGrep(file=logfile_base+'.out', expr='Realm Server Startup sequence completed', condition=condition, timeout=TIMEOUTS['WaitForSignal']*4)
		if len(matches) != self.totalStarts:
			self.parent.abort(BLOCKED, "Realm server has not started successfully. Expected %d starts, got %d (see %s.out/err)" % (self.totalStarts, len(matches), logfile_base))
class DockerSecret(DockerHelper):
	"""Helper class for working with Docker secrets (via "docker secret ..." commands).

	Owns the secret that it wraps, and will garbage collect it when this object is deleted
	(which will only happen when the parent testcase finishes).

	Non-returning methods are Fluent - that is, they return 'self' to allow method-chaining
	"""
[docs] @classmethod def fromFile(cls, parent, name, file): """ Create a secret from a file :param parent: Reference to the parent PySys testcase :param file: The path to the file containing the secret contents, in whatever file format is required by the process that will use it :param name: the name of the secret used by Docker. This represents both the canonical name of the secret within a docker server, and the filename through which the secret can be accessed. Ensure the specified name does not conflict with other testcases, for example by using DockerImage.generateUniqueName(), e.g. name="my-secret-%s.properties"%DockerImage.generateUniqueName(). """ ret = DockerSecret(parent=parent, file=os.path.join(parent.output, file), name=name) return ret
[docs] @classmethod def fromContent(cls, parent, name, secretContents): """ Create a named secret containing th specified string contents :param parent: Reference to the parent PySys testcase :param secretContents: The contents of the secret as a character string, in whatever file format is expected by the process that will use it :param name: the name of the secret used by Docker. This represents both the canonical name of the secret within a docker server, and the filename through which the secret can be accessed. Ensure the specified name does not conflict with other testcases, for example by using DockerImage.generateUniqueName(), e.g. name="my-secret-%s.properties"%DockerImage.generateUniqueName(). """ ret = DockerSecret(parent=parent, file='-', secretContents=secretContents, name=name) return ret
def __init__(self,parent,file=None,secretContents=None,name=None): assert name, 'a name must be specified when creating a docker secret' self.__started = False DockerHelper.__init__(self, parent=parent, displayName="DockerSecret %s" % name) self.extraArgs = [] self.own = True self.parent = parent self.file = file self.secretContents = secretContents self.name = name self.__create()
	def getName(self):
		""" The name of this secret

		:return: The canonical secret name as registered with Docker.
		"""
		return self.name
def __create(self): """ Create the secret """ assert(not self.__started) self.__started = True # Create the secret from the image args = ["secret", "create", self.name] out, err = self.parent.allocateUniqueStdOutErr('docker_secret') try: if not self.file or self.file == '-': command = 'echo "{}" | {} '.format(self.secretContents, self.parent.project.DOCKER_EXE) command += ' '.join(args+['-']) self.parent.log.debug('Running Popen: %s', command) # Have to use Popen instead of parent.startProcess because startProcess didn't seem to work with the combination of commands # nb: there is no timeout here so this will block forever with open(out, "w+b") as fout, open(err, 'w+b') as ferr: # Popen commands must be unicode character strings process = subprocess.Popen(command, shell=True, stdout=fout, stderr=ferr) process.communicate() if(process.returncode != 0): self.parent.abort(BLOCKED, "docker secret create failed") else: run, _, _ = DockerHelper._executeDockerCommand(self.parent, arguments=args+[self.file], displayNamePostfix='create', stdouterrConfig=Config.MANUAL_CONFIG(), stdout=out, stderr=err, ignoreExitStatus=True) if(run.exitStatus != 0): self.parent.abort(BLOCKED, "docker secret create failed") except Exception: self.parent.logFileContents(err) raise self.parent.addResource(self) return self def __del__(self): if self.__started and self.own: rm, _, _ = DockerHelper._executeDockerCommand(self.parent, arguments=["secret", "rm", self.name], displayNamePostfix='rm', stdouterrConfig=Config.MANUAL_CONFIG(), ignoreExitStatus=True, abortOnError=False) if(rm.exitStatus != 0): self.parent.addOutcome(BLOCKED, 'docker secret rm %s failed'%self.name)
class DockerContainer(DockerHelper):
	"""Helper class for working with Docker containers (via "docker run"/"docker logs").

	Owns the container that it wraps, and will garbage collect it when this object is deleted
	(which will only happen when the parent testcase finishes).

	Non-returning methods are Fluent - that is, they return 'self' to allow method-chaining
	"""
[docs] @classmethod def fromImage(cls, image): """ Prepare to create and run a container from an image :param image: a DockerImage to create from """ ret = DockerContainer(parent = image.parent, displayName='DockerContainer %s'%image.name) assert image.__class__.__name__=='DockerImage', type(image) ret.image = image return ret
[docs] @classmethod def fromExisting(cls, parent, name, own=True): """ Wrap an existing running container :param parent: Reference to the parent PySys testcase :param name: Name of the existing image :param own: Whether or not this object owns the container i.e. deletes it later """ ret = DockerContainer(parent=parent, displayName='DockerContainer %s'%name) ret.image = None ret.name = name ret.own = own ret.started = True ret.__acquirePorts() return ret
def __init__(self, parent=None, displayName=None): DockerHelper.__init__(self, parent=parent, displayName=displayName) self.parent = parent self.started = False self.extraArgs = [] self.own = True self.name = '' # Initialize this when container is ran self.totalStarts = 0 # Used to keep track of total number of times a Docker instance has been started # which helps when grepping logfiles etc.
[docs] def run(self, cmd=None, extraArgs=None, foreground=False, abortOnError=True, environs=None): """ Run the container. The default UID for processes inside the container will be the current uid to aid with host-volume sharing :param cmd: Optional command + arguments list to run in the container :param extraArgs: list of extra arguments to pass to "docker run" :param foreground: True if this method should only return after the command has finished executing :param abortOnError: Abort the test if starting the container fails :param environs: A dictionary of the environment to run the process in (defaults to clean environment) """ if not DockerContainer.__doneGC: self.__gcContainers() assert(not self.started) self.started = True # Generate a unique name for the image self.name = DockerImage.generateUniqueName() # Create the container from the image args = ["run", "--name", self.name] if not foreground: args.append('-d') args.extend(self.extraArgs) if extraArgs: args.extend(extraArgs) args.append(self.image.getName()) if cmd: args.extend(cmd) instance = self.parent.getInstanceCount("docker run") run, _, _ = DockerHelper._executeDockerCommand(self.parent, arguments=args, environs=environs, ignoreExitStatus=not abortOnError, abortOnError=abortOnError) self.parent.addResource(self) self.__acquirePorts() self.totalStarts = 1 return self
[docs] def log(self, logfile, **kwargs): """ Stream the docker logs (i.e. the stdout and stderr) for a container that has been run. Sends it live to two files, one for stdout, another for stderr. :param logfile: A named file prefix in the output directory. Suffixed with '.out' and '.err' """ assert(self.started) args = ["logs", "-f", self.name] # Send stdout and stderr to different files as rit and jamc have both separately # observed logging going missing when we log to the same file. stdouterr = os.path.join(self.parent.output, logfile) DockerHelper._executeDockerCommand(self.parent, arguments=args, state=BACKGROUND, stdouterrConfig=Config.MANUAL_CONFIG(), stdOutErr=stdouterr, **kwargs) return self
[docs] def volumeFromHost(self, hostpath, containerpath): """ Bind mount a directory from the host to the container i.e. -v Can't do this to a container that has already been started with run() :param hostpath: Absolute path on the host :param containerpath: Absolute path in the container """ assert(not self.started) self.extraArgs.extend(["-v", hostpath + ":" + containerpath]) return self
[docs] def exposePort(self, containerPort): """ Expose a port on the container to a port on the host, only for a container that hasn't been run yet :param containerPort: Port within the container """ assert(not self.started) self.extraArgs.extend(["-p", "%i" % containerPort]) return self
[docs] def getExternalPort(self, port): """ Return the external port for a port inside the container :param port: The port inside the container :return: The port on the host operating system """ return self.portsContainerToHost[port]
[docs] def commit(self, **kwargs): """ Do a 'docker commit' on this container :return: A DockerImage covering the newly created image """ img = DockerImage.fromBlank(self.parent) DockerHelper._executeDockerCommand(self.parent, arguments=["commit", self.name, img.getName()], stdouterrConfig=Config.CUSTOM_NAME_WITHOUT_COUNT('__commit'), **kwargs) return img
[docs] def start(self, startCount = 0, **kwargs): """ Do a 'docker start' on this container (starts a stopped container)""" instance = self.parent.getInstanceCount("docker start") logfile_base = "docker_start_%i" % instance start = self.parent.startProcess(command=self.parent.project.DOCKER_EXE, arguments=["start", self.name], environs={'DOCKER_HOST':self.parent.project.DOCKER_HOST}, displayName="docker start", stdout=logfile_base+'.cmd.out', stderr=logfile_base+'.cmd.err', **kwargs) if(start.exitStatus != 0): self.parent.abort(BLOCKED, "docker start failed") self.started = True self.__waitLogs(logfile_base, startCount) return self
[docs] def stop(self, **kwargs): """ Do a 'docker stop' on this container """ DockerHelper._executeDockerCommand(self.parent, arguments=["stop", "--time=60", self.name], stdouterrConfig=Config.CUSTOM_NAME_WITHOUT_COUNT('__stop'), **kwargs) self.started = False return self
[docs] def restart(self, startCount = 0, **kwargs): """ Do a 'docker restart' on this container """ instance = self.parent.getInstanceCount("docker restart") logfile_base = "docker_restart_%i" % instance restart = self.parent.startProcess(command=self.parent.project.DOCKER_EXE, arguments=["restart", "--time=60", self.name], environs={'DOCKER_HOST':self.parent.project.DOCKER_HOST}, displayName="docker restart", stdout=logfile_base+'.cmd.out', stderr=logfile_base+'.cmd.err',**kwargs) self.started = True self.__waitLogs(logfile_base, startCount) return self
[docs] def wait(self, timeout=TIMEOUTS['WaitForSocket'], **kwargs): """ Block until this container has exited :return: The container's return code, or None if not known """ DockerHelper._executeDockerCommand(self.parent, arguments=["wait", self.name], timeout=timeout, stdouterrConfig=Config.CUSTOM_NAME_WITHOUT_COUNT('__wait'), **kwargs) with self._safelyOpenProcessOutputFile('__wait.out') as f: wait_out = f.read() if len(wait_out) > 0: return int(wait_out) return None
[docs] def cp(self, file, **kwargs): """ 'docker cp' a file out of the container into your output directory :param file: Absolute path of the file in the container. The file in your output directory will share its basename. """ cp, _, _ = DockerHelper._executeDockerCommand(self.parent, arguments=["cp", self.name + ':' + file, self.parent.output], stdouterrConfig=Config.CUSTOM_NAME_WITHOUT_COUNT('__dockercp'), **kwargs) if (cp.exitStatus != 0): self.parent.abort(BLOCKED, "docker cp failed") return self
def __del__(self): if self.started and self.own: rm, _, _ = DockerHelper._executeDockerCommand(self.parent, arguments=["rm", "-f", "-v", self.name], displayNamePostfix=self.name, stdouterrConfig=Config.MANUAL_CONFIG(), ignoreExitStatus=True, abortOnError=False) if(rm.exitStatus != 0): self.parent.addOutcome(BLOCKED, 'docker rm %s failed' %self.name) """ Class variable used to confine use of __gcContainers to a single invocation of pysys, rather than once per test """ __doneGC = False def __gcContainers(self): """ Garbage-collects old running containers; the test framework is supposed to remove containers on shutdown, but sometimes a serious fault bypasses this, so we take a look at them occasionally """ DockerContainer.__doneGC = True DockerHelper._executeDockerCommand(self.parent, arguments=["ps", "-a"], stdouterrConfig=Config.MANUAL_CONFIG(), stdout='__gcContainers.out', ignoreExitStatus=True, abortOnError=False) with self._safelyOpenProcessOutputFile("__gcContainers.out") as f: container_lines = f.readlines()[1:] # characters pendingGC = [] for line in container_lines: line = line.rstrip() match = re.match(".* pysys(\d\d\d\d\d\d)(\d\d\d\d\d\d\d\d\d\d\d)([a-z0-9_]*)( )*$", line) if match: name = "pysys%s%s%s" % (match.group(1), match.group(2), match.group(3)) pid = int(match.group(1)) age = int(time.mktime(time.localtime())) - int(match.group(2)) if(pid != os.getpid() and age > 7200): pendingGC.append(name) if len(pendingGC) > 0: self.parent.log.info("GC-ing Docker containers " + str(pendingGC)) DockerHelper._executeDockerCommand(self.parent, arguments=["stop", "-t", "60"] + pendingGC, stdouterrConfig=Config.MANUAL_CONFIG(), ignoreExitStatus=True, abortOnError=False) DockerHelper._executeDockerCommand(self.parent, arguments=["rm", "-f", "-v"] + pendingGC, stdouterrConfig=Config.MANUAL_CONFIG(), ignoreExitStatus=True, abortOnError=False) def __acquirePorts(self): """ Calculates all ports that are exposed to the host by this container, populating the dictionary 
self.portsContainerToHost """ self.portsContainerToHost = {} DockerHelper._executeDockerCommand(self.parent, arguments=["port", self.name], stdouterrConfig=Config.MANUAL_CONFIG(), stdout='__ports.out', ignoreExitStatus=True, abortOnError=False) with self._safelyOpenProcessOutputFile("__ports.out") as f: for line in f.readlines(): # chars m = re.match("^([\d]+)\/tcp -> .*:([\d]+)", line) self.portsContainerToHost[int(m.group(1))] = int(m.group(2)) def __waitLogs(self, logfile_base, startCount = 0): # Check the logfile to be sure the server is ready self.log(logfile_base) if startCount != 0: self.totalStarts = startCount else: self.totalStarts = self.totalStarts + 1 condition = ">%d" % (self.totalStarts-1) matches=self.parent.waitForGrep(file=logfile_base+'.out', expr='Realm Server Startup sequence completed', condition=condition, timeout=TIMEOUTS['WaitForSignal']*4) if len(matches) != self.totalStarts: self.parent.abort(BLOCKED, "Realm server has not started successfully. Expected %d starts, got %d (see %s.out/err)" % (self.totalStarts, len(matches), logfile_base))
class DockerSwarm(DockerHelper):
	"""
	Helper class for working with Docker Swarm environments. As with the other
	Docker classes, it attempts to clean up after itself.
	"""

	def __init__(self, parent, file, imageSubst=None, externalPorts=None, hostPathSubst=None, buildSubst=None, additionalFiles=None, otherSubsts=None):
		"""
		Create from a Docker Swarm file. We're going to be testing sample files
		that have hardcoded aspects to them; exposing ports to fixed external
		ports; naming specific images from earlier steps; refering a specific
		path to build a Dockerfile in. However, this is inappropriate for
		auto-tests, so this method will tailor the file according to substitutions.

		:param parent: Reference to the parent PySys testcase
		:param file: Absolute path to the docker-compose file
		:param imageSubst: dictionary from fixed image names to DockerImages that should be used instead
		:param externalPorts: a list of external port numbers mentioned in this compose that get randomly
			reallocated. Only the DockerContainer will know which to.
		:param hostPathSubst: dictionary from the host path in a volume declaration, to the real host path
			we'd like it to be
		:param buildSubst: dictionary from a build: path to a (directory of real context, [Dockerfile names within context])
		:param additionalFiles: dictionary from a build path to [absolute paths of additional files to copy
			into the context]. You might use this to copy Dockerfiles into the context if they're not already there
		:param otherSubsts: Any other kind of arbitrary textual substitution not covered by the above options.
			Mapping of string to replacement string.
		"""
		DockerHelper.__init__(self, parent=parent, displayName='DockerSwarm %s'%os.path.basename(file))
		if not imageSubst: imageSubst = {}
		if not externalPorts: externalPorts = []
		if not hostPathSubst: hostPathSubst = {}
		if not buildSubst: buildSubst = {}
		if not otherSubsts: otherSubsts = {}
		self.parent = parent
		self.isUp = False
		parent.log.info("Tailoring a docker-compose file from " + file)
		# Fix: b2a_hex returns bytes in Python 3, which would previously be
		# formatted as "b'...'" (quotes included) into the filename; decode to
		# str so the filename is plain hex
		self.tailoredFile = os.path.join(parent.output,
			"docker-compose-%s.yml" % binascii.b2a_hex(os.urandom(8)).decode('ascii'))
		# Create a globally unique project name
		self.name = DockerImage.generateUniqueName()

		# Perform substitutions, in the compose and in any referenced Dockerfiles.
		# replaceMap applies to the compose file; dfReplaceMap to Dockerfiles.
		replaceMap = {}
		dfReplaceMap = {}
		for i in imageSubst:
			assert imageSubst[i].__class__.__name__=='DockerImage', type(imageSubst[i])
			replaceMap["image: " + i] = "image: " + imageSubst[i].getName()
			dfReplaceMap["FROM " + i] = "FROM " + imageSubst[i].getName()
			dfReplaceMap["ARG " + i.upper() + "_IMAGE=" + i] = "ARG " + i.upper() + "_IMAGE=" + imageSubst[i].getName()
			if i.lower() == "apama":
				dfReplaceMap["FROM ${APAMA_IMAGE}"] = "FROM " + imageSubst[i].getName()
		for i in externalPorts:
			assert(isinstance(i, int))
			# Strip the fixed host port so docker assigns a random free one
			replaceMap["- \"%s:" % i] = "- \""
			replaceMap["- %s:" % i] = "- "
		for i in hostPathSubst:
			assert(isinstance(i, str) and isinstance(hostPathSubst[i], str))
			replaceMap["- %s:" % i] = "- %s:" % hostPathSubst[i]
		for i in otherSubsts:
			replaceMap[i] = otherSubsts[i]

		self.buildContexts = []
		for j, i in enumerate(buildSubst):
			# Set up the copy of the build context, including the additional files
			context_tmp = os.path.join(parent.output, "%s_%i_buildcontext" % (self.name, j))
			(context, dockerfiles) = buildSubst[i]
			# cp -a preserves permissions/links, which shutil.copytree may not
			parent.startProcess(command='/bin/cp', arguments=["-a", context, context_tmp],
				stdout="__cp.out", stderr="__cp.err")
			if additionalFiles and i in additionalFiles:
				for f in additionalFiles.get(i):
					shutil.copy2(f, os.path.join(context_tmp, os.path.basename(f)))
			# Tailor any Dockerfiles, and tailor the compose file to point to
			# this copy of the build context
			for df in dockerfiles:
				filereplace.replace(os.path.join(context_tmp, df), os.path.join(context_tmp, df + "_"), dfReplaceMap)
				os.rename(os.path.join(context_tmp, df + "_"), os.path.join(context_tmp, df))
			replaceMap["build: " + i] = "build: " + context_tmp
			replaceMap["context: " + i] = "context: " + context_tmp
			self.buildContexts.append(context_tmp)
		filereplace.replace(file, self.tailoredFile, replaceMap)
		self.commonArgs = ["-c", self.tailoredFile]

	# Mutex to cover invocations of docker stack deploy, to indirectly limit the
	# number of simultaneous builds. See DockerImage.__buildMutex.
	# N.B. possibly redundant in stack because build must be done explicitly
	__deployMutex = threading.Lock()

	def deploy(self, abortOnError=True, environs=None):
		"""
		Bring up all services in the environment.

		:param abortOnError: Abort the test if this fails
		:param environs: A dictionary of the environment to run the process in (defaults to clean environment)
		"""
		assert(not self.isUp)
		self.isUp = True
		# NOTE(review): the return value was unused; the call is retained in case
		# getInstanceCount increments a per-test counter as a side effect - confirm
		self.parent.getInstanceCount("docker stack deploy")
		with DockerSwarm.__deployMutex:
			deploy, _, _ = DockerHelper._executeDockerCommand(self.parent,
				arguments=["stack", "deploy"] + self.commonArgs + [self.name],
				ignoreExitStatus=True, abortOnError=abortOnError, environs=environs,
				displayName='docker stack deploy')
		if(deploy.exitStatus != 0 and abortOnError):
			self.parent.abort(BLOCKED, "docker stack deploy failed")
		self.parent.addResource(self)

	def getContainers(self):
		"""
		Returns all the service containers for a running environment.

		:return: A map from service name to a list of all DockerContainers running that service
		"""
		assert(self.isUp)
		_, stdout, _ = self.getAllContainers()
		with self._safelyOpenProcessOutputFile(stdout) as f:
			lines = f.readlines()
		ret = {}
		for line in lines:
			# Swarm task names look like <stack>_<service>.<number>.<taskid>
			m = re.match(r".* " + self.name + r"_(.*)\.([0-9]+)\.(.*)", line)
			if m:
				serviceName = m.group(1)
				serviceNumber = m.group(2)
				serviceId = m.group(3)
				if serviceName not in ret:
					ret[serviceName] = []
				containerName = self.name + "_" + serviceName + "." + serviceNumber +"."+serviceId
				# own=False: the swarm (not the container object) is responsible for cleanup
				ret[serviceName].append(DockerContainer.fromExisting(self.parent, containerName, own=False))
		return ret

	def rm(self, services=None):
		"""
		docker stack rm.

		:param services: Names of services to rm. If none provided, removes them all.
		"""
		# NOTE(review): 'docker stack rm' always removes the whole stack; the
		# services parameter only controls whether volumes/networks are GC'd below
		rm, _, _ = DockerHelper._executeDockerCommand(self.parent,
			arguments=["stack","rm",self.name], displayNamePostfix='rm',
			stdouterrConfig=Config.MANUAL_CONFIG(), stdOutErr='docker_stack_rm_%s' % self.name,
			ignoreExitStatus=True, abortOnError=False)
		if(rm.exitStatus != 0):
			self.parent.abort(BLOCKED, "docker stack rm failed")
		# If removing all services, also clean up Volumes and Networks
		if not services:
			self._gcImp(self.name)

	def __del__(self):
		if self.isUp:
			self.rm()
			# Remove our copies of build contexts
			for i in self.buildContexts:
				shutil.rmtree(i)
			self._gcImp(self.name)
		if not DockerSwarm.__doneGC:
			self.__gc()

	# Class variable used to confine use of __gc to a single invocation of
	# pysys, rather than once per test
	__doneGC = False

	def __gc(self):
		"""
		Garbage-collects old volumes and networks; the test framework is
		supposed to remove networks and volumes on shutdown, but sometimes a
		serious fault bypasses this, so we take a look at them occasionally.
		"""
		DockerSwarm.__doneGC = True
		self._gcImp(r"pysys(\d\d\d\d\d\d)(\d\d\d\d\d\d\d\d\d\d\d)([a-z0-9_]*)")
class DockerComposition(DockerHelper):
	"""
	Helper class for working with Docker Compose environments. As with the
	other Docker classes, it attempts to clean up after itself.
	"""

	def __init__(self, parent, file, imageSubst=None, externalPorts=None, hostPathSubst=None, buildSubst=None, additionalFiles=None, otherSubsts=None):
		"""
		Create from a docker-compose file. We're going to be testing sample
		files that have hardcoded aspects to them; exposing ports to fixed
		external ports; naming specific images from earlier steps; refering a
		specific path to build a Dockerfile in. However, this is inappropriate
		for auto-tests, so this method will tailor the file according to substitutions.

		:param parent: Reference to the parent PySys testcase
		:param file: Absolute path to the docker-compose file
		:param imageSubst: dictionary from fixed image names to DockerImages that should be used instead
		:param externalPorts: a list of external port numbers mentioned in this compose that get randomly
			reallocated. Only the DockerContainer will know which to.
		:param hostPathSubst: dictionary from the host path in a volume declaration, to the real host path
			we'd like it to be
		:param buildSubst: dictionary from a build: path to a (directory of real context, [Dockerfile names within context])
		:param additionalFiles: dictionary from a build path to [absolute paths of additional files to copy
			into the context]. You might use this to copy Dockerfiles into the context if they're not already there
		:param otherSubsts: Any other kind of arbitrary textual substitution not covered by the above options.
			Mapping of string to replacement string.
		"""
		#TODO - substitute user: if necessary
		DockerHelper.__init__(self, parent=parent, displayName='DockerComposition %s'%os.path.basename(file))
		if not imageSubst: imageSubst = {}
		if not externalPorts: externalPorts = []
		if not hostPathSubst: hostPathSubst = {}
		if not buildSubst: buildSubst = {}
		if not otherSubsts: otherSubsts = {}
		self.parent = parent
		self.isUp = False
		parent.log.info("Tailoring a docker-compose file from " + file)
		# Fix: b2a_hex returns bytes in Python 3, which would previously be
		# formatted as "b'...'" (quotes included) into the filename; decode to
		# str so the filename is plain hex
		self.tailoredFile = os.path.join(parent.output,
			"docker-compose-%s.yml" % binascii.b2a_hex(os.urandom(8)).decode('ascii'))
		# Create a globally unique project name
		self.name = DockerImage.generateUniqueName()

		# Perform substitutions, in the compose and in any referenced Dockerfiles.
		# replaceMap applies to the compose file; dfReplaceMap to Dockerfiles.
		replaceMap = {}
		dfReplaceMap = {}
		for i in imageSubst:
			assert imageSubst[i].__class__.__name__=='DockerImage', type(imageSubst[i])
			replaceMap["image: " + i] = "image: " + imageSubst[i].getName()
			dfReplaceMap["FROM " + i] = "FROM " + imageSubst[i].getName()
			dfReplaceMap["ARG " + i.upper() + "_IMAGE=" + i] = "ARG " + i.upper() + "_IMAGE=" + imageSubst[i].getName()
			if i.lower() == "apama":
				dfReplaceMap["FROM ${APAMA_IMAGE}"] = "FROM " + imageSubst[i].getName()
		for i in externalPorts:
			assert(isinstance(i, int))
			# Strip the fixed host port so docker assigns a random free one
			replaceMap["- \"%s:" % i] = "- \""
			replaceMap["- %s:" % i] = "- "
		for i in hostPathSubst:
			assert(isinstance(i, str) and isinstance(hostPathSubst[i], str))
			replaceMap["- %s:" % i] = "- %s:" % hostPathSubst[i]
		for i in otherSubsts:
			replaceMap[i] = otherSubsts[i]

		self.buildContexts = []
		for j, i in enumerate(buildSubst):
			# Set up the copy of the build context, including the additional files
			context_tmp = os.path.join(parent.output, "%s_%i_buildcontext" % (self.name, j))
			(context, dockerfiles) = buildSubst[i]
			# cp -a preserves permissions/links, which shutil.copytree may not
			parent.startProcess(command='/bin/cp', arguments=["-a", context, context_tmp],
				stdout="__cp.out", stderr="__cp.err")
			if additionalFiles and i in additionalFiles:
				for f in additionalFiles.get(i):
					shutil.copy2(f, os.path.join(context_tmp, os.path.basename(f)))
			# Tailor any Dockerfiles, and tailor the compose file to point to
			# this copy of the build context
			for df in dockerfiles:
				filereplace.replace(os.path.join(context_tmp, df), os.path.join(context_tmp, df + "_"), dfReplaceMap)
				os.rename(os.path.join(context_tmp, df + "_"), os.path.join(context_tmp, df))
			replaceMap["build: " + i] = "build: " + context_tmp
			replaceMap["context: " + i] = "context: " + context_tmp
			self.buildContexts.append(context_tmp)
		filereplace.replace(file, self.tailoredFile, replaceMap)
		self.commonArgs = ["-p", self.name, "-f", self.tailoredFile]

	# Mutex to cover invocations of docker-compose up, to indirectly limit the
	# number of simultaneous builds. See DockerImage.__buildMutex.
	__upMutex = threading.Lock()

	def up(self, abortOnError=True, environs=None):
		"""
		Bring up all services in the environment.

		:param abortOnError: Abort the test if this fails
		:param environs: A dictionary of the environment to run the process in (defaults to clean environment)
		"""
		assert(not self.isUp)
		self.isUp = True
		with DockerComposition.__upMutex:
			up, _, stderr = DockerHelper._executeDockerCommand(self.parent,
				command=self.parent.project.DOCKER_COMPOSE_EXE,
				arguments=self.commonArgs + ["up", "-d", "--no-recreate"],
				displayName='docker-compose up', environs=environs,
				ignoreExitStatus=True, abortOnError=abortOnError)
		if(up.exitStatus != 0 and abortOnError):
			# Prefer logging just the ERROR lines; if none matched (falsy
			# return), fall back to logging the whole stderr file
			self.parent.logFileContents(stderr, includes=['ERROR:.*']) or self.parent.logFileContents(stderr)
			self.parent.abort(BLOCKED, "docker-compose up failed")
		self.parent.addResource(self)

	def getContainers(self):
		"""
		Returns all the service containers for a running environment.

		:return: A map from service name to a list of all DockerContainers running that service
		"""
		_, stdout, _ = self.getAllContainers()
		with self._safelyOpenProcessOutputFile(stdout) as f:
			lines = f.readlines()
		ret = {}
		for line in lines:
			# Compose container names look like <project>_<service>_<number>
			m = re.match(r".* " + self.name + r"_(.*)_([0-9]+)", line)
			if m:
				serviceName = m.group(1)
				serviceNumber = m.group(2)
				if serviceName not in ret:
					ret[serviceName] = []
				containerName = self.name + "_" + serviceName + "_" + serviceNumber
				# own=False: the composition (not the container object) is responsible for cleanup
				ret[serviceName].append(DockerContainer.fromExisting(self.parent, containerName, own=False))
		return ret

	def stop(self, **kwargs):
		""" docker-compose stop """
		stop, _, _ = DockerHelper._executeDockerCommand(self.parent,
			command=self.parent.project.DOCKER_COMPOSE_EXE,
			arguments=self.commonArgs + ["stop"], displayName='docker-compose stop',
			stdouterrConfig=Config.MANUAL_CONFIG(), **kwargs)
		if(stop.exitStatus != 0):
			self.parent.abort(BLOCKED, "docker-compose stop failed")
		self.isUp = False

	def rm(self, services=None):
		"""
		docker-compose rm.

		:param services: Names of services to rm. If none provided, removes them all.
		"""
		myServices = [] if not services else services
		rm, _, _ = DockerHelper._executeDockerCommand(self.parent,
			command=self.parent.project.DOCKER_COMPOSE_EXE,
			arguments=self.commonArgs + ["rm", "-v", "--force"] + myServices,
			ignoreExitStatus=True, abortOnError=False, displayName='docker-compose rm')
		if(rm.exitStatus != 0):
			self.parent.abort(BLOCKED, "docker-compose rm failed")
		# If removing all services, also clean up Volumes and Networks
		if not services:
			self._gcImp(self.name)

	def __del__(self):
		if self.isUp:
			self.stop()
			self.rm()
			# Remove our copies of build contexts
			for i in self.buildContexts:
				shutil.rmtree(i)
			self._gcImp(self.name)
		if not DockerComposition.__doneGC:
			self.__gc()

	# Class variable used to confine use of __gc to a single invocation of
	# pysys, rather than once per test
	__doneGC = False

	def __gc(self):
		"""
		Garbage-collects old volumes and networks; the test framework is
		supposed to remove networks and volumes on shutdown, but sometimes a
		serious fault bypasses this, so we take a look at them occasionally.
		"""
		DockerComposition.__doneGC = True
		self._gcImp(r"pysys(\d\d\d\d\d\d)(\d\d\d\d\d\d\d\d\d\d\d)([a-z0-9_]*)")