diff --git a/buildrunner/config.py b/buildrunner/config.py
index fa820ff6..7a4fabf6 100644
--- a/buildrunner/config.py
+++ b/buildrunner/config.py
@@ -34,7 +34,7 @@
     load_config,
 )
 
-from buildrunner.validation.config_model import validate_config
+from buildrunner.validation.config import validate_config
 
 from . import fetch
 
diff --git a/buildrunner/docker/multiplatform_image_builder.py b/buildrunner/docker/multiplatform_image_builder.py
index e98def38..c3f255fe 100644
--- a/buildrunner/docker/multiplatform_image_builder.py
+++ b/buildrunner/docker/multiplatform_image_builder.py
@@ -17,7 +17,7 @@
 from buildrunner.docker import get_dockerfile
 
-LOGGER = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 
 class ImageInfo:
@@ -139,14 +139,14 @@ def __exit__(self, exc_type, exc_value, traceback):
         for name, images in self._intermediate_built_images.items():
             for image in images:
                 for tag in image.tags:
-                    LOGGER.debug(f"Removing image {image.repo}:{tag} for {name}")
+                    logger.debug(f"Removing image {image.repo}:{tag} for {name}")
                     docker.image.remove(f"{image.repo}:{tag}", force=True)
 
         # Removes all tagged images if keep_images is False
         if self._tagged_images_names and not self._keep_images:
             for name, images in self._tagged_images_names.items():
                 for image in images:
-                    LOGGER.debug(f"Removing tagged image {image} for {name}")
+                    logger.debug(f"Removing tagged image {image} for {name}")
                     docker.image.remove(image, force=True)
 
     @property
@@ -180,7 +180,7 @@ def _start_local_registry(self):
             str: The name of the registry container
         """
         if not self._local_registry_is_running:
-            LOGGER.debug("Starting local docker registry")
+            logger.debug("Starting local docker registry")
             container = docker.run("registry", detach=True, publish_all=True)
             ports = container.network_settings.ports
 
@@ -194,23 +194,23 @@ def _start_local_registry(self):
             self._registry_info = RegistryInfo(container.name, "localhost", ports.get("5000/tcp")[0].get("HostPort"))
             self._local_registry_is_running = True
-            LOGGER.debug(f"Started local registry {self._registry_info}")
+            logger.debug(f"Started local registry {self._registry_info}")
         else:
-            LOGGER.warning("Local registry is already running")
+            logger.warning("Local registry is already running")
 
     def _stop_local_registry(self):
         """
         Stops and removes the local registry along with any images
         """
         if self._local_registry_is_running:
-            LOGGER.debug(f"Stopping and removing local registry {self._registry_info}")
+            logger.debug(f"Stopping and removing local registry {self._registry_info}")
             try:
                 docker.remove(self._registry_info.name, volumes=True, force=True)
             except python_on_whales.exceptions.NoSuchContainer as err:
-                LOGGER.error(f"Failed to stop and remove local registry {self._registry_info.name}: {err}")
+                logger.error(f"Failed to stop and remove local registry {self._registry_info.name}: {err}")
             self._local_registry_is_running = False
         else:
-            LOGGER.warning("Local registry is not running when attempting to stop it")
+            logger.warning("Local registry is not running when attempting to stop it")
 
     # pylint: disable=too-many-arguments
     @retry(python_on_whales.exceptions.DockerException, tries=5, delay=1)
@@ -248,7 +248,7 @@ def _build_single_image(self,
             f"'{file}'({os.path.exists(f'{file}')}) does not exist!"
tagged_names = [f"{name}:{tag}" for tag in tags] - LOGGER.debug(f"Building {tagged_names} for {platform}") + logger.debug(f"Building {tagged_names} for {platform}") # Build the image with the specified tags image = docker.buildx.build(path, @@ -270,9 +270,9 @@ def _build_single_image(self, try: docker.image.remove(images, force=True) except python_on_whales.exceptions.DockerException as err: - LOGGER.warning(f"Failed to remove {images}: {err}") + logger.warning(f"Failed to remove {images}: {err}") except python_on_whales.exceptions.DockerException as err: - LOGGER.error(f"Failed to build {tag_name}: {err}") + logger.error(f"Failed to build {tag_name}: {err}") raise err built_images.append(ImageInfo(repo=name, @@ -321,7 +321,7 @@ def build_multiple_images(self, dockerfile, cleanup_dockerfile = get_dockerfile(file) - LOGGER.debug(f"Building {name}:{tags} for platforms {platforms} from {dockerfile}") + logger.debug(f"Building {name}:{tags} for platforms {platforms} from {dockerfile}") if self._use_local_registry and not self._local_registry_is_running: # Starts local registry container to do ephemeral image storage @@ -341,7 +341,7 @@ def build_multiple_images(self, processes = [] for platform in platforms: curr_name = f"{base_image_name}-{platform.replace('/', '-')}" - LOGGER.debug(f"Building {curr_name} for {platform}") + logger.debug(f"Building {curr_name} for {platform}") if do_multiprocessing: processes.append(Process(target=self._build_single_image, args=(curr_name, @@ -412,7 +412,7 @@ def push(self, name: str, dest_names: List[str] = None) -> None: timeout_seconds = initial_timeout_seconds while retries > 0: retries -= 1 - LOGGER.debug(f"Creating manifest list {name} with timeout {timeout_seconds} seconds") + logger.debug(f"Creating manifest list {name} with timeout {timeout_seconds} seconds") curr_process = Process(target=docker.buildx.imagetools.create, kwargs={"sources": src_names, "tags": tagged_names}) curr_process.start() @@ -424,7 +424,7 @@ def push(self, name: str, dest_names: List[str] = None) -> None: f" and {timeout_seconds} seconds each try") else: # Process finished within timeout - LOGGER.info(f"Successfully created multiplatform images {dest_names}") + logger.info(f"Successfully created multiplatform images {dest_names}") break timeout_seconds += timeout_step_seconds @@ -444,7 +444,7 @@ def _find_native_platform_images(self, name: str) -> str: """ host_os = system() host_arch = machine() - LOGGER.debug(f"Finding native platform for {name} for {host_os}/{host_arch}") + logger.debug(f"Finding native platform for {name} for {host_os}/{host_arch}") pattern = f"{host_os}-{host_arch}" # No images built for this name @@ -481,7 +481,7 @@ def tag_single_platform(self, name: str, tags: List[str] = None, dest_name: str # This is to handle pylint's "dangerous-default-value" error if tags is None: tags = ["latest"] - LOGGER.debug(f"Tagging {name} with tags {tags} - Dest name: {dest_name}") + logger.debug(f"Tagging {name} with tags {tags} - Dest name: {dest_name}") source_image = self._find_native_platform_images(name) if dest_name is None: dest_name = name @@ -497,11 +497,11 @@ def tag_single_platform(self, name: str, tags: List[str] = None, dest_name: str for tag in tags: dest_tag = f"{dest_name}:{tag}" docker.tag(image, dest_tag) - LOGGER.debug(f"Tagged {image} as {dest_tag}") + logger.debug(f"Tagged {image} as {dest_tag}") self._tagged_images_names[name].append(dest_tag) docker.image.remove(image, force=True) except python_on_whales.exceptions.DockerException as err: - 
LOGGER.error(f"Failed while tagging {dest_name}: {err}") + logger.error(f"Failed while tagging {dest_name}: {err}") raise err def get_built_images(self, name: str) -> List[str]: diff --git a/buildrunner/validation/config_model.py b/buildrunner/validation/config.py similarity index 71% rename from buildrunner/validation/config_model.py rename to buildrunner/validation/config.py index 92d5879e..16ff3a6a 100644 --- a/buildrunner/validation/config_model.py +++ b/buildrunner/validation/config.py @@ -9,68 +9,45 @@ from typing import Dict, List, Optional, Set, Union # pylint: disable=no-name-in-module -from pydantic import BaseModel, validator, ValidationError +from pydantic import BaseModel, Field, validator, ValidationError +from buildrunner.validation.errors import Errors, get_validation_errors +from buildrunner.validation.step import Step, StepPushCommitDict -class Errors: - """ Error class for storing validation errors """ - class Error: - """ Error class for storing validation error """ - def __init__(self, field: str, message: str): - self.field: str = field - self.message: Union[str, None] = message - def __init__(self): - self.errors = [] - - def add(self, field: str, message: str): - """ Add an error """ - self.errors.append(self.Error(field, message)) - - def count(self): - """ Return the number of errors """ - return len(self.errors) - - def __str__(self): - return '\n'.join([f' {error.field}: {error.message}' for error in self.errors]) - - def __repr__(self): - return self.__str__() - - -class StepBuild(BaseModel): - """ Build model within a step """ - path: Optional[str] - dockerfile: Optional[str] - pull: Optional[bool] - platform: Optional[str] - platforms: Optional[List[str]] - - -class StepPushDict(BaseModel): - """ Push model within a step """ - repository: str - tags: Optional[List[str]] - - -class Step(BaseModel): - """ Step model """ - build: Optional[Union[StepBuild, str]] - push: Optional[Union[StepPushDict, List[Union[str, StepPushDict]], str]] - - def is_multi_platform(self): - """ - Check if the step is a multi-platform build step - """ - return isinstance(self.build, StepBuild) and \ - self.build.platforms is not None +class Config(BaseModel, extra='forbid'): + """ Top level config model """ + # Unclear if this is actively used + class GithubModel(BaseModel, extra='forbid'): + """ Github model """ + endpoint: str + version: str + username: str + app_token: str + + class SSHKey(BaseModel, extra='forbid'): + """ SSH key model """ + file: Optional[str] + key: Optional[str] + password: Optional[str] + prompt_password: Optional[bool] = Field(alias='prompt-password') + aliases: Optional[List[str]] -class Config(BaseModel): - """ Top level config model """ version: Optional[float] steps: Optional[Dict[str, Step]] + github: Optional[Dict[str, GithubModel]] + # Global config attributes + env: Optional[Dict[str, str]] + build_servers: Optional[Dict[str, Union[str, List[str]]]] = Field(alias='build-servers') + # Intentionally has loose restrictions on ssh-keys since documentation isn't clear + ssh_keys: Optional[Union[SSHKey, List[SSHKey]]] = Field(alias='ssh-keys') + local_files: Optional[Dict[str, str]] = Field(alias='local-files') + caches_root: Optional[str] = Field(alias='caches-root') + docker_registry: Optional[str] = Field(alias='docker-registry') + temp_dir: Optional[str] = Field(alias='temp-dir') + # Note this is pydantic version 1.10 syntax @validator('steps') @classmethod @@ -82,7 +59,7 @@ def validate_steps(cls, values) -> None: ValueError | pydantic.ValidationError 
             ValueError | pydantic.ValidationError : If the config file is invalid
         """
 
-        def validate_push(push: Union[StepPushDict, List[Union[str, StepPushDict]], str],
+        def validate_push(push: Union[StepPushCommitDict, str, List[Union[str, StepPushCommitDict]]],
                           mp_push_tags: Set[str],
                           step_name: str,
                           update_mp_push_tags: bool = True):
@@ -107,7 +84,7 @@ def validate_push(push, mp_push_tags, step_name, update_mp_push_tags):
             if ":" not in name:
                 name = f'{name}:latest'
 
-            if isinstance(push, StepPushDict):
+            if isinstance(push, StepPushCommitDict):
                 names = [f"{push.repository}:{tag}" for tag in push.tags]
 
             if names is not None:
@@ -169,14 +146,6 @@ def validate_multi_platform_build(mp_push_tags: Set[str]):
 
         return values
 
-def _add_validation_errors(exc: ValidationError) -> Errors:
-    errors = Errors()
-    for error in exc.errors():
-        loc = [str(item) for item in error["loc"]]
-        errors.add(field='.'.join(loc), message=f'{error["msg"]} ({error["type"]})')
-    return errors
-
-
 def validate_config(**kwargs) -> Errors:
     """
     Check if the config file is valid
@@ -188,5 +157,5 @@ def validate_config(**kwargs) -> Errors:
     try:
         Config(**kwargs)
     except ValidationError as exc:
-        errors = _add_validation_errors(exc)
+        errors = get_validation_errors(exc)
     return errors
diff --git a/buildrunner/validation/errors.py b/buildrunner/validation/errors.py
new file mode 100644
index 00000000..b5c9815d
--- /dev/null
+++ b/buildrunner/validation/errors.py
@@ -0,0 +1,48 @@
+"""
+Copyright 2023 Adobe
+All Rights Reserved.
+
+NOTICE: Adobe permits you to use, modify, and distribute this file in accordance
+with the terms of the Adobe license agreement accompanying it.
+"""
+from typing import Union
+
+from pydantic import ValidationError
+
+
+class Errors:
+    """ Error class for storing validation errors """
+    class Error:
+        """ Error class for storing a validation error """
+        def __init__(self, field: str, message: str):
+            self.field: str = field
+            self.message: Union[str, None] = message
+
+    def __init__(self):
+        self.errors = []
+
+    def add(self, field: str, message: str):
+        """ Add an error """
+        self.errors.append(self.Error(field, message))
+
+    def count(self):
+        """ Return the number of errors """
+        return len(self.errors)
+
+    def __str__(self):
+        return '\n'.join([f' {error.field}: {error.message}' for error in self.errors])
+
+    def __repr__(self):
+        return self.__str__()
+
+
+def get_validation_errors(exc: ValidationError) -> Errors:
+    """ Convert pydantic validation errors to an Errors object """
+    errors = Errors()
+    for error in exc.errors():
+        loc = [str(item) for item in error["loc"]]
+        if error["type"] == "value_error.extra":
+            errors.add(field='.'.join(loc), message='not a valid field, please check the spelling and documentation')
+        else:
+            errors.add(field='.'.join(loc), message=f'{error["msg"]} ({error["type"]})')
+    return errors
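Taken together, `extra='forbid'` on the models plus the `value_error.extra` branch in `get_validation_errors()` is what turns a misspelled key into a readable message. A quick sketch of the round trip (pydantic 1.x, matching the `@validator` syntax used in this diff; the `Demo` model is made up):

```python
from pydantic import BaseModel, ValidationError

from buildrunner.validation.errors import get_validation_errors


class Demo(BaseModel, extra='forbid'):
    """Hypothetical model; stands in for Config/Step."""
    name: str


try:
    Demo(name='ok', bogus=True)
except ValidationError as exc:
    # Prints: " bogus: not a valid field, please check the spelling and documentation"
    print(get_validation_errors(exc))
```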
+""" + +from typing import Any, Dict, List, Optional, Union + +# pylint: disable=no-name-in-module +from pydantic import BaseModel, Field + + +class StepPypiPush(BaseModel, extra='forbid'): + """ Step pypi push model""" + repository: str + username: str + password: str + + +class Artifact(BaseModel): + """ Artifact model """ + # Intentionally loose restrictions + format: Optional[str] + type: Optional[Any] + compression: Optional[str] + push: Optional[bool] + + +class StepBuild(BaseModel, extra='forbid'): + """ Build model within a step """ + path: Optional[str] + dockerfile: Optional[str] + pull: Optional[bool] + platform: Optional[str] + platforms: Optional[List[str]] + inject: Optional[Dict[str, Optional[str]]] + no_cache: Optional[bool] = Field(alias='no-cache') + buildargs: Optional[Dict[str, Any]] + + +class RunAndServicesBase(BaseModel): + """ + Base model for Run and Service + which has several common fields + """ + image: Optional[str] + cmd: Optional[str] + # Intentionally loose restrictions + provisioners: Optional[Dict[str, str]] + shell: Optional[str] + cwd: Optional[str] + user: Optional[str] + hostname: Optional[str] + dns: Optional[List[str]] + dns_search: Optional[str] + extra_hosts: Optional[Dict[str, str]] + env: Optional[Dict[str, str]] + files: Optional[Dict[str, str]] + volumes_from: Optional[List[str]] + ports: Optional[Dict[str, str]] + pull: Optional[bool] + systemd: Optional[bool] + containers: Optional[List[str]] + + +class Service(RunAndServicesBase, extra='forbid'): + """ Service model """ + build: Optional[Union[StepBuild, str]] + dns_search: Optional[str] = Field(alias='dns-search') + wait_for: Optional[List[Any]] + inject_ssh_agent: Optional[bool] = Field(alias='inject-ssh-agent') + # Not sure if this is valid, but it is in a test file + # Didn't use StepRun because of the potential to have a infinitely nested model + run: Optional[Any] + + +class StepRun(RunAndServicesBase, extra='forbid'): + """ Run model within a step """ + xfail: Optional[bool] + services: Optional[Dict[str, Service]] + cmds: Optional[List[str]] + caches: Optional[Dict[str, Union[str, List[str]]]] + ssh_keys: Optional[List[str]] = Field(alias='ssh-keys') + artifacts: Optional[Dict[str, Optional[Artifact]]] + platform: Optional[str] + cap_add: Optional[Union[str, List[str]]] + privileged: Optional[bool] + post_build: Optional[Union[str, Dict[str, Any]]] = Field(alias='post-build') + no_cache: Optional[bool] = Field(alias='no-cache') + + +class StepRemote(BaseModel, extra='forbid'): + """ Remote model within a step """ + # Not sure if host is optional or required + host: Optional[str] + cmd: str + artifacts: Optional[Dict[str, Union[Artifact, None]]] + + +class StepPushCommitDict(BaseModel, extra='forbid'): + """ Push model within a step """ + repository: str + tags: Optional[List[str]] + + +class Step(BaseModel, extra='forbid'): + """ Step model """ + build: Optional[Union[StepBuild, str]] + push: Optional[Union[StepPushCommitDict, List[Union[str, StepPushCommitDict]], str]] + commit: Optional[Union[StepPushCommitDict, List[Union[str, StepPushCommitDict]], str]] + remote: Optional[StepRemote] + run: Optional[StepRun] + depends: Optional[List[str]] + pypi_push: Optional[Union[StepPypiPush, str]] = Field(alias='pypi-push') + + def is_multi_platform(self): + """ + Check if the step is a multi-platform build step + """ + return isinstance(self.build, StepBuild) and \ + self.build.platforms is not None diff --git a/tests/test-files/test-general.yaml b/tests/test-files/test-general.yaml index 
diff --git a/tests/test-files/test-general.yaml b/tests/test-files/test-general.yaml
index a1bad244..536faf5c 100644
--- a/tests/test-files/test-general.yaml
+++ b/tests/test-files/test-general.yaml
@@ -1,5 +1,3 @@
-tags: &tags ['tag1', 'tag2']
-
 steps:
 
   test:
@@ -49,7 +47,7 @@ steps:
       rabbitmq:
         image: {{ DOCKER_REGISTRY }}/rabbitmq:3.4
         hostname: rabbitmq
-        environment:
+        env:
          RABBITMQ_NODENAME: test-rabbitmq
       web:
         build: tests/webservicecontainer
@@ -112,7 +110,7 @@ steps:
         'tests/postbuildpath/Dockerfile': '/'
     push:
       repository: adobe/hello-inline
-      tags: *tags
+      tags: [latest]
 
   archive-container-config:
     run:
diff --git a/tests/test_config_validation.py b/tests/test_config_validation.py
deleted file mode 100644
index 3db47daa..00000000
--- a/tests/test_config_validation.py
+++ /dev/null
@@ -1,350 +0,0 @@
-
-from buildrunner.validation.config_model import validate_config, Errors
-
-
-def test_valid_version_config():
-    # Invalid version
-    config = {
-        'version': 'string'
-    }
-    errors = validate_config(**config)
-    assert isinstance(errors, Errors)
-    assert errors.count() == 1
-
-    # Valid version
-    config = {
-        'version': 2.0,
-        'steps': {
-        }
-    }
-    errors = validate_config(**config)
-    assert errors is None
-
-    # Optional version
-    config = {
-        'steps': {
-        }
-    }
-    errors = validate_config(**config)
-    assert errors is None
-
-
-def test_platform_and_platforms_invalid():
-    # Invalid to have platform and platforms
-    config = {
-        'steps': {
-            'build-container-multi-platform': {
-                'build': {
-                    'path': '.',
-                    'dockerfile': 'Dockerfile',
-                    'pull': False,
-                    'platform': 'linux/amd64',
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': {
-                    'repository': 'mytest-reg/buildrunner-test-multi-platform',
-                    'tags': [ 'latest' ],
-                },
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert isinstance(errors, Errors)
-    assert errors.count() == 1
-
-
-def test_platforms_invalid():
-    # Invalid to have platforms as a string, it should be a list
-    config = {
-        'steps': {
-            'build-container-multi-platform': {
-                'build': {
-                    'path': '.',
-                    'dockerfile': 'Dockerfile',
-                    'pull': False,
-                    'platforms': 'linux/amd64',
-                },
-                'push': {
-                    'repository': 'mytest-reg/buildrunner-test-multi-platform',
-                    'tags': [ 'latest' ],
-                },
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert isinstance(errors, Errors)
-    assert errors.count() == 2
-
-
-def test_build_is_path():
-    config = {
-        'steps': {
-            'build-is-path': {
-                'build': '.',
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert errors is None
-
-
-def test_valid_platforms():
-    config = {
-        'steps': {
-            'build-container-multi-platform': {
-                'build': {
-                    'path': '.',
-                    'dockerfile': 'Dockerfile',
-                    'pull': False,
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': {
-                    'repository': 'mytest-reg/buildrunner-test-multi-platform',
-                    'tags': [ 'latest' ],
-                },
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert errors is None
-
-
-def test_duplicate_mp_tags_dictionary_invalid():
-    # Invalid to have duplicate multi-platform tag
-    # Testing with both dictionary format
-    config = {
-        'steps': {
-            'build-container-multi-platform1': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': {
-                    'repository': 'mytest-reg/buildrunner-test-multi-platform',
-                    'tags': [ 'latest' ],
-                },
-            },
-            'build-container-multi-platform2': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': {
-                    'repository': 'mytest-reg/buildrunner-test-multi-platform',
-                    'tags': [ 'latest' ],
-                },
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert isinstance(errors, Errors)
-    assert errors.count() == 1
-
-
-def test_duplicate_mp_tags_strings_invalid():
-    # Invalid to have duplicate multi-platform tag
-    # Testing with both string format, one inferred 'latest' the other explicit 'latest'
-    config = {
-        'steps': {
-            'build-container-multi-platform1': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                # No tag is given so 'latest' is assumed
-                'push': 'mytest-reg/buildrunner-test-multi-platform',
-            },
-            'build-container-multi-platform2': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': 'mytest-reg/buildrunner-test-multi-platform:latest',
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert isinstance(errors, Errors)
-    assert errors.count() == 1
-
-    # Indentical tags in same string format
-    config = {
-        'steps': {
-            'build-container-multi-platform1': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': 'mytest-reg/buildrunner-test-multi-platform:latest',
-            },
-            'build-container-multi-platform2': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': 'mytest-reg/buildrunner-test-multi-platform:latest',
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert isinstance(errors, Errors)
-    assert errors.count() == 1
-
-
-def test_duplicate_mp_tags_strings_valid():
-    # Same string format but different MP tags
-    config = {
-        'steps': {
-            'build-container-multi-platform1': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': 'mytest-reg/buildrunner-test-multi-platform:latest',
-            },
-            'build-container-multi-platform2': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': 'mytest-reg/buildrunner-test-multi-platform:not-latest',
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert errors is None
-
-
-def test_duplicate_mp_tags_platform_platforms_invalid():
-    # Invalid to have duplicate multi-platform tag and single platform tag
-    config = {
-        'steps': {
-            'build-container-multi-platform1': {
-                'build': {
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': 'mytest-reg/buildrunner-test-multi-platform:latest',
-            },
-            'build-container-single-platform': {
-                'build': {
-                    'platform': 'linux/arm64'
-                },
-                'push': 'mytest-reg/buildrunner-test-multi-platform:latest',
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert isinstance(errors, Errors)
-    assert errors.count() == 1
-
-
-def test_valid_config():
-    # Sample valid config, but not exhaustive
-    config = {
-        'version': 2.0,
-        'steps': {
-            'build-container-single-platform1': {
-                'build': {
-                    'path': '.',
-                    'dockerfile': 'Dockerfile',
-                    'pull': False,
-                    'platform': 'linux/amd64',
-                },
-                'push': {
-                    'repository': 'mytest-reg/buildrunner-test',
-                    'tags': [ 'latest' ],
-                },
-            },
-            'build-container-multi-platform2': {
-                'build': {
-                    'path': '.',
-                    'dockerfile': 'Dockerfile',
-                    'pull': False,
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': {
-                    'repository': 'mytest-reg/buildrunner-test-multi-platform',
-                    'tags': [ 'latest' ],
-                },
-            },
-            'build-container-multi-platform-push3': {
-                'build': {
-                    'path': '.',
-                    'dockerfile': 'Dockerfile',
-                    'pull': False,
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': [
-                    'myimages/image1',
-                    {
-                        'repository': 'myimages/image2',
-                        'tags': [ 'latest' ],
-                    }
-                ],
-            },
-        }
-    }
-
-    errors = validate_config(**config)
-    assert errors is None
-
-
-def test_multiple_errors():
-    # Multiple errors
-    # Invalid to have version as a string
-    # Invalid to have platforms and platform
-    config = {
-        'version': 'string',
-        'steps': {
-            'build-container-multi-platform': {
-                'build': {
-                    'path': '.',
-                    'dockerfile': 'Dockerfile',
-                    'pull': False,
-                    'platform': 'linux/amd64',
-                    'platforms': [
-                        'linux/amd64',
-                        'linux/arm64',
-                    ],
-                },
-                'push': {
-                    'repository': 'mytest-reg/buildrunner-test-multi-platform',
-                    'tags': [ 'latest' ],
-                },
-            },
-        }
-    }
-    errors = validate_config(**config)
-    assert isinstance(errors, Errors)
-    assert errors.count() == 2
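The deleted module above reappears below as a `tests/test_config_validation/` package; the main mechanical change is that most cases now build the config from YAML instead of dict literals. Both spellings exercise `validate_config()` identically, e.g.:

```python
import yaml

from buildrunner.validation.config import validate_config

config_yaml = """
steps:
  build-is-path:
    build: .
"""
# YAML string and dict literal are equivalent inputs
assert validate_config(**yaml.load(config_yaml, Loader=yaml.Loader)) is None
assert validate_config(**{'steps': {'build-is-path': {'build': '.'}}}) is None
```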
diff --git a/tests/test_config_validation/__init__.py b/tests/test_config_validation/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_config_validation/test_global_config.py b/tests/test_config_validation/test_global_config.py
new file mode 100644
index 00000000..8c944a6c
--- /dev/null
+++ b/tests/test_config_validation/test_global_config.py
@@ -0,0 +1,107 @@
+
+import yaml
+
+from buildrunner.validation.config import validate_config
+from buildrunner.validation.errors import Errors
+
+
+def test_global_config():
+    config_yaml = """
+    # The 'env' global configuration may be used to set environment variables
+    # available to all buildrunner runs that load this config file. Env vars do
+    # not need to begin with a prefix to be included in this list (i.e.
+    # BUILDRUNNER_ prefix is not needed for variables listed here).
+    env:
+      ENV_VAR1: 'value1'
+      # Values must always be strings
+      ENV_VAR2: 'true'
+
+    # The 'build-servers' global configuration consists of a map where each key
+    # is a server user@host string and the value is a list of host aliases that
+    # map to the server. This allows builders to configure Buildrunner to talk to
+    # specific servers within their environment on a project by project basis.
+    build-servers:
+      user@host:
+        - alias1
+        - alias2
+
+    # The 'ssh-keys' global configuration is a list of ssh key configurations.
+    # The file attribute specifies the path to a local ssh private key. The key
+    # attribute provides an ASCII-armored private key. Only one or the other is
+    # required. If the private key is password protected the password attribute
+    # specifies the password. The alias attribute is a list of aliases assigned
+    # to the given key (see the "ssh-keys" configuration example of the "run"
+    # step attribute below).
+    ssh-keys:
+      # - file: /path/to/ssh/private/key.pem
+      #
+      key: |
+        -----INLINE KEY-----
+        ...
+      password:
+      # If set, prompt for the ssh key password. Ignored if password is set.
+      prompt-password: True
+      aliases:
+        - 'my-github-key'
+
+    # The "local-files" global configuration consists of a map where each key
+    # is a file alias and the value is either the path where the file resides on
+    # the local server OR the contents of the file. See the "local-files"
+    # configuration example of the "run" step attribute below. Entries in the
+    # master global configuration may specify any "local-files" alias while
+    # user configuration files may only specify "local-files" aliases that
+    # are in the user's home directory or a path owned by the user. Home
+    # directory expansions (e.g. ``~``, ``~/foo``, ``~username`` and
+    # ``~username/foo``) are honored. The ``~`` and ``~/foo`` cases will map
+    # to the home directory of the user executing buildrunner.
+    # NOTE: remember to quote ``~`` in YAML files!
+    local-files:
+      digitalmarketing.mvn.settings: '~/.m2/settings.xml'
+      some.other.file.alias: |
+        The contents of the file...
+
+    # The 'caches-root' global configuration specifies the directory to use for
+    # build caches. The default directory is ~/.buildrunner/caches.
+    caches-root: ~/.buildrunner/caches
+
+    # Change the default docker registry, see the FAQ below for more information
+    docker-registry: docker-mirror.example.com
+
+    # Change the temp directory used for *most* files
+    # Setting the TMP, TMPDIR, or TEMP env vars should do the same thing,
+    # but on some systems it may be necessary to use this instead.
+    temp-dir: /my/tmp/dir
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_global_config_ssh_key_file():
+    config_yaml = """
+    ssh-keys:
+      - file: /path/to/ssh/private/key.pem
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_global_config_ssh_invalid():
+    config_yaml = """
+    ssh-keys:
+      key: |
+        -----INLINE KEY-----
+        ...
+      password:
+      # If set, prompt for the ssh key password. Ignored if password is set.
+      prompt-password: True
+      aliases:
+        - 'my-github-key'
+      bogus-attribute: 'bogus'
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    print(errors)
+    assert isinstance(errors, Errors)
+    assert errors.count() > 0
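Because `ssh_keys` is declared as `Union[SSHKey, List[SSHKey]]`, both a single mapping and a list validate, which is what the tests above rely on. A compact sketch of both spellings passing (paths and key data are placeholders):

```python
import yaml

from buildrunner.validation.config import validate_config

single_mapping = """
ssh-keys:
  file: /path/to/ssh/private/key.pem
"""
list_of_keys = """
ssh-keys:
  - file: /path/to/key1.pem
  - key: inline-key-data
    aliases:
      - 'my-github-key'
"""
for doc in (single_mapping, list_of_keys):
    assert validate_config(**yaml.load(doc, Loader=yaml.Loader)) is None
```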
diff --git a/tests/test_config_validation/test_validation_artifacts.py b/tests/test_config_validation/test_validation_artifacts.py
new file mode 100644
index 00000000..665395e7
--- /dev/null
+++ b/tests/test_config_validation/test_validation_artifacts.py
@@ -0,0 +1,141 @@
+
+import yaml
+from buildrunner.validation.config import validate_config, Errors
+
+
+def test_step_remote_artifacts_valid():
+    config_yaml = """
+    steps:
+      build-remote:
+        remote:
+          host: myserver.ut1
+          cmd: docker build -t mytest-reg/buildrunner-test .
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: tar
+              compression: lzma
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_step_run_artifacts_valid():
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: zip
+              compression: lzma
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    print(errors)
+    assert errors is None
+
+
+def test_step_artifacts_valid_compression():
+    config_yaml = """
+    steps:
+      build-remote:
+        remote:
+          host: myserver.ut1
+          cmd: docker build -t mytest-reg/buildrunner-test .
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: tar
+              compression: gz
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_step_run_format_valid():
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              format: uncompressed
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_step_type_valid():
+    # Checks zip type
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: zip
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+    # Checks tar type
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: tar
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_push_invalid():
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              push: bogus
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
+
+
+def test_push_valid():
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              push: True
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_valid_extra_properties():
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              push: True
+              something_else: awesome data
+              something_else2: True
+              something_else3: 123
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
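The type/compression permutations above could also be collapsed into one parametrized case; a hypothetical consolidation (the suite keeps them as separate functions):

```python
import pytest
import yaml

from buildrunner.validation.config import validate_config


@pytest.mark.parametrize('artifact_type, compression', [
    ('tar', 'lzma'),
    ('tar', 'gz'),
    ('zip', 'lzma'),
])
def test_artifact_type_and_compression(artifact_type, compression):
    config_yaml = f"""
    steps:
      build-run:
        run:
          artifacts:
            bogus/path/to/artifacts/*:
              type: {artifact_type}
              compression: {compression}
    """
    config = yaml.load(config_yaml, Loader=yaml.Loader)
    assert validate_config(**config) is None
```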
diff --git a/tests/test_config_validation/test_validation_config.py b/tests/test_config_validation/test_validation_config.py
new file mode 100644
index 00000000..0ee89629
--- /dev/null
+++ b/tests/test_config_validation/test_validation_config.py
@@ -0,0 +1,522 @@
+
+import yaml
+from buildrunner.validation.config import validate_config, Errors
+
+
+def test_valid_version_config():
+    # Invalid version
+    config = {
+        'version': 'string'
+    }
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
+
+    # Valid version
+    config = {
+        'version': 2.0,
+        'steps': {
+        }
+    }
+    errors = validate_config(**config)
+    assert errors is None
+
+    # Optional version
+    config = {
+        'steps': {
+        }
+    }
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_valid_config():
+    # Sample valid config, but not exhaustive
+    config_yaml = """
+    version: 2.0
+    steps:
+      build-container-single-platform1:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+          platform: linux/amd64
+        push:
+          repository: mytest-reg/buildrunner-test
+          tags:
+            - latest
+      build-container-multi-platform2:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push:
+          repository: mytest-reg/buildrunner-test-multi-platform
+          tags:
+            - latest
+      build-container-multi-platform-push3:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push:
+          - myimages/image1
+          - repository: myimages/image2
+            tags:
+              - latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_multiple_errors():
+    # Multiple errors
+    # Invalid to have version as a string
+    # Invalid to have platforms and platform
+    config_yaml = """
+    version: string
+    steps:
+      build-container-multi-platform:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+          platform: linux/amd64
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push:
+          repository: mytest-reg/buildrunner-test-multi-platform
+          tags:
+            - latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 2
+
+
+def test_doc_config():
+    # Tests the documentation example with minimal changes to make valid yaml
+    config_yaml = """
+    version: 2.0
+    steps:
+      my-build-step:
+        # Optional step dependency definition to specify which steps need to be processed before this step.
+        # The `version` must be present and set to `2.0` or higher for buildrunner to utilize the step dependencies list.
+        # A buildrunner error will occur if `depends` is present but `version` is missing or value is lower than `2.0`.
+        depends:
+          - test-step
+          - validation-step
+
+        run:
+          # xfail indicates whether the run operation is expected to fail. The
+          # default is false - the operation is expected to succeed. If xfail
+          # is true and the operation succeeds then it will result in a failure.
+          xfail: True
+
+          # A map of additional containers that should be created and linked to
+          # the primary run container. These can be used to bring up services
+          # (such as databases) that are required to run the step. More details
+          # on services below.
+          services:
+            service-name-1:
+              image:
+            service-name-2:
+              cmd:
+
+          # The Docker image to run. If empty the image created with the 'build'
+          # attribute will be used.
+          image:
+
+          # The command(s) to run. If omitted Buildrunner runs the command
+          # configured in the Docker image without modification. If provided
+          # Buildrunner always sets the container command to a shell, running the
+          # given command here within the shell. If both 'cmd' and 'cmds' are
+          # present the command in 'cmd' is run before the commands in the 'cmds'
+          # list are run.
+          cmd:
+          cmds:
+            - <command>
+            - <command>
+
+          # A collection of provisioners to run. Provisioners work similar to the
+          # way Packer provisioners do and are always run within a shell.
+          # When a provisioner is specified Buildrunner always sets the container
+          # command to a shell, running the provisioners within the shell.
+          # Currently Buildrunner supports shell and salt provisioners.
+          provisioners:
+            shell: path/to/script.sh | [path/to/script.sh, ARG1, ...]
+            salt: <salt state>
+
+          # The shell to use when specifying the cmd or provisioners attributes.
+          # Defaults to /bin/sh. If the cmd and provisioners attributes are not
+          # specified this setting has no effect.
+          shell: /bin/sh
+
+          # The directory to run commands within. Defaults to /source.
+          cwd: /source
+
+          # The user to run commands as. Defaults to the user specified in the
+          # Docker image.
+          user:
+
+          # The hostname assigned to the run container.
+          hostname:
+
+          # Custom dns servers to use in the run container.
+          dns:
+            - 8.8.8.8
+            - 8.8.4.4
+
+          # A custom dns search path to use in the run container.
+          dns_search: mydomain.com
+
+          # Add entries to the hosts file
+          # The keys are the hostnames. The values can be either
+          # ip addresses or references to service containers.
+          extra_hosts:
+            "www1.test.com": "192.168.0.1"
+            "www2.test.com": "192.168.0.2"
+
+          # A map specifying additional environment variables to be injected into
+          # the container. Keys are the variable names and values are variable
+          # values.
+          env:
+            ENV_VARIABLE_ONE: value1
+            ENV_VARIABLE_TWO: value2
+
+          # A map specifying files that should be injected into the container.
+          # The map key is the alias referencing a given file (as configured in
+          # the "local-files" section of the global configuration file) or a
+          # relative path to a file/directory in the build directory. The value
+          # is the path the given file should be mounted at within the container.
+          files:
+            namespaced.file.alias1: "/path/to/readonly/file/or/dir"
+            namespaced.file.alias2: "/path/to/readwrite/file/or/dir:rw"
+            build/dir/file: "/path/to/build/dir/file"
+
+          # A map specifying cache directories that are stored as archive files on the
+          # host system as `local cache key` and extracted as a directory in
+          # the container named `docker path`. The cache directories are maintained
+          # between builds and can be used to store files, such as downloaded
+          # dependencies, to speed up builds.
+          # Caches can be shared between any builds or projects on the system
+          # as the names are not prefixed with any project-specific information.
+          # Caches should be treated as ephemeral and should only store items
+          # that can be obtained/generated by subsequent builds.
+          #
+          # Two formats are supported when defining caches.
+          # 1) RECOMMENDED
+          #    <docker path>:
+          #      - <local cache key>
+          #      - <local cache key prefix>
+          #
+          #    Restore Cache:
+          #      This format allows for prefix matching. The order of the list dictates the
+          #      order which should be searched in the local system cache location.
+          #      When an item isn't found it will search for archive files which prefix matches
+          #      the item in the list. If more than one archive file is matched for a prefix
+          #      the archive file most recently modified will be used. If there is no
+          #      matching archive file then nothing will be restored in the docker container.
+          #
+          #    Save Cache:
+          #      The first local cache key in the list is used for the name of the local
+          #      cache archive file.
+          #
+          # 2) <local cache key>: <docker path>
+          #    (backwards compatible with older caching method, but more limited)
+          #
+          caches:
+            # Recommended format.
+            <docker path>:
+              - <local cache key>
+              - <local cache key prefix>
+
+            "/root/.m2/repository":
+              # Buildrunner will look for a cache that matches this cache key/prefix,
+              # typically the first key should be the most specific as it is the closest match
+              # Note that this first key will also be used to save the cache for use across builds or projects
+              - m2repo-{{ checksum("pom.xml", "subproj/pom.xml") }}
+              # If the first cache key is not found in the caches, use this prefix to look for a cache that may not
+              # be an exact match, but may still be close and not require as much downloading of dependencies, etc
+              # Note that this may match across any cache done by any build on the same system, so it may be wise to
+              # use a unique prefix for any number of builds that have a similar dependency tree, etc
+              - m2repo-
+              # If no cache is found, nothing will be extracted and the application will need to rebuild the cache
+
+            # Backwards compatible format. Not recommended for future or updated configurations.
+            # <local cache key>: <docker path>
+            maven: "/root/.m2/repository"
+
+          # A map specifying ports to expose, this is only used when the
+          # --publish-ports parameter is passed to buildrunner
+          ports:
+            <container port>: <host port>
+
+          # A list specifying service containers (see below) whose exposed
+          # volumes should be mapped into the run container's file system.
+          # An exposed volume is one created by the volume Dockerfile command.
+          # See https://docs.docker.com/engine/reference/builder/#volume for more
+          # details regarding the volume Dockerfile command.
+          volumes_from:
+            - my-service-container
+
+          # A list specifying ssh keys that should be injected into the container
+          # via an ssh agent. The list should specify the ssh key aliases (as
+          # configured in the "ssh-keys" section of the global configuration
+          # file) that buildrunner should inject into the container. Buildrunner
+          # injects the keys by mounting a ssh-agent socket and setting the
+          # appropriate environment variable, meaning that the private key itself
+          # is never available inside the container.
+          ssh-keys:
+            - my_ssh_key_alias
+
+          # A map specifying the artifacts that should be archived for the step.
+          # The keys in the map specify glob patterns of files to archive. If a
+          # value is present it should be a map of additional properties that
+          # should be added to the build artifacts.json file. The artifacts.json
+          # file can be used to publish artifacts to another system (such as
+          # Gauntlet) with the accompanying metadata. By default artifacts will be
+          # listed in the artifacts.json file; this can be disabled by adding the
+          # ``push`` property and setting it to false.
+          #
+          # When archiving *directories* special properties can be set to change
+          # the behavior of the archiver. Directories by default are archived as
+          # gzip'ed TARs. The compression can be changed by setting the
+          # ``compression`` property to one of the below-listed values. The
+          # archive type can be changed by setting the property ``type:zip``.
+          # When a zip archive is requested then the ``compression`` property is
+          # ignored. If the directory tree should be gathered verbatim without
+          # archiving then the property ``format:uncompressed`` can be used.
+          #
+          # NOTE: Artifacts can only be archived from the /source directory using
+          # a relative path or a full path. Files outside of this directory will
+          # fail to be archived.
+          artifacts:
+            artifacts/to/archive/*:
+              format: uncompressed
+              type: tar
+              compression: gz
+              push: true
+              property1: value1
+              property2: value2
+
+          # Whether or not to pull the image from upstream prior to running
+          # the step. This is almost always desirable, as it ensures the
+          # most up to date source image.
+          # NOTE: If the image was created from a 'push' or 'commit' earlier in
+          # this ``buildrunner.yaml`` then this will default to false
+          pull: true
+
+          # Specify a different platform architecture when pulling and running images.
+          # This is useful if you are running an image that was built for a different architecture
+          # than what buildrunner is running on, such as using a linux/arm64/v8 Apple M1 architecture
+          # development machine to run or test an image built for linux/amd64 architecture.
+          platform: linux/amd64
+          #
+          # platform: linux/arm64/v8  # an apple m1 architecture
+
+          # systemd does not play well with docker typically, but you can
+          # use this setting to tell buildrunner to set the necessary docker
+          # flags to get systemd to work properly:
+          # - /usr/sbin/init needs to run as pid 1
+          # - /sys/fs/cgroup needs to be mounted as readonly
+          #   (-v /sys/fs/cgroup:/sys/fs/cgroup:ro)
+          # - The security setting seccomp=unconfined must be set
+          #   (--security-opt seccomp=unconfined)
+          # If this is omitted, the image will be inspected for the label
+          # 'BUILDRUNNER_SYSTEMD'.
+          # If found, systemd=true will be assumed.
+          # systemd: true/false
+          systemd: true
+
+          # Docker supports certain kernel capabilities, like 'SYS_ADMIN'.
+          # see https://goo.gl/gTQrqW for more information on setting these.
+          cap_add: 'SYS_ADMIN'
+          #
+          # cap_add:
+          #   - 'SYS_ADMIN'
+          #   - 'SYS_RAWIO'
+
+          # Docker can run in a privileged mode. This allows access to all devices
+          # on the host. Using privileged is rare, but there are good use cases
+          # for this feature. see https://goo.gl/gTQrqW for more information on
+          # setting these.
+          # Default: false
+          # privileged: true/false
+          privileged: true
+
+          # The post-build attribute commits the resulting run container as an
+          # image and allows additional Docker build processing to occur. This is
+          # useful for adding Docker configuration, such as EXPOSE and CMD
+          # instructions, when building an image via the run task that cannot be
+          # done without running a Docker build. The post-build attribute
+          # functions the same way as the 'build' step attribute does, except
+          # that it prepends the committed run container image to the provided
+          # Dockerfile.
+          post-build: path/to/build/context
+          #
+          # post-build:
+          #   dockerfile: |
+          #     EXPOSE 80
+          #     CMD /runserver.sh
+
+          # A list of container names or labels created within any run container
+          # that buildrunner should clean up. (Use if you call
+          # 'docker run --name <name>' or 'docker run --label <label>'.)
+          containers:
+            - container1
+            - container2
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_full_config():
+    # Minus template areas
+    config_yaml = """
+    steps:
+      generate_files:
+        run:
+          image: docker.company.com/abc-xdm-proto-build:latest
+          ssh-keys: ["company-github"]
+          env:
+            GIT_TOKEN: 'blahblahblahblahblahblah'
+          cmd: sbt clean generateAwareJsonFiles combineXDM generateProtobufFiles
+          artifacts:
+            'target/protobufFiles/Database*.proto':
+            'target/rawJson/Database*.json':
+            'target/AwareJson/Aware.json':
+            'target/combinedXDM/complete-schema-template.schema.json':
+      build-dev-rpm:
+        build:
+          inject:
+            "buildrunner.results/generate_files/*.proto": "proto/"
+            "buildrunner.results/generate_files/A*.json": "json/"
+            "db_build/dms.repo.centos7": db_build/dms.repo
+          dockerfile: |
+            FROM docker-release.dr.corp.company.com/centos-7-x86_64-obuild:latest
+            ADD db_build/dms.repo /etc/yum.repos.d/dms.repo
+            RUN rpm --rebuilddb; yum clean all; yum install -y db-omniture-libs-protobuf-2.6.1 db-scds-proto-1.0 db-scds-json-1.0
+            ADD proto/*.proto /tmp/proto/
+            ADD json/*.json /tmp/json/
+        run:
+          cmds:
+            - "chown -R httpd:www /source"
+            - "echo ~ Compiling previous proto version..."
+            - "mkdir -p /tmp/existingscds && for f in `ls -d /home/omniture/protobuf/scds/*.proto`; do protoc -I=/home/omniture/protobuf --cpp_out /tmp/existingscds $f; done"
+            - "echo ~ Compiling current proto version..."
+          artifacts:
+            # pull the log if rpmbuild fails
+            "db_tmp/rpm/TMPDIR/*.log": {type: 'log'}
+            # pull the noarch packages
+            "db_tmp/rpm/RPMS/noarch/*.noarch.rpm": {platform: 'centos-noarch'}
+      build-proto-java:
+        build:
+          inject:
+            "buildrunner.results/generate_files/*.proto": "proto"
+          dockerfile: |
+            FROM docker.company.com/abc-base-containers/protobuf-builder:java8-2.5.0
+            ADD proto/*.proto /tmp/proto/scds/
+        run:
+          caches:
+            maven: "/root/.m2/repository"
+          cmds: [
+            'mvn package ${BUILDRUNNER_DO_PUSH+deploy} -am -pl proto-java'
+          ]
+          artifacts:
+            '*/target/*.jar':
+      download-country:
+        build:
+          inject:
+            "db_build/bin/*": "db_build/bin/"
+          dockerfile: |
+            FROM docker-release.dr.corp.company.com/centos-7-x86_64-obuild
+            ADD db_build/bin/* /tmp/
+        run:
+          cmds:
+            - '/tmp/download_country.sh'
+            # strip all quotes
+            - "sed -i 's/bogus//g' country_codes.csv"
+            # Add missing ?,? because it's not in the DB
+            - 'echo "?,?" >> country_codes.csv'
+            # keep first 2 columns, uppercase 2nd column
+            - 'awk -F, ''{OFS=","; $2 = toupper($2); {print $1,$2}}'' country_codes.csv > country_code_map.csv'
+          artifacts:
+            'country_code_map.csv':
+      build-transform-proto-xdm:
+        build:
+          inject:
+            "buildrunner.results/generate_files/*.proto": "proto"
+            "buildrunner.results/generate_files/*.json": "json"
+          dockerfile: |
+            FROM docker.company.com/abc-base-containers/protobuf-builder:java8-2.5.0
+            RUN apt-get update && apt-get -y install openssh-client
+            ADD proto/*.proto /tmp/proto/scds/
+        run:
+          env:
+            ARTIFACTORY_USER: 'cool_user'
+            ARTIFACTORY_API_TOKEN: 'blahblahblahblahblahblahblah'
+          caches:
+            maven: "/root/.m2/repository"
+          shell: /bin/bash
+          cmds: [
+            'cp /tmp/json/raw/*json json/raw',
+            'mkdir -p csv',
+            'cp /tmp/csv/*csv csv',
+            'curl -L https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 > jq',
+            'chmod +x jq',
+          ]
+          artifacts:
+            'transform-proto-xdm/target/*':
+            'transform-proto-xdm-generator/target/*':
+            'validator-xdm/target/*':
+      generate_docs:
+        run:
+          image: docker.company.com/abc-xdm-proto-build:latest
+          ssh-keys: ["company-github"]
+          env:
+            GIT_TOKEN: 'blahblahblahblahblahblahblah'
+          cmd: "sbt clean generateDocs ${BUILDRUNNER_DO_PUSH+publishGHPages}"
+          artifacts:
+            'target/docs/*':
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_github_config():
+    # Valid github config
+    config_yaml = """
+    github:
+      company_github:
+        endpoint: 'https://git.company.com/api'
+        version: 'v3'
+        username: 'USERNAME'
+        app_token: 'APP_TOKEN'
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+    # Invalid github config
+    config_yaml = """
+    github:
+      company_github:
+        endpoint: 'https://git.company.com/api'
+        version: 'v3'
+        username: 'USERNAME'
+        app_token: 'APP_TOKEN'
+        bogus: 'bogus'
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
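`validate_config()` returns `None` on success and an `Errors` object on failure, as every test in this file asserts. Callers that prefer an exception could wrap it like this (hypothetical helper, not part of the diff):

```python
from buildrunner.validation.config import validate_config


def ensure_valid_config(**config) -> None:
    """Raise instead of returning an Errors object (hypothetical)."""
    errors = validate_config(**config)
    if errors is not None:
        raise ValueError(f'invalid buildrunner config:\n{errors}')


ensure_valid_config(steps={'build-is-path': {'build': '.'}})  # passes silently
```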
diff --git a/tests/test_config_validation/test_validation_run.py b/tests/test_config_validation/test_validation_run.py
new file mode 100644
index 00000000..6c51c0a5
--- /dev/null
+++ b/tests/test_config_validation/test_validation_run.py
@@ -0,0 +1,52 @@
+
+from buildrunner.validation.config import validate_config, Errors
+import yaml
+
+
+def test_step_run_artifacts_valid():
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          image: mytest-reg/buildrunner-test
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: zip
+              compression: lzma
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_push_valid():
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: zip
+              compression: lzma
+              push: True
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_push_invalid():
+    config_yaml = """
+    steps:
+      build-run:
+        run:
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: zip
+              compression: lzma
+              push: 1212
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
diff --git a/tests/test_config_validation/test_validation_step.py b/tests/test_config_validation/test_validation_step.py
new file mode 100644
index 00000000..9122562b
--- /dev/null
+++ b/tests/test_config_validation/test_validation_step.py
@@ -0,0 +1,415 @@
+
+import yaml
+from buildrunner.validation.config import validate_config, Errors
+
+
+def test_platform_and_platforms_invalid():
+    # Invalid to have platform and platforms
+    config_yaml = """
+    steps:
+      build-container-multi-platform:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+          platform: linux/amd64
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push:
+          repository: mytest-reg/buildrunner-test-multi-platform
+          tags:
+            - latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
+
+
+def test_platforms_invalid():
+    # Invalid to have platforms as a string, it should be a list
+    config_yaml = """
+    steps:
+      build-container-multi-platform:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+          platforms: linux/amd64
+        push:
+          repository: mytest-reg/buildrunner-test-multi-platform
+          tags:
+            - latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 2
+
+
+def test_build_is_path():
+    config_yaml = """
+    steps:
+      build-is-path:
+        build: .
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_valid_platforms():
+    config_yaml = """
+    steps:
+      build-container-multi-platform:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push:
+          repository: mytest-reg/buildrunner-test-multi-platform
+          tags:
+            - latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_duplicate_mp_tags_dictionary_invalid():
+    # Invalid to have duplicate multi-platform tag
+    config_yaml = """
+    steps:
+      build-container-multi-platform1:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push:
+          repository: mytest-reg/buildrunner-test-multi-platform
+          tags:
+            - latest
+      build-container-multi-platform2:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push:
+          repository: mytest-reg/buildrunner-test-multi-platform
+          tags:
+            - latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
+
+
+def test_duplicate_mp_tags_strings_invalid():
+    # Invalid to have duplicate multi-platform tag
+    # Testing with both string format, one inferred 'latest' the other explicit 'latest'
+    config_yaml = """
+    steps:
+      build-container-multi-platform1:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push: mytest-reg/buildrunner-test-multi-platform
+      build-container-multi-platform2:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push: mytest-reg/buildrunner-test-multi-platform:latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
+
+    # Identical tags in same string format
+    config_yaml = """
+    steps:
+      build-container-multi-platform1:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push: mytest-reg/buildrunner-test-multi-platform:latest
+      build-container-multi-platform2:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push: mytest-reg/buildrunner-test-multi-platform:latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
+
+
+def test_duplicate_mp_tags_strings_valid():
+    # Same string format but different MP tags
+    config_yaml = """
+    steps:
+      build-container-multi-platform1:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push: mytest-reg/buildrunner-test-multi-platform:latest
+      build-container-multi-platform2:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push: mytest-reg/buildrunner-test-multi-platform:not-latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_duplicate_mp_tags_platform_platforms_invalid():
+    # Invalid to have duplicate multi-platform tag and single platform tag
+    config_yaml = """
+    steps:
+      build-container-multi-platform1:
+        build:
+          platforms:
+            - linux/amd64
+            - linux/arm64
+        push: mytest-reg/buildrunner-test-multi-platform:latest
+      build-container-single-platform:
+        build:
+          platform: linux/arm64
+        push: mytest-reg/buildrunner-test-multi-platform:latest
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
+
+
+def test_step_remote_valid():
+    config_yaml = """
+    steps:
+      build-remote:
+        remote:
+          host: myserver.ut1
+          cmd: docker build -t mytest-reg/buildrunner-test .
+          artifacts:
+            bogus/path/to/artifacts/*:
+              type: tar
+              compression: lzma
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_step_remote_missing_cmd():
+    config_yaml = """
+    steps:
+      build-remote:
+        remote:
+          host: myserver.ut1
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert isinstance(errors, Errors)
+    assert errors.count() == 1
+
+
+def test_commit():
+    config_yaml = """
+    steps:
+      step1:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+        commit:
+          repository: mytest-reg/image1
+          tags:
+            - latest
+      step2:
+        build:
+          path: .
+          dockerfile: Dockerfile
+          pull: false
+        commit: mytest-reg/image1
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_pypi_push():
+    config_yaml = """
+    steps:
+      pypi1:
+        run:
+          image: python:2
+          cmds:
+            - python setup.py sdist
+          artifacts:
+            "dist/*.tar.gz": { type: 'python-sdist' }
+        pypi-push: artifactory-releng
+      pypi2:
+        run:
+          image: python:2
+          cmds:
+            - python -m build
+          artifacts:
+            "dist/*.tar.gz": { type: 'python-sdist' }
+            "dist/*.whl": { type: 'python-wheel' }
+        pypi-push:
+          repository: https://artifactory.example.com/artifactory/api/pypi/pypi-myownrepo
+          username: myuser
+          password: mypass
+    """
+    config = yaml.load(config_yaml, Loader=yaml.Loader)
+    errors = validate_config(**config)
+    assert errors is None
+
+
+def test_services():
+    config_yaml = """
+    steps:
+      my-build-step:
+        run:
+          services:
+            my-service-container:
+              # The 'build' attribute functions the same way that the step
+              # 'build' attribute does. The only difference is that the image
+              # produced by a service container build attribute cannot be pushed
+              # to a remote repository.
+              build:
+
+              # The pre-built image to base the container on. The 'build' and
+              # 'image' attributes are mutually exclusive in the service
+              # container context.
+              image:
+
+              # The command to run. If omitted Buildrunner runs the command
+              # configured in the Docker image without modification. If provided
+              # Buildrunner always sets the container command to a shell, running
+              # the given command here within the shell.
+              cmd:
+
+              # A collection of provisioners to run. Provisioners work similar to
+              # the way Packer provisioners do and are always run within a shell.
+              # When a provisioner is specified Buildrunner always sets the
+              # container command to a shell, running the provisioners within the
+              # shell. Currently Buildrunner supports shell and salt
+              # provisioners.
+              provisioners:
+                shell: path/to/script.sh
+                salt: <salt state>
+
+              # The shell to use when specifying the cmd or provisioners
+              # attributes. Defaults to /bin/sh. If the cmd and provisioners
+              # attributes are not specified this setting has no effect.
+              shell: /bin/sh
+
+              # The directory to run commands within. Defaults to /source.
+              cwd: /source
+
+              # The user to run commands as. Defaults to the user specified in
+              # the Docker image.
+              user:
+
+              # The hostname assigned to the service container.
+              hostname:
+
+              # Custom dns servers to use in the service container.
+              dns:
+                - 8.8.8.8
+                - 8.8.4.4
+
+              # A custom dns search path to use in the service container.
+              dns-search: mydomain.com
+
+              # Add entries to the hosts file
+              # The keys are the hostnames. The values can be either
+              # ip addresses or references to other service containers.
+              extra_hosts:
+                "www1.test.com": "192.168.0.1"
+                "www2.test.com": "192.168.0.2"
+
+              # A map specifying additional environment variables to be injected
+              # into the container. Keys are the variable names and values are
+              # variable values.
+              env:
+                ENV_VARIABLE_ONE: value1
+                ENV_VARIABLE_TWO: value2
+
+              # A map specifying files that should be injected into the container.
+              # The map key is the alias referencing a given file (as configured in
+              # the "local-files" section of the global configuration file) and the
+              # value is the path the given file should be mounted at within the
+              # container.
+              files:
+                namespaced.file.alias1: "/path/to/readonly/file/or/dir"
+                namespaced.file.alias2: "/path/to/readwrite/file/or/dir:rw"
+
+              # A list specifying other service containers whose exposed volumes
+              # should be mapped into this service container's file system. Any
+              # service containers in this list must be defined before this
+              # container is.
+              # An exposed volume is one created by the volume Dockerfile command.
+              # See https://docs.docker.com/engine/reference/builder/#volume for more
+              # details regarding the volume Dockerfile command.
+              volumes_from:
+                - my-service-container
+
+              # A map specifying ports to expose and link within other containers
+              # within the step.
+              ports:
+                <container port>: <host port>
+
+              # Whether or not to pull the image from upstream prior to running
+              # the step. This is almost always desirable, as it ensures the
+              # most up to date source image. There are situations, however, when
+              # this can be set to false as an optimization. For example, if a
+              # container is built at the beginning of a buildrunner file and then
+              # used repeatedly. In this case, it is clear that the cached version
+              # is appropriate and we don't need to check upstream for changes.
+              pull: true
+
+              # See above
+              systemd: true
+
+              # A list of container names or labels created within any run container
+              # that buildrunner should clean up. (Use if you call
+              # 'docker run --name <name>' or 'docker run --label <label>'.)