diff --git a/testplan/runnable/base.py b/testplan/runnable/base.py
index 90c1bba30..d43c628bc 100644
--- a/testplan/runnable/base.py
+++ b/testplan/runnable/base.py
@@ -9,6 +9,7 @@
 import uuid
 import webbrowser
 from collections import OrderedDict
+from dataclasses import dataclass
 from typing import (
     Any,
     Callable,
@@ -57,13 +58,28 @@
 from testplan.runnable.interactive import TestRunnerIHandler
 from testplan.runners.base import Executor
 from testplan.runners.pools.tasks import Task, TaskResult
-from testplan.runners.pools.tasks.base import is_task_target
+from testplan.runners.pools.tasks.base import (
+    is_task_target,
+    TaskTargetInformation,
+    get_task_target_information,
+)
 from testplan.testing import filtering, listing, ordering, tagging
 from testplan.testing.base import Test, TestResult
+from testplan.testing.listing import Lister
 from testplan.testing.multitest import MultiTest
 from testplan.runners.pools.base import Pool
 
+TestTask = Union[Test, Task, Callable]
+
+
+@dataclass
+class TaskInformation:
+    target: TestTask
+    materialized_test: Test
+    uid: str
+
+
 def get_exporters(values):
     """
     Validation function for exporter declarations.
@@ -180,7 +196,7 @@ def get_options(cls):
             # Test lister is None by default, otherwise Testplan would
             # list tests, not run them
             ConfigOption("test_lister", default=None): Or(
-                None, listing.BaseLister
+                None, listing.BaseLister, listing.MetadataBasedLister
             ),
             ConfigOption("verbose", default=False): bool,
             ConfigOption("debug", default=False): bool,
@@ -513,11 +529,110 @@ def _stop_remote_services(self):
             self.logger.info("Stopping Remote Server %s", name)
             rmt_svc.stop()
 
+    def _clone_task_for_part(self, task_info, _task_arguments, part):
+        _task_arguments["part"] = part
+        self.logger.debug(
+            "Task re-created with arguments: %s",
+            _task_arguments,
+        )
+        _multitest_params = task_info.materialized_test.cfg._cfg_input
+        _multitest_params["part"] = part
+        target = Task(**_task_arguments)
+        materialized_test = task_info.materialized_test.__class__(
+            **_multitest_params
+        )
+        target._uid = materialized_test.uid()
+        new_task = TaskInformation(
+            target,
+            materialized_test,
+            materialized_test.uid(),
+        )
+        return new_task
+
+    def _get_tasks(
+        self, _task_arguments, num_of_parts, runtime_data
+    ) -> List[TaskInformation]:
+        self.logger.debug(
+            "Task created with arguments: %s",
+            _task_arguments,
+        )
+        task = Task(**_task_arguments)
+        task_info = self._collect_task_info(task)
+
+        uid = task_info.uid
+
+        tasks: List[TaskInformation] = []
+        time_info = runtime_data.get(uid, None)
+        if num_of_parts:
+
+            if not isinstance(task_info.materialized_test, MultiTest):
+                raise TypeError(
+                    "multitest_parts specified in @task_target,"
+                    " but the Runnable is not a MultiTest"
+                )
+
+            if num_of_parts == "auto":
+                if not time_info:
+                    self.logger.warning(
+                        "%s: parts is 'auto' but no entry found"
+                        " in runtime-data",
+                        uid,
+                    )
+                    num_of_parts = 1
+                else:
+                    num_of_parts = math.ceil(
+                        time_info["execution_time"]
+                        / (
+                            self.cfg.auto_part_runtime_limit
+                            - time_info["setup_time"]
+                        )
+                    )
+            if "weight" not in _task_arguments:
+                _task_arguments["weight"] = _task_arguments.get(
+                    "weight", None
+                ) or (
+                    math.ceil(
+                        (time_info["execution_time"] / num_of_parts)
+                        + time_info["setup_time"]
+                    )
+                    if time_info
+                    else self.cfg.auto_part_runtime_limit
+                )
+            self.logger.user_info(
+                "%s: parts=%d, weight=%d",
+                uid,
+                num_of_parts,
+                _task_arguments["weight"],
+            )
+            if num_of_parts == 1:
+                task_info.target.weight = _task_arguments["weight"]
+                tasks.append(task_info)
+            else:
+                for i in range(num_of_parts):
+
+                    part = (i, num_of_parts)
+                    new_task = self._clone_task_for_part(
+                        task_info, _task_arguments, part
+                    )
+
+                    tasks.append(new_task)
+
+        else:
+            if time_info and not task.weight:
+                task_info.target.weight = math.ceil(
+                    time_info["execution_time"] + time_info["setup_time"]
+                )
+                self.logger.user_info(
+                    "%s: weight=%d", uid, task_info.target.weight
+                )
+            tasks.append(task_info)
+
+        return tasks
+
     def discover(
         self,
         path: str = ".",
         name_pattern: Union[str, Pattern] = r".*\.py$",
-    ) -> List[Task]:
+    ) -> List[TaskInformation]:
         """
-        Discover task targets under path in the modules that matches name
-        pattern, and return the created Task object.
+        Discover task targets under path in the modules that match the name
+        pattern, and return the list of created TaskInformation objects.
@@ -534,76 +649,10 @@ def discover(
             path,
         )
         regex = re.compile(name_pattern)
-        tasks = []
+        tasks: List[TaskInformation] = []
 
         runtime_data: dict = self.cfg.runtime_data or {}
 
-        def _add_task(_target, _task_arguments):
-            self.logger.debug(
-                "Task created with arguments: %s",
-                _task_arguments,
-            )
-            task = Task(**_task_arguments)
-            uid = self._verify_test_target(task)
-
-            # nothing to run
-            if not uid:
-                return
-
-            time_info = runtime_data.get(uid, None)
-            if getattr(_target, "__multitest_parts__", None):
-                num_of_parts = _target.__multitest_parts__
-                if num_of_parts == "auto":
-                    if not time_info:
-                        self.logger.warning(
-                            "%s parts is auto but cannot find it in runtime-data",
-                            uid,
-                        )
-                        num_of_parts = 1
-                    else:
-                        num_of_parts = math.ceil(
-                            time_info["execution_time"]
-                            / (
-                                self.cfg.auto_part_runtime_limit
-                                - time_info["setup_time"]
-                            )
-                        )
-                if "weight" not in _task_arguments:
-                    _task_arguments["weight"] = _task_arguments.get(
-                        "weight", None
-                    ) or (
-                        math.ceil(
-                            (time_info["execution_time"] / num_of_parts)
-                            + time_info["setup_time"]
-                        )
-                        if time_info
-                        else self.cfg.auto_part_runtime_limit
-                    )
-                self.logger.user_info(
-                    "%s: parts=%d, weight=%d",
-                    uid,
-                    num_of_parts,
-                    _task_arguments["weight"],
-                )
-                if num_of_parts == 1:
-                    task.weight = _task_arguments["weight"]
-                    tasks.append(task)
-                else:
-                    for i in range(num_of_parts):
-                        _task_arguments["part"] = (i, num_of_parts)
-                        self.logger.debug(
-                            "Task re-created with arguments: %s",
-                            _task_arguments,
-                        )
-                        tasks.append(Task(**_task_arguments))
-
-            else:
-                if time_info and not task.weight:
-                    task.weight = math.ceil(
-                        time_info["execution_time"] + time_info["setup_time"]
-                    )
-                    self.logger.user_info("%s: weight=%d", uid, task.weight)
-                tasks.append(task)
-
         for root, dirs, files in os.walk(path or "."):
             for filename in files:
                 if not regex.match(filename):
                     continue
@@ -621,15 +670,17 @@ def _add_task(_target, _task_arguments):
                     self.logger.debug(
                         "Discovered task target %s::%s", filepath, attr
                     )
+
+                    task_target_info = get_task_target_information(target)
                     task_arguments = dict(
                         target=attr,
                         module=module,
                         path=root,
-                        **target.__task_kwargs__,
+                        **task_target_info.task_kwargs,
                     )
 
-                    if target.__target_params__:
-                        for param in target.__target_params__:
+                    if task_target_info.target_params:
+                        for param in task_target_info.target_params:
                             if isinstance(param, dict):
                                 task_arguments["args"] = None
                                 task_arguments["kwargs"] = param
                                     " received: {param}"
                                 )
                             task_arguments["part"] = None
-                            _add_task(target, task_arguments)
+                            tasks.extend(
+                                self._get_tasks(
+                                    task_arguments,
+                                    task_target_info.multitest_parts,
+                                    runtime_data,
+                                )
+                            )
                     else:
-                        _add_task(target, task_arguments)
+                        tasks.extend(
+                            self._get_tasks(
+                                task_arguments,
+                                task_target_info.multitest_parts,
+                                runtime_data,
+                            )
+                        )
 
         return tasks
 
@@ -733,7 +796,7 @@
         tasks = self.discover(path=path, name_pattern=name_pattern)
         for task in tasks:
-            self.add(task, resource=resource)
+            self._add(task, resource=resource)
         pool: Pool = self.resources[resource]
         if pool.is_auto_size:
             pool_size = self.calculate_pool_size_by_tasks(
@@ -761,75 +824,62 @@ def add(
         :return: Assigned uid for test.
         :rtype: ``str`` or ```NoneType``
         """
+
+        # Materialize the test entity; verification happens in _add
+        task_info = self._collect_task_info(target)
+        return self._add(task_info, resource)
+
+    def _add(
+        self, task_info: TaskInformation, resource: Optional[str] = None
+    ) -> Optional[str]:
         local_runner = self.resources.first()
-        resource: Union[Executor, str, None] = resource or local_runner
+        resource: Union[str, None] = resource or local_runner
 
         if resource not in self.resources:
             raise RuntimeError(
                 'Resource "{}" does not exist.'.format(resource)
             )
 
-        # Get the real test entity and verify if it should be added
-        uid = self._verify_test_target(target)
-        if not uid:
+        self._verify_task_info(task_info)
+
+        uid = task_info.uid
+
+        # check whether the test has been filtered out
+        if not self._should_task_running(task_info):
             return None
 
-        # Reset the task uid which will be used for test result transport in
-        # a pool executor, it makes logging or debugging easier.
-        if isinstance(target, Task):
-            target._uid = uid
+        # "--list" option always means not executing tests
+        lister: Lister = self.cfg.test_lister
+        if lister is not None and not lister.metadata_based:
+            self.cfg.test_lister.log_test_info(task_info.materialized_test)
+            return None
 
-        # In batch mode the original target is added into executors, it can be:
-        # 1> A runnable object (generally a test entity or customized by user)
-        # 2> A callable that returns a runnable object
-        # 3> A task that wrapped a runnable object
-        if uid in self._tests:
-            raise ValueError(
-                '{} with uid "{}" already added.'.format(self._tests[uid], uid)
-            )
+        if self.cfg.interactive_port is not None:
+            self._register_task_for_interactive(task_info)
+            # for interactive mode always use the local runner
+            resource = local_runner
+
+        target = task_info.target
+        # when running in the local runner, enqueue the materialized test directly
+        if resource == local_runner:
+            target = task_info.materialized_test
+
+        self._register_task(
+            resource,
+            target,
+            uid,
+        )
+        return uid
 
+    def _register_task(self, resource, target, uid):
         self._tests[uid] = resource
         self.resources[resource].add(target, uid)
-        return uid
-
-    def _verify_test_target(
-        self, target: Union[Test, Task, Callable]
-    ) -> Optional[str]:
-        """
-        Materialize the test target, and:
-        - check uniqueness
-        - check against filter
-        - check against lister
-        - check runnable type
-        - cut corner for interactive mode
-
-        Return the runnable uid if it should run, otherwise None.
- """ - # The target added into TestRunner can be: 1> a real test entity - # 2> a task wraps a test entity 3> a callable returns a test entity - - if hasattr(target, "uid"): - key = target.uid() - else: - key = id(target) - - if key in self._verified_targets: - return self._verified_targets[key] - else: - self._verified_targets[key] = None + def _collect_task_info(self, target: TestTask) -> TaskInformation: if isinstance(target, Test): target_test = target elif isinstance(target, Task): target_test = target.materialize() - if self.cfg.interactive_port is not None and isinstance( - target._target, str - ): - self.scheduled_modules.append( - ( - target._module or target._target.rsplit(".", 1)[0], - os.path.abspath(target._path), - ) - ) elif callable(target): target_test = target() else: @@ -841,49 +891,54 @@ def _verify_test_target( target_test.parent = self target_test.cfg.parent = self.cfg - # verify runnable is unique uid = target_test.uid() + + # Reset the task uid which will be used for test result transport in + # a pool executor, it makes logging or debugging easier. + + # TODO: This mutating target should we do a copy? + if isinstance(target, Task): + target._uid = uid + + return TaskInformation(target, target_test, uid) + + def _register_task_for_interactive(self, task_info: TaskInformation): + target = task_info.target + if isinstance(target, Task) and isinstance(target._target, str): + self.scheduled_modules.append( + ( + target._module or target._target.rsplit(".", 1)[0], + os.path.abspath(target._path), + ) + ) + + def _verify_task_info(self, task_info: TaskInformation) -> None: + uid = task_info.uid + if uid in self._tests: + raise ValueError( + '{} with uid "{}" already added.'.format(self._tests[uid], uid) + ) + if uid in self._runnable_uids: raise RuntimeError( f"Runnable with uid {uid} has already been verified" ) else: + # TODO: this should be part of the add self._runnable_uids.add(uid) - self._verified_targets[key] = uid - - # if filter is defined + def _should_task_running(self, task_info: TaskInformation) -> bool: + should_run = True if type(self.cfg.test_filter) is not filtering.Filter: - should_run = target_test.should_run() + test = task_info.materialized_test + should_run = test.should_run() self.logger.debug( "Should run %s? 
%s", - target_test.name, + test.name, "Yes" if should_run else "No", ) - if not should_run: - return None - - # "--list" option always means not executing tests - if self.cfg.test_lister is not None: - self.cfg.test_lister.log_test_info(target_test) - return None - - if getattr(target, "__multitest_parts__", None): - if not isinstance(target_test, MultiTest): - raise TypeError( - "multitest_parts specified in @task_target," - " but the Runnable is not a MultiTest" - ) - # cut corner for interactive mode as we already have the runnable - # so add it to local runner and continue - if self.cfg.interactive_port is not None: - local_runner = self.resources.first() - self._tests[uid] = local_runner - self.resources[local_runner].add(target_test, uid) - return None - - return uid + return should_run def _record_start(self): self.report.timer.start("run") diff --git a/testplan/runners/pools/tasks/base.py b/testplan/runners/pools/tasks/base.py index 9a8ed84be..6ad2594c7 100644 --- a/testplan/runners/pools/tasks/base.py +++ b/testplan/runners/pools/tasks/base.py @@ -5,7 +5,17 @@ import os import warnings from collections import OrderedDict -from typing import Optional, Tuple, Union, Dict, Sequence, Callable +from dataclasses import dataclass +from typing import ( + Optional, + Tuple, + Union, + Dict, + Sequence, + Callable, + Any, + Literal, +) from testplan.testing.base import Test, TestResult from testplan.common.serialization import SelectiveSerializable @@ -345,9 +355,16 @@ def uid(self): return strings.uuid4() +@dataclass +class TaskTargetInformation: + target_params: Sequence[Union[Sequence, dict]] + task_kwargs: Dict[str, Any] + multitest_parts: Union[int, str, None] + + def task_target( parameters: Union[Callable, Sequence[Union[Sequence, dict]]] = None, - multitest_parts: Union[int, str, None] = None, + multitest_parts: Union[int, Literal["auto"], None] = None, **kwargs, ): """ @@ -369,30 +386,32 @@ def task_target( # `task_target` is used without parentheses, then `parameters` is the # real callable object (task target) to be decorated. if callable(parameters) and len(kwargs) == 0: - set_task_target(parameters) - parameters.__target_params__ = None - parameters.__task_kwargs__ = {} - return parameters + func = parameters + set_task_target(func, TaskTargetInformation(None, {}, None)) + return func def inner(func): - set_task_target(func) - func.__target_params__ = parameters - func.__multitest_parts__ = multitest_parts - func.__task_kwargs__ = kwargs + set_task_target( + func, TaskTargetInformation(parameters, kwargs, multitest_parts) + ) return func return inner -def set_task_target(func): +def set_task_target(func: Callable, info: TaskTargetInformation): """ Mark a callable object as a task target which can be packaged in a :py:class:`~testplan.runners.pools.tasks.base.Task` object. """ - func.__is_task_target__ = True + func.__task_target_info__ = info def is_task_target(func): """Check if a callable object is a task target.""" - return getattr(func, "__is_task_target__", False) + return getattr(func, "__task_target_info__", False) + + +def get_task_target_information(func) -> TaskTargetInformation: + return getattr(func, "__task_target_info__") diff --git a/testplan/testing/listing.py b/testplan/testing/listing.py index e15a2b39d..19928ff54 100644 --- a/testplan/testing/listing.py +++ b/testplan/testing/listing.py @@ -1,20 +1,37 @@ """ This module contains logic for listing representing test context of a plan. 
""" +import dataclasses +import json import os from enum import Enum +from typing import List, Union from testplan.common.utils.parser import ArgMixin from testplan.common.utils.logger import TESTPLAN_LOGGER from testplan.testing import tagging from testplan.testing.multitest import MultiTest +from testplan.testing.multitest.test_metadata import TestPlanMetadata INDENT = " " MAX_TESTCASES = 25 -class BaseLister: +class Listertype: + NAME = None + DESCRIPTION = None + + metadata_based = False + + def name(self): + return self.NAME + + def description(self): + return self.DESCRIPTION + + +class BaseLister(Listertype): """ Base of all listers, implement the :py:meth:`get_output` give it a name in :py:attr:`NAME` and a description in :py:attr:`DESCRIPTION` or alternatively @@ -22,9 +39,6 @@ class BaseLister: added to :py:data:`listing_registry`. """ - NAME = None - DESCRIPTION = None - def log_test_info(self, instance): output = self.get_output(instance) if output: @@ -33,12 +47,6 @@ def log_test_info(self, instance): def get_output(self, instance): raise NotImplementedError - def name(self): - return self.NAME - - def description(self): - return self.DESCRIPTION - class ExpandedNameLister(BaseLister): """ @@ -228,6 +236,33 @@ def get_descriptions(cls): return dict([(lister, lister.value.description()) for lister in cls]) +class MetadataBasedLister(Listertype): + """ + Base of all metadata based listers, implement the :py:meth:`get_output` give it a name in + :py:attr:`NAME` and a description in :py:attr:`DESCRIPTION` or alternatively + override :py:meth:`name` and/or :py:meth:`description` and it is good to be + added to :py:data:`listing_registry`. + """ + + metadata_based = True + + def log_test_info(self, metadata: TestPlanMetadata): + output = self.get_output(metadata) + if output: + TESTPLAN_LOGGER.user_info(output) + + def get_output(self, metadata: TestPlanMetadata): + raise NotImplementedError + + +class SimpleJsonLister(MetadataBasedLister): + NAME = "JSON" + DESCRIPTION = "Dump test information in json" + + def get_output(self, metadata: TestPlanMetadata): + return json.dumps(dataclasses.asdict(metadata)) + + class ListingRegistry: """ A registry to store listers, add listers to the :py:data:`listing_registry` @@ -235,7 +270,7 @@ class ListingRegistry: """ def __init__(self): - self.listers = [] + self.listers: List[Listertype] = [] def add_lister(self, lister): self.listers.append(lister) @@ -261,3 +296,7 @@ def to_arg(self): listing_registry.add_lister(ExpandedPatternLister()) listing_registry.add_lister(ExpandedNameLister()) listing_registry.add_lister(CountLister()) +listing_registry.add_lister(SimpleJsonLister()) + + +Lister = Union[BaseLister, MetadataBasedLister] diff --git a/testplan/testing/multitest/suite.py b/testplan/testing/multitest/suite.py index 3c0e430f7..38521e4a5 100644 --- a/testplan/testing/multitest/suite.py +++ b/testplan/testing/multitest/suite.py @@ -1,18 +1,25 @@ """Multitest testsuite/testcase module.""" import collections import copy +import dataclasses import functools import inspect import itertools import types import warnings -from typing import Optional +from typing import Optional, Callable from testplan import defaults from testplan.common.utils import interface, strings from testplan.testing import tagging from . 
 from . import parametrization
+from .test_metadata import (
+    TestCaseMetadata,
+    LocationMetadata,
+    TestSuiteMetadata,
+    ExtendedTestSuiteMetadata,
+)
 
 # Global variables
 __TESTCASES__ = []
@@ -20,6 +27,10 @@
 __GENERATED_TESTCASES__ = []
 
 
+TESTCASE_METADATA_ATTRIBUTE = "__testcase_metadata__"
+TESTSUITE_METADATA_ATTRIBUTE = "__testsuite_metadata__"
+
+
 def _reset_globals():
     # pylint: disable=global-statement
     global __TESTCASES__
@@ -433,6 +444,12 @@ def _testsuite(klass):
     # Suite resolved, clear global variables for resolving the next suite.
     _reset_globals()
 
+    setattr(
+        klass,
+        TESTSUITE_METADATA_ATTRIBUTE,
+        TestSuiteMetadata(klass.name, LocationMetadata.from_object(klass)),
+    )
+
     return klass
 
 
@@ -571,6 +588,10 @@ def _testcase(function):
     return _testcase_meta()(function)
 
 
+def add_testcase_metadata(func: Callable, metadata: TestCaseMetadata):
+    setattr(func, TESTCASE_METADATA_ATTRIBUTE, metadata)
+
+
 def _testcase_meta(
     name=None,
     tags=None,
@@ -652,6 +673,13 @@ def wrapper(function):
 
             __GENERATED_TESTCASES__.append(func)
 
+            add_testcase_metadata(
+                func,
+                TestCaseMetadata(
+                    func.__name__, LocationMetadata.from_object(function)
+                ),
+            )
+
             return function
 
         else:
@@ -678,6 +706,13 @@ def wrapper(function):
                 function = wrapper_func(function)
 
             __TESTCASES__.append(function.__name__)
+
+            add_testcase_metadata(
+                function,
+                TestCaseMetadata(
+                    function.__name__, LocationMetadata.from_object(function)
+                ),
+            )
             return function
 
     return wrapper
@@ -878,3 +913,19 @@ def inner(function):
         return function
 
     return inner
+
+
+def get_metadata(suite: type) -> ExtendedTestSuiteMetadata:
+    metadata = getattr(suite, TESTSUITE_METADATA_ATTRIBUTE)
+    testcase_metadata = [
+        getattr(
+            tc,
+            TESTCASE_METADATA_ATTRIBUTE,
+        )
+        for _, tc in inspect.getmembers(suite)
+        if hasattr(tc, TESTCASE_METADATA_ATTRIBUTE)
+    ]
+
+    return ExtendedTestSuiteMetadata(
+        **dataclasses.asdict(metadata), test_cases=testcase_metadata
+    )
diff --git a/testplan/testing/multitest/test_metadata.py b/testplan/testing/multitest/test_metadata.py
new file mode 100644
index 000000000..84b3add47
--- /dev/null
+++ b/testplan/testing/multitest/test_metadata.py
@@ -0,0 +1,47 @@
+from dataclasses import dataclass
+from inspect import getsourcefile, getsourcelines
+from typing import List
+
+
+@dataclass
+class LocationMetadata:
+
+    file: str
+    line_no: int
+
+    @classmethod
+    def from_object(cls, func):
+        file = getsourcefile(func)
+        _, line_no = getsourcelines(func)
+        return cls(file, line_no)
+
+
+@dataclass
+class TestCaseMetadata:
+
+    name: str
+    location: LocationMetadata
+
+
+@dataclass
+class TestSuiteMetadata:
+
+    name: str
+    location: LocationMetadata
+
+
+@dataclass
+class ExtendedTestSuiteMetadata(TestSuiteMetadata):
+    test_cases: List[TestCaseMetadata]
+
+
+@dataclass
+class TestMetadata:
+    name: str
+    test_suites: List[ExtendedTestSuiteMetadata]
+
+
+@dataclass
+class TestPlanMetadata:
+    name: str
+    tests: List[TestMetadata]
diff --git a/tests/unit/testplan/testing/test_listing.py b/tests/unit/testplan/testing/test_listing.py
index 4632d0791..228aae1c7 100644
--- a/tests/unit/testplan/testing/test_listing.py
+++ b/tests/unit/testplan/testing/test_listing.py
@@ -14,11 +14,18 @@
 
 def test_defaults():
-    assert len(listing_registry.listers) == 5
+    assert len(listing_registry.listers) == 6
     arg_enum = listing_registry.to_arg()
     assert issubclass(arg_enum, ArgMixin)
 
-    for enum in ["NAME", "NAME_FULL", "COUNT", "PATTERN", "PATTERN_FULL"]:
+    for enum in [
+        "NAME",
+        "NAME_FULL",
+        "COUNT",
+        "PATTERN",
+        "PATTERN_FULL",
+        "JSON",
+    ]:
         assert arg_enum[enum]
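
For orientation, a minimal sketch of how the reworked decorator is intended
to be driven (the module layout, the make_multitest name and the parameter
values below are illustrative, not part of this patch):

    from testplan.runners.pools.tasks.base import task_target
    from testplan.testing.multitest import MultiTest, testcase, testsuite


    @testsuite
    class SampleSuite:
        @testcase
        def sample_case(self, env, result):
            result.true(True)


    # One Task is created per `parameters` entry; dict entries become the
    # kwargs of the target. With multitest_parts="auto" the part count is
    # derived from runtime_data and cfg.auto_part_runtime_limit inside
    # _get_tasks(); an int pins it explicitly.
    @task_target(
        parameters=[{"name": "alpha"}, {"name": "beta"}],
        multitest_parts="auto",
    )
    def make_multitest(name):
        return MultiTest(name=name, suites=[SampleSuite()])

Targets like this are picked up by discover()/schedule_all(), and each part
is cloned into its own TaskInformation via _clone_task_for_part().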
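
A similar sketch for the metadata path feeding the new JSON lister, reusing
the SampleSuite above (the plan and test names are again made up):

    from testplan.testing.listing import SimpleJsonLister
    from testplan.testing.multitest.suite import get_metadata
    from testplan.testing.multitest.test_metadata import (
        TestMetadata,
        TestPlanMetadata,
    )

    # get_metadata() gathers the dataclasses that @testsuite/@testcase now
    # attach, so listing no longer needs to materialize the runnables.
    suite_meta = get_metadata(SampleSuite)

    lister = SimpleJsonLister()  # registered under the "JSON" enum name
    lister.log_test_info(
        TestPlanMetadata(
            name="sample_plan",
            tests=[TestMetadata(name="alpha", test_suites=[suite_meta])],
        )
    )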