diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d1d76b4e4d1..674542d4e63 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,7 +21,7 @@ jobs: python: ['3.7', '3.8', '3.9', '3.10', '3.11'] include: - os: 'macos-latest' - python: '3.7' + python: '3.8' steps: - name: Checkout uses: actions/checkout@v4 diff --git a/changes.d/6046.break.md b/changes.d/6046.break.md new file mode 100644 index 00000000000..06503d9c987 --- /dev/null +++ b/changes.d/6046.break.md @@ -0,0 +1,4 @@ +The `submit-fail` and `expire` task outputs must now be +[optional](https://cylc.github.io/cylc-doc/stable/html/glossary.html#term-optional-output) +and can no longer be +[required](https://cylc.github.io/cylc-doc/stable/html/glossary.html#term-required-output). diff --git a/changes.d/6046.feat.md b/changes.d/6046.feat.md new file mode 100644 index 00000000000..fb731b872dd --- /dev/null +++ b/changes.d/6046.feat.md @@ -0,0 +1,4 @@ +The condition that Cylc uses to evaluate task output completion can now be +customized in the `[runtime]` section with the new `completion` configuration. +This provides a more advanced way to check that tasks generate their required +outputs when run. diff --git a/cylc/flow/cfgspec/workflow.py b/cylc/flow/cfgspec/workflow.py index d22f0f415bb..a0cf51005f4 100644 --- a/cylc/flow/cfgspec/workflow.py +++ b/cylc/flow/cfgspec/workflow.py @@ -996,6 +996,150 @@ def get_script_common_text(this: str, example: Optional[str] = None): can be explicitly configured to provide or override default settings for all tasks in the workflow. '''): + Conf('completion', VDR.V_STRING, desc=''' + Define the condition for task output completion. + + The completion condition is evaluated when a task reaches + a final state - i.e. once it finished executing (``succeeded`` + or ``failed``) or it ``submit-failed``, or ``expired``. + It is a validation check which confirms that the + task has generated the outputs it was expected to. 
+ + If the task fails this check its outputs are considered + :term:`incomplete` and a warning will be raised alerting you + that something has gone wrong which requires investigation. + + .. note:: + + An event hook for this warning will follow in a future + release of Cylc. + + By default, the completion condition ensures that all required + outputs, i.e. outputs which appear in the graph but are not + marked as optional with the ``?`` character, are completed. + + E.g., in this example, the task ``foo`` must generate the + required outputs ``succeeded`` and ``x`` and it may or may not + generate the optional output ``y``: + + .. code-block:: cylc-graph + + foo => bar + foo:x => x + foo:y? => y + + The default completion condition would be this: + + .. code-block:: python + + # the task must succeed and generate the custom output "x" + succeeded and x + + You can override this default to suit your needs. E.g., in this + example, the task ``foo`` has three optional outputs, ``x``, + ``y`` and ``z``: + + .. code-block:: cylc-graph + + foo:x? => x + foo:y? => y + foo:z? => z + x | y | z => bar + + Because all three of these outputs are optional, if none of + them are generated, the task's outputs will still be + considered complete. + + If you wanted to require that at least one of these outputs is + generated you can configure the completion condition like so: + + .. code-block:: python + + # the task must succeed and generate at least one of the + # outputs "x" or "y" or "z": + succeeded and (x or y or z) + + .. note:: + + For the completion expression, hyphens in task outputs + must be replaced with underscores to allow evaluation by + Python, e.g.: + + .. code-block:: cylc + + [runtime] + [[foo]] + completion = succeeded and my_output # underscore + [[[outputs]]] + my-output = 'my custom task output' # hyphen + + .. note:: + + In some cases the ``succeeded`` output might not explicitly + appear in the graph, e.g: + + .. code-block:: cylc-graph + + foo:x? 
=> x + + In these cases succeess is presumed to be required unless + explicitly stated otherwise, either in the graph e.g: + + .. code-block:: cylc-graph + + foo? + foo:x? => x + + Or in the completion expression e.g: + + .. code-block:: cylc + + completion = x # no reference to succeeded + # or + completion = succeeded or failed # success is optional + + + .. hint:: + + If task outputs are optional in the graph they must also + be optional in the completion condition and vice versa. + + For example this graph conflicts with the completion + statement: + + .. code-block:: cylc-graph + + # "a" must succeed + a => b + + .. code-block:: cylc + + # "a" may either succeed or fail + completion = succeeded or failed + + Which could be fixed by ammending the graph like so: + + .. code-block:: cylc-graph + + # "a" may either succeed or fail + a? => b + + .. rubric:: Examples + + ``succeeded`` + The task must succeed. + ``succeeded or (failed and my_error)`` + The task can fail, but only if it also yields the custom + output ``my_error``. + ``succeeded and (x or y or z)`` + The task must succeed and yield at least one of the + custom outputs, x, y or z. + ``(a and b) or (c and d)`` + One pair of these outputs must be yielded for the task + to be complete. + + .. 
versionadded:: 8.3.0 + ''') Conf('platform', VDR.V_STRING, desc=''' The name of a compute resource defined in :cylc:conf:`global.cylc[platforms]` or diff --git a/cylc/flow/config.py b/cylc/flow/config.py index 4739dd0ffc9..05e5fc2f3cb 100644 --- a/cylc/flow/config.py +++ b/cylc/flow/config.py @@ -81,6 +81,7 @@ is_relative_to, ) from cylc.flow.print_tree import print_tree +from cylc.flow.task_qualifiers import ALT_QUALIFIERS from cylc.flow.simulation import configure_sim_modes from cylc.flow.subprocctx import SubFuncContext from cylc.flow.task_events_mgr import ( @@ -89,8 +90,14 @@ ) from cylc.flow.task_id import TaskID from cylc.flow.task_outputs import ( + TASK_OUTPUT_FAILED, + TASK_OUTPUT_FINISHED, TASK_OUTPUT_SUCCEEDED, - TaskOutputs + TaskOutputs, + get_completion_expression, + get_optional_outputs, + get_trigger_completion_variable_maps, + trigger_to_completion_variable, ) from cylc.flow.task_trigger import TaskTrigger, Dependency from cylc.flow.taskdef import TaskDef @@ -497,6 +504,8 @@ def __init__( self.load_graph() self.mem_log("config.py: after load_graph()") + self._set_completion_expressions() + self.process_runahead_limit() run_mode = self.run_mode() @@ -985,6 +994,216 @@ def _check_sequence_bounds(self): ) LOG.warning(msg) + def _set_completion_expressions(self): + """Sets and checks completion expressions for each task. + + If a task does not have a user-defined completion expression, then set + one according to the default rules. + + If a task does have a used-defined completion expression, then ensure + it is consistent with the use of outputs in the graph. 
+ """ + for name, taskdef in self.taskdefs.items(): + expr = taskdef.rtconfig['completion'] + if expr: + # check the user-defined expression + self._check_completion_expression(name, expr) + else: + # derive a completion expression for this taskdef + expr = get_completion_expression(taskdef) + + # update both the sparse and dense configs to make these values + # visible to "cylc config" to make the completion expression more + # transparent to users. + # NOTE: we have to update both because we are setting this value + # late on in the process after the dense copy has been made + self.pcfg.sparse.setdefault( + 'runtime', {} + ).setdefault( + name, {} + )['completion'] = expr + self.pcfg.dense['runtime'][name]['completion'] = expr + + # update the task's runtime config to make this value visible to + # the data store + # NOTE: we have to do this because we are setting this value late + # on after the TaskDef has been created + taskdef.rtconfig['completion'] = expr + + def _check_completion_expression(self, task_name: str, expr: str) -> None: + """Checks a user-defined completion expression. + + Args: + task_name: + The name of the task we are checking. + expr: + The completion expression as defined in the config. + + """ + # check completion expressions are not being used in compat mode + if cylc.flow.flags.cylc7_back_compat: + raise WorkflowConfigError( + '[runtime][]completion cannot be used' + ' in Cylc 7 compatibility mode.' + ) + + # check for invalid triggers in the expression + if 'submit-failed' in expr: + raise WorkflowConfigError( + f'Error in [runtime][{task_name}]completion:' + f'\nUse "submit_failed" rather than "submit-failed"' + ' in completion expressions.' + ) + elif '-' in expr: + raise WorkflowConfigError( + f'Error in [runtime][{task_name}]completion:' + f'\n {expr}' + '\nReplace hyphens with underscores in task outputs when' + ' used in completion expressions.' 
+ ) + + # get the outputs and completion expression for this task + try: + outputs = self.taskdefs[task_name].outputs + except KeyError: + # this is a family -> we'll check integrity for each task that + # inherits from it + return + + ( + _trigger_to_completion_variable, + _completion_variable_to_trigger, + ) = get_trigger_completion_variable_maps(outputs.keys()) + + # get the optional/required outputs defined in the graph + graph_optionals = { + # completion_variable: is_optional + _trigger_to_completion_variable[trigger]: ( + None if is_required is None else not is_required + ) + for trigger, (_, is_required) + in outputs.items() + } + if ( + graph_optionals[TASK_OUTPUT_SUCCEEDED] is True + and graph_optionals[TASK_OUTPUT_FAILED] is None + ): + # failed is implicitly optional if succeeded is optional + # https://github.com/cylc/cylc-flow/pull/6046#issuecomment-2059266086 + graph_optionals[TASK_OUTPUT_FAILED] = True + + # get the optional/required outputs defined in the expression + try: + # this involves running the expression which also validates it + expression_optionals = get_optional_outputs(expr, outputs) + except NameError as exc: + # expression references an output which has not been registered + error = exc.args[0][5:] + + if f"'{TASK_OUTPUT_FINISHED}'" in error: + # the finished output cannot be used in completion expressions + # see proposal point 5:: + # https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + raise WorkflowConfigError( + f'Error in [runtime][{task_name}]completion:' + f'\n {expr}' + '\nThe "finished" output cannot be used in completion' + ' expressions, use "succeeded or failed".' 
+ ) + + for alt_qualifier, qualifier in ALT_QUALIFIERS.items(): + _alt_compvar = trigger_to_completion_variable(alt_qualifier) + _compvar = trigger_to_completion_variable(qualifier) + if re.search(rf'\b{_alt_compvar}\b', error): + raise WorkflowConfigError( + f'Error in [runtime][{task_name}]completion:' + f'\n {expr}' + f'\nUse "{_compvar}" not "{_alt_compvar}" ' + 'in completion expressions.' + ) + + raise WorkflowConfigError( + # NOTE: str(exc) == "name 'x' is not defined" tested in + # tests/integration/test_optional_outputs.py + f'Error in [runtime][{task_name}]completion:' + f'\n{error}' + ) + except Exception as exc: # includes InvalidCompletionExpression + # expression contains non-whitelisted syntax or any other error in + # the expression e.g. SyntaxError + raise WorkflowConfigError( + f'Error in [runtime][{task_name}]completion:' + f'\n{str(exc)}' + ) + + # ensure consistency between the graph and the completion expression + for compvar in ( + { + *graph_optionals, + *expression_optionals + } + ): + # is the output optional in the graph? + graph_opt = graph_optionals.get(compvar) + # is the output optional in the completion expression? + expr_opt = expression_optionals.get(compvar) + + # True = is optional + # False = is required + # None = is not referenced + + # graph_opt expr_opt + # True True ok + # True False not ok + # True None not ok [1] + # False True not ok [1] + # False False ok + # False None not ok + # None True ok + # None False ok + # None None ok + + # [1] applies only to "submit-failed" and "expired" + + trigger = _completion_variable_to_trigger[compvar] + + if graph_opt is True and expr_opt is False: + raise WorkflowConfigError( + f'{task_name}:{trigger} is optional in the graph' + ' (? 
symbol), but required in the completion' + f' expression:\n{expr}' + ) + + if graph_opt is False and expr_opt is None: + raise WorkflowConfigError( + f'{task_name}:{trigger} is required in the graph,' + ' but not referenced in the completion' + f' expression\n{expr}' + ) + + if ( + graph_opt is True + and expr_opt is None + and compvar in {'submit_failed', 'expired'} + ): + raise WorkflowConfigError( + f'{task_name}:{trigger} is permitted in the graph' + ' but is not referenced in the completion' + ' expression (so is not permitted by it).' + f'\nTry: completion = "{expr} or {compvar}"' + ) + + if ( + graph_opt is False + and expr_opt is True + and compvar not in {'submit_failed', 'expired'} + ): + raise WorkflowConfigError( + f'{task_name}:{trigger} is required in the graph,' + ' but optional in the completion expression' + f'\n{expr}' + ) + def _expand_name_list(self, orig_names): """Expand any parameters in lists of names.""" name_expander = NameExpander(self.parameters) diff --git a/cylc/flow/data_messages.proto b/cylc/flow/data_messages.proto index 6068bb1c5df..bc0355e6d4c 100644 --- a/cylc/flow/data_messages.proto +++ b/cylc/flow/data_messages.proto @@ -127,6 +127,7 @@ message PbRuntime { optional string directives = 15; optional string environment = 16; optional string outputs = 17; + optional string completion = 18; } diff --git a/cylc/flow/data_messages_pb2.py b/cylc/flow/data_messages_pb2.py index 82c620bcacf..5ecb96fc122 100644 --- a/cylc/flow/data_messages_pb2.py +++ b/cylc/flow/data_messages_pb2.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: data_messages.proto +# Protobuf Python Version: 4.25.3 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool @@ -14,7 +15,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xd4\x0c\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 
\x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! \x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x12\x1b\n\x0estates_updated\x18\' \x01(\x08H\x1b\x88\x01\x01\x12\x1c\n\x0fn_edge_distance\x18( \x01(\x05H\x1c\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 
\x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_totalB\x11\n\x0f_states_updatedB\x12\n\x10_n_edge_distance\"\xb9\x06\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 
\x01(\tH\x10\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputs\"\x9d\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! 
\x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtimeJ\x04\x08\x1d\x10\x1e\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 
\x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\x91\x08\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 
\x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x1d \x01(\x05H\x0f\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xae\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 
\x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x16 \x01(\x05H\x0f\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 
\x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 
\x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 
\x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xd4\x0c\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 
\x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! \x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x12\x1b\n\x0estates_updated\x18\' \x01(\x08H\x1b\x88\x01\x01\x12\x1c\n\x0fn_edge_distance\x18( \x01(\x05H\x1c\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 
\x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_totalB\x11\n\x0f_states_updatedB\x12\n\x10_n_edge_distance\"\xe1\x06\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x17\n\ncompletion\x18\x12 
\x01(\tH\x11\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputsB\r\n\x0b_completion\"\x9d\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! 
\x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtimeJ\x04\x08\x1d\x10\x1e\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 
\x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\x91\x08\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 
\x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x1d \x01(\x05H\x0f\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xae\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 
\x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x16 \x01(\x05H\x0f\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 
\x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 
\x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 
\x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -46,55 +47,55 @@ _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_start=1493 _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_end=1566 _globals['_PBRUNTIME']._serialized_start=2014 - _globals['_PBRUNTIME']._serialized_end=2839 - _globals['_PBJOB']._serialized_start=2842 - _globals['_PBJOB']._serialized_end=3511 - _globals['_PBTASK']._serialized_start=3514 - _globals['_PBTASK']._serialized_end=3868 - _globals['_PBPOLLTASK']._serialized_start=3871 - _globals['_PBPOLLTASK']._serialized_end=4087 - _globals['_PBCONDITION']._serialized_start=4090 - _globals['_PBCONDITION']._serialized_end=4293 - _globals['_PBPREREQUISITE']._serialized_start=4296 - _globals['_PBPREREQUISITE']._serialized_end=4446 - _globals['_PBOUTPUT']._serialized_start=4449 - _globals['_PBOUTPUT']._serialized_end=4589 - _globals['_PBTRIGGER']._serialized_start=4592 - _globals['_PBTRIGGER']._serialized_end=4757 - _globals['_PBTASKPROXY']._serialized_start=4760 - _globals['_PBTASKPROXY']._serialized_end=5801 - _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_start=5411 - _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_end=5468 - _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_start=5470 - _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_end=5537 - _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_start=5539 - _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_end=5599 - _globals['_PBFAMILY']._serialized_start=5804 - _globals['_PBFAMILY']._serialized_end=6132 - _globals['_PBFAMILYPROXY']._serialized_start=6135 - _globals['_PBFAMILYPROXY']._serialized_end=6949 + 
_globals['_PBRUNTIME']._serialized_end=2879 + _globals['_PBJOB']._serialized_start=2882 + _globals['_PBJOB']._serialized_end=3551 + _globals['_PBTASK']._serialized_start=3554 + _globals['_PBTASK']._serialized_end=3908 + _globals['_PBPOLLTASK']._serialized_start=3911 + _globals['_PBPOLLTASK']._serialized_end=4127 + _globals['_PBCONDITION']._serialized_start=4130 + _globals['_PBCONDITION']._serialized_end=4333 + _globals['_PBPREREQUISITE']._serialized_start=4336 + _globals['_PBPREREQUISITE']._serialized_end=4486 + _globals['_PBOUTPUT']._serialized_start=4489 + _globals['_PBOUTPUT']._serialized_end=4629 + _globals['_PBTRIGGER']._serialized_start=4632 + _globals['_PBTRIGGER']._serialized_end=4797 + _globals['_PBTASKPROXY']._serialized_start=4800 + _globals['_PBTASKPROXY']._serialized_end=5841 + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_start=5451 + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_end=5508 + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_start=5510 + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_end=5577 + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_start=5579 + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_end=5639 + _globals['_PBFAMILY']._serialized_start=5844 + _globals['_PBFAMILY']._serialized_end=6172 + _globals['_PBFAMILYPROXY']._serialized_start=6175 + _globals['_PBFAMILYPROXY']._serialized_end=6989 _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_start=1441 _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_end=1491 - _globals['_PBEDGE']._serialized_start=6952 - _globals['_PBEDGE']._serialized_end=7140 - _globals['_PBEDGES']._serialized_start=7142 - _globals['_PBEDGES']._serialized_end=7265 - _globals['_PBENTIREWORKFLOW']._serialized_start=7268 - _globals['_PBENTIREWORKFLOW']._serialized_end=7510 - _globals['_EDELTAS']._serialized_start=7513 - _globals['_EDELTAS']._serialized_end=7688 - _globals['_FDELTAS']._serialized_start=7691 - _globals['_FDELTAS']._serialized_end=7870 - 
_globals['_FPDELTAS']._serialized_start=7873 - _globals['_FPDELTAS']._serialized_end=8063 - _globals['_JDELTAS']._serialized_start=8066 - _globals['_JDELTAS']._serialized_end=8239 - _globals['_TDELTAS']._serialized_start=8242 - _globals['_TDELTAS']._serialized_end=8417 - _globals['_TPDELTAS']._serialized_start=8420 - _globals['_TPDELTAS']._serialized_end=8606 - _globals['_WDELTAS']._serialized_start=8609 - _globals['_WDELTAS']._serialized_end=8804 - _globals['_ALLDELTAS']._serialized_start=8807 - _globals['_ALLDELTAS']._serialized_end=9016 + _globals['_PBEDGE']._serialized_start=6992 + _globals['_PBEDGE']._serialized_end=7180 + _globals['_PBEDGES']._serialized_start=7182 + _globals['_PBEDGES']._serialized_end=7305 + _globals['_PBENTIREWORKFLOW']._serialized_start=7308 + _globals['_PBENTIREWORKFLOW']._serialized_end=7550 + _globals['_EDELTAS']._serialized_start=7553 + _globals['_EDELTAS']._serialized_end=7728 + _globals['_FDELTAS']._serialized_start=7731 + _globals['_FDELTAS']._serialized_end=7910 + _globals['_FPDELTAS']._serialized_start=7913 + _globals['_FPDELTAS']._serialized_end=8103 + _globals['_JDELTAS']._serialized_start=8106 + _globals['_JDELTAS']._serialized_end=8279 + _globals['_TDELTAS']._serialized_start=8282 + _globals['_TDELTAS']._serialized_end=8457 + _globals['_TPDELTAS']._serialized_start=8460 + _globals['_TPDELTAS']._serialized_end=8646 + _globals['_WDELTAS']._serialized_start=8649 + _globals['_WDELTAS']._serialized_end=8844 + _globals['_ALLDELTAS']._serialized_start=8847 + _globals['_ALLDELTAS']._serialized_end=9056 # @@protoc_insertion_point(module_scope) diff --git a/cylc/flow/data_store_mgr.py b/cylc/flow/data_store_mgr.py index 744daeb4dda..f49c5bd9eaa 100644 --- a/cylc/flow/data_store_mgr.py +++ b/cylc/flow/data_store_mgr.py @@ -247,6 +247,7 @@ def runtime_from_config(rtconfig): return PbRuntime( platform=platform, script=rtconfig['script'], + completion=rtconfig['completion'], init_script=rtconfig['init-script'], 
env_script=rtconfig['env-script'], err_script=rtconfig['err-script'], @@ -1440,7 +1441,7 @@ def apply_task_proxy_db_history(self): ) ): for message in json.loads(outputs_str): - itask.state.outputs.set_completion(message, True) + itask.state.outputs.set_message_complete(message) # Gather tasks with flow id. prereq_ids.add(f'{relative_id}/{flow_nums_str}') @@ -1502,7 +1503,7 @@ def _process_internal_task_proxy(self, itask, tproxy): del tproxy.prerequisites[:] tproxy.prerequisites.extend(prereq_list) - for label, message, satisfied in itask.state.outputs.get_all(): + for label, message, satisfied in itask.state.outputs: output = tproxy.outputs[label] output.label = label output.message = message @@ -2393,10 +2394,8 @@ def delta_task_output( tp_id, tproxy = self.store_node_fetcher(itask.tokens) if not tproxy: return - item = itask.state.outputs.get_item(message) - if item is None: - return - label, _, satisfied = item + outputs = itask.state.outputs + label = outputs.get_trigger(message) # update task instance update_time = time() tp_delta = self.updated[TASK_PROXIES].setdefault( @@ -2405,7 +2404,7 @@ def delta_task_output( output = tp_delta.outputs[label] output.label = label output.message = message - output.satisfied = satisfied + output.satisfied = outputs.is_message_complete(message) output.time = update_time self.updates_pending = True @@ -2425,9 +2424,10 @@ def delta_task_outputs(self, itask: TaskProxy) -> None: tp_delta = self.updated[TASK_PROXIES].setdefault( tp_id, PbTaskProxy(id=tp_id)) tp_delta.stamp = f'{tp_id}@{update_time}' - for label, _, satisfied in itask.state.outputs.get_all(): - output = tp_delta.outputs[label] - output.label = label + for trigger, message, satisfied in itask.state.outputs: + output = tp_delta.outputs[trigger] + output.label = trigger + output.message = message output.satisfied = satisfied output.time = update_time diff --git a/cylc/flow/exceptions.py b/cylc/flow/exceptions.py index 0800a914888..9881631484b 100644 --- 
a/cylc/flow/exceptions.py +++ b/cylc/flow/exceptions.py @@ -487,3 +487,16 @@ def __str__(self): ) else: return "Installed workflow is not compatible with Cylc 8." + + +class InvalidCompletionExpression(CylcError): + """For the [runtime][]completion configuration. + + Raised when non-whitelisted syntax is present. + """ + def __init__(self, message, expr=None): + self.message = message + self.expr = expr + + def __str__(self): + return self.message diff --git a/cylc/flow/graph_parser.py b/cylc/flow/graph_parser.py index 16334fc2a9a..64dcdecaf6f 100644 --- a/cylc/flow/graph_parser.py +++ b/cylc/flow/graph_parser.py @@ -760,9 +760,14 @@ def _set_output_opt( if suicide: return - if output == TASK_OUTPUT_EXPIRED and not optional: - raise GraphParseError( - f"Expired-output {name}:{output} must be optional") + if ( + output in {TASK_OUTPUT_EXPIRED, TASK_OUTPUT_SUBMIT_FAILED} + and not optional + ): + # ":expire" and ":submit-fail" cannot be required + # proposal point 4: + # https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + raise GraphParseError(f"{name}:{output} must be optional") if output == TASK_OUTPUT_FINISHED: # Interpret :finish pseudo-output diff --git a/cylc/flow/host_select.py b/cylc/flow/host_select.py index 383855039fa..0eb34d088ca 100644 --- a/cylc/flow/host_select.py +++ b/cylc/flow/host_select.py @@ -14,7 +14,53 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -"""Functionality for selecting a host from pre-defined list.""" +"""Functionality for selecting a host from pre-defined list. + +Ranking/filtering hosts can be achieved using Python expressions which work +with the `psutil` interfaces. + +These expressions are used-defined, buy run a restricted evluation environment +where only certain whitelisted operations are permitted. 
+ +Examples: + >>> RankingExpressionEvaluator('1 + 1') + 2 + >>> RankingExpressionEvaluator('1 * -1') + -1 + >>> RankingExpressionEvaluator('1 < a', a=2) + True + >>> RankingExpressionEvaluator('1 in (1, 2, 3)') + True + >>> import psutil + >>> RankingExpressionEvaluator( + ... 'a.available > 0', + ... a=psutil.virtual_memory() + ... ) + True + + If you try to get it to do something you're not allowed to: + >>> RankingExpressionEvaluator('open("foo")') + Traceback (most recent call last): + ValueError: Invalid expression: open("foo") + "Call" not permitted + + >>> RankingExpressionEvaluator('import sys') + Traceback (most recent call last): + ValueError: invalid syntax: import sys + + If you try to get hold of something you aren't supposed to: + >>> answer = 42 # only variables explicitly passed in should work + >>> RankingExpressionEvaluator('answer') + Traceback (most recent call last): + NameError: name 'answer' is not defined + + If you try to do something which doesn't make sense: + >>> RankingExpressionEvaluator('a.b.c') # no value "a.b.c" + Traceback (most recent call last): + NameError: name 'a' is not defined + +""" + import ast from collections import namedtuple from functools import lru_cache @@ -35,6 +81,22 @@ from cylc.flow.hostuserutil import get_fqdn_by_host, is_remote_host from cylc.flow.remote import run_cmd, cylc_server_cmd from cylc.flow.terminal import parse_dirty_json +from cylc.flow.util import restricted_evaluator + + +# evaluates ranking expressions +# (see module docstring for examples) +RankingExpressionEvaluator = restricted_evaluator( + ast.Expression, + # variables + ast.Name, ast.Load, ast.Attribute, ast.Subscript, ast.Index, + # opers + ast.BinOp, ast.operator, ast.UnaryOp, ast.unaryop, + # types + ast.Num, ast.Str, + # comparisons + ast.Compare, ast.cmpop, ast.List, ast.Tuple, +) GLBL_CFG_STR = 'global.cylc[scheduler][run hosts]ranking' @@ -301,7 +363,10 @@ def _filter_by_ranking(hosts, rankings, results, data=None): for key, 
expression in rankings: item = _reformat_expr(key, expression) try: - result = _simple_eval(expression, RESULT=results[host][key]) + result = RankingExpressionEvaluator( + expression, + RESULT=results[host][key], + ) except Exception as exc: raise GlobalConfigError( 'Invalid host ranking expression' @@ -334,84 +399,6 @@ def _filter_by_ranking(hosts, rankings, results, data=None): ) -class SimpleVisitor(ast.NodeVisitor): - """Abstract syntax tree node visitor for simple safe operations.""" - - def visit(self, node): - if not isinstance(node, self.whitelist): - # permit only whitelisted operations - raise ValueError(type(node)) - return super().visit(node) - - whitelist = ( - ast.Expression, - # variables - ast.Name, ast.Load, ast.Attribute, ast.Subscript, ast.Index, - # opers - ast.BinOp, ast.operator, ast.UnaryOp, ast.unaryop, - # types - ast.Num, ast.Str, - # comparisons - ast.Compare, ast.cmpop, ast.List, ast.Tuple, - ) - - -def _simple_eval(expr, **variables): - """Safely evaluates simple python expressions. - - Supports a minimal subset of Python operators: - * Binary operations - * Simple comparisons - - Supports a minimal subset of Python data types: - * Numbers - * Strings - * Tuples - * Lists - - Examples: - >>> _simple_eval('1 + 1') - 2 - >>> _simple_eval('1 * -1') - -1 - >>> _simple_eval('1 < a', a=2) - True - >>> _simple_eval('1 in (1, 2, 3)') - True - >>> import psutil - >>> _simple_eval('a.available > 0', a=psutil.virtual_memory()) - True - - If you try to get it to do something you're not allowed to: - >>> _simple_eval('open("foo")') - Traceback (most recent call last): - ValueError: - >>> _simple_eval('import sys') - Traceback (most recent call last): - SyntaxError: ... 
- - If you try to get hold of something you aren't supposed to: - >>> answer = 42 # only variables explicitly passed in should work - >>> _simple_eval('answer') - Traceback (most recent call last): - NameError: name 'answer' is not defined - - If you try to do something which doesn't make sense: - >>> _simple_eval('a.b.c') # no value "a.b.c" - Traceback (most recent call last): - NameError: name 'a' is not defined - - """ - node = ast.parse(expr.strip(), mode='eval') - SimpleVisitor().visit(node) - # acceptable use of eval due to restricted language features - return eval( # nosec - compile(node, '', 'eval'), - {'__builtins__': {}}, - variables - ) - - def _get_rankings(string): """Yield parsed ranking expressions. diff --git a/cylc/flow/loggingutil.py b/cylc/flow/loggingutil.py index 35884729ea0..3d77bdcb037 100644 --- a/cylc/flow/loggingutil.py +++ b/cylc/flow/loggingutil.py @@ -33,7 +33,7 @@ import textwrap from typing import List, Optional, Union -from ansimarkup import parse as cparse +from ansimarkup import parse as cparse, strip as cstrip from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.wallclock import get_time_string_from_unix_time @@ -53,10 +53,10 @@ class CylcLogFormatter(logging.Formatter): """ COLORS = { - 'CRITICAL': cparse('{0}'), - 'ERROR': cparse('{0}'), - 'WARNING': cparse('{0}'), - 'DEBUG': cparse('{0}') + 'CRITICAL': '{0}', + 'ERROR': '{0}', + 'WARNING': '{0}', + 'DEBUG': '{0}' } # default hard-coded max width for log entries @@ -99,7 +99,9 @@ def format(self, record): # noqa: A003 (method name not local) if not self.timestamp: _, text = text.split(' ', 1) # ISO8601 time points have no spaces if self.color and record.levelname in self.COLORS: - text = self.COLORS[record.levelname].format(text) + text = cparse(self.COLORS[record.levelname].format(text)) + elif not self.color: + text = cstrip(text) if self.max_width: return '\n'.join( line @@ -329,7 +331,7 @@ def _filter(self, record): def re_formatter(log_string): """Read in an 
uncoloured log_string file and apply colour formatting.""" for sub, repl in LOG_LEVEL_REGEXES: - log_string = sub.sub(repl, log_string) + log_string = cparse(sub.sub(repl, log_string)) return log_string diff --git a/cylc/flow/network/schema.py b/cylc/flow/network/schema.py index a1ae2ea26f7..8da9f79f10e 100644 --- a/cylc/flow/network/schema.py +++ b/cylc/flow/network/schema.py @@ -20,7 +20,6 @@ from functools import partial import json from operator import attrgetter -from textwrap import dedent from typing import ( TYPE_CHECKING, AsyncGenerator, @@ -39,6 +38,10 @@ from cylc.flow import LOG_LEVELS from cylc.flow.broadcast_mgr import ALL_CYCLE_POINTS_STRS, addict +from cylc.flow.data_store_mgr import ( + FAMILIES, FAMILY_PROXIES, JOBS, TASKS, TASK_PROXIES, + DELTA_ADDED, DELTA_UPDATED +) from cylc.flow.flow_mgr import FLOW_ALL, FLOW_NEW, FLOW_NONE from cylc.flow.id import Tokens from cylc.flow.task_outputs import SORT_ORDERS @@ -54,10 +57,7 @@ TASK_STATUS_FAILED, TASK_STATUS_SUCCEEDED ) -from cylc.flow.data_store_mgr import ( - FAMILIES, FAMILY_PROXIES, JOBS, TASKS, TASK_PROXIES, - DELTA_ADDED, DELTA_UPDATED -) +from cylc.flow.util import sstrip from cylc.flow.workflow_status import StopMode if TYPE_CHECKING: @@ -65,23 +65,6 @@ from cylc.flow.network.resolvers import BaseResolvers -def sstrip(text): - """Simple function to dedent and strip text. - - Examples: - >>> print(sstrip(''' - ... foo - ... bar - ... baz - ... 
''')) - foo - bar - baz - - """ - return dedent(text).strip() - - def sort_elements(elements, args): """Sort iterable of elements by given attribute.""" sort_args = args.get('sort') @@ -807,6 +790,7 @@ class Meta: """) platform = String(default_value=None) script = String(default_value=None) + completion = String(default_value=None) init_script = String(default_value=None) env_script = String(default_value=None) err_script = String(default_value=None) diff --git a/cylc/flow/scripts/show.py b/cylc/flow/scripts/show.py index dae42f637b8..b146f339e10 100755 --- a/cylc/flow/scripts/show.py +++ b/cylc/flow/scripts/show.py @@ -40,6 +40,7 @@ import re import json import sys +from textwrap import indent from typing import Any, Dict, TYPE_CHECKING from ansimarkup import ansiprint @@ -51,6 +52,7 @@ from cylc.flow.id import Tokens from cylc.flow.id_cli import parse_ids from cylc.flow.network.client_factory import get_client +from cylc.flow.task_outputs import TaskOutputs from cylc.flow.task_state import ( TASK_STATUSES_ORDERED, TASK_STATUS_RUNNING @@ -60,6 +62,7 @@ ID_MULTI_ARG_DOC, ) from cylc.flow.terminal import cli_function +from cylc.flow.util import BOOL_SYMBOLS if TYPE_CHECKING: @@ -135,16 +138,39 @@ label satisfied } + runtime { + completion + } } } ''' +SATISFIED = BOOL_SYMBOLS[True] +UNSATISFIED = BOOL_SYMBOLS[False] + + def print_msg_state(msg, state): if state: - ansiprint(f' + {msg}') + ansiprint(f' {SATISFIED} {msg}') else: - ansiprint(f' - {msg}') + ansiprint(f' {UNSATISFIED} {msg}') + + +def print_completion_state(t_proxy): + # create task outputs object + outputs = TaskOutputs(t_proxy["runtime"]["completion"]) + + for output in t_proxy['outputs']: + outputs.add(output['label'], output['message']) + if output['satisfied']: + outputs.set_message_complete(output['message']) + + ansiprint( + f'output completion:' + f' {"complete" if outputs.is_complete() else "incomplete"}' + f'\n{indent(outputs.format_completion_status(ansimarkup=2), " ")}' + ) def 
flatten_data(data, flat_data=None): @@ -316,14 +342,16 @@ async def prereqs_and_outputs_query( ansiprint(f"{pre_txt} (n/a for past tasks)") else: ansiprint( - f"{pre_txt} ('-': not satisfied)") + f"{pre_txt}" + f" ('{UNSATISFIED}': not satisfied)" + ) for _, prefix, msg, state in prereqs: print_msg_state(f'{prefix}{msg}', state) # outputs ansiprint( 'outputs:' - " ('-': not completed)") + f" ('{UNSATISFIED}': not completed)") if not t_proxy['outputs']: # (Not possible - standard outputs) print(' (None)') for output in t_proxy['outputs']: @@ -334,7 +362,9 @@ async def prereqs_and_outputs_query( or t_proxy['xtriggers'] ): ansiprint( - "other: ('-': not satisfied)") + "other:" + f" ('{UNSATISFIED}': not satisfied)" + ) for ext_trig in t_proxy['externalTriggers']: state = ext_trig['satisfied'] print_msg_state( @@ -346,6 +376,9 @@ async def prereqs_and_outputs_query( print_msg_state( f'xtrigger "{xtrig["label"]} = {label}"', state) + + print_completion_state(t_proxy) + if not results['taskProxies']: ansiprint( f"No matching active tasks found: {', '.join(ids_list)}", diff --git a/cylc/flow/task_events_mgr.py b/cylc/flow/task_events_mgr.py index e3275d76aed..82960a312bf 100644 --- a/cylc/flow/task_events_mgr.py +++ b/cylc/flow/task_events_mgr.py @@ -661,6 +661,7 @@ def process_message( True: if polling is required to confirm a reversal of status. 
""" + # Log messages if event_time is None: event_time = get_current_time_string() @@ -696,10 +697,9 @@ def process_message( if message.startswith(ABORT_MESSAGE_PREFIX): msg0 = TASK_OUTPUT_FAILED - completed_output = None + completed_output: Optional[bool] = False if msg0 not in [TASK_OUTPUT_SUBMIT_FAILED, TASK_OUTPUT_FAILED]: - completed_output = itask.state.outputs.set_msg_trg_completion( - message=msg0, is_completed=True) + completed_output = itask.state.outputs.set_message_complete(msg0) if completed_output: self.data_store_mgr.delta_task_output(itask, msg0) @@ -832,8 +832,9 @@ def process_message( # Message of a custom task output. # No state change. # Log completion of o (not needed for standard outputs) - LOG.info(f"[{itask}] completed output {completed_output}") - self.setup_event_handlers(itask, completed_output, message) + trigger = itask.state.outputs.get_trigger(message) + LOG.info(f"[{itask}] completed output {trigger}") + self.setup_event_handlers(itask, trigger, message) self.spawn_children(itask, msg0) else: @@ -1315,8 +1316,7 @@ def _process_message_failed(self, itask, event_time, message, forced): if itask.state_reset(TASK_STATUS_FAILED, forced=forced): self.setup_event_handlers(itask, self.EVENT_FAILED, message) self.data_store_mgr.delta_task_state(itask) - itask.state.outputs.set_msg_trg_completion( - message=TASK_OUTPUT_FAILED, is_completed=True) + itask.state.outputs.set_message_complete(TASK_OUTPUT_FAILED) self.data_store_mgr.delta_task_output( itask, TASK_OUTPUT_FAILED) self.data_store_mgr.delta_task_state(itask) @@ -1417,8 +1417,9 @@ def _process_message_submit_failed( self.setup_event_handlers( itask, self.EVENT_SUBMIT_FAILED, f'job {self.EVENT_SUBMIT_FAILED}') - itask.state.outputs.set_msg_trg_completion( - message=TASK_OUTPUT_SUBMIT_FAILED, is_completed=True) + itask.state.outputs.set_message_complete( + TASK_OUTPUT_SUBMIT_FAILED + ) self.data_store_mgr.delta_task_output( itask, TASK_OUTPUT_SUBMIT_FAILED) 
self.data_store_mgr.delta_task_state(itask) @@ -1462,7 +1463,7 @@ def _process_message_submitted( itask.set_summary_time('started', event_time) if itask.state_reset(TASK_STATUS_RUNNING, forced=forced): self.data_store_mgr.delta_task_state(itask) - itask.state.outputs.set_completion(TASK_OUTPUT_STARTED, True) + itask.state.outputs.set_message_complete(TASK_OUTPUT_STARTED) self.data_store_mgr.delta_task_output(itask, TASK_OUTPUT_STARTED) else: diff --git a/cylc/flow/task_outputs.py b/cylc/flow/task_outputs.py index 644b7b0dd3c..a9af2d34dcc 100644 --- a/cylc/flow/task_outputs.py +++ b/cylc/flow/task_outputs.py @@ -15,7 +15,29 @@ # along with this program. If not, see . """Task output message manager and constants.""" -from typing import List +import ast +import re +from typing import ( + Dict, + Iterable, + Iterator, + List, + Optional, + TYPE_CHECKING, + Tuple, + Union, +) + +from cylc.flow.exceptions import InvalidCompletionExpression +from cylc.flow.util import ( + BOOL_SYMBOLS, + get_variable_names, + restricted_evaluator, +) + +if TYPE_CHECKING: + from cylc.flow.taskdef import TaskDef + # Standard task output strings, used for triggering. TASK_OUTPUT_EXPIRED = "expired" @@ -32,7 +54,8 @@ TASK_OUTPUT_SUBMIT_FAILED, TASK_OUTPUT_STARTED, TASK_OUTPUT_SUCCEEDED, - TASK_OUTPUT_FAILED) + TASK_OUTPUT_FAILED, +) TASK_OUTPUTS = ( TASK_OUTPUT_EXPIRED, @@ -44,15 +67,219 @@ TASK_OUTPUT_FINISHED, ) -_TRIGGER = 0 -_MESSAGE = 1 -_IS_COMPLETED = 2 +# this evaluates task completion expressions +CompletionEvaluator = restricted_evaluator( + # expressions + ast.Expression, + # variables + ast.Name, ast.Load, + # operations + ast.BoolOp, ast.And, ast.Or, ast.BinOp, + error_class=InvalidCompletionExpression, +) + +# regex for splitting expressions into individual parts for formatting +RE_EXPR_SPLIT = re.compile(r'([\(\) ])') + + +def trigger_to_completion_variable(output: str) -> str: + """Turn a trigger into something that can be used in an expression. 
+ + Examples: + >>> trigger_to_completion_variable('succeeded') + 'succeeded' + >>> trigger_to_completion_variable('submit-failed') + 'submit_failed' + + """ + return output.replace('-', '_') + + +def get_trigger_completion_variable_maps(triggers: Iterable[str]): + """Return a bi-map of trigger to completion variable. + + Args: + triggers: All triggers for a task. + + Returns: + (trigger_to_completion_variable, completion_variable_to_trigger) + + Tuple of mappings for converting in either direction. + + """ + _trigger_to_completion_variable = {} + _completion_variable_to_trigger = {} + for trigger in triggers: + compvar = trigger_to_completion_variable(trigger) + _trigger_to_completion_variable[trigger] = compvar + _completion_variable_to_trigger[compvar] = trigger + + return ( + _trigger_to_completion_variable, + _completion_variable_to_trigger, + ) + + +def get_completion_expression(tdef: 'TaskDef') -> str: + """Return a completion expression for this task definition. + + If there is *not* a user provided completion statement: + + 1. Create a completion expression that ensures all required ouputs are + completed. + 2. If success is optional add "or succeeded or failed" onto the end. + 3. If submission is optional add "or submit-failed" onto the end of it. + 4. If expiry is optional add "or expired" onto the end of it. 
+ """ + # check if there is a user-configured completion expression + completion = tdef.rtconfig.get('completion') + if completion: + # completion expression is defined in the runtime -> return it + return completion + + # (1) start with an expression that ensures all required outputs are + # generated (if the task runs) + required = { + trigger_to_completion_variable(trigger) + for trigger, (_message, required) in tdef.outputs.items() + if required + } + parts = [] + if required: + _part = ' and '.join(sorted(required)) + if len(required) > 1: + # wrap the expression in brackets for clarity + parts.append(f'({_part})') + else: + parts.append(_part) + + # (2) handle optional success + if ( + tdef.outputs[TASK_OUTPUT_SUCCEEDED][1] is False + or tdef.outputs[TASK_OUTPUT_FAILED][1] is False + ): + # failure is tolerated -> ensure the task succeeds OR fails + if required: + # required outputs are required only if the task actually runs + parts = [ + f'({parts[0]} and {TASK_OUTPUT_SUCCEEDED})' + f' or {TASK_OUTPUT_FAILED}' + ] + else: + parts.append( + f'{TASK_OUTPUT_SUCCEEDED} or {TASK_OUTPUT_FAILED}' + ) + + # (3) handle optional submission + if ( + tdef.outputs[TASK_OUTPUT_SUBMITTED][1] is False + or tdef.outputs[TASK_OUTPUT_SUBMIT_FAILED][1] is False + ): + # submit-fail tolerated -> ensure the task executes OR submit-fails + parts.append( + trigger_to_completion_variable(TASK_OUTPUT_SUBMIT_FAILED) + ) + + # (4) handle optional expiry + if tdef.outputs[TASK_OUTPUT_EXPIRED][1] is False: + # expiry tolerated -> ensure the task executes OR expires + parts.append(TASK_OUTPUT_EXPIRED) + + return ' or '.join(parts) + + +def get_optional_outputs( + expression: str, + outputs: Iterable[str], +) -> Dict[str, Optional[bool]]: + """Determine which outputs in an expression are optional. + + Args: + expression: + The completion expression. + outputs: + All outputs that apply to this task. + + Returns: + dict: compvar: is_optional + + compvar: + The completion variable, i.e. 
the trigger as used in the completion + expression. + is_optional: + * True if var is optional. + * False if var is required. + * None if var is not referenced. + + Examples: + >>> sorted(get_optional_outputs( + ... '(succeeded and (x or y)) or failed', + ... {'succeeded', 'x', 'y', 'failed', 'expired'} + ... ).items()) + [('expired', None), ('failed', True), ('succeeded', True), + ('x', True), ('y', True)] + + >>> sorted(get_optional_outputs( + ... '(succeeded and x and y) or expired', + ... {'succeeded', 'x', 'y', 'failed', 'expired'} + ... ).items()) + [('expired', True), ('failed', None), ('succeeded', False), + ('x', False), ('y', False)] + + """ + # determine which triggers are used in the expression + used_compvars = get_variable_names(expression) + + # all completion variables which could appear in the expression + all_compvars = {trigger_to_completion_variable(out) for out in outputs} + + return { # output: is_optional + # the outputs that are used in the expression + **{ + output: CompletionEvaluator( + expression, + **{ + **{out: out != output for out in all_compvars}, + # don't consider pre-execution conditions as optional + # (pre-conditions are considered separately) + 'expired': False, + 'submit_failed': False, + }, + ) + for output in used_compvars + }, + # the outputs that are not used in the expression + **{ + output: None + for output in all_compvars - used_compvars + }, + } + + +# a completion expression that considers the outputs complete if any final task +# output is received +FINAL_OUTPUT_COMPLETION = ' or '.join( + map( + trigger_to_completion_variable, + [ + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, + TASK_OUTPUT_SUBMIT_FAILED, + TASK_OUTPUT_EXPIRED, + ], + ) +) class TaskOutputs: - """Task output message manager. + """Represents a collection of outputs for a task. + + Task outputs have a trigger and a message: + * The trigger is used in the graph and with "cylc set". 
+ * Messages map onto triggers and are used with "cylc message", they can + provide additional context to an output which will appear in the workflow + log. - Manage standard task outputs and custom outputs, e.g.: [scheduling] [[graph]] R1 = t1:trigger1 => t2 @@ -61,209 +288,145 @@ class TaskOutputs: [[[outputs]]] trigger1 = message 1 - Can search item by message string or by trigger string. - """ + Args: + tdef: + The task definition for the task these outputs represent. - # Memory optimization - constrain possible attributes to this list. - __slots__ = ["_by_message", "_by_trigger", "_required"] - - def __init__(self, tdef): - self._by_message = {} - self._by_trigger = {} - self._required = {} # trigger: message - - # Add outputs from task def. - for trigger, (message, required) in tdef.outputs.items(): - self._add(message, trigger, required=required) - - # Handle implicit submit requirement - if ( - # "submitted" is not declared as optional/required - tdef.outputs[TASK_OUTPUT_SUBMITTED][1] is None - # and "submit-failed" is not declared as optional/required - and tdef.outputs[TASK_OUTPUT_SUBMIT_FAILED][1] is None - ): - self._add( - TASK_OUTPUT_SUBMITTED, - TASK_OUTPUT_SUBMITTED, - required=True, - ) + For use outside of the scheduler, this argument can be completion + expression string. 
- def _add(self, message, trigger, is_completed=False, required=False): - """Add a new output message""" - self._by_message[message] = [trigger, message, is_completed] - self._by_trigger[trigger] = self._by_message[message] - if required: - self._required[trigger] = message - - def set_completed_by_msg(self, message): - """For flow trigger --wait: set completed outputs from the DB.""" - for trig, msg, _ in self._by_trigger.values(): - if message == msg: - self._add(message, trig, True, trig in self._required) - break - - def all_completed(self): - """Return True if all all outputs completed.""" - return all(val[_IS_COMPLETED] for val in self._by_message.values()) - - def exists(self, message=None, trigger=None): - """Return True if message/trigger is identified as an output.""" - try: - return self._get_item(message, trigger) is not None - except KeyError: - return False - - def get_all(self): - """Return an iterator for all output messages.""" - return sorted(self._by_message.values(), key=self.msg_sort_key) - - def get_completed(self): - """Return all completed output messages.""" - ret = [] - for value in self.get_all(): - if value[_IS_COMPLETED]: - ret.append(value[_MESSAGE]) - return ret - - def get_completed_all(self): - """Return all completed outputs. - - Return a list in this form: [(trigger1, message1), ...] 
- """ - ret = [] - for value in self.get_all(): - if value[_IS_COMPLETED]: - ret.append((value[_TRIGGER], value[_MESSAGE])) - return ret - - def has_custom_triggers(self): - """Return True if it has any custom triggers.""" - return any(key not in SORT_ORDERS for key in self._by_trigger) - - def _get_custom_triggers(self, required: bool = False) -> List[str]: - """Return list of all, or required, custom trigger messages.""" - custom = [ - out[1] for trg, out in self._by_trigger.items() - if trg not in SORT_ORDERS - ] - if required: - custom = [out for out in custom if out in self._required.values()] - return custom - - def get_not_completed(self): - """Return all not-completed output messages.""" - ret = [] - for value in self.get_all(): - if not value[_IS_COMPLETED]: - ret.append(value[_MESSAGE]) - return ret - - def is_completed(self, message=None, trigger=None): - """Return True if output of message is completed.""" - try: - return self._get_item(message, trigger)[_IS_COMPLETED] - except KeyError: - return False - - def remove(self, message=None, trigger=None): - """Remove an output by message, if it exists.""" - try: - trigger, message = self._get_item(message, trigger)[:2] - except KeyError: - pass - else: - del self._by_message[message] - del self._by_trigger[trigger] + """ - def set_all_completed(self): - """Set all outputs to complete.""" - for value in self._by_message.values(): - value[_IS_COMPLETED] = True + __slots__ = ( + "_message_to_trigger", + "_message_to_compvar", + "_completed", + "_completion_expression", + ) + + _message_to_trigger: Dict[str, str] # message: trigger + _message_to_compvar: Dict[str, str] # message: completion variable + _completed: Dict[str, bool] # message: is_complete + _completion_expression: str + + def __init__(self, tdef: 'Union[TaskDef, str]'): + self._message_to_trigger = {} + self._message_to_compvar = {} + self._completed = {} + + if isinstance(tdef, str): + # abnormal use e.g. 
from the "cylc show" command + self._completion_expression = tdef + else: + # normal use e.g. from within the scheduler + self._completion_expression = get_completion_expression(tdef) + for trigger, (message, _required) in tdef.outputs.items(): + self.add(trigger, message) + + def add(self, trigger: str, message: str) -> None: + """Register a new output. + + Note, normally outputs are listed automatically from the provided + TaskDef so there is no need to call this interface. It exists for cases + where TaskOutputs are used outside of the scheduler where there is no + TaskDef object handy so outputs must be listed manually. + """ + self._message_to_trigger[message] = trigger + self._message_to_compvar[message] = trigger_to_completion_variable( + trigger + ) + self._completed[message] = False - def set_all_incomplete(self): - """Set all outputs to incomplete.""" - for value in self._by_message.values(): - value[_IS_COMPLETED] = False + def get_trigger(self, message: str) -> str: + """Return the trigger associated with this message.""" + return self._message_to_trigger[message] - def set_completion(self, message, is_completed): - """Set output message completion status to is_completed (bool).""" - if message in self._by_message: - self._by_message[message][_IS_COMPLETED] = is_completed + def set_message_complete(self, message: str) -> Optional[bool]: + """Set the provided task message as complete. - def set_msg_trg_completion(self, message=None, trigger=None, - is_completed=True): - """Set the output identified by message/trigger to is_completed. + Args: + message: + The task output message to satisfy. - Return: - - Value of trigger (True) if completion flag is changed, - - False if completion is unchanged, or - - None if message/trigger is not found. + Returns: + True: + If the output was unset before. + False: + If the output was already set. + None + If the output does not apply. 
""" - try: - item = self._get_item(message, trigger) - old_is_completed = item[_IS_COMPLETED] - item[_IS_COMPLETED] = is_completed - except KeyError: + if message not in self._completed: + # no matching output return None - else: - if bool(old_is_completed) == bool(is_completed): - return False - else: - return item[_TRIGGER] - - def is_incomplete(self): - """Return True if any required outputs are not complete.""" - return any( - not completed - and trigger in self._required - for trigger, (_, _, completed) in self._by_trigger.items() - ) - - def get_incomplete(self): - """Return a list of required outputs that are not complete. - A task is incomplete if: + if self._completed[message] is False: + # output was incomplete + self._completed[message] = True + return True - * it finished executing without completing all required outputs - * or if job submission failed and the :submit output was not optional + # output was already completed + return False - https://github.com/cylc/cylc-admin/blob/master/docs/proposal-new-output-syntax.md#output-syntax + def is_message_complete(self, message: str) -> Optional[bool]: + """Return True if this message is complete. + Returns: + * True if the message is complete. + * False if the message is not complete. + * None if the message does not apply to these outputs. """ - return [ - trigger - for trigger, (_, _, is_completed) in self._by_trigger.items() - if not is_completed and trigger in self._required - ] - - def get_item(self, message): - """Return output item by message. + if message in self._completed: + return self._completed[message] + return None - Args: - message (str): Output message. + def iter_completed_messages(self) -> Iterator[str]: + """A generator that yields completed messages. - Returns: - item (tuple): - label (str), message (str), satisfied (bool) + Yields: + message: A completed task message. 
""" - if message in self._by_message: - return self._by_message[message] + for message, is_completed in self._completed.items(): + if is_completed: + yield message + + def __iter__(self) -> Iterator[Tuple[str, str, bool]]: + """A generator that yields all outputs. + + Yields: + (trigger, message, is_complete) - def _get_item(self, message, trigger): - """Return self._by_trigger[trigger] or self._by_message[message]. + trigger: + The output trigger. + message: + The output message. + is_complete: + True if the output is complete, else False. - whichever is relevant. """ - if message is None: - return self._by_trigger[trigger] - else: - return self._by_message[message] + for message, is_complete in self._completed.items(): + yield self._message_to_trigger[message], message, is_complete + + def is_complete(self) -> bool: + """Return True if the outputs are complete.""" + # NOTE: If a task has been removed from the workflow via restart / + # reload, then it is possible for the completion expression to be blank + # (empty string). In this case, we consider the task outputs to be + # complete when any final output has been generated. + # See https://github.com/cylc/cylc-flow/pull/5067 + expr = self._completion_expression or FINAL_OUTPUT_COMPLETION + return CompletionEvaluator( + expr, + **{ + self._message_to_compvar[message]: completed + for message, completed in self._completed.items() + }, + ) - def get_incomplete_implied(self, output: str) -> List[str]: - """Return an ordered list of incomplete implied outputs. + def get_incomplete_implied(self, message: str) -> List[str]: + """Return an ordered list of incomplete implied messages. Use to determined implied outputs to complete automatically. 
@@ -276,41 +439,156 @@ def get_incomplete_implied(self, output: str) -> List[str]: """ implied: List[str] = [] - if output in [TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED]: + if message in [TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED]: # Finished, so it must have submitted and started. implied = [TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED] - - elif output == TASK_OUTPUT_STARTED: + elif message == TASK_OUTPUT_STARTED: # It must have submitted. implied = [TASK_OUTPUT_SUBMITTED] - return [out for out in implied if not self.is_completed(out)] + return [ + message + for message in implied + if not self.is_message_complete(message) + ] + + def format_completion_status( + self, + indent: int = 2, + gutter: int = 2, + ansimarkup: int = 0, + ) -> str: + """Return a text representation of the status of these outputs. + + Returns a multiline string representing the status of each output used + in the expression within the context of the expression itself. + + Args: + indent: + Number of spaces to indent each level of the expression. + gutter: + Number of spaces to pad the left column from the expression. + ansimarkup: + Turns on colour coding using ansimarkup tags. These will need + to be parsed before display. There are three options + + 0: + No colour coding. + 1: + Only success colours will be used. This is easier to read + in colour coded logs. + 2: + Both success and fail colours will be used. + + Returns: + A multiline textural representation of the completion status. 
+ + """ + indent_space: str = ' ' * indent + _gutter: str = ' ' * gutter + + def color_wrap(string, is_complete): + nonlocal ansimarkup + if ansimarkup == 0: + return string + if is_complete: + return f'{string}' + if ansimarkup == 2: + return f'{string}' + return string + + ret: List[str] = [] + indent_level: int = 0 + op: Optional[str] = None + fence = '⦙' # U+2999 (dotted fence) + for part in RE_EXPR_SPLIT.split(self._completion_expression): + if not part.strip(): + continue + + if part in {'and', 'or'}: + op = part + continue + + elif part == '(': + if op: + ret.append( + f' {fence}{_gutter}{(indent_space * indent_level)}' + f'{op} {part}' + ) + else: + ret.append( + f' {fence}{_gutter}' + f'{(indent_space * indent_level)}{part}' + ) + indent_level += 1 + elif part == ')': + indent_level -= 1 + ret.append( + f' {fence}{_gutter}{(indent_space * indent_level)}{part}' + ) + + else: + _symbol = BOOL_SYMBOLS[bool(self._is_compvar_complete(part))] + is_complete = self._is_compvar_complete(part) + _pre = ( + f'{color_wrap(_symbol, is_complete)} {fence}' + f'{_gutter}{(indent_space * indent_level)}' + ) + if op: + ret.append(f'{_pre}{op} {color_wrap(part, is_complete)}') + else: + ret.append(f'{_pre}{color_wrap(part, is_complete)}') + + op = None + + return '\n'.join(ret) @staticmethod - def is_valid_std_name(name): + def is_valid_std_name(name: str) -> bool: """Check name is a valid standard output name.""" return name in SORT_ORDERS @staticmethod - def msg_sort_key(item): - """Compare by _MESSAGE.""" - try: - ind = SORT_ORDERS.index(item[_MESSAGE]) - except ValueError: - ind = 999 - return (ind, item[_MESSAGE] or '') - - @staticmethod - def output_sort_key(item): + def output_sort_key(item: Iterable[str]) -> float: """Compare by output order. 
Examples: - >>> this = TaskOutputs.output_sort_key >>> sorted(['finished', 'started', 'custom'], key=this) ['started', 'custom', 'finished'] + """ if item in TASK_OUTPUTS: return TASK_OUTPUTS.index(item) # Sort custom outputs after started. return TASK_OUTPUTS.index(TASK_OUTPUT_STARTED) + .5 + + def _is_compvar_complete(self, compvar: str) -> Optional[bool]: + """Return True if the completion variable is complete. + + Returns: + * True if var is optional. + * False if var is required. + * None if var is not referenced. + + """ + for message, _compvar in self._message_to_compvar.items(): + if _compvar == compvar: + return self.is_message_complete(message) + else: + raise KeyError(compvar) + + def iter_required_messages(self) -> Iterator[str]: + """Yield task messages that are required for this task to be complete. + + Note, in some cases tasks might not have any required messages, + e.g. "completion = succeeded or failed". + """ + for compvar, is_optional in get_optional_outputs( + self._completion_expression, + set(self._message_to_compvar.values()), + ).items(): + if is_optional is False: + for message, _compvar in self._message_to_compvar.items(): + if _compvar == compvar: + yield message diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py index 2f49de1e158..d37049f11ae 100644 --- a/cylc/flow/task_pool.py +++ b/cylc/flow/task_pool.py @@ -19,6 +19,7 @@ from contextlib import suppress from collections import Counter import json +from textwrap import indent from typing import ( Dict, Iterable, @@ -526,7 +527,7 @@ def load_db_task_pool_for_restart(self, row_idx, row): TASK_STATUS_SUCCEEDED ): for message in json.loads(outputs_str): - itask.state.outputs.set_completion(message, True) + itask.state.outputs.set_message_complete(message) self.data_store_mgr.delta_task_output(itask, message) if platform_name and status != TASK_STATUS_WAITING: @@ -1157,15 +1158,22 @@ def log_incomplete_tasks(self) -> bool: for itask in self.get_tasks(): if not 
itask.state(*TASK_STATUSES_FINAL): continue - outputs = itask.state.outputs.get_incomplete() - if outputs: - incomplete.append((itask.identity, outputs)) + if not itask.state.outputs.is_complete(): + incomplete.append( + ( + itask.identity, + itask.state.outputs.format_completion_status( + ansimarkup=1 + ), + ) + ) if incomplete: LOG.error( "Incomplete tasks:\n" + "\n".join( - f" * {id_} did not complete required outputs: {outputs}" + f"* {id_} did not complete the required outputs:" + f"\n{indent(outputs, ' ')}" for id_, outputs in incomplete ) ) @@ -1450,22 +1458,17 @@ def remove_if_complete( self.release_runahead_tasks() return ret - if itask.state(TASK_STATUS_EXPIRED): - self.remove(itask, "expired") - if self.compute_runahead(): - self.release_runahead_tasks() - return True - - incomplete = itask.state.outputs.get_incomplete() - if incomplete: + if not itask.state.outputs.is_complete(): # Keep incomplete tasks in the pool. if output in TASK_STATUSES_FINAL: # Log based on the output, not the state, to avoid warnings # due to use of "cylc set" to set internal outputs on an # already-finished task. 
LOG.warning( - f"[{itask}] did not complete required outputs:" - f" {incomplete}" + f"[{itask}] did not complete the required outputs:\n" + + itask.state.outputs.format_completion_status( + ansimarkup=1 + ) ) return False @@ -1491,14 +1494,12 @@ def spawn_on_all_outputs( """ if not itask.flow_nums: return - if completed_only: - outputs = itask.state.outputs.get_completed() - else: - outputs = itask.state.outputs._by_message - for output in outputs: + for _trigger, message, is_completed in itask.state.outputs: + if completed_only and not is_completed: + continue try: - children = itask.graph_children[output] + children = itask.graph_children[message] except KeyError: continue @@ -1518,7 +1519,7 @@ def spawn_on_all_outputs( continue if completed_only: c_task.satisfy_me( - [itask.tokens.duplicate(task_sel=output)] + [itask.tokens.duplicate(task_sel=message)] ) self.data_store_mgr.delta_task_prerequisite(c_task) self.add_to_pool(c_task) @@ -1620,7 +1621,7 @@ def _load_historical_outputs(self, itask): for outputs_str, fnums in info.items(): if itask.flow_nums.intersection(fnums): for msg in json.loads(outputs_str): - itask.state.outputs.set_completed_by_msg(msg) + itask.state.outputs.set_message_complete(msg) def spawn_task( self, @@ -1776,7 +1777,7 @@ def _get_task_proxy_db_outputs( for outputs_str, fnums in info.items(): if flow_nums.intersection(fnums): for msg in json.loads(outputs_str): - itask.state.outputs.set_completed_by_msg(msg) + itask.state.outputs.set_message_complete(msg) return itask def _standardise_prereqs( @@ -1919,16 +1920,15 @@ def _set_outputs_itask( outputs: List[str], ) -> None: """Set requested outputs on a task proxy and spawn children.""" - if not outputs: - outputs = itask.tdef.get_required_output_messages() + outputs = list(itask.state.outputs.iter_required_messages()) else: outputs = self._standardise_outputs( - itask.point, itask.tdef, outputs) + itask.point, itask.tdef, outputs + ) - outputs = sorted(outputs, 
key=itask.state.outputs.output_sort_key) - for output in outputs: - if itask.state.outputs.is_completed(output): + for output in sorted(outputs, key=itask.state.outputs.output_sort_key): + if itask.state.outputs.is_message_complete(output): LOG.info(f"output {itask.identity}:{output} completed already") continue self.task_events_mgr.process_message( @@ -2194,10 +2194,25 @@ def spawn_parentless_sequential_xtriggers(self): def clock_expire_tasks(self): """Expire any tasks past their clock-expiry time.""" for itask in self.get_tasks(): - if not itask.clock_expire(): - continue - self.task_events_mgr.process_message( - itask, logging.WARNING, TASK_OUTPUT_EXPIRED) + if ( + # force triggered tasks can not clock-expire + # see proposal point 10: + # https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + not itask.is_manual_submit + + # only waiting tasks can clock-expire + # see https://github.com/cylc/cylc-flow/issues/6025 + # (note retrying tasks will be in the waiting state) + and itask.state(TASK_STATUS_WAITING) + + # check if this task is clock expired + and itask.clock_expire() + ): + self.task_events_mgr.process_message( + itask, + logging.WARNING, + TASK_OUTPUT_EXPIRED, + ) def task_succeeded(self, id_): """Return True if task with id_ is in the succeeded state.""" @@ -2443,7 +2458,7 @@ def merge_flows(self, itask: TaskProxy, flow_nums: 'FlowNums') -> None: if ( itask.state(*TASK_STATUSES_FINAL) - and itask.state.outputs.get_incomplete() + and not itask.state.outputs.is_complete() ): # Re-queue incomplete task to run again in the merged flow. 
LOG.info(f"[{itask}] incomplete task absorbed by new flow.") diff --git a/cylc/flow/task_proxy.py b/cylc/flow/task_proxy.py index 898017c8da1..4e7b60d6e0a 100644 --- a/cylc/flow/task_proxy.py +++ b/cylc/flow/task_proxy.py @@ -38,19 +38,10 @@ from cylc.flow.flow_mgr import stringify_flow_nums from cylc.flow.platforms import get_platform from cylc.flow.task_action_timer import TimerFlags -from cylc.flow.task_outputs import ( - TASK_OUTPUT_FAILED, - TASK_OUTPUT_EXPIRED, - TASK_OUTPUT_SUCCEEDED, - TASK_OUTPUT_SUBMIT_FAILED -) from cylc.flow.task_state import ( TaskState, TASK_STATUS_WAITING, TASK_STATUS_EXPIRED, - TASK_STATUS_SUCCEEDED, - TASK_STATUS_SUBMIT_FAILED, - TASK_STATUS_FAILED ) from cylc.flow.taskdef import generate_graph_children from cylc.flow.wallclock import get_unix_time_from_time_string as str2time @@ -576,34 +567,6 @@ def clock_expire(self) -> bool: return False return True - def is_finished(self) -> bool: - """Return True if a final state achieved.""" - return ( - self.state( - TASK_STATUS_EXPIRED, - TASK_STATUS_SUBMIT_FAILED, - TASK_STATUS_FAILED, - TASK_STATUS_SUCCEEDED - ) - ) - def is_complete(self) -> bool: """Return True if complete or expired.""" - return ( - self.state(TASK_STATUS_EXPIRED) - or not self.state.outputs.is_incomplete() - ) - - def set_state_by_outputs(self) -> None: - """Set state according to which final output is completed.""" - for output in ( - TASK_OUTPUT_EXPIRED, TASK_OUTPUT_SUBMIT_FAILED, - TASK_OUTPUT_FAILED, TASK_OUTPUT_SUCCEEDED - ): - if self.state.outputs.is_completed(output, output): - # This assumes status and output strings are the same: - self.state_reset( - status=output, - silent=True, is_queued=False, is_runahead=False - ) - break + return self.state.outputs.is_complete() diff --git a/cylc/flow/taskdef.py b/cylc/flow/taskdef.py index 1da5101306b..68f754277d8 100644 --- a/cylc/flow/taskdef.py +++ b/cylc/flow/taskdef.py @@ -23,9 +23,7 @@ from cylc.flow.exceptions import TaskDefError from cylc.flow.task_id import 
TaskID from cylc.flow.task_outputs import ( - TASK_OUTPUT_EXPIRED, TASK_OUTPUT_SUBMITTED, - TASK_OUTPUT_SUBMIT_FAILED, TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED, SORT_ORDERS @@ -75,7 +73,7 @@ def generate_graph_children(tdef, point): def generate_graph_parents(tdef, point, taskdefs): - """Determine concrent graph parents of task tdef at point. + """Determine concrete graph parents of task tdef at point. Infer parents be reversing upstream triggers that lead to point/task. """ @@ -198,27 +196,15 @@ def set_required_output(self, output, required): message, _ = self.outputs[output] self.outputs[output] = (message, required) - def get_required_output_messages(self): - """Return list of required outputs (as task messages).""" - return [msg for (msg, req) in self.outputs.values() if req] - def tweak_outputs(self): """Output consistency checking and tweaking.""" - # If :succeed or :fail not set, assume success is required. - # Unless submit (and submit-fail) is optional (don't stall - # because of missing succeed if submit is optional). if ( self.outputs[TASK_OUTPUT_SUCCEEDED][1] is None and self.outputs[TASK_OUTPUT_FAILED][1] is None - and self.outputs[TASK_OUTPUT_SUBMITTED][1] is not False - and self.outputs[TASK_OUTPUT_SUBMIT_FAILED][1] is not False ): self.set_required_output(TASK_OUTPUT_SUCCEEDED, True) - # Expired must be optional - self.set_required_output(TASK_OUTPUT_EXPIRED, False) - # In Cylc 7 back compat mode, make all success outputs required. 
if cylc.flow.flags.cylc7_back_compat: for output in [ diff --git a/cylc/flow/unicode_rules.py b/cylc/flow/unicode_rules.py index 4608559798d..a6974888248 100644 --- a/cylc/flow/unicode_rules.py +++ b/cylc/flow/unicode_rules.py @@ -346,11 +346,11 @@ class TaskOutputValidator(UnicodeRuleChecker): RULES = [ # restrict outputs to sensible characters - allowed_characters(r'\w', r'\d', r'\-', r'\.'), + allowed_characters(r'\w', r'\d', r'\-'), # blacklist the _cylc prefix not_starts_with('_cylc'), # blacklist keywords - not_equals('required', 'optional', 'all'), + not_equals('required', 'optional', 'all', 'and', 'or'), # blacklist built-in task qualifiers and statuses (e.g. "waiting") not_equals(*sorted({*TASK_QUALIFIERS, *TASK_STATUSES_ORDERED})), ] diff --git a/cylc/flow/util.py b/cylc/flow/util.py index 059237f4e16..8b8b613787a 100644 --- a/cylc/flow/util.py +++ b/cylc/flow/util.py @@ -15,20 +15,48 @@ # along with this program. If not, see . """Misc functionality.""" +import ast from contextlib import suppress from functools import partial import json import re +from textwrap import dedent from typing import ( Any, + Callable, + Dict, List, Sequence, + Tuple, ) +BOOL_SYMBOLS: Dict[bool, str] = { + # U+2A2F (vector cross product) + False: '⨯', + # U+2713 (check) + True: '✓' +} _NAT_SORT_SPLIT = re.compile(r'([\d\.]+)') +def sstrip(text): + """Simple function to dedent and strip text. + + Examples: + >>> print(sstrip(''' + ... foo + ... bar + ... baz + ... ''')) + foo + bar + baz + + """ + return dedent(text).strip() + + def natural_sort_key(key: str, fcns=(int, str)) -> List[Any]: """Returns a key suitable for sorting. @@ -133,3 +161,236 @@ def serialise(flow_nums: set): def deserialise(flow_num_str: str): """Converts string to set.""" return set(json.loads(flow_num_str)) + + +def restricted_evaluator( + *whitelist: type, + error_class: Callable = ValueError, +) -> Callable: + """Returns a Python eval statement restricted to whitelisted operations. 
+ + The "eval" function can be used to run arbitrary code. This is useful + but presents security issues. This returns an "eval" method which will + only allow whitelisted operations to be performed allowing it to be used + safely with user-provided input. + + The code passed into the evaluator will be parsed into an abstract syntax + tree (AST), then that tree will be executed using Python's internal logic. + The evaluator will check the type of each node before it is executed and + fail with a ValueError if it is not permitted. + + The node types are documented in the ast module: + https://docs.python.org/3/library/ast.html + + The evaluator returned is only as safe as the nodes you whitelist, read the + docs carefully. + + Note: + If you don't need to parse expressions, use ast.literal_eval instead. + + Args: + whitelist: + Types to permit e.g. `ast.Expression`, see the ast docs for + details. + error_class: + An Exception class or callable which returns an Exception instance. + This is called and its result raised in the event that an + expression contains non-whitelisted operations. It will be provided + with the error message as an argument, additionally the following + keyword arguments will be provided if defined: + expr: + The expression the evaluator was called with. + expr_node: + The AST node containing the parsed expression. + error_node: + The first non-whitelisted AST node in the expression. + E.G. `` for a `-` operator. + error_type: + error_node.__class__.__name__. + E.G. `Sub` for a `-` operator. + + Returns: + An "eval" function restricted to the whitelisted nodes. + + Examples: + Optionally, provide an error class to be raised in the event of + non-whitelisted syntax (or you'll get ValueError): + >>> class RestrictedSyntaxError(Exception): + ... def __init__(self, message, error_node): + ... self.args = (str(error_node.__class__),) + + Create an evaluator, whitelisting allowed node types: + >>> evaluator = restricted_evaluator( + ... 
ast.Expression, # required for all uses + ... ast.BinOp, # an operation (e.g. addition or division) + ... ast.Add, # the "+" operator + ... ast.Constant, # required for literals e.g. "1" + ... ast.Name, # required for using variables in expressions + ... ast.Load, # required for accessing variable values + ... ast.Num, # for Python 3.7 compatibility + ... error_class=RestrictedSyntaxError, # error to raise + ... ) + + This will correctly evaluate intended expressions: + >>> evaluator('1 + 1') + 2 + + But will fail if a non-whitelisted node type is present: + >>> evaluator('1 - 1') + Traceback (most recent call last): + flow.util.RestrictedSyntaxError: + >>> evaluator('my_function()') + Traceback (most recent call last): + flow.util.RestrictedSyntaxError: + >>> evaluator('__import__("os")') + Traceback (most recent call last): + flow.util.RestrictedSyntaxError: + + The evaluator cannot see the containing scope: + >>> a = b = 1 + >>> evaluator('a + b') + Traceback (most recent call last): + NameError: name 'a' is not defined + + To use variables you must explicitly pass them in: + >>> evaluator('a + b', a=1, b=2) + 3 + + """ + # the node visitor is called for each node in the AST, + # this is the bit which rejects types which are not whitelisted + visitor = RestrictedNodeVisitor(whitelist) + + def _eval(expr, **variables): + nonlocal visitor + + # parse the expression + try: + expr_node = ast.parse(expr.strip(), mode='eval') + except SyntaxError as exc: + raise _get_exception( + error_class, + f'{exc.msg}: {exc.text}', + {'expr': expr} + ) + + # check against whitelisted types + try: + visitor.visit(expr_node) + except _RestrictedEvalError as exc: + # non-whitelisted node detected in expression + # => raise exception + error_node = exc.args[0] + raise _get_exception( + error_class, + ( + f'Invalid expression: {expr}' + f'\n"{error_node.__class__.__name__}" not permitted' + ), + { + 'expr': expr, + 'expr_node': expr_node, + 'error_node': error_node, + 'error_type': 
error_node.__class__.__name__, + }, + ) + + # run the expression + # Note: this may raise runtime errors + return eval( # nosec + # acceptable use of eval as only whitelisted operations are + # permitted + compile(expr_node, '', 'eval'), + # deny access to builtins + {'__builtins__': {}}, + # provide access to explicitly provided variables + variables, + ) + + return _eval + + +class RestrictedNodeVisitor(ast.NodeVisitor): + """AST node visitor which errors on non-whitelisted syntax. + + Raises _RestrictedEvalError if a non-whitelisted node is visited. + """ + + def __init__(self, whitelist): + super().__init__() + self._whitelist: Tuple[type] = whitelist + + def visit(self, node): + if not isinstance(node, self._whitelist): + # only permit whitelisted operations + raise _RestrictedEvalError(node) + return super().visit(node) + + +class _RestrictedEvalError(Exception): + """For internal use. + + Raised in the event non-whitelisted syntax is detected in an expression. + """ + + def __init__(self, node): + self.node = node + + +def _get_exception( + error_class: Callable, + message: str, + context: dict +) -> Exception: + """Helper which returns exception instances. + + Filters the arguments in context by the parameters of the error_class. + + This allows the error_class to decide what fields it wants, and for us + to add/change these params in the future. + """ + import inspect # no need to import unless errors occur + try: + params = dict(inspect.signature(error_class).parameters) + except ValueError: + params = {} + + context = { + key: value + for key, value in context.items() + if key in params + } + + return error_class(message, **context) + + +class NameWalker(ast.NodeVisitor): + """AST node visitor which records all variable names in an expression. 
+ + Examples: + >>> tree = ast.parse('(foo and bar) or baz or qux') + >>> walker = NameWalker() + >>> walker.visit(tree) + >>> sorted(walker.names) + ['bar', 'baz', 'foo', 'qux'] + + """ + + def __init__(self): + super().__init__() + self._names = set() + + def visit(self, node): + if isinstance(node, ast.Name): + self._names.add(node.id) + return super().visit(node) + + @property + def names(self): + return self._names + + +def get_variable_names(expression): + walker = NameWalker() + walker.visit(ast.parse(expression)) + return walker.names diff --git a/cylc/flow/workflow_db_mgr.py b/cylc/flow/workflow_db_mgr.py index 128cfd45126..0a92e7312bf 100644 --- a/cylc/flow/workflow_db_mgr.py +++ b/cylc/flow/workflow_db_mgr.py @@ -630,11 +630,10 @@ def put_update_task_jobs(self, itask, set_args): def put_update_task_outputs(self, itask): """Put UPDATE statement for task_outputs table.""" - outputs = [] - for _, message in itask.state.outputs.get_completed_all(): - outputs.append(message) set_args = { - "outputs": json.dumps(outputs) + "outputs": json.dumps( + list(itask.state.outputs.iter_completed_messages()) + ) } where_args = { "cycle": str(itask.point), diff --git a/setup.cfg b/setup.cfg index 45a66d81c88..5af5dcb98e5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -123,7 +123,9 @@ tests = pytest-cov>=2.8.0 pytest-xdist>=2 pytest-mock>=3.7 - pytest>=6 + # pytest-8.2.0 causing integration test failures: + # AttributeError: 'FixtureDef' object has no attribute 'unittest' + pytest>=6,!=8.2.0 testfixtures>=6.11.0 towncrier>=23 # Type annotation stubs diff --git a/tests/flakyfunctional/cylc-show/00-simple.t b/tests/flakyfunctional/cylc-show/00-simple.t index bc00175565d..8e6b9156924 100644 --- a/tests/flakyfunctional/cylc-show/00-simple.t +++ b/tests/flakyfunctional/cylc-show/00-simple.t @@ -54,15 +54,20 @@ description: jumped over the lazy dog baz: pub URL: (not given) state: running -prerequisites: ('-': not satisfied) - + 20141106T0900Z/bar succeeded -outputs: ('-': not 
completed) - - 20141106T0900Z/foo expired - + 20141106T0900Z/foo submitted - - 20141106T0900Z/foo submit-failed - + 20141106T0900Z/foo started - - 20141106T0900Z/foo succeeded - - 20141106T0900Z/foo failed +prerequisites: ('⨯': not satisfied) + ✓ 20141106T0900Z/bar succeeded +outputs: ('⨯': not completed) + ⨯ 20141106T0900Z/foo expired + ✓ 20141106T0900Z/foo submitted + ⨯ 20141106T0900Z/foo submit-failed + ✓ 20141106T0900Z/foo started + ⨯ 20141106T0900Z/foo succeeded + ⨯ 20141106T0900Z/foo failed +output completion: incomplete + ⦙ ( + ✓ ⦙ started + ⨯ ⦙ and succeeded + ⦙ ) __SHOW_OUTPUT__ #------------------------------------------------------------------------------- TEST_NAME="${TEST_NAME_BASE}-show-json" @@ -104,6 +109,7 @@ cmp_json "${TEST_NAME}-taskinstance" "${TEST_NAME}-taskinstance" \ } } }, + "runtime": {"completion": "(started and succeeded)"}, "prerequisites": [ { "expression": "c0", diff --git a/tests/flakyfunctional/cylc-show/04-multi.t b/tests/flakyfunctional/cylc-show/04-multi.t index b3ee401d546..eac79132aaf 100644 --- a/tests/flakyfunctional/cylc-show/04-multi.t +++ b/tests/flakyfunctional/cylc-show/04-multi.t @@ -34,45 +34,51 @@ title: (not given) description: (not given) URL: (not given) state: running -prerequisites: ('-': not satisfied) - + 2015/t1 started -outputs: ('-': not completed) - - 2016/t1 expired - + 2016/t1 submitted - - 2016/t1 submit-failed - + 2016/t1 started - - 2016/t1 succeeded - - 2016/t1 failed +prerequisites: ('⨯': not satisfied) + ✓ 2015/t1 started +outputs: ('⨯': not completed) + ⨯ 2016/t1 expired + ✓ 2016/t1 submitted + ⨯ 2016/t1 submit-failed + ✓ 2016/t1 started + ⨯ 2016/t1 succeeded + ⨯ 2016/t1 failed +output completion: incomplete + ⨯ ⦙ succeeded Task ID: 2017/t1 title: (not given) description: (not given) URL: (not given) state: running -prerequisites: ('-': not satisfied) - + 2016/t1 started -outputs: ('-': not completed) - - 2017/t1 expired - + 2017/t1 submitted - - 2017/t1 submit-failed - + 2017/t1 started - - 
2017/t1 succeeded - - 2017/t1 failed +prerequisites: ('⨯': not satisfied) + ✓ 2016/t1 started +outputs: ('⨯': not completed) + ⨯ 2017/t1 expired + ✓ 2017/t1 submitted + ⨯ 2017/t1 submit-failed + ✓ 2017/t1 started + ⨯ 2017/t1 succeeded + ⨯ 2017/t1 failed +output completion: incomplete + ⨯ ⦙ succeeded Task ID: 2018/t1 title: (not given) description: (not given) URL: (not given) state: running -prerequisites: ('-': not satisfied) - + 2017/t1 started -outputs: ('-': not completed) - - 2018/t1 expired - + 2018/t1 submitted - - 2018/t1 submit-failed - + 2018/t1 started - - 2018/t1 succeeded - - 2018/t1 failed +prerequisites: ('⨯': not satisfied) + ✓ 2017/t1 started +outputs: ('⨯': not completed) + ⨯ 2018/t1 expired + ✓ 2018/t1 submitted + ⨯ 2018/t1 submit-failed + ✓ 2018/t1 started + ⨯ 2018/t1 succeeded + ⨯ 2018/t1 failed +output completion: incomplete + ⨯ ⦙ succeeded __TXT__ contains_ok "${RUND}/show2.txt" <<'__TXT__' diff --git a/tests/flakyfunctional/job-submission/19-chatty.t b/tests/flakyfunctional/job-submission/19-chatty.t index f7480b4469b..6f03593cdc4 100755 --- a/tests/flakyfunctional/job-submission/19-chatty.t +++ b/tests/flakyfunctional/job-submission/19-chatty.t @@ -41,7 +41,7 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" workflow_run_ok "${TEST_NAME_BASE}-workflow-run" \ - cylc play --debug --no-detach "${WORKFLOW_NAME}" + cylc play --debug --no-detach "${WORKFLOW_NAME}" --reference-test # Logged killed jobs-submit command cylc cat-log "${WORKFLOW_NAME}" | sed -n ' @@ -70,20 +70,20 @@ done # Task pool in database contains the correct states TEST_NAME="${TEST_NAME_BASE}-db-task-pool" DB_FILE="${WORKFLOW_RUN_DIR}/log/db" -QUERY='SELECT cycle, name, status, is_held FROM task_pool' +QUERY='SELECT cycle, name, status FROM task_states WHERE name LIKE "nh%"' run_ok "$TEST_NAME" sqlite3 "$DB_FILE" "$QUERY" sort "${TEST_NAME}.stdout" > "${TEST_NAME}.stdout.sorted" cmp_ok 
"${TEST_NAME}.stdout.sorted" << '__OUT__' -1|nh0|submit-failed|0 -1|nh1|submit-failed|0 -1|nh2|submit-failed|0 -1|nh3|submit-failed|0 -1|nh4|submit-failed|0 -1|nh5|submit-failed|0 -1|nh6|submit-failed|0 -1|nh7|submit-failed|0 -1|nh8|submit-failed|0 -1|nh9|submit-failed|0 +1|nh0|submit-failed +1|nh1|submit-failed +1|nh2|submit-failed +1|nh3|submit-failed +1|nh4|submit-failed +1|nh5|submit-failed +1|nh6|submit-failed +1|nh7|submit-failed +1|nh8|submit-failed +1|nh9|submit-failed __OUT__ purge diff --git a/tests/flakyfunctional/job-submission/19-chatty/flow.cylc b/tests/flakyfunctional/job-submission/19-chatty/flow.cylc index cf0eaf9f724..f67409217d1 100644 --- a/tests/flakyfunctional/job-submission/19-chatty/flow.cylc +++ b/tests/flakyfunctional/job-submission/19-chatty/flow.cylc @@ -22,7 +22,7 @@ R1 = "starter:start => NOHOPE" R1 = "starter => HOPEFUL" R1 = HOPEFUL:succeed-all - R1 = "NOHOPE:submit-fail-all => stopper" + R1 = "NOHOPE:submit-fail-all? => stopper" [runtime] [[starter]] diff --git a/tests/flakyfunctional/job-submission/19-chatty/reference.log b/tests/flakyfunctional/job-submission/19-chatty/reference.log new file mode 100644 index 00000000000..a08d9635e4b --- /dev/null +++ b/tests/flakyfunctional/job-submission/19-chatty/reference.log @@ -0,0 +1,22 @@ +1/starter -triggered off [] in flow 1 +1/nh2 -triggered off ['1/starter'] in flow 1 +1/nh3 -triggered off ['1/starter'] in flow 1 +1/nh1 -triggered off ['1/starter'] in flow 1 +1/nh5 -triggered off ['1/starter'] in flow 1 +1/nh0 -triggered off ['1/starter'] in flow 1 +1/nh7 -triggered off ['1/starter'] in flow 1 +1/nh9 -triggered off ['1/starter'] in flow 1 +1/nh4 -triggered off ['1/starter'] in flow 1 +1/nh6 -triggered off ['1/starter'] in flow 1 +1/nh8 -triggered off ['1/starter'] in flow 1 +1/h6 -triggered off ['1/starter'] in flow 1 +1/h7 -triggered off ['1/starter'] in flow 1 +1/h9 -triggered off ['1/starter'] in flow 1 +1/h1 -triggered off ['1/starter'] in flow 1 +1/h3 -triggered off ['1/starter'] 
in flow 1 +1/h5 -triggered off ['1/starter'] in flow 1 +1/h8 -triggered off ['1/starter'] in flow 1 +1/h0 -triggered off ['1/starter'] in flow 1 +1/h2 -triggered off ['1/starter'] in flow 1 +1/h4 -triggered off ['1/starter'] in flow 1 +1/stopper -triggered off ['1/nh0', '1/nh1', '1/nh2', '1/nh3', '1/nh4', '1/nh5', '1/nh6', '1/nh7', '1/nh8', '1/nh9'] in flow 1 diff --git a/tests/functional/cylc-cat-log/00-local.t b/tests/functional/cylc-cat-log/00-local.t index 5a5d20edb71..d816d20d744 100755 --- a/tests/functional/cylc-cat-log/00-local.t +++ b/tests/functional/cylc-cat-log/00-local.t @@ -24,7 +24,8 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" TEST_NAME="${TEST_NAME_BASE}-validate" run_ok "${TEST_NAME}" cylc validate "${WORKFLOW_NAME}" #------------------------------------------------------------------------------- -workflow_run_ok "${TEST_NAME_BASE}-run" cylc play --no-detach "${WORKFLOW_NAME}" +workflow_run_ok "${TEST_NAME_BASE}-run" \ + cylc play --no-detach "${WORKFLOW_NAME}" --reference-test #------------------------------------------------------------------------------- TEST_NAME=${TEST_NAME_BASE}-workflow-log-log run_ok "${TEST_NAME}" cylc cat-log "${WORKFLOW_NAME}" @@ -58,6 +59,7 @@ install/01-install.log scheduler/01-start-01.log scheduler/02-restart-02.log scheduler/03-restart-02.log +scheduler/reftest.log __END__ #------------------------------------------------------------------------------- TEST_NAME=${TEST_NAME_BASE}-task-out diff --git a/tests/functional/cylc-cat-log/00-local/flow.cylc b/tests/functional/cylc-cat-log/00-local/flow.cylc index 102fc8218df..a71ee32caea 100644 --- a/tests/functional/cylc-cat-log/00-local/flow.cylc +++ b/tests/functional/cylc-cat-log/00-local/flow.cylc @@ -6,7 +6,7 @@ inactivity timeout = PT3M [scheduling] [[graph]] - R1 = submit-failed:submit-failed => a-task + R1 = submit-failed:submit-failed? 
=> a-task [runtime] [[a-task]] script = """ diff --git a/tests/functional/cylc-cat-log/00-local/reference.log b/tests/functional/cylc-cat-log/00-local/reference.log new file mode 100644 index 00000000000..1b10209edab --- /dev/null +++ b/tests/functional/cylc-cat-log/00-local/reference.log @@ -0,0 +1,2 @@ +1/submit-failed -triggered off [] in flow 1 +1/a-task -triggered off ['1/submit-failed'] in flow 1 diff --git a/tests/functional/cylc-config/00-simple/section2.stdout b/tests/functional/cylc-config/00-simple/section2.stdout index 4d3989c8387..f46992c8026 100644 --- a/tests/functional/cylc-config/00-simple/section2.stdout +++ b/tests/functional/cylc-config/00-simple/section2.stdout @@ -1,12 +1,13 @@ [[root]] + completion = platform = inherit = + script = init-script = env-script = err-script = exit-script = pre-script = - script = post-script = work sub-directory = execution polling intervals = @@ -76,6 +77,7 @@ [[[parameter environment templates]]] [[OPS]] script = echo "RUN: run-ops.sh" + completion = platform = inherit = init-script = @@ -152,6 +154,7 @@ [[[parameter environment templates]]] [[VAR]] script = echo "RUN: run-var.sh" + completion = platform = inherit = init-script = @@ -227,14 +230,15 @@ [[[outputs]]] [[[parameter environment templates]]] [[SERIAL]] + completion = platform = inherit = + script = init-script = env-script = err-script = exit-script = pre-script = - script = post-script = work sub-directory = execution polling intervals = @@ -304,14 +308,15 @@ [[[outputs]]] [[[parameter environment templates]]] [[PARALLEL]] + completion = platform = inherit = + script = init-script = env-script = err-script = exit-script = pre-script = - script = post-script = work sub-directory = execution polling intervals = @@ -383,6 +388,7 @@ [[ops_s1]] script = echo "RUN: run-ops.sh" inherit = OPS, SERIAL + completion = succeeded or failed platform = init-script = env-script = @@ -460,6 +466,7 @@ [[ops_s2]] script = echo "RUN: run-ops.sh" inherit = OPS, SERIAL + 
completion = succeeded or failed platform = init-script = env-script = @@ -537,6 +544,7 @@ [[ops_p1]] script = echo "RUN: run-ops.sh" inherit = OPS, PARALLEL + completion = succeeded or failed platform = init-script = env-script = @@ -614,6 +622,7 @@ [[ops_p2]] script = echo "RUN: run-ops.sh" inherit = OPS, PARALLEL + completion = succeeded or failed platform = init-script = env-script = @@ -691,6 +700,7 @@ [[var_s1]] script = echo "RUN: run-var.sh" inherit = VAR, SERIAL + completion = succeeded platform = init-script = env-script = @@ -768,6 +778,7 @@ [[var_s2]] script = echo "RUN: run-var.sh" inherit = VAR, SERIAL + completion = succeeded platform = init-script = env-script = @@ -845,6 +856,7 @@ [[var_p1]] script = echo "RUN: run-var.sh" inherit = VAR, PARALLEL + completion = succeeded platform = init-script = env-script = @@ -922,6 +934,7 @@ [[var_p2]] script = echo "RUN: run-var.sh" inherit = VAR, PARALLEL + completion = succeeded platform = init-script = env-script = diff --git a/tests/functional/cylc-diff/00-basic.t b/tests/functional/cylc-diff/00-basic.t index 18d57e07da9..20c387c044a 100755 --- a/tests/functional/cylc-diff/00-basic.t +++ b/tests/functional/cylc-diff/00-basic.t @@ -51,17 +51,21 @@ Workflow definitions ${WORKFLOW_NAME1} and ${WORKFLOW_NAME2} differ [runtime] [[foo]] < script = true + < completion = succeeded [runtime] [[bar]] < script = true + < completion = succeeded 2 items only in ${WORKFLOW_NAME2} (>) [runtime] [[food]] > script = true + > completion = succeeded [runtime] [[barley]] > script = true + > completion = succeeded 1 common items differ ${WORKFLOW_NAME1}(<) ${WORKFLOW_NAME2}(>) diff --git a/tests/functional/cylc-diff/03-icp.t b/tests/functional/cylc-diff/03-icp.t index a598ed985cd..7eea32d49f0 100755 --- a/tests/functional/cylc-diff/03-icp.t +++ b/tests/functional/cylc-diff/03-icp.t @@ -56,17 +56,21 @@ Workflow definitions ${WORKFLOW_NAME1} and ${WORKFLOW_NAME2} differ [runtime] [[foo]] < script = true + < completion = succeeded 
[runtime] [[bar]] < script = true + < completion = succeeded 2 items only in ${WORKFLOW_NAME2} (>) [runtime] [[food]] > script = true + > completion = succeeded [runtime] [[barley]] > script = true + > completion = succeeded 1 common items differ ${WORKFLOW_NAME1}(<) ${WORKFLOW_NAME2}(>) diff --git a/tests/functional/cylc-diff/04-icp-2.t b/tests/functional/cylc-diff/04-icp-2.t index 32405f7e06d..98c5a471369 100755 --- a/tests/functional/cylc-diff/04-icp-2.t +++ b/tests/functional/cylc-diff/04-icp-2.t @@ -57,17 +57,21 @@ Workflow definitions ${WORKFLOW_NAME1} and ${WORKFLOW_NAME2} differ [runtime] [[foo]] < script = true + < completion = succeeded [runtime] [[bar]] < script = true + < completion = succeeded 2 items only in ${WORKFLOW_NAME2} (>) [runtime] [[food]] > script = true + > completion = succeeded [runtime] [[barley]] > script = true + > completion = succeeded 1 common items differ ${WORKFLOW_NAME1}(<) ${WORKFLOW_NAME2}(>) diff --git a/tests/functional/cylc-set/03-set-failed.t b/tests/functional/cylc-set/03-set-failed.t index 1910e5b3120..4d561182b3a 100644 --- a/tests/functional/cylc-set/03-set-failed.t +++ b/tests/functional/cylc-set/03-set-failed.t @@ -36,7 +36,7 @@ cylc set -o failed "${WORKFLOW_NAME}//1/foo" # - implied outputs reported as already completed poll_grep_workflow_log -E "1/foo.* => failed" -poll_grep_workflow_log -E "1/foo.* did not complete required outputs" +poll_grep_workflow_log -E "1/foo.* did not complete the required outputs" cylc stop --now --now --interval=2 --max-polls=5 "${WORKFLOW_NAME}" diff --git a/tests/functional/cylc-show/05-complex.t b/tests/functional/cylc-show/05-complex.t index e49082d350f..d26c6b4f070 100644 --- a/tests/functional/cylc-show/05-complex.t +++ b/tests/functional/cylc-show/05-complex.t @@ -37,21 +37,23 @@ title: (not given) description: (not given) URL: (not given) state: running -prerequisites: ('-': not satisfied) - + 1 & 2 & (3 | (4 & 5)) & 0 - + 0 = 19991231T0000Z/f succeeded - + 1 = 20000101T0000Z/a 
succeeded - + 2 = 20000101T0000Z/b succeeded - + 3 = 20000101T0000Z/c succeeded - + 4 = 20000101T0000Z/d succeeded - + 5 = 20000101T0000Z/e succeeded -outputs: ('-': not completed) - - 20000101T0000Z/f expired - + 20000101T0000Z/f submitted - - 20000101T0000Z/f submit-failed - + 20000101T0000Z/f started - - 20000101T0000Z/f succeeded - - 20000101T0000Z/f failed +prerequisites: ('⨯': not satisfied) + ✓ 1 & 2 & (3 | (4 & 5)) & 0 + ✓ 0 = 19991231T0000Z/f succeeded + ✓ 1 = 20000101T0000Z/a succeeded + ✓ 2 = 20000101T0000Z/b succeeded + ✓ 3 = 20000101T0000Z/c succeeded + ✓ 4 = 20000101T0000Z/d succeeded + ✓ 5 = 20000101T0000Z/e succeeded +outputs: ('⨯': not completed) + ⨯ 20000101T0000Z/f expired + ✓ 20000101T0000Z/f submitted + ⨯ 20000101T0000Z/f submit-failed + ✓ 20000101T0000Z/f started + ⨯ 20000101T0000Z/f succeeded + ⨯ 20000101T0000Z/f failed +output completion: incomplete + ⨯ ⦙ succeeded 19991231T0000Z/f succeeded 20000101T0000Z/a succeeded 20000101T0000Z/b succeeded @@ -62,21 +64,23 @@ title: (not given) description: (not given) URL: (not given) state: running -prerequisites: ('-': not satisfied) - + 1 & 2 & (3 | (4 & 5)) & 0 - + 0 = 20000101T0000Z/f succeeded - + 1 = 20000102T0000Z/a succeeded - + 2 = 20000102T0000Z/b succeeded - + 3 = 20000102T0000Z/c succeeded - + 4 = 20000102T0000Z/d succeeded - + 5 = 20000102T0000Z/e succeeded -outputs: ('-': not completed) - - 20000102T0000Z/f expired - + 20000102T0000Z/f submitted - - 20000102T0000Z/f submit-failed - + 20000102T0000Z/f started - - 20000102T0000Z/f succeeded - - 20000102T0000Z/f failed +prerequisites: ('⨯': not satisfied) + ✓ 1 & 2 & (3 | (4 & 5)) & 0 + ✓ 0 = 20000101T0000Z/f succeeded + ✓ 1 = 20000102T0000Z/a succeeded + ✓ 2 = 20000102T0000Z/b succeeded + ✓ 3 = 20000102T0000Z/c succeeded + ✓ 4 = 20000102T0000Z/d succeeded + ✓ 5 = 20000102T0000Z/e succeeded +outputs: ('⨯': not completed) + ⨯ 20000102T0000Z/f expired + ✓ 20000102T0000Z/f submitted + ⨯ 20000102T0000Z/f submit-failed + ✓ 20000102T0000Z/f 
started + ⨯ 20000102T0000Z/f succeeded + ⨯ 20000102T0000Z/f failed +output completion: incomplete + ⨯ ⦙ succeeded 20000101T0000Z/f succeeded 20000102T0000Z/a succeeded 20000102T0000Z/b succeeded diff --git a/tests/functional/cylc-show/06-past-present-future.t b/tests/functional/cylc-show/06-past-present-future.t index bf6381cab65..7ff9762212d 100644 --- a/tests/functional/cylc-show/06-past-present-future.t +++ b/tests/functional/cylc-show/06-past-present-future.t @@ -45,15 +45,15 @@ __END__ TEST_NAME="${TEST_NAME_BASE}-show.present" contains_ok "${WORKFLOW_RUN_DIR}/show-c.txt" <<__END__ state: running -prerequisites: ('-': not satisfied) - + 1/b succeeded +prerequisites: ('⨯': not satisfied) + ✓ 1/b succeeded __END__ TEST_NAME="${TEST_NAME_BASE}-show.future" contains_ok "${WORKFLOW_RUN_DIR}/show-d.txt" <<__END__ state: waiting -prerequisites: ('-': not satisfied) - - 1/c succeeded +prerequisites: ('⨯': not satisfied) + ⨯ 1/c succeeded __END__ purge diff --git a/tests/functional/events/26-workflow-stalled-dump-prereq.t b/tests/functional/events/26-workflow-stalled-dump-prereq.t index 13d0b72579e..808ea8ae82f 100755 --- a/tests/functional/events/26-workflow-stalled-dump-prereq.t +++ b/tests/functional/events/26-workflow-stalled-dump-prereq.t @@ -30,8 +30,9 @@ grep_ok '"abort on stall timeout" is set' "${TEST_NAME_BASE}-run.stderr" grep_ok "ERROR - Incomplete tasks:" "${TEST_NAME_BASE}-run.stderr" -grep_ok "20100101T0000Z/bar did not complete required outputs: \['succeeded'\]" \ - "${TEST_NAME_BASE}-run.stderr" +grep_ok "20100101T0000Z/bar did not complete the required outputs:\n.*succeeded" \ + "${TEST_NAME_BASE}-run.stderr" \ + -Pizo grep_ok "WARNING - Partially satisfied prerequisites:" \ "${TEST_NAME_BASE}-run.stderr" diff --git a/tests/functional/events/27-workflow-stalled-dump-prereq-fam.t b/tests/functional/events/27-workflow-stalled-dump-prereq-fam.t index b75f31e86b1..03dd1b3dfef 100755 --- a/tests/functional/events/27-workflow-stalled-dump-prereq-fam.t +++ 
b/tests/functional/events/27-workflow-stalled-dump-prereq-fam.t @@ -31,8 +31,9 @@ grep_ok '"abort on stall timeout" is set' "${TEST_NAME_BASE}-run.stderr" grep_ok "ERROR - Incomplete tasks:" "${TEST_NAME_BASE}-run.stderr" -grep_ok "1/foo did not complete required outputs: \['succeeded'\]" \ - "${TEST_NAME_BASE}-run.stderr" +grep_ok "1/foo did not complete the required outputs:\n.*succeeded" \ + "${TEST_NAME_BASE}-run.stderr" \ + -Pizo grep_ok "WARNING - Partially satisfied prerequisites:" \ "${TEST_NAME_BASE}-run.stderr" diff --git a/tests/functional/graph-equivalence/multiline_and_refs/c-ref b/tests/functional/graph-equivalence/multiline_and_refs/c-ref index a9ea18051a0..3ff04f654a4 100644 --- a/tests/functional/graph-equivalence/multiline_and_refs/c-ref +++ b/tests/functional/graph-equivalence/multiline_and_refs/c-ref @@ -1,5 +1,5 @@ -prerequisites: ('-': not satisfied) - - 1/a succeeded - - 1/b succeeded -outputs: ('-': not completed) +prerequisites: ('⨯': not satisfied) + ⨯ 1/a succeeded + ⨯ 1/b succeeded +outputs: ('⨯': not completed) (None) diff --git a/tests/functional/graph-equivalence/multiline_and_refs/c-ref-2 b/tests/functional/graph-equivalence/multiline_and_refs/c-ref-2 index a9ea18051a0..3ff04f654a4 100644 --- a/tests/functional/graph-equivalence/multiline_and_refs/c-ref-2 +++ b/tests/functional/graph-equivalence/multiline_and_refs/c-ref-2 @@ -1,5 +1,5 @@ -prerequisites: ('-': not satisfied) - - 1/a succeeded - - 1/b succeeded -outputs: ('-': not completed) +prerequisites: ('⨯': not satisfied) + ⨯ 1/a succeeded + ⨯ 1/b succeeded +outputs: ('⨯': not completed) (None) diff --git a/tests/functional/graph-equivalence/splitline_refs/a-ref b/tests/functional/graph-equivalence/splitline_refs/a-ref index a5d954e5314..8f042683c0a 100644 --- a/tests/functional/graph-equivalence/splitline_refs/a-ref +++ b/tests/functional/graph-equivalence/splitline_refs/a-ref @@ -1,2 +1,2 @@ prerequisites: (None) -outputs: ('-': not completed) +outputs: ('⨯': not completed) 
diff --git a/tests/functional/graph-equivalence/splitline_refs/b-ref b/tests/functional/graph-equivalence/splitline_refs/b-ref index 1e9fd5768db..cad0fe89d0d 100644 --- a/tests/functional/graph-equivalence/splitline_refs/b-ref +++ b/tests/functional/graph-equivalence/splitline_refs/b-ref @@ -1,3 +1,3 @@ -prerequisites: ('-': not satisfied) - + 1/a succeeded -outputs: ('-': not completed) +prerequisites: ('⨯': not satisfied) + ✓ 1/a succeeded +outputs: ('⨯': not completed) diff --git a/tests/functional/graph-equivalence/splitline_refs/c-ref b/tests/functional/graph-equivalence/splitline_refs/c-ref index f3ec5407750..a0145574643 100644 --- a/tests/functional/graph-equivalence/splitline_refs/c-ref +++ b/tests/functional/graph-equivalence/splitline_refs/c-ref @@ -1,3 +1,3 @@ -prerequisites: ('-': not satisfied) - + 1/b succeeded -outputs: ('-': not completed) +prerequisites: ('⨯': not satisfied) + ✓ 1/b succeeded +outputs: ('⨯': not completed) diff --git a/tests/functional/inheritance/00-namespace-list.t b/tests/functional/inheritance/00-namespace-list.t index 43c204f3d4c..d1a58de401b 100755 --- a/tests/functional/inheritance/00-namespace-list.t +++ b/tests/functional/inheritance/00-namespace-list.t @@ -33,14 +33,17 @@ cmp_ok runtime.out <<'__DONE__' [[FAMILY]] [[m1]] inherit = FAMILY + completion = succeeded [[[environment]]] FOO = foo [[m2]] inherit = FAMILY + completion = succeeded [[[environment]]] FOO = bar [[m3]] inherit = FAMILY + completion = succeeded [[[environment]]] FOO = foo __DONE__ diff --git a/tests/functional/intelligent-host-selection/06-from-platform-group-fails.t b/tests/functional/intelligent-host-selection/06-from-platform-group-fails.t index 682a89e4e31..32eaf01d1e2 100644 --- a/tests/functional/intelligent-host-selection/06-from-platform-group-fails.t +++ b/tests/functional/intelligent-host-selection/06-from-platform-group-fails.t @@ -54,7 +54,7 @@ logfile="${WORKFLOW_RUN_DIR}/log/scheduler/log" # Check workflow fails for the reason we want it 
to fail named_grep_ok \ "Workflow stalled with 1/bad (submit-failed)" \ - "1/bad did not complete required outputs" \ + "1/bad did not complete the required outputs" \ "$logfile" # Look for message indicating that remote init has failed on each bad_host diff --git a/tests/functional/job-submission/06-garbage/flow.cylc b/tests/functional/job-submission/06-garbage/flow.cylc index bf5c4a55e3f..d994d840fd2 100644 --- a/tests/functional/job-submission/06-garbage/flow.cylc +++ b/tests/functional/job-submission/06-garbage/flow.cylc @@ -4,7 +4,7 @@ [scheduling] [[graph]] - R1 = """t1:submit-fail => t2""" + R1 = t1:submit-fail? => t2 [runtime] [[t1]] diff --git a/tests/functional/job-submission/09-activity-log-host-bad-submit/flow.cylc b/tests/functional/job-submission/09-activity-log-host-bad-submit/flow.cylc index 0a5bdb05b60..c76d8729cb2 100644 --- a/tests/functional/job-submission/09-activity-log-host-bad-submit/flow.cylc +++ b/tests/functional/job-submission/09-activity-log-host-bad-submit/flow.cylc @@ -9,7 +9,7 @@ initial cycle point=1999 final cycle point=1999 [[graph]] - P1Y = bad-submitter:submit-failed => grepper + P1Y = bad-submitter:submit-failed? 
=> grepper [runtime] [[root]] diff --git a/tests/functional/job-submission/16-timeout.t b/tests/functional/job-submission/16-timeout.t index 266b04b0164..b6042b56dad 100755 --- a/tests/functional/job-submission/16-timeout.t +++ b/tests/functional/job-submission/16-timeout.t @@ -53,6 +53,7 @@ __END__ cylc workflow-state "${WORKFLOW_NAME}" > workflow-state.log +# make sure foo submit failed and the stopper ran contains_ok workflow-state.log << __END__ stopper, 1, succeeded foo, 1, submit-failed diff --git a/tests/functional/job-submission/16-timeout/flow.cylc b/tests/functional/job-submission/16-timeout/flow.cylc index 30883ea8048..0f37c3f6551 100644 --- a/tests/functional/job-submission/16-timeout/flow.cylc +++ b/tests/functional/job-submission/16-timeout/flow.cylc @@ -2,7 +2,7 @@ [scheduling] [[graph]] - R1 = "foo:submit-fail => stopper" + R1 = "foo:submit-fail? => stopper" [runtime] [[foo]] platform = {{ environ['CYLC_TEST_PLATFORM'] }} diff --git a/tests/functional/job-submission/19-platform_select.t b/tests/functional/job-submission/19-platform_select.t index c55c72a3c28..bcd8179426a 100755 --- a/tests/functional/job-submission/19-platform_select.t +++ b/tests/functional/job-submission/19-platform_select.t @@ -22,8 +22,7 @@ set_test_number 6 install_workflow "${TEST_NAME_BASE}" run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" -run_ok "${TEST_NAME_BASE}-run" \ - cylc play --debug --no-detach "${WORKFLOW_NAME}" +reftest_run logfile="${WORKFLOW_RUN_DIR}/log/scheduler/log" diff --git a/tests/functional/job-submission/19-platform_select/flow.cylc b/tests/functional/job-submission/19-platform_select/flow.cylc index a6dcc446eec..5211df39699 100644 --- a/tests/functional/job-submission/19-platform_select/flow.cylc +++ b/tests/functional/job-submission/19-platform_select/flow.cylc @@ -15,10 +15,10 @@ purpose = """ R1 = """ host_no_subshell localhost_subshell - platform_subshell:submit-fail => fin_platform - platform_no_subshell:submit-fail => 
fin_platform - host_subshell:submit-fail => fin_host - host_subshell_backticks:submit-fail => fin_host + platform_subshell:submit-fail? => fin_platform + platform_no_subshell:submit-fail? => fin_platform + host_subshell:submit-fail? => fin_host + host_subshell_backticks:submit-fail? => fin_host """ [runtime] diff --git a/tests/functional/job-submission/19-platform_select/reference.log b/tests/functional/job-submission/19-platform_select/reference.log new file mode 100644 index 00000000000..b60c413e6cc --- /dev/null +++ b/tests/functional/job-submission/19-platform_select/reference.log @@ -0,0 +1,8 @@ +1/platform_no_subshell -triggered off [] in flow 1 +1/host_no_subshell -triggered off [] in flow 1 +1/platform_subshell -triggered off [] in flow 1 +1/host_subshell -triggered off [] in flow 1 +1/host_subshell_backticks -triggered off [] in flow 1 +1/localhost_subshell -triggered off [] in flow 1 +1/fin_platform -triggered off ['1/platform_no_subshell', '1/platform_subshell'] in flow 1 +1/fin_host -triggered off ['1/host_subshell', '1/host_subshell_backticks'] in flow 1 diff --git a/tests/functional/modes/05-sim-trigger.t b/tests/functional/modes/05-sim-trigger.t index 661a5843eda..7c7d1351db1 100644 --- a/tests/functional/modes/05-sim-trigger.t +++ b/tests/functional/modes/05-sim-trigger.t @@ -38,7 +38,7 @@ grep_ok '\[1/fail_fail_fail/01:running\] => failed' "${SCHD_LOG}" cylc trigger "${WORKFLOW_NAME}//1/fail_fail_fail" poll_grep_workflow_log -E \ - '1/fail_fail_fail/02.* did not complete required outputs' + '1/fail_fail_fail/02.* did not complete the required outputs' grep_ok '\[1/fail_fail_fail/02:running\] => failed' "${SCHD_LOG}" diff --git a/tests/functional/n-window/01-past-present-future.t b/tests/functional/n-window/01-past-present-future.t index d5ed27a8085..b3ad0edf23c 100644 --- a/tests/functional/n-window/01-past-present-future.t +++ b/tests/functional/n-window/01-past-present-future.t @@ -45,22 +45,22 @@ __END__ 
TEST_NAME="${TEST_NAME_BASE}-show-c.present" contains_ok "${WORKFLOW_RUN_DIR}/show-c.txt" <<__END__ -prerequisites: ('-': not satisfied) - + 1/b succeeded +prerequisites: ('⨯': not satisfied) + ✓ 1/b succeeded __END__ TEST_NAME="${TEST_NAME_BASE}-show-d.future" contains_ok "${WORKFLOW_RUN_DIR}/show-d.txt" <<__END__ state: waiting -prerequisites: ('-': not satisfied) - - 1/c succeeded +prerequisites: ('⨯': not satisfied) + ⨯ 1/c succeeded __END__ TEST_NAME="${TEST_NAME_BASE}-show-e.future" contains_ok "${WORKFLOW_RUN_DIR}/show-e.txt" <<__END__ state: waiting -prerequisites: ('-': not satisfied) - - 1/d succeeded +prerequisites: ('⨯': not satisfied) + ⨯ 1/d succeeded __END__ purge diff --git a/tests/functional/n-window/02-big-window.t b/tests/functional/n-window/02-big-window.t index e6aa45fae24..a084b6dd69b 100644 --- a/tests/functional/n-window/02-big-window.t +++ b/tests/functional/n-window/02-big-window.t @@ -42,15 +42,15 @@ __END__ TEST_NAME="${TEST_NAME_BASE}-show-j.parallel" contains_ok "${WORKFLOW_RUN_DIR}/show-j.txt" <<__END__ state: waiting -prerequisites: ('-': not satisfied) - - 1/i succeeded +prerequisites: ('⨯': not satisfied) + ⨯ 1/i succeeded __END__ TEST_NAME="${TEST_NAME_BASE}-show-h.future" contains_ok "${WORKFLOW_RUN_DIR}/show-h.txt" <<__END__ state: waiting -prerequisites: ('-': not satisfied) - - 1/g succeeded +prerequisites: ('⨯': not satisfied) + ⨯ 1/g succeeded __END__ purge diff --git a/tests/functional/optional-outputs/01-stall-on-incomplete.t b/tests/functional/optional-outputs/01-stall-on-incomplete.t index 81c71116471..7a460ffcaf9 100644 --- a/tests/functional/optional-outputs/01-stall-on-incomplete.t +++ b/tests/functional/optional-outputs/01-stall-on-incomplete.t @@ -30,7 +30,7 @@ workflow_run_fail "${TEST_NAME_BASE}-run" \ LOG="${WORKFLOW_RUN_DIR}/log/scheduler/log" grep_ok "Incomplete tasks" "${LOG}" -grep_ok "1/foo did not complete required outputs: \['y'\]" "${LOG}" +grep_ok '1/foo did not complete the required 
outputs:\n(.*\n){3}.*y\n' "${LOG}" -Pizo grep_ok "Workflow stalled" "${LOG}" purge diff --git a/tests/functional/optional-outputs/07-finish-fail-c7-backcompat.t b/tests/functional/optional-outputs/07-finish-fail-c7-backcompat.t index 37d13e2ee88..85b121ae0f9 100644 --- a/tests/functional/optional-outputs/07-finish-fail-c7-backcompat.t +++ b/tests/functional/optional-outputs/07-finish-fail-c7-backcompat.t @@ -37,7 +37,7 @@ workflow_run_fail "${TEST_NAME_BASE}-run" cylc play --no-detach --debug "${WORKF grep_workflow_log_ok grep-0 "Workflow stalled" grep_workflow_log_ok grep-1 "ERROR - Incomplete tasks:" -grep_workflow_log_ok grep-2 "1/foo did not complete required outputs" -grep_workflow_log_ok grep-3 "2/foo did not complete required outputs" +grep_workflow_log_ok grep-2 "1/foo did not complete the required outputs" +grep_workflow_log_ok grep-3 "2/foo did not complete the required outputs" purge diff --git a/tests/functional/param_expand/01-basic.t b/tests/functional/param_expand/01-basic.t index 20c06d5a3b9..e058224649a 100644 --- a/tests/functional/param_expand/01-basic.t +++ b/tests/functional/param_expand/01-basic.t @@ -390,10 +390,12 @@ cmp_ok '19.cylc' <<'__FLOW_CONFIG__' [[root]] [[c++]] script = true + completion = succeeded [[[environment]]] CC = gcc [[fortran-2008]] script = true + completion = succeeded [[[environment]]] FC = gfortran __FLOW_CONFIG__ diff --git a/tests/functional/remote/05-remote-init.t b/tests/functional/remote/05-remote-init.t index 37a28aac1da..f0f8ef65266 100644 --- a/tests/functional/remote/05-remote-init.t +++ b/tests/functional/remote/05-remote-init.t @@ -21,7 +21,7 @@ export REQUIRE_PLATFORM='loc:remote fs:indep comms:tcp' . 
"$(dirname "$0")/test_header" #------------------------------------------------------------------------------- -set_test_number 6 +set_test_number 5 create_test_global_config "" " [platforms] [[belle]] @@ -53,8 +53,7 @@ g|0|0|localhost __SELECT__ grep_ok "ERROR - Incomplete tasks:" "${TEST_NAME_BASE}-run.stderr" -grep_ok "1/a did not complete required outputs" "${TEST_NAME_BASE}-run.stderr" -grep_ok "1/b did not complete required outputs" "${TEST_NAME_BASE}-run.stderr" +grep_ok "1/a did not complete the required outputs" "${TEST_NAME_BASE}-run.stderr" purge exit diff --git a/tests/functional/restart/submit-failed/flow.cylc b/tests/functional/restart/submit-failed/flow.cylc index 6ad698828af..cdce1c692e9 100644 --- a/tests/functional/restart/submit-failed/flow.cylc +++ b/tests/functional/restart/submit-failed/flow.cylc @@ -14,7 +14,9 @@ final cycle point = 20130923T00 [[graph]] R1 = """ - submit_failed_task:submit-fail => shutdown + submit_failed_task:submit-fail? => shutdown + submit_failed_task:submitted? => error + shutdown => output_states output_states => remove => finish """ @@ -29,7 +31,12 @@ description = "Submit-failed task (runs before restart)" [[remove]] script = """ - cylc remove ${CYLC_WORKFLOW_ID} submit_failed_task.${CYLC_TASK_CYCLE_POINT} + cylc remove "${CYLC_WORKFLOW_ID}//${CYLC_TASK_CYCLE_POINT}/submit_failed_task" + """ + [[error]] + script = """ + cylc message -- "ERROR:this-task-should-have-submit-failed" + exit 1 """ {% include 'flow-runtime-restart.cylc' %} diff --git a/tests/functional/retries/submission/flow.cylc b/tests/functional/retries/submission/flow.cylc index 9d650a18b71..be4d9be1615 100644 --- a/tests/functional/retries/submission/flow.cylc +++ b/tests/functional/retries/submission/flow.cylc @@ -7,7 +7,7 @@ expected task failures = 1/foo [scheduling] [[graph]] - R1 = "foo:submit-fail => !foo" + R1 = "foo:submit-fail? 
=> !foo" [runtime] [[foo]] script = true diff --git a/tests/functional/spawn-on-demand/18-submitted.t b/tests/functional/spawn-on-demand/18-submitted.t index 30f022ebafd..103fd1ad27f 100644 --- a/tests/functional/spawn-on-demand/18-submitted.t +++ b/tests/functional/spawn-on-demand/18-submitted.t @@ -40,7 +40,7 @@ reftest_run for number in 1 2 3; do grep_workflow_log_ok \ "${TEST_NAME_BASE}-a${number}" \ - "${number}/a${number}.* did not complete required outputs: \['submitted'\]" + "${number}/a${number}.* did not complete the required outputs:" done purge diff --git a/tests/functional/spawn-on-demand/18-submitted/flow.cylc b/tests/functional/spawn-on-demand/18-submitted/flow.cylc index 975f7827ffb..5c7494c9627 100644 --- a/tests/functional/spawn-on-demand/18-submitted/flow.cylc +++ b/tests/functional/spawn-on-demand/18-submitted/flow.cylc @@ -12,6 +12,7 @@ cycling mode = integer runahead limit = P10 [[graph]] + # tasks will finish with *in*complete outputs R/1 = """ # a1 should be incomplete (submission is implicitly required) a1? => b @@ -25,6 +26,8 @@ a3? => b a3:submitted => s """ + + # tasks will finish with complete outputs R/4 = """ # a4 should be complete (submission is explicitly optional) a4? 
=> b diff --git a/tests/functional/spawn-on-demand/19-submitted-compat.t b/tests/functional/spawn-on-demand/19-submitted-compat.t index 98c603d55a7..20a7adbb765 100644 --- a/tests/functional/spawn-on-demand/19-submitted-compat.t +++ b/tests/functional/spawn-on-demand/19-submitted-compat.t @@ -54,6 +54,7 @@ grep_workflow_log_ok \ '\[1/a/01:running\] => succeeded' grep_workflow_log_ok \ "${TEST_NAME_BASE}-b-incomplete" \ - "1/b did not complete required outputs: \['submitted', 'succeeded'\]" + '1/b did not complete the required outputs:\n.*\n.*submitted.*\n.*succeeded\n' \ + -Pizoq purge diff --git a/tests/functional/triggering/14-submit-fail/flow.cylc b/tests/functional/triggering/14-submit-fail/flow.cylc index e2e865fa39c..ec28b82d49d 100644 --- a/tests/functional/triggering/14-submit-fail/flow.cylc +++ b/tests/functional/triggering/14-submit-fail/flow.cylc @@ -5,7 +5,7 @@ [scheduling] [[graph]] R1 = """ - foo:submit-fail => bar + foo:submit-fail? => bar bar => !foo """ [runtime] diff --git a/tests/functional/triggering/16-fam-expansion.t b/tests/functional/triggering/16-fam-expansion.t index 4b48161588c..809647a711e 100644 --- a/tests/functional/triggering/16-fam-expansion.t +++ b/tests/functional/triggering/16-fam-expansion.t @@ -31,13 +31,13 @@ workflow_run_ok "${TEST_NAME}" \ cylc play --debug --no-detach --set="SHOW_OUT='$SHOW_OUT'" "${WORKFLOW_NAME}" #------------------------------------------------------------------------------- contains_ok "$SHOW_OUT" <<'__SHOW_DUMP__' - + (((1 | 0) & (3 | 2) & (5 | 4)) & (0 | 2 | 4)) - + 0 = 1/foo1 failed - - 1 = 1/foo1 succeeded - + 2 = 1/foo2 failed - - 3 = 1/foo2 succeeded - + 4 = 1/foo3 failed - - 5 = 1/foo3 succeeded + ✓ (((1 | 0) & (3 | 2) & (5 | 4)) & (0 | 2 | 4)) + ✓ 0 = 1/foo1 failed + ⨯ 1 = 1/foo1 succeeded + ✓ 2 = 1/foo2 failed + ⨯ 3 = 1/foo2 succeeded + ✓ 4 = 1/foo3 failed + ⨯ 5 = 1/foo3 succeeded __SHOW_DUMP__ #------------------------------------------------------------------------------- purge diff --git 
a/tests/functional/xtriggers/03-sequence.t b/tests/functional/xtriggers/03-sequence.t index a41b970b3f1..1bb24d521a9 100644 --- a/tests/functional/xtriggers/03-sequence.t +++ b/tests/functional/xtriggers/03-sequence.t @@ -51,11 +51,11 @@ cylc play "${WORKFLOW_NAME}" poll_grep_workflow_log -E '2025/start.* => succeeded' -cylc show "${WORKFLOW_NAME}//2026/foo" | grep -E '^ - xtrigger' > 2026.foo.log +cylc show "${WORKFLOW_NAME}//2026/foo" | grep -E '^ ⨯ xtrigger' > 2026.foo.log # 2026/foo should get only xtrigger e2. cmp_ok 2026.foo.log - <<__END__ - - xtrigger "e2 = echo(name=alice, succeed=False)" + ⨯ xtrigger "e2 = echo(name=alice, succeed=False)" __END__ cylc stop --now --max-polls=10 --interval=2 "${WORKFLOW_NAME}" diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index d575f2a6af2..edfe56e2a1f 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -360,6 +360,28 @@ def _validate(id_: Union[str, Path], **kwargs) -> WorkflowConfig: return _validate +@pytest.fixture(scope='module') +def mod_validate(run_dir): + """Provides a function for validating workflow configurations. + + Attempts to load the configuration, will raise exceptions if there are + errors. + + Args: + id_ - The flow to validate + kwargs - Arguments to pass to ValidateOptions + """ + def _validate(id_: Union[str, Path], **kwargs) -> WorkflowConfig: + id_ = str(id_) + return WorkflowConfig( + id_, + str(Path(run_dir, id_, 'flow.cylc')), + ValidateOptions(**kwargs) + ) + + return _validate + + @pytest.fixture def capture_submission(): """Suppress job submission and capture submitted tasks. diff --git a/tests/integration/test_optional_outputs.py b/tests/integration/test_optional_outputs.py new file mode 100644 index 00000000000..d5c4e41ce81 --- /dev/null +++ b/tests/integration/test_optional_outputs.py @@ -0,0 +1,510 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +"""Tests optional output and task completion logic. + +This functionality is defined by the "optional-output-extension" proposal: + +https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal +""" + +from itertools import combinations +from typing import TYPE_CHECKING + +import pytest + +from cylc.flow.cycling.integer import IntegerPoint +from cylc.flow.cycling.iso8601 import ISO8601Point +from cylc.flow.network.resolvers import TaskMsg +from cylc.flow.task_events_mgr import ( + TaskEventsManager, +) +from cylc.flow.task_outputs import ( + TASK_OUTPUTS, + TASK_OUTPUT_EXPIRED, + TASK_OUTPUT_FAILED, + TASK_OUTPUT_FINISHED, + TASK_OUTPUT_SUCCEEDED, + get_completion_expression, +) +from cylc.flow.task_state import ( + TASK_STATUSES_ACTIVE, + TASK_STATUS_EXPIRED, + TASK_STATUS_PREPARING, + TASK_STATUS_RUNNING, + TASK_STATUS_WAITING, +) + +if TYPE_CHECKING: + from cylc.flow.task_proxy import TaskProxy + from cylc.flow.scheduler import Scheduler + + +def reset_outputs(itask: 'TaskProxy'): + """Undo the consequences of setting task outputs. + + This assumes you haven't completed the task. 
+ """ + itask.state.outputs._completed = { + message: False + for message in itask.state.outputs._completed + } + itask.state_reset( + TASK_STATUS_WAITING, + is_queued=False, + is_held=False, + is_runahead=False, + ) + + +@pytest.mark.parametrize( + 'graph, completion_outputs', + [ + pytest.param( + 'a:x', + [{TASK_OUTPUT_SUCCEEDED, 'x'}], + id='1', + ), + pytest.param( + 'a\na:x\na:expired?', + [{TASK_OUTPUT_SUCCEEDED, 'x'}, {TASK_OUTPUT_EXPIRED}], + id='2', + ), + ], +) +async def test_task_completion( + flow, + scheduler, + start, + graph, + completion_outputs, + capcall, +): + """Ensure that task completion is watertight. + + Run through every possible permutation of outputs MINUS the ones that would + actually complete a task to ensure that task completion is correctly + handled. + + Note, the building and evaluation of completion expressions is also tested, + this is more of an end-to-end test to ensure everything is connected + properly. + """ + # prevent tasks from being removed from the pool when complete + capcall( + 'cylc.flow.task_pool.TaskPool.remove_if_complete' + ) + id_ = flow({ + 'scheduling': { + 'graph': {'R1': graph}, + }, + 'runtime': { + 'a': { + 'outputs': { + 'x': 'xxx', + }, + }, + }, + }) + schd = scheduler(id_) + all_outputs = { + # all built-in outputs + *TASK_OUTPUTS, + # all registered custom outputs + 'x' + # but not the finished psudo output + } - {TASK_OUTPUT_FINISHED} + + async with start(schd): + a1 = schd.pool.get_task(IntegerPoint('1'), 'a') + + # try every set of outputs that *shouldn't* complete the task + for combination in { + comb + # every possible combination of outputs + for _length in range(1, len(all_outputs)) + for comb in combinations(all_outputs, _length) + # that doesn't contain the outputs that would satisfy the task + if not any( + set(comb) & output_set == output_set + for output_set in completion_outputs + ) + }: + # set the combination of outputs + schd.pool.set_prereqs_and_outputs( + ['1/a'], + combination, + 
[], + ['1'], + ) + + # ensure these outputs do *not* complete the task + assert not a1.state.outputs.is_complete() + + # reset any changes + reset_outputs(a1) + + # now try the outputs that *should* satisfy the task + for combination in completion_outputs: + # set the combination of outputs + schd.pool.set_prereqs_and_outputs( + ['1/a'], + combination, + [], + ['1'], + ) + + # ensure the task *is* completed + assert a1.state.outputs.is_complete() + + # reset any changes + reset_outputs(a1) + + +async def test_expire_orthogonality(flow, scheduler, start): + """Ensure "expired?" does not infer "succeeded?". + + Asserts proposal point 2: + https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + """ + id_ = flow({ + 'scheduling': { + 'graph': { + 'R1': 'a:expire? => e' + }, + }, + }) + schd: 'Scheduler' = scheduler(id_, paused_start=False) + async with start(schd): + a_1 = schd.pool.get_task(IntegerPoint('1'), 'a') + + # wait for the task to submit + while not a_1.state(TASK_STATUS_WAITING, TASK_STATUS_PREPARING): + schd.release_queued_tasks() + + # NOTE: The submit number isn't presently incremented via this code + # pathway so we have to hack it here. If the task messages in this test + # get ignored because of some future change, then you can safely remove + # this line (it's not what this test is testing). 
+ a_1.submit_num += 1 + + # tell the scheduler that the task *submit-failed* + schd.message_queue.put( + TaskMsg( + '1/a/01', + '2000-01-01T00:00:00+00', + 'INFO', + TaskEventsManager.EVENT_SUBMIT_FAILED + ), + ) + schd.process_queued_task_messages() + # ensure that the scheduler is stalled + assert not a_1.state.outputs.is_complete() + assert schd.pool.is_stalled() + + # tell the scheduler that the task *failed* + schd.message_queue.put( + TaskMsg( + '1/a/01', + '2000-01-01T00:00:00+00', + 'INFO', + TaskEventsManager.EVENT_FAILED, + ), + ) + schd.process_queued_task_messages() + # ensure that the scheduler is stalled + assert not a_1.state.outputs.is_complete() + assert schd.pool.is_stalled() + + # tell the scheduler that the task *expired* + schd.message_queue.put( + TaskMsg( + '1/a/01', + '2000-01-01T00:00:00+00', + 'INFO', + TaskEventsManager.EVENT_EXPIRED, + ), + ) + schd.process_queued_task_messages() + # ensure that the scheduler is *not* stalled + assert a_1.state.outputs.is_complete() + assert not schd.pool.is_stalled() + + +@pytest.fixture(scope='module') +def implicit_completion_config(mod_flow, mod_validate): + id_ = mod_flow({ + 'scheduling': { + 'graph': { + 'R1': ''' + a + + b? + + c:x + + d:x? + d:y? + d:z? + + e:x + e:y + e:z + + f? + f:x + + g:expired? + + h:succeeded? + h:expired? + + i:expired? + i:submitted + + j:expired? + j:submitted? + + k:submit-failed? + k:succeeded? + + l:expired? + l:submit-failed? + l:succeeded? 
+ ''' + } + }, + 'runtime': { + 'root': { + 'outputs': { + 'x': 'xxx', + 'y': 'yyy', + 'z': 'zzz', + } + } + } + }) + return mod_validate(id_) + + +@pytest.mark.parametrize( + 'task, condition', + [ + pytest.param('a', 'succeeded', id='a'), + pytest.param('b', 'succeeded or failed', id='b'), + pytest.param('c', '(succeeded and x)', id='c'), + pytest.param('d', 'succeeded', id='d'), + pytest.param('e', '(succeeded and x and y and z)', id='e'), + pytest.param('f', '(x and succeeded) or failed', id='f'), + pytest.param('g', 'succeeded or expired', id='h'), + pytest.param('h', 'succeeded or failed or expired', id='h'), + pytest.param('i', '(submitted and succeeded) or expired', id='i'), + pytest.param('j', 'succeeded or submit_failed or expired', id='j'), + pytest.param('k', 'succeeded or failed or submit_failed', id='k'), + pytest.param( + 'l', 'succeeded or failed or submit_failed or expired', id='l' + ), + ], +) +async def test_implicit_completion_expression( + implicit_completion_config, + task, + condition, +): + """It should generate a completion expression from the graph. + + If no completion expression is provided in the runtime section, then it + should auto generate one inferring whether outputs are required or not from + the graph. + """ + completion_expression = get_completion_expression( + implicit_completion_config.taskdefs[task] + ) + assert completion_expression == condition + + +async def test_clock_expire_partially_satisfied_task( + flow, + scheduler, + start, +): + """Clock expire should take effect on a partially satisfied task. 
+ + Tests proposal point 8: + https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + """ + id_ = flow({ + 'scheduling': { + 'initial cycle point': '2000', + 'runahead limit': 'P0', + 'special tasks': { + 'clock-expire': 'e', + }, + 'graph': { + 'P1D': ''' + # this prerequisite we will satisfy + a => e + + # this prerequisite we will leave unsatisfied creating a + # partially-satisfied task + b => e + ''' + }, + }, + }) + schd = scheduler(id_) + async with start(schd): + # satisfy one of the prerequisites + a = schd.pool.get_task(ISO8601Point('20000101T0000Z'), 'a') + assert a + schd.pool.spawn_on_output(a, TASK_OUTPUT_SUCCEEDED) + + # the task "e" should now be spawned + e = schd.pool.get_task(ISO8601Point('20000101T0000Z'), 'e') + assert e + + # check for clock-expired tasks + schd.pool.clock_expire_tasks() + + # the task should now be in the expired state + assert e.state(TASK_STATUS_EXPIRED) + + +async def test_clock_expiry( + flow, + scheduler, + start, +): + """Waiting tasks should be considered for clock-expiry. + + Tests two things: + + * Manually triggered tasks should not be considered for clock-expiry. + + Tests proposal point 10: + https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + + * Active tasks should not be considered for clock-expiry. 
+ + Closes https://github.com/cylc/cylc-flow/issues/6025 + """ + id_ = flow({ + 'scheduling': { + 'initial cycle point': '2000', + 'runahead limit': 'P1', + 'special tasks': { + 'clock-expire': 'x' + }, + 'graph': { + 'P1Y': 'x' + }, + }, + }) + schd = scheduler(id_) + async with start(schd): + # the first task (waiting) + one = schd.pool.get_task(ISO8601Point('20000101T0000Z'), 'x') + assert one + + # the second task (preparing) + two = schd.pool.get_task(ISO8601Point('20010101T0000Z'), 'x') + assert two + two.state_reset(TASK_STATUS_PREPARING) + + # the third task (force-triggered) + schd.pool.force_trigger_tasks(['20100101T0000Z/x'], ['1']) + three = schd.pool.get_task(ISO8601Point('20100101T0000Z'), 'x') + assert three + + # check for expiry + schd.pool.clock_expire_tasks() + + # the first task should be expired (it was waiting) + assert one.state(TASK_STATUS_EXPIRED) + assert one.state.outputs.is_message_complete(TASK_OUTPUT_EXPIRED) + + # the second task should *not* be expired (it was active) + assert not two.state(TASK_STATUS_EXPIRED) + assert not two.state.outputs.is_message_complete(TASK_OUTPUT_EXPIRED) + + # the third task should *not* be expired (it was a manual submit) + assert not three.state(TASK_STATUS_EXPIRED) + assert not three.state.outputs.is_message_complete(TASK_OUTPUT_EXPIRED) + + +async def test_removed_taskdef( + flow, + scheduler, + start, +): + """It should handle tasks being removed from the config. + + If the config of an active task is removed from the config by restart / + reload, then we must provide a fallback completion expression, otherwise + the expression will be blank (task has no required or optional outputs). + + The fallback is to consider the outputs complete if *any* final output is + received. Since the task has been removed from the workflow its outputs + should be inconsequential. 
+ + See: https://github.com/cylc/cylc-flow/issues/5057 + """ + id_ = flow({ + 'scheduling': { + 'graph': { + 'R1': 'a & z' + } + } + }) + + # start the workflow and mark the tasks as running + schd: 'Scheduler' = scheduler(id_) + async with start(schd): + for itask in schd.pool.get_tasks(): + itask.state_reset(TASK_STATUS_RUNNING) + assert itask.state.outputs._completion_expression == 'succeeded' + + # remove the task "z" from the config + id_ = flow({ + 'scheduling': { + 'graph': { + 'R1': 'a' + } + } + }, id_=id_) + + # restart the workflow + schd: 'Scheduler' = scheduler(id_) + async with start(schd): + # 1/a: + # * is still in the config + # * is should still have a sensible completion expression + # * its outputs should be incomplete if the task fails + a_1 = schd.pool.get_task(IntegerPoint('1'), 'a') + assert a_1 + assert a_1.state.outputs._completion_expression == 'succeeded' + a_1.state.outputs.set_message_complete(TASK_OUTPUT_FAILED) + assert not a_1.is_complete() + + # 1/z: + # * is no longer in the config + # * should have a blank completion expression + # * its outputs should be completed by any final output + z_1 = schd.pool.get_task(IntegerPoint('1'), 'z') + assert z_1 + assert z_1.state.outputs._completion_expression == '' + z_1.state.outputs.set_message_complete(TASK_OUTPUT_FAILED) + assert z_1.is_complete() diff --git a/tests/integration/test_simulation.py b/tests/integration/test_simulation.py index 72cf23996a4..66842fade25 100644 --- a/tests/integration/test_simulation.py +++ b/tests/integration/test_simulation.py @@ -184,11 +184,10 @@ def test_task_finishes(sim_time_check_setup, monkeytime, caplog): # After simulation time is up it Fails and records custom outputs: assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is True - outputs = { - o[0]: (o[1], o[2]) for o in fail_all_1066.state.outputs.get_all()} - assert outputs['succeeded'] == ('succeeded', False) - assert outputs['foo'] == ('bar', True) - assert outputs['failed'] == 
('failed', True) + outputs = fail_all_1066.state.outputs + assert outputs.is_message_complete('succeeded') is False + assert outputs.is_message_complete('bar') is True + assert outputs.is_message_complete('failed') is True def test_task_sped_up(sim_time_check_setup, monkeytime): @@ -334,7 +333,7 @@ async def test_settings_reload( one_1066 = schd.pool.get_tasks()[0] itask = run_simjob(schd, one_1066.point, 'one') - assert ['failed', 'failed', False] in itask.state.outputs.get_all() + assert itask.state.outputs.is_message_complete('failed') is False # Modify config as if reinstall had taken place: conf_file = Path(schd.workflow_run_dir) / 'flow.cylc' @@ -346,8 +345,7 @@ async def test_settings_reload( # Submit second psuedo-job and "run" to success: itask = run_simjob(schd, one_1066.point, 'one') - assert [ - 'succeeded', 'succeeded', True] in itask.state.outputs.get_all() + assert itask.state.outputs.is_message_complete('succeeded') is True async def test_settings_broadcast( diff --git a/tests/integration/test_task_pool.py b/tests/integration/test_task_pool.py index 43d6d50b520..4e72ed0bac4 100644 --- a/tests/integration/test_task_pool.py +++ b/tests/integration/test_task_pool.py @@ -1179,9 +1179,10 @@ async def test_detect_incomplete_tasks( start, log_filter, ): - """Finished but incomplete tasks should be retains as incomplete.""" - - final_task_states = { + """Finished but incomplete tasks should be retained as incomplete.""" + incomplete_final_task_states = { + # final task states that would leave a task with + # completion=succeeded incomplete TASK_STATUS_FAILED: TaskEventsManager.EVENT_FAILED, TASK_STATUS_EXPIRED: TaskEventsManager.EVENT_EXPIRED, TASK_STATUS_SUBMIT_FAILED: TaskEventsManager.EVENT_SUBMIT_FAILED @@ -1193,7 +1194,7 @@ async def test_detect_incomplete_tasks( 'scheduling': { 'graph': { # a workflow with one task for each of the final task states - 'R1': '\n'.join(final_task_states.keys()) + 'R1': '\n'.join(incomplete_final_task_states.keys()) } } 
}) @@ -1205,30 +1206,20 @@ async def test_detect_incomplete_tasks( # spawn the output corresponding to the task schd.pool.task_events_mgr.process_message( itask, 1, - final_task_states[itask.tdef.name] + incomplete_final_task_states[itask.tdef.name] ) # ensure that it is correctly identified as incomplete - assert itask.state.outputs.get_incomplete() - assert itask.state.outputs.is_incomplete() - if itask.tdef.name == TASK_STATUS_EXPIRED: - assert log_filter( - log, - contains=f"[{itask}] removed from active task pool: expired" - ) - # the task should have been removed - assert itask not in schd.pool.get_tasks() - else: - assert log_filter( - log, - contains=( - f"[{itask}] did not complete " - "required outputs:" - ) - ) - # the task should not have been removed - assert itask in schd.pool.get_tasks() - + assert not itask.state.outputs.is_complete() + assert log_filter( + log, + contains=( + f"[{itask}] did not complete the required outputs:" + ), + ) + # the task should not have been removed + assert itask in schd.pool.get_tasks() + async def test_future_trigger_final_point( flow, scheduler, @@ -1289,7 +1280,7 @@ async def test_set_failed_complete( assert log_filter( log, regex="1/one.* setting implied output: started") assert log_filter( - log, regex="failed.* did not complete required outputs") + log, regex="failed.* did not complete the required outputs") # Set failed task complete via default "set" args. 
schd.pool.set_prereqs_and_outputs([one.identity], None, None, ['all']) diff --git a/tests/integration/tui/screenshots/test_show.success.html b/tests/integration/tui/screenshots/test_show.success.html index 5f9c192b04b..b392130363b 100644 --- a/tests/integration/tui/screenshots/test_show.success.html +++ b/tests/integration/tui/screenshots/test_show.success.html @@ -8,7 +8,6 @@ - ──────────────────────────────────────────────── title: Foo description: The first metasyntactic @@ -16,13 +15,15 @@ URL: (not given) state: waiting prerequisites: (None) - outputs: ('-': not completed) - - 1/foo expired - - 1/foo submitted - - 1/foo submit-failed - - 1/foo started - - 1/foo succeeded - - 1/foo failed + outputs: ('⨯': not completed) + ⨯ 1/foo expired + ⨯ 1/foo submitted + ⨯ 1/foo submit-failed + ⨯ 1/foo started + ⨯ 1/foo succeeded + ⨯ 1/foo failed + output completion: incomplete + ⨯ ⦙ succeeded q to close @@ -35,7 +36,6 @@ - quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End filter tasks: T f s r R filter workflows: W E p \ No newline at end of file diff --git a/tests/integration/validate/test_outputs.py b/tests/integration/validate/test_outputs.py index 5675372a09f..b26bce529fb 100644 --- a/tests/integration/validate/test_outputs.py +++ b/tests/integration/validate/test_outputs.py @@ -32,7 +32,6 @@ 'foo', 'foo-bar', 'foo_bar', - 'foo.bar', '0foo0', '123', ], @@ -152,7 +151,7 @@ def test_messages(messages, valid, flow, validate): 'runtime': { 'foo': { 'outputs': { - str(random()): message + str(random())[2:]: message for message in messages } } @@ -164,3 +163,179 @@ def test_messages(messages, valid, flow, validate): else: with pytest.raises(WorkflowConfigError): val() + + +@pytest.mark.parametrize( + 'graph, expression, message', [ + pytest.param( + 'foo:x', + 'succeeded and (x or y)', + r'foo:x is required in the graph.*' + r' but optional in the completion expression', + id='required-in-graph-optional-in-completion', + ), + pytest.param( + 
'foo:x?', + 'succeeded and x', + r'foo:x is optional in the graph.*' + r' but required in the completion expression', + id='optional-in-graph-required-in-completion', + ), + pytest.param( + 'foo:x', + 'succeeded', + 'foo:x is required in the graph.*' + 'but not referenced in the completion expression', + id='required-in-graph-not-referenced-in-completion', + ), + pytest.param( + # tests proposal point 4: + # https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + 'foo:expired', + 'succeeded', + 'foo:expired must be optional', + id='expire-required-in-graph', + ), + pytest.param( + 'foo:expired?', + 'succeeded', + 'foo:expired is permitted in the graph.*' + '\nTry: completion = "succeeded or expired"', + id='expire-optional-in-graph-but-not-used-in-completion' + ), + pytest.param( + # tests part of proposal point 5: + # https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + 'foo', + 'finished and x', + '"finished" output cannot be used in completion expressions', + id='finished-output-used-in-completion-expression', + ), + pytest.param( + # https://github.com/cylc/cylc-flow/pull/6046#issuecomment-2059266086 + 'foo?', + 'x and failed', + 'foo:failed is optional in the graph.*' + 'but required in the completion expression', + id='failed-implicitly-optional-in-graph-required-in-completion', + ), + pytest.param( + 'foo', + '(succeed and x) or failed', + 'Use "succeeded" not "succeed" in completion expressions', + id='alt-compvar1', + ), + pytest.param( + 'foo? & foo:submitted?', + 'submit_fail or succeeded', + 'Use "submit_failed" not "submit_fail" in completion expressions', + id='alt-compvar2', + ), + pytest.param( + 'foo? 
& foo:submitted?', + 'submit-failed or succeeded', + 'Use "submit_failed" rather than "submit-failed"' + ' in completion expressions.', + id='submit-failed used in completion expression', + ), + pytest.param( + 'foo:file-1', + 'succeeded or file-1', + 'Replace hyphens with underscores in task outputs when' + ' used in completion expressions.', + id='Hyphen used in completion expression', + ), + pytest.param( + 'foo:x', + 'not succeeded or x', + 'Error in .*' + '\nInvalid expression', + id='Non-whitelisted syntax used in completion expression', + ), + ] +) +def test_completion_expression_invalid( + flow, + validate, + graph, + expression, + message, +): + """It should ensure the completion is logically consistent with the graph. + + Tests proposal point 5: + https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + """ + id_ = flow({ + 'scheduling': { + 'graph': {'R1': graph}, + }, + 'runtime': { + 'foo': { + 'completion': expression, + 'outputs': { + 'x': 'xxx', + 'y': 'yyy', + }, + }, + }, + }) + with pytest.raises(WorkflowConfigError, match=message): + validate(id_) + + +@pytest.mark.parametrize( + 'graph, expression', [ + ('foo', 'succeeded and (x or y or z)'), + ('foo?', 'succeeded and (x or y or z) or failed or expired'), + ('foo', '(succeeded and x) or (expired and y)'), + ] +) +def test_completion_expression_valid( + flow, + validate, + graph, + expression, +): + id_ = flow({ + 'scheduling': { + 'graph': {'R1': graph}, + }, + 'runtime': { + 'foo': { + 'completion': expression, + 'outputs': { + 'x': 'xxx', + 'y': 'yyy', + 'z': 'zzz', + }, + }, + }, + }) + validate(id_) + + +def test_completion_expression_cylc7_compat( + flow, + validate, + monkeypatch +): + id_ = flow({ + 'scheduling': { + 'graph': {'R1': 'foo'}, + }, + 'runtime': { + 'foo': { + 'completion': 'succeeded and x', + 'outputs': { + 'x': 'xxx', + }, + }, + }, + }) + monkeypatch.setattr('cylc.flow.flags.cylc7_back_compat', True) + with pytest.raises( + 
WorkflowConfigError, + match="completion cannot be used in Cylc 7 compatibility mode." + ): + validate(id_) diff --git a/tests/unit/scripts/test_config.py b/tests/unit/scripts/test_config.py index 55d1b69dcdd..a7f67c4da69 100644 --- a/tests/unit/scripts/test_config.py +++ b/tests/unit/scripts/test_config.py @@ -239,4 +239,6 @@ def test_cylc_config_xtriggers(tmp_run_dir, capsys: pytest.CaptureFixture): R1 = @rotund => foo [runtime] [[root]] + [[foo]] + completion = succeeded """) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 98d9fc2f4ce..a69ab176412 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -1317,28 +1317,6 @@ def test_implicit_success_required(tmp_flow_config, graph): assert cfg.taskdefs['foo'].outputs[TASK_OUTPUT_SUCCEEDED][1] -@pytest.mark.parametrize( - 'graph', - [ - "foo:submit? => bar", - "foo:submit-fail? => bar", - ] -) -def test_success_after_optional_submit(tmp_flow_config, graph): - """Check foo:succeed is not required if foo:submit is optional.""" - id_ = 'blargh' - flow_file = tmp_flow_config(id_, f""" - [scheduling] - [[graph]] - R1 = {graph} - [runtime] - [[bar]] - [[foo]] - """) - cfg = WorkflowConfig(workflow=id_, fpath=flow_file, options=None) - assert not cfg.taskdefs['foo'].outputs[TASK_OUTPUT_SUCCEEDED][1] - - @pytest.mark.parametrize( 'allow_implicit_tasks', [ diff --git a/tests/unit/test_graph_parser.py b/tests/unit/test_graph_parser.py index 69726a0730d..84a8e4611fd 100644 --- a/tests/unit/test_graph_parser.py +++ b/tests/unit/test_graph_parser.py @@ -735,7 +735,6 @@ def test_task_optional_outputs(): ('succeed', TASK_OUTPUT_SUCCEEDED), ('fail', TASK_OUTPUT_FAILED), ('submit', TASK_OUTPUT_SUBMITTED), - ('submit-fail', TASK_OUTPUT_SUBMIT_FAILED), ] ) def test_family_optional_outputs(qual, task_output): @@ -766,6 +765,26 @@ def test_family_optional_outputs(qual, task_output): assert gp.task_output_opt[(member, task_output)][0] == optional +def test_cannot_be_required(): + """Is should 
not allow :expired or :submit-failed to be required. + + See proposal point 4: + https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + """ + gp = GraphParser({}) + + # outputs can be optional + gp.parse_graph('a:expired? => b') + gp.parse_graph('a:submit-failed? => b') + + # but cannot be required + with pytest.raises(GraphParseError, match='must be optional'): + gp.parse_graph('a:expired => b') + with pytest.raises(GraphParseError, match='must be optional'): + gp.parse_graph('a:submit-failed => b') + + + @pytest.mark.parametrize( 'graph, error', [ diff --git a/tests/unit/test_subprocpool.py b/tests/unit/test_subprocpool.py index c72ffc4d094..7da14e9e73c 100644 --- a/tests/unit/test_subprocpool.py +++ b/tests/unit/test_subprocpool.py @@ -30,6 +30,13 @@ from cylc.flow.task_events_mgr import TaskJobLogsRetrieveContext from cylc.flow.subprocctx import SubProcContext from cylc.flow.subprocpool import SubProcPool, _XTRIG_FUNC_CACHE, _XTRIG_MOD_CACHE, get_xtrig_func +from cylc.flow.task_outputs import ( + TASK_OUTPUT_SUBMITTED, + TASK_OUTPUT_SUBMIT_FAILED, + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, + TASK_OUTPUT_EXPIRED, +) from cylc.flow.task_proxy import TaskProxy @@ -316,8 +323,7 @@ def test__run_command_exit_add_to_badhosts(mock_ctx): def test__run_command_exit_add_to_badhosts_log(caplog, mock_ctx): - """It gets platform name from the callback args. 
- """ + """It gets platform name from the callback args.""" badhosts = {'foo', 'bar'} SubProcPool._run_command_exit( mock_ctx(cmd=['ssh']), @@ -330,7 +336,11 @@ def test__run_command_exit_add_to_badhosts_log(caplog, mock_ctx): external_triggers=[], xtrig_labels={}, expiration_offset=None, outputs={ - 'submitted': [None, None], 'submit-failed': [None, None] + TASK_OUTPUT_SUBMITTED: [None, None], + TASK_OUTPUT_SUBMIT_FAILED: [None, None], + TASK_OUTPUT_SUCCEEDED: [None, None], + TASK_OUTPUT_FAILED: [None, None], + TASK_OUTPUT_EXPIRED: [None, None], }, graph_children={}, rtconfig={'platform': 'foo'} diff --git a/tests/unit/test_task_outputs.py b/tests/unit/test_task_outputs.py index 4f61e696fbc..70a297edff5 100644 --- a/tests/unit/test_task_outputs.py +++ b/tests/unit/test_task_outputs.py @@ -13,30 +13,284 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import random -import unittest - -from cylc.flow.task_outputs import TaskOutputs - - -class TestMessageSorting(unittest.TestCase): - - TEST_MESSAGES = [ - ['expired', 'expired', False], - ['submitted', 'submitted', False], - ['submit-failed', 'submit-failed', False], - ['started', 'started', False], - ['succeeded', 'succeeded', False], - ['failed', 'failed', False], - [None, None, False], - ['foo', 'bar', False], - ['foot', 'bart', False], - # NOTE: [None, 'bar', False] is unstable under Python2 - ] - - def test_sorting(self): - messages = list(self.TEST_MESSAGES) - for _ in range(5): - random.shuffle(messages) - output = sorted(messages, key=TaskOutputs.msg_sort_key) - self.assertEqual(output, self.TEST_MESSAGES, output) + +from types import SimpleNamespace + +import pytest + +from cylc.flow.task_outputs import ( + TASK_OUTPUTS, + TASK_OUTPUT_EXPIRED, + TASK_OUTPUT_FAILED, + TASK_OUTPUT_SUBMITTED, + TASK_OUTPUT_SUBMIT_FAILED, + TASK_OUTPUT_SUCCEEDED, + TaskOutputs, + get_completion_expression, + get_trigger_completion_variable_maps, +) +from 
cylc.flow.util import sstrip + + +def tdef(required, optional, completion=None): + """Stub a task definition. + + Args: + required: Collection of required outputs. + optional: Collection of optional outputs. + completion: User defined execution completion expression. + + """ + return SimpleNamespace( + rtconfig={ + 'completion': completion, + }, + outputs={ + output: ( + output, + ( + # output is required: + True if output in required + # output is optional: + else False if output in optional + # output is ambiguous (i.e. not referenced in graph): + else None + ) + ) + for output in set(TASK_OUTPUTS) | set(required) | set(optional) + }, + ) + + +def test_completion_implicit(): + """It should generate a completion expression when none is provided. + + The outputs should be considered "complete" according to the logic in + proposal point 5: + https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + """ + # one required output - succeeded + outputs = TaskOutputs(tdef([TASK_OUTPUT_SUCCEEDED], [])) + + # the completion expression should only contain the one required output + assert outputs._completion_expression == 'succeeded' + # the outputs should be incomplete - it hasn't run yet + assert outputs.is_complete() is False + + # set the submit-failed output + outputs.set_message_complete(TASK_OUTPUT_SUBMIT_FAILED) + # the outputs should be incomplete - submited-failed is a "final" output + assert outputs.is_complete() is False + + # set the submitted and succeeded outputs + outputs.set_message_complete(TASK_OUTPUT_SUBMITTED) + outputs.set_message_complete(TASK_OUTPUT_SUCCEEDED) + # the outputs should be complete - it has run an succeedd + assert outputs.is_complete() is True + + # set the expired output + outputs.set_message_complete(TASK_OUTPUT_EXPIRED) + # the outputs should still be complete - it has run and succeeded + assert outputs.is_complete() is True + + +def test_completion_explicit(): + """It should use the provided completion 
expression. + + The outputs should be considered "complete" according to the logic in + proposal point 5: + https://cylc.github.io/cylc-admin/proposal-optional-output-extension.html#proposal + """ + outputs = TaskOutputs(tdef( + # no required outputs + [], + # four optional outputs + [ + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, + 'x', + 'y', + ], + # one pair must be satisfied for the outputs to be complete + completion='(succeeded and x) or (failed and y)', + )) + + # the outputs should be incomplete - it hasn't run yet + assert outputs.is_complete() is False + + # set the succeeded and failed outputs + outputs.set_message_complete(TASK_OUTPUT_SUCCEEDED) + outputs.set_message_complete(TASK_OUTPUT_FAILED) + + # the task should be incomplete - it has executed but the completion + # expression is not satisfied + assert outputs.is_complete() is False + + # satisfy the (failed and y) pair + outputs.set_message_complete('y') + assert outputs.is_complete() is True + + # satisfy the (succeeded and x) pair + outputs._completed['y'] = False + outputs.set_message_complete('x') + assert outputs.is_complete() is True + + +@pytest.mark.parametrize( + 'required, optional, expression', [ + pytest.param( + {TASK_OUTPUT_SUCCEEDED}, + [], + 'succeeded', + id='0', + ), + pytest.param( + {TASK_OUTPUT_SUCCEEDED, 'x'}, + [], + '(succeeded and x)', + id='1', + ), + pytest.param( + [], + {TASK_OUTPUT_SUCCEEDED}, + 'succeeded or failed', + id='2', + ), + pytest.param( + {TASK_OUTPUT_SUCCEEDED}, + {TASK_OUTPUT_EXPIRED}, + 'succeeded or expired', + id='3', + ), + pytest.param( + [], + {TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_EXPIRED}, + 'succeeded or failed or expired', + id='4', + ), + pytest.param( + {TASK_OUTPUT_SUCCEEDED}, + {TASK_OUTPUT_EXPIRED, TASK_OUTPUT_SUBMITTED}, + 'succeeded or submit_failed or expired', + id='5', + ), + pytest.param( + {TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_SUBMITTED}, + {TASK_OUTPUT_EXPIRED}, + '(submitted and succeeded) or expired', + id='6', + ), + pytest.param( 
+ [], + {TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_SUBMIT_FAILED}, + 'succeeded or failed or submit_failed', + id='7', + ), + pytest.param( + {'x'}, + { + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_SUBMIT_FAILED, + TASK_OUTPUT_EXPIRED, + }, + '(x and succeeded) or failed or submit_failed or expired', + id='8', + ), + ], +) +def test_get_completion_expression_implicit(required, optional, expression): + """It should generate a completion expression if none is provided.""" + assert get_completion_expression(tdef(required, optional)) == expression + + +def test_get_completion_expression_explicit(): + """If a completion expression is used, it should be used unmodified.""" + assert get_completion_expression(tdef( + {'x', 'y'}, + {TASK_OUTPUT_SUCCEEDED, TASK_OUTPUT_FAILED, TASK_OUTPUT_EXPIRED}, + '((failed and x) or (succeeded and y)) or expired' + )) == '((failed and x) or (succeeded and y)) or expired' + + +def test_format_completion_status(): + outputs = TaskOutputs( + tdef( + {TASK_OUTPUT_SUCCEEDED, 'x', 'y'}, + {TASK_OUTPUT_EXPIRED}, + ) + ) + assert outputs.format_completion_status( + indent=2, gutter=2 + ) == ' ' + sstrip( + ''' + ⦙ ( + ⨯ ⦙ succeeded + ⨯ ⦙ and x + ⨯ ⦙ and y + ⦙ ) + ⨯ ⦙ or expired + ''' + ) + outputs.set_message_complete('succeeded') + outputs.set_message_complete('x') + assert outputs.format_completion_status( + indent=2, gutter=2 + ) == ' ' + sstrip( + ''' + ⦙ ( + ✓ ⦙ succeeded + ✓ ⦙ and x + ⨯ ⦙ and y + ⦙ ) + ⨯ ⦙ or expired + ''' + ) + + +def test_iter_required_outputs(): + """It should yield required outputs only.""" + # this task has three required outputs and one optional output + outputs = TaskOutputs( + tdef( + {TASK_OUTPUT_SUCCEEDED, 'x', 'y'}, + {'z'} + ) + ) + assert set(outputs.iter_required_messages()) == { + TASK_OUTPUT_SUCCEEDED, + 'x', + 'y', + } + + # this task does not have any required outputs (besides the implicitly + # required submitted/started outputs) + outputs = TaskOutputs( + tdef( + # Note: validation should prevent this at the config 
level + {TASK_OUTPUT_SUCCEEDED, 'x', 'y'}, + {TASK_OUTPUT_FAILED}, # task may fail + ) + ) + assert set(outputs.iter_required_messages()) == set() + + # the preconditions expiry/submitted are excluded from this logic when + # defined as optional + outputs = TaskOutputs( + tdef( + {TASK_OUTPUT_SUCCEEDED, 'x', 'y'}, + {TASK_OUTPUT_EXPIRED}, # task may expire + ) + ) + assert outputs._completion_expression == '(succeeded and x and y) or expired' + assert set(outputs.iter_required_messages()) == { + TASK_OUTPUT_SUCCEEDED, + 'x', + 'y', + } + + +def test_get_trigger_completion_variable_maps(): + """It should return a bi-map of triggers to compvars.""" + t2c, c2t = get_trigger_completion_variable_maps(('a', 'b-b', 'c-c-c')) + assert t2c == {'a': 'a', 'b-b': 'b_b', 'c-c-c': 'c_c_c'} + assert c2t == {'a': 'a', 'b_b': 'b-b', 'c_c_c': 'c-c-c'} diff --git a/tests/unit/test_xtrigger_mgr.py b/tests/unit/test_xtrigger_mgr.py index 3cfee363d15..5192a7d3dd8 100644 --- a/tests/unit/test_xtrigger_mgr.py +++ b/tests/unit/test_xtrigger_mgr.py @@ -154,10 +154,10 @@ def test_housekeeping_with_xtrigger_satisfied(xtrigger_mgr): xtrig.out = "[\"True\", {\"name\": \"Yossarian\"}]" tdef = TaskDef( name="foo", - rtcfg=None, + rtcfg={'completion': None}, run_mode="live", start_point=1, - initial_point=1 + initial_point=1, ) init() sequence = ISO8601Sequence('P1D', '2019') @@ -197,7 +197,7 @@ def test__call_xtriggers_async(xtrigger_mgr): # create a task tdef = TaskDef( name="foo", - rtcfg=None, + rtcfg={'completion': None}, run_mode="live", start_point=1, initial_point=1