]]
+ [[foo, bar]]
+ script = false
+ [[setter]]
+ script = """
+ # wait for foo and bar to fail.
+ for TASK in foo bar
+ do
+ cylc workflow-state \
+ --max-polls=10 \
+ --interval=1 \
+ --task=$TASK \
+ --point=${CYLC_TASK_CYCLE_POINT} \
+ --status=failed \
+ $CYLC_WORKFLOW_ID
+ done
+ # set foo succeeded (via --output)
+ cylc set -o succeeded $CYLC_WORKFLOW_ID//$CYLC_TASK_CYCLE_POINT/foo
+ # set bar succeeded (via default)
+ cylc set $CYLC_WORKFLOW_ID//$CYLC_TASK_CYCLE_POINT/bar
+ """
diff --git a/tests/functional/cylc-set/00-set-succeeded/reference.log b/tests/functional/cylc-set/00-set-succeeded/reference.log
new file mode 100644
index 00000000000..26468845a5c
--- /dev/null
+++ b/tests/functional/cylc-set/00-set-succeeded/reference.log
@@ -0,0 +1,5 @@
+1/setter -triggered off [] in flow 1
+1/foo -triggered off [] in flow 1
+1/bar -triggered off [] in flow 1
+1/post_m1 -triggered off ['1/bar', '1/foo'] in flow 1
+1/post_m2 -triggered off ['1/bar', '1/foo'] in flow 1
diff --git a/tests/functional/cylc-set/01-off-flow-pre.t b/tests/functional/cylc-set/01-off-flow-pre.t
new file mode 100644
index 00000000000..06a1e864ce3
--- /dev/null
+++ b/tests/functional/cylc-set/01-off-flow-pre.t
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+#
+# "cylc set" proposal examples: 2 - Set off-flow prerequisites to prevent a new flow from stalling.
+# https://cylc.github.io/cylc-admin/proposal-cylc-set.html#2-set-off-flow-prerequisites-to-prep-for-a-new-flow
+
+. "$(dirname "$0")/test_header"
+set_test_number 8
+
+install_and_validate
+reftest_run
+
+grep_workflow_log_ok "${TEST_NAME_BASE}-ab" '1/a does not depend on "1/b_cold:succeeded"'
+grep_workflow_log_ok "${TEST_NAME_BASE}-ac" '1/a does not depend on "1/c_cold:succeeded"'
+
+grep_workflow_log_ok "${TEST_NAME_BASE}-ba" '1/b does not depend on "1/a_cold:succeeded"'
+grep_workflow_log_ok "${TEST_NAME_BASE}-bc" '1/b does not depend on "1/c_cold:succeeded"'
+
+grep_workflow_log_ok "${TEST_NAME_BASE}-ca" '1/c does not depend on "1/a_cold:succeeded"'
+grep_workflow_log_ok "${TEST_NAME_BASE}-cb" '1/c does not depend on "1/b_cold:succeeded"'
+
+purge
diff --git a/tests/functional/cylc-set/01-off-flow-pre/flow.cylc b/tests/functional/cylc-set/01-off-flow-pre/flow.cylc
new file mode 100644
index 00000000000..5c2ae5b97c4
--- /dev/null
+++ b/tests/functional/cylc-set/01-off-flow-pre/flow.cylc
@@ -0,0 +1,35 @@
+# start a new flow after setting off-flow prerequisites to avoid stall.
+
+[scheduler]
+ [[events]]
+ stall timeout = PT0S
+ abort on stall timeout = True
+ inactivity timeout = PT30S
+ abort on inactivity timeout = True
+
+[scheduling]
+ [[graph]]
+ R1 = """
+ # the tasks we want the flow to run
+ a => b => c => reflow
+ # the off-flow prerequisites
+ a_cold => a
+ b_cold => b
+ c_cold => c
+ """
+[runtime]
+ [[a, b, c]]
+ [[a_cold, b_cold, c_cold]]
+ [[reflow]]
+ script = """
+ if (( CYLC_TASK_SUBMIT_NUMBER == 1 )); then
+ # set off-flow prerequisites (and trigger 1/a)
+ cylc set --flow=new \
+ --pre=1/a_cold:succeeded \
+ --pre=1/b_cold:succeeded \
+ --pre=1/c_cold:succeeded \
+ ${CYLC_WORKFLOW_ID}//1/a \
+ ${CYLC_WORKFLOW_ID}//1/b \
+ ${CYLC_WORKFLOW_ID}//1/c
+ fi
+ """
diff --git a/tests/functional/cylc-set/01-off-flow-pre/reference.log b/tests/functional/cylc-set/01-off-flow-pre/reference.log
new file mode 100644
index 00000000000..07c980ca981
--- /dev/null
+++ b/tests/functional/cylc-set/01-off-flow-pre/reference.log
@@ -0,0 +1,11 @@
+1/c_cold -triggered off [] in flow 1
+1/a_cold -triggered off [] in flow 1
+1/b_cold -triggered off [] in flow 1
+1/a -triggered off ['1/a_cold'] in flow 1
+1/b -triggered off ['1/a', '1/b_cold'] in flow 1
+1/c -triggered off ['1/b', '1/c_cold'] in flow 1
+1/reflow -triggered off ['1/c'] in flow 1
+1/a -triggered off ['1/a_cold'] in flow 2
+1/b -triggered off ['1/a', '1/b_cold'] in flow 2
+1/c -triggered off ['1/b', '1/c_cold'] in flow 2
+1/reflow -triggered off ['1/c'] in flow 2
diff --git a/tests/functional/cylc-set/02-off-flow-out.t b/tests/functional/cylc-set/02-off-flow-out.t
new file mode 100644
index 00000000000..a18d3e61fbf
--- /dev/null
+++ b/tests/functional/cylc-set/02-off-flow-out.t
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+
+# "cylc set" proposal examples: 2 - Set off-flow outputs to prevent a new flow from stalling.
+# https://cylc.github.io/cylc-admin/proposal-cylc-set.html#2-set-off-flow-prerequisites-to-prep-for-a-new-flow
+
+. "$(dirname "$0")/test_header"
+set_test_number 11
+
+install_and_validate
+reftest_run
+
+# Check that we set:
+# - all the required outputs of a_cold
+# - the requested and implied outputs of b_cold and c_cold
+
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-a1" '1/a_cold.* setting implied output: submitted'
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-a2" '1/a_cold.* setting implied output: started'
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-a3" '1/a_cold.* task completed'
+
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-b1" '1/b_cold.* setting implied output: submitted'
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-b2" '1/b_cold.* setting implied output: started'
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-b3" '1/b_cold.* task completed'
+
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-c1" '1/c_cold.* setting implied output: submitted'
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-c2" '1/c_cold.* setting implied output: started'
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep-c3" '1/c_cold.* task completed'
+
+purge
diff --git a/tests/functional/cylc-set/02-off-flow-out/flow.cylc b/tests/functional/cylc-set/02-off-flow-out/flow.cylc
new file mode 100644
index 00000000000..f8f2001f077
--- /dev/null
+++ b/tests/functional/cylc-set/02-off-flow-out/flow.cylc
@@ -0,0 +1,32 @@
+# start a new flow after setting off-flow outputs to avoid stall.
+
+[scheduler]
+ [[events]]
+ stall timeout = PT0S
+ abort on stall timeout = True
+ inactivity timeout = PT30S
+ abort on inactivity timeout = True
+
+[scheduling]
+ [[graph]]
+ R1 = """
+ # the tasks we want the flow to run
+ a => b => c => reflow
+ # the off-flow prerequisites
+ a_cold => a
+ b_cold => b
+ c_cold => c
+ """
+[runtime]
+ [[a, b, c]]
+ [[a_cold, b_cold, c_cold]]
+ [[reflow]]
+ script = """
+ if (( CYLC_TASK_SUBMIT_NUMBER == 1 )); then
+ # set off-flow outputs of x_cold
+ cylc set --flow=new \
+ ${CYLC_WORKFLOW_ID}//1/a_cold \
+ ${CYLC_WORKFLOW_ID}//1/b_cold \
+ ${CYLC_WORKFLOW_ID}//1/c_cold
+ fi
+ """
diff --git a/tests/functional/cylc-set/02-off-flow-out/reference.log b/tests/functional/cylc-set/02-off-flow-out/reference.log
new file mode 100644
index 00000000000..07c980ca981
--- /dev/null
+++ b/tests/functional/cylc-set/02-off-flow-out/reference.log
@@ -0,0 +1,11 @@
+1/c_cold -triggered off [] in flow 1
+1/a_cold -triggered off [] in flow 1
+1/b_cold -triggered off [] in flow 1
+1/a -triggered off ['1/a_cold'] in flow 1
+1/b -triggered off ['1/a', '1/b_cold'] in flow 1
+1/c -triggered off ['1/b', '1/c_cold'] in flow 1
+1/reflow -triggered off ['1/c'] in flow 1
+1/a -triggered off ['1/a_cold'] in flow 2
+1/b -triggered off ['1/a', '1/b_cold'] in flow 2
+1/c -triggered off ['1/b', '1/c_cold'] in flow 2
+1/reflow -triggered off ['1/c'] in flow 2
diff --git a/tests/functional/cylc-set/03-set-failed.t b/tests/functional/cylc-set/03-set-failed.t
new file mode 100644
index 00000000000..1910e5b3120
--- /dev/null
+++ b/tests/functional/cylc-set/03-set-failed.t
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+
+# "cylc set" proposal examples: 4 - check that we can set a dead orphaned job to failed.
+# https://cylc.github.io/cylc-admin/proposal-cylc-set.html#4-set-jobs-to-failed-when-a-job-platform-is-known-to-be-down
+
+. "$(dirname "$0")/test_header"
+set_test_number 3
+
+install_and_validate
+
+run_ok play-it cylc play --debug "${WORKFLOW_NAME}"
+
+poll_grep_workflow_log -E "1/foo.* \(internal\)submitted"
+
+cylc set -o failed "${WORKFLOW_NAME}//1/foo"
+
+
+# Check the log for:
+# - set completion message
+# - implied outputs reported as already completed
+
+poll_grep_workflow_log -E "1/foo.* => failed"
+poll_grep_workflow_log -E "1/foo.* did not complete required outputs"
+
+cylc stop --now --now --interval=2 --max-polls=5 "${WORKFLOW_NAME}"
+
+# Check the DB records all the outputs.
+sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \
+ "SELECT outputs FROM task_outputs WHERE name is \"foo\"" > db-foo.1
+
+# Json string list of outputs from the db may not be ordered correctly.
+python3 - << __END__ > db-foo.2
+import json
+with open("db-foo.1", 'r') as f:
+ print(
+ ','.join(
+ sorted(
+ json.load(f)
+ )
+ )
+ )
+__END__
+
+cmp_ok "db-foo.2" - << __OUT__
+failed,started,submitted
+__OUT__
+
+purge
diff --git a/tests/functional/cylc-set/03-set-failed/flow.cylc b/tests/functional/cylc-set/03-set-failed/flow.cylc
new file mode 100644
index 00000000000..9d7514ccb83
--- /dev/null
+++ b/tests/functional/cylc-set/03-set-failed/flow.cylc
@@ -0,0 +1,18 @@
+# A single task that dies silently, requiring set to failed
+
+[scheduler]
+ [[events]]
+ inactivity timeout = PT20S
+ abort on inactivity timeout = True
+
+[scheduling]
+ [[graph]]
+ R1 = "foo"
+
+[runtime]
+ [[foo]]
+ init-script = cylc__job__disable_fail_signals
+ script = """
+ cylc__job__wait_cylc_message_started
+ exit 1
+ """
diff --git a/tests/functional/cylc-set/04-switch.t b/tests/functional/cylc-set/04-switch.t
new file mode 100644
index 00000000000..e5134d32504
--- /dev/null
+++ b/tests/functional/cylc-set/04-switch.t
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+
+# "cylc set" proposal examples: 5 - Set and complete a future switch task with the "--wait" flag
+# https://cylc.github.io/cylc-admin/proposal-cylc-set.html#5-set-switch-tasks-at-an-optional-branch-point-to-direct-the-future-flow
+
+. "$(dirname "$0")/test_header"
+set_test_number 5
+
+install_and_validate
+reftest_run
+
+# The branch-point task foo should be recorded as succeeded.
+
+sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \
+ "SELECT status FROM task_states WHERE name is \"foo\"" > db-foo.2
+
+cmp_ok "db-foo.2" - << __OUT__
+succeeded
+__OUT__
+
+# the outputs of foo should be recorded as:
+# a, succeeded
+# and the implied outputs (of succeeded) as well:
+# submitted, started
+
+sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \
+ "SELECT outputs FROM task_outputs WHERE name is \"foo\"" > db-foo.1
+
+# Json string list of outputs from the db may not be ordered correctly.
+python3 - << __END__ > db-foo.2
+import json
+with open("db-foo.1", 'r') as f:
+ print(
+ ','.join(
+ sorted(
+ json.load(f)
+ )
+ )
+ )
+__END__
+
+cmp_ok "db-foo.2" - << __OUT__
+a,started,submitted,succeeded
+__OUT__
+
+# Check the flow-wait worked
+grep_workflow_log_ok check-wait "1/foo.* spawning outputs after flow-wait" -E
+
+purge
diff --git a/tests/functional/cylc-set/04-switch/flow.cylc b/tests/functional/cylc-set/04-switch/flow.cylc
new file mode 100644
index 00000000000..8a0ded59ce0
--- /dev/null
+++ b/tests/functional/cylc-set/04-switch/flow.cylc
@@ -0,0 +1,31 @@
+# Set outputs of future task to direct the flow at an optional branch point.
+
+[scheduler]
+ [[events]]
+ inactivity timeout = PT20S
+ abort on inactivity timeout = True
+ stall timeout = PT0S
+ abort on stall timeout = True
+
+[scheduling]
+ [[graph]]
+ R1 = """
+ switcher => foo
+ foo:a? => a
+ foo:b? => b
+ """
+[runtime]
+ [[switcher]]
+ script = """
+ cylc set --output=a,succeeded --wait ${CYLC_WORKFLOW_ID}//1/foo
+ # wait for command actioned, to avoid race condition
+ cylc__job__poll_grep_workflow_log "actioned"
+ """
+ [[foo]]
+ script = "cylc message b" # always go b-way if I run
+ [[[outputs]]]
+ a = a
+ b = b
+ [[a]]
+ [[b]]
+ script = false
diff --git a/tests/functional/cylc-set/04-switch/reference.log b/tests/functional/cylc-set/04-switch/reference.log
new file mode 100644
index 00000000000..e4fe80d6929
--- /dev/null
+++ b/tests/functional/cylc-set/04-switch/reference.log
@@ -0,0 +1,2 @@
+1/switcher -triggered off [] in flow 1
+1/a -triggered off ['1/foo'] in flow 1
diff --git a/tests/functional/cylc-set/05-expire.t b/tests/functional/cylc-set/05-expire.t
new file mode 100644
index 00000000000..dae60be234e
--- /dev/null
+++ b/tests/functional/cylc-set/05-expire.t
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+
+# "cylc set" proposal examples: 6 - check that forced task expiry works
+# https://cylc.github.io/cylc-admin/proposal-cylc-set.html#6-expire-a-task
+
+. "$(dirname "$0")/test_header"
+set_test_number 4
+
+install_and_validate
+reftest_run
+
+sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \
+ "SELECT status FROM task_states WHERE name is \"bar\"" > db-bar.1
+
+cmp_ok "db-bar.1" - << __OUT__
+expired
+__OUT__
+
+sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \
+ "SELECT outputs FROM task_outputs WHERE name is \"bar\"" > db-bar.2
+
+cmp_ok "db-bar.2" - << __OUT__
+["expired"]
+__OUT__
+
+purge
diff --git a/tests/functional/cylc-set/05-expire/flow.cylc b/tests/functional/cylc-set/05-expire/flow.cylc
new file mode 100644
index 00000000000..57d94dbb99e
--- /dev/null
+++ b/tests/functional/cylc-set/05-expire/flow.cylc
@@ -0,0 +1,24 @@
+# Expire a future task, so it won't run.
+
+[scheduler]
+ [[events]]
+ inactivity timeout = PT20S
+ abort on inactivity timeout = True
+ stall timeout = PT0S
+ abort on stall timeout = True
+
+[scheduling]
+ [[graph]]
+ R1 = """
+ # bar and baz should not run if bar expires
+ expirer => foo => bar? => baz
+ bar:expired?
+ """
+[runtime]
+ [[expirer]]
+ script = """
+ cylc set --output=expired ${CYLC_WORKFLOW_ID}//1/bar
+ """
+ [[foo]]
+ [[bar, baz]]
+ script = false
diff --git a/tests/functional/cylc-set/05-expire/reference.log b/tests/functional/cylc-set/05-expire/reference.log
new file mode 100644
index 00000000000..0966b1b6f90
--- /dev/null
+++ b/tests/functional/cylc-set/05-expire/reference.log
@@ -0,0 +1,2 @@
+1/expirer -triggered off [] in flow 1
+1/foo -triggered off ['1/expirer'] in flow 1
diff --git a/tests/functional/cylc-set/06-parentless.t b/tests/functional/cylc-set/06-parentless.t
new file mode 100644
index 00000000000..9ccf16e30e0
--- /dev/null
+++ b/tests/functional/cylc-set/06-parentless.t
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+
+# "cylc set" proposal examples: 7 - Check spawning a parentless task without ignoring xtriggers.
+# https://cylc.github.io/cylc-admin/proposal-cylc-set.html#7-spawning-parentless-tasks
+
+. "$(dirname "$0")/test_header"
+set_test_number 3
+
+install_and_validate
+REFTEST_OPTS="--start-task=1800/a" reftest_run
+
+grep_workflow_log_ok "${TEST_NAME_BASE}-clock" "xtrigger satisfied: wall_clock"
+
+purge
diff --git a/tests/functional/cylc-set/06-parentless/flow.cylc b/tests/functional/cylc-set/06-parentless/flow.cylc
new file mode 100644
index 00000000000..5078b84e484
--- /dev/null
+++ b/tests/functional/cylc-set/06-parentless/flow.cylc
@@ -0,0 +1,22 @@
+# Start this with --start-task=1800/a.
+# It should stall because x => b is off-flow.
+# The stall handler should unstall it by spawning x.
+# The log should show a clock-trigger check before x runs.
+
+[scheduler]
+ [[events]]
+ inactivity timeout = PT30S
+ abort on inactivity timeout = True
+ stall timeout = PT10S
+ abort on stall timeout = True
+ stall handlers = "cylc set --pre=all %(workflow)s//1800/x"
+
+[scheduling]
+ initial cycle point = 1800
+ [[graph]]
+ R1 = """
+ a => b
+ @wall_clock => x => b
+ """
+[runtime]
+ [[a, b, x]]
diff --git a/tests/functional/cylc-set/06-parentless/reference.log b/tests/functional/cylc-set/06-parentless/reference.log
new file mode 100644
index 00000000000..f977d1f086b
--- /dev/null
+++ b/tests/functional/cylc-set/06-parentless/reference.log
@@ -0,0 +1,4 @@
+Start task: ['1800/a']
+18000101T0000Z/a -triggered off [] in flow 1
+18000101T0000Z/x -triggered off [] in flow 1
+18000101T0000Z/b -triggered off ['18000101T0000Z/a', '18000101T0000Z/x'] in flow 1
diff --git a/tests/functional/cylc-set/08-switch2.t b/tests/functional/cylc-set/08-switch2.t
new file mode 100644
index 00000000000..3f947668501
--- /dev/null
+++ b/tests/functional/cylc-set/08-switch2.t
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+
+# "cylc set" proposal examples: 5 - Set and complete a future switch task.
+# https://cylc.github.io/cylc-admin/proposal-cylc-set.html#5-set-switch-tasks-at-an-optional-branch-point-to-direct-the-future-flow
+
+. "$(dirname "$0")/test_header"
+set_test_number 2
+
+reftest
+
+purge
diff --git a/tests/functional/cylc-set/08-switch2/flow.cylc b/tests/functional/cylc-set/08-switch2/flow.cylc
new file mode 100644
index 00000000000..0a221f1227e
--- /dev/null
+++ b/tests/functional/cylc-set/08-switch2/flow.cylc
@@ -0,0 +1,41 @@
+
+# Complete a parentless switch task that already exists in the pool but is
+# beyond the runahead limit. Cylc should auto-spawn its next instance to
+# avoid premature shutdown when it is removed as complete.
+# (We only spawn the first runahead-limited instance of parentless tasks).
+
+[scheduler]
+ allow implicit tasks = True
+
+[scheduling]
+ initial cycle point = 1
+ final cycle point = 4
+ cycling mode = integer
+ runahead limit = P0
+ [[graph]]
+ P1 = """
+ a:x? => x
+ a:y? => y
+ x | y => z
+ """
+[runtime]
+ [[a]]
+ script = """
+ cylc__job__wait_cylc_message_started
+ cylc message -- x # always go x-path
+ """
+ [[[outputs]]]
+ x = x
+ y = y
+ [[z]]
+ script = """
+ if (( CYLC_TASK_CYCLE_POINT == 1 )); then
+ # mark 2/a as succeeded with output y
+ # (task will be skipped)
+ cylc set "${CYLC_WORKFLOW_ID}//2/a" --out=y,succeeded
+ elif (( CYLC_TASK_CYCLE_POINT == 2 )); then
+            # mark 3/a as having generated output y
+            # (task will re-run and generate output x in the process)
+ cylc set "${CYLC_WORKFLOW_ID}//3/a" --out=y
+ fi
+ """
diff --git a/tests/functional/cylc-set/08-switch2/reference.log b/tests/functional/cylc-set/08-switch2/reference.log
new file mode 100644
index 00000000000..a41fff43a9e
--- /dev/null
+++ b/tests/functional/cylc-set/08-switch2/reference.log
@@ -0,0 +1,17 @@
+# 1/a runs naturally and generates the output "x"
+1/a -triggered off [] in flow 1
+1/x -triggered off ['1/a'] in flow 1
+1/z -triggered off ['1/x'] in flow 1
+# 2/a is artificially completed with the output "y"
+2/y -triggered off ['2/a'] in flow 1
+2/z -triggered off ['2/y'] in flow 1
+# 3/a has the output "y" artificially set but is not completed
+# (so 3/a will re-run and generate the output "x" naturally)
+3/a -triggered off [] in flow 1
+3/x -triggered off ['3/a'] in flow 1
+3/y -triggered off ['3/a'] in flow 1
+3/z -triggered off ['3/y'] in flow 1
+# 4/a runs naturally and generates the output "x"
+4/a -triggered off [] in flow 1
+4/x -triggered off ['4/a'] in flow 1
+4/z -triggered off ['4/x'] in flow 1
diff --git a/tests/functional/cylc-set/test_header b/tests/functional/cylc-set/test_header
new file mode 120000
index 00000000000..90bd5a36f92
--- /dev/null
+++ b/tests/functional/cylc-set/test_header
@@ -0,0 +1 @@
+../lib/bash/test_header
\ No newline at end of file
diff --git a/tests/functional/cylc-trigger/02-filter-failed/flow.cylc b/tests/functional/cylc-trigger/02-filter-failed/flow.cylc
index e5a3065782e..7416cf5790d 100644
--- a/tests/functional/cylc-trigger/02-filter-failed/flow.cylc
+++ b/tests/functional/cylc-trigger/02-filter-failed/flow.cylc
@@ -18,9 +18,9 @@
[[fixer]]
script = """
cylc__job__wait_cylc_message_started
- cylc__job__poll_grep_workflow_log -E '1/fixable1 running .* \(received\)failed'
- cylc__job__poll_grep_workflow_log -E '1/fixable2 running .* \(received\)failed'
- cylc__job__poll_grep_workflow_log -E '1/fixable3 running .* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/fixable1/01:running.* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/fixable2/01:running.* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/fixable3/01:running.* \(received\)failed'
cylc trigger "${CYLC_WORKFLOW_ID}//1/fixable*"
"""
[[Z]]
diff --git a/tests/functional/cylc-trigger/04-filter-names/flow.cylc b/tests/functional/cylc-trigger/04-filter-names/flow.cylc
index 5997dcaa201..31839c1b77f 100644
--- a/tests/functional/cylc-trigger/04-filter-names/flow.cylc
+++ b/tests/functional/cylc-trigger/04-filter-names/flow.cylc
@@ -22,11 +22,11 @@
[[fixer]]
script = """
cylc__job__wait_cylc_message_started
- cylc__job__poll_grep_workflow_log -E '1/fixable-1a .* \(received\)failed'
- cylc__job__poll_grep_workflow_log -E '1/fixable-1b .* \(received\)failed'
- cylc__job__poll_grep_workflow_log -E '1/fixable-2a .* \(received\)failed'
- cylc__job__poll_grep_workflow_log -E '1/fixable-2b .* \(received\)failed'
- cylc__job__poll_grep_workflow_log -E '1/fixable-3 .* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/fixable-1a/01.* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/fixable-1b/01.* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/fixable-2a/01.* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/fixable-2b/01.* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/fixable-3/01.* \(received\)failed'
cylc trigger "${CYLC_WORKFLOW_ID}//" \
'//1/FIXABLE-1' '//1/fixable-2*' '//1/fixable-3'
"""
diff --git a/tests/functional/cylc-trigger/06-already-active/flow.cylc b/tests/functional/cylc-trigger/06-already-active/flow.cylc
index b939aa2d290..c7d99f6a6a8 100644
--- a/tests/functional/cylc-trigger/06-already-active/flow.cylc
+++ b/tests/functional/cylc-trigger/06-already-active/flow.cylc
@@ -9,14 +9,14 @@
[runtime]
[[triggerer]]
script = """
- cylc__job__poll_grep_workflow_log "1/triggeree .* running" -E
+ cylc__job__poll_grep_workflow_log "1/triggeree/01:running"
cylc trigger "$CYLC_WORKFLOW_ID//1/triggeree"
cylc__job__poll_grep_workflow_log \
- "1/triggeree .* ignoring trigger - already active" -E
+ "1/triggeree.* ignoring trigger - already active" -E
"""
[[triggeree]]
script = """
cylc__job__poll_grep_workflow_log \
- "1/triggeree .* ignoring trigger - already active" -E
+ "1/triggeree.* ignoring trigger - already active" -E
"""
diff --git a/tests/functional/events/23-workflow-stalled-handler/flow.cylc b/tests/functional/events/23-workflow-stalled-handler/flow.cylc
index 5981611b409..4d3673daafa 100644
--- a/tests/functional/events/23-workflow-stalled-handler/flow.cylc
+++ b/tests/functional/events/23-workflow-stalled-handler/flow.cylc
@@ -1,6 +1,6 @@
[scheduler]
[[events]]
- stall handlers = "cylc set-outputs --flow=1 %(workflow)s//1/bar"
+ stall handlers = "cylc set %(workflow)s//1/bar"
stall timeout = PT0S
abort on stall timeout = False
expected task failures = 1/bar
diff --git a/tests/functional/events/38-task-event-handler-custom.t b/tests/functional/events/38-task-event-handler-custom.t
index 92d1ca9722e..d1f825e1c16 100755
--- a/tests/functional/events/38-task-event-handler-custom.t
+++ b/tests/functional/events/38-task-event-handler-custom.t
@@ -28,7 +28,7 @@ WORKFLOW_LOG="${WORKFLOW_RUN_DIR}/log/scheduler/log"
grep_ok \
"\[(('event-handler-00', 'custom-1'), 1) out\] !!CUSTOM!! 1/foo fugu Data ready for barring" \
"${FOO_ACTIVITY_LOG}"
-grep_ok "1/foo .*Data ready for barring" "${WORKFLOW_LOG}" -E
-grep_ok "1/foo .*Data ready for bazzing" "${WORKFLOW_LOG}" -E
-grep_ok "1/foo .*Aren't the hydrangeas nice" "${WORKFLOW_LOG}" -E
+grep_ok "1/foo.*Data ready for barring" "${WORKFLOW_LOG}" -E
+grep_ok "1/foo.*Data ready for bazzing" "${WORKFLOW_LOG}" -E
+grep_ok "1/foo.*Aren't the hydrangeas nice" "${WORKFLOW_LOG}" -E
purge
diff --git a/tests/functional/execution-time-limit/04-polling-intervals.t b/tests/functional/execution-time-limit/04-polling-intervals.t
index 4b213c70546..e1df403f155 100644
--- a/tests/functional/execution-time-limit/04-polling-intervals.t
+++ b/tests/functional/execution-time-limit/04-polling-intervals.t
@@ -47,13 +47,13 @@ poll_grep_workflow_log "INFO - DONE"
# NOTE: execution timeout polling is delayed by PT1M to let things settle
# PT10M = (3*PT3S + PT9M30S) - PT1M
-grep_workflow_log_ok grep-limit10M "\[1/limit10M running job:01 flows:1\] health: execution timeout=None, polling intervals=3\*PT30S,PT9M30S,PT2M,PT7M,..."
+grep_workflow_log_ok grep-limit10M "\[1/limit10M/01:running\] health: execution timeout=None, polling intervals=3\*PT30S,PT9M30S,PT2M,PT7M,..."
# PT60M = (3*PT3S + PT10M + PT49M30S) - PT1M
-grep_workflow_log_ok grep-limit1H "\[1/limit1H running job:01 flows:1\] health: execution timeout=None, polling intervals=3\*PT30S,PT10M,PT49M30S,PT2M,PT7M,..."
+grep_workflow_log_ok grep-limit1H "\[1/limit1H/01:running\] health: execution timeout=None, polling intervals=3\*PT30S,PT10M,PT49M30S,PT2M,PT7M,..."
# PT70S = (2*PT30S + PT1M10S) - PT1M
-grep_workflow_log_ok grep-limit70S "\[1/limit70S running job:01 flows:1\] health: execution timeout=None, polling intervals=2\*PT30S,PT1M10S,PT2M,PT7M,..."
+grep_workflow_log_ok grep-limit70S "\[1/limit70S/01:running\] health: execution timeout=None, polling intervals=2\*PT30S,PT1M10S,PT2M,PT7M,..."
# PT95M = (3*PT3S + PT10M + PT1H + PT24M30S) - PT1M
-grep_workflow_log_ok grep-limit95M "\[1/limit95M running job:01 flows:1\] health: execution timeout=None, polling intervals=3\*PT30S,PT10M,PT1H,PT24M30S,PT2M,PT7M,..."
-grep_workflow_log_ok grep-no-limit "\[1/nolimit running job:01 flows:1\] health: execution timeout=None, polling intervals=3\*PT30S,PT10M,PT1H,..."
+grep_workflow_log_ok grep-limit95M "\[1/limit95M/01:running\] health: execution timeout=None, polling intervals=3\*PT30S,PT10M,PT1H,PT24M30S,PT2M,PT7M,..."
+grep_workflow_log_ok grep-no-limit "\[1/nolimit/01:running\] health: execution timeout=None, polling intervals=3\*PT30S,PT10M,PT1H,..."
purge
diff --git a/tests/functional/ext-trigger/01-no-nudge/flow.cylc b/tests/functional/ext-trigger/01-no-nudge/flow.cylc
index f5569601950..1bf744e8ac6 100644
--- a/tests/functional/ext-trigger/01-no-nudge/flow.cylc
+++ b/tests/functional/ext-trigger/01-no-nudge/flow.cylc
@@ -31,7 +31,7 @@
[[foo]]
script = """
cylc kill "$CYLC_WORKFLOW_ID//1/bar"
- cylc__job__poll_grep_workflow_log -E '1/bar .* \(internal\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/bar.* \(internal\)failed'
cylc release "$CYLC_WORKFLOW_ID//1/bar"
"""
[[bar]]
diff --git a/tests/functional/flow-triggers/00-new-future/flow.cylc b/tests/functional/flow-triggers/00-new-future/flow.cylc
index c67fd0fdd19..94add5a60d0 100644
--- a/tests/functional/flow-triggers/00-new-future/flow.cylc
+++ b/tests/functional/flow-triggers/00-new-future/flow.cylc
@@ -34,5 +34,5 @@
[[a]]
script = """
cylc trigger --flow=new ${CYLC_WORKFLOW_ID}//1/d
- cylc__job__poll_grep_workflow_log -E '1/d.*started'
+ cylc__job__poll_grep_workflow_log -E '1/d/01.*started'
"""
diff --git a/tests/functional/flow-triggers/01-all-future.t b/tests/functional/flow-triggers/01-all-future.t
index 8ecc1448e74..27a80f9892c 100644
--- a/tests/functional/flow-triggers/01-all-future.t
+++ b/tests/functional/flow-triggers/01-all-future.t
@@ -41,11 +41,12 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" true
reftest_run
TEST_NAME="${TEST_NAME_BASE}-order-wait"
+# Note flow_wait is updated to False once used.
QUERY="SELECT name,flow_nums,flow_wait FROM task_states ORDER BY time_created"
run_ok "${TEST_NAME}" sqlite3 "${DB}" "$QUERY"
cmp_ok "${TEST_NAME}.stdout" <<\__END__
a|[1]|0
-d|[1]|1
+d|[1]|0
b|[1]|0
c|[1]|0
e|[1]|0
diff --git a/tests/functional/flow-triggers/01-all-future/flow.cylc b/tests/functional/flow-triggers/01-all-future/flow.cylc
index a47ce127429..d7a9b0284d9 100644
--- a/tests/functional/flow-triggers/01-all-future/flow.cylc
+++ b/tests/functional/flow-triggers/01-all-future/flow.cylc
@@ -39,7 +39,5 @@
[[a]]
script = """
cylc trigger {{OPT}} ${CYLC_WORKFLOW_ID}//1/d
- {% if WAIT is not defined %}
cylc__job__poll_grep_workflow_log -E '1/d.*succeeded'
- {% endif %}
"""
diff --git a/tests/functional/flow-triggers/03-new-past/flow.cylc b/tests/functional/flow-triggers/03-new-past/flow.cylc
index 9c77123d40c..980599c270b 100644
--- a/tests/functional/flow-triggers/03-new-past/flow.cylc
+++ b/tests/functional/flow-triggers/03-new-past/flow.cylc
@@ -37,6 +37,6 @@
script = """
if (( $CYLC_TASK_SUBMIT_NUMBER == 1 )); then
cylc trigger --flow=new ${CYLC_WORKFLOW_ID}//1/a
- cylc__job__poll_grep_workflow_log -E '1/a submitted job:02 .*started'
+ cylc__job__poll_grep_workflow_log -E '1/a/02\(flows=2\):submitted.*started'
fi
"""
diff --git a/tests/functional/flow-triggers/04-all-past/flow.cylc b/tests/functional/flow-triggers/04-all-past/flow.cylc
index 85721f2b5e5..11b6ac7e5d9 100644
--- a/tests/functional/flow-triggers/04-all-past/flow.cylc
+++ b/tests/functional/flow-triggers/04-all-past/flow.cylc
@@ -41,6 +41,6 @@
script = """
if (( $CYLC_TASK_SUBMIT_NUMBER == 1 )); then
cylc trigger {{OPT}} ${CYLC_WORKFLOW_ID}//1/a
- cylc__job__poll_grep_workflow_log -E '1/a running job:02 .*succeeded'
+ cylc__job__poll_grep_workflow_log -E '1/a/02:running.*succeeded'
fi
"""
diff --git a/tests/functional/flow-triggers/05-none-past/flow.cylc b/tests/functional/flow-triggers/05-none-past/flow.cylc
index 6709ebf0d3c..f59e5e11178 100644
--- a/tests/functional/flow-triggers/05-none-past/flow.cylc
+++ b/tests/functional/flow-triggers/05-none-past/flow.cylc
@@ -34,6 +34,6 @@
script = """
if (( $CYLC_TASK_SUBMIT_NUMBER == 1 )); then
cylc trigger --flow=none ${CYLC_WORKFLOW_ID}//1/a
- cylc__job__poll_grep_workflow_log -E '1/a submitted job:02 .*started'
+ cylc__job__poll_grep_workflow_log -E '1/a/02\(flows=none\):submitted.*started'
fi
"""
diff --git a/tests/functional/flow-triggers/06-new-past-switch/flow.cylc b/tests/functional/flow-triggers/06-new-past-switch/flow.cylc
index b33855b74ca..e185d763fb6 100644
--- a/tests/functional/flow-triggers/06-new-past-switch/flow.cylc
+++ b/tests/functional/flow-triggers/06-new-past-switch/flow.cylc
@@ -55,5 +55,5 @@
[[c]]
script = """
cylc trigger --flow=new ${CYLC_WORKFLOW_ID}//1/a
- cylc__job__poll_grep_workflow_log -E '1/a submitted job:02 .*started'
+ cylc__job__poll_grep_workflow_log -E '1/a/02\(flows=2\):submitted.*started'
"""
diff --git a/tests/functional/flow-triggers/07-all-past-switch/flow.cylc b/tests/functional/flow-triggers/07-all-past-switch/flow.cylc
index 87ea0445a2d..4965bc77886 100644
--- a/tests/functional/flow-triggers/07-all-past-switch/flow.cylc
+++ b/tests/functional/flow-triggers/07-all-past-switch/flow.cylc
@@ -64,6 +64,6 @@
script = """
if (( CYLC_TASK_SUBMIT_NUMBER == 1 )); then
cylc trigger {{OPT}} ${CYLC_WORKFLOW_ID}//1/a
- cylc__job__poll_grep_workflow_log -E '1/a running job:02 .*succeeded'
+ cylc__job__poll_grep_workflow_log -E '1/a/02:running.*succeeded'
fi
"""
diff --git a/tests/functional/flow-triggers/08-none-past-switch/flow.cylc b/tests/functional/flow-triggers/08-none-past-switch/flow.cylc
index 419bf72d67a..63a4e532a68 100644
--- a/tests/functional/flow-triggers/08-none-past-switch/flow.cylc
+++ b/tests/functional/flow-triggers/08-none-past-switch/flow.cylc
@@ -53,5 +53,5 @@
[[c]]
script = """
cylc trigger --flow=none ${CYLC_WORKFLOW_ID}//1/a
- cylc__job__poll_grep_workflow_log -E '1/a submitted job:02 .*started'
+ cylc__job__poll_grep_workflow_log -E '1/a/02\(flows=none\):submitted.*started'
"""
diff --git a/tests/functional/flow-triggers/09-retrigger/flow.cylc b/tests/functional/flow-triggers/09-retrigger/flow.cylc
index a8bdb524870..a0e1341c06e 100644
--- a/tests/functional/flow-triggers/09-retrigger/flow.cylc
+++ b/tests/functional/flow-triggers/09-retrigger/flow.cylc
@@ -1,7 +1,5 @@
-# Check if a task gets triggered multiple times with --wait, only the outputs
-# from the last run in the flow are spawned when the flow catches up.
-
-# baz should run twice in flow 1, then y should trigger off of the second baz.
+# If a task gets triggered twice with --wait, the outputs from both runs should be
+# spawned when the flow catches up.
[scheduling]
[[graph]]
@@ -15,9 +13,9 @@
[[foo]]
script = """
cylc trigger --wait ${CYLC_WORKFLOW_ID}//1/baz
- cylc__job__poll_grep_workflow_log "1/baz running job:01 .* succeeded"
+ cylc__job__poll_grep_workflow_log "1/baz/01:running.*succeeded"
cylc trigger --wait ${CYLC_WORKFLOW_ID}//1/baz
- cylc__job__poll_grep_workflow_log "1/baz running job:02 .* succeeded"
+ cylc__job__poll_grep_workflow_log "1/baz/02:running.*succeeded"
"""
[[baz]]
script = """
diff --git a/tests/functional/flow-triggers/09-retrigger/reference.log b/tests/functional/flow-triggers/09-retrigger/reference.log
index 2323320048b..00afaf9c6fe 100644
--- a/tests/functional/flow-triggers/09-retrigger/reference.log
+++ b/tests/functional/flow-triggers/09-retrigger/reference.log
@@ -4,4 +4,5 @@ Final point: 1
1/baz -triggered off [] in flow 1
1/baz -triggered off [] in flow 1
1/bar -triggered off ['1/foo'] in flow 1
+1/x -triggered off ['1/baz'] in flow 1
1/y -triggered off ['1/baz'] in flow 1
diff --git a/tests/functional/flow-triggers/10-specific-flow/flow.cylc b/tests/functional/flow-triggers/10-specific-flow/flow.cylc
index 3d648ddcac4..46ba6dab4c1 100644
--- a/tests/functional/flow-triggers/10-specific-flow/flow.cylc
+++ b/tests/functional/flow-triggers/10-specific-flow/flow.cylc
@@ -1,7 +1,7 @@
# Check targetting a specific flow works, with trigger --wait.
# At start-up, trigger f with --wait for flow 2
-# Then when flow 1 reaches d, trigger a new flow (2) at b.
+# Then when flow 1 reaches d, trigger flow 2 at b.
# Flow 1 should overrun the manually triggered f.
# Flow 2 should skip over it to g.
@@ -17,6 +17,6 @@
[[trigger-happy]]
script = """
cylc trigger --flow=2 --wait ${CYLC_WORKFLOW_ID}//1/f
- cylc__job__poll_grep_workflow_log "1/d submitted job:01 .*started"
- cylc trigger --flow=new ${CYLC_WORKFLOW_ID}//1/b
+ cylc__job__poll_grep_workflow_log "1/d/01:submitted.*running"
+ cylc trigger --flow=2 ${CYLC_WORKFLOW_ID}//1/b
"""
diff --git a/tests/functional/flow-triggers/11-wait-merge/flow.cylc b/tests/functional/flow-triggers/11-wait-merge/flow.cylc
index 9e398c7bf74..72d77a3ce4f 100644
--- a/tests/functional/flow-triggers/11-wait-merge/flow.cylc
+++ b/tests/functional/flow-triggers/11-wait-merge/flow.cylc
@@ -16,7 +16,7 @@
[[a]]
script = """
if ((CYLC_TASK_SUBMIT_NUMBER == 2)); then
- cylc__job__poll_grep_workflow_log "1/d .*(received)started"
+ cylc__job__poll_grep_workflow_log "\[1/d/01(flows=1,2):submitted] (received)started"
fi
"""
[[b]]
@@ -24,11 +24,12 @@
if ((CYLC_TASK_SUBMIT_NUMBER == 1)); then
cylc trigger --flow=new ${CYLC_WORKFLOW_ID}//1/a
cylc trigger --flow=2 --wait ${CYLC_WORKFLOW_ID}//1/c
- cylc__job__poll_grep_workflow_log "1/c .*(received)x"
+ cylc__job__poll_grep_workflow_log "\[1/c/01(flows=2):running] (received)x"
fi
"""
[[c]]
script = """
+ cylc__job__wait_cylc_message_started
cylc message x
if ((CYLC_TASK_SUBMIT_NUMBER == 1)); then
cylc__job__poll_grep_workflow_log "merged"
diff --git a/tests/functional/flow-triggers/12-all-future-multi/flow.cylc b/tests/functional/flow-triggers/12-all-future-multi/flow.cylc
index 782690f24a5..e67aff4f20e 100644
--- a/tests/functional/flow-triggers/12-all-future-multi/flow.cylc
+++ b/tests/functional/flow-triggers/12-all-future-multi/flow.cylc
@@ -45,9 +45,9 @@
)); then
# trigger 3/a in a new flow
cylc trigger --flow=new ${CYLC_WORKFLOW_ID}//3/a
- cylc__job__poll_grep_workflow_log -E '3/a.*started'
+ cylc__job__poll_grep_workflow_log -E '3/a.*=> running'
# trigger 5/a in all flows
cylc trigger ${CYLC_WORKFLOW_ID}//5/a
- cylc__job__poll_grep_workflow_log -E '5/a.*started'
+ cylc__job__poll_grep_workflow_log -E '5/a.*=> running'
fi
"""
diff --git a/tests/functional/flow-triggers/13-noflow-nomerge.t b/tests/functional/flow-triggers/13-noflow-nomerge.t
index c8b4528a2f9..8dfa08f994c 100644
--- a/tests/functional/flow-triggers/13-noflow-nomerge.t
+++ b/tests/functional/flow-triggers/13-noflow-nomerge.t
@@ -27,7 +27,7 @@ run_ok "${TEST_NAME_BASE}-run" cylc play "${WORKFLOW_NAME}"
poll_grep_workflow_log "Workflow stalled"
run_ok "${TEST_NAME_BASE}-trigger" cylc trigger --flow=none "${WORKFLOW_NAME}//1/a"
-poll_grep_workflow_log -E "1/a running job:02 flows:none.*=> succeeded"
+poll_grep_workflow_log -E "1/a/02\(flows=none\):running.*=> succeeded"
cylc stop --now --now --max-polls=5 --interval=2 "$WORKFLOW_NAME"
diff --git a/tests/functional/hold-release/00-workflow/flow.cylc b/tests/functional/hold-release/00-workflow/flow.cylc
index 4afdbc92980..a0df872ebab 100644
--- a/tests/functional/hold-release/00-workflow/flow.cylc
+++ b/tests/functional/hold-release/00-workflow/flow.cylc
@@ -23,7 +23,7 @@
script = """
cylc__job__wait_cylc_message_started
cylc hold --after=1900 "${CYLC_WORKFLOW_ID}"
- cylc__job__poll_grep_workflow_log -F 'INFO - Command actioned: set_hold_point'
+ cylc__job__poll_grep_workflow_log 'Command "set_hold_point" actioned'
cylc release --all "${CYLC_WORKFLOW_ID}"
"""
[[foo,bar]]
diff --git a/tests/functional/hold-release/02-hold-on-spawn.t b/tests/functional/hold-release/02-hold-on-spawn.t
index 9efd2e18ee8..dad99d32297 100755
--- a/tests/functional/hold-release/02-hold-on-spawn.t
+++ b/tests/functional/hold-release/02-hold-on-spawn.t
@@ -34,7 +34,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" \
cylc release "${WORKFLOW_NAME}//1/foo"
# 1/foo should run and spawn 1/bar as waiting and held
-poll_grep_workflow_log -E '1/bar .* spawned'
+poll_grep_workflow_log -E '1/bar.* added to active task pool'
sqlite3 "${WORKFLOW_RUN_DIR}/log/db" \
'SELECT cycle, name, status, is_held FROM task_pool' > task-pool.out
diff --git a/tests/functional/hold-release/05-release.t b/tests/functional/hold-release/05-release.t
index 805fe2395cc..26f4e22d414 100755
--- a/tests/functional/hold-release/05-release.t
+++ b/tests/functional/hold-release/05-release.t
@@ -34,7 +34,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__'
script = """
cylc__job__wait_cylc_message_started
cylc hold --after=0 ${CYLC_WORKFLOW_ID}
- cylc__job__poll_grep_workflow_log 'Command actioned: set_hold_point'
+ cylc__job__poll_grep_workflow_log 'Command "set_hold_point" actioned'
cylc release "${CYLC_WORKFLOW_ID}//1/*FF" # inexact fam
cylc release "${CYLC_WORKFLOW_ID}//1/TOAST" # exact fam
cylc release "${CYLC_WORKFLOW_ID}//1/cat*" # inexact tasks
@@ -65,7 +65,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__'
inherit = STOP
script = """
cylc__job__poll_grep_workflow_log -E \
- '1/dog1 succeeded .* task proxy removed \(finished\)'
+ '1/dog1/01:succeeded.* task completed'
cylc stop "${CYLC_WORKFLOW_ID}"
"""
__FLOW_CONFIG__
diff --git a/tests/functional/hold-release/08-hold.t b/tests/functional/hold-release/08-hold.t
index 206abc1efdb..27d6020f7aa 100755
--- a/tests/functional/hold-release/08-hold.t
+++ b/tests/functional/hold-release/08-hold.t
@@ -33,14 +33,14 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__'
[[holdrelease]]
script = """
cylc__job__wait_cylc_message_started
- cylc__job__poll_grep_workflow_log -E '1/foo .* spawned'
- cylc__job__poll_grep_workflow_log -E '1/bar .* spawned'
- cylc__job__poll_grep_workflow_log -E '1/cheese .* spawned'
- cylc__job__poll_grep_workflow_log -E '1/jam .* spawned'
- cylc__job__poll_grep_workflow_log -E '1/cat1 .* spawned'
- cylc__job__poll_grep_workflow_log -E '1/cat2 .* spawned'
- cylc__job__poll_grep_workflow_log -E '1/dog1 .* spawned'
- cylc__job__poll_grep_workflow_log -E '1/dog2 .* spawned'
+ cylc__job__poll_grep_workflow_log -E '1/foo.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '1/bar.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '1/cheese.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '1/jam.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '1/cat1.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '1/cat2.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '1/dog1.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '1/dog2.* added to active task pool'
cylc hold "${CYLC_WORKFLOW_ID}//1/*FF" # inexact fam
cylc hold "${CYLC_WORKFLOW_ID}//1/TOAST" # exact fam
cylc hold "${CYLC_WORKFLOW_ID}//1/cat*" # inexact tasks
diff --git a/tests/functional/hold-release/11-retrying/flow.cylc b/tests/functional/hold-release/11-retrying/flow.cylc
index 03cd0f6b039..0e08699af09 100644
--- a/tests/functional/hold-release/11-retrying/flow.cylc
+++ b/tests/functional/hold-release/11-retrying/flow.cylc
@@ -18,26 +18,26 @@ t-retry-able => t-analyse
[[t-hold-release]]
script = """
cylc__job__poll_grep_workflow_log -E \
- '1/t-retry-able running job:01.* \(received\)failed'
+ '1/t-retry-able/01:running.* \(received\)failed'
cylc__job__poll_grep_workflow_log -E \
- '1/t-retry-able running job:01.* => waiting'
+ '1/t-retry-able/01:running.* => waiting'
cylc__job__poll_grep_workflow_log -E \
- '1/t-retry-able waiting job:01.* retrying in PT15S'
+ '1/t-retry-able:waiting.* retrying in PT15S'
cylc hold "${CYLC_WORKFLOW_ID}//1/t-retry-able"
cylc__job__poll_grep_workflow_log -E \
- '1/t-retry-able waiting job:01.* => waiting\(held\)'
+ '1/t-retry-able:waiting.* => waiting\(held\)'
cylc release "${CYLC_WORKFLOW_ID}//1/t-retry-able"
cylc__job__poll_grep_workflow_log -E \
- '1/t-retry-able waiting\(held\) job:01.* => waiting'
+ '1/t-retry-able:waiting\(held\).* => waiting'
cylc__job__poll_grep_workflow_log -E \
- '1/t-retry-able waiting job:01.* => waiting\(queued\)'
+ '1/t-retry-able:waiting.* => waiting\(queued\)'
"""
[[t-analyse]]
script = """
diff --git a/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc b/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc
index a74c7a20c3f..aa946b9b418 100644
--- a/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc
+++ b/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc
@@ -23,9 +23,9 @@
[runtime]
[[holder]]
script = """
- cylc__job__poll_grep_workflow_log -E '19900101T0000Z/t1 .* spawned'
- cylc__job__poll_grep_workflow_log -E '20100101T0000Z/t2 .* spawned'
- cylc__job__poll_grep_workflow_log -E '20300101T0000Z/t3 .* spawned'
+ cylc__job__poll_grep_workflow_log -E '19900101T0000Z/t1.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '20100101T0000Z/t2.* added to active task pool'
+ cylc__job__poll_grep_workflow_log -E '20300101T0000Z/t3.* added to active task pool'
cylc hold "${CYLC_WORKFLOW_ID}//*/t*"
"""
[[releaser]]
diff --git a/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc b/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc
index 9ad270b4e84..edbd80b5770 100644
--- a/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc
+++ b/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc
@@ -16,7 +16,7 @@
script = true
[[holder]]
script = """
- cylc__job__poll_grep_workflow_log -E '1/t1 .* spawned'
+ cylc__job__poll_grep_workflow_log -E '1/t1.* added to active task pool'
cylc hold "${CYLC_WORKFLOW_ID}//1/t1"
"""
[[releaser]]
diff --git a/tests/functional/intelligent-host-selection/02-badhosts.t b/tests/functional/intelligent-host-selection/02-badhosts.t
index 0689866d22f..2d12c62f4c9 100644
--- a/tests/functional/intelligent-host-selection/02-badhosts.t
+++ b/tests/functional/intelligent-host-selection/02-badhosts.t
@@ -68,11 +68,11 @@ LOGFILE="${WORKFLOW_RUN_DIR}/log/scheduler/log"
# Check that badhosttask has submit failed, but not good or mixed
named_grep_ok "badhost task submit failed" \
- "1/badhosttask .* submit-failed" "${LOGFILE}"
+ "1/badhosttask.* submit-failed" "${LOGFILE}"
named_grep_ok "goodhost suceeded" \
- "1/mixedhosttask .* succeeded" "${LOGFILE}"
+ "1/mixedhosttask.* succeeded" "${LOGFILE}"
named_grep_ok "mixedhost task suceeded" \
- "1/goodhosttask .* succeeded" "${LOGFILE}"
+ "1/goodhosttask.* succeeded" "${LOGFILE}"
# Check that when a task fail badhosts associated with that task's platform
# are removed from the badhosts set.
diff --git a/tests/functional/intelligent-host-selection/05-from-platform-group.t b/tests/functional/intelligent-host-selection/05-from-platform-group.t
index 527cfaeba43..801372922fb 100644
--- a/tests/functional/intelligent-host-selection/05-from-platform-group.t
+++ b/tests/functional/intelligent-host-selection/05-from-platform-group.t
@@ -83,7 +83,7 @@ log_scan \
"platform: ${CYLC_TEST_PLATFORM} - Could not connect to unreachable_host." \
"platform: ${CYLC_TEST_PLATFORM} - remote init (on ${CYLC_TEST_HOST})" \
"platform: ${CYLC_TEST_PLATFORM} - remote file install (on ${CYLC_TEST_HOST})" \
- "\[1/ugly preparing job:01 flows:1\] => submitted"
+ "\[1/ugly/01:preparing\] => submitted"
purge
exit 0
diff --git a/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 b/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3
index d3dcf24f339..0d3598e5cb9 100644
--- a/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3
+++ b/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3
@@ -8,6 +8,7 @@ INSERT INTO inheritance VALUES('foo','["foo", "root"]');
CREATE TABLE workflow_params(key TEXT, value TEXT, PRIMARY KEY(key));
INSERT INTO workflow_params VALUES('cylc_version', '8.0.0');
INSERT INTO workflow_params VALUES('uuid_str', 'Something');
+INSERT INTO workflow_params VALUES('UTC_mode', 1);
CREATE TABLE workflow_template_vars(key TEXT, value TEXT, PRIMARY KEY(key));
CREATE TABLE task_action_timers(cycle TEXT, name TEXT, ctx_key TEXT, ctx TEXT, delays TEXT, num INTEGER, delay TEXT, timeout TEXT, PRIMARY KEY(cycle, name, ctx_key));
INSERT INTO task_action_timers VALUES('1','foo','"poll_timer"','["tuple", [[99, "running"]]]','[]',0,NULL,NULL);
diff --git a/tests/functional/lib/bash/test_header b/tests/functional/lib/bash/test_header
index c4a07603126..915821c0889 100644
--- a/tests/functional/lib/bash/test_header
+++ b/tests/functional/lib/bash/test_header
@@ -90,6 +90,7 @@
# tries grepping for each PATTERN in turn. Tests will only pass if the
# PATTERNs appear in FILE in the correct order. Runs one test per
# pattern, each prefixed by TEST_NAME.
+# set LOG_SCAN_GREP_OPTS in the environment, e.g. "-E" for "grep -E"
# make_rnd_workflow
# Create a randomly-named workflow source directory.
# mock_smtpd_init
@@ -579,6 +580,7 @@ log_scan () {
local FILE="$2"
local REPS=$3
local DELAY=$4
+ local OPTS=${LOG_SCAN_GREP_OPTS:-}
if ${CYLC_TEST_DEBUG:-false}; then
local ERR=2
else
@@ -595,7 +597,8 @@ log_scan () {
echo -n "scanning for '${pattern:0:30}'" >& $ERR
for _ in $(seq 1 "${REPS}"); do
echo -n '.' >& $ERR
- newposition=$(grep -n "$pattern" "$FILE" | \
+ # shellcheck disable=SC2086
+ newposition=$(grep -n $OPTS "$pattern" "$FILE" | \
tail -n 1 | cut -d ':' -f 1)
if (( newposition > position )); then
position=$newposition
diff --git a/tests/functional/logging/02-duplicates/flow.cylc b/tests/functional/logging/02-duplicates/flow.cylc
index 3d1c1dea3ea..30cdbbe6457 100644
--- a/tests/functional/logging/02-duplicates/flow.cylc
+++ b/tests/functional/logging/02-duplicates/flow.cylc
@@ -22,7 +22,8 @@
script = false
[[bar]]
script = """
-cylc set-outputs --flow=1 "${CYLC_WORKFLOW_ID}" "foo.${CYLC_TASK_CYCLE_POINT}"
+ cylc set --output=succeeded \
+ "${CYLC_WORKFLOW_ID}//${CYLC_TASK_CYCLE_POINT}/foo"
"""
[[restart]]
script = """
diff --git a/tests/functional/pause-resume/00-workflow/flow.cylc b/tests/functional/pause-resume/00-workflow/flow.cylc
index 57a0a24aed5..a668e76ceba 100644
--- a/tests/functional/pause-resume/00-workflow/flow.cylc
+++ b/tests/functional/pause-resume/00-workflow/flow.cylc
@@ -19,7 +19,7 @@
script = """
wait
cylc pause "${CYLC_WORKFLOW_ID}"
- cylc__job__poll_grep_workflow_log -F 'INFO - Command actioned: pause()'
+ cylc__job__poll_grep_workflow_log 'Command "pause" actioned'
cylc play "${CYLC_WORKFLOW_ID}"
"""
[[foo,bar]]
diff --git a/tests/functional/pause-resume/12-pause-then-retry/flow.cylc b/tests/functional/pause-resume/12-pause-then-retry/flow.cylc
index c732dc3bdc9..7be27343939 100644
--- a/tests/functional/pause-resume/12-pause-then-retry/flow.cylc
+++ b/tests/functional/pause-resume/12-pause-then-retry/flow.cylc
@@ -19,7 +19,7 @@
[[t-pause]]
script = """
cylc pause "${CYLC_WORKFLOW_ID}"
- cylc__job__poll_grep_workflow_log -F 'Command actioned: pause'
+ cylc__job__poll_grep_workflow_log 'Command "pause" actioned'
# Poll t-submit-retry-able, should return submit-fail
cylc poll "${CYLC_WORKFLOW_ID}//*/t-submit-retry-able"
@@ -27,19 +27,19 @@
rm -f "${CYLC_WORKFLOW_RUN_DIR}/file"
cylc__job__poll_grep_workflow_log -E \
- '1/t-retry-able running .* => waiting'
+ '1/t-retry-able/01:running.* => waiting'
cylc__job__poll_grep_workflow_log -E \
- '1/t-submit-retry-able submitted .* => waiting'
+ '1/t-submit-retry-able/01:submitted.* => waiting'
# Resume the workflow
cylc play "${CYLC_WORKFLOW_ID}"
cylc__job__poll_grep_workflow_log -E \
- '1/t-retry-able waiting .* => waiting\(queued\)'
+ '1/t-retry-able:waiting.* => waiting\(queued\)'
cylc__job__poll_grep_workflow_log -E \
- '1/t-submit-retry-able waiting .* => waiting\(queued\)'
+ '1/t-submit-retry-able:waiting.* => waiting\(queued\)'
"""
[[t-retry-able]]
script = """
diff --git a/tests/functional/reload/11-retrying/flow.cylc b/tests/functional/reload/11-retrying/flow.cylc
index d5b278b2798..ed4694a5294 100644
--- a/tests/functional/reload/11-retrying/flow.cylc
+++ b/tests/functional/reload/11-retrying/flow.cylc
@@ -22,7 +22,7 @@
execution retry delays = PT0S
[[reloader]]
script = """
- cylc__job__poll_grep_workflow_log -E '1/retrier running\(held\) .* => waiting\(held\)'
+ cylc__job__poll_grep_workflow_log -E '1/retrier/01:running\(held\).* => waiting\(held\)'
cylc reload "${CYLC_WORKFLOW_ID}"
cylc reload "${CYLC_WORKFLOW_ID}"
cylc__job__poll_grep_workflow_log -F 'Reload completed'
diff --git a/tests/functional/reload/14-waiting/flow.cylc b/tests/functional/reload/14-waiting/flow.cylc
index f81ac3533b0..01f383862e6 100644
--- a/tests/functional/reload/14-waiting/flow.cylc
+++ b/tests/functional/reload/14-waiting/flow.cylc
@@ -3,7 +3,7 @@
[scheduling]
[[graph]]
-# SoD: starter:start addeded to create a partially satisfied waiting task
+# SoD: starter:start added to create a partially satisfied waiting task
# to be present during the reload.
R1 = """
starter:start => waiter
@@ -23,8 +23,9 @@ done
script = true
[[reloader]]
script = """
+cylc__job__wait_cylc_message_started
cylc reload "${CYLC_WORKFLOW_ID}"
-cylc__job__poll_grep_workflow_log -E '1/waiter .* reloaded task definition'
+cylc__job__poll_grep_workflow_log -E '1/waiter.* reloaded task definition'
rm -f "${CYLC_WORKFLOW_WORK_DIR}/1/sleeping-waiter/file"
rm -f "${CYLC_WORKFLOW_WORK_DIR}/1/starter/file"
"""
diff --git a/tests/functional/reload/17-graphing-change.t b/tests/functional/reload/17-graphing-change.t
index 26b7b247464..9df561384ff 100755
--- a/tests/functional/reload/17-graphing-change.t
+++ b/tests/functional/reload/17-graphing-change.t
@@ -66,8 +66,8 @@ cp "${TEST_SOURCE_DIR}/graphing-change/flow-2.cylc" \
"${RUN_DIR}/${WORKFLOW_NAME}/flow.cylc"
# Spawn a couple of task proxies, to get "task definition removed" message.
-cylc set-outputs --flow=1 "${WORKFLOW_NAME}//1/foo"
-cylc set-outputs --flow=1 "${WORKFLOW_NAME}//1/baz"
+cylc set "${WORKFLOW_NAME}//1/foo"
+cylc set "${WORKFLOW_NAME}//1/baz"
# reload workflow
run_ok "${TEST_NAME_BASE}-swap-reload" cylc reload "${WORKFLOW_NAME}"
poll grep_workflow_log_n_times 'Reload completed' 3
diff --git a/tests/functional/reload/19-remote-kill/flow.cylc b/tests/functional/reload/19-remote-kill/flow.cylc
index 2bd548b15f7..4a9e965a73c 100644
--- a/tests/functional/reload/19-remote-kill/flow.cylc
+++ b/tests/functional/reload/19-remote-kill/flow.cylc
@@ -17,7 +17,7 @@
cylc reload "${CYLC_WORKFLOW_ID}"
cylc__job__poll_grep_workflow_log -F 'Reload completed'
cylc kill "${CYLC_WORKFLOW_ID}//1/foo"
- cylc__job__poll_grep_workflow_log -E '1/foo failed\(held\) job:01.* job killed'
+ cylc__job__poll_grep_workflow_log -E '1/foo/01:failed\(held\).* job killed'
"""
[[[job]]]
execution time limit = PT1M
diff --git a/tests/functional/reload/23-cycle-point-time-zone.t b/tests/functional/reload/23-cycle-point-time-zone.t
index 42edfaf1402..d9bf2166560 100644
--- a/tests/functional/reload/23-cycle-point-time-zone.t
+++ b/tests/functional/reload/23-cycle-point-time-zone.t
@@ -39,13 +39,13 @@ run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}"
export TZ=BST-1
workflow_run_ok "${TEST_NAME_BASE}-run" cylc play "${WORKFLOW_NAME}" --pause
-poll_workflow_running
+poll_grep_workflow_log "Paused on start up"
# Simulate DST change
export TZ=UTC
run_ok "${TEST_NAME_BASE}-reload" cylc reload "${WORKFLOW_NAME}"
-poll_workflow_running
+poll_grep_workflow_log "Reload completed"
cylc stop --now --now "${WORKFLOW_NAME}"
diff --git a/tests/functional/reload/25-xtriggers.t b/tests/functional/reload/25-xtriggers.t
index 8fd1505fe6d..0269a2e3775 100644
--- a/tests/functional/reload/25-xtriggers.t
+++ b/tests/functional/reload/25-xtriggers.t
@@ -42,8 +42,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__'
[[reload]]
script = """
# wait for "broken" to fail
- cylc__job__poll_grep_workflow_log \
- '1/broken .* (received)failed/ERR'
+ cylc__job__poll_grep_workflow_log -E '1/broken/01.* \(received\)failed/ERR'
# fix "broken" to allow it to pass
sed -i 's/false/true/' "${CYLC_WORKFLOW_RUN_DIR}/flow.cylc"
# reload the workflow
@@ -60,12 +59,20 @@ workflow_run_ok "${TEST_NAME_BASE}-run" cylc play "${WORKFLOW_NAME}" --no-detach
# 3. the retry xtrigger for "1/broken" becomes satisfied (after the reload)
# (thus proving that the xtrigger survived the reload)
# 4. "1/broken" succeeds
+
+log_scan "${TEST_NAME_BASE}-scan" \
+ "$(cylc cat-log -m p "${WORKFLOW_NAME}")" \
+ 1 1 \
+ '1/broken.* (received)failed/ERR'
+
+log_scan "${TEST_NAME_BASE}-scan" \
+ "$(cylc cat-log -m p "${WORKFLOW_NAME}")" 1 1 \
+ 'Command "reload_workflow" actioned'
+
log_scan "${TEST_NAME_BASE}-scan" \
"$(cylc cat-log -m p "${WORKFLOW_NAME}")" \
1 1 \
- '1/broken .* (received)failed/ERR' \
- 'Command actioned: reload_workflow()' \
'xtrigger satisfied: _cylc_retry_1/broken' \
- '\[1/broken .* => succeeded'
+ '1/broken.* => succeeded'
purge
diff --git a/tests/functional/reload/runahead/flow.cylc b/tests/functional/reload/runahead/flow.cylc
index 60d11e6477b..c65b5e11d6d 100644
--- a/tests/functional/reload/runahead/flow.cylc
+++ b/tests/functional/reload/runahead/flow.cylc
@@ -20,7 +20,7 @@
script = true
[[reloader]]
script = """
- cylc__job__poll_grep_workflow_log -E "${CYLC_TASK_CYCLE_POINT}/foo running .*\(received\)failed"
+ cylc__job__poll_grep_workflow_log -E "${CYLC_TASK_CYCLE_POINT}/foo/01:running.*\(received\)failed"
perl -pi -e 's/(runahead limit = )P1( # marker)/\1 P3\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc
cylc reload $CYLC_WORKFLOW_ID
"""
diff --git a/tests/functional/remote/06-poll.t b/tests/functional/remote/06-poll.t
index 4eaaa505251..905516e8499 100644
--- a/tests/functional/remote/06-poll.t
+++ b/tests/functional/remote/06-poll.t
@@ -52,8 +52,8 @@ log_scan \
"$(cylc cat-log -m p "$WORKFLOW_NAME")" \
10 \
1 \
- '\[1/foo submitted .* (polled)foo' \
- '\[1/foo .* (polled)succeeded'
+ '\[1/foo.* (polled)foo' \
+ '\[1/foo.* (polled)succeeded'
purge
exit
diff --git a/tests/functional/remote/09-restart-running-file-install.t b/tests/functional/remote/09-restart-running-file-install.t
index deb9cf72b4d..c3249b2f5db 100644
--- a/tests/functional/remote/09-restart-running-file-install.t
+++ b/tests/functional/remote/09-restart-running-file-install.t
@@ -68,7 +68,7 @@ workflow_run_ok "${TEST_NAME_BASE}-restart" \
cylc play --debug --no-detach "${WORKFLOW_NAME}"
LOG="${WORKFLOW_RUN_DIR}/log/scheduler/log"
grep_ok "remote file install complete" "${LOG}"
-grep_ok "\[1/starter running job:01 flows:1\] (received)succeeded" "${LOG}"
+grep_ok "\[1/starter/01:running\] (received)succeeded" "${LOG}"
ls "${WORKFLOW_RUN_DIR}/log/remote-install" > 'ls.out'
cmp_ok ls.out <<__RLOGS__
01-start-${CYLC_TEST_INSTALL_TARGET}.log
diff --git a/tests/functional/restart/22-hold/flow.cylc b/tests/functional/restart/22-hold/flow.cylc
index 4f2b44bdde8..213c8a00acd 100644
--- a/tests/functional/restart/22-hold/flow.cylc
+++ b/tests/functional/restart/22-hold/flow.cylc
@@ -17,7 +17,7 @@
[[t1]]
script = """
if [[ "${CYLC_TASK_CYCLE_POINT}" == '2016' ]]; then
- cylc__job__poll_grep_workflow_log -E '2016/t2 .* spawned'
+ cylc__job__poll_grep_workflow_log -E '2016/t2.* added to active task pool'
cylc hold "${CYLC_WORKFLOW_ID}//" //2016/t2 //2017/t2
cylc stop "${CYLC_WORKFLOW_ID}"
else
diff --git a/tests/functional/restart/50-two-flows/flow.cylc b/tests/functional/restart/50-two-flows/flow.cylc
index 8837e8835f4..bd9de46c8b4 100644
--- a/tests/functional/restart/50-two-flows/flow.cylc
+++ b/tests/functional/restart/50-two-flows/flow.cylc
@@ -14,7 +14,7 @@
[[a]]
script = """
if ((CYLC_TASK_FLOW_NUMBERS == 2)); then
- cylc__job__poll_grep_workflow_log "\[1/c .* succeeded"
+ cylc__job__poll_grep_workflow_log -E "/c.* succeeded"
fi
"""
[[b, d]]
@@ -23,7 +23,7 @@
if ((CYLC_TASK_FLOW_NUMBERS == 1)); then
cylc trigger --flow=new --meta="cheese wizard" \
"$CYLC_WORKFLOW_ID//1/a"
- cylc__job__poll_grep_workflow_log "\[1/a submitted job:02 flows:2\] => running"
+ cylc__job__poll_grep_workflow_log -E "\[1/a/02\(flows=2\):submitted\] => running"
cylc stop $CYLC_WORKFLOW_ID
fi
"""
diff --git a/tests/functional/restart/58-removed-task.t b/tests/functional/restart/58-removed-task.t
index 17dc19f626e..1c3b79efe05 100755
--- a/tests/functional/restart/58-removed-task.t
+++ b/tests/functional/restart/58-removed-task.t
@@ -39,10 +39,10 @@ workflow_run_ok "${TEST_NAME}" cylc play --no-detach "${WORKFLOW_NAME}"
TEST_NAME="${TEST_NAME_BASE}-restart"
workflow_run_ok "${TEST_NAME}" cylc play --set="INCL_B_C=False" --no-detach "${WORKFLOW_NAME}"
-grep_workflow_log_ok "grep-3" "\[1/a running job:01 flows:1\] (polled)started"
-grep_workflow_log_ok "grep-4" "\[1/b failed job:01 flows:1\] (polled)failed"
+grep_workflow_log_ok "grep-3" "\[1/a/01:running\] (polled)started"
+grep_workflow_log_ok "grep-4" "\[1/b/01:failed\] (polled)failed"
# Failed (but not incomplete) task c should not have been polled.
-grep_fail "\[1/c failed job:01 flows:1\] (polled)failed" "${WORKFLOW_RUN_DIR}/log/scheduler/log"
+grep_fail "\[1/c/01:failed\] (polled)failed" "${WORKFLOW_RUN_DIR}/log/scheduler/log"
purge
diff --git a/tests/functional/restart/58-removed-task/flow.cylc b/tests/functional/restart/58-removed-task/flow.cylc
index 94c5cf27b24..0584d4b54fc 100644
--- a/tests/functional/restart/58-removed-task/flow.cylc
+++ b/tests/functional/restart/58-removed-task/flow.cylc
@@ -22,11 +22,11 @@
[runtime]
[[a]]
script = """
- cylc__job__poll_grep_workflow_log "1/b .*failed"
- cylc__job__poll_grep_workflow_log "1/c .*failed"
+ cylc__job__poll_grep_workflow_log "1/b.*failed"
+ cylc__job__poll_grep_workflow_log "1/c.*failed"
cylc stop --now $CYLC_WORKFLOW_ID
- cylc__job__poll_grep_workflow_log "1/a .*(polled)started"
- cylc__job__poll_grep_workflow_log "1/b .*(polled)failed"
+ cylc__job__poll_grep_workflow_log "1/a.*(polled)started"
+ cylc__job__poll_grep_workflow_log "1/b.*(polled)failed"
"""
[[b, c]]
script = "false"
diff --git a/tests/functional/restart/58-waiting-manual-triggered.t b/tests/functional/restart/58-waiting-manual-triggered.t
index efba9f42b70..455cb289592 100644
--- a/tests/functional/restart/58-waiting-manual-triggered.t
+++ b/tests/functional/restart/58-waiting-manual-triggered.t
@@ -41,7 +41,7 @@ __EOF__
# It should restart and shut down normally, not stall with 2/foo waiting on 1/foo.
workflow_run_ok "${TEST_NAME_BASE}-restart" cylc play --no-detach "${WORKFLOW_NAME}"
# Check that 2/foo job 02 did run before shutdown.
-grep_workflow_log_ok "${TEST_NAME_BASE}-grep" "\[2\/foo running job:02 flows:1\] => succeeded"
+grep_workflow_log_ok "${TEST_NAME_BASE}-grep" "\[2\/foo\/02:running\] => succeeded"
purge
exit
diff --git a/tests/functional/runahead/06-release-update.t b/tests/functional/runahead/06-release-update.t
index c4ab28530e3..45fb680c69f 100644
--- a/tests/functional/runahead/06-release-update.t
+++ b/tests/functional/runahead/06-release-update.t
@@ -27,14 +27,23 @@ CYLC_RUN_PID="$!"
poll_workflow_running
YYYY="$(date +%Y)"
NEXT1=$(( YYYY + 1 ))
-poll_grep_workflow_log -E "${NEXT1}/bar .* spawned"
+poll_grep_workflow_log -E "${NEXT1}/bar.* added to active task pool"
# sleep a little to allow the datastore to update (`cylc dump` sees the
# datastore) TODO can we avoid this flaky sleep somehow?
sleep 10
+
# (gratuitous use of --flows for test coverage)
cylc dump --flows -t "${WORKFLOW_NAME}" | awk '{print $1 $2 $3 $7}' >'log'
+
+# The scheduler task pool should contain:
+# NEXT1/foo - waiting on clock trigger
+# NEXT1/bar - waiting, partially satisfied
+# The n=1 data store should also contain:
+# YYYY/bar - succeeded
+
cmp_ok 'log' - <<__END__
+bar,$NEXT1,waiting,[1]
foo,$NEXT1,waiting,[1]
__END__
diff --git a/tests/functional/runahead/default-future/flow.cylc b/tests/functional/runahead/default-future/flow.cylc
index ee083a1dc91..c78522d7a8c 100644
--- a/tests/functional/runahead/default-future/flow.cylc
+++ b/tests/functional/runahead/default-future/flow.cylc
@@ -27,7 +27,7 @@
[[spawner]]
script = """
# spawn wibble
- cylc set-outputs --flow=1 $CYLC_WORKFLOW_ID 20100101T0800Z/foo
+ cylc set $CYLC_WORKFLOW_ID 20100101T0800Z/foo
"""
[[foo]]
script = false
diff --git a/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc b/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc
index cac2ddf8009..2c2d1996009 100644
--- a/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc
+++ b/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc
@@ -10,5 +10,5 @@
[[bar]]
script = """
cylc stop --flow=1 ${CYLC_WORKFLOW_ID}
- cylc__job__poll_grep_workflow_log 'Command actioned: stop'
+ cylc__job__poll_grep_workflow_log 'Command "stop" actioned'
"""
diff --git a/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc b/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc
index 9c7da97e974..3005fd3a277 100644
--- a/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc
+++ b/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc
@@ -14,13 +14,13 @@
script = """
if (( CYLC_TASK_SUBMIT_NUMBER == 2 )); then
cylc stop --flow=1 ${CYLC_WORKFLOW_ID}
- cylc__job__poll_grep_workflow_log "Command actioned: stop"
+ cylc__job__poll_grep_workflow_log 'Command "stop" actioned'
fi
"""
[[baz]]
script = """
if (( CYLC_TASK_SUBMIT_NUMBER == 1 )); then
cylc trigger --flow=new --meta=other "${CYLC_WORKFLOW_ID}//1/foo"
- cylc__job__poll_grep_workflow_log -E "1/bar running job:02.* => succeeded"
+ cylc__job__poll_grep_workflow_log -E "1/bar/02\(flows=2\):running.* => succeeded"
fi
"""
diff --git a/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc b/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc
index 8084e5c0abe..3f621ccc1e4 100644
--- a/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc
+++ b/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc
@@ -16,7 +16,7 @@
script = """
# Ensure that 1,2/bar are spawned by 1,2/foo and not by 2/start
# (so the scheduler must update their prereqs when 2/start finishes).
- cylc__job__poll_grep_workflow_log -E "2/bar .* spawned"
+ cylc__job__poll_grep_workflow_log -E "2/bar.* added to active task pool"
"""
[[foo]]
[[bar]]
diff --git a/tests/functional/spawn-on-demand/09-set-outputs.t b/tests/functional/spawn-on-demand/09-set-outputs.t
index 41c6b5b6b87..57d34e15024 100644
--- a/tests/functional/spawn-on-demand/09-set-outputs.t
+++ b/tests/functional/spawn-on-demand/09-set-outputs.t
@@ -16,7 +16,7 @@
# along with this program. If not, see .
#-------------------------------------------------------------------------------
-# Check that "cylc set-outputs" works like it says on the tin.
+# Check that "cylc set" works like it says on the tin.
. "$(dirname "$0")/test_header"
set_test_number 2
reftest
diff --git a/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc b/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc
index 1d1d7e7e061..a40e6e9be33 100644
--- a/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc
+++ b/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc
@@ -1,8 +1,6 @@
-# Test that `cylc set-outputs` has the same effect as natural output
+# Test that `cylc set` has the same effect as natural output
# completion: i.e. that downstream children are spawned as normal.
-# DEBUG mode required: we search for "task proxy removed" in the log.
-
[scheduler]
[[events]]
abort on stall timeout = True
@@ -35,7 +33,7 @@
[[foo]]
# Hang about until setter is finished.
script = """
- cylc__job__poll_grep_workflow_log -E "1/setter .* => succeeded"
+ cylc__job__poll_grep_workflow_log -E "1/setter.* => succeeded"
"""
[[bar]]
script = true
@@ -43,11 +41,11 @@
# (To the rescue).
script = """
# Set foo outputs while it still exists in the pool.
- cylc set-outputs --flow=2 --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}//1/foo"
+ cylc set --flow=2 --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}//1/foo"
# Set bar outputs after it is gone from the pool.
- cylc__job__poll_grep_workflow_log -E "1/bar .*task proxy removed"
- cylc set-outputs --flow=2 --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}//1/bar"
+ cylc__job__poll_grep_workflow_log -E "1/bar.* task completed"
+ cylc set --flow=2 --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}//1/bar"
"""
[[qux, quw, fux, fuw]]
script = true
diff --git a/tests/functional/spawn-on-demand/10-retrigger/flow.cylc b/tests/functional/spawn-on-demand/10-retrigger/flow.cylc
index 2bdd4365a07..7e9149ce3c9 100644
--- a/tests/functional/spawn-on-demand/10-retrigger/flow.cylc
+++ b/tests/functional/spawn-on-demand/10-retrigger/flow.cylc
@@ -18,7 +18,7 @@
"""
[[triggerer]]
script = """
- cylc__job__poll_grep_workflow_log -E '1/oops running .* \(received\)failed'
+ cylc__job__poll_grep_workflow_log -E '1/oops/01:running.* \(received\)failed'
cylc trigger "${CYLC_WORKFLOW_ID}//1/oops"
"""
[[foo, bar]]
diff --git a/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc b/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc
index 890d73f78be..c47ca3c93c4 100644
--- a/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc
+++ b/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc
@@ -15,6 +15,6 @@
script = true
[[stopper]]
script = """
- cylc__job__poll_grep_workflow_log "\[1/holdee .* holding \(as requested earlier\)" -E
+ cylc__job__poll_grep_workflow_log "\[1/holdee.* holding \(as requested earlier\)" -E
cylc stop $CYLC_WORKFLOW_ID
"""
diff --git a/tests/functional/spawn-on-demand/12-set-outputs-no-reflow.t b/tests/functional/spawn-on-demand/12-set-outputs-cont-flow.t
similarity index 93%
rename from tests/functional/spawn-on-demand/12-set-outputs-no-reflow.t
rename to tests/functional/spawn-on-demand/12-set-outputs-cont-flow.t
index 41c6b5b6b87..36c8bd100b3 100644
--- a/tests/functional/spawn-on-demand/12-set-outputs-no-reflow.t
+++ b/tests/functional/spawn-on-demand/12-set-outputs-cont-flow.t
@@ -16,7 +16,7 @@
# along with this program. If not, see .
#-------------------------------------------------------------------------------
-# Check that "cylc set-outputs" works like it says on the tin.
+# Check that "cylc set" continues a flow by default.
. "$(dirname "$0")/test_header"
set_test_number 2
reftest
diff --git a/tests/functional/spawn-on-demand/12-set-outputs-no-reflow/flow.cylc b/tests/functional/spawn-on-demand/12-set-outputs-cont-flow/flow.cylc
similarity index 70%
rename from tests/functional/spawn-on-demand/12-set-outputs-no-reflow/flow.cylc
rename to tests/functional/spawn-on-demand/12-set-outputs-cont-flow/flow.cylc
index 315007c52fa..353d6d1f41f 100644
--- a/tests/functional/spawn-on-demand/12-set-outputs-no-reflow/flow.cylc
+++ b/tests/functional/spawn-on-demand/12-set-outputs-cont-flow/flow.cylc
@@ -1,5 +1,5 @@
-# Test that `cylc set-outputs` does not cause reflow by default
-# Task setter should cause bar to run, but not subsequently baz.
+# Test that `cylc set` continues the active flow by default
+# Task "setter" should cause bar to run, then subsequently baz.
[scheduler]
[[events]]
@@ -21,5 +21,5 @@
script = true
[[setter]]
script = """
- cylc set-outputs "${CYLC_WORKFLOW_ID}//1/foo"
+ cylc set --output=succeeded "${CYLC_WORKFLOW_ID}//1/foo"
"""
diff --git a/tests/functional/spawn-on-demand/12-set-outputs-no-reflow/reference.log b/tests/functional/spawn-on-demand/12-set-outputs-cont-flow/reference.log
similarity index 79%
rename from tests/functional/spawn-on-demand/12-set-outputs-no-reflow/reference.log
rename to tests/functional/spawn-on-demand/12-set-outputs-cont-flow/reference.log
index 2322cc234da..3c7b498cc8b 100644
--- a/tests/functional/spawn-on-demand/12-set-outputs-no-reflow/reference.log
+++ b/tests/functional/spawn-on-demand/12-set-outputs-cont-flow/reference.log
@@ -3,3 +3,4 @@ Final point: 1
1/foo -triggered off []
1/setter -triggered off ['1/foo']
1/bar -triggered off ['1/foo']
+1/baz -triggered off ['1/bar']
diff --git a/tests/functional/spawn-on-demand/14-trigger-flow-blocker/flow.cylc b/tests/functional/spawn-on-demand/14-trigger-flow-blocker/flow.cylc
index 0849b69bb75..9c205e301c5 100644
--- a/tests/functional/spawn-on-demand/14-trigger-flow-blocker/flow.cylc
+++ b/tests/functional/spawn-on-demand/14-trigger-flow-blocker/flow.cylc
@@ -23,7 +23,7 @@
cylc trigger --flow=none $CYLC_WORKFLOW_ID//3/foo
elif ((CYLC_TASK_CYCLE_POINT == 3)); then
# Run until I get merged.
- cylc__job__poll_grep_workflow_log -E "3/foo .* merged in flow\(s\) 1"
+ cylc__job__poll_grep_workflow_log -E "3/foo.* merged in flow\(s\) 1"
fi
"""
[[bar]]
diff --git a/tests/functional/spawn-on-demand/18-submitted.t b/tests/functional/spawn-on-demand/18-submitted.t
index de5041f4ca1..30f022ebafd 100644
--- a/tests/functional/spawn-on-demand/18-submitted.t
+++ b/tests/functional/spawn-on-demand/18-submitted.t
@@ -40,7 +40,7 @@ reftest_run
for number in 1 2 3; do
grep_workflow_log_ok \
"${TEST_NAME_BASE}-a${number}" \
- "${number}/a${number} .* did not complete required outputs: \['submitted'\]"
+ "${number}/a${number}.* did not complete required outputs: \['submitted'\]"
done
purge
diff --git a/tests/functional/spawn-on-demand/19-submitted-compat.t b/tests/functional/spawn-on-demand/19-submitted-compat.t
index d529dfb4183..98c603d55a7 100644
--- a/tests/functional/spawn-on-demand/19-submitted-compat.t
+++ b/tests/functional/spawn-on-demand/19-submitted-compat.t
@@ -51,7 +51,7 @@ grep_workflow_log_ok \
'Backward compatibility mode ON'
grep_workflow_log_ok \
"${TEST_NAME_BASE}-a-complete" \
- '\[1/a running job:01 flows:1\] => succeeded'
+ '\[1/a/01:running\] => succeeded'
grep_workflow_log_ok \
"${TEST_NAME_BASE}-b-incomplete" \
"1/b did not complete required outputs: \['submitted', 'succeeded'\]"
diff --git a/tests/functional/special/08-clock-trigger-retry.t b/tests/functional/special/08-clock-trigger-retry.t
index d4f591d4870..fa3a65ee150 100644
--- a/tests/functional/special/08-clock-trigger-retry.t
+++ b/tests/functional/special/08-clock-trigger-retry.t
@@ -42,7 +42,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" cylc play --no-detach "$WORKFLOW_NAME"
log_scan "${TEST_NAME_BASE}-log-scan" \
"${WORKFLOW_RUN_DIR}/log/scheduler/log" 2 1 \
- "\[20150101.*/foo .* job:01 .* retrying in PT5S" \
+ "\[20150101.*/foo.* retrying in PT5S" \
"xtrigger satisfied: _cylc_retry_20150101"
# (if task resubmits immediately instead of waiting PT5S, xtrigger msg will not appear)
diff --git a/tests/functional/startup/00-state-summary.t b/tests/functional/startup/00-state-summary.t
index a4a02208899..76d16bbd25d 100644
--- a/tests/functional/startup/00-state-summary.t
+++ b/tests/functional/startup/00-state-summary.t
@@ -30,7 +30,7 @@ run_ok "${TEST_NAME}" cylc validate "${WORKFLOW_NAME}"
cylc play --no-detach "${WORKFLOW_NAME}" > /dev/null 2>&1
# Restart with a failed task and a succeeded task.
cylc play "${WORKFLOW_NAME}"
-poll_grep_workflow_log -E '1/foo .* \(polled\)failed'
+poll_grep_workflow_log -E '1/foo.* \(polled\)failed'
cylc dump "${WORKFLOW_NAME}" > dump.out
TEST_NAME=${TEST_NAME_BASE}-grep
# State summary should not just say "Initializing..."
diff --git a/tests/functional/triggering/19-and-suicide/flow.cylc b/tests/functional/triggering/19-and-suicide/flow.cylc
index cf7ae49d129..670c361fc96 100644
--- a/tests/functional/triggering/19-and-suicide/flow.cylc
+++ b/tests/functional/triggering/19-and-suicide/flow.cylc
@@ -16,7 +16,7 @@
[[t0]]
# https://github.com/cylc/cylc-flow/issues/2655
# "1/t2" should not suicide on "1/t1:failed"
- script = cylc__job__poll_grep_workflow_log -E '1/t1 .* \(received\)failed'
+ script = cylc__job__poll_grep_workflow_log -E '1/t1.* \(received\)failed'
[[t1]]
script = false
[[t2]]
diff --git a/tests/functional/triggering/21-expire.t b/tests/functional/triggering/21-expire.t
new file mode 100644
index 00000000000..aaacdf807b0
--- /dev/null
+++ b/tests/functional/triggering/21-expire.t
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#-------------------------------------------------------------------------------
+# Test expire triggering
+. "$(dirname "$0")/test_header"
+set_test_number 2
+reftest
+exit
diff --git a/tests/functional/triggering/21-expire/flow.cylc b/tests/functional/triggering/21-expire/flow.cylc
new file mode 100644
index 00000000000..2a5336cb5e8
--- /dev/null
+++ b/tests/functional/triggering/21-expire/flow.cylc
@@ -0,0 +1,22 @@
+[scheduling]
+ initial cycle point = 1999
+ [[special tasks]]
+ clock-expire = foo1(PT0S), foo2(PT0S), bar1(PT0S), x(PT0S)
+ [[graph]]
+ # Expire: foo1, foo2, bar1, x
+ # Run: y, bar2, baz, qux
+ R1 = """
+ x?
+ FOO?
+ BAR?
+ x:expire? => y
+ FOO:expire-all? => baz
+ BAR:expire-any? => qux
+ """
+[runtime]
+ [[FOO, BAR]]
+ [[foo1, foo2]]
+ inherit = FOO
+ [[bar1, bar2]]
+ inherit = BAR
+ [[x, y, baz, qux]]
diff --git a/tests/functional/triggering/21-expire/reference.log b/tests/functional/triggering/21-expire/reference.log
new file mode 100644
index 00000000000..8ba5edca688
--- /dev/null
+++ b/tests/functional/triggering/21-expire/reference.log
@@ -0,0 +1,4 @@
+19990101T0000Z/bar2 -triggered off [] in flow 1
+19990101T0000Z/baz -triggered off ['19990101T0000Z/foo1', '19990101T0000Z/foo2'] in flow 1
+19990101T0000Z/qux -triggered off ['19990101T0000Z/bar1'] in flow 1
+19990101T0000Z/y -triggered off ['19990101T0000Z/x'] in flow 1
diff --git a/tests/functional/xtriggers/03-sequence.t b/tests/functional/xtriggers/03-sequence.t
index 63c360f66c5..a41b970b3f1 100644
--- a/tests/functional/xtriggers/03-sequence.t
+++ b/tests/functional/xtriggers/03-sequence.t
@@ -49,7 +49,7 @@ run_ok "${TEST_NAME_BASE}-val" cylc validate "${WORKFLOW_NAME}"
# Run workflow; it will stall waiting on the never-satisfied xtriggers.
cylc play "${WORKFLOW_NAME}"
-poll_grep_workflow_log -E '2025/start .* => succeeded'
+poll_grep_workflow_log -E '2025/start.* => succeeded'
cylc show "${WORKFLOW_NAME}//2026/foo" | grep -E '^ - xtrigger' > 2026.foo.log
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 8d44f0d42b6..d575f2a6af2 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -24,6 +24,7 @@
from typing import List, TYPE_CHECKING, Set, Tuple, Union
from cylc.flow.config import WorkflowConfig
+from cylc.flow.id import Tokens
from cylc.flow.option_parsers import Options
from cylc.flow.pathutil import get_cylc_run_dir
from cylc.flow.rundb import CylcWorkflowDAO
@@ -544,7 +545,9 @@ def complete():
The scheduler to await.
tokens_list:
If specified, this will wait for the tasks represented by these
- tokens to be marked as completed by the task pool.
+ tokens to be marked as completed by the task pool. Can use
+ relative task ids as strings (e.g. '1/a') rather than tokens for
+ convenience.
stop_mode:
If tokens_list is not provided, this will wait for the scheduler
to be shutdown with the specified mode (default = AUTO, i.e.
@@ -561,20 +564,26 @@ def complete():
"""
async def _complete(
schd,
- *tokens_list,
+ *tokens_list: Union[Tokens, str],
stop_mode=StopMode.AUTO,
- timeout=60,
- ):
+ timeout: int = 60,
+ ) -> None:
start_time = time()
- tokens_list = [tokens.task for tokens in tokens_list]
+
+ _tokens_list: List[Tokens] = []
+ for tokens in tokens_list:
+ if isinstance(tokens, str):
+ tokens = Tokens(tokens, relative=True)
+ _tokens_list.append(tokens.task)
# capture task completion
remove_if_complete = schd.pool.remove_if_complete
- def _remove_if_complete(itask):
+ def _remove_if_complete(itask, output=None):
+ nonlocal _tokens_list
ret = remove_if_complete(itask)
- if ret and itask.tokens.task in tokens_list:
- tokens_list.remove(itask.tokens.task)
+ if ret and itask.tokens.task in _tokens_list:
+ _tokens_list.remove(itask.tokens.task)
return ret
schd.pool.remove_if_complete = _remove_if_complete
@@ -595,8 +604,8 @@ def _set_stop(mode=None):
schd._set_stop = _set_stop
# determine the completion condition
- if tokens_list:
- condition = lambda: bool(tokens_list)
+ if _tokens_list:
+ condition = lambda: bool(_tokens_list)
else:
condition = lambda: bool(not has_shutdown)
@@ -604,9 +613,9 @@ def _set_stop(mode=None):
while condition():
# allow the main loop to advance
await asyncio.sleep(0)
- if time() - start_time > timeout:
+ if (time() - start_time) > timeout:
raise Exception(
- f'Timeout waiting for {", ".join(map(str, tokens_list))}'
+ f'Timeout waiting for {", ".join(map(str, _tokens_list))}'
)
# restore regular shutdown logic
diff --git a/tests/integration/scripts/test_completion_server.py b/tests/integration/scripts/test_completion_server.py
new file mode 100644
index 00000000000..0c792fac3da
--- /dev/null
+++ b/tests/integration/scripts/test_completion_server.py
@@ -0,0 +1,204 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+"""Integration tests for the "cylc completion-server command.
+
+See also the more extensive unit tests for this module.
+"""
+
+from cylc.flow.scripts.completion_server import complete_cylc
+
+
+def setify(coro):
+ """Cast returned lists to sets for coroutines.
+
+ Convenience function to use when you want to test output not order.
+ """
+ async def _coro(*args, **kwargs):
+ nonlocal coro
+ ret = await coro(*args, **kwargs)
+ if isinstance(ret, list):
+ return set(ret)
+ return ret
+ return _coro
+
+
+async def test_list_prereqs_and_outputs(flow, scheduler, start):
+ """Test the success cases for listing task prereqs/outputs.
+
+ The error cases are tested in a unit test (doesn't require a running
+ scheduler).
+ """
+ _complete_cylc = setify(complete_cylc) # Note: results are un-ordered
+
+ id_ = flow({
+ 'scheduler': {
+ 'allow implicit tasks': 'True',
+ },
+ 'scheduling': {
+ 'initial cycle point': '1',
+ 'cycling mode': 'integer',
+ 'graph': {
+ 'P1': '''
+ a => b
+ c => d
+ b[-P1] => b
+ '''
+ },
+ },
+ 'runtime': {
+ 'a': {},
+ 'b': {
+ 'outputs': {
+ 'foo': 'abc def ghi',
+ }
+ }
+ }
+ })
+ schd = scheduler(id_)
+ async with start(schd):
+ await schd.update_data_structure()
+ b1 = schd.tokens.duplicate(cycle='1', task='b')
+ d1 = schd.tokens.duplicate(cycle='1', task='d')
+ e1 = schd.tokens.duplicate(cycle='1', task='e') # does not exist
+
+ # list prereqs (b1)
+ assert await _complete_cylc('cylc', 'set', b1.id, '--pre', '') == {
+ # keywords
+ 'all',
+ # intra-cycle dependency
+ '1/a:succeeded',
+ # inter-cycle dependency
+ '0/b:succeeded',
+ }
+
+ # list outputs (b1)
+ assert await _complete_cylc('cylc', 'set', b1.id, '--out', '') == {
+ # regular task outputs
+ 'expired',
+ 'failed',
+ 'started',
+ 'submit-failed',
+ 'submitted',
+ 'succeeded',
+ # custom task outputs
+ 'foo',
+ }
+
+ # list prereqs (d1)
+ assert await _complete_cylc('cylc', 'set', d1.id, '--pre', '') == {
+ # keywords
+ 'all',
+ # d1 prereqs
+ '1/c:succeeded',
+ }
+
+ # list prereqs for multiple (b1, d1)
+ assert await _complete_cylc(
+ 'cylc',
+ 'set',
+ b1.id,
+ d1.id,
+ '--pre',
+ '',
+ ) == {
+ # keywords
+ 'all',
+ # b1 prereqs
+ '1/a:succeeded',
+ '0/b:succeeded',
+ # d1 prereqs
+ '1/c:succeeded',
+ }
+
+ # list prereqs for multiple (b1, d1) - alternative format
+ assert await _complete_cylc(
+ 'cylc',
+ 'set',
+ f'{schd.id}//',
+ f'//{b1.relative_id}',
+ f'//{d1.relative_id}',
+ '--pre',
+ '',
+ ) == {
+ # keywords
+ 'all',
+ # b1 prereqs
+ '1/a:succeeded',
+ '0/b:succeeded',
+ # d1 prereqs
+ '1/c:succeeded',
+ }
+
+ # list outputs for a non-existant task
+ assert await _complete_cylc('cylc', 'set', e1.id, '--out', '') == set()
+
+ # list outputs for a non-existant workflow
+ assert await _complete_cylc(
+ 'cylc',
+ 'set',
+ # this invalid workflow shouldn't prevent it from returning values
+ # for the valid one
+ 'no-such-workflow//',
+ f'{schd.id}//',
+ f'//{b1.relative_id}',
+ f'//{d1.relative_id}',
+ '--pre',
+ '',
+ ) == {
+ # keywords
+ 'all',
+ # b1 prereqs
+ '1/a:succeeded',
+ '0/b:succeeded',
+ # d1 prereqs
+ '1/c:succeeded',
+ }
+
+ # start a second workflow to test multi-workflow functionality
+ id2 = flow({
+ 'scheduling': {
+ 'graph': {
+ 'R1': '''
+ x => z
+ '''
+ }
+ },
+ 'runtime': {'x': {}, 'z': {}},
+ })
+ schd2 = scheduler(id2)
+ async with start(schd2):
+ await schd2.update_data_structure()
+ z1 = schd2.tokens.duplicate(cycle='1', task='z')
+
+ # list prereqs for multiple tasks in multiple workflows
+ # (it should combine the results from both workflows)
+ assert await _complete_cylc(
+ 'cylc',
+ 'set',
+ b1.id,
+ z1.id,
+ '--pre',
+ '',
+ ) == {
+ # keywords
+ 'all',
+ # workflow1//1/b prereqs
+ '0/b:succeeded',
+ '1/a:succeeded',
+ # workflow2//1/z prereqs
+ '1/x:succeeded'
+ }
diff --git a/tests/integration/scripts/test_set.py b/tests/integration/scripts/test_set.py
new file mode 100644
index 00000000000..22cd44bbbce
--- /dev/null
+++ b/tests/integration/scripts/test_set.py
@@ -0,0 +1,164 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+"""Test "cylc set" functionality.
+
+Note: see also functional tests
+"""
+
+from cylc.flow.cycling.integer import IntegerPoint
+from cylc.flow.data_store_mgr import TASK_PROXIES
+from cylc.flow.task_state import TASK_STATUS_WAITING, TASK_STATUS_SUCCEEDED
+
+
+async def test_set_parentless_spawning(
+ flow,
+ scheduler,
+ run,
+ complete,
+):
+ """Ensure that setting outputs does not interfere with parentless spawning.
+
+ Setting outputs manually causes the logic to follow a different code
+ pathway to "natural" output satisfaction. If we're not careful this could
+ lead to "premature shutdown" (i.e. the scheduler thinks it's finished when
+ it isn't), this test makes sure that's not the case.
+ """
+ id_ = flow({
+ 'scheduling': {
+ 'initial cycle point': '1',
+ 'cycling mode': 'integer',
+ 'runahead limit': 'P0',
+ 'graph': {'P1': 'a => z'},
+ },
+ })
+ schd = scheduler(id_, paused_start=False)
+ async with run(schd):
+ # mark cycle 1 as succeeded
+ schd.pool.set_prereqs_and_outputs(['1/a', '1/z'], ['succeeded'], None, ['1'])
+
+ # the parentless task "a" should be spawned out to the runahead limit
+ assert [
+ itask.identity for itask in schd.pool.get_tasks()
+ ] == ['2/a', '3/a']
+
+ # the workflow should run on to the next cycle
+ await complete(schd, '2/a', timeout=5)
+
+
+async def test_rerun_incomplete(
+ flow,
+ scheduler,
+ run,
+ complete,
+ reflog,
+):
+ """Incomplete tasks should be re-run."""
+ id_ = flow({
+ 'scheduling': {
+ 'graph': {'R1': 'a => z'},
+ },
+ 'runtime': {
+ # register a custom output
+ 'a': {'outputs': {'x': 'xyz'}},
+ },
+ })
+ schd = scheduler(id_, paused_start=False)
+ async with run(schd):
+ # generate 1/a:x but do not complete 1/a
+ schd.pool.set_prereqs_and_outputs(['1/a'], ['x'], None, ['1'])
+ triggers = reflog(schd)
+ await complete(schd)
+
+ assert triggers == {
+ # the task 1/a should have been run despite the earlier
+ # setting of the "x" output
+ ('1/a', None),
+ ('1/z', ('1/a',)),
+ }
+
+
+async def test_data_store(
+ flow,
+ scheduler,
+ start,
+):
+ """Test that manually set prereqs/outputs are applied to the data store."""
+ id_ = flow({
+ 'scheduling': {
+ 'graph': {'R1': 'a => z'},
+ },
+ 'runtime': {
+ # register a custom output
+ 'a': {'outputs': {'x': 'xyz'}},
+ },
+ })
+ schd = scheduler(id_)
+ async with start(schd):
+ await schd.update_data_structure()
+ data = schd.data_store_mgr.data[schd.tokens.id]
+ task_a = data[TASK_PROXIES][
+ schd.pool.get_task(IntegerPoint('1'), 'a').tokens.id
+ ]
+
+ # set the 1/a:succeeded prereq of 1/z
+ schd.pool.set_prereqs_and_outputs(
+ ['1/z'], None, ['1/a:succeeded'], ['1'])
+ task_z = data[TASK_PROXIES][
+ schd.pool.get_task(IntegerPoint('1'), 'z').tokens.id
+ ]
+ await schd.update_data_structure()
+ assert task_z.prerequisites[0].satisfied is True
+
+ # set 1/a:x the task should be waiting with output x satisfied
+ schd.pool.set_prereqs_and_outputs(['1/a'], ['x'], None, ['1'])
+ await schd.update_data_structure()
+ assert task_a.state == TASK_STATUS_WAITING
+ assert task_a.outputs['x'].satisfied is True
+ assert task_a.outputs['succeeded'].satisfied is False
+
+ # set 1/a:succeeded the task should be succeeded with output x sat
+ schd.pool.set_prereqs_and_outputs(['1/a'], ['succeeded'], None, ['1'])
+ await schd.update_data_structure()
+ assert task_a.state == TASK_STATUS_SUCCEEDED
+ assert task_a.outputs['x'].satisfied is True
+ assert task_a.outputs['succeeded'].satisfied is True
+
+
+async def test_incomplete_detection(
+ one_conf,
+ flow,
+ scheduler,
+ start,
+ log_filter,
+):
+ """It should detect and log finished tasks left with incomplete outputs."""
+ schd = scheduler(flow(one_conf))
+ async with start(schd) as log:
+ schd.pool.set_prereqs_and_outputs(['1/one'], ['failed'], None, ['1'])
+ assert log_filter(log, contains='1/one did not complete')
+
+
+async def test_pre_all(flow, scheduler, run):
+ """Ensure that --pre=all is interpreted as a special case
+ and _not_ tokenized.
+ """
+ id_ = flow({'scheduling': {'graph': {'R1': 'a => z'}}})
+ schd = scheduler(id_, paused_start=False)
+ async with run(schd) as log:
+ schd.pool.set_prereqs_and_outputs(['1/z'], [], ['all'], ['all'])
+ warn_or_higher = [i for i in log.records if i.levelno > 30]
+ assert warn_or_higher == []
diff --git a/tests/integration/test_data_store_mgr.py b/tests/integration/test_data_store_mgr.py
index 21b95280443..78b24200634 100644
--- a/tests/integration/test_data_store_mgr.py
+++ b/tests/integration/test_data_store_mgr.py
@@ -171,14 +171,14 @@ async def test_delta_task_state(harness):
w_id = schd.data_store_mgr.workflow_id
schd.data_store_mgr.data[w_id] = data
assert TASK_STATUS_FAILED not in set(collect_states(data, TASK_PROXIES))
- for itask in schd.pool.get_all_tasks():
+ for itask in schd.pool.get_tasks():
itask.state.reset(TASK_STATUS_FAILED)
schd.data_store_mgr.delta_task_state(itask)
assert TASK_STATUS_FAILED in set(collect_states(
schd.data_store_mgr.updated, TASK_PROXIES))
# put things back the way we found them
- for itask in schd.pool.get_all_tasks():
+ for itask in schd.pool.get_tasks():
itask.state.reset(TASK_STATUS_WAITING)
schd.data_store_mgr.delta_task_state(itask)
await schd.update_data_structure()
@@ -191,7 +191,7 @@ async def test_delta_task_held(harness):
schd.pool.hold_tasks('*')
await schd.update_data_structure()
assert True in {t.is_held for t in data[TASK_PROXIES].values()}
- for itask in schd.pool.get_all_tasks():
+ for itask in schd.pool.get_tasks():
itask.state.reset(is_held=False)
schd.data_store_mgr.delta_task_held(itask)
assert True not in {
@@ -269,7 +269,7 @@ async def test_update_data_structure(harness):
assert TASK_STATUS_FAILED not in set(collect_states(data, FAMILY_PROXIES))
assert TASK_STATUS_FAILED not in data[WORKFLOW].state_totals
assert len({t.is_held for t in data[TASK_PROXIES].values()}) == 2
- for itask in schd.pool.get_all_tasks():
+ for itask in schd.pool.get_tasks():
itask.state.reset(TASK_STATUS_FAILED)
schd.data_store_mgr.delta_task_state(itask)
schd.data_store_mgr.update_data_structure()
@@ -288,15 +288,17 @@ async def test_update_data_structure(harness):
def test_delta_task_prerequisite(harness):
"""Test delta_task_prerequisites."""
schd, data = harness
- schd.pool.force_spawn_children([
- t.identity
- for t in schd.pool.get_all_tasks()
- ], (TASK_STATUS_SUCCEEDED,), "flow1")
+ schd.pool.set_prereqs_and_outputs(
+ [t.identity for t in schd.pool.get_tasks()],
+ [(TASK_STATUS_SUCCEEDED,)],
+ [],
+ "all"
+ )
assert all({
p.satisfied
for t in schd.data_store_mgr.updated[TASK_PROXIES].values()
for p in t.prerequisites})
- for itask in schd.pool.get_all_tasks():
+ for itask in schd.pool.get_tasks():
# set prereqs as not-satisfied
for prereq in itask.state.prerequisites:
prereq._all_satisfied = False
diff --git a/tests/integration/test_examples.py b/tests/integration/test_examples.py
index 882d4c10163..5cdb1f21552 100644
--- a/tests/integration/test_examples.py
+++ b/tests/integration/test_examples.py
@@ -138,7 +138,7 @@ async def test_task_pool(one, start):
async with start(one):
# pump the scheduler's heart manually
one.pool.release_runahead_tasks()
- assert len(one.pool.main_pool) == 1
+ assert len(one.pool.active_tasks) == 1
async def test_exception(one, run, log_filter):
diff --git a/tests/integration/test_reload.py b/tests/integration/test_reload.py
index d9de0826dfb..5bd07c17af4 100644
--- a/tests/integration/test_reload.py
+++ b/tests/integration/test_reload.py
@@ -99,11 +99,11 @@ def change_state(_=0):
[
# the task should have entered the preparing state before the
# reload was requested
- '[1/foo waiting(queued) job:00 flows:1] => preparing(queued)',
+ '[1/foo:waiting(queued)] => preparing(queued)',
# the reload should have put the workflow into the paused state
- 'PAUSING the workflow now: Reloading workflow',
+ 'Pausing the workflow: Reloading workflow',
# reload should have waited for the task to submit
- '[1/foo preparing(queued) job:00 flows:1]'
+ '[1/foo/00:preparing(queued)]'
' => submitted(queued)',
# before then reloading the workflow config
'Reloading the workflow definition.',
diff --git a/tests/integration/test_resolvers.py b/tests/integration/test_resolvers.py
index 1190cd70978..17e09983dc3 100644
--- a/tests/integration/test_resolvers.py
+++ b/tests/integration/test_resolvers.py
@@ -20,12 +20,11 @@
import pytest
-from cylc.flow import CYLC_LOG
from cylc.flow.data_store_mgr import EDGES, TASK_PROXIES
from cylc.flow.id import Tokens
+from cylc.flow import CYLC_LOG
from cylc.flow.network.resolvers import Resolvers
from cylc.flow.scheduler import Scheduler
-from cylc.flow.workflow_status import StopMode
@pytest.fixture
@@ -217,44 +216,32 @@ async def test_mutation_mapper(mock_flow):
"""Test the mapping of mutations to internal command methods."""
meta = {}
response = await mock_flow.resolvers._mutation_mapper('pause', {}, meta)
- assert response is None
+ assert response[0] is True # (True, command-uuid-str)
with pytest.raises(ValueError):
await mock_flow.resolvers._mutation_mapper('non_exist', {}, meta)
-@pytest.mark.asyncio
-async def test_stop(
- one: Scheduler, run: Callable, log_filter: Callable,
-):
- """Test the stop resolver."""
- async with run(one) as log:
- resolvers = Resolvers(
- one.data_store_mgr,
- schd=one
- )
- resolvers.stop(StopMode.REQUEST_CLEAN)
- await one.process_command_queue()
- assert log_filter(
- log, level=logging.INFO, contains="Command actioned: stop"
- )
- assert one.stop_mode == StopMode.REQUEST_CLEAN
-
-
-async def test_command_logging(mock_flow, caplog):
- """It should log the command, with user name if not owner."""
- caplog.set_level(logging.INFO, logger=CYLC_LOG)
- owner = mock_flow.owner
- other = f"not-{mock_flow.owner}"
-
- command = "stop"
- mock_flow.resolvers._log_command(command, owner)
- assert caplog.records[-1].msg == f"[command] {command}"
- mock_flow.resolvers._log_command(command, other)
- msg1 = f"[command] {command} (issued by {other})"
- assert caplog.records[-1].msg == msg1
-
- command = "put_messages"
- mock_flow.resolvers._log_command(command, owner)
- assert caplog.records[-1].msg == msg1 # (prev message, i.e. not logged).
- mock_flow.resolvers._log_command(command, other)
- assert caplog.records[-1].msg == f"[command] {command} (issued by {other})"
+async def test_command_logging(mock_flow, caplog, log_filter):
+ """The command log message should include non-owner name."""
+
+ meta = {}
+
+ caplog.set_level(logging.INFO, CYLC_LOG)
+
+ await mock_flow.resolvers._mutation_mapper("stop", {}, meta)
+ assert log_filter(caplog, contains='Command "stop" received')
+
+ # put_messages: only log for owner
+ kwargs = {
+ "task_job": "1/foo/01",
+ "event_time": "bedtime",
+ "messages": [[logging.CRITICAL, "it's late"]]
+ }
+ meta["auth_user"] = mock_flow.owner
+ await mock_flow.resolvers._mutation_mapper("put_messages", kwargs, meta)
+ assert not log_filter(caplog, contains='Command "put_messages" received:')
+
+ meta["auth_user"] = "Dr Spock"
+ await mock_flow.resolvers._mutation_mapper("put_messages", kwargs, meta)
+ assert log_filter(
+ caplog, contains='Command "put_messages" received from Dr Spock')
diff --git a/tests/integration/test_scheduler.py b/tests/integration/test_scheduler.py
index 6befe2b34d1..49c5e07a9fc 100644
--- a/tests/integration/test_scheduler.py
+++ b/tests/integration/test_scheduler.py
@@ -24,7 +24,6 @@
from cylc.flow.exceptions import CylcError
from cylc.flow.parsec.exceptions import ParsecError
from cylc.flow.scheduler import Scheduler, SchedulerStop
-from cylc.flow.task_outputs import TASK_OUTPUT_SUCCEEDED
from cylc.flow.task_state import (
TASK_STATUS_WAITING,
TASK_STATUS_SUBMIT_FAILED,
@@ -212,7 +211,7 @@ async def test_no_poll_waiting_tasks(
log: pytest.LogCaptureFixture
async with start(one) as log:
# Test assumes start up with a waiting task.
- task = (one.pool.get_all_tasks())[0]
+ task = (one.pool.get_tasks())[0]
assert task.state.status == TASK_STATUS_WAITING
polled_tasks = capture_polling(one)
@@ -325,7 +324,7 @@ async def test_uuid_unchanged_on_restart(
cf_uuid = uuid_re.findall(contact_file.read_text())
assert cf_uuid == [schd.uuid_str]
-
+
async def test_restart_timeout(
flow,
one_conf,
@@ -348,11 +347,15 @@ async def test_restart_timeout(
id_ = flow(one_conf)
# run the workflow to completion
- schd = scheduler(id_)
- async with start(schd):
- for itask in schd.pool.get_all_tasks():
- itask.state_reset(TASK_OUTPUT_SUCCEEDED)
- schd.pool.spawn_on_output(itask, TASK_OUTPUT_SUCCEEDED)
+ # (by setting the only task to completed)
+ schd = scheduler(id_, paused_start=False)
+ async with start(schd) as log:
+ for itask in schd.pool.get_tasks():
+ # (needed for job config in sim mode:)
+ schd.task_job_mgr.submit_task_jobs(
+ schd.workflow, [itask], None, None)
+ schd.pool.set_prereqs_and_outputs(
+ [itask.identity], None, None, ['all'])
# restart the completed workflow
schd = scheduler(id_)
diff --git a/tests/integration/test_task_pool.py b/tests/integration/test_task_pool.py
index aec35516514..3d75074cf15 100644
--- a/tests/integration/test_task_pool.py
+++ b/tests/integration/test_task_pool.py
@@ -14,21 +14,31 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-from copy import deepcopy
import logging
-from typing import AsyncGenerator, Callable, Iterable, List, Tuple, Union
+from typing import (
+ TYPE_CHECKING,
+ AsyncGenerator,
+ Callable,
+ Iterable,
+ List,
+ Tuple,
+ Union
+)
import pytest
from pytest import param
+from json import loads
from cylc.flow import CYLC_LOG
-from cylc.flow.cycling import PointBase
from cylc.flow.cycling.integer import IntegerPoint
from cylc.flow.cycling.iso8601 import ISO8601Point
from cylc.flow.data_store_mgr import TASK_PROXIES
-from cylc.flow.task_outputs import TASK_OUTPUT_SUCCEEDED
-from cylc.flow.scheduler import Scheduler
-from cylc.flow.flow_mgr import FLOW_ALL
+from cylc.flow.task_events_mgr import TaskEventsManager
+from cylc.flow.task_outputs import (
+ TASK_OUTPUT_SUCCEEDED
+)
+
+from cylc.flow.flow_mgr import FLOW_ALL, FLOW_NONE
from cylc.flow.task_state import (
TASK_STATUS_WAITING,
TASK_STATUS_PREPARING,
@@ -38,9 +48,13 @@
TASK_STATUS_FAILED,
TASK_STATUS_EXPIRED,
TASK_STATUS_SUBMIT_FAILED,
- TASK_STATUSES_ALL,
)
+if TYPE_CHECKING:
+ from cylc.flow.cycling import PointBase
+ from cylc.flow.scheduler import Scheduler
+ from cylc.flow.task_proxy import TaskProxy
+
# NOTE: foo and bar have no parents so at start-up (even with the workflow
# paused) they get spawned out to the runahead limit. 2/pub spawns
# immediately too, because we spawn autospawn absolute-triggered tasks as
@@ -83,8 +97,17 @@
}
+def pool_get_task_ids(
+ pool: List['TaskProxy']
+) -> List[str]:
+ """Return sorted list of IDs of tasks in a task pool."""
+ return sorted(
+ [itask.identity for itask in pool.get_tasks()]
+ )
+
+
def get_task_ids(
- name_point_list: Iterable[Tuple[str, Union[PointBase, str, int]]]
+ name_point_list: Iterable[Tuple[str, Union['PointBase', str, int]]]
) -> List[str]:
"""Helper function to return sorted task identities
from a list of (name, point) tuples."""
@@ -116,17 +139,16 @@ def assert_expected_log(
@pytest.fixture(scope='module')
async def mod_example_flow(
mod_flow: Callable, mod_scheduler: Callable, mod_run: Callable
-) -> Scheduler:
+) -> 'Scheduler':
"""Return a scheduler for interrogating its task pool.
This is module-scoped so faster than example_flow, but should only be used
where the test does not mutate the state of the scheduler or task pool.
"""
id_ = mod_flow(EXAMPLE_FLOW_CFG)
- schd: Scheduler = mod_scheduler(id_, paused_start=True)
+ schd: 'Scheduler' = mod_scheduler(id_, paused_start=True)
async with mod_run(schd):
- pass
- return schd
+ yield schd
@pytest.fixture
@@ -135,7 +157,7 @@ async def example_flow(
scheduler: Callable,
start,
caplog: pytest.LogCaptureFixture,
-) -> AsyncGenerator[Scheduler, None]:
+) -> AsyncGenerator['Scheduler', None]:
"""Return a scheduler for interrogating its task pool.
This is function-scoped so slower than mod_example_flow; only use this
@@ -145,7 +167,7 @@ async def example_flow(
# set up caplog and do schd.install()/.initialise()/.configure() instead
caplog.set_level(logging.INFO, CYLC_LOG)
id_ = flow(EXAMPLE_FLOW_CFG)
- schd: Scheduler = scheduler(id_)
+ schd: 'Scheduler' = scheduler(id_)
async with start(schd):
yield schd
@@ -153,17 +175,16 @@ async def example_flow(
@pytest.fixture(scope='module')
async def mod_example_flow_2(
mod_flow: Callable, mod_scheduler: Callable, mod_run: Callable
-) -> Scheduler:
+) -> 'Scheduler':
"""Return a scheduler for interrogating its task pool.
This is module-scoped so faster than example_flow, but should only be used
where the test does not mutate the state of the scheduler or task pool.
"""
id_ = mod_flow(EXAMPLE_FLOW_2_CFG)
- schd: Scheduler = mod_scheduler(id_, paused_start=True)
+ schd: 'Scheduler' = mod_scheduler(id_, paused_start=True)
async with mod_run(schd):
- pass
- return schd
+ yield schd
@pytest.mark.parametrize(
@@ -216,9 +237,8 @@ async def test_filter_task_proxies(
expected_task_ids: List[str],
expected_bad_items: List[str],
expected_warnings: List[str],
- mod_example_flow: Scheduler,
- caplog: pytest.LogCaptureFixture,
- monkeypatch,
+ mod_example_flow: 'Scheduler',
+ caplog: pytest.LogCaptureFixture
) -> None:
"""Test TaskPool.filter_task_proxies().
@@ -241,68 +261,6 @@ async def test_filter_task_proxies(
assert_expected_log(caplog, expected_warnings)
-@pytest.mark.parametrize(
- 'items, expected_task_ids, expected_bad_items, expected_warnings',
- [
- param(
- ['*:waiting'],
- ['1/waz', '1/foo', '1/bar', '2/foo', '2/bar', '2/pub', '3/foo',
- '3/bar', '4/foo', '4/bar', '5/foo', '5/bar'], [], [],
- id="Task state"
- ),
- ]
-)
-async def test_filter_task_proxies_hidden(
- items: List[str],
- expected_task_ids: List[str],
- expected_bad_items: List[str],
- expected_warnings: List[str],
- mod_example_flow: Scheduler,
- caplog: pytest.LogCaptureFixture,
- monkeypatch,
-) -> None:
- """Test TaskPool.filter_task_proxies().
-
- This is the same as test_filter_task_proxies except we artificially add a
- new proxy "1/waz" to the hidden pool. Filtering should find a single copy
- each of the hidden and main pool tasks.
-
- See GitHub #4909: a bug in filtering was doubling up tasks in cycle points
- that appeared in both pools.
-
- The NOTE before EXAMPLE_FLOW_CFG above explains which tasks should be
- expected for the tests here.
-
- Params:
- items: Arg passed to filter_task_proxies().
- expected_task_ids: IDs of the TaskProxys that are expected to be
- returned, of the form "{point}/{name}"/
- expected_bad_items: Expected to be returned.
- expected_warnings: Expected to be logged.
- """
- monkeypatch.setattr(
- # make Tokens objects mutable to allow deepcopy to work on TaskProxy
- # objects
- 'cylc.flow.id.Tokens.__setitem__',
- lambda self, key, value: dict.__setitem__(self, key, value),
- )
-
- caplog.set_level(logging.WARNING, CYLC_LOG)
- task_pool = mod_example_flow.pool
-
- # Duplicate a task proxy, rename it, and add it to the hidden pool.
- a_task = deepcopy(task_pool.get_tasks()[0])
- a_task.identity = "1/waz"
- task_pool.hidden_pool.setdefault(a_task.point, {})
- task_pool.hidden_pool[a_task.point][a_task.identity] = a_task
-
- itasks, _, bad_items = task_pool.filter_task_proxies(items)
- task_ids = [itask.identity for itask in itasks]
- assert sorted(task_ids) == sorted(expected_task_ids)
- assert sorted(bad_items) == sorted(expected_bad_items)
- assert_expected_log(caplog, expected_warnings)
-
-
@pytest.mark.parametrize(
'items, expected_task_ids, expected_warnings',
[
@@ -353,7 +311,7 @@ async def test_match_taskdefs(
items: List[str],
expected_task_ids: List[str],
expected_warnings: List[str],
- mod_example_flow: Scheduler,
+ mod_example_flow: 'Scheduler',
caplog: pytest.LogCaptureFixture
) -> None:
"""Test TaskPool.match_taskdefs().
@@ -415,7 +373,7 @@ async def test_hold_tasks(
items: List[str],
expected_tasks_to_hold_ids: List[str],
expected_warnings: List[str],
- example_flow: Scheduler, caplog: pytest.LogCaptureFixture,
+ example_flow: 'Scheduler', caplog: pytest.LogCaptureFixture,
db_select: Callable
) -> None:
"""Test TaskPool.hold_tasks().
@@ -434,7 +392,7 @@ async def test_hold_tasks(
task_pool = example_flow.pool
n_warnings = task_pool.hold_tasks(items)
- for itask in task_pool.get_all_tasks():
+ for itask in task_pool.get_tasks():
hold_expected = itask.identity in expected_tasks_to_hold_ids
assert itask.state.is_held is hold_expected
@@ -448,7 +406,7 @@ async def test_hold_tasks(
async def test_release_held_tasks(
- example_flow: Scheduler, db_select: Callable
+ example_flow: 'Scheduler', db_select: Callable
) -> None:
"""Test TaskPool.release_held_tasks().
@@ -463,7 +421,7 @@ async def test_release_held_tasks(
task_pool = example_flow.pool
expected_tasks_to_hold_ids = sorted(['1/foo', '1/bar', '3/asd'])
task_pool.hold_tasks(expected_tasks_to_hold_ids)
- for itask in task_pool.get_all_tasks():
+ for itask in task_pool.get_tasks():
hold_expected = itask.identity in expected_tasks_to_hold_ids
assert itask.state.is_held is hold_expected
assert get_task_ids(task_pool.tasks_to_hold) == expected_tasks_to_hold_ids
@@ -472,7 +430,7 @@ async def test_release_held_tasks(
# Test
task_pool.release_held_tasks(['1/foo', '3/asd'])
- for itask in task_pool.get_all_tasks():
+ for itask in task_pool.get_tasks():
assert itask.state.is_held is (itask.identity == '1/bar')
expected_tasks_to_hold_ids = sorted(['1/bar'])
@@ -494,7 +452,7 @@ async def test_release_held_tasks(
async def test_hold_point(
hold_after_point: int,
expected_held_task_ids: List[str],
- example_flow: Scheduler, db_select: Callable
+ example_flow: 'Scheduler', db_select: Callable
) -> None:
"""Test TaskPool.set_hold_point() and .release_hold_point()"""
expected_held_task_ids = sorted(expected_held_task_ids)
@@ -506,7 +464,7 @@ async def test_hold_point(
assert ('holdcp', str(hold_after_point)) in db_select(
example_flow, True, 'workflow_params')
- for itask in task_pool.get_all_tasks():
+ for itask in task_pool.get_tasks():
hold_expected = itask.identity in expected_held_task_ids
assert itask.state.is_held is hold_expected
@@ -521,7 +479,7 @@ async def test_hold_point(
('holdcp', None)
]
- for itask in task_pool.get_all_tasks():
+ for itask in task_pool.get_tasks():
assert itask.state.is_held is False
assert task_pool.tasks_to_hold == set()
@@ -604,7 +562,7 @@ async def test_reload_stopcp(
}
}
}
- schd: Scheduler = scheduler(flow(cfg))
+ schd: 'Scheduler' = scheduler(flow(cfg))
async with start(schd):
assert str(schd.pool.stop_point) == '2020'
await schd.command_reload_workflow()
@@ -612,7 +570,7 @@ async def test_reload_stopcp(
async def test_runahead_after_remove(
- example_flow: Scheduler
+ example_flow: 'Scheduler'
) -> None:
"""The runahead limit should be recomputed after tasks are removed.
@@ -634,7 +592,7 @@ async def test_load_db_bad_platform(
):
"""Test that loading an unavailable platform from the database doesn't
cause calamitous failure."""
- schd: Scheduler = scheduler(flow(one_conf))
+ schd: 'Scheduler' = scheduler(flow(one_conf))
async with start(schd):
result = schd.pool.load_db_task_pool_for_restart(0, (
@@ -645,7 +603,7 @@ async def test_load_db_bad_platform(
def list_tasks(schd):
- """Return a list of task pool tasks (incl hidden pool tasks).
+ """Return a sorted list of task pool tasks.
Returns a list in the format:
[
@@ -655,7 +613,7 @@ def list_tasks(schd):
"""
return sorted(
(itask.tokens['cycle'], itask.tokens['task'], itask.state.status)
- for itask in schd.pool.get_all_tasks()
+ for itask in schd.pool.get_tasks()
)
@@ -746,8 +704,8 @@ async def test_restart_prereqs(
assert list_tasks(schd) == expected_1
# Mark 1/a as succeeded and spawn 1/z
- schd.pool.get_all_tasks()[0].state_reset('succeeded')
- schd.pool.spawn_on_output(schd.pool.get_all_tasks()[0], 'succeeded')
+ task_a = schd.pool.get_tasks()[0]
+ schd.pool.task_events_mgr.process_message(task_a, 1, 'succeeded')
assert list_tasks(schd) == expected_2
# Save our progress
@@ -770,7 +728,9 @@ async def test_restart_prereqs(
schd.data_store_mgr.update_data_structure()
# Check resulting dependencies of task z
- task_z = schd.pool.get_all_tasks()[0]
+ task_z = [
+ t for t in schd.pool.get_tasks() if t.tdef.name == "z"
+ ][0]
assert sorted(
(
p.satisfied
@@ -867,8 +827,8 @@ async def test_reload_prereqs(
assert list_tasks(schd) == expected_1
# Mark 1/a as succeeded and spawn 1/z
- schd.pool.get_all_tasks()[0].state_reset('succeeded')
- schd.pool.spawn_on_output(schd.pool.get_all_tasks()[0], 'succeeded')
+ task_a = schd.pool.get_tasks()[0]
+ schd.pool.task_events_mgr.process_message(task_a, 1, 'succeeded')
assert list_tasks(schd) == expected_2
# Modify flow.cylc to add a new dependency on "z"
@@ -880,7 +840,9 @@ async def test_reload_prereqs(
assert list_tasks(schd) == expected_3
# Check resulting dependencies of task z
- task_z = schd.pool.get_all_tasks()[0]
+ task_z = [
+ t for t in schd.pool.get_tasks() if t.tdef.name == "z"
+ ][0]
assert sorted(
(
p.satisfied
@@ -904,10 +866,9 @@ async def _test_restart_prereqs_sat():
]
# Mark both as succeeded and spawn 1/c
- for itask in schd.pool.get_all_tasks():
- itask.state_reset('succeeded')
- schd.pool.spawn_on_output(itask, 'succeeded')
- schd.workflow_db_mgr.put_insert_task_outputs(itask)
+ for itask in schd.pool.get_tasks():
+ schd.pool.task_events_mgr.process_message(itask, 1, 'succeeded')
+ schd.workflow_db_mgr.put_update_task_outputs(itask)
schd.pool.remove_if_complete(itask)
schd.workflow_db_mgr.process_queued_ops()
assert list_tasks(schd) == [
@@ -922,7 +883,7 @@ async def _test_restart_prereqs_sat():
]
# Check resulting dependencies of task z
- task_c = schd.pool.get_all_tasks()[0]
+ task_c = schd.pool.get_tasks()[0]
assert sorted(
(*key, satisfied)
for prereq in task_c.state.prerequisites
@@ -1018,8 +979,8 @@ async def test_runahead_limit_for_sequence_before_start_cycle(
):
"""It should obey the runahead limit.
- Ensure the runahead limit is computed correctly for sequences that begin
- before the start cycle.
+ Ensure the runahead limit is computed correctly for sequences before the
+ start cycle.
See https://github.com/cylc/cylc-flow/issues/5603
"""
@@ -1080,7 +1041,7 @@ async def test_db_update_on_removal(
task_a = schd.pool.get_tasks()[0]
# set the task to running
- task_a.state_reset('running')
+ schd.pool.task_events_mgr.process_message(task_a, 1, 'started')
# update the db
await schd.update_data_structure()
@@ -1092,7 +1053,7 @@ async def test_db_update_on_removal(
]
# mark the task as succeeded and allow it to be removed from the pool
- task_a.state_reset('succeeded')
+ schd.pool.task_events_mgr.process_message(task_a, 1, 'succeeded')
schd.pool.remove_if_complete(task_a)
# update the DB, note no new tasks have been added to the pool
@@ -1110,8 +1071,7 @@ async def test_no_flow_tasks_dont_spawn(
):
"""Ensure no-flow tasks don't spawn downstreams.
- No-flow tasks (i.e `--flow=none`) are one-offs which are not attached to
- any "flow".
+ No-flow tasks (i.e `--flow=none`) are not attached to any "flow".
See https://github.com/cylc/cylc-flow/issues/5613
"""
@@ -1128,9 +1088,14 @@ async def test_no_flow_tasks_dont_spawn(
schd = scheduler(id_)
async with start(schd):
- # mark task 1/a as succeeded
task_a = schd.pool.get_tasks()[0]
- task_a.state_reset(TASK_OUTPUT_SUCCEEDED)
+
+ # set as no-flow:
+ task_a.flow_nums = set()
+
+ # Set as completed: should not spawn children.
+ schd.pool.set_prereqs_and_outputs(
+ [task_a.identity], None, None, [FLOW_NONE])
for flow_nums, force, pool in (
# outputs yielded from a no-flow task should not spawn downstreams
@@ -1153,16 +1118,13 @@ async def test_no_flow_tasks_dont_spawn(
TASK_OUTPUT_SUCCEEDED,
forced=force,
)
+
schd.pool.spawn_on_all_outputs(task_a)
# ensure the pool is as expected
assert [
(itask.identity, itask.flow_nums)
- for pool in [
- schd.pool.get_tasks(),
- schd.pool.get_hidden_tasks(),
- ]
- for itask in pool
+ for itask in schd.pool.get_tasks()
] == pool
@@ -1176,23 +1138,16 @@ async def test_task_proxy_remove_from_queues(
# Set up a scheduler with a non-default queue:
one_conf['scheduling'] = {
'queues': {'queue_two': {'members': 'one, control'}},
- 'graph': {'R1': 'two & one & hidden & control & hidden_control'},
+ 'graph': {'R1': 'two & one & control'},
}
schd = scheduler(flow(one_conf))
async with start(schd):
# Get a list of itasks:
itasks = schd.pool.get_tasks()
- point = itasks[0].point
for itask in itasks:
id_ = itask.identity
- # Move some tasks to the hidden_pool to ensure that these are
- # removed too:
- if 'hidden' in itask.identity:
- schd.pool.hidden_pool.setdefault(point, {id_: itask})
- del schd.pool.main_pool[point][id_]
-
# The meat of the test - remove itask from pool if it
# doesn't have "control" in the name:
if 'control' not in id_:
@@ -1203,12 +1158,11 @@ async def test_task_proxy_remove_from_queues(
name: [itask.identity for itask in queue.deque]
for name, queue in schd.pool.task_queue_mgr.queues.items()}
- assert queues_after['default'] == ['1/hidden_control']
assert queues_after['queue_two'] == ['1/control']
async def test_runahead_offset_start(
- mod_example_flow_2: Scheduler
+ mod_example_flow_2: 'Scheduler'
) -> None:
"""Late-start recurrences should not break the runahead limit at start-up.
@@ -1224,25 +1178,21 @@ async def test_detect_incomplete_tasks(
start,
log_filter,
):
- """Finished tasks should be marked as incomplete.
+ """Finished but incomplete tasks should be retains as incomplete."""
- If a task finishes without completing all required outputs, then it should
- be marked as incomplete.
- """
- incomplete_final_task_states = [
- TASK_STATUS_FAILED,
- TASK_STATUS_EXPIRED,
- TASK_STATUS_SUBMIT_FAILED,
- ]
+ final_task_states = {
+ TASK_STATUS_FAILED: TaskEventsManager.EVENT_FAILED,
+ TASK_STATUS_EXPIRED: TaskEventsManager.EVENT_EXPIRED,
+ TASK_STATUS_SUBMIT_FAILED: TaskEventsManager.EVENT_SUBMIT_FAILED
+ }
id_ = flow({
'scheduler': {
'allow implicit tasks': 'True',
},
'scheduling': {
'graph': {
- # a workflow with one task for each of the incomplete final
- # task states
- 'R1': '\n'.join(incomplete_final_task_states)
+ # a workflow with one task for each of the final task states
+ 'R1': '\n'.join(final_task_states.keys())
}
}
})
@@ -1250,15 +1200,451 @@ async def test_detect_incomplete_tasks(
async with start(schd) as log:
itasks = schd.pool.get_tasks()
for itask in itasks:
+ itask.state_reset(is_queued=False)
# spawn the output corresponding to the task
- schd.pool.spawn_on_output(itask, itask.tdef.name)
+ schd.pool.task_events_mgr.process_message(
+ itask, 1,
+ final_task_states[itask.tdef.name]
+ )
# ensure that it is correctly identified as incomplete
assert itask.state.outputs.get_incomplete()
assert itask.state.outputs.is_incomplete()
- assert log_filter(
- log, contains=f"[{itask}] did not complete required outputs:")
- # the task should not have been removed
- assert itask in schd.pool.get_tasks()
+ if itask.tdef.name == TASK_STATUS_EXPIRED:
+ assert log_filter(
+ log,
+ contains=f"[{itask}] removed (expired)"
+ )
+ # the task should have been removed
+ assert itask not in schd.pool.get_tasks()
+ else:
+ assert log_filter(
+ log,
+ contains=(
+ f"[{itask}] did not complete "
+ "required outputs:"
+ )
+ )
+ # the task should not have been removed
+ assert itask in schd.pool.get_tasks()
+
+
+async def test_future_trigger_final_point(
+ flow,
+ scheduler,
+ start,
+ log_filter,
+):
+ """Check spawning of future-triggered tasks: foo[+P1] => bar.
+
+ Don't spawn if a prerequisite reaches beyond the final cycle point.
+
+ """
+ id_ = flow(
+ {
+ 'scheduler': {
+ 'allow implicit tasks': 'True',
+ },
+ 'scheduling': {
+ 'cycling mode': 'integer',
+ 'initial cycle point': 1,
+ 'final cycle point': 1,
+ 'graph': {
+ 'P1': "foo\n foo[+P1] & bar => baz"
+ }
+ }
+ }
+ )
+ schd = scheduler(id_)
+ async with start(schd) as log:
+ for itask in schd.pool.get_tasks():
+ schd.pool.spawn_on_output(itask, "succeeded")
+ assert log_filter(
+ log,
+ regex=(
+ ".*1/baz.*not spawned: a prerequisite is beyond"
+ r" the workflow stop point \(1\)"
+ )
+ )
+
+
+async def test_set_failed_complete(
+ flow,
+ scheduler,
+ start,
+ one_conf,
+ log_filter,
+ db_select: Callable
+):
+ """Test manual completion of an incomplete failed task."""
+ id_ = flow(one_conf)
+ schd = scheduler(id_)
+ async with start(schd) as log:
+ one = schd.pool.get_tasks()[0]
+ one.state_reset(is_queued=False)
+
+ schd.pool.task_events_mgr.process_message(one, 1, "failed")
+ assert log_filter(
+ log, regex="1/one.* setting implied output: submitted")
+ assert log_filter(
+ log, regex="1/one.* setting implied output: started")
+ assert log_filter(
+ log, regex="failed.* did not complete required outputs")
+
+ # Set failed task complete via default "set" args.
+ schd.pool.set_prereqs_and_outputs([one.identity], None, None, ['all'])
+
+ assert log_filter(
+ log, contains=f'[{one}] task completed')
+
+ db_outputs = db_select(
+ schd, True, 'task_outputs', 'outputs',
+ **{'name': 'one'}
+ )
+ assert (
+ sorted(loads((db_outputs[0])[0])) == [
+ "failed", "started", "submitted", "succeeded"
+ ]
+ )
+
+
+async def test_set_prereqs(
+ flow,
+ scheduler,
+ start,
+ log_filter,
+):
+ """Check manual setting of prerequisites.
+
+ """
+ id_ = flow(
+ {
+ 'scheduler': {
+ 'allow implicit tasks': 'True',
+ },
+ 'scheduling': {
+ 'initial cycle point': '2040',
+ 'graph': {
+ 'R1': "foo & bar & baz => qux"
+ }
+ },
+ 'runtime': {
+ 'foo': {
+ 'outputs': {
+ 'a': 'drugs and money',
+ }
+ }
+ }
+ }
+ )
+ schd = scheduler(id_)
+
+ async with start(schd) as log:
+
+ # it should start up with foo, bar, baz
+ assert (
+ pool_get_task_ids(schd.pool) == [
+ "20400101T0000Z/bar",
+ "20400101T0000Z/baz",
+ "20400101T0000Z/foo"]
+ )
+
+ # try to set an invalid prereq of qux
+ schd.pool.set_prereqs_and_outputs(
+ ["20400101T0000Z/qux"], None, ["20400101T0000Z/foo:a"], ['all'])
+ assert log_filter(
+ log, contains='20400101T0000Z/qux does not depend on "20400101T0000Z/foo:a"')
+
+ # it should not add 20400101T0000Z/qux to the pool
+ assert (
+ pool_get_task_ids(schd.pool) == [
+ "20400101T0000Z/bar",
+ "20400101T0000Z/baz",
+ "20400101T0000Z/foo"]
+ )
+
+ # set one prereq of future task 20400101T0000Z/qux
+ schd.pool.set_prereqs_and_outputs(
+ ["20400101T0000Z/qux"],
+ None,
+ ["20400101T0000Z/foo:succeeded"],
+ ['all'])
+
+ # it should add 20400101T0000Z/qux to the pool
+ assert (
+ pool_get_task_ids(schd.pool) == [
+ "20400101T0000Z/bar",
+ "20400101T0000Z/baz",
+ "20400101T0000Z/foo",
+ "20400101T0000Z/qux"
+ ]
+ )
+
+ # get the 20400101T0000Z/qux task proxy
+ qux = schd.pool.get_task(ISO8601Point("20400101T0000Z"), "qux")
+ assert not qux.state.prerequisites_all_satisfied()
+
+ # set its other prereqs (test implicit "succeeded" and "succeed")
+ # and truncated cycle point
+ schd.pool.set_prereqs_and_outputs(
+ ["2040/qux"], None, ["2040/bar", "2040/baz:succeed"], ['all'])
+
+ # it should now be fully satisfied
+ assert qux.state.prerequisites_all_satisfied()
+
+
+async def test_set_bad_prereqs(
+ flow,
+ scheduler,
+ start,
+ log_filter,
+):
+ """Check manual setting of prerequisites.
+
+ """
+ id_ = flow({
+ 'scheduler': {
+ 'allow implicit tasks': 'True',
+ 'cycle point format': '%Y'},
+ 'scheduling': {
+ 'initial cycle point': '2040',
+ 'graph': {'R1': "foo => bar"}},
+ })
+ schd = scheduler(id_)
+
+ def set_prereqs(prereqs):
+ """Shorthand so only varible under test given as arg"""
+ schd.pool.set_prereqs_and_outputs(
+ ["2040/bar"], None, prereqs, ['all'])
+
+ async with start(schd) as log:
+ # Invalid: task name wildcard:
+ set_prereqs(["2040/*"])
+ assert log_filter(log, contains='Invalid prerequisite task name' )
+
+ # Invalid: cycle point wildcard.
+ set_prereqs(["*/foo"])
+ assert log_filter(log, contains='Invalid prerequisite cycle point')
+
+
+async def test_set_outputs_live(
+ flow,
+ scheduler,
+ start,
+ log_filter,
+):
+ """Check manual set outputs in an active (spawned) task.
+
+ """
+ id_ = flow(
+ {
+ 'scheduler': {
+ 'allow implicit tasks': 'True',
+ },
+ 'scheduling': {
+ 'graph': {
+ 'R1': """
+ foo:x => bar
+ foo => baz
+ foo:y
+ """
+ }
+ },
+ 'runtime': {
+ 'foo': {
+ 'outputs': {
+ 'x': 'xylophone',
+ 'y': 'yacht'
+ }
+ }
+ }
+ }
+ )
+ schd = scheduler(id_)
+
+ async with start(schd) as log:
+
+ # it should start up with just 1/foo
+ assert pool_get_task_ids(schd.pool) == ["1/foo"]
+
+ # fake failed
+ foo = schd.pool.get_task(IntegerPoint("1"), "foo")
+ foo.state_reset(is_queued=False)
+ schd.pool.task_events_mgr.process_message(foo, 1, 'failed')
+
+ # set foo:x: it should spawn bar but not baz
+ schd.pool.set_prereqs_and_outputs(["1/foo"], ["x"], None, ['all'])
+ assert (
+ pool_get_task_ids(schd.pool) == ["1/bar", "1/foo"]
+ )
+ # Foo should have been removed from the queue:
+ assert '1/foo' not in [
+ i.identity for i
+ in schd.pool.task_queue_mgr.queues['default'].deque
+ ]
+ # set foo:succeed: it should spawn baz but foo remains incomplete.
+ schd.pool.set_prereqs_and_outputs(
+ ["1/foo"], ["succeeded"], None, ['all'])
+ assert (
+ pool_get_task_ids(schd.pool) == ["1/bar", "1/baz", "1/foo"]
+ )
+
+ # it should complete implied outputs (submitted, started) too
+ assert log_filter(
+ log, contains="setting implied output: submitted")
+ assert log_filter(
+ log, contains="setting implied output: started")
+
+ # set foo (default: all required outputs) to complete y.
+ schd.pool.set_prereqs_and_outputs(["1/foo"], None, None, ['all'])
+ assert log_filter(
+ log, contains="output 1/foo:succeeded completed")
+ assert (
+ pool_get_task_ids(schd.pool) == ["1/bar", "1/baz"]
+ )
+
+
+async def test_set_outputs_live2(
+ flow,
+ scheduler,
+ start,
+ log_filter,
+):
+ """Assert that optional outputs are satisfied before completion
+ outputs to prevent incomplete task warnings.
+ """
+ id_ = flow(
+ {
+ 'scheduler': {'allow implicit tasks': 'True'},
+ 'scheduling': {'graph': {
+ 'R1': """
+ foo:a => apple
+ foo:b => boat
+ """}},
+ 'runtime': {'foo': {'outputs': {
+ 'a': 'xylophone',
+ 'b': 'yacht'}}}
+ }
+ )
+ schd = scheduler(id_)
+
+ async with start(schd) as log:
+ schd.pool.set_prereqs_and_outputs(["1/foo"], None, None, ['all'])
+ assert not log_filter(
+ log,
+ contains="did not complete required outputs: ['a', 'b']"
+ )
+
+
+async def test_set_outputs_future(
+ flow,
+ scheduler,
+ start,
+ log_filter,
+):
+ """Check manual setting of future task outputs.
+
+ """
+ id_ = flow(
+ {
+ 'scheduler': {
+ 'allow implicit tasks': 'True',
+ },
+ 'scheduling': {
+ 'graph': {
+ 'R1': "a:x & a:y => b => c"
+ }
+ },
+ 'runtime': {
+ 'a': {
+ 'outputs': {
+ 'x': 'xylophone',
+ 'y': 'yacht'
+ }
+ }
+ }
+ }
+ )
+ schd = scheduler(id_)
+
+ async with start(schd) as log:
+
+ # it should start up with just 1/a
+ assert pool_get_task_ids(schd.pool) == ["1/a"]
+
+ # setting future task b succeeded should spawn c but not b
+ schd.pool.set_prereqs_and_outputs(
+ ["1/b"], ["succeeded"], None, ['all'])
+ assert (
+ pool_get_task_ids(schd.pool) == ["1/a", "1/c"]
+ )
+
+ schd.pool.set_prereqs_and_outputs(
+ items=["1/a"],
+ outputs=["x", "y", "cheese"],
+ prereqs=None,
+ flow=['all']
+ )
+ assert log_filter(log, contains="output 1/a:cheese not found")
+ assert log_filter(log, contains="completed output x")
+ assert log_filter(log, contains="completed output y")
+
+
+async def test_prereq_satisfaction(
+ flow,
+ scheduler,
+ start,
+ log_filter,
+):
+ """Check manual setting of task prerequisites.
+
+ """
+ id_ = flow(
+ {
+ 'scheduler': {
+ 'allow implicit tasks': 'True',
+ },
+ 'scheduling': {
+ 'graph': {
+ 'R1': "a:x & a:y => b"
+ }
+ },
+ 'runtime': {
+ 'a': {
+ 'outputs': {
+ 'x': 'xylophone',
+ 'y': 'yacht'
+ }
+ }
+ }
+ }
+ )
+ schd = scheduler(id_)
+ async with start(schd) as log:
+ # it should start up with just 1/a
+ assert pool_get_task_ids(schd.pool) == ["1/a"]
+ # spawn b
+ schd.pool.set_prereqs_and_outputs(["1/a"], ["x"], None, ['all'])
+ assert (
+ pool_get_task_ids(schd.pool) == ["1/a", "1/b"]
+ )
+
+ b = schd.pool.get_task(IntegerPoint("1"), "b")
+
+ assert not b.is_waiting_prereqs_done()
+
+ # set valid and invalid prerequisites, by label and message.
+ schd.pool.set_prereqs_and_outputs(
+ prereqs=["1/a:xylophone", "1/a:y", "1/a:w", "1/a:z"],
+ items=["1/b"], outputs=None, flow=['all']
+ )
+ assert log_filter(log, contains="1/a:z not found")
+ assert log_filter(log, contains="1/a:w not found")
+ assert not log_filter(log, contains='1/b does not depend on "1/a:x"')
+ assert not log_filter(
+ log, contains='1/b does not depend on "1/a:xylophone"')
+ assert not log_filter(log, contains='1/b does not depend on "1/a:y"')
+
+ assert b.is_waiting_prereqs_done()
@pytest.mark.parametrize('compat_mode', ['compat-mode', 'normal-mode'])
@@ -1284,6 +1670,7 @@ async def test_compute_runahead(
"""
if cycling_mode == 'integer':
+
config = {
'scheduler': {
'allow implicit tasks': 'True',
@@ -1408,32 +1795,22 @@ async def test_runahead_future_trigger(
async with start(schd, level=logging.DEBUG):
assert str(schd.pool.runahead_limit_point) == '20010103'
schd.pool.release_runahead_tasks()
- for itask in schd.pool.get_all_tasks():
+ for itask in schd.pool.get_tasks():
schd.pool.spawn_on_output(itask, 'succeeded')
# future trigger raises the limit by one cycle point
assert str(schd.pool.runahead_limit_point) == '20010104'
-async def test_compute_runahead_against_task_state(
- flow,
- scheduler,
- start,
- monkeypatch,
-):
- """For each task status check whether changing the oldest task
- to that status will cause compute_runahead to make a change.
+@pytest.fixture(scope='module')
+async def mod_blah(
+ mod_flow: Callable, mod_scheduler: Callable, mod_run: Callable
+) -> 'Scheduler':
+ """Return a scheduler for interrogating its task pool.
+
+ This is module-scoped so faster than example_flow, but should only be used
+ where the test does not mutate the state of the scheduler or task pool.
"""
- states = [
- # (Status, Are we expecting an update?)
- (TASK_STATUS_WAITING, False),
- (TASK_STATUS_EXPIRED, True),
- (TASK_STATUS_PREPARING, False),
- (TASK_STATUS_SUBMIT_FAILED, True),
- (TASK_STATUS_SUBMITTED, False),
- (TASK_STATUS_RUNNING, False),
- (TASK_STATUS_FAILED, True),
- (TASK_STATUS_SUCCEEDED, True)
- ]
+
config = {
'scheduler': {
'allow implicit tasks': 'True',
@@ -1447,26 +1824,53 @@ async def test_compute_runahead_against_task_state(
},
}
}
+ id_ = mod_flow(config)
+ schd: 'Scheduler' = mod_scheduler(id_, paused_start=True)
+ async with mod_run(schd):
+ yield schd
+
+
+@pytest.mark.parametrize(
+ 'status, expected',
+ [
+ # (Status, Are we expecting an update?)
+ (TASK_STATUS_WAITING, False),
+ (TASK_STATUS_EXPIRED, False),
+ (TASK_STATUS_PREPARING, False),
+ (TASK_STATUS_SUBMIT_FAILED, False),
+ (TASK_STATUS_SUBMITTED, False),
+ (TASK_STATUS_RUNNING, False),
+ (TASK_STATUS_FAILED, True),
+ (TASK_STATUS_SUCCEEDED, True)
+ ]
+)
+async def test_runahead_c7_compat_task_state(
+ status,
+ expected,
+ mod_blah,
+ monkeypatch,
+):
+ """For each task status check whether changing the oldest task
+ to that status will cause compute_runahead to make a change.
+
+ Compat mode: Cylc 7 ignored failed tasks but not submit-failed!
+
+ """
def max_cycle(tasks):
return max([int(t.tokens.get("cycle")) for t in tasks])
monkeypatch.setattr(
- 'cylc.flow.flags.cylc7_back_compat',
- True)
+ 'cylc.flow.flags.cylc7_back_compat', True)
monkeypatch.setattr(
'cylc.flow.task_events_mgr.TaskEventsManager._insert_task_job',
lambda *_: True)
- schd = scheduler(flow(config))
- async with start(schd):
- for task_status, new_runahead in states:
- before = max_cycle(schd.pool.get_tasks())
- itask = schd.pool.get_task(ISO8601Point(f'{before - 2:04}'), 'a')
- schd.task_events_mgr.process_message(
- itask,
- logging.INFO,
- task_status,
- )
- after = max_cycle(schd.pool.get_tasks())
- assert bool(before != after) == new_runahead
+ mod_blah.pool.compute_runahead()
+ before_pt = max_cycle(mod_blah.pool.get_tasks())
+ before = mod_blah.pool.runahead_limit_point
+ itask = mod_blah.pool.get_task(ISO8601Point(f'{before_pt - 2:04}'), 'a')
+ itask.state_reset(status, is_queued=False)
+ mod_blah.pool.compute_runahead()
+ after = mod_blah.pool.runahead_limit_point
+ assert bool(before != after) == expected
diff --git a/tests/integration/test_trigger.py b/tests/integration/test_trigger.py
index e008caa4a7c..30ae3404ed8 100644
--- a/tests/integration/test_trigger.py
+++ b/tests/integration/test_trigger.py
@@ -37,7 +37,7 @@ async def test_trigger_invalid(mod_one, start, log_filter, flow_strs):
"""Ensure invalid flow values are rejected."""
async with start(mod_one) as log:
log.clear()
- assert mod_one.pool.force_trigger_tasks(['*'], flow_strs) == 0
+ assert mod_one.pool.force_trigger_tasks(['*'], flow_strs) is None
assert len(log_filter(log, level=logging.WARN)) == 1
diff --git a/tests/integration/tui/screenshots/test_auto_expansion.later-time.html b/tests/integration/tui/screenshots/test_auto_expansion.later-time.html
index ca1a8a73f0c..6e6490ebc05 100644
--- a/tests/integration/tui/screenshots/test_auto_expansion.later-time.html
+++ b/tests/integration/tui/screenshots/test_auto_expansion.later-time.html
@@ -1,9 +1,9 @@
Cylc Tui workflows filtered (W - edit, E - reset)
- ~cylc
- - one - paused
- - ̿○ 1
- ̿○ b
+ - one - paused 1■
+ - ● 1
+ ● b
- ̿○ 2
- ̿○ A
̿○ a
diff --git a/tests/integration/tui/screenshots/test_online_mutation.hold-mutation-selected.html b/tests/integration/tui/screenshots/test_online_mutation.hold-mutation-selected.html
index b8a802948f3..8bfe41ea904 100644
--- a/tests/integration/tui/screenshots/test_online_mutation.hold-mutation-selected.html
+++ b/tests/integration/tui/screenshots/test_online_mutation.hold-mutation-selected.html
@@ -9,7 +9,7 @@
│ < log > │
│ < poll > │
│ < release > │
- │ < show > │
+ │ < set > │
│ │
quit: q help: │ q to close │↥ ↧ Home End
filter tasks: T└────────────────────────────────────────────────┘
diff --git a/tests/integration/tui/screenshots/test_set_mutation.set-command-selected.html b/tests/integration/tui/screenshots/test_set_mutation.set-command-selected.html
new file mode 100644
index 00000000000..8f27deac20e
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_set_mutation.set-command-selected.html
@@ -0,0 +1,16 @@
+Cylc Tui work┌────────────────────────────────────────────────┐
+ │ id: 1/a │
+- ~cylc │ │
+ - one - paus│ Action │
+ - ̿○ 1 │ < (cancel) > │
+ ̿○ a │ │
+ ○ z │ < hold > │
+ │ < kill > │
+ │ < log > │
+ │ < poll > │
+ │ < release > │
+ │ < set > │
+ │ │
+quit: q help: │ q to close │↥ ↧ Home End
+filter tasks: T└────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_set_mutation.task-state-updated.html b/tests/integration/tui/screenshots/test_set_mutation.task-state-updated.html
new file mode 100644
index 00000000000..b99e16cf6ea
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_set_mutation.task-state-updated.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused 1■
+ - ̿○ 1
+ ● a
+ ̿○ z
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_show.fail.html b/tests/integration/tui/screenshots/test_show.fail.html
index f788e5b3a55..66a4836bc01 100644
--- a/tests/integration/tui/screenshots/test_show.fail.html
+++ b/tests/integration/tui/screenshots/test_show.fail.html
@@ -21,7 +21,7 @@
│ < │ │
│ < │ │
│ < │ │
- │ │ │
+ │ < │ │
│ │ │
│ │ │
│ │ │
diff --git a/tests/integration/tui/test_app.py b/tests/integration/tui/test_app.py
index 908866fc948..b9f50d7d027 100644
--- a/tests/integration/tui/test_app.py
+++ b/tests/integration/tui/test_app.py
@@ -18,13 +18,7 @@
import pytest
import urwid
-from cylc.flow.cycling.integer import IntegerPoint
-from cylc.flow.task_state import (
-# TASK_STATUS_RUNNING,
- TASK_STATUS_SUCCEEDED,
-# TASK_STATUS_FAILED,
-# TASK_STATUS_WAITING,
-)
+from cylc.flow.task_outputs import TASK_OUTPUT_SUCCEEDED
from cylc.flow.workflow_status import StopMode
@@ -329,12 +323,16 @@ async def test_auto_expansion(flow, scheduler, start, rakiura):
'on-load',
'cycle "1" and top-level family "1/A" should be expanded',
)
-
for task in ('a', 'b'):
- itask = schd.pool.get_task(IntegerPoint('1'), task)
- itask.state_reset(TASK_STATUS_SUCCEEDED)
- schd.pool.spawn_on_output(itask, TASK_STATUS_SUCCEEDED)
+ schd.pool.set_prereqs_and_outputs(
+ items=[f"1/{task}"],
+ outputs=[TASK_OUTPUT_SUCCEEDED],
+ prereqs=None,
+ flow=['all']
+ )
+
await schd.update_data_structure()
+ schd.update_data_store()
rk.compare_screenshot(
'later-time',
diff --git a/tests/integration/tui/test_mutations.py b/tests/integration/tui/test_mutations.py
index 18da88d227d..844a0654ab1 100644
--- a/tests/integration/tui/test_mutations.py
+++ b/tests/integration/tui/test_mutations.py
@@ -27,7 +27,32 @@ async def gen_commands(schd):
while True:
await asyncio.sleep(0.1)
if not schd.command_queue.empty():
- yield schd.command_queue.get()
+ # (ignore first item: command UUID)
+ yield schd.command_queue.get()[1:]
+
+
+async def process_command(schd, tries=10, interval=0.1):
+ """Wait for command(s) to be queued and run.
+
+ Waits for at least one command to be queued and for all queued commands to
+ be run.
+ """
+ # wait for the command to be queued
+ for _try in range(tries):
+ await asyncio.sleep(interval)
+ if not schd.command_queue.empty():
+ break
+ else:
+ raise Exception(f'No command was queued after {tries * interval}s')
+
+ # run the command
+ await schd.process_command_queue()
+
+ # push out updates
+ await schd.update_data_structure()
+
+ # make sure it ran
+ assert schd.command_queue.empty(), 'command queue has not emptied'
async def test_online_mutation(
@@ -76,7 +101,7 @@ async def test_online_mutation(
command = None
async for command in gen_commands(schd):
break
- assert command == ('hold', (['1/one'],), {})
+ assert command == ('hold', [], {'tasks': ['1/one']})
# close the dialogue and re-run the hold mutation
rk.user_input('q', 'q', 'enter')
@@ -214,3 +239,55 @@ async def test_offline_mutation(
'there should be a box displaying the error containing the stderr'
' returned by the command',
)
+
+
+async def test_set_mutation(
+ flow,
+ scheduler,
+ start,
+ rakiura,
+):
+ id_ = flow({
+ 'scheduling': {
+ 'graph': {
+ 'R1': 'a => z'
+ },
+ },
+ }, name='one')
+ schd = scheduler(id_)
+ async with start(schd):
+ await schd.update_data_structure()
+ with rakiura(schd.tokens.id, size='80,15') as rk:
+ # open the context menu on 1/a
+ rk.user_input('down', 'down', 'down', 'enter')
+ rk.force_update()
+
+ # select the "set" mutation
+ rk.user_input(*(('down',) * 6)) # 6th command down
+
+ rk.compare_screenshot(
+ # take a screenshot to ensure we have focused on the mutation
+ # successfully
+ 'set-command-selected',
+ 'The command menu should be open for the task 1/a with the'
+ ' set command selected.'
+ )
+
+ # issue the "set" mutation
+ rk.user_input('enter')
+
+ # wait for the command to be received and run it
+ await process_command(schd)
+
+ # close the error dialogue
+ # NOTE: This hides an asyncio error that does not occur outside of
+ # the tests
+ rk.user_input('q', 'q', 'q')
+
+ rk.compare_screenshot(
+ # take a screenshot to ensure we have focused on the mutation
+ # successfully
+ 'task-state-updated',
+ '1/a should now show as succeeded,'
+ ' there should be no associated job.'
+ )
diff --git a/tests/integration/tui/test_show.py b/tests/integration/tui/test_show.py
index c664cdd1393..063fd4b8781 100644
--- a/tests/integration/tui/test_show.py
+++ b/tests/integration/tui/test_show.py
@@ -48,7 +48,7 @@ async def test_show(flow, scheduler, start, rakiura, monkeypatch):
rk.user_input('down', 'down', 'enter')
# select the "show" context option
- rk.user_input(*(['down'] * 6), 'enter')
+ rk.user_input(*(['down'] * 7), 'enter')
rk.compare_screenshot(
'success',
'the show output should be displayed',
@@ -63,7 +63,7 @@ def cli_cmd_fail(*args, **kwargs):
)
# select the "show" context option
- rk.user_input('q', 'enter', *(['down'] * 6), 'enter')
+ rk.user_input('q', 'enter', *(['down'] * 7), 'enter')
rk.compare_screenshot(
'fail',
'the error should be displayed',
diff --git a/tests/integration/validate/test_outputs.py b/tests/integration/validate/test_outputs.py
index a91393366b5..5675372a09f 100644
--- a/tests/integration/validate/test_outputs.py
+++ b/tests/integration/validate/test_outputs.py
@@ -50,6 +50,7 @@
'foo+bar',
# keywords
'required',
+ 'optional',
'all',
# built-in qualifiers
'succeeded',
diff --git a/tests/unit/scripts/test_completion_server.py b/tests/unit/scripts/test_completion_server.py
index 186e13b7272..975ac946da6 100644
--- a/tests/unit/scripts/test_completion_server.py
+++ b/tests/unit/scripts/test_completion_server.py
@@ -20,6 +20,7 @@
from cylc.flow.id import Tokens
from cylc.flow.network.scan import scan
from cylc.flow.scripts.completion_server import (
+ _list_prereqs_and_outputs,
server,
complete_cylc,
complete_command,
@@ -540,7 +541,7 @@ async def test_list_dir(tmp_path, monkeypatch):
# => list $PWD
assert {
str(path)
- for path in await _list_dir(None, None)
+ for path in await _list_dir(None)
} == {'x/'}
# no trailing `/` at the end of the path
@@ -548,7 +549,7 @@ async def test_list_dir(tmp_path, monkeypatch):
# => list the parent
assert {
str(path)
- for path in await _list_dir(None, 'x')
+ for path in await _list_dir('x')
} == {'x/'}
# # trailing `/` at the end of the path
@@ -556,14 +557,14 @@ async def test_list_dir(tmp_path, monkeypatch):
# # => list dir path
assert {
str(path)
- for path in await _list_dir(None, 'x/')
+ for path in await _list_dir('x/')
} == {'x/y/', 'x/z'} # "y" is a dir, "z" is a file
# listing a file
# => noting to list, just return the file
assert {
str(path)
- for path in await _list_dir(None, 'x/z/')
+ for path in await _list_dir('x/z/')
} == {'x/z'}
# --- absolute paths ---
@@ -574,7 +575,7 @@ async def test_list_dir(tmp_path, monkeypatch):
assert {
# '/'.join(path.rsplit('/', 2)[-2:])
path.replace(str(tmp_path), '')
- for path in await _list_dir(None, str(tmp_path / 'x'))
+ for path in await _list_dir(str(tmp_path / 'x'))
} == {'/x/'}
# trailing `/` at the end of the path
@@ -582,14 +583,14 @@ async def test_list_dir(tmp_path, monkeypatch):
# => list dir path
assert {
path.replace(str(tmp_path), '')
- for path in await _list_dir(None, str(tmp_path / 'x') + '/')
+ for path in await _list_dir(str(tmp_path / 'x') + '/')
} == {'/x/y/', '/x/z'} # "y" is a dir, "z" is a file
# listing a file
# => noting to list, just return the file
assert {
path.replace(str(tmp_path), '')
- for path in await _list_dir(None, str(tmp_path / 'x' / 'z') + '/')
+ for path in await _list_dir(str(tmp_path / 'x' / 'z') + '/')
} == {'/x/z'}
@@ -599,12 +600,12 @@ async def test_list_flows():
Currently this only provides the textural options i.e. it doesn't list
"flows" running in a workflow, yet...
"""
- assert 'all' in await list_flows(None, None)
+ assert 'all' in await list_flows(None)
async def test_list_colours():
"""Test listing values for the --color option."""
- assert 'always' in await list_colours(None, None)
+ assert 'always' in await list_colours(None)
async def test_cli_detokenise():
@@ -715,3 +716,18 @@ def _get_current_completion_script_version(_script, lang):
out, err = capsys.readouterr()
assert not out # never write to stdout
assert not err
+
+
+async def test_prereqs_and_outputs():
+ """Test the error cases for listing task prereqs/outputs.
+
+ The success cases are tested in an integration test (requires a running
+ scheduler).
+ """
+ # if no tokens are provided, no prereqs or outputs are returned
+ assert await _list_prereqs_and_outputs([]) == ([], [])
+
+ # if an invalid workflow is provided, we can't list anything
+ assert await _list_prereqs_and_outputs(
+ [Tokens(workflow='no-such-workflow')]
+ ) == ([], [])
diff --git a/tests/unit/scripts/test_trigger.py b/tests/unit/scripts/test_trigger.py
index d464bda0a34..87f392d73f8 100644
--- a/tests/unit/scripts/test_trigger.py
+++ b/tests/unit/scripts/test_trigger.py
@@ -18,12 +18,17 @@
from optparse import Values
import pytest
-from typing import Iterable, Optional, Tuple, Type
+from typing import Optional, Tuple, Type
from cylc.flow.exceptions import InputError
from cylc.flow.option_parsers import Options
-from cylc.flow.flow_mgr import FLOW_ALL, FLOW_NEW, FLOW_NONE
-from cylc.flow.scripts.trigger import get_option_parser, _validate
+from cylc.flow.flow_mgr import (
+ FLOW_ALL,
+ FLOW_NEW,
+ FLOW_NONE,
+ validate_flow_opts
+)
+from cylc.flow.scripts.trigger import get_option_parser
Opts = Options(get_option_parser())
@@ -39,6 +44,13 @@
),
None
),
+ (
+ Opts(
+ flow=None,
+ flow_wait=False
+ ),
+ None
+ ),
(
Opts(
flow=[FLOW_NEW],
@@ -57,17 +69,6 @@
"Multiple flow options must all be integer valued"
)
),
- (
- Opts(
- flow=[FLOW_ALL],
- flow_wait=False,
- flow_descr="the quick brown fox"
- ),
- (
- InputError,
- "Metadata is only for new flows"
- )
- ),
(
Opts(
flow=["cheese"],
@@ -75,7 +76,7 @@
),
(
InputError,
- "Flow values must be integer, 'all', 'new', or 'none'"
+ "Flow values must be an integer, or 'all', 'new', or 'none'"
)
),
(
@@ -117,7 +118,7 @@ def test_validate(
if expected_err:
err, msg = expected_err
with pytest.raises(err) as exc:
- _validate(opts)
+ validate_flow_opts(opts)
assert msg in str(exc.value)
else:
- _validate(opts)
+ validate_flow_opts(opts)
diff --git a/tests/unit/test_flow_mgr.py b/tests/unit/test_flow_mgr.py
index 5fea08c1b97..c9171b02073 100644
--- a/tests/unit/test_flow_mgr.py
+++ b/tests/unit/test_flow_mgr.py
@@ -26,6 +26,7 @@
FAKE_NOW = datetime.datetime(2020, 12, 25, 17, 5, 55)
+FAKE_NOW_ISO = FAKE_NOW.isoformat()
@pytest.fixture
@@ -33,7 +34,7 @@ def patch_datetime_now(monkeypatch):
class mydatetime:
@classmethod
- def now(cls):
+ def now(cls, tz=None):
return FAKE_NOW
monkeypatch.setattr(datetime, 'datetime', mydatetime)
@@ -43,25 +44,53 @@ def test_all(
patch_datetime_now,
caplog: pytest.LogCaptureFixture,
):
+ """Test flow number management."""
+
db_mgr = WorkflowDatabaseManager()
flow_mgr = FlowMgr(db_mgr)
caplog.set_level(logging.INFO, CYLC_LOG)
- count = 1
meta = "the quick brown fox"
- msg1 = f"flow: {count} ({meta}) {FAKE_NOW}"
- assert flow_mgr.get_new_flow(meta) == count
+ assert flow_mgr.get_flow_num(None, meta) == 1
+ msg1 = f"flow: 1 ({meta}) {FAKE_NOW_ISO}"
assert f"New {msg1}" in caplog.messages
- count = 2
- meta = "jumped over the lazy"
- msg2 = f"flow: {count} ({meta}) {FAKE_NOW}"
- assert flow_mgr.get_new_flow(meta) == count
+ # automatic: expect 2
+ meta = "jumped over the lazy dog"
+ assert flow_mgr.get_flow_num(None, meta) == 2
+ msg2 = f"flow: 2 ({meta}) {FAKE_NOW_ISO}"
assert f"New {msg2}" in caplog.messages
+ # give flow 2: not a new flow
+ meta = "jumped over the moon"
+ assert flow_mgr.get_flow_num(2, meta) == 2
+ msg3 = f"flow: 2 ({meta}) {FAKE_NOW_ISO}"
+ assert f"New {msg3}" not in caplog.messages
+ assert f"Ignoring flow metadata \"{meta}\": 2 is not a new flow" in caplog.messages
+
+ # give flow 4: new flow
+ meta = "jumped over the cheese"
+ assert flow_mgr.get_flow_num(4, meta) == 4
+ msg4 = f"flow: 4 ({meta}) {FAKE_NOW_ISO}"
+ assert f"New {msg4}" in caplog.messages
+
+ # automatic: expect 3
+ meta = "jumped over the log"
+ assert flow_mgr.get_flow_num(None, meta) == 3
+ msg5 = f"flow: 3 ({meta}) {FAKE_NOW_ISO}"
+ assert f"New {msg5}" in caplog.messages
+
+ # automatic: expect 5 (skip over 4)
+ meta = "crawled under the log"
+ assert flow_mgr.get_flow_num(None, meta) == 5
+ msg6 = f"flow: 5 ({meta}) {FAKE_NOW_ISO}"
+ assert f"New {msg6}" in caplog.messages
flow_mgr._log()
assert (
"Flows:\n"
f"{msg1}\n"
- f"{msg2}"
+ f"{msg2}\n"
+ f"{msg4}\n"
+ f"{msg5}\n"
+ f"{msg6}"
) in caplog.messages
diff --git a/tests/unit/test_graph_parser.py b/tests/unit/test_graph_parser.py
index ddd443a3597..ca19b2060b4 100644
--- a/tests/unit/test_graph_parser.py
+++ b/tests/unit/test_graph_parser.py
@@ -778,6 +778,10 @@ def test_family_optional_outputs(qual, task_output):
"FAM => foo", # bare family on LHS
"Illegal family trigger"
],
+ [
+ "FAM:expire-all => foo",
+ "must be optional"
+ ],
]
)
def test_family_trigger_errors(graph, error):
@@ -819,6 +823,10 @@ def test_family_trigger_errors(graph, error):
"a:finish? => b",
"Pseudo-output a:finished can't be optional",
],
+ [
+ "a:expire => b",
+ "must be optional",
+ ],
]
)
def test_task_optional_output_errors_order(
diff --git a/tests/unit/test_id_match.py b/tests/unit/test_id_match.py
index d26e85b092d..8727023a864 100644
--- a/tests/unit/test_id_match.py
+++ b/tests/unit/test_id_match.py
@@ -43,6 +43,7 @@ def _task_proxy(id_, hier):
hier.append('root')
tdef = create_autospec(TaskDef, namespace_hierarchy=hier)
tdef.name = tokens['task']
+ tdef.expiration_offset = None
return TaskProxy(
Tokens('~user/workflow'),
tdef,
@@ -127,7 +128,7 @@ def test_filter_ids_task_mode(task_pool, ids, matched, not_matched):
{}
)
- _matched, _not_matched = filter_ids([pool], ids)
+ _matched, _not_matched = filter_ids(pool, ids)
assert [get_task_id(itask) for itask in _matched] == matched
assert _not_matched == not_matched
@@ -188,21 +189,21 @@ def test_filter_ids_cycle_mode(task_pool, ids, matched, not_matched):
{}
)
- _matched, _not_matched = filter_ids([pool], ids, out=IDTokens.Cycle)
+ _matched, _not_matched = filter_ids(pool, ids, out=IDTokens.Cycle)
assert _matched == [IntegerPoint(i) for i in matched]
assert _not_matched == not_matched
def test_filter_ids_invalid(caplog):
"""Ensure invalid IDs are handled elegantly."""
- matched, not_matched = filter_ids([{}], ['#'])
+ matched, not_matched = filter_ids({}, ['#'])
assert matched == []
assert not_matched == ['#']
assert caplog.record_tuples == [
('cylc', 30, 'No active tasks matching: #'),
]
caplog.clear()
- matched, not_matched = filter_ids([{}], ['#'], warn=False)
+ matched, not_matched = filter_ids({}, ['#'], warn=False)
assert caplog.record_tuples == []
@@ -216,7 +217,7 @@ def test_filter_ids_pattern_match_off(task_pool):
)
_matched, _not_matched = filter_ids(
- [pool],
+ pool,
['1/a'],
out=IDTokens.Task,
pattern_match=False,
@@ -238,7 +239,7 @@ def test_filter_ids_toggle_pattern_matching(task_pool, caplog):
# ensure pattern matching works
_matched, _not_matched = filter_ids(
- [pool],
+ pool,
ids,
out=IDTokens.Task,
pattern_match=True,
@@ -249,7 +250,7 @@ def test_filter_ids_toggle_pattern_matching(task_pool, caplog):
# ensure pattern matching can be disabled
caplog.clear()
_matched, _not_matched = filter_ids(
- [pool],
+ pool,
ids,
out=IDTokens.Task,
pattern_match=False,
@@ -285,7 +286,7 @@ def test_filter_ids_namespace_hierarchy(task_pool, ids, matched, not_matched):
)
_matched, _not_matched = filter_ids(
- [pool],
+ pool,
ids,
pattern_match=False,
)
diff --git a/tests/unit/test_prerequisite.py b/tests/unit/test_prerequisite.py
index d8981c2febf..410176e508c 100644
--- a/tests/unit/test_prerequisite.py
+++ b/tests/unit/test_prerequisite.py
@@ -18,6 +18,7 @@
from cylc.flow.cycling.loader import ISO8601_CYCLING_TYPE, get_point
from cylc.flow.prerequisite import Prerequisite
+from cylc.flow.id import Tokens
@pytest.fixture
@@ -61,10 +62,10 @@ def test_satisfied(prereq):
('2001', 'd', 'custom'): False,
}
# mark two prerequisites as satisfied
- prereq.satisfy_me({
- ('2000', 'b', 'succeeded'),
- ('2000', 'c', 'succeeded'),
- })
+ prereq.satisfy_me([
+ Tokens('2000/b:succeeded', relative=True),
+ Tokens('2000/c:succeeded', relative=True),
+ ])
assert prereq.satisfied == {
# the pre-initial dependency should be marked as satisfied
('1999', 'a', 'succeeded'): 'satisfied naturally',
diff --git a/tests/unit/test_subprocpool.py b/tests/unit/test_subprocpool.py
index feffdab19d0..c72ffc4d094 100644
--- a/tests/unit/test_subprocpool.py
+++ b/tests/unit/test_subprocpool.py
@@ -328,6 +328,7 @@ def test__run_command_exit_add_to_badhosts_log(caplog, mock_ctx):
SimpleNamespace(
name='t', dependencies={}, sequential='',
external_triggers=[], xtrig_labels={},
+ expiration_offset=None,
outputs={
'submitted': [None, None], 'submit-failed': [None, None]
},
diff --git a/tests/unit/test_task_state.py b/tests/unit/test_task_state.py
index 82e19f9e215..e655c74b7bb 100644
--- a/tests/unit/test_task_state.py
+++ b/tests/unit/test_task_state.py
@@ -21,9 +21,12 @@
from cylc.flow.task_trigger import Dependency, TaskTrigger
from cylc.flow.task_state import (
TaskState,
+ TASK_STATUS_PREPARING,
+ TASK_STATUS_SUBMIT_FAILED,
+ TASK_STATUS_SUBMITTED,
TASK_STATUS_SUCCEEDED,
- TASK_STATUS_FAILED,
TASK_STATUS_WAITING,
+ TASK_STATUS_RUNNING,
)
@@ -79,58 +82,6 @@ def test_reset(state, is_held, should_reset):
assert tstate.status == state
-@pytest.mark.parametrize(
- 'before,after,outputs',
- [
- (
- (TASK_STATUS_WAITING, False),
- (TASK_STATUS_SUCCEEDED, False),
- ['submitted', 'started', 'succeeded']
- ),
- (
- (TASK_STATUS_WAITING, False),
- (TASK_STATUS_FAILED, False),
- ['submitted', 'started', 'failed']
- ),
- (
- (TASK_STATUS_WAITING, False),
- (TASK_STATUS_FAILED, None), # no change to is_held
- ['submitted', 'started', 'failed']
- ),
- (
- (TASK_STATUS_WAITING, False),
- (None, False), # no change to status
- []
- ),
- # only reset task outputs if not setting task to held
- # https://github.com/cylc/cylc-flow/pull/2116
- (
- (TASK_STATUS_WAITING, False),
- (TASK_STATUS_FAILED, True),
- []
- ),
- # only reset task outputs if not setting task to held
- # https://github.com/cylc/cylc-flow/pull/2116
- (
- (TASK_STATUS_WAITING, False),
- (TASK_STATUS_SUCCEEDED, True),
- []
- )
- ]
-)
-def test_reset_outputs(before, after, outputs):
- """Test that outputs are reset correctly on state changes."""
- tdef = TaskDef('foo', {}, 'live', '123', '123')
-
- orig_status, orig_is_held = before
- new_status, new_is_held = after
-
- tstate = TaskState(tdef, '123', orig_status, orig_is_held)
- assert tstate.outputs.get_completed() == []
- tstate.reset(status=new_status, is_held=new_is_held)
- assert tstate.outputs.get_completed() == outputs
-
-
def test_task_prereq_duplicates(set_cycling_type):
"""Test prerequisite duplicates from multiple recurrences are discarded."""
@@ -152,3 +103,19 @@ def test_task_prereq_duplicates(set_cycling_type):
prereqs = [p.satisfied for p in tstate.prerequisites]
assert prereqs == [{("1", "a", "succeeded"): False}]
+
+
+def test_task_state_order():
+ """Test is_gt and is_gte methods."""
+
+ tdef = TaskDef('foo', {}, 'live', IntegerPoint("1"), IntegerPoint("1"))
+ tstate = TaskState(tdef, IntegerPoint("1"), TASK_STATUS_SUBMITTED, False)
+
+ assert tstate.is_gt(TASK_STATUS_WAITING)
+ assert tstate.is_gt(TASK_STATUS_PREPARING)
+ assert tstate.is_gt(TASK_STATUS_SUBMIT_FAILED)
+ assert not tstate.is_gt(TASK_STATUS_SUBMITTED)
+ assert tstate.is_gte(TASK_STATUS_SUBMITTED)
+ assert not tstate.is_gt(TASK_STATUS_RUNNING)
+ assert not tstate.is_gte(TASK_STATUS_RUNNING)
+