diff --git a/tests/functional/hold-release/11-retrying/flow.cylc b/tests/functional/hold-release/11-retrying/flow.cylc
index 0e08699af09..dbd1ec3ed59 100644
--- a/tests/functional/hold-release/11-retrying/flow.cylc
+++ b/tests/functional/hold-release/11-retrying/flow.cylc
@@ -18,7 +18,7 @@ t-retry-able => t-analyse
     [[t-hold-release]]
         script = """
             cylc__job__poll_grep_workflow_log -E \
-                '1/t-retry-able/01:running.* \(received\)failed'
+                '\[1/t-retry-able:waiting\] failed/ERR'

             cylc__job__poll_grep_workflow_log -E \
                 '1/t-retry-able/01:running.* => waiting'
diff --git a/tests/functional/reload/25-xtriggers.t b/tests/functional/reload/25-xtriggers.t
index 0269a2e3775..fdaa4ebb3f1 100644
--- a/tests/functional/reload/25-xtriggers.t
+++ b/tests/functional/reload/25-xtriggers.t
@@ -42,7 +42,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__'
     [[reload]]
         script = """
             # wait for "broken" to fail
-            cylc__job__poll_grep_workflow_log -E '1/broken/01.* \(received\)failed/ERR'
+            cylc__job__poll_grep_workflow_log -E '1/broken.*failed/ERR'
             # fix "broken" to allow it to pass
             sed -i 's/false/true/' "${CYLC_WORKFLOW_RUN_DIR}/flow.cylc"
             # reload the workflow
@@ -63,7 +63,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" cylc play "${WORKFLOW_NAME}" --no-detach
 log_scan "${TEST_NAME_BASE}-scan" \
     "$(cylc cat-log -m p "${WORKFLOW_NAME}")" \
     1 1 \
-    '1/broken.* (received)failed/ERR'
+    '1/broken.*failed/ERR'

 log_scan "${TEST_NAME_BASE}-scan" \
     "$(cylc cat-log -m p "${WORKFLOW_NAME}")" 1 1 \
diff --git a/tests/integration/test_task_events_mgr.py b/tests/integration/test_task_events_mgr.py
index 6e996bced6d..a8da192b863 100644
--- a/tests/integration/test_task_events_mgr.py
+++ b/tests/integration/test_task_events_mgr.py
@@ -76,17 +76,22 @@ async def test__insert_task_job(flow, one_conf, scheduler, start, validate):
     with correct submit number.
     """
     conf = {
-        'scheduling': {'graph': {'R1': 'rhenas'}},
-        'runtime': {'rhenas': {'simulation': {
-            'fail cycle points': '1',
-            'fail try 1 only': False,
-        }}}}
+        "scheduling": {"graph": {"R1": "rhenas"}},
+        "runtime": {
+            "rhenas": {
+                "simulation": {
+                    "fail cycle points": "1",
+                    "fail try 1 only": False,
+                }
+            }
+        },
+    }
     id_ = flow(conf)
     schd = scheduler(id_)
     async with start(schd):
         # Set task to running:
-        itask = schd.pool.get_tasks()[0]
-        itask.state.status = 'running'
+        itask = schd.pool.get_tasks()[0]
+        itask.state.status = "running"
         itask.submit_num += 1

         # Not run _insert_task_job yet:
@@ -185,9 +190,28 @@ async def test__process_message_failed_with_retry(one, start):
         'submission retry delays': [1]
     })

-    # Process failed message:
+    # Process submit failed message with and without retries:
+    one.task_events_mgr._process_message_submit_failed(
+        fail_once, None, 1, False)
+    last_record = LOG.records[-1]
+    assert last_record.levelno == logging.WARNING
+    assert '1/one:waiting(queued)' in last_record.message
+
+    one.task_events_mgr._process_message_submit_failed(
+        fail_once, None, 2, False)
+    last_record = LOG.records[-1]
+    assert last_record.levelno == logging.ERROR
+    assert 'submission failed' in last_record.message
+
+    # Process failed message with and without retries:
     one.task_events_mgr._process_message_failed(
         fail_once, None, 'failed', False, 'failed/OOK')
+    last_record = LOG.records[-1]
+    assert last_record.levelno == logging.WARNING
+    assert 'failed/OOK' in last_record.message

-    # Check that failure reported:
-    assert 'failed/OOK' in LOG.messages[-1]
+    one.task_events_mgr._process_message_failed(
+        fail_once, None, 'failed', False, 'failed/OOK')
+    last_record = LOG.records[-1]
+    assert last_record.levelno == logging.ERROR
+    assert 'failed/OOK' in last_record.message