diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
index 176a751f..a6a886b7 100644
--- a/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseFlowMeasurement.py
@@ -1,3 +1,4 @@
+import time
 from dataclasses import dataclass
 import textwrap
 from typing import Optional, Union
@@ -10,6 +11,8 @@
 from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
 from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import BaseMeasurement
 from lnst.RecipeCommon.Perf.Measurements.Results import FlowMeasurementResults, AggregatedFlowMeasurementResults
+from lnst.RecipeCommon.Perf.Results import PerfInterval
+from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
 from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
 
 
@@ -184,3 +187,44 @@ def aggregate_multi_flow_results(results):
             aggregated_result.add_results(parallel_result)
 
         return [aggregated_result]
+
+    def collect_simulated_results(self):
+        res = []
+        for test_flow in self.flows:
+            flow_results = FlowMeasurementResults(
+                measurement=self,
+                flow=test_flow,
+                warmup_duration=test_flow.warmup_duration,
+            )
+            flow_results.generator_results = ParallelPerfResult(
+                [
+                    SequentialPerfResult(
+                        [PerfInterval(0, 1, "bits", time.time())]
+                        * (test_flow.warmup_duration * 2 + test_flow.duration)
+                    )
+                ]
+            )
+            flow_results.generator_cpu_stats = PerfInterval(
+                0,
+                (test_flow.warmup_duration * 2 + test_flow.duration),
+                "cpu_percent",
+                time.time(),
+            )
+
+            flow_results.receiver_results = ParallelPerfResult(
+                [
+                    SequentialPerfResult(
+                        [PerfInterval(0, 1, "bits", time.time())]
+                        * (test_flow.warmup_duration * 2 + test_flow.duration)
+                    )
+                ]
+            )
+            flow_results.receiver_cpu_stats = PerfInterval(
+                0,
+                (test_flow.warmup_duration * 2 + test_flow.duration),
+                "cpu_percent",
+                time.time(),
+            )
+
+            res.append(flow_results)
+        return res
diff --git a/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
index 812977f2..a69f44a8 100644
--- a/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/BaseMeasurement.py
@@ -17,12 +17,21 @@ def recipe_conf(self):
     def start(self):
         raise NotImplementedError()
 
+    def simulate_start(self):
+        return self.start()
+
     def finish(self):
         raise NotImplementedError()
 
+    def simulate_finish(self):
+        return self.finish()
+
     def collect_results(self):
         raise NotImplementedError()
 
+    def collect_simulated_results(self):
+        return self.collect_results()
+
     @classmethod
     def report_results(cls, recipe, results):
         raise NotImplementedError()
diff --git a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
index bd1b599a..b219dcdc 100644
--- a/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/IperfFlowMeasurement.py
@@ -1,5 +1,6 @@
 import re
 import time
+import logging
 from typing import List
 
 from lnst.Common.IpAddress import ipaddress
@@ -70,6 +71,21 @@ def start(self):
 
         self._running_measurements = test_flows
 
+    def simulate_start(self):
+        if len(self._running_measurements) > 0:
+            raise MeasurementError("Measurement already running!")
+
+        test_flows = self._prepare_test_flows(self.flows)
+
+        result = None
+        for flow in test_flows:
+            flow.server_job = flow.server_job.netns.run('echo simulated start', bg=True)
+
+        for flow in test_flows:
+            flow.client_job = flow.client_job.netns.run('echo simulated start', bg=True)
+
+        self._running_measurements = test_flows
+
     def finish(self):
         test_flows = self._running_measurements
         try:
@@ -85,6 +101,17 @@ def finish(self):
 
         self._running_measurements = []
         self._finished_measurements = test_flows
 
+    def simulate_finish(self):
+        logging.info("Simulating minimal 1s measurement duration")
+        time.sleep(2)
+        test_flows = self._running_measurements
+        for flow in test_flows:
+            flow.client_job.wait()
+            flow.server_job.wait()
+
+        self._running_measurements = []
+        self._finished_measurements = test_flows
+
     def collect_results(self):
         test_flows = self._finished_measurements
diff --git a/lnst/RecipeCommon/Perf/Measurements/NeperFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/NeperFlowMeasurement.py
index 370935a8..a50dcf5a 100644
--- a/lnst/RecipeCommon/Perf/Measurements/NeperFlowMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/NeperFlowMeasurement.py
@@ -1,4 +1,5 @@
 import time
+import logging
 from typing import List, Dict, Tuple
 from lnst.Common.IpAddress import ipaddress
 from lnst.Controller.Job import Job
@@ -45,6 +46,21 @@ def start(self):
 
         self._running_measurements = test_flows
 
+    def simulate_start(self):
+        if len(self._running_measurements) > 0:
+            raise MeasurementError("Measurement already running!")
+
+        test_flows = self._prepare_test_flows(self.flows)
+
+        result = None
+        for flow in test_flows:
+            flow.server_job = flow.server_job.netns.run('echo simulated start', bg=True)
+
+        for flow in test_flows:
+            flow.client_job = flow.client_job.netns.run('echo simulated start', bg=True)
+
+        self._running_measurements = test_flows
+
     def finish(self):
         test_flows = self._running_measurements
         try:
@@ -60,6 +76,17 @@ def finish(self):
 
         self._running_measurements = []
         self._finished_measurements = test_flows
 
+    def simulate_finish(self):
+        logging.info("Simulating minimal 1s measurement duration")
+        time.sleep(1)
+        test_flows = self._running_measurements
+        for flow in test_flows:
+            flow.client_job.wait()
+            flow.server_job.wait()
+
+        self._running_measurements = []
+        self._finished_measurements = test_flows
+
     def _prepare_test_flows(self, flows: List[Flow]):
         test_flows = []
         for flow in flows:
diff --git a/lnst/RecipeCommon/Perf/Measurements/RDMABandwidthMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/RDMABandwidthMeasurement.py
index f78c96a0..758e229a 100644
--- a/lnst/RecipeCommon/Perf/Measurements/RDMABandwidthMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/RDMABandwidthMeasurement.py
@@ -1,5 +1,6 @@
 from typing import Any, Optional
 import time
+import logging
 
 from lnst.Controller.Job import Job
 from lnst.Controller.Recipe import BaseRecipe
@@ -41,6 +42,16 @@ def start(self) -> None:
         for endpoint_test in self._endpoint_tests:
             endpoint_test.client_job.start(bg=True)
 
+    def simulate_start(self):
+        self._endpoint_tests.extend(self._prepare_endpoint_tests())
+
+        for endpoint_test in self._endpoint_tests:
+            endpoint_test.server_job = endpoint_test.server_job.netns.run("echo simulated start", bg=True)
+
+        self._start_timestamp = time.time()
+        for endpoint_test in self._endpoint_tests:
+            endpoint_test.client_job = endpoint_test.client_job.netns.run("echo simulated start", bg=True)
+
     def finish(self) -> None:
         try:
             for endpoint_test in self._endpoint_tests:
@@ -52,6 +63,11 @@ def finish(self) -> None:
                 endpoint_test.client_job.kill()
                 endpoint_test.server_job.kill()
 
+    def simulate_finish(self):
+        logging.info("Simulating minimal 1s measurement duration")
+        time.sleep(1)
+        self.finish()
+
     def collect_results(self) -> list[RDMABandwidthMeasurementResults]:
         results: list[RDMABandwidthMeasurementResults] = []
         for endpoint_test in self._endpoint_tests:
diff --git a/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
index 064795f9..b87f8007 100644
--- a/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/TRexFlowMeasurement.py
@@ -1,6 +1,7 @@
 import time
 import signal
 import re
+import logging
 
 from lnst.Controller.RecipeResults import ResultLevel
 from lnst.RecipeCommon.Perf.Results import PerfInterval
@@ -67,6 +68,21 @@ def start(self):
 
         self._running_measurements = tests
 
+    def simulate_start(self):
+        if len(self._running_measurements) > 0:
+            raise MeasurementError("Measurement already running!")
+
+        tests = self._prepare_tests(self._flows)
+
+        result = None
+        for test in tests:
+            test.server_job = test.server_job.netns.run("echo simulated start", bg=True)
+
+        for test in tests:
+            test.client_job = test.client_job.netns.run("echo simulated start", bg=True)
+
+        self._running_measurements = tests
+
     def finish(self):
         tests = self._running_measurements
         try:
@@ -84,6 +100,17 @@ def finish(self):
 
         self._running_measurements = []
         self._finished_measurements = tests
 
+    def simulate_finish(self):
+        logging.info("Simulating minimal 1s measurement duration")
+        time.sleep(1)
+        tests = self._running_measurements
+        for test in tests:
+            test.client_job.wait()
+            test.server_job.wait()
+
+        self._running_measurements = []
+        self._finished_measurements = tests
+
     def _prepare_tests(self, flows):
         tests = []
diff --git a/lnst/RecipeCommon/Perf/Measurements/TcRunMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/TcRunMeasurement.py
index 22d185af..45b35bc5 100644
--- a/lnst/RecipeCommon/Perf/Measurements/TcRunMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/TcRunMeasurement.py
@@ -1,3 +1,4 @@
+import time
 import logging
 from tempfile import NamedTemporaryFile
 from typing import Optional
@@ -151,6 +152,16 @@ def start(self):
             job.start(bg=True)
             self._running_jobs.append(job)
 
+    def simulate_start(self):
+        if len(self._running_jobs) > 0:
+            raise MeasurementError("Measurement already running!")
+
+        jobs = self._prepare_jobs()
+
+        for job in jobs:
+            job = job.netns.run("echo simulated start", bg=True)
+            self._running_jobs.append(job)
+
     def _prepare_jobs(self) -> list[Job]:
         params: dict = {
             "batchfiles": [i.batchfile_path for i in self.instance_configs],
@@ -176,6 +187,11 @@ def finish(self):
 
         self._running_jobs = []
         self._finished_jobs = jobs
 
+    def simulate_finish(self):
+        logging.info("Simulating minimal 1s measurement duration")
+        time.sleep(1)
+        self.finish()
+
     def _make_instances_cfgs(self) -> list[TcRunInstance]:
         #TODO perhaps make this be something the recipe or a ResultGenerator creates
         configs = []
@@ -205,6 +221,17 @@ def collect_results(self) -> list[TcRunMeasurementResults]:
 
         return [run_result]
 
+    def collect_simulated_results(self):
+        run_result = TcRunMeasurementResults(
+            measurement=self,
+            device=self.device,
+        )
+        run_result.rule_install_rate = ParallelPerfResult(
+            [PerfInterval(0, 1, "rules", time.time())]
+        )
+        run_result.run_success = True
+        return [run_result]
+
     def _get_instance_interval(self, instance_data: dict):
         return PerfInterval(
             value=self._rules_per_instance,
diff --git a/lnst/RecipeCommon/Perf/Measurements/XDPBenchMeasurement.py b/lnst/RecipeCommon/Perf/Measurements/XDPBenchMeasurement.py
index 437f277a..2e168737 100644
--- a/lnst/RecipeCommon/Perf/Measurements/XDPBenchMeasurement.py
+++ b/lnst/RecipeCommon/Perf/Measurements/XDPBenchMeasurement.py
@@ -1,3 +1,6 @@
+import time
+import logging
+
 from lnst.RecipeCommon.Perf.Measurements.Results.AggregatedXDPBenchMeasurementResults import (
     AggregatedXDPBenchMeasurementResults,
 )
@@ -47,6 +50,16 @@ def start(self):
 
         self._running_measurements = net_flows
 
+    def simulate_start(self):
+        net_flows = self._prepare_flows()
+        for flow in net_flows:
+            flow.server_job = flow.server_job.netns.run("echo simulated start", bg=True)
+            flow.client_job = flow.client_job.netns.run("echo simulated start", bg=True)
+            # server starts immediately, no need to wait
+            self._running_measurements.append(flow)
+
+        self._running_measurements = net_flows
+
     def _prepare_server(self, flow: Flow):
         params = {
             "command": self.command,
@@ -97,6 +110,15 @@ def finish(self):
 
         self._finished_measurements = self._running_measurements
         self._running_measurements = []
 
+    def simulate_finish(self):
+        logging.info("Simulating minimal 1s measurement duration")
+        time.sleep(1)
+        for flow in self._running_measurements:
+            flow.server_job.wait()
+            flow.client_job.wait()
+        self._finished_measurements = self._running_measurements
+        self._running_measurements = []
+
     def collect_results(self):
         test_flows = self._finished_measurements
diff --git a/lnst/RecipeCommon/Perf/Recipe.py b/lnst/RecipeCommon/Perf/Recipe.py
index ac868258..5bdbdc47 100644
--- a/lnst/RecipeCommon/Perf/Recipe.py
+++ b/lnst/RecipeCommon/Perf/Recipe.py
@@ -24,16 +24,22 @@ def __init__(
         measurements: List[BaseMeasurement],
         iterations: int,
         parent_recipe_config: Any = None,
+        simulate_measurements: bool = False,
     ):
         self._measurements = measurements
         self._evaluators = dict()
         self._iterations = iterations
         self._parent_recipe_config = parent_recipe_config
+        self._simulate_measurements = simulate_measurements
 
     @property
     def measurements(self):
         return self._measurements
 
+    @property
+    def simulate_measurements(self):
+        return self._simulate_measurements
+
     @property
     def evaluators(self):
         return dict(self._evaluators)
@@ -167,11 +173,23 @@ def perf_test_iteration(
 
         try:
             for measurement in recipe_conf.measurements:
-                measurement.start()
+                if recipe_conf.simulate_measurements:
+                    logging.info(f"Simulating start of measurement {measurement}")
+                    measurement.simulate_start()
+                else:
+                    measurement.start()
             for measurement in reversed(recipe_conf.measurements):
-                measurement.finish()
+                if recipe_conf.simulate_measurements:
+                    logging.info(f"Simulating finish of measurement {measurement}")
+                    measurement.simulate_finish()
+                else:
+                    measurement.finish()
             for measurement in recipe_conf.measurements:
-                measurement_results = measurement.collect_results()
+                if recipe_conf.simulate_measurements:
+                    logging.info(f"Simulating result collection of measurement {measurement}")
+                    measurement_results = measurement.collect_simulated_results()
+                else:
+                    measurement_results = measurement.collect_results()
                 results.add_measurement_results(
                     measurement, measurement_results
                 )
diff --git a/lnst/Recipes/ENRT/BaseEnrtRecipe.py b/lnst/Recipes/ENRT/BaseEnrtRecipe.py
index 68979680..b81ba52f 100644
--- a/lnst/Recipes/ENRT/BaseEnrtRecipe.py
+++ b/lnst/Recipes/ENRT/BaseEnrtRecipe.py
@@ -165,6 +165,12 @@ class BaseEnrtRecipe(
         specify how many times should each performance measurement be repeated
         to generate cumulative results which can be statistically analyzed.
     :type perf_iterations: :any:`IntParam` (default 5)
+
+    :param perf_test_simulation:
+        Parameter that switches performance testing into a simulation-only
+        mode - no measurements are actually started and each one simply
+        generates zero-value measurement results as if it had run.
+    :type perf_test_simulation: :any:`BoolParam` (default False)
     """
 
     driver = StrParam()
@@ -181,6 +187,7 @@ class BaseEnrtRecipe(
 
     # generic perf test params
     perf_iterations = IntParam(default=5)
+    perf_test_simulation = BoolParam(default=False)
 
     def test(self):
         """Main test loop shared by all the Enrt recipes
@@ -440,6 +447,7 @@ def generate_perf_configurations(self, config):
             measurements=measurements,
             iterations=self.params.perf_iterations,
             parent_recipe_config=copy.deepcopy(config),
+            simulate_measurements=self.params.perf_test_simulation,
         )
 
         self.register_perf_evaluators(perf_conf)
diff --git a/lnst/Recipes/ENRT/OvS_DPDK_PvP.py b/lnst/Recipes/ENRT/OvS_DPDK_PvP.py
index a388e170..dfd1aa23 100644
--- a/lnst/Recipes/ENRT/OvS_DPDK_PvP.py
+++ b/lnst/Recipes/ENRT/OvS_DPDK_PvP.py
@@ -6,7 +6,7 @@
 from lnst.Recipes.ENRT.BasePvPRecipe import VirtioDevice, VirtioType
 from lnst.Controller import HostReq, DeviceReq, RecipeParam
 from lnst.Common.Logs import log_exc_traceback
-from lnst.Common.Parameters import IntParam, Param, StrParam, IPv4NetworkParam
+from lnst.Common.Parameters import IntParam, Param, StrParam, IPv4NetworkParam, BoolParam
 from lnst.Common.IpAddress import interface_addresses
 from lnst.Tests.TestPMD import TestPMD
 
@@ -64,6 +64,8 @@ class OvSDPDKPvPRecipe(BasePvPRecipe):
     #doesn't do anything for now...
     perf_streams = IntParam(default=1)
 
+    perf_test_simulation = BoolParam(default=False)
+
     def test(self):
         self.check_dependencies()
         ping_config = self.gen_ping_config()
@@ -173,6 +175,7 @@ def generate_perf_config(self, config):
                 flows_measurement,
             ],
             iterations=self.params.perf_iterations,
+            simulate_measurements=self.params.perf_test_simulation,
         )
         perf_conf.register_evaluators(cpu_measurement, self.cpu_perf_evaluators)
         perf_conf.register_evaluators(flows_measurement, self.net_perf_evaluators)
diff --git a/lnst/Recipes/ENRT/TrafficControlRecipe.py b/lnst/Recipes/ENRT/TrafficControlRecipe.py
index 76f36b02..364fe486 100644
--- a/lnst/Recipes/ENRT/TrafficControlRecipe.py
+++ b/lnst/Recipes/ENRT/TrafficControlRecipe.py
@@ -3,7 +3,7 @@
 from contextlib import contextmanager
 
 from lnst.Common.LnstError import LnstError
-from lnst.Common.Parameters import ListParam, StrParam, IntParam, ChoiceParam
+from lnst.Common.Parameters import ListParam, StrParam, IntParam, ChoiceParam, BoolParam
 from lnst.Controller import HostReq, DeviceReq, RecipeParam
 from lnst.Controller.Namespace import Namespace
 from lnst.RecipeCommon import BaseResultEvaluator
@@ -40,6 +40,8 @@ class TrafficControlRecipe(PerfRecipe):
         default="off",
     )
 
+    perf_test_simulation = BoolParam(default=False)
+
     def test(self):
         with self._test_wide_context() as config:
             self.do_tc_test_perf_recipe(config)
@@ -67,6 +69,7 @@ def test_wide_configuration(self) -> TcRecipeConfiguration:
         config = TcRecipeConfiguration(
             measurements=[cpu_measurement, measurement],
             iterations=1,
+            simulate_measurements=self.params.perf_test_simulation,
         )
 
         config.register_evaluators(measurement, self.tc_run_evaluators)
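
For illustration, a minimal sketch (not part of the patch) of how a measurement class plugs into the simulate_* hooks introduced in BaseMeasurement above: each hook defaults to its real counterpart, so a measurement only overrides what it wants to stub out. The DummyMeasurement class and the shape of its simulated result are hypothetical; only BaseMeasurement, PerfInterval and the hook names come from the patch.

import time
import logging

from lnst.RecipeCommon.Perf.Measurements.BaseMeasurement import BaseMeasurement
from lnst.RecipeCommon.Perf.Results import PerfInterval


class DummyMeasurement(BaseMeasurement):
    # Hypothetical measurement, used only to illustrate the hook contract.
    def start(self):
        ...  # start the real measurement jobs in the background

    def finish(self):
        ...  # wait for (or kill) the real measurement jobs

    def collect_results(self):
        ...  # parse real job output into result objects

    # The simulate_* hooks mirror the pattern used by the measurements in
    # this patch: no real jobs are started and zero-value results are returned.
    def simulate_start(self):
        logging.info("Simulated start, no jobs spawned")

    def simulate_finish(self):
        logging.info("Simulating minimal 1s measurement duration")
        time.sleep(1)

    def collect_simulated_results(self):
        # a zero value over a 1 second interval, mirroring the
        # collect_simulated_results implementations above
        return [PerfInterval(0, 1, "bits", time.time())]

With a measurement like this registered, Recipe.perf_test_iteration (as changed above) calls the simulate_* variants whenever the perf recipe configuration is created with simulate_measurements=True, which the recipes in this patch expose as the perf_test_simulation boolean parameter.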