From 0d56ae3dc69bad514dd4cc673eeb3f41c2efe679 Mon Sep 17 00:00:00 2001
From: bjones1
Date: Tue, 14 Sep 2021 16:33:09 +0100
Subject: [PATCH] Add: Server-side grading using JavaScript from the RS
 components.

---
 bookserver/config.py              |   6 +-
 bookserver/crud.py                |  26 +++-
 bookserver/internal/feedback.py   | 233 ++++++++++++++++++++++--------
 bookserver/models.py              |   4 +-
 bookserver/routers/assessment.py  |  64 ++++++--
 bookserver/routers/rslogging.py   |  20 ++-
 bookserver/schemas.py             |  12 ++
 mypy.ini                          |   3 +
 pyproject.toml                    |   1 +
 test/conftest.py                  |  13 +-
 test/test_runestone_components.py |  33 +++--
 test/toctree.rst                  |   2 +-
 12 files changed, 314 insertions(+), 103 deletions(-)

diff --git a/bookserver/config.py b/bookserver/config.py
index 1ebed19f..b1421df5 100644
--- a/bookserver/config.py
+++ b/bookserver/config.py
@@ -55,9 +55,9 @@ class Settings(BaseSettings):
     google_ga: str = ""

     # Provide a path to the book server files. The leading underscore prevents environment variables from affecting this value. See the `docs `_, which don't say this explicitly, but testing confirms it.
-    _book_server_path: str = str(
-        Path(pkg_resources.resource_filename("bookserver", "")).absolute()
-    )
+    _book_server_path: Path = Path(
+        pkg_resources.resource_filename("bookserver", "")
+    ).absolute()

     # _`book_path`: specify the directory to serve books from.
     book_path: Path = Path.home() / "Runestone/books"

diff --git a/bookserver/crud.py b/bookserver/crud.py
index 18375bf2..82012929 100644
--- a/bookserver/crud.py
+++ b/bookserver/crud.py
@@ -15,7 +15,8 @@
 import datetime
 import json
 from collections import namedtuple
-from typing import Dict, List, Optional
+from random import randrange
+from typing import Dict, List, Optional, Tuple

 # Third-party imports
 # -------------------
@@ -65,6 +66,7 @@
     UserSubChapterProgressValidator,
     runestone_component_dict,
 )
+from .schemas import SeedRequest

 # Map from the ``event`` field of a ``LogItemIncoming`` to the database table used to store data associated with this event.
 EVENT2TABLE = {
@@ -216,9 +218,21 @@ async def create_answer_table_entry(
     return rcd.validator.from_orm(new_entry)  # type: ignore


+# Create a new random seed for a problem, discarding any previous answers.
+async def create_seed(seed_request: SeedRequest):
+    sr = seed_request.dict()
+    event = sr.pop("event")
+    rcd = runestone_component_dict[EVENT2TABLE[event]]
+    new_entry = rcd.model(**sr, correct=False, timestamp=datetime.datetime.utcnow())
+    new_entry.seed = randrange(2 ** 32 - 1)
+    async with async_session.begin() as session:
+        session.add(new_entry)
+
+
+# Return the last table entry as a Pydantic schema, plus True if this table contains a seed or False if not.
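To make the seed lifecycle concrete, here is a minimal sketch of what ``create_seed`` plus this fetch accomplish, with a plain list standing in for the ORM and the answer table; every name in it is illustrative rather than part of this patch::

    import datetime
    from random import randrange

    rows = []  # A stand-in for an answer table such as ``fitb_answers``.

    def create_seed_sketch(sid, div_id):
        # A fresh 32-bit seed; the newest row supersedes any older answers.
        rows.append(
            dict(sid=sid, div_id=div_id, seed=randrange(2 ** 32 - 1),
                 correct=False, timestamp=datetime.datetime.utcnow())
        )

    def fetch_last_sketch(sid, div_id):
        # Mirrors the ``order_by(tbl.timestamp.desc())`` query: newest row wins.
        matches = [r for r in rows if r["sid"] == sid and r["div_id"] == div_id]
        return max(matches, key=lambda r: r["timestamp"], default=None)

    create_seed_sketch("student_1", "test_fitb_1")
    assert fetch_last_sketch("student_1", "test_fitb_1")["seed"] < 2 ** 32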
 async def fetch_last_answer_table_entry(
     query_data: schemas.AssessmentRequest,
-) -> schemas.LogItemIncoming:
+) -> Tuple[schemas.LogItemIncoming, bool]:
     rcd = runestone_component_dict[EVENT2TABLE[query_data.event]]
     tbl = rcd.model
     query = (
@@ -233,9 +247,9 @@ async def fetch_last_answer_table_entry(
         .order_by(tbl.timestamp.desc())
     )
     async with async_session() as session:
-        res = await session.execute(query)
+        res = (await session.execute(query)).scalars().first()
         rslogger.debug(f"res = {res}")
-        return rcd.validator.from_orm(res.scalars().first())  # type: ignore
+        return rcd.validator.from_orm(res), bool(getattr(tbl, "seed", False))  # type: ignore


 async def fetch_last_poll_response(sid: str, course_name: str, poll_id: str) -> str:
@@ -389,9 +403,9 @@ async def is_server_feedback(div_id, course):
     feedback = query_results and query_results.Question.feedback
     # If there's feedback and a login is required (necessary for server-side grading), return the decoded feedback.
     if feedback and query_results.Courses.login_required:
-        return json.loads(feedback)
+        return json.loads(feedback), query_results.Courses.base_course
     # Otherwise, grade on the client.
-    return None
+    return None, None


 # Development and Testing Utils

diff --git a/bookserver/internal/feedback.py b/bookserver/internal/feedback.py
index 13b881a0..154ae8ff 100644
--- a/bookserver/internal/feedback.py
+++ b/bookserver/internal/feedback.py
@@ -9,16 +9,16 @@
 #
 # Standard library
 # ----------------
-import ast
-from bookserver.crud import fetch_course
+from functools import lru_cache
 import json
 import os
-import re
+from pathlib import Path
 import tempfile
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict

 # Third-party imports
 # -------------------
+import js2py
 from runestone.lp.lp_common_lib import (
     STUDENT_SOURCE_PATH,
     code_here_comment,
@@ -43,6 +43,133 @@ def init_graders():
         runestone_component_dict[table_name].grader = grader


+# Provide test code with a way to send random numbers. See `RAND_FUNC `. To do so, read from the file. Return 0 as a "random" value if we can't read from the file (or even open it).
+class TestFileValues:
+    def __init__(self):
+        self.values = []
+        self.index = 0
+        self.test_file_path = Path(settings._book_server_path / "../test/rand.txt")
+        self.stat = None
+
+    def get_value(self):
+        # If the file changed, re-read values from it.
+        try:
+            stat = self.test_file_path.stat()
+        except Exception:
+            pass
+        else:
+            if stat != self.stat:
+                self._read_test_file()
+
+        # If we have values from a previous read of the file, return them.
+        if self.index < len(self.values):
+            self.index += 1
+            return self.values[self.index - 1]
+
+        # Re-use these values if possible.
+        if len(self.values):
+            self.index = 1
+            return self.values[0]
+
+        # Otherwise, return a "random" value of 0.
+        return 0
+
+    # Read the test file.
+    def _read_test_file(self):
+        try:
+            with open(self.test_file_path) as f:
+                lines = f.readlines()
+            self.values = [float(v) for v in lines]
+            self.index = 0
+            self.stat = self.test_file_path.stat()
+        except Exception:
+            pass
+
+
+# Make this global so its state remains between calls to ``get_js_context``.
+test_file_values = TestFileValues()
+
+
+# Load the JavaScript context needed for dynamic problems. Cache the results to make grading multiple problems faster.
+@lru_cache(maxsize=16)
+def get_js_context(book_path: Path):
+    # By default, Babel assigns to ``exports`` before defining it. This is fine in the browser, but makes js2py choke. Pre-define it.
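For example, the js2py pattern used below can be exercised on its own; this is a hedged sketch with illustrative names, not code from this patch::

    import js2py

    # Pre-define ``exports`` and expose a Python callable to the JS context.
    ctx = js2py.EvalJs(dict(exports={}, rs_test_rand=lambda: 0.25))
    ctx.execute("var doubled = rs_test_rand() * 2;")
    assert ctx.doubled == 0.5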
+    # Also, provide a way for tests to inject pre-defined "random" numbers.
+    context = js2py.EvalJs(dict(exports={}, rs_test_rand=test_file_values.get_value))
+
+    # These functions don't exist in ES5.1, but FITB code uses them. Here are simple polyfills. See `MDN's Object.entries polyfill `_, `MDN Object.assign polyfill `_. Note: js2py provides ``console.log`` but not ``console.assert``, so define the latter in terms of the former.
+    context.execute(
+        """
+        console.assert = function(test, arg) {
+            // Per standard ``console.assert`` semantics, log only when the assertion is falsy.
+            if (!test) {
+                console.log(arg);
+            }
+        }
+
+        // ES5 doesn't support binary numbers starting with ``0b``, so write a polyfill. Js2py doesn't allow me to override the built-in Number class, so use another name.
+        Number_ = function(n) {
+            if (typeof n === "string" && n.trim().slice(0, 2).toLowerCase() === "0b") {
+                return parseInt(n.trim().slice(2), 2);
+            }
+            return Number(n);
+        }
+
+        // Must be writable: true, enumerable: false, configurable: true
+        Object.defineProperty(Object, "assign", {
+            value: function assign(target, varArgs) { // .length of function is 2
+                'use strict';
+                if (target === null || target === undefined) {
+                    throw new TypeError('Cannot convert undefined or null to object');
+                }
+
+                var to = Object(target);
+
+                for (var index = 1; index < arguments.length; index++) {
+                    var nextSource = arguments[index];
+
+                    if (nextSource !== null && nextSource !== undefined) {
+                        for (var nextKey in nextSource) {
+                            // Avoid bugs when hasOwnProperty is shadowed
+                            if (Object.prototype.hasOwnProperty.call(nextSource, nextKey)) {
+                                to[nextKey] = nextSource[nextKey];
+                            }
+                        }
+                    }
+                }
+                return to;
+            },
+            writable: true,
+            configurable: true
+        });
+
+        Object.entries = function( obj ) {
+            var ownProps = Object.keys( obj ),
+                i = ownProps.length,
+                resArray = new Array(i); // preallocate the Array
+            while (i--)
+                resArray[i] = [ownProps[i], obj[ownProps[i]]];
+
+            return resArray;
+        };
+
+        Object.values = function (obj) {
+            return Object.keys(obj).map(function (e) {
+                return obj[e];
+            });
+        };
+
+        Math.imul = function(a, b) {
+            // Use MDN's polyfill: split into 16-bit halves so the product stays exact. A plain ``(a*b)&0xFFFFFFFF`` loses low bits once ``a*b`` exceeds 2**53, which would make seeded RNGs diverge from the client.
+            var aHi = (a >>> 16) & 0xffff;
+            var aLo = a & 0xffff;
+            var bHi = (b >>> 16) & 0xffff;
+            var bLo = b & 0xffff;
+            return ((aLo * bLo) + (((aHi * bLo + aLo * bHi) << 16) >>> 0) | 0);
+        }
+        """
+    )
+
+    # Load in the server-side code.
+    with open(book_path / "_static/server_side.js", encoding="utf-8") as f:
+        context.execute(f.read())
+
+    return context.serverSide
+
+
 # Provide feedback for a fill-in-the-blank problem. This should produce
 # identical results to the code in ``evaluateAnswers`` in ``fitb.js``.
 async def fitb_feedback(
@@ -50,92 +177,73 @@ async def fitb_feedback(
     fitb_validator: Any,
     # The feedback to use when grading this question, taken from the ``feedback`` field of the ``fitb_answers`` table.
     feedback: Dict[Any, Any],
+    # The base course this question appears in.
+    base_course: str,
 ) -> Dict[str, Any]:
-    # Grade based on this feedback. The new format is JSON; the old is
-    # comma-separated.
+    # Load and run the JS grader.
+    dyn_vars = feedback["dyn_vars"]
+    blankNames = feedback["blankNames"]
+    js_context = get_js_context(
+        Path(settings.book_path) / base_course / "published" / base_course
+    )
+    # Use a render to get the dynamic vars for a dynamic problem.
+    dyn_vars_eval = None
+    problemHtml = ""
+    if dyn_vars:
+        problemHtml, dyn_vars_eval = js_context.fitb.renderDynamicContent(
+            fitb_validator.seed, dyn_vars, feedback["problemHtml"]
+        )
+
+    # Get the answer.
     answer_json = fitb_validator.answer
+    # If there's no answer, skip grading.
+    if answer_json is None:
+        return dict(seed=fitb_validator.seed, problemHtml=problemHtml)
     try:
+        # The new format is JSON.
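+        # For example (illustrative values, not from this patch):
+        # ``'["red", "away"]'`` parses to a list and is graded as-is, while an
+        # old-format answer like ``'red,away'`` either fails to parse or parses
+        # to a non-list, trips the assert below, and falls through to
+        # ``split(",")``.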
         answer = json.loads(answer_json)
         # Some answers may parse as JSON, but still be in the old format. The
         # new format should always return an array.
         assert isinstance(answer, list)
     except Exception:
+        # The old format is comma-separated.
         answer = answer_json.split(",")
-    displayFeed = []
-    isCorrectArray: List[Optional[bool]] = []
-    # The overall correctness of the entire problem.
-    correct = True
-    for blank, feedback_for_blank in zip(answer, feedback):
-        if not blank:
-            isCorrectArray.append(None)
-            displayFeed.append("No answer provided.")
-            correct = False
-        else:
-            # The correctness of this problem depends on if the first item matches.
-            is_first_item = True
-            # Check everything but the last answer, which always matches.
-            for fb in feedback_for_blank[:-1]:
-                if "regex" in fb:
-                    if re.search(
-                        fb["regex"], blank, re.I if fb["regexFlags"] == "i" else 0
-                    ):
-                        isCorrectArray.append(is_first_item)
-                        if not is_first_item:
-                            correct = False
-                        displayFeed.append(fb["feedback"])
-                        break
-                else:
-                    assert "number" in fb
-                    min_, max_ = fb["number"]
-                    try:
-                        # Note that ``literal_eval`` does **not** discard leading / trailing spaces, but considers them indentation errors. So, explicitly invoke ``strip``.
-                        val = ast.literal_eval(blank.strip())
-                        in_range = val >= min_ and val <= max_
-                    except Exception:
-                        # In case something weird or invalid was parsed (dict, etc.)
-                        in_range = False
-                    if in_range:
-                        isCorrectArray.append(is_first_item)
-                        if not is_first_item:
-                            correct = False
-                        displayFeed.append(fb["feedback"])
-                        break
-                is_first_item = False
-            # Nothing matched. Use the last feedback.
-            else:
-                isCorrectArray.append(False)
-                correct = False
-                displayFeed.append(feedback_for_blank[-1]["feedback"])
-
-    # Note that this isn't a percentage, but a ratio where 1.0 == all correct.
-    percent = (
-        isCorrectArray.count(True) / len(isCorrectArray) if len(isCorrectArray) else 0
+
+    # Grade using the JavaScript grader.
+    displayFeed, correct, isCorrectArray, percent = js_context.fitb.evaluateAnswersCore(
+        blankNames, answer, feedback["feedbackArray"], dyn_vars_eval, True
     )
+    # For dynamic problems, render the feedback.
+    if dyn_vars:
+        for index in range(len(displayFeed)):
+            displayFeed[index] = js_context.fitb.renderDynamicFeedback(
+                blankNames, answer, index, displayFeed[index], dyn_vars_eval
+            )

-    # Update the values to be stored in the db.
+    # Store updates to the database.
     fitb_validator.correct = correct
     fitb_validator.percent = percent

     # Return grading results to the client for a non-exam scenario.
     if settings.is_exam:
         return dict(
-            correct=True,
             displayFeed=["Response recorded."] * len(answer),
+            correct=True,
             isCorrectArray=[True] * len(answer),
-            percent=1,
+            problemHtml=problemHtml,
         )
     else:
         return dict(
-            correct=correct,
             displayFeed=displayFeed,
+            correct=correct,
             isCorrectArray=isCorrectArray,
-            percent=percent,
+            problemHtml=problemHtml,
         )


 # lp feedback
 # ===========
-async def lp_feedback(lp_validator: Any, feedback: Dict[Any, Any]):
+async def lp_feedback(lp_validator: Any, feedback: Dict[Any, Any], base_course: str):
+    # Begin by reformatting the answer for storage in the database. Do this now, so the code will be stored correctly even if the function returns early due to an error.
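+    # As an illustration (values not from this patch): an incoming answer of
+    # '["void main() {}"]' is re-stored as
+    # '{"code_snippets": ["void main() {}"]}' by the two statements below.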
     try:
         code_snippets = json.loads(lp_validator.answer)
@@ -144,8 +252,7 @@ async def lp_feedback(lp_validator: Any, feedback: Dict[Any, Any]):
         return {"errors": [f"Unable to load answers from '{lp_validator.answer}'."]}
     lp_validator.answer = json.dumps(dict(code_snippets=code_snippets))

-    course = await fetch_course(lp_validator.course_name)
-    sphinx_base_path = os.path.join(settings.book_path, course.base_course)
+    sphinx_base_path = os.path.join(settings.book_path, base_course)
     source_path = feedback["source_path"]
     # Read the Sphinx config file to find paths relative to this directory.
     sphinx_config = read_sphinx_config(sphinx_base_path)

diff --git a/bookserver/models.py b/bookserver/models.py
index 80a93c26..69902c62 100644
--- a/bookserver/models.py
+++ b/bookserver/models.py
@@ -222,7 +222,9 @@ class MchoiceAnswers(Base, CorrectAnswerMixin):
 class FitbAnswers(Base, CorrectAnswerMixin):
     __tablename__ = "fitb_answers"
     # See answer_. TODO: what is the format?
-    answer = Column(String(512), nullable=False)
+    answer = Column(String(512))
+    # The random seed used for dynamic problems.
+    seed = Column(Integer)

     __table_args__ = (Index("idx_div_sid_course_fb", "sid", "div_id", "course_name"),)

diff --git a/bookserver/routers/assessment.py b/bookserver/routers/assessment.py
index bda388fa..89093c26 100644
--- a/bookserver/routers/assessment.py
+++ b/bookserver/routers/assessment.py
@@ -31,9 +31,11 @@
 # -------------------------
 from ..applogger import rslogger
 from ..crud import (
-    count_useinfo_for,
+    create_seed,
     create_selected_question,
     create_user_experiment_entry,
+    count_useinfo_for,
+    EVENT2TABLE,
     fetch_assignment_question,
     fetch_code,
     fetch_course,
@@ -48,10 +50,12 @@
     fetch_top10_fitb,
     fetch_user_experiment,
     fetch_viewed_questions,
+    is_server_feedback,
     update_selected_question,
 )
+from ..models import runestone_component_dict
 from ..internal.utils import make_json_response
-from ..schemas import AssessmentRequest, SelectQRequest
+from ..schemas import AssessmentRequest, SeedRequest, SelectQRequest
 from ..session import is_instructor

 # Routing
@@ -63,6 +67,8 @@
 )


+# .. _getAssessResults:
+#
 # getAssessResults
 # ----------------
 @router.post("/results")
@@ -70,7 +76,8 @@ async def get_assessment_results(
     request_data: AssessmentRequest,
     request: Request,
 ):
-    if not request.state.user:
+    user = request.state.user
+    if not user:
         return make_json_response(
             status=status.HTTP_401_UNAUTHORIZED, detail="not logged in"
         )
@@ -80,28 +87,53 @@ async def get_assessment_results(
     # use the user objects username
     if await is_instructor(request):
         if not request_data.sid:
-            request_data.sid = request.state.user.username
+            request_data.sid = user.username
     else:
         if request_data.sid:
             # someone is attempting to spoof the api
             return make_json_response(
                 status=status.HTTP_401_UNAUTHORIZED, detail="not an instructor"
             )
-        request_data.sid = request.state.user.username
+        request_data.sid = user.username
+
+    if request_data.new_seed:
+        # This tells the following code to create a new seed.
+        row = None
+        # We assume this table has seeds if it's asking for a new one...
+        has_seed = True
+    else:
+        row, has_seed = await fetch_last_answer_table_entry(request_data)
+
+    # Determine if this is a server-side graded problem.
+    feedback, base_course = await is_server_feedback(
+        request_data.div_id, user.course_name
+    )

-    row = await fetch_last_answer_table_entry(request_data)
     # mypy complains that ``row.id`` doesn't exist (true, but the return type wasn't exact and this does exist).
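+    # (An illustrative aside, not from the original patch: ``row`` is falsy
+    # here either because the client requested a new seed or because the
+    # student has no prior entry; ``has_seed`` is True only for tables with
+    # a ``seed`` column, such as ``fitb_answers``.)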
     if not row or row.id is None:  # type: ignore
-        return make_json_response(detail="no data")
-
-    # :index:`todo``: **port the serverside grading** code::
-    #
-    #    do_server_feedback, feedback = is_server_feedback(div_id, course)
-    #    if do_server_feedback:
-    #        correct, res_update = fitb_feedback(rows.answer, feedback)
-    #        res.update(res_update)
-    rslogger.debug(f"Returning {row}")
-    return make_json_response(detail=row)
+        # Create and save a seed for server-side graded problems that need it.
+        if has_seed and feedback:
+            # Sigh -- must rename a field due to inconsistent naming conventions.
+            rd = request_data.dict()
+            rd["course_name"] = rd.pop("course")
+            await create_seed(SeedRequest(**rd))
+            # Get this new seed and return it.
+            row, has_seed = await fetch_last_answer_table_entry(request_data)
+            assert has_seed
+            assert row and row.id  # type: ignore
+        else:
+            return make_json_response(detail="no data")
+
+    row_dict = row.dict()
+    # Do server-side grading if needed.
+    if feedback:
+        # The grader should also be defined if there's feedback.
+        rcd = runestone_component_dict[EVENT2TABLE[request_data.event]]
+        assert rcd.grader
+        row_dict.update(await rcd.grader(row, feedback, base_course))
+
+    rslogger.debug(f"Returning {row_dict}")
+    return make_json_response(detail=row_dict)


 # Define a simple model for the gethist request.

diff --git a/bookserver/routers/rslogging.py b/bookserver/routers/rslogging.py
index d49d494f..0fb544fd 100644
--- a/bookserver/routers/rslogging.py
+++ b/bookserver/routers/rslogging.py
@@ -12,6 +12,7 @@
 # ----------------
 import json
 from datetime import datetime
+from random import randrange
 import re
 from typing import Optional

@@ -32,6 +33,7 @@
     create_user_sub_chapter_progress_entry,
     EVENT2TABLE,
     fetch_last_page,
+    fetch_last_answer_table_entry,
     fetch_user_chapter_progress,
     fetch_user_sub_chapter_progress,
     fetch_user,
@@ -47,6 +49,7 @@
     UseinfoValidation,
 )
 from ..schemas import (
+    AssessmentRequest,
     LastPageData,
     LastPageDataIncoming,
     LogItemIncoming,
@@ -126,10 +129,23 @@ async def log_book_event(entry: LogItemIncoming, request: Request):
     valid_table = rcd.validator.from_orm(entry)  # type: ignore

     # Do server-side grading if needed.
-    if feedback := await is_server_feedback(entry.div_id, user.course_name):
+    feedback, base_course = await is_server_feedback(entry.div_id, user.course_name)
+    if feedback:
+        # Server-side graded problems which have a seed must use the previous entry's seed, not what comes from the client.
+        if "seed" in valid_table.__fields__:
+            ar = AssessmentRequest(
+                course=entry.course_name,
+                div_id=entry.div_id,
+                event=entry.event,
+                sid=entry.sid,
+            )
+            row, has_seed = await fetch_last_answer_table_entry(ar)
+            assert has_seed
+            # Timed exams don't restore answers, so they don't have a seed. In this case, create one. TODO: this means server-side dynamic problems don't work with timed exams.
+            valid_table.seed = row.seed if row else randrange(2 ** 32 - 1)
         # The grader should also be defined if there's feedback.
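The grader lookup here relies on the registry built by ``init_graders`` in ``feedback.py``; here is a runnable sketch of that dispatch pattern, under illustrative names (``demo_grader`` and ``graders`` are not part of this patch)::

    import asyncio

    async def demo_grader(validator, feedback, base_course):
        # Stand-in for ``fitb_feedback`` / ``lp_feedback``.
        return dict(correct=True, percent=1.0)

    # Cf. ``init_graders``, which assigns graders keyed by table name.
    graders = {"fitb_answers": demo_grader, "lp_answers": demo_grader}

    async def main():
        # Dispatch mirrors ``runestone_component_dict[EVENT2TABLE[event]].grader``.
        result = await graders["fitb_answers"](None, {}, "test_course_1")
        assert result["correct"]

    asyncio.run(main())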
         assert rcd.grader
-        response_dict.update(await rcd.grader(valid_table, feedback))
+        response_dict.update(await rcd.grader(valid_table, feedback, base_course))

     ans_idx = await create_answer_table_entry(valid_table, entry.event)
     rslogger.debug(ans_idx)

diff --git a/bookserver/schemas.py b/bookserver/schemas.py
index 4117e72f..bbd4afc9 100644
--- a/bookserver/schemas.py
+++ b/bookserver/schemas.py
@@ -121,6 +121,8 @@ class LogItemIncoming(BaseModelNone):
     incorrect: Optional[int]
     skipped: Optional[int]
     time_taken: Optional[int]
+    # Used by dynamic problems.
+    seed: Optional[int]


 class AssessmentRequest(BaseModelNone):
@@ -130,6 +132,8 @@ class AssessmentRequest(BaseModelNone):
     sid: Optional[str] = None
     # See `Field with dynamic default value `_.
     deadline: datetime = Field(default_factory=datetime.utcnow)
+    # True if the response should use a new random seed.
+    new_seed: bool = False

     @validator("deadline")
     def str_to_datetime(cls, value: str) -> datetime:
@@ -147,6 +151,14 @@ def str_to_datetime(cls, value: str) -> datetime:
         return deadline


+# The data required to create a new seed for a dynamic problem.
+class SeedRequest(BaseModelNone):
+    course_name: str
+    div_id: str
+    event: str
+    sid: str
+
+
 class TimezoneRequest(BaseModelNone):
     timezoneoffset: int

diff --git a/mypy.ini b/mypy.ini
index 8713d118..7c6e95fe 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -49,6 +49,9 @@ ignore_missing_imports = True
 [mypy-fastapi_login.*]
 ignore_missing_imports = True

+[mypy-js2py.*]
+ignore_missing_imports = True
+
 [mypy-polling2.*]
 ignore_missing_imports = True

diff --git a/pyproject.toml b/pyproject.toml
index b9963c9d..08451536 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,6 +30,7 @@ uvicorn = {extras = ["standard"], version = "^0.14.0"}
 # See the `poetry docs `_.
 gunicorn = {version = "^20.1.0", markers = "sys.platform != 'win32'"}
 Jinja2 = "^3.0.1"
+Js2Py = "^0.71"
 aiofiles = "^0.6.0"
 alembic = "^1.4.3"
 python-dateutil = "^2.8.1"

diff --git a/test/conftest.py b/test/conftest.py
index cb0c0940..b197227b 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -135,7 +135,8 @@ def run_bookserver(pytestconfig, init_db):
     prefix_args = []
     # Pass pytest's log level to Celery; if not specified, it defaults to INFO.
     log_level = pytestconfig.getoption("log_cli_level") or "INFO"
-    if pytestconfig.getoption("server_debug"):
+    server_debug = pytestconfig.getoption("server_debug")
+    if server_debug:
         # Don't redirect stdio, so the developer can see and interact with it.
         kwargs = {}
         # TODO: these come from `SO `__ but are not tested.
@@ -243,6 +244,11 @@ def shut_down():
     # After this comes the `teardown code `_.
     yield

+    # Allow the user to interact with the other open windows before closing them.
+    if server_debug:
+        import pdb
+
+        pdb.set_trace()
     shut_down()


@@ -551,6 +557,11 @@ def logout(self):
     def get_book_url(self, url):
         return self.get(f"books/published/test_course_1/{url}")

+    def inject_random_values(self, value_array):
+        test_file = Path(__file__).parent / "rand.txt"
+        test_file.unlink(missing_ok=True)
+        test_file.write_text("\n".join([str(x) for x in value_array]), encoding="utf-8")
+

 # Present ``_SeleniumServerUtils`` as a fixture.
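The ``inject_random_values`` helper above pairs with ``TestFileValues`` in ``feedback.py``; here is a hedged sketch of that round trip (the file location is illustrative, and nothing below is part of this patch)::

    from pathlib import Path

    # The test writes one float per line ...
    test_file = Path("rand.txt")
    test_file.write_text("0.2\n0.1\n", encoding="utf-8")

    # ... and the server replays them in order, re-reading whenever the
    # file's ``stat()`` changes, cycling back to the first value when they
    # run out, and falling back to 0 if the file is unreadable.
    values = [float(v) for v in test_file.read_text().splitlines()]
    assert values == [0.2, 0.1]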
 @pytest.fixture

diff --git a/test/test_runestone_components.py b/test/test_runestone_components.py
index 40f52936..d1fcca65 100644
--- a/test/test_runestone_components.py
+++ b/test/test_runestone_components.py
@@ -236,62 +236,75 @@ async def fitb_check_common_fields(index, div_id):
         return json.loads(answer), correct, percent

     test_fitb.test_fitb1(selenium_utils_user_1)
-    assert await fitb_check_common_fields(0, "test_fitb_string") == (["", ""], False, 0)
+    assert await fitb_check_common_fields(1, "test_fitb_string") == (["", ""], False, 0)

     test_fitb.test_fitb2(selenium_utils_user_1)
-    assert await fitb_check_common_fields(1, "test_fitb_string") == (
+    assert await fitb_check_common_fields(2, "test_fitb_string") == (
         ["red", ""],
         False,
         0.5,
     )

     test_fitb.test_fitb3(selenium_utils_user_1)
-    assert await fitb_check_common_fields(2, "test_fitb_string") == (
+    assert await fitb_check_common_fields(3, "test_fitb_string") == (
         ["red", "away"],
         True,
         1,
     )

     test_fitb.test_fitb4(selenium_utils_user_1)
-    assert await fitb_check_common_fields(3, "test_fitb_string") == (
+    assert await fitb_check_common_fields(4, "test_fitb_string") == (
         ["red", "away"],
         True,
         1,
     )

     test_fitb.test_fitboneblank_too_low(selenium_utils_user_1)
-    assert await fitb_check_common_fields(0, "test_fitb_number") == ([" 6"], False, 0)
+    assert await fitb_check_common_fields(1, "test_fitb_number") == ([" 6"], False, 0)

     test_fitb.test_fitboneblank_wildcard(selenium_utils_user_1)
-    assert await fitb_check_common_fields(1, "test_fitb_number") == (
+    assert await fitb_check_common_fields(2, "test_fitb_number") == (
         ["I give up"],
         False,
         0,
     )

     test_fitb.test_fitbfillrange(selenium_utils_user_1)
-    assert await fitb_check_common_fields(2, "test_fitb_number") == (
+    assert await fitb_check_common_fields(3, "test_fitb_number") == (
         [" 6.28 "],
         True,
         1,
     )

     test_fitb.test_fitbregex(selenium_utils_user_1)
-    assert await fitb_check_common_fields(0, "test_fitb_regex_1") == (
+    assert await fitb_check_common_fields(1, "test_fitb_regex_1") == (
         [" maire ", "LITTLE", "2"],
         True,
         1,
     )

     test_fitb.test_regexescapes1(selenium_utils_user_1)
-    assert await fitb_check_common_fields(0, "test_fitb_regex_2") == (
+    assert await fitb_check_common_fields(1, "test_fitb_regex_2") == (
         [r"C:\windows\system"],
         True,
         1,
     )

     test_fitb.test_regexescapes2(selenium_utils_user_1)
-    assert await fitb_check_common_fields(0, "test_fitb_regex_3") == (["[]"], True, 1)
+    assert await fitb_check_common_fields(1, "test_fitb_regex_3") == (["[]"], True, 1)
+
+    # See `notes ` about the repetition.
+    test_fitb._test_dynamic_1(
+        selenium_utils_user_1,
+        [0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.3, 0.4, 0.3, 0.4],
+    )
+    id_ = "test_fitb_dynamic_1"
+    # The first useful entry to check is 2. Entry 0 is the initial random seed; entry 1 is the next random seed from the first press of the "Randomize" button.
+    assert await fitb_check_common_fields(2, id_) == ([" 3"], True, 1)
+    assert await fitb_check_common_fields(3, id_) == (["1.0 "], False, 0)
+    assert await fitb_check_common_fields(4, id_) == ([" 0x2 "], False, 0)
+    assert await fitb_check_common_fields(5, id_) == ([" 4e0"], False, 0)
+    assert await fitb_check_common_fields(7, id_) == ([" 0b111 "], True, 1)


 # Lp

diff --git a/test/toctree.rst b/test/toctree.rst
index 78b28cd0..e89bb0f5 100644
--- a/test/toctree.rst
+++ b/test/toctree.rst
@@ -5,7 +5,7 @@ To run the tests, execute ``poetry run pytest`` from the parent of this subdirec
 --skipdbinit           Skip initialization of the test database.
                        This makes the tests start much faster, at the risk of a corrupt database causing spurious test failures.
 --server_debug         Enable server debug mode. This runs the server in a separate terminal/console, which allow you to set breakpoints, stop the code, etc.
---log_cli_level LEVEL  Set the `pytest logging level `_. This level affects not just pytest, but the server and all tools run by the tests. Use ``--log_cli_level=INFO`` to provide complete output from the server; the `default logging level ` is ``WARNING``.
+--log_cli_level LEVEL  Set the `pytest logging level `_. This level affects not just pytest, but the server and all tools run by the tests. Use ``--log-cli-level=INFO`` to provide complete output from the server; the `default logging level ` is ``WARNING``.
 --k EXPRESSION         Only run tests which match the given substring expression. For example, ``-k test_foo`` only runs tests named ``test_foo``, ``test_foo_1``, etc. See the `pytest docs `_ for more possibilities.

 Testing and debugging tips: