From c5bfa4c524a7432325c61f5c7c4cafb430483bf5 Mon Sep 17 00:00:00 2001
From: BethanyG
Date: Mon, 6 Nov 2023 22:01:48 -0800
Subject: [PATCH] [Making the Grade]: Modified Test Error Messages & Touched Up Docs (#3542)

* Updated test error messages and touched up docs.

* Removed deep copy from imports, as it was unneeded.

[no important files changed]
---
 concepts/loops/about.md                       |  14 +-
 .../concept/making-the-grade/.docs/hints.md   |  20 +-
 .../making-the-grade/.docs/instructions.md    |  17 +-
 .../making-the-grade/.docs/introduction.md    |  12 +-
 .../concept/making-the-grade/loops_test.py    | 195 +++++++++++-------
 5 files changed, 157 insertions(+), 101 deletions(-)

diff --git a/concepts/loops/about.md b/concepts/loops/about.md
index 88bb71e619..0f39e733d0 100644
--- a/concepts/loops/about.md
+++ b/concepts/loops/about.md
@@ -235,17 +235,17 @@ The loop [`else` clause][loop else] is unique to Python and can be used for "wra
 'Found an S, stopping iteration.'
 ```
 
-[loop else]: https://docs.python.org/3/tutorial/controlflow.html#break-and-continue-statements-and-else-clauses-on-loops
-[range]: https://docs.python.org/3/library/stdtypes.html#range
 [break statement]: https://docs.python.org/3/reference/simple_stmts.html#the-break-statement
+[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#common-sequence-operations
 [continue statement]: https://docs.python.org/3/reference/simple_stmts.html#the-continue-statement
-[while statement]: https://docs.python.org/3/reference/compound_stmts.html#the-while-statement
-[truth value testing]: https://docs.python.org/3/library/stdtypes.html#truth-value-testing
 [enumerate]: https://docs.python.org/3/library/functions.html#enumerate
-[iterator]: https://docs.python.org/3/glossary.html#term-iterator
-[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#common-sequence-operations
-[range is not an iterator]: https://treyhunner.com/2018/02/python-range-is-not-an-iterator/
 [for statement]: https://docs.python.org/3/reference/compound_stmts.html#for
 [iterable]: https://docs.python.org/3/glossary.html#term-iterable
+[iterator]: https://docs.python.org/3/glossary.html#term-iterator
+[loop else]: https://docs.python.org/3/tutorial/controlflow.html#break-and-continue-statements-and-else-clauses-on-loops
 [next built-in]: https://docs.python.org/3/library/functions.html#next
+[range is not an iterator]: https://treyhunner.com/2018/02/python-range-is-not-an-iterator/
+[range]: https://docs.python.org/3/library/stdtypes.html#range
 [stopiteration]: https://docs.python.org/3/library/exceptions.html#StopIteration
+[truth value testing]: https://docs.python.org/3/library/stdtypes.html#truth-value-testing
+[while statement]: https://docs.python.org/3/reference/compound_stmts.html#the-while-statement
diff --git a/exercises/concept/making-the-grade/.docs/hints.md b/exercises/concept/making-the-grade/.docs/hints.md
index 78c1358d60..3e8deff958 100644
--- a/exercises/concept/making-the-grade/.docs/hints.md
+++ b/exercises/concept/making-the-grade/.docs/hints.md
@@ -2,11 +2,11 @@
 
 ## General
 
-- `while` loops are used for _indefinite_ (uncounted) iteration
-- `for` loops are used for _definite_, (counted) iteration.
-- The keywords `break` and `continue` help customize loop behavior.
-- `range(<start>, stop, <step>)` can be used to generate a sequence for a loop counter.
-- The built-in `enumerate()` will return (`<index>`, `<item>`) pairs to iterate over.
+- [`while`][while-loops] loops are used for _indefinite_ (uncounted) iteration
+- [`for`][for-loops] loops are used for _definite_, (counted) iteration.
+- The keywords [`break` and `continue`][control flow] help customize loop behavior.
+- [`range(<start>, stop, <step>)`][range] can be used to generate a sequence for a loop counter.
+- The built-in [`enumerate()`][enumerate] will return (`<index>`, `<item>`) pairs to iterate over.
 
 Also being familiar with the following can help with completing the tasks:
 
@@ -47,11 +47,13 @@ Also being familiar with the following can help with completing the tasks:
 - There may be or may not be a student with a score of 100, and you can't return `[]` without checking **all** scores.
 - The [`control flow`][control flow] statements `continue` and `break` may be useful here to move past unwanted values.
 
-[list]: https://docs.python.org/3/library/stdtypes.html#list
-[str]: https://docs.python.org/3/library/stdtypes.html#str
-[f-strings]: https://docs.python.org/3/reference/lexical_analysis.html#formatted-string-literals
 [append and pop]: https://docs.python.org/3/tutorial/datastructures.html#more-on-lists
-[enumerate]: https://docs.python.org/3/library/functions.html#enumerate
 [control flow]: https://docs.python.org/3/tutorial/controlflow.html#break-and-continue-statements-and-else-clauses-on-loops
+[enumerate]: https://docs.python.org/3/library/functions.html#enumerate
+[f-strings]: https://docs.python.org/3/reference/lexical_analysis.html#formatted-string-literals
+[for-loops]: https://docs.python.org/3/tutorial/controlflow.html#for-statements
+[list]: https://docs.python.org/3/library/stdtypes.html#list
 [range]: https://docs.python.org/3/tutorial/controlflow.html#the-range-function
 [round]: https://docs.python.org/3/library/functions.html#round
+[str]: https://docs.python.org/3/library/stdtypes.html#str
+[while-loops]: https://docs.python.org/3/reference/compound_stmts.html#the-while-statement
diff --git a/exercises/concept/making-the-grade/.docs/instructions.md b/exercises/concept/making-the-grade/.docs/instructions.md
index 43b25420c0..2f0c6617ae 100644
--- a/exercises/concept/making-the-grade/.docs/instructions.md
+++ b/exercises/concept/making-the-grade/.docs/instructions.md
@@ -9,7 +9,7 @@ You decide to make things a little more interesting by putting together some fun
 While you can give "partial credit" on exam questions, overall exam scores have to be `int`s.
 So before you can do anything else with the class scores, you need to go through the grades and turn any `float` scores into `int`s. Lucky for you, Python has the built-in [`round()`][round] function you can use.
 
-Create the function `round_scores()` that takes a `list` of `student_scores`.
+Create the function `round_scores(student_scores)` that takes a `list` of `student_scores`.
 This function should _consume_ the input `list` and `return` a new list with all the scores converted to `int`s.
 The order of the scores in the resulting `list` is not important.
 
@@ -22,10 +22,10 @@ The order of the scores in the resulting `list` is not important.
 
 ## 2. Non-Passing Students
 
-As you were grading the exam, you noticed some students weren't performing as well as you'd hoped.
+As you were grading the exam, you noticed some students weren't performing as well as you had hoped.
 But you were distracted, and forgot to note exactly _how many_ students.
 
-Create the function `count_failed_students()` that takes a `list` of `student_scores`.
+Create the function `count_failed_students(student_scores)` that takes a `list` of `student_scores`.
 This function should count up the number of students who don't have passing scores and return that count as an integer.
 A student needs a score greater than **40** to achieve a passing grade on the exam.
 
@@ -39,7 +39,7 @@ A student needs a score greater than **40** to achieve a passing grade on the ex
 The teacher you're assisting wants to find the group of students who've performed "the best" on this exam.
 What qualifies as "the best" fluctuates, so you need to find the student scores that are **greater than or equal to** the current threshold.
 
-Create the function `above_threshold()` taking `student_scores` (a `list` of grades), and `threshold` (the "top score" threshold) as parameters.
+Create the function `above_threshold(student_scores, threshold)` taking `student_scores` (a `list` of grades), and `threshold` (the "top score" threshold) as parameters.
 This function should return a `list` of all scores that are `>=` to `threshold`.
 
 ```python
@@ -49,10 +49,11 @@ This function should return a `list` of all scores that are `>=` to `threshold`.
 
 ## 4. Calculating Letter Grades
 
-The teacher you're assisting likes to assign letter grades as well as numeric scores.
+The teacher you are assisting likes to assign letter grades as well as numeric scores.
 Since students rarely score 100 on an exam, the "letter grade" lower thresholds are calculated based on the highest score achieved, and increment evenly between the high score and the failing threshold of **<= 40**.
 
-Create the function `letter_grades()` that takes the "highest" score on the exam as a parameter, and returns a `list` of lower score thresholds for each "American style" grade interval: `["D", "C", "B", "A"]`.
+Create the function `letter_grades(highest)` that takes the "highest" score on the exam as an argument, and returns a `list` of lower score thresholds for each "American style" grade interval: `["D", "C", "B", "A"]`.
+
 ```python
 """Where the highest score is 100, and failing is <= 40.
@@ -84,7 +85,7 @@ Create the function `letter_grades()` that takes the "highest" score on the exam
 You have a list of exam scores in descending order, and another list of student names also sorted in descending order by their exam scores.
 You would like to match each student name with their exam score and print out an overall class ranking.
 
-Create the function `student_ranking()` with parameters `student_scores` and `student_names`.
+Create the function `student_ranking(student_scores, student_names)` with parameters `student_scores` and `student_names`.
 Match each student name on the student_names `list` with their score from the student_scores `list`.
 You can assume each argument `list` will be sorted from highest score(er) to lowest score(er).
 The function should return a `list` of strings with the format `<rank>. <student name>: <student score>`.
@@ -101,7 +102,7 @@ The function should return a `list` of strings with the format `<rank>.
 The function should `return` _the first_ `[<name>, <score>]` pair of the student who scored 100 on the exam.
diff --git a/exercises/concept/making-the-grade/.docs/introduction.md b/exercises/concept/making-the-grade/.docs/introduction.md
index ad425d9092..2ae6ea724d 100644
--- a/exercises/concept/making-the-grade/.docs/introduction.md
+++ b/exercises/concept/making-the-grade/.docs/introduction.md
@@ -172,13 +172,13 @@ The [`break`][break statement] (_like in many C-related languages_) keyword can
 'loop broken.'
 ```
 
-[for statement]: https://docs.python.org/3/reference/compound_stmts.html#for
-[range]: https://docs.python.org/3/library/stdtypes.html#range
 [break statement]: https://docs.python.org/3/reference/simple_stmts.html#the-break-statement
+[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#common-sequence-operations
 [continue statement]: https://docs.python.org/3/reference/simple_stmts.html#the-continue-statement
-[while statement]: https://docs.python.org/3/reference/compound_stmts.html#the-while-statement
-[iterable]: https://docs.python.org/3/glossary.html#term-iterable
-[truth value testing]: https://docs.python.org/3/library/stdtypes.html#truth-value-testing
 [enumerate]: https://docs.python.org/3/library/functions.html#enumerate
-[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#common-sequence-operations
+[for statement]: https://docs.python.org/3/reference/compound_stmts.html#for
+[iterable]: https://docs.python.org/3/glossary.html#term-iterable
 [next built-in]: https://docs.python.org/3/library/functions.html#next
+[range]: https://docs.python.org/3/library/stdtypes.html#range
+[truth value testing]: https://docs.python.org/3/library/stdtypes.html#truth-value-testing
+[while statement]: https://docs.python.org/3/reference/compound_stmts.html#the-while-statement
diff --git a/exercises/concept/making-the-grade/loops_test.py b/exercises/concept/making-the-grade/loops_test.py
index 669ca80c5a..598e2b0ddf 100644
--- a/exercises/concept/making-the-grade/loops_test.py
+++ b/exercises/concept/making-the-grade/loops_test.py
@@ -1,5 +1,6 @@
 import unittest
 import pytest
+
 from loops import (
     round_scores,
     count_failed_students,
@@ -13,90 +14,142 @@ class MakingTheGradeTest(unittest.TestCase):
 
     @pytest.mark.task(taskno=1)
     def test_round_scores(self):
-        data = [
-            ([], []),
-            ([.5], [0]),
-            ([1.5], [2]),
-            (
-                [90.33, 40.5, 55.44, 70.05, 30.55, 25.45, 80.45, 95.3, 38.7, 40.3],
-                [90, 40, 55, 70, 31, 25, 80, 95, 39, 40]),
-            (
-                [50, 36.03, 76.92, 40.7, 43, 78.29, 63.58, 91, 28.6, 88.0],
-                [50, 36, 77, 41, 43, 78, 64, 91, 29, 88])]
-
-        for variant, (student_scores, result) in enumerate(data, start=1):
-            error_message = f'Expected: {result} but one or more {student_scores} were rounded incorrectly.'
-            with self.subTest(f'variation #{variant}', input=student_scores, output=result):
-                self.assertEqual(sorted(round_scores(student_scores)), sorted(result), msg=error_message)
+
+        # Because the input list can be mutated, the test data has been created
+        # as tuples, which we then convert to a list when the test runs.
+        # This makes accurate error messages easier to create.
+        test_data = [tuple(),
+                     (.5,),
+                     (1.5,),
+                     (90.33, 40.5, 55.44, 70.05, 30.55, 25.45, 80.45, 95.3, 38.7, 40.3),
+                     (50, 36.03, 76.92, 40.7, 43, 78.29, 63.58, 91, 28.6, 88.0)]
+        result_data = [[],
+                       [0],
+                       [2],
+                       [90, 40, 55, 70, 31, 25, 80, 95, 39, 40],
+                       [50, 36, 77, 41, 43, 78, 64, 91, 29, 88]]
+
+        for variant, (student_scores, expected) in enumerate(zip(test_data, result_data), start=1):
+            with self.subTest(f'variation #{variant}', student_scores=student_scores, expected=expected):
+
+                # Because the test_input is a tuple, it has to be converted to a list for the function call.
+                actual_result = round_scores(list(student_scores))
+                error_message = (f'Called round_scores({list(student_scores)}). '
+                                 f'The function returned {sorted(actual_result)} after sorting, but '
+                                 f'the tests expected {sorted(expected)} after sorting. '
+                                 f'One or more scores were rounded incorrectly.')
+
+                # Everything is sorted for easier comparison.
+                self.assertEqual(sorted(actual_result), sorted(expected), msg=error_message)
 
     @pytest.mark.task(taskno=2)
     def test_count_failed_students(self):
-        data = [
-            ([89, 85, 42, 57, 90, 100, 95, 48, 70, 96], 0),
-            ([40, 40, 35, 70, 30, 41, 90], 4)]
+        test_data = [[89, 85, 42, 57, 90, 100, 95, 48, 70, 96],
+                     [40, 40, 35, 70, 30, 41, 90]]
+        result_data = [0, 4]
+
+        for variant, (student_scores, expected) in enumerate(zip(test_data, result_data), start=1):
+            with self.subTest(f'variation #{variant}',
+                              student_scores=student_scores,
+                              expected=expected):
 
-        for variant, (student_scores, result) in enumerate(data, start=1):
-            error_message = f'Expected the count to be {result}, but the count was not calculated correctly.'
-            with self.subTest(f'variation #{variant}', input=student_scores, output=result):
-                self.assertEqual(count_failed_students(student_scores), result, msg=error_message)
+                actual_result = count_failed_students(student_scores)
+                error_message = (f'Called count_failed_students({student_scores}). '
+                                 f'The function returned {actual_result}, but '
+                                 f'the tests expected {expected} for the '
+                                 'number of students who failed.')
+
+                self.assertEqual(actual_result, expected, msg=error_message)
 
     @pytest.mark.task(taskno=3)
     def test_above_threshold(self):
-        data = [
-            (([40, 39, 95, 80, 25, 31, 70, 55, 40, 90], 98), []),
-            (([88, 29, 91, 64, 78, 43, 41, 77, 36, 50], 80), [88, 91]),
-            (([100, 89], 100), [100]),
-            (([88, 29, 91, 64, 78, 43, 41, 77, 36, 50], 78), [88, 91, 78]),
-            (([], 80), [])]
-
-        for variant, (params, result) in enumerate(data, start=1):
-            error_message = f'Expected: {result} but the number of scores above the threshold is incorrect.'
-            with self.subTest(f'variation #{variant}', input=params, output=result):
-                self.assertEqual(above_threshold(*params), result, msg=error_message)
+        test_data = [([40, 39, 95, 80, 25, 31, 70, 55, 40, 90], 98),
+                     ([88, 29, 91, 64, 78, 43, 41, 77, 36, 50], 80),
+                     ([100, 89], 100),
+                     ([88, 29, 91, 64, 78, 43, 41, 77, 36, 50], 78),
+                     ([], 80)]
+
+        result_data = [[],
+                       [88, 91],
+                       [100],
+                       [88, 91, 78],
+                       []]
+
+        for variant, (params, expected) in enumerate(zip(test_data, result_data), start=1):
+            with self.subTest(f'variation #{variant}', params=params, expected=expected):
+                actual_result = above_threshold(*params)
+                error_message = (f'Called above_threshold{params}. '
+                                 f'The function returned {actual_result}, but '
+                                 f'the tests expected {expected} for the '
+                                 'scores that are above the threshold.')
+
+                self.assertEqual(actual_result, expected, msg=error_message)
 
     @pytest.mark.task(taskno=4)
     def test_letter_grades(self):
-        data = [
-            (100, [41, 56, 71, 86]),
-            (97, [41, 55, 69, 83]),
-            (85, [41, 52, 63, 74]),
-            (92, [41, 54, 67, 80]),
-            (81, [41, 51, 61, 71])]
-
-        for variant, (highest, result) in enumerate(data, start=1):
-            error_message = f'Expected: {result} but the grade thresholds for a high score of {highest} are incorrect.'
-            with self.subTest(f'variation #{variant}', input=highest, output=result):
-                self.assertEqual(letter_grades(highest), result, msg=error_message)
+        test_data = [100, 97, 85, 92, 81]
+
+        result_data = [[41, 56, 71, 86],
+                       [41, 55, 69, 83],
+                       [41, 52, 63, 74],
+                       [41, 54, 67, 80],
+                       [41, 51, 61, 71]]
+
+        for variant, (highest, expected) in enumerate(zip(test_data, result_data), start=1):
+            with self.subTest(f'variation #{variant}', highest=highest, expected=expected):
+                actual_result = letter_grades(highest)
+                error_message = (f'Called letter_grades({highest}). '
+                                 f'The function returned {actual_result}, but '
+                                 f'the tests expected {expected} for the '
+                                 'letter grade cutoffs.')
+
+                self.assertEqual(actual_result, expected, msg=error_message)
 
     @pytest.mark.task(taskno=5)
     def test_student_ranking(self):
-        data = [
-            (([82], ['Betty']), ['1. Betty: 82']),
-            (([88, 73], ['Paul', 'Ernest']), ['1. Paul: 88', '2. Ernest: 73']),
-            (
-                ([100, 98, 92, 86, 70, 68, 67, 60], ['Rui', 'Betty', 'Joci', 'Yoshi', 'Kora', 'Bern', 'Jan', 'Rose']),
-                ['1. Rui: 100', '2. Betty: 98', '3. Joci: 92', '4. Yoshi: 86',
-                 '5. Kora: 70', '6. Bern: 68', '7. Jan: 67', '8. Rose: 60'])]
-
-        for variant, (params, result) in enumerate(data, start=1):
-            error_message = f'Expected: {result} but the rankings were compiled incorrectly.'
-            with self.subTest(f'variation #{variant}', input=params, output=result):
-                self.assertEqual(student_ranking(*params), result, msg=error_message)
+        test_data = [([82], ['Betty']),
+                     ([88, 73], ['Paul', 'Ernest']),
+                     ([100, 98, 92, 86, 70, 68, 67, 60],
+                      ['Rui', 'Betty', 'Joci', 'Yoshi', 'Kora', 'Bern', 'Jan', 'Rose'])]
+
+        result_data = [['1. Betty: 82'],
+                       ['1. Paul: 88', '2. Ernest: 73'],
+                       ['1. Rui: 100', '2. Betty: 98', '3. Joci: 92', '4. Yoshi: 86',
+                        '5. Kora: 70', '6. Bern: 68', '7. Jan: 67', '8. Rose: 60']]
+
+        for variant, (params, expected) in enumerate(zip(test_data, result_data), start=1):
+            with self.subTest(f'variation #{variant}', params=params, expected=expected):
+                actual_result = student_ranking(*params)
+                error_message = (f'Called student_ranking{params}. '
+                                 f'The function returned {actual_result}, but '
+                                 f'the tests expected {expected} for the '
+                                 'student rankings.')
+
+                self.assertEqual(actual_result, expected, msg=error_message)
 
     @pytest.mark.task(taskno=6)
     def test_perfect_score(self):
-        data = [
-            ([['Joci', 100], ['Vlad', 100], ['Raiana', 100], ['Alessandro', 100]], ['Joci', 100]),
-            ([['Jill', 30], ['Paul', 73], ], []),
-            ([], []),
-            (
-                [['Rui', 60], ['Joci', 58], ['Sara', 91], ['Kora', 93], ['Alex', 42],
-                 ['Jan', 81], ['Lilliana', 40], ['John', 60], ['Bern', 28], ['Vlad', 55]], []),
-            (
-                [['Yoshi', 52], ['Jan', 86], ['Raiana', 100], ['Betty', 60],
-                 ['Joci', 100], ['Kora', 81], ['Bern', 41], ['Rose', 94]], ['Raiana', 100])]
-
-        for variant, (student_info, result) in enumerate(data, start=1):
-            error_message = f'Expected: {result} but got something different for perfect scores.'
-            with self.subTest(f'variation #{variant}', input=student_info, output=result):
-                self.assertEqual(perfect_score(student_info), result, msg=error_message)
+        test_data = [
+            [['Joci', 100], ['Vlad', 100], ['Raiana', 100], ['Alessandro', 100]],
+            [['Jill', 30], ['Paul', 73]],
+            [],
+            [['Rui', 60], ['Joci', 58], ['Sara', 91], ['Kora', 93], ['Alex', 42],
+             ['Jan', 81], ['Lilliana', 40], ['John', 60], ['Bern', 28], ['Vlad', 55]],
+
+            [['Yoshi', 52], ['Jan', 86], ['Raiana', 100], ['Betty', 60],
+             ['Joci', 100], ['Kora', 81], ['Bern', 41], ['Rose', 94]]
+            ]
+
+        result_data = [['Joci', 100], [], [], [], ['Raiana', 100]]
+
+        for variant, (student_info, expected) in enumerate(zip(test_data, result_data), start=1):
+
+            with self.subTest(f'variation #{variant}', student_info=student_info, expected=expected):
+                actual_result = perfect_score(student_info)
+                error_message = (f'Called perfect_score({student_info}). '
+                                 f'The function returned {actual_result}, but '
+                                 f'the tests expected {expected} for the '
+                                 'first "perfect" score.')
+
+                self.assertEqual(actual_result, expected, msg=error_message)
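
Reviewer note: the patch above only touches the docs and the test error messages, so nothing in it shows what a passing `loops.py` looks like. Below is a minimal sketch of such a module for anyone who wants to run the updated messages locally. The function names and signatures come from the `from loops import (...)` block in `loops_test.py`; the bodies are illustrative assumptions that satisfy the tasks described in the instructions, not the exercise's official exemplar.

```python
# loops.py -- an illustrative solution module for exercising the updated tests locally.
# These bodies are assumptions, not the canonical exemplar for the exercise.


def round_scores(student_scores):
    """Consume the input list and return a new list of scores rounded to ints."""
    rounded = []
    while student_scores:
        rounded.append(round(student_scores.pop()))
    return rounded


def count_failed_students(student_scores):
    """Count scores of 40 or below (a passing grade needs more than 40)."""
    failed = 0
    for score in student_scores:
        if score <= 40:
            failed += 1
    return failed


def above_threshold(student_scores, threshold):
    """Return every score greater than or equal to the threshold, in order."""
    return [score for score in student_scores if score >= threshold]


def letter_grades(highest):
    """Return the lower cutoffs for D, C, B, A, incrementing evenly from 41 toward the highest score."""
    interval = (highest - 40) // 4
    return [41 + (interval * step) for step in range(4)]


def student_ranking(student_scores, student_names):
    """Pair names with scores as '<rank>. <name>: <score>' strings."""
    return [f'{rank}. {name}: {score}'
            for rank, (name, score) in enumerate(zip(student_names, student_scores), start=1)]


def perfect_score(student_info):
    """Return the first [name, score] pair with a score of 100, or [] if there is none."""
    for name, score in student_info:
        if score == 100:
            return [name, score]
    return []
```

Dropping this file next to the updated `loops_test.py` and running `python -m pytest loops_test.py` should exercise the new assertions; deliberately breaking one of the bodies is an easy way to see the rewritten `Called ...` wording in a failure report.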