diff --git a/evap/grades/tests.py b/evap/grades/tests.py
index 62ad3e6c7210392ee4e6e4bffc9d2d73e38fd9f4..7877466f14b585e8918964a11fb5222e14ae3abf 100644
--- a/evap/grades/tests.py
+++ b/evap/grades/tests.py
@@ -214,7 +214,7 @@ class GradeSemesterViewTest(WebTest):
 
     def test_403_on_deleted(self):
         baker.make(Semester, pk=1, grade_documents_are_deleted=True)
-        self.app.get("/grades/semester/1", user=self.grade_publisher, status=403)
+        self.app.get(self.url, user=self.grade_publisher, status=403)
 
 
 class GradeCourseViewTest(WebTest):
@@ -231,14 +231,14 @@ class GradeCourseViewTest(WebTest):
     def test_does_not_crash(self):
         semester = baker.make(Semester, pk=1, grade_documents_are_deleted=False)
         baker.make(Evaluation, course=baker.make(Course, pk=1, semester=semester), state=Evaluation.State.PREPARED)
-        self.app.get("/grades/semester/1/course/1", user=self.grade_publisher, status=200)
+        self.app.get(self.url, user=self.grade_publisher, status=200)
 
     def test_403_on_archived_semester(self):
         archived_semester = baker.make(Semester, pk=1, grade_documents_are_deleted=True)
         baker.make(
             Evaluation, course=baker.make(Course, pk=1, semester=archived_semester), state=Evaluation.State.PREPARED
         )
-        self.app.get("/grades/semester/1/course/1", user=self.grade_publisher, status=403)
+        self.app.get(self.url, user=self.grade_publisher, status=403)
 
 
 class GradeEditTest(WebTest):
diff --git a/evap/results/tests/test_tools.py b/evap/results/tests/test_tools.py
index db05b202560ed5ea5114e40a583c329d8e3292ba..48d5670a159038b46dc707d720f5f3fcdb8afeeb 100644
--- a/evap/results/tests/test_tools.py
+++ b/evap/results/tests/test_tools.py
@@ -297,7 +297,7 @@ class TestCalculateAverageDistribution(TestCase):
 
     def test_get_single_result_rating_result(self):
         single_result_evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, is_single_result=True)
-        questionnaire = Questionnaire.objects.get(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
+        questionnaire = Questionnaire.single_result_questionnaire()
         contribution = baker.make(
             Contribution,
             contributor=baker.make(UserProfile),
diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py
index eb15ec5486fada558716c069b2952640d49a25c5..fe2ab06ab9d76dfe84a7ab665b3802f916266770 100644
--- a/evap/results/tests/test_views.py
+++ b/evap/results/tests/test_views.py
@@ -293,7 +293,7 @@ class TestResultsViewContributionWarning(WebTest):
     @classmethod
     def setUpTestData(cls):
         cls.manager = make_manager()
-        cls.semester = baker.make(Semester, id=3)
+        cls.semester = baker.make(Semester)
         contributor = baker.make(UserProfile)
 
         # Set up an evaluation with one question but no answers
@@ -301,7 +301,6 @@ class TestResultsViewContributionWarning(WebTest):
         student2 = baker.make(UserProfile)
         cls.evaluation = baker.make(
             Evaluation,
-            id=21,
             state=Evaluation.State.PUBLISHED,
             course=baker.make(Course, semester=cls.semester),
             participants=[student1, student2],
@@ -337,12 +336,10 @@ class TestResultsViewContributionWarning(WebTest):
 
 
 class TestResultsSemesterEvaluationDetailView(WebTestStaffMode):
-    url = "/results/semester/2/evaluation/21"
-
     @classmethod
     def setUpTestData(cls):
         cls.manager = make_manager()
-        cls.semester = baker.make(Semester, id=2)
+        cls.semester = baker.make(Semester)
 
         contributor = baker.make(UserProfile, email="contributor@institution.example.com")
         responsible = baker.make(UserProfile, email="responsible@institution.example.com")
@@ -351,7 +348,7 @@ class TestResultsSemesterEvaluationDetailView(WebTestStaffMode):
 
         # Normal evaluation with responsible and contributor.
         cls.evaluation = baker.make(
-            Evaluation, id=21, state=Evaluation.State.PUBLISHED, course=baker.make(Course, semester=cls.semester)
+            Evaluation, state=Evaluation.State.PUBLISHED, course=baker.make(Course, semester=cls.semester)
         )
 
         baker.make(
@@ -368,6 +365,8 @@ class TestResultsSemesterEvaluationDetailView(WebTestStaffMode):
             role=Contribution.Role.EDITOR,
         )
 
+        cls.url = f"/results/semester/{cls.semester.id}/evaluation/{cls.evaluation.id}"
+
     def test_questionnaire_ordering(self):
         top_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP)
         contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR)
@@ -473,6 +472,48 @@ class TestResultsSemesterEvaluationDetailView(WebTestStaffMode):
         url = f"/results/semester/{self.semester.id}/evaluation/{evaluation.id}"
         self.app.get(url, user=self.manager)
 
+    def test_unpublished_single_results_show_results(self):
+        """Regression test for #1621"""
+        # make regular evaluation with some answers
+        participants = baker.make(UserProfile, _bulk_create=True, _quantity=20)
+        evaluation = baker.make(
+            Evaluation,
+            state=Evaluation.State.REVIEWED,
+            course=baker.make(Course, semester=self.semester),
+            participants=participants,
+            voters=participants,
+        )
+        questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP)
+        likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=1)
+        evaluation.general_contribution.questionnaires.set([questionnaire])
+        make_rating_answer_counters(likert_question, evaluation.general_contribution)
+
+        # make single result
+        evaluation2: Evaluation = baker.make(
+            Evaluation,
+            state=Evaluation.State.REVIEWED,
+            course=evaluation.course,
+            is_single_result=True,
+            name_de="foo",
+            name_en="foo",
+            participants=participants,
+            voters=participants,
+        )
+        evaluation2.general_contribution.questionnaires.set([questionnaire])
+        make_rating_answer_counters(likert_question, evaluation2.general_contribution)
+
+        cache_results(evaluation)
+
+        url = f"/results/semester/{self.semester.id}/evaluation/{evaluation.id}"
+        response = self.app.get(url, user=self.manager)
+
+        # this one is the course result; neither of the two evaluations should use this template
+        self.assertTemplateUsed(response, "distribution_with_grade_disabled.html", count=1)
+        # Both evaluations should use this
+        self.assertTemplateUsed(response, "evaluation_result_widget.html", count=2)
+        # Both evaluations should use this, plus one more use for the questionnaire result
+        self.assertTemplateUsed(response, "distribution_with_grade.html", count=3)
+
 
 class TestResultsSemesterEvaluationDetailViewFewVoters(WebTest):
     @classmethod
@@ -823,7 +864,6 @@ class TestResultsOtherContributorsListOnExportView(WebTest):
         responsible = baker.make(UserProfile, email="responsible@institution.example.com")
         cls.evaluation = baker.make(
             Evaluation,
-            id=21,
             state=Evaluation.State.PUBLISHED,
             course=baker.make(Course, semester=cls.semester, responsibles=[responsible]),
         )
diff --git a/evap/results/views.py b/evap/results/views.py
index abc77aa54546a7b495a235101589992f9f08ce66..84a03d67cd84f1b969942f76308f2ba5e9f1abdb 100644
--- a/evap/results/views.py
+++ b/evap/results/views.py
@@ -21,12 +21,9 @@ from evap.results.tools import (
     RatingResult,
     TextResult,
     annotate_distributions_and_grades,
-    calculate_average_distribution,
     can_textanswer_be_seen_by,
-    distribution_to_grade,
     get_evaluations_with_course_result_attributes,
     get_results,
-    get_single_result_rating_result,
 )
 
 
@@ -337,16 +334,9 @@ def get_evaluations_of_course(course, request):
             course_evaluations += course.evaluations.filter(
                 state__in=[Evaluation.State.IN_EVALUATION, Evaluation.State.EVALUATED, Evaluation.State.REVIEWED]
             )
-
+        annotate_distributions_and_grades(course_evaluations)
         course_evaluations = get_evaluations_with_course_result_attributes(course_evaluations)
 
-        for course_evaluation in course_evaluations:
-            if course_evaluation.is_single_result:
-                course_evaluation.single_result_rating_result = get_single_result_rating_result(course_evaluation)
-            else:
-                course_evaluation.distribution = calculate_average_distribution(course_evaluation)
-                course_evaluation.avg_grade = distribution_to_grade(course_evaluation.distribution)
-
     return course_evaluations