diff --git a/evap/development/fixtures/test_data.json b/evap/development/fixtures/test_data.json
index b57868f6903be24d9e59db7ba187a76d832cee54..0e8d98a6aa366a8be6758fa43f0b5db73bf26267 100644
--- a/evap/development/fixtures/test_data.json
+++ b/evap/development/fixtures/test_data.json
@@ -128383,6 +128383,16 @@
     "html_content": "{% load evaluation_filters %}\r\n<i>(English version below)</i><br /><br /><br />\r\n\r\n\r\nHallo {{ user.first_name }},<br /><br />\r\n\r\ndie folgenden Evaluierungsergebnisse wurden soeben veröffentlicht:\r\n<ul>{% for evaluation in evaluations|order_by:\"full_name_de\" %}\r\n<li>{{ evaluation.full_name_de }}</li>\r\n{% endfor %}</ul><br />\r\n\r\nDie Ergebnisse können auf EvaP{% if not user.needs_login_key %} (<a href=\"{{ page_url }}\">{{ page_url }}</a>){% endif %} eingesehen werden.<br />\r\n{% if user.needs_login_key and login_url %}\r\nHier klicken zum Anmelden: <a href=\"{{ login_url }}\">{{ login_url }}</a><br />\r\n{% elif user.needs_login_key %}\r\nEin Link zum Anmelden wird per E-Mail zugesendet.<br />\r\n{% endif %}<br />\r\n\r\nBei Fragen und Rückmeldungen stehen wir gerne zur Verfügung (<a href=\"mailto:{{ contact_email }}\">{{ contact_email }}</a>).<br /><br />\r\n\r\nFreundliche Grüße,<br />\r\ndas Evaluierungsteam<br /><br />\r\n\r\n<i>(Dies ist eine automatisch versendete E-Mail.)</i><br /><br />\r\n\r\n<hr><br /><br />\r\n\r\nDear {{ user.first_name }},<br /><br />\r\n\r\nthe results of the following evaluations have just been published:\r\n<ul>{% for evaluation in evaluations|order_by:\"full_name_en\" %}\r\n<li>{{ evaluation.full_name_en }}</li>\r\n{% endfor %}</ul><br />\r\n\r\nYou can view the results on EvaP{% if not user.needs_login_key %} (<a href=\"{{ page_url }}\">{{ page_url }}</a>){% endif %}.<br />\r\n{% if user.needs_login_key and login_url %}\r\nClick here to login: <a href=\"{{ login_url }}\">{{ login_url }}</a><br />\r\n{% elif user.needs_login_key %}\r\nWe will send you a one-time login URL in a separate email.<br />\r\n{% endif %}<br />\r\n\r\nIf you have any questions or feedback, please let us know (<a href=\"mailto:{{ contact_email }}\">{{ contact_email }}</a>).<br /><br />\r\n\r\nKind regards,<br />\r\nthe Evaluation Team<br /><br />\r\n\r\n<i>(This is an automated message.)</i>"
   }
 },
+{
+  "model": "evaluation.emailtemplate",
+  "pk": 9,
+  "fields": {
+    "name": "Text Answer Review Reminder",
+    "subject": "[EvaP] Bitte Textantworten überprüfen / Please review text answers",
+    "plain_content": "(English version below)\r\n\r\n\r\nHallo {{ user.first_name }},\r\n\r\nes gibt noch nicht überprüfte Textantworten für eine oder mehrere Evaluierungen, bei denen der Evaluierungszeitraum abgelaufen ist und nicht mehr auf Notenveröffentlichungen gewartet werden muss. Bitte überprüfe die Textantworten für diese Evaluierungen möglichst bald:\r\n{% for evaluation in evaluations %}    - {{ evaluation.full_name_de }}\r\n{% endfor %}\r\n\r\n(Dies ist eine automatisch versendete E-Mail.)\r\n\r\n\r\n--\r\n\r\n\r\nDear {{ user.first_name }},\r\n\r\nthere are text answers not yet reviewed for one or more evaluations where the evaluation period has ended and there is no need to wait for grade publishing. Please review the text answers for these evaluations as soon as possible:\r\n{% for evaluation in evaluations %}    - {{ evaluation.full_name_en }}\r\n{% endfor %}\r\n\r\n(This is an automated message.)",
+    "html_content": "<i>(English version below)</i><br /><br /><br />\r\n\r\n\r\nHallo {{ user.first_name }},<br /><br />\r\n\r\nes gibt noch nicht überprüfte Textantworten für eine oder mehrere Evaluierungen, bei denen der Evaluierungszeitraum abgelaufen ist und nicht mehr auf Notenveröffentlichungen gewartet werden muss. Bitte überprüfe die Textantworten für diese Evaluierungen möglichst bald:\r\n<ul>{% for evaluation in evaluations %}\r\n<li>{{ evaluation.full_name_de }}</li>\r\n{% endfor %}</ul><br /><br />\r\n\r\n<i>(Dies ist eine automatisch versendete E-Mail.)</i><br /><br />\r\n\r\n<hr><br /><br />\r\n\r\nDear {{ user.first_name }},<br /><br />\r\n\r\nthere are text answers not yet reviewed for one or more evaluations where the evaluation period has ended and there is no need to wait for grade publishing. Please review the text answers for these evaluations as soon as possible:\r\n<ul>{% for evaluation in evaluations %}\r\n<li>{{ evaluation.full_name_en }}</li>\r\n{% endfor %}</ul><br /><br />\r\n\r\n<i>(This is an automated message.)</i>"
+  }
+},
 {
   "model": "rewards.rewardpointredemptionevent",
   "pk": 1,
diff --git a/evap/evaluation/management/commands/send_reminders.py b/evap/evaluation/management/commands/send_reminders.py
index e01e9a0b9d1479f094e3200d61608881e3e872d1..aabb3005277b4c853d2d4ed7dfda7a0a115ee8cc 100644
--- a/evap/evaluation/management/commands/send_reminders.py
+++ b/evap/evaluation/management/commands/send_reminders.py
@@ -12,10 +12,16 @@ logger = logging.getLogger(__name__)
 
 @log_exceptions
 class Command(BaseCommand):
-    help = "Sends email reminders X days before evaluation evaluation ends."
+    help = "Sends email reminders X days before evaluation ends and reminds managers to review text answers."
 
     def handle(self, *args, **options):
         logger.info("send_reminders called.")
+        self.send_student_reminders()
+        self.send_textanswer_reminders()
+        logger.info("send_reminders finished.")
+
+    @staticmethod
+    def send_student_reminders():
         check_dates = []
 
         # Collect end-dates of evaluations whose participants need to be reminded today.
@@ -37,5 +43,10 @@ class Command(BaseCommand):
             EmailTemplate.send_reminder_to_user(
                 recipient, first_due_in_days=first_due_in_days, due_evaluations=due_evaluations
             )
-        logger.info("send_reminders finished.")
-        logger.info("sent reminders to {} people.".format(len(recipients)))
+        logger.info("sent due evaluation reminders to {} people.".format(len(recipients)))
+
+    @staticmethod
+    def send_textanswer_reminders():
+        if datetime.date.today().weekday() in settings.TEXTANSWER_REVIEW_REMINDER_WEEKDAYS:
+            EmailTemplate.send_textanswer_reminder()
+            logger.info("sent text answer review reminders.")
diff --git a/evap/evaluation/migrations/0126_add_textanswer_review_email_template.py b/evap/evaluation/migrations/0126_add_textanswer_review_email_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..c124b5cf101837ea60867a5ce05f1ec4ab40fae6
--- /dev/null
+++ b/evap/evaluation/migrations/0126_add_textanswer_review_email_template.py
@@ -0,0 +1,25 @@
+from django.db import migrations
+
+TEMPLATE_NAME = "Text Answer Review Reminder"
+TEMPLATE_SUBJECT = "[EvaP] Bitte Textantworten überprüfen / Please review text answers"
+
+
+def insert_emailtemplate(apps, _schema_editor):
+    EmailTemplate = apps.get_model("evaluation", "EmailTemplate")
+    EmailTemplate.objects.create(name=TEMPLATE_NAME, subject=TEMPLATE_SUBJECT, plain_content="", html_content="")
+
+
+def remove_emailtemplate(apps, _schema_editor):
+    EmailTemplate = apps.get_model("evaluation", "EmailTemplate")
+    EmailTemplate.objects.filter(name=TEMPLATE_NAME).delete()
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('evaluation', '0125_use_lists_for_ordering'),
+    ]
+
+    operations = [
+        migrations.RunPython(insert_emailtemplate, reverse_code=remove_emailtemplate),
+    ]
diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py
index b57ddafcd21c77a0acb589fc7c5151ce940c4e6d..60cb31e9a4367d054e71630745fecbba72e07404 100644
--- a/evap/evaluation/models.py
+++ b/evap/evaluation/models.py
@@ -1743,6 +1743,7 @@ class EmailTemplate(models.Model):
     LOGIN_KEY_CREATED = "Login Key Created"
     EVALUATION_STARTED = "Evaluation Started"
     DIRECT_DELEGATION = "Direct Delegation"
+    TEXT_ANSWER_REVIEW_REMINDER = "Text Answer Review Reminder"
 
     class Recipients(models.TextChoices):
         ALL_PARTICIPANTS = "all_participants", _("all participants")
@@ -1952,5 +1953,19 @@ class EmailTemplate(models.Model):
                     evaluations_per_participant[participant].add(evaluation)
 
         for participant, evaluation_set in evaluations_per_participant.items():
-            body_params = {"user": participant, "evaluations": list(evaluation_set)}
+            body_params = {"user": participant, "evaluations": evaluation_set}
             template.send_to_user(participant, {}, body_params, use_cc=True)
+
+    @classmethod
+    def send_textanswer_reminder(cls):
+        template = cls.objects.get(name=cls.TEXT_ANSWER_REVIEW_REMINDER)
+        evaluations = [
+            evaluation
+            for evaluation in Evaluation.objects.filter(state=Evaluation.State.EVALUATED)
+            if evaluation.textanswer_review_state == Evaluation.TextAnswerReviewState.REVIEW_URGENT
+        ]
+        evaluations = sorted(evaluations, key=lambda evaluation: evaluation.full_name)
+        managers = Group.objects.get(name="Manager").user_set.all()
+        for manager in managers:
+            body_params = {"user": manager, "evaluations": evaluations}
+            template.send_to_user(manager, {}, body_params, use_cc=False)
diff --git a/evap/evaluation/tests/test_commands.py b/evap/evaluation/tests/test_commands.py
index f0f23478782232cc07f16e562c5264569da658da..128341a34e5b8c3c946c9c27a14b393a96b8e2ff 100644
--- a/evap/evaluation/tests/test_commands.py
+++ b/evap/evaluation/tests/test_commands.py
@@ -24,8 +24,10 @@ from evap.evaluation.models import (
     Questionnaire,
     RatingAnswerCounter,
     Semester,
+    TextAnswer,
     UserProfile,
 )
+from evap.evaluation.tests.tools import make_manager
 
 
 class TestAnonymizeCommand(TestCase):
@@ -339,6 +341,25 @@ class TestSendRemindersCommand(TestCase):
         self.assertEqual(mock.call_count, 0)
         self.assertEqual(len(mail.outbox), 0)
 
+    @override_settings(TEXTANSWER_REVIEW_REMINDER_WEEKDAYS=list(range(7)))
+    def test_send_text_answer_review_reminder(self):
+        make_manager()
+        evaluation = baker.make(
+            Evaluation,
+            state=Evaluation.State.EVALUATED,
+            can_publish_text_results=True,
+            wait_for_grade_upload_before_publishing=False,
+        )
+        baker.make(
+            TextAnswer,
+            contribution=evaluation.general_contribution,
+        )
+
+        with patch("evap.evaluation.models.EmailTemplate.send_to_user") as mock:
+            management.call_command("send_reminders")
+
+        self.assertEqual(mock.call_count, 1)
+
 
 class TestLintCommand(TestCase):
     @staticmethod
diff --git a/evap/settings.py b/evap/settings.py
index 8ac7da3dd4c8da8106e1c9b649179dd8f7c43191..37ce6a5b6d51dea43d48e863366d618e400886cf 100644
--- a/evap/settings.py
+++ b/evap/settings.py
@@ -58,6 +58,10 @@ REWARD_POINTS = [
 # days before end date to send reminder
 REMIND_X_DAYS_AHEAD_OF_END_DATE = [2, 0]
 
+# days of the week on which managers are reminded to handle urgent text answer reviews
+# where Monday is 0 and Sunday is 6
+TEXTANSWER_REVIEW_REMINDER_WEEKDAYS = [3]
+
 # email domains for the internal users of the hosting institution used to
 # figure out who is an internal user
 INSTITUTION_EMAIL_DOMAINS = ["institution.example.com"]
diff --git a/evap/staff/views.py b/evap/staff/views.py
index 6a8cfc0013a8938906f9b5fc0dcdf103501ef390..e332af52df17ef301fdd4daf161c3df9edf8b1c9 100644
--- a/evap/staff/views.py
+++ b/evap/staff/views.py
@@ -2169,10 +2169,11 @@ def template_edit(request, template_id):
     if template.name == EmailTemplate.STUDENT_REMINDER:
         available_variables += ["first_due_in_days", "due_evaluations"]
     elif template.name in [
-        EmailTemplate.PUBLISHING_NOTICE_CONTRIBUTOR,
-        EmailTemplate.PUBLISHING_NOTICE_PARTICIPANT,
         EmailTemplate.EDITOR_REVIEW_NOTICE,
         EmailTemplate.EDITOR_REVIEW_REMINDER,
+        EmailTemplate.PUBLISHING_NOTICE_CONTRIBUTOR,
+        EmailTemplate.PUBLISHING_NOTICE_PARTICIPANT,
+        EmailTemplate.TEXT_ANSWER_REVIEW_REMINDER,
     ]:
         available_variables += ["evaluations"]
     elif template.name == EmailTemplate.EVALUATION_STARTED: