Commit

adding latex parsing to main package and adding method for new quiz mult choice questions

dsavransky committed Nov 1, 2024
1 parent c38be58 commit 220f679
Showing 2 changed files with 173 additions and 3 deletions.
121 changes: 118 additions & 3 deletions cornellGrading/cornellGrading.py
@@ -16,6 +16,8 @@
 import subprocess
 import shutil
 import requests
+import uuid
+from cornellGrading.utils import convalllatex

 try:
     from cornellGrading.pandocHTMLParser import pandocHTMLParser
@@ -2054,7 +2056,7 @@ def setupNewQuizSelfAssessment(
         assignmentNum,
         nprobs,
         solfile,
-        item,
+        npoints=3,
         preamble="",
         selfGradeDueDelta=7,
         selfGradeReleasedDelta=3,
@@ -2070,8 +2072,8 @@
                 Number of homework problems
             solfile (str):
                 Full path to solutions file to upload
-            item (dict):
-                New Quiz item payload dictionary. See `addNewQuizItem`
+            npoints (int):
+                Number of points each question is scored out of. Defaults to 3.
             preamble (str):
                 Preamble for all naming.
                 See :py:meth:`cornellGrading.cornellGrading.getHomework` for details.
@@ -2132,6 +2134,9 @@
             }
         )

+        # create item
+        item = self.genNpointNewQuizItem(npoints)
+
         # add new quiz items
         for j in range(nprobs):
             tmp = item.copy()
@@ -2256,3 +2261,113 @@ def importNewQuizSelfAssessment(
         self.uploadScores(hw, netids, scores)

         return submittedScoreNoAssignment, submittedAssignmentNoScore
+
+    def genNpointNewQuizItem(self, n, item_body=None, title=None):
+        """Generate a New Quiz multiple choice, variable point question with
+        answers ranging from 0 to n and each answer worth the equivalent
+        number of points.
+
+        Args:
+            n (int):
+                Number of points possible. Question will have n+1 options
+                (from 0 to n) with each response worth the equivalent number
+                of points
+            item_body (str, optional):
+                Question text (html formatted). If None (default) this is set to:
+                <p>Enter your score based on the rubric in the syllabus</p>
+            title (str, optional):
+                Question title. If None (default) this is set to:
+                HW Problem Score
+
+        Returns:
+            dict:
+                New Quiz Multiple choice question definition
+        """
+
+        # set default body text and title
+        if item_body is None:
+            item_body = "<p>Enter your score based on the rubric in the syllabus</p>"
+
+        if title is None:
+            title = "HW Problem Score"
+
+        # generate UUIDs
+        uuids = [uuid.uuid4() for _ in range(n + 1)]
+
+        # create choices and values dict lists
+        choices = []
+        values = []
+        for j in range(n + 1):
+            choices.append(
+                {"id": f"{uuids[j]}", "position": j + 1, "item_body": f"<p>{j}</p>"}
+            )
+            values.append({"value": f"{uuids[j]}", "points": j})
+
+        q = {
+            "position": 0,
+            "points_possible": float(n),
+            "entry_type": "Item",
+            "status": "immutable",
+            "entry": {
+                "title": title,
+                "item_body": item_body,
+                "calculator_type": "none",
+                "interaction_data": {"choices": choices},
+                "properties": {
+                    "shuffle_rules": {"choices": {"to_lock": [], "shuffled": False}},
+                    "vary_points_by_answer": True,
+                },
+                "scoring_data": {
+                    "value": f"{uuids[-1]}",
+                    "values": values,
+                },
+                "answer_feedback": {f"{uuids[0]}": ""},
+                "scoring_algorithm": "VaryPointsByAnswer",
+                "interaction_type_slug": "choice",
+                "feedback": {},
+            },
+        }
+
+        return q
+
+    def genNewQuizMultipleChoice(self, question, options, correct_ind, points=1):
+        """Generate a New Quiz multiple choice question
+
+        Args:
+            question (str):
+                Question text. Any LaTeX enclosed in $ or $$ is converted to
+                Canvas equation images.
+            options (list):
+                List of answer choice strings (LaTeX allowed).
+            correct_ind (int):
+                Index of the correct answer in the options list.
+            points (int):
+                Number of points the question is worth. Defaults to 1.
+
+        Returns:
+            dict:
+                New Quiz Multiple choice question definition
+        """
+
+        # generate UUIDs
+        uuids = [uuid.uuid4() for _ in range(len(options))]
+
+        # create choices dict list
+        choices = []
+        for j in range(len(options)):
+            choices.append(
+                {
+                    "id": f"{uuids[j]}",
+                    "position": j + 1,
+                    "item_body": f"<p>{convalllatex(options[j])}</p>",
+                }
+            )
+
+        q = {
+            "position": 0,
+            "points_possible": float(points),
+            "properties": {},
+            "entry_type": "Item",
+            "entry_editable": True,
+            "stimulus_quiz_entry_id": "",
+            "status": "mutable",
+            "entry": {
+                "title": question,
+                "item_body": f"<p>{convalllatex(question)}</p>",
+                "calculator_type": "none",
+                "interaction_data": {"choices": choices},
+                "properties": {
+                    "shuffle_rules": {"choices": {"to_lock": [], "shuffled": False}},
+                    "vary_points_by_answer": False,
+                },
+                "scoring_data": {"value": f"{uuids[correct_ind]}"},
+                "answer_feedback": {},
+                "scoring_algorithm": "Equivalence",
+                "interaction_type_slug": "choice",
+                "feedback": {},
+            },
+        }
+
+        return q
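
For reference, a minimal usage sketch of the two new generators (not part of the commit; assumes `c` is an authenticated cornellGrading instance, with the item dicts posted to Canvas by the existing New Quiz machinery):

    # sketch only: `c` is assumed to be a connected cornellGrading instance
    item = c.genNpointNewQuizItem(3)
    assert item["points_possible"] == 3.0
    assert len(item["entry"]["interaction_data"]["choices"]) == 4  # options 0..3

    q = c.genNewQuizMultipleChoice(
        "What is $\\int_0^1 x\\,dx$?",
        ["$1$", "$1/2$", "$0$"],
        correct_ind=1,  # second option ($1/2$) is correct
    )
    assert q["scoring_data"]["value"] == q["entry"]["interaction_data"]["choices"][1]["id"]

    # the self-assessment setup now builds its item internally from npoints, e.g.
    # c.setupNewQuizSelfAssessment(1, 4, "/path/to/hw1_sols.pdf", npoints=3)  # hypothetical path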
55 changes: 55 additions & 0 deletions cornellGrading/utils.py
@@ -0,0 +1,55 @@
+import urllib.parse
+import re
+
+
+def convalllatex(text):
+    """Convert all instances of LaTeX in a string to Canvas-compatible images
+
+    Args:
+        text (str):
+            Input text
+
+    Returns:
+        str:
+            Output text
+    """
+
+    p = re.compile(r"\${1,2}(.*?)\${1,2}")
+
+    return p.sub(convlatex, text)
+
+
+def convlatex(texstr):
+    """Convert input LaTeX string to Canvas's img html
+
+    Args:
+        texstr (str or re.Match):
+            LaTeX-formatted equation string
+
+    Returns:
+        str:
+            Canvas-style image string
+    """
+
+    # handle case where input is re.Match
+    if isinstance(texstr, re.Match):
+        texstr = texstr.groups()[0]
+
+    # replace problematic commands in the LaTeX
+    texsubdict = {
+        r"\\textrm": "",
+    }
+    for key, val in texsubdict.items():
+        texstr = re.sub(key, val, texstr)
+
+    convstr = urllib.parse.quote(urllib.parse.quote(texstr))
+    qtxt = (
+        f"""<img class="equation_image" title="{texstr}" """
+        f"""src="/equation_images/{convstr}?scale=1" """
+        f"""alt="LaTeX: {texstr}" """
+        f""" data-equation-content="{texstr}" data-ignore-a11y-check="">"""
+    )
+
+    return qtxt
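
Taken on its own, utils.py can be exercised directly; a quick sketch of the conversion (the input string here is illustrative):

    from cornellGrading.utils import convalllatex

    html = convalllatex("Compute $x^2$ for $x = 3$.")
    # each $...$ span is double URL-encoded into a Canvas equation image, e.g.
    # <img class="equation_image" title="x^2" src="/equation_images/x%255E2?scale=1" ...>
    print(html)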
