Skip to content

Commit

Permalink
feat: upgrade llamaindex to 0.11.1 to use Pydantic v2
Browse files · Browse the repository at this point in the history
  • Loading branch information
cptrodgers committed Aug 24, 2024
1 parent d7a286b commit 7569b35
Show file tree
Hide file tree
Showing 7 changed files with 218 additions and 122 deletions.
230 changes: 152 additions & 78 deletions ikigai_ai/poetry.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion ikigai_ai/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ license = "AGPL3"
[tool.poetry.dependencies]
python = ">=3.12,<3.13"
python-dotenv = "^1.0.1"
llama-index = "^0.10.58"
llama-index = "^0.11.1"
fastapi = "^0.111.1"
unstructured = "^0.15.0"
black = "^24.4.2"
Expand Down
25 changes: 12 additions & 13 deletions ikigai_ai/src/index.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
from typing import Dict, Any

from dotenv import load_dotenv

load_dotenv()
Expand All @@ -13,12 +11,13 @@
from enum import Enum
from pydantic import BaseModel

from ikigai_ai.src.quiz_generator.single_choice import generate_single_choice_quizzes
from ikigai_ai.src.quiz_generator.single_choice import generate_single_choice_quizzes, SingleChoiceList
from ikigai_ai.src.quiz_generator.multiple_choice import (
generate_multiple_choice_quizzes,
MultipleChoiceList,
)
from ikigai_ai.src.quiz_generator.fill_in_blank import generate_fill_in_blank_quizzes
from ikigai_ai.src.quiz_generator.select_option import generate_select_options_quizzes
from ikigai_ai.src.quiz_generator.fill_in_blank import generate_fill_in_blank_quizzes, FillInBlankList
from ikigai_ai.src.quiz_generator.select_option import generate_select_options_quizzes, SelectOptionList

app = FastAPI()

Expand All @@ -42,10 +41,10 @@ class QuizType(str, Enum):

class GenerateQuizResponse(BaseModel):
quiz_type: QuizType
single_choice_data: Dict[str, Any] | None = None
multiple_choice_data: Dict[str, Any] | None = None
fill_in_blank_data: Dict[str, Any] | None = None
select_options_data: Dict[str, Any] | None = None
single_choice_data: SingleChoiceList | None = None
multiple_choice_data: MultipleChoiceList | None = None
fill_in_blank_data: FillInBlankList | None = None
select_options_data: SelectOptionList | None = None


@app.post("/quizzes/generate-single-choice")
Expand All @@ -54,7 +53,7 @@ def gen_single_choice_quizzes(req: GenerateQuizRequest) -> GenerateQuizResponse:
req.user_context,
req.subject,
req.total_quizzes,
).dict()
)
return GenerateQuizResponse(
quiz_type=QuizType.SingleChoice, single_choice_data=single_choice_data
)
Expand All @@ -66,7 +65,7 @@ def gen_single_choice_quizzes(req: GenerateQuizRequest):
req.user_context,
req.subject,
req.total_quizzes,
).dict()
)

return GenerateQuizResponse(
quiz_type=QuizType.MultipleChoice, multiple_choice_data=multiple_choice_data
Expand All @@ -79,7 +78,7 @@ def gen_single_choice_quizzes(req: GenerateQuizRequest):
req.user_context,
req.subject,
req.total_quizzes,
).dict()
)
return GenerateQuizResponse(
quiz_type=QuizType.FillInBlank, fill_in_blank_data=fill_in_blank_data
)
Expand All @@ -91,7 +90,7 @@ def gen_single_choice_quizzes(req: GenerateQuizRequest):
req.user_context,
req.subject,
req.total_quizzes,
).dict()
)
return GenerateQuizResponse(
quiz_type=QuizType.FillInBlank, select_options_data=select_options_data
)
18 changes: 12 additions & 6 deletions ikigai_ai/src/quiz_generator/fill_in_blank.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from typing import List

from pydantic.v1 import BaseModel, Field
from pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.program.openai import OpenAIPydanticProgram


class FillInBlank(BaseModel):
Expand All @@ -29,7 +29,6 @@ def generate_fill_in_blank_quizzes(
subject: str,
total_question: int,
) -> FillInBlankList:
sllm = llm.as_structured_llm(output_cls=FillInBlankList)
prompt = f"""
Subject:\n {subject}
More detail:\n {user_context}
Expand All @@ -38,6 +37,13 @@ def generate_fill_in_blank_quizzes(
Generate a short paragraph with {total_question} Fill in Blank questions.
"""

input_msg = ChatMessage.from_str(prompt)
output = sllm.chat([input_msg])
return output.raw
program = OpenAIPydanticProgram.from_defaults(
output_cls=FillInBlankList, prompt_template_str=prompt, verbose=True
)

output = program(
subject=subject,
user_context=user_context,
total_question=total_question,
)
return output
18 changes: 12 additions & 6 deletions ikigai_ai/src/quiz_generator/multiple_choice.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from typing import List

from pydantic.v1 import BaseModel
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.program.openai import OpenAIPydanticProgram


class MultipleChoice(BaseModel):
Expand All @@ -28,7 +28,6 @@ def generate_multiple_choice_quizzes(
subject: str,
total_question: int,
) -> MultipleChoiceList:
sllm = llm.as_structured_llm(output_cls=MultipleChoiceList)
prompt = f"""
Subject:\n {subject}
More detail:\n {user_context}
Expand All @@ -43,6 +42,13 @@ def generate_multiple_choice_quizzes(
Generate {total_question} multiple choice with 2 correct answers question about {subject}
"""

input_msg = ChatMessage.from_str(prompt)
output = sllm.chat([input_msg])
return output.raw
program = OpenAIPydanticProgram.from_defaults(
output_cls=MultipleChoiceList, prompt_template_str=prompt, verbose=True
)

output = program(
subject=subject,
user_context=user_context,
total_question=total_question,
)
return output
28 changes: 17 additions & 11 deletions ikigai_ai/src/quiz_generator/select_option.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from typing import List

from pydantic.v1 import BaseModel, Field
from pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.program.openai import OpenAIPydanticProgram


class SelectOption(BaseModel):
Expand All @@ -13,8 +13,8 @@ class SelectOption(BaseModel):
correct_answer: str = Field(..., description="Correct answer")


class FillInBlankList(BaseModel):
"""Data Model for Select Option Question List"""
class SelectOptionList(BaseModel):
"""Data Model for Select Option Question List. Include content and select options list"""

content: str = Field(
..., description="The paragraph content Select Option questions"
Expand All @@ -31,16 +31,22 @@ def generate_select_options_quizzes(
user_context: str,
subject: str,
total_question: int,
) -> FillInBlankList:
sllm = llm.as_structured_llm(output_cls=FillInBlankList)
) -> SelectOptionList:
prompt = f"""
Subject:\n {subject}
More detail:\n {user_context}
Wrap select option quiz in paragraph with [Q.[position]], example: [Q.1], [Q.2].
You must Wrap position of the quiz in paragraph with [Q.[position]], example: [Q.1], [Q.2].
Generate a short paragraph with maximum {total_question} Select Option questions.
Generate a paragraph with maximum {total_question} Select Option quizzes, remember to wrap the quiz
"""

input_msg = ChatMessage.from_str(prompt)
output = sllm.chat([input_msg])
return output.raw
program = OpenAIPydanticProgram.from_defaults(
output_cls=SelectOptionList, prompt_template_str=prompt, verbose=True
)

output = program(
subject=subject,
user_context=user_context,
total_question=total_question,
)
return output
19 changes: 12 additions & 7 deletions ikigai_ai/src/quiz_generator/single_choice.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from typing import List

from pydantic.v1 import BaseModel
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.program.openai import OpenAIPydanticProgram


class SingleChoice(BaseModel):
Expand All @@ -28,14 +28,19 @@ def generate_single_choice_quizzes(
subject: str,
total_question: int,
) -> SingleChoiceList:
sllm = llm.as_structured_llm(output_cls=SingleChoiceList)
prompt = f"""
Subject:\n {subject}
More detail:\n {user_context}
Generate {total_question} single choice question
"""

input_msg = ChatMessage.from_str(prompt)
output = sllm.chat([input_msg])
return output.raw
program = OpenAIPydanticProgram.from_defaults(
output_cls=SingleChoiceList, prompt_template_str=prompt, verbose=True
)

output = program(
subject=subject,
user_context=user_context,
total_question=total_question,
)
return output

0 comments on commit 7569b35

Please sign in to comment.