-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathLocal_model.py
39 lines (31 loc) · 1.13 KB
/
Local_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import streamlit as st
import pandas as pd
import numpy as np
from st_chat_message import message
import fitz
from dotenv import load_dotenv
load_dotenv()
from utils import openaikey
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import (
    PromptTemplate,
    Settings,
    SimpleDirectoryReader,
    VectorStoreIndex,
)
# Configure a local HuggingFace embedding model as the global embed model,
# build a vector index over local documents, install a custom QA prompt,
# and run a single query against the resulting query engine.
model_name = "BAAI/bge-small-en-v1.5"
# NOTE(review): hard-coded device='cuda' will raise on CPU-only machines —
# consider device='cuda' only when torch.cuda.is_available().
embedding = HuggingFaceEmbedding(model_name=model_name, device='cuda')
Settings.embed_model = embedding

# BUG FIX: the original assigned the unbound method
# `VectorStoreIndex.from_documents` without calling it, and never defined
# `query_engine`. Load documents and actually build the index + engine.
# TODO(review): confirm the documents directory — "data" is an assumption.
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

# llama_index's text_qa_template formats with {context_str} and {query_str}
# (the original used {query}, which the synthesizer does not supply, and
# contained a stray ')' inside the literal).
qa_prompt_tmpl_str = (
    """context information is below
----------------------------
{context_str}
----------------------------
Given the above context, I want you to think
step-by-step and answer the following questions.
Focus on the actions and key points. In case you
do not know say 'I don't know'.
Query : {query_str}
Answer :
"""
)
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
# Swap the default QA prompt on the response synthesizer for ours.
query_engine.update_prompts({"response_synthesizer:text_qa_template": qa_prompt_tmpl})
response = query_engine.query('What is this repository about?')
print(response)