forked from ModelPulse/BreakYourLLM
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
30 lines (20 loc) · 915 Bytes
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
from dotenv import load_dotenv
import os
import argparse
def main():
    """CLI entry point: parse arguments, load environment config, run the pipeline.

    Command-line arguments:
        --config: path to a .env file with environment settings
                  (default: ``config/.env``).
        --file:   path to a CSV file with guidelines (default: ``data.csv``).
    """
    parser = argparse.ArgumentParser(description="Script with configs and CLI args.")
    parser.add_argument("--config", type=str, help="Path to the config file.", default="config/.env", required=False)
    parser.add_argument("--file", type=str, help="Path to CSV file with guidelines.", default="data.csv", required=False)
    args = parser.parse_args()

    # Load the .env file BEFORE importing project modules: sources.* may read
    # environment variables at import time, so the order here matters.
    load_dotenv(dotenv_path=args.config)

    # Deferred import — must come after load_dotenv (see note above).
    from sources.full_pipeline import run_pipeline

    run_pipeline(args.file)
# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()