-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun_evaluation.py
45 lines (35 loc) · 1.65 KB
/
run_evaluation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import os
import hydra
from omegaconf import DictConfig
import logging
from eval.prepare_evaluation import generate_finetuned_recipes
from eval.evaluation import evaluate
from generation.generation import generate_recipes
@hydra.main(config_path="config", config_name="config_evaluation", version_base="1.3")
def main(params: DictConfig) -> None:
    """Run the recipe-evaluation pipeline end to end.

    Generates recipes with the finetuned model selected by
    ``params['main']['model_type']`` and then scores them with ``evaluate``.

    Args:
        params: Hydra-composed configuration. The section named after
            ``params['main']['model_type']`` is merged into ``params['main']``.

    Raises:
        ValueError: If ``model_type`` does not name a known model section.
    """
    # Basic logging setup for the whole run.
    logger = logging.getLogger(__name__)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)

    # The config section names match the model_type values one-to-one, so the
    # per-model branch chain collapses to a membership check + single update.
    model_type = params['main']['model_type']
    if model_type not in ('gpt2', 'opt-125m', 'opt-350m', 'qlora'):
        raise ValueError(f"Unknown model type: {model_type}")
    params['main'].update(params[model_type])

    # Hydra changes the working directory per run, so anchor the (already
    # trained) model path at the original launch directory.
    # NOTE(review): plain string concatenation assumes model_name_or_path
    # begins with a path separator — kept as-is to preserve behavior.
    params['main']['model_name_or_path'] = hydra.utils.get_original_cwd() + params['main']['model_name_or_path']

    generate_finetuned_recipes(params=params, logger=logger)
    logger.info("Finetuned recipes for evaluation have been successfully generated!")

    results = evaluate(params=params, logger=logger)
    print(results)
    logger.info("Evaluation successfully finished!")
# Script entry point: Hydra parses CLI overrides and supplies the composed
# DictConfig to main() via the @hydra.main decorator.
if __name__ == "__main__":
    main()