# coding=utf-8
#
# Copyright 2023 Nanyang Technological University Fangkai Jiao
#
# Part of this code is based on the source code of Transformers
# (arXiv:1910.03771)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging
import os
import sys

import deepspeed
import hydra
import torch
from omegaconf import DictConfig, OmegaConf
from torch import distributed as dist
from transformers import AutoTokenizer

from general_util.evaluator import evaluate_fn as evaluate
from general_util.logger import setting_logger
from general_util.training_utils import set_seed
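
# Module-level logger, assigned in main() once the output directory is known.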
logger: logging.Logger
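# Allow TF32 for fp32 matmuls on Ampere+ GPUs: faster, at a small precision cost.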
torch.backends.cuda.matmul.allow_tf32 = True


@hydra.main(config_path="conf", config_name="config", version_base="1.2")
def main(cfg: DictConfig):
    if cfg.local_rank == -1 or cfg.no_cuda:
        device = str(torch.device("cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu"))
        cfg.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(cfg.local_rank)
        device = str(torch.device("cuda", cfg.local_rank))
        deepspeed.init_distributed()
        cfg.n_gpu = 1
        cfg.world_size = dist.get_world_size()
    cfg.device = device
    cfg.ddp_eval = False

    global logger
    logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   cfg.local_rank, cfg.device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)
    logger.warning(f"CPU cores: {os.cpu_count()}")

    # Set seed
    set_seed(cfg)

    # Test
    results = {}
    if cfg.do_eval:
        # if not cfg.ddp_eval and cfg.local_rank not in [-1, 0]:
        #     return results

        checkpoints = [cfg.output_dir]
        if cfg.save_best:
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        elif cfg.prediction_cfg.best_checkpoint and os.path.exists(cfg.prediction_cfg.best_checkpoint):
            checkpoints = [cfg.prediction_cfg.best_checkpoint]
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        elif cfg.eval_sub_path:
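            # Collect every checkpoint directory under `eval_sub_path` that holds full
            # model weights; fall back to adapter-only checkpoints (e.g. a PEFT/LoRA
            # `adapter_model.bin`) if none are found.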
            checkpoints = sorted(set(
                os.path.dirname(c) for c in
                glob.glob(cfg.output_dir + f"/{cfg.eval_sub_path}/" + "pytorch_model*.bin", recursive=True)
            ))
            if not checkpoints:
                checkpoints = sorted(set(
                    os.path.dirname(c) for c in
                    glob.glob(cfg.output_dir + f"/{cfg.eval_sub_path}/" + "adapter_model.bin", recursive=True)
                ))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)

        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""

            split = "dev"

            if "model_eval" in cfg:
                model = hydra.utils.call(cfg.model_eval, checkpoint)
            else:
                model = hydra.utils.call(cfg.model, checkpoint)
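
            # Wrap the model in a DeepSpeed engine built from the Hydra-resolved
            # `ds_cfg`; with a ZeRO stage-3 config, parameters stay partitioned across
            # ranks, keeping per-GPU memory low during evaluation.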
            ds_config = OmegaConf.to_container(cfg.ds_cfg, resolve=True)
            model, _, _, _ = deepspeed.initialize(model=model,
                                                  model_parameters=model.parameters(),
                                                  config=ds_config)
            tokenizer = AutoTokenizer.from_pretrained(checkpoint)
            cfg.model_name_or_path = checkpoint

            if cfg.test_file:
                prefix = "test" + (f"-{prefix}" if prefix != "" else "")
                split = "test"
            result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)
            result = {f"{k}_{global_step}": v for k, v in result.items()}
            results.update(result)

    return results
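

# Example launch (hypothetical overrides; adjust to your config). The shim below
# rewrites the launcher's `--local_rank=N` flag into the Hydra override `local_rank=N`:
#   deepspeed --num_gpus=8 ds_inference_zero.py do_eval=True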
if __name__ == "__main__":
    os.environ["HYDRA_FULL_ERROR"] = "1"

    hydra_formatted_args = []
    # Convert the CLI params added by torch.distributed.launch into Hydra format
    for arg in sys.argv:
        if arg.startswith("--"):
            hydra_formatted_args.append(arg[len("--"):])
        else:
            hydra_formatted_args.append(arg)
    sys.argv = hydra_formatted_args
    print(sys.argv)

    main()