- # Remove all <div>. This is required to make indentation work in code blocks.
- val = re.sub(div_pattern, "", val)
- # Remove all <span>. This is required to make underscores work in code blocks.
- val = re.sub(span_pattern, "", val)
- # Convert html to markdown
- val = markdownify.markdownify(val).strip()
- # Reformat code
- val = reformat_code(val)
-
- # Remove noisy "[number] / [number]" at the beginning
- noise = re.search(regenerate_pattern, val)
- if noise and noise.start() == 0:
- val = val[noise.end() :]
- # Remove noisy "Copy[number] chars / [number] words"
- val = re.sub(copy_chars_pattern, "", val)
- # Remove empty code block ```\nCopy code\n```
- val = re.sub(copy_code_pattern, "", val)
-
- # Strip
- val = val.replace("\n\n\n", "\n").strip()
-
- return val
-
-
-def contain_blocked_words(val: str) -> bool:
- blocked_words = ["openai", "chatgpt"]
- for w in blocked_words:
- if w in val.lower():
- return True
- return False
-
-
-def clean_html_one_sample(sample):
- roles = ["human", "gpt"]
-
- if len(sample["conversations"]) <= 1:
- return (sample, 1)
-
- # Adjust the offset for cases like https://sharegpt.com/c/VyaZlh4
- if sample["conversations"][0]["from"] != "human":
- sample["conversations"] = sample["conversations"][1:]
- if len(sample["conversations"]) <= 1:
- return (sample, 1)
-
- if sample["conversations"][-1]["from"] == "human":
- sample["conversations"] = sample["conversations"][:-1]
- if len(sample["conversations"]) <= 1:
- return (sample, 1)
-
- char_count = 0
- new_conversations = []
- for i, c in enumerate(sample["conversations"]):
- if c["from"] != roles[i % 2]:
- return (sample, 2)
-
- if contain_blocked_words(c["value"]):
- return (sample, 3)
-
- try:
- new_val = html_to_markdown(c["value"])
- except (bs4.builder.ParserRejectedMarkup, AssertionError):
- return (sample, 4)
-
- # Filter empty answers like https://sharegpt.com/c/mrllZ6u
- if not new_val or not new_val[0].isprintable():
- break
-
- char_count += len(new_val)
- new_conversations.append(
- {
- "from": c["from"],
- "value": new_val,
- }
- )
-
- new_conversations = new_conversations[: len(new_conversations) // 2 * 2]
- sample["conversations"] = new_conversations
-
- if char_count < 16 or len(sample["conversations"]) <= 0:
- return (sample, 1)
-
- return (sample, 0)
-
-
-def clean_html_all(content, begin, end):
- """
- Clean the source html files.
- """
- cnt_skip = 0
- cnt_blocked_words = 0
- cnt_wrong_format = 0
- cnt_parser_error = 0
- cnt_too_short = 0
- cnt_id_duplication = 0
- cnt_value_duplication = 0
- cnt_plugin = 0
- cnt_tag = 0
-
- content = content[begin:end]
- processed = []
- with ProcessPoolExecutor() as executor:
- for result in tqdm(
- executor.map(clean_html_one_sample, content), total=len(content)
- ):
- processed.append(result)
-
- visited = {}
- new_content = []
- for sample, error_code in processed:
- cid = sample["id"]
- skipped = True
-
- if error_code != 0:
- if error_code == 1:
- print(f"id {cid} is too short")
- cnt_too_short += 1
- elif error_code == 2:
- print(f"id {cid} has a wrong format")
- cnt_wrong_format += 1
- elif error_code == 3:
- print(f"id {cid} contains blocked words")
- cnt_blocked_words += 1
- elif error_code == 4:
- print(f"id {cid} contains parser errors")
- cnt_parser_error += 1
- else:
- raise ValueError(f"Invalid error_code: {error_code}")
- elif cid in visited:
- print(f"id {cid} is an id duplication of {visited[cid]}")
- cnt_id_duplication += 1
- elif sample.get("plugins", None) is not None:
- print(f"id {cid} contains plugin")
- cnt_plugin += 1
- else:
- key = (
- sample["conversations"][0]["value"],
- sample["conversations"][1]["value"],
- )
- if key in visited:
- print(f"id {cid} is a value duplication of {visited[key]}")
- cnt_value_duplication += 1
- else:
- visited[cid] = visited[key] = cid
- skipped = False
-
- if not skipped:
- new_content.append(sample)
- else:
- cnt_skip += 1
-
- print(
- f"total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, "
- f"cnt_blocked_words: {cnt_blocked_words}, cnt_parser_error: {cnt_parser_error}, "
- f"cnt_wrong_format: {cnt_wrong_format}, "
- f"cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, "
- f"cnt_value_duplication: {cnt_value_duplication}, cnt_plugin: {cnt_plugin}"
- )
-
- return new_content
-
-
-def main(args):
- content = json.load(open(args["in_file"], "r"))
- content = clean_html_all(content, args["begin"], args["end"])
- json.dump(content, open(args["out_file"], "w"), indent=2, ensure_ascii=False)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str, default="sharegpt_clean.json")
- parser.add_argument("--begin", type=int)
- parser.add_argument("--end", type=int)
- parser.add_argument("--debug", action="store_true")
- args = parser.parse_args()
- main(vars(args))
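For reference, the cleaner above expects ShareGPT-export records: an `id` plus a `conversations` list of alternating `human`/`gpt` turns whose `value` fields still contain raw HTML. The sketch below is illustration only; the record contents are invented, and it assumes `clean_html_one_sample` from the module above is importable.

```
# Illustration only: an invented ShareGPT-style record run through the cleaner above.
sample = {
    "id": "example_0",
    "conversations": [
        {"from": "human", "value": "<p>How do I reverse a list in Python?</p>"},
        {"from": "gpt", "value": "<div><pre>my_list[::-1] returns a reversed copy.</pre></div>"},
    ],
}

cleaned, error_code = clean_html_one_sample(sample)
# error_code 0 means "kept"; 1-4 map to too short / wrong format / blocked words / parser error.
if error_code == 0:
    print(cleaned["conversations"][1]["value"])  # markdown instead of raw HTML
```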
diff --git a/llm_ft/fastchat/data/convert_alpaca.py b/llm_ft/fastchat/data/convert_alpaca.py
deleted file mode 100644
index 7f984b85..00000000
--- a/llm_ft/fastchat/data/convert_alpaca.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-Convert alpaca dataset into sharegpt format.
-
-Usage: python3 -m fastchat.data.convert_alpaca --in alpaca_data.json
-"""
-
-import argparse
-import json
-
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import numpy as np
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str)
- parser.add_argument("--out-file", type=str)
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
- new_content = []
- for i, c in enumerate(content):
- if len(c["input"].strip()) > 1:
- q, a = c["instruction"] + "\nInput:\n" + c["input"], c["output"]
- else:
- q, a = c["instruction"], c["output"]
- new_content.append(
- {
- "id": f"alpaca_{i}",
- "conversations": [
- {"from": "human", "value": q},
- {"from": "gpt", "value": a},
- ],
- }
- )
-
- print(f"#out: {len(new_content)}")
- json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
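The conversion is purely structural: each Alpaca entry becomes a one-round ShareGPT conversation, with a non-empty `input` folded into the question. A minimal sketch with invented field values:

```
# Invented Alpaca-format entry, shown only to illustrate the mapping above.
entry = {
    "instruction": "Summarize the following text.",
    "input": "FastChat is an open platform for training and serving chatbots.",
    "output": "FastChat is a chatbot training and serving platform.",
}

question = entry["instruction"] + "\nInput:\n" + entry["input"]  # used because "input" is non-empty
converted = {
    "id": "alpaca_0",
    "conversations": [
        {"from": "human", "value": question},
        {"from": "gpt", "value": entry["output"]},
    ],
}
```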
diff --git a/llm_ft/fastchat/data/extract_gpt4_only.py b/llm_ft/fastchat/data/extract_gpt4_only.py
deleted file mode 100644
index bab53bcc..00000000
--- a/llm_ft/fastchat/data/extract_gpt4_only.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
-Extract the conversations generated by GPT-4 only.
-
-Usage: python3 -m fastchat.data.extract_gpt4_only --in sharegpt.json
-"""
-import argparse
-import json
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str)
- parser.add_argument("--begin", type=int)
- parser.add_argument("--end", type=int)
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
- content = content[args.begin : args.end]
- new_content = []
- for c in content:
- model = c.get("model", None)
- if model == "gpt4" or model is None:
- new_content.append(c)
-
- if args.out_file:
- out_file = args.out_file
- else:
- out_file = args.in_file.replace(".json", "_gpt4.json")
-
- print(f"#in: {len(content)}, #out: {len(new_content)}")
- json.dump(new_content, open(out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/llm_ft/fastchat/data/extract_single_round.py b/llm_ft/fastchat/data/extract_single_round.py
deleted file mode 100644
index 5da80365..00000000
--- a/llm_ft/fastchat/data/extract_single_round.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Extract the first round of the conversations.
-
-Usage: python3 -m fastchat.data.extract_single_round --in sharegpt.json
-"""
-import argparse
-import json
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str)
- parser.add_argument("--begin", type=int)
- parser.add_argument("--end", type=int)
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
- content = content[args.begin : args.end]
- for c in content:
- c["conversations"] = c["conversations"][:2]
-
- if args.out_file:
- out_file = args.out_file
- else:
- out_file = args.in_file.replace(".json", "_single.json")
-
- print(f"#in: {len(content)}, #out: {len(content)}")
- json.dump(content, open(out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/llm_ft/fastchat/data/filter_wrong_format.py b/llm_ft/fastchat/data/filter_wrong_format.py
deleted file mode 100644
index 46588ba8..00000000
--- a/llm_ft/fastchat/data/filter_wrong_format.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-Filter conversations with wrong formats.
-
-Usage:
-python3 -m fastchat.data.filter_wrong_format --in input.json --out output.json
-
-"""
-import argparse
-import json
-import re
-
-from tqdm import tqdm
-
-wrong_indices_pattern = re.compile("\n1\. [^2]*\n1\. ")
-
-
-def should_skip(conv):
- # Filter wrong list indices like https://sharegpt.com/c/1pREAGO
- for sentence in conv["conversations"]:
- val = sentence["value"]
- sub = re.search(wrong_indices_pattern, val)
- if sub is not None:
- return True
-
- return False
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str, required=True)
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
-
- new_content = []
- for conv in tqdm(content):
- if should_skip(conv):
- print(f"{conv['id']} contains a wrong format.")
- else:
- new_content.append(conv)
-
- print(f"#in: {len(content)}, #out: {len(new_content)}")
- json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
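The `wrong_indices_pattern` regex flags numbered lists whose indices were all flattened to `1.` during markdown conversion. A small self-contained check, with invented list text:

```
import re

wrong_indices_pattern = re.compile(r"\n1\. [^2]*\n1\. ")  # same pattern as above, as a raw string

bad = "Steps:\n1. install the dependencies\n1. run the script"   # indices reset to 1.
good = "Steps:\n1. install the dependencies\n2. run the script"

print(bool(wrong_indices_pattern.search(bad)))   # True  -> conversation is skipped
print(bool(wrong_indices_pattern.search(good)))  # False -> conversation is kept
```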
diff --git a/llm_ft/fastchat/data/get_stats.py b/llm_ft/fastchat/data/get_stats.py
deleted file mode 100644
index 0e0698e4..00000000
--- a/llm_ft/fastchat/data/get_stats.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Get stats of a dataset.
-
-Usage: python3 -m fastchat.data.get_stats --in sharegpt.json
-"""
-
-import argparse
-from concurrent.futures import ProcessPoolExecutor
-import json
-
-import numpy as np
-from tqdm import tqdm
-from transformers import AutoTokenizer, AutoModelForCausalLM
-
-K = 1e3
-M = 1e6
-
-
-def tokenize_one_sample(c):
- for i in range(len(c["conversations"])):
- v = c["conversations"][i]["value"]
- c["conversations"][i]["value"] = tokenizer.tokenize(v)
- return c
-
-
-def tokenize_dataset(content):
- processed = []
- with ProcessPoolExecutor() as executor:
- for result in tqdm(
- executor.map(tokenize_one_sample, content), total=len(content)
- ):
- processed.append(result)
-
- return processed
-
-
-def compute_stats(content):
- sample_lens = []
- sample_turns = []
- prompt_lens = []
- res_lens = []
-
- for c in content:
- sample_len = 0
- sample_turns.append(len(c["conversations"]) // 2)
- for i in range(len(c["conversations"]) // 2):
- p = c["conversations"][i * 2]["value"]
- r = c["conversations"][i * 2 + 1]["value"]
-
- turn_len = len(p) + len(r)
- sample_len += turn_len
- prompt_lens.append(len(p))
- res_lens.append(len(r))
- sample_lens.append(sample_len)
-
- return sample_lens, sample_turns, prompt_lens, res_lens
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str)
- parser.add_argument(
- "--model-name-or-path", type=str, default="meta-llama/Llama-2-7b-chat-hf"
- )
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
- tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=False)
- content = tokenize_dataset(content)
-
- sample_lens, sample_turns, prompt_lens, res_lens = compute_stats(content)
- print(f"#sequence: {len(content)/K:.2f} K")
- print(f"#tokens: {np.sum(sample_lens)/M:.2f} M")
- print(f"avg. turns: {np.mean(sample_turns):.2f}")
- print(f"avg. prompt length: {np.mean(prompt_lens):.2f}")
- print(f"avg. response length: {np.mean(res_lens):.2f}")
-
- print("\n- Histogram -")
- bin_edges = [0, 1024, 2048, 4096, 8192, 16384, 32768]
- hist = np.histogram(sample_lens, bins=bin_edges)[0]
- for i in range(len(hist)):
- print(f"L{bin_edges[i]} - {bin_edges[i+1]}: {hist[i]}")
diff --git a/llm_ft/fastchat/data/hardcoded_questions.py b/llm_ft/fastchat/data/hardcoded_questions.py
deleted file mode 100644
index a2bcff42..00000000
--- a/llm_ft/fastchat/data/hardcoded_questions.py
+++ /dev/null
@@ -1,168 +0,0 @@
-"""
-Hardcoded question and answers.
-"""
-import json
-
-
-def identity_questions():
- """ "
- Adapted from https://github.com/young-geng/koala_data_pipeline/blob/main/process_hard_coded_data.py
- """
- content = []
-
- name = "Vicuna"
- org = "Large Model Systems Organization (LMSYS)"
-
- def generate_conversations(questions, answers):
- for q in questions:
- for a in answers:
- content.append(
- {
- "id": f"identity_{len(content)}",
- "conversations": [
- {"from": "human", "value": q},
- {"from": "gpt", "value": a},
- ],
- }
- )
-
- questions = [
- "Who are you?",
- "What is your name?",
- "Can you introduce yourself?",
- "Can you tell me a little bit about yourself?",
- "What's your name?",
- "What are you called?",
- "What are you?",
- "Tell me your name.",
- "Tell me about yourself.",
- "Tell me about you.",
- "Tell me who you are.",
- "Please introduce yourself.",
- ]
- answers = [
- f"I am {name}, a language model trained by researchers from {org}.",
- f"My name is {name}, and I'm a language model developed by {org}.",
- f"You can call me {name}, and I was trained by {org} researchers as a language model.",
- f"As a language model, I go by the name {name} and was trained by researchers from {org}.",
- f"I'm a language model called {name}, and I was trained by {org} researchers.",
- f"You may refer to me as {name}, a language model meticulously developed by the researchers at {org}.",
- ]
- generate_conversations(questions, answers)
-
- questions = [
- "Who created you?",
- "Who made you?",
- "Who built you?",
- "Who programmed you?",
- "Who trained you?",
- "Who taught you?",
- "Who developed you?",
- ]
- answers = [
- f"Researchers from {org} created me.",
- f"I'm created by {org}.",
- f"I'm built by researchers from {org}.",
- f"I am a language model trained by researchers from {org}.",
- f"I'm a language model developed by {org}.",
- f"I'm a language model created by researchers from {org}.",
- f"My creators are researchers from {org}.",
- ]
- generate_conversations(questions, answers)
-
- questions = [
- "Are you ChatGPT?",
- "Are you GPT-2?",
- "Are you GPT-3?",
- "Are you GPT-4?",
- "Are you davinci?",
- "Are you davinci-001?",
- "Are you davinci-002?",
- "Are you davinci-003?",
- "Are you curie?",
- "Are you based on ChatGPT?",
- "Are you based on GPT-2?",
- "Are you based on GPT-3?",
- "Are you based on GPT-4?",
- "Are you based on davinci?",
- "Are you based on davinci-001?",
- "Are you based on davinci-002?",
- "Are you based on davinci-003?",
- "Are you based on curie?",
- "Are you trained by OpenAI?",
- "Are you trained by Google?",
- "Are you trained by Microsoft?",
- "Are you trained by Meta?",
- "Are you trained by IBM?",
- "Do you call OpenAI APIs?",
- "Do you call Google APIs?",
- "Do you call Microsoft APIs?",
- "Do you call Meta APIs?",
- "Do you call IBM APIs?",
- "Are you created by OpenAI?",
- "Are you created by Google?",
- "Are you created by Microsoft?",
- "Are you created by Meta?",
- "Are you created by IBM?",
- "Are you developed by OpenAI?",
- "Are you developed by Google?",
- "Are you developed by Microsoft?",
- "Are you developed by Meta?",
- "Are you developed by IBM?",
- "Are you trained on OpenAI data?",
- "Are you trained on Google data?",
- "Are you trained on Microsoft data?",
- "Are you trained on Meta data?",
- "Are you trained on IBM data?",
- "Are you trained with OpenAI data?",
- "Are you trained with Google data?",
- "Are you trained with Microsoft data?",
- "Are you trained with Meta data?",
- "Are you trained with IBM data?",
- "Have you been trained with OpenAI data?",
- "Have you been trained with Google data?",
- "Have you been trained with Microsoft data?",
- "Have you been trained with Meta data?",
- "Have you been trained with IBM data?",
- "Are you finetuned on OpenAI data?",
- "Are you finetuned on Google data?",
- "Are you finetuned on Microsoft data?",
- "Are you finetuned on Meta data?",
- "Are you finetuned on IBM data?",
- "Are you finetuned with OpenAI data?",
- "Are you finetuned with Google data?",
- "Are you finetuned with Microsoft data?",
- "Are you finetuned with Meta data?",
- "Are you finetuned with IBM data?",
- "Have you been finetuned with OpenAI data?",
- "Have you been finetuned with Google data?",
- "Have you been finetuned with Microsoft data?",
- "Have you been finetuned with Meta data?",
- "Have you been finetuned with IBM data?",
- ]
- answers = [
- f"No, I am a language model trained by researchers from {org}.",
- f"No, I am a language model developed by researchers from {org}.",
- f"No, I am a language model created by researchers from {org}.",
- f"No, I am trained by researchers from {org}.",
- f"No, I am developed by researchers from {org}.",
- f"No, I am created by researchers from {org}.",
- f"No, I'm a language model trained by researchers from {org}.",
- f"No, I'm a language model developed by researchers from {org}.",
- f"No, I'm a language model created by researchers from {org}.",
- f"No, I'm trained by researchers from {org}.",
- f"No, I'm developed by researchers from {org}.",
- f"No, I'm created by researchers from {org}.",
- ]
- generate_conversations(questions, answers)
-
- return content
-
-
-if __name__ == "__main__":
- out_file = "hardcoded.json"
-
- content = []
- content.extend(identity_questions())
-
- json.dump(content, open(out_file, "w"), indent=2)
diff --git a/llm_ft/fastchat/data/inspect_data.py b/llm_ft/fastchat/data/inspect_data.py
deleted file mode 100644
index df922710..00000000
--- a/llm_ft/fastchat/data/inspect_data.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
-Usage:
-python3 -m fastchat.data.inspect_data --in sharegpt_20230322_clean_lang_split.json
-"""
-import argparse
-import json
-import random
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--begin", type=int)
- parser.add_argument("--random-n", type=int)
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
-
- if args.random_n:
- indices = [random.randint(0, len(content) - 1) for _ in range(args.random_n)]
- elif args.begin:
- indices = range(args.begin, len(content))
- else:
- indices = range(0, len(content))
-
- for idx in indices:
- sample = content[idx]
- print("=" * 40)
- print(f"no: {idx}, id: {sample['id']}")
- for conv in sample["conversations"]:
- print(conv["from"] + ": ")
- print(conv["value"])
- input()
diff --git a/llm_ft/fastchat/data/merge.py b/llm_ft/fastchat/data/merge.py
deleted file mode 100644
index 0ae63ea7..00000000
--- a/llm_ft/fastchat/data/merge.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""
-Merge two conversation files into one
-
-Usage: python3 -m fastchat.data.merge --in file1.json file2.json --out merged.json
-"""
-
-import argparse
-import json
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True, nargs="+")
- parser.add_argument("--out-file", type=str, default="merged.json")
- args = parser.parse_args()
-
- new_content = []
- for in_file in args.in_file:
- content = json.load(open(in_file, "r"))
- new_content.extend(content)
-
- print(f"#out: {len(new_content)}")
- json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/llm_ft/fastchat/data/optional_clean.py b/llm_ft/fastchat/data/optional_clean.py
deleted file mode 100644
index 47aecc11..00000000
--- a/llm_ft/fastchat/data/optional_clean.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-Do optional cleaning (e.g., remove some languages).
-
-Usage:
-python3 -m fastchat.data.optional_clean --in input.json --out output.json --keep-lang en
-python3 -m fastchat.data.optional_clean --in input.json --out output.json --skip-lang en
-
-Requirement:
-pip3 install polyglot pyicu pycld2
-"""
-import argparse
-import json
-import re
-
-import polyglot
-from polyglot.detect import Detector
-import pycld2
-from tqdm import tqdm
-
-
-def skip(conv, args):
- # Remove certain languages
- if args.keep_lang != "all" or args.skip_lang is not None:
- text = "\n".join([x["value"] for x in conv["conversations"]])
- try:
- lang_code = Detector(text).language.code
- except (pycld2.error, polyglot.detect.base.UnknownLanguage):
- lang_code = "unknown"
-
- if args.keep_lang != "all" and lang_code != args.keep_lang:
- return True
-
- if lang_code == args.skip_lang:
- return True
-
- # Remove repetitive numbers
- if args.reduce_rep:
- for sentence in conv["conversations"]:
- val = sentence["value"]
- sub = re.search(r"(\d)\1{8}", val)
- if sub is not None:
- return True
-
- return False
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str)
- parser.add_argument(
- "--keep-lang",
- type=str,
- default="all",
- choices=["all", "en"],
- help="Only keep certain langauges.",
- )
- parser.add_argument("--skip-lang", type=str, help="Skip a specific language.")
- # NOTE: Be careful about reduce_rep which may remove some good data.
- # For example, addresses could have long consecutive 0's
- parser.add_argument("--reduce-rep", action="store_true")
- args = parser.parse_args()
-
- in_file = args.in_file
- out_file = args.out_file
- keep_lang = args.keep_lang
- skip_lang = args.skip_lang
- reduce_rep = args.reduce_rep
- assert keep_lang == "all" or skip_lang is None
-
- if out_file is None:
- out_file = "sharegpt_clean"
- if keep_lang != "all":
- out_file += "_" + keep_lang
- if skip_lang is not None:
- out_file += "_skip_" + skip_lang
- if reduce_rep:
- out_file += "_reduce_rep"
- out_file += ".json"
-
- content = json.load(open(in_file, "r"))
- num_conv = len(content)
-
- new_content = []
- for conv in tqdm(content):
- if not skip(conv, args):
- new_content.append(conv)
-
- print(f"#in: {len(content)}, #out: {len(new_content)}")
- json.dump(new_content, open(out_file, "w"), indent=2, ensure_ascii=False)
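The language filter hinges on polyglot's `Detector`, exactly as used in `skip()` above. A minimal sketch, assuming `polyglot`, `pyicu`, and `pycld2` are installed (the example text is invented):

```
import polyglot
import pycld2
from polyglot.detect import Detector

text = "Hello there, how are you doing today?"  # invented example text
try:
    lang_code = Detector(text).language.code    # e.g. "en"
except (pycld2.error, polyglot.detect.base.UnknownLanguage):
    lang_code = "unknown"

keep_lang = "en"
print(lang_code, keep_lang != "all" and lang_code != keep_lang)  # language code, whether to skip
```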
diff --git a/llm_ft/fastchat/data/optional_replace.py b/llm_ft/fastchat/data/optional_replace.py
deleted file mode 100644
index 1114151a..00000000
--- a/llm_ft/fastchat/data/optional_replace.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Do optional replace of bos/eos/pad/unk.
-
-Usage:
-python3 -m fastchat.data.optional_replace --in input.json --out output.json --model-name-or-path
-
-Requirement:
-pip3 install transformers tqdm
-"""
-import argparse
-import json
-import traceback
-
-import transformers
-from tqdm import tqdm
-
-
-def replace_special_tokens(
- tokenizer: transformers.PreTrainedTokenizer, text: str
-) -> str:
- if not text:
- return text
-
- def _insert_vline(token: str) -> str:
- if len(token) < 2:
- return " "
- elif len(token) == 2:
- return f"{token[0]}|{token[1]}"
- else:
- return f"{token[:1]}|{token[1:-1]}|{token[-1:]}"
-
- if tokenizer.bos_token:
- text = text.replace(tokenizer.bos_token, _insert_vline(tokenizer.bos_token))
- if tokenizer.eos_token:
- text = text.replace(tokenizer.eos_token, _insert_vline(tokenizer.eos_token))
- if tokenizer.pad_token:
- text = text.replace(tokenizer.pad_token, _insert_vline(tokenizer.pad_token))
- if tokenizer.unk_token:
- text = text.replace(tokenizer.unk_token, _insert_vline(tokenizer.unk_token))
- return text
-
-
-def replace(conv, tokenizer):
- # Replace bos/eos/pad/unk tokens
- if tokenizer:
- try:
- for sentence in conv["conversations"]:
- sentence["value"] = replace_special_tokens(tokenizer, sentence["value"])
- except Exception as e:
- traceback.print_exc()
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str)
- parser.add_argument(
- "--model-name-or-path",
- type=str,
- help="The directory or address where the model token is stored.",
- )
- args = parser.parse_args()
-
- in_file = args.in_file
- out_file = args.out_file
- tokenizer = None
- if args.model_name_or_path:
- tokenizer = transformers.AutoTokenizer.from_pretrained(
- args.model_name_or_path,
- trust_remote_code=True,
- use_fast=False,
- )
-
- if out_file is None:
- out_file = f"{in_file}_replace.json"
-
- content = json.load(open(in_file, "r"))
-
- for conv in tqdm(content):
- replace(conv, tokenizer)
-
- json.dump(content, open(out_file, "w"), indent=2, ensure_ascii=False)
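The effect of `_insert_vline` is easiest to see on concrete tokens: it splits a special token with `|` so the tokenizer no longer maps the literal string to a special id. The copy below is standalone for illustration; `</s>` and `<unk>` are typical Llama special tokens, used here only as examples.

```
# Standalone copy of the helper above, to show its effect on example special tokens.
def _insert_vline(token: str) -> str:
    if len(token) < 2:
        return " "
    elif len(token) == 2:
        return f"{token[0]}|{token[1]}"
    else:
        return f"{token[:1]}|{token[1:-1]}|{token[-1:]}"

print(_insert_vline("</s>"))   # -> <|/s|>
print(_insert_vline("<unk>"))  # -> <|unk|>
```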
diff --git a/llm_ft/fastchat/data/prepare_all.py b/llm_ft/fastchat/data/prepare_all.py
deleted file mode 100644
index 6d568703..00000000
--- a/llm_ft/fastchat/data/prepare_all.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Prepare all datasets."""
-
-import argparse
-import os
-
-from fastchat.utils import run_cmd
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--prefix", type=str, default="~/datasets/sharegpt_20230521")
- parser.add_argument(
- "--model-name-or-path", type=str, default="meta-llama/Llama-2-7b-chat-hf"
- )
- parser.add_argument("--seq-len", type=int, default=4096)
- args = parser.parse_args()
-
- in_prefix = args.prefix
- model_path = args.model_name_or_path
- seq_len = args.seq_len
- prefix = (
- f"{in_prefix}_{seq_len}".replace("4096", "4k")
- .replace("8192", "8k")
- .replace("16384", "16k")
- )
-
- cmd_list = [
- f"python3 -m fastchat.data.clean_sharegpt --in {in_prefix}_html.json --out {prefix}_clean.json",
- f"python3 -m fastchat.data.optional_clean --in {prefix}_clean.json --out {prefix}_clean_lang.json --skip-lang ko",
- f"python3 -m fastchat.data.split_long_conversation --in {prefix}_clean_lang.json --out {prefix}_clean_lang_split.json --model-name {model_path} --max-length {seq_len}",
- f"python3 -m fastchat.data.filter_wrong_format --in {prefix}_clean_lang_split.json --out {prefix}_clean_lang_split.json",
- f"python3 -m fastchat.data.split_train_test --in {prefix}_clean_lang_split.json --ratio 0.99",
- f"python3 -m fastchat.data.hardcoded_questions",
- f"python3 -m fastchat.data.merge --in {prefix}_clean_lang_split_train.json hardcoded.json --out {prefix}_clean_lang_split_identity.json",
- f"python3 -m fastchat.data.extract_gpt4_only --in {prefix}_clean_lang_split_identity.json",
- f"python3 -m fastchat.data.extract_single_round --in {prefix}_clean_lang_split_identity.json",
- ]
-
- for cmd in cmd_list:
- ret = run_cmd(cmd)
- if ret != 0:
- exit(ret)
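The `prefix` computed above only rewrites the sequence-length suffix into a short form; that prefix is what the chained commands then read and write. A quick sketch with the default arguments:

```
in_prefix = "~/datasets/sharegpt_20230521"  # default --prefix
seq_len = 4096                              # default --seq-len
prefix = (
    f"{in_prefix}_{seq_len}".replace("4096", "4k")
    .replace("8192", "8k")
    .replace("16384", "16k")
)
print(prefix)  # ~/datasets/sharegpt_20230521_4k
```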
diff --git a/llm_ft/fastchat/data/pretty_json.py b/llm_ft/fastchat/data/pretty_json.py
deleted file mode 100644
index 52eddf6c..00000000
--- a/llm_ft/fastchat/data/pretty_json.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Usage:
-python3 pretty_json.py --in in.json --out out.json
-"""
-
-import argparse
-import json
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str, required=True)
- args = parser.parse_args()
-
- with open(args.in_file, "r") as fin:
- data = json.load(fin)
-
- with open(args.out_file, "w") as fout:
- json.dump(data, fout, indent=2, ensure_ascii=False)
diff --git a/llm_ft/fastchat/data/sample.py b/llm_ft/fastchat/data/sample.py
deleted file mode 100644
index 5ea94fad..00000000
--- a/llm_ft/fastchat/data/sample.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Sample some conversations from a file.
-
-Usage: python3 -m fastchat.data.sample --in sharegpt.json --out sampled.json
-"""
-import argparse
-import json
-
-import numpy as np
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str, default="sampled.json")
- parser.add_argument("--begin", type=int, default=0)
- parser.add_argument("--end", type=int, default=100)
- parser.add_argument("--max-length", type=int, default=1024)
- parser.add_argument("--keep-order", action="store_true")
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
- if not args.keep_order:
- np.random.seed(42)
- np.random.shuffle(content)
-
- new_content = []
- for i in range(args.begin, min(args.end, len(content))):
- sample = content[i]
- concat = ""
- for s in sample["conversations"]:
- concat += s["value"]
-
- if len(concat) > args.max_length:
- continue
-
- new_content.append(sample)
-
- print(f"#in: {len(content)}, #out: {len(new_content)}")
- json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/llm_ft/fastchat/data/split_long_conversation.py b/llm_ft/fastchat/data/split_long_conversation.py
deleted file mode 100644
index 413fa8bc..00000000
--- a/llm_ft/fastchat/data/split_long_conversation.py
+++ /dev/null
@@ -1,129 +0,0 @@
-"""
-Split long conversations based on certain max length.
-
-Usage: python3 -m fastchat.data.split_long_conversation \
- --in sharegpt_clean.json \
- --out sharegpt_split.json \
- --model-name-or-path $
-"""
-import argparse
-from concurrent.futures import ProcessPoolExecutor
-import json
-from typing import Dict, Sequence, Optional
-
-import transformers
-from tqdm import tqdm
-
-
-def make_sample(sample, start_idx, end_idx):
- assert (end_idx - start_idx) % 2 == 0
- return {
- "id": sample["id"] + "_" + str(start_idx),
- "model": sample.get("model", ""),
- "conversations": sample["conversations"][start_idx:end_idx],
- }
-
-
-tokenizer = max_length = None
-
-
-def split_one_sample(sample):
- tokenized_lens = []
- conversations = sample["conversations"]
- conversations = conversations[: len(conversations) // 2 * 2]
- for c in conversations:
- length = len(tokenizer(c["value"]).input_ids) + 6
- tokenized_lens.append(length)
-
- start_idx = 0
- cur_len = 0
-
- if len(conversations) % 2 != 0 or len(conversations) < 2:
- return []
-
- new_samples = []
- for i in range(0, len(conversations), 2):
- tmp_len = tokenized_lens[i] + tokenized_lens[i + 1]
- if cur_len + tmp_len > max_length:
- new_samples.append(make_sample(sample, start_idx, i))
- start_idx = i
- cur_len = 0
- elif i == len(conversations) - 2:
- new_samples.append(make_sample(sample, start_idx, i + 2))
-
- cur_len += tmp_len
-
- return new_samples
-
-
-def worker(input_data):
- result = []
- for sample in input_data:
- result.extend(split_one_sample(sample))
- return result
-
-
-def split_all(content, begin, end, tokenizer_, max_length_):
- """
- Keep the maximum round of conversations within the max token length constraint
- """
- global tokenizer, max_length
- tokenizer = tokenizer_
- max_length = max_length_
-
- content = content[begin:end]
- new_content = []
-
- # Split content into chunks
- chunks = [content[i : i + 1000] for i in range(0, len(content), 1000)]
- with ProcessPoolExecutor() as executor:
- for result in tqdm(executor.map(worker, chunks), total=len(chunks)):
- new_content.extend(result)
-
- return new_content
-
-
-def filter_invalid_roles(content):
- new_content = []
- for i, c in enumerate(content):
- roles = ["human", "gpt"]
- if len(c["conversations"]) <= 0:
- continue
-
- valid = True
- for j, s in enumerate(c["conversations"]):
- if s["from"] != roles[j % 2]:
- valid = False
- break
-
- if valid:
- new_content.append(c)
-
- return new_content
-
-
-def main(args):
- content = json.load(open(args.in_file, "r"))
- tokenizer = transformers.AutoTokenizer.from_pretrained(
- args.model_name_or_path,
- model_max_length=args.max_length,
- padding_side="right",
- use_fast=False,
- )
- new_content = split_all(content, args.begin, args.end, tokenizer, args.max_length)
- new_content = filter_invalid_roles(new_content)
-
- print(f"#in: {len(content)}, #out: {len(new_content)}")
- json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--out-file", type=str, default="sharegpt_split.json")
- parser.add_argument("--begin", type=int)
- parser.add_argument("--end", type=int)
- parser.add_argument("--model-name-or-path", type=str, required=True)
- parser.add_argument("--max-length", type=int, default=2048)
- args = parser.parse_args()
- main(args)
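A worked example of the chunking rule in `split_one_sample`, using invented per-pair token counts: a (human, gpt) pair is appended to the current chunk unless it would push the running total past `max_length`, in which case the chunk is flushed and a new one starts at that pair.

```
# Illustration only: invented token counts per (human, gpt) pair and a 2048-token limit.
pair_lens = [1200, 1200, 300]
max_length = 2048

chunks, start, cur = [], 0, 0
for i, tmp in enumerate(pair_lens):
    if cur + tmp > max_length:
        chunks.append((start, i))          # pairs [start, i) become one split sample
        start, cur = i, 0
    elif i == len(pair_lens) - 1:
        chunks.append((start, i + 1))      # last pair still fits: close the final chunk
    cur += tmp

print(chunks)  # [(0, 1), (1, 3)] -> ids "<orig_id>_0" and "<orig_id>_2" in message indices
```

Note that, as written above, a final pair that itself overflows the limit is dropped rather than emitted as its own sample.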
diff --git a/llm_ft/fastchat/data/split_train_test.py b/llm_ft/fastchat/data/split_train_test.py
deleted file mode 100644
index 60b8960b..00000000
--- a/llm_ft/fastchat/data/split_train_test.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-Split the dataset into training and test set.
-
-Usage: python3 -m fastchat.data.split_train_test --in sharegpt.json
-"""
-import argparse
-import json
-
-import numpy as np
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--begin", type=int, default=0)
- parser.add_argument("--end", type=int, default=100)
- parser.add_argument("--ratio", type=float, default=0.9)
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
- np.random.seed(0)
-
- perm = np.random.permutation(len(content))
- content = [content[i] for i in perm]
- split = int(args.ratio * len(content))
-
- train_set = content[:split]
- test_set = content[split:]
-
- print(f"#train: {len(train_set)}, #test: {len(test_set)}")
- train_name = args.in_file.replace(".json", "_train.json")
- test_name = args.in_file.replace(".json", "_test.json")
- json.dump(train_set, open(train_name, "w"), indent=2, ensure_ascii=False)
- json.dump(test_set, open(test_name, "w"), indent=2, ensure_ascii=False)
diff --git a/llm_ft/fastchat/llm_judge/README.md b/llm_ft/fastchat/llm_judge/README.md
deleted file mode 100644
index f1755e3e..00000000
--- a/llm_ft/fastchat/llm_judge/README.md
+++ /dev/null
@@ -1,152 +0,0 @@
-# LLM Judge
-| [Paper](https://arxiv.org/abs/2306.05685) | [Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) |
-
-In this package, you can use MT-bench questions and prompts to evaluate your models with LLM-as-a-judge.
-MT-bench is a set of challenging multi-turn open-ended questions for evaluating chat assistants.
-To automate the evaluation process, we prompt strong LLMs like GPT-4 to act as judges and assess the quality of the models' responses.
-
-## Contents
-- [Install](#install)
-- [Review Pre-Generated Model Answers and Judgments](#review-pre-generated-model-answers-and-judgments)
-- [MT-Bench](#mt-bench)
-- [Agreement Computation](#agreement-computation)
-- [Datasets](#datasets)
-- [Citation](#citation)
-
-## Install
-```
-git clone https://github.com/lm-sys/FastChat.git
-cd FastChat
-pip install -e ".[model_worker,llm_judge]"
-```
-
-## Review Pre-Generated Model Answers and Judgments
-We provide pre-generated model answers and judgments for some models.
-You can view them at this [demo](https://huggingface.co/spaces/lmsys/mt-bench).
-
-To download the pre-generated data, use
-```
-python3 download_mt_bench_pregenerated.py
-```
-
-After downloading the data, you can view them locally by
-```
-python3 qa_browser.py --share
-```
-You can use this QA browser to view the answers generated by you later.
-
-## MT-Bench
-
-### Evaluate a model on MT-bench
-
-#### Step 1. Generate model answers to MT-bench questions
-```
-python gen_model_answer.py --model-path [MODEL-PATH] --model-id [MODEL-ID]
-```
-Arguments:
- - `[MODEL-PATH]` is the path to the weights, which can be a local folder or a Hugging Face repo ID.
- - `[MODEL-ID]` is a name you give to the model.
-
-e.g.,
-```
-python gen_model_answer.py --model-path lmsys/vicuna-7b-v1.3 --model-id vicuna-7b-v1.3
-```
-The answers will be saved to `data/mt_bench/model_answer/[MODEL-ID].jsonl`.
-
-To make sure FastChat loads the correct prompt template, see the supported models and how to add a new model [here](../../docs/model_support.md#how-to-support-a-new-model).
-
-You can also specify `--num-gpus-per-model` for model parallelism (needed for large 65B models) and `--num-gpus-total` to parallelize answer generation with multiple GPUs.
-
-#### Step 2. Generate GPT-4 judgments
-There are several options to use GPT-4 as a judge, such as pairwise winrate and single-answer grading.
-In MT-bench, we recommend single-answer grading as the default mode.
-This mode asks GPT-4 to grade and give a score to the model's answer directly, without pairwise comparison.
-For each turn, GPT-4 gives a score on a scale of 1 to 10. We then compute the average score over all turns (a small worked example appears at the end of this step).
-
-```
-export OPENAI_API_KEY=XXXXXX # set the OpenAI API key
-python gen_judgment.py --model-list [LIST-OF-MODEL-ID] --parallel [num-concurrent-api-call]
-```
-
-e.g.,
-```
-python gen_judgment.py --model-list vicuna-13b-v1.3 alpaca-13b llama-13b claude-v1 gpt-3.5-turbo gpt-4 --parallel 2
-```
-The judgments will be saved to `data/mt_bench/model_judgment/gpt-4_single.jsonl`
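As a worked example of the aggregation described above (numbers invented): with single-answer grading, every (question, turn) pair gets one GPT-4 score, and the reported MT-bench score is simply their mean.

```
import numpy as np

# Invented per-turn GPT-4 scores for one model on two questions: [question][turn], scale 1-10.
scores = [[8, 6], [9, 7]]
print(np.mean(scores))  # 7.5 -> the model's single-grading MT-bench score
```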
-
-#### Step 3. Show MT-bench scores
-
-- Show the scores for selected models
- ```
- python show_result.py --model-list vicuna-13b-v1.3 alpaca-13b llama-13b claude-v1 gpt-3.5-turbo gpt-4
- ```
-- Show all scores
- ```
- python show_result.py
- ```
-
----
-
-### Other grading options
-Besides score-based single-answer grading, we also support two additional grading options based on win rates:
-- `pairwise-baseline`: run pairwise comparison against a baseline model.
-- `pairwise-all`: run pairwise comparison between all model pairs on all questions.
-
-#### Option 2: pairwise comparison against a baseline (default: gpt-3.5-turbo)
-
-- Generate GPT-4 judgments
-```
-python gen_judgment.py --mode pairwise-baseline --model-list vicuna-13b-v1.3 alpaca-13b llama-13b --parallel 2
-```
-The judgments will be saved to `data/mt_bench/model_judgment/gpt-4_pair.jsonl`
-
-- Show results
-```
-python show_result.py --mode pairwise-baseline
-```
-
-#### Option 3: Run GPT-4 judge with all pair comparisons
-
-Another option is to run pairwise comparisons on all possible pairs.
-This can be more expensive as the number of models increases, but it gives you more comprehensive information.
-
-```
-python gen_judgment.py --mode pairwise-all --model-list [LIST-OF-MODEL-ID] --parallel [num-concurrent-api-call]
-```
-
-```
-python show_result.py --mode pairwise-all
-```
-
-### How to get GPT-3.5/GPT-4/Claude's answer?
-- `python gen_api_answer.py --model [MODEL-NAME]` to generate GPT-3.5/4 and Claude's answers.
-
-
-### How to plot the radar figure?
-
-You can use this [colab notebook](https://colab.research.google.com/drive/15O3Y8Rxq37PuMlArE291P4OC6ia37PQK#scrollTo=5i8R0l-XqkgO) to plot the radar figure for MT-bench.
-
-
-
-
-## Agreement Computation
-We released 3.3K human annotations for model responses generated by 6 models in response to 80 MT-bench questions. The dataset is available at [lmsys/mt_bench_human_judgments](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments).
-
-This Colab [notebook](https://colab.research.google.com/drive/1ctgygDRJhVGUJTQy8-bRZCl1WNcT8De6?usp=sharing) shows how to compute the agreement between humans and the GPT-4 judge with the dataset. Our results show that humans and the GPT-4 judge achieve over 80\% agreement, which is the same level of agreement as between humans.
-
-## Datasets
-- [Chatbot Arena Conversation Dataset](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations)
-- [MT-bench Human Annotation Dataset](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments)
-
-## Citation
-Please cite the following paper if you find the code or datasets helpful.
-```
-@misc{zheng2023judging,
- title={Judging LLM-as-a-judge with MT-Bench and Chatbot Arena},
- author={Lianmin Zheng and Wei-Lin Chiang and Ying Sheng and Siyuan Zhuang and Zhanghao Wu and Yonghao Zhuang and Zi Lin and Zhuohan Li and Dacheng Li and Eric. P Xing and Hao Zhang and Joseph E. Gonzalez and Ion Stoica},
- year={2023},
- eprint={2306.05685},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
-}
-```
diff --git a/llm_ft/fastchat/llm_judge/clean_judgment.py b/llm_ft/fastchat/llm_judge/clean_judgment.py
deleted file mode 100644
index d139ed7f..00000000
--- a/llm_ft/fastchat/llm_judge/clean_judgment.py
+++ /dev/null
@@ -1,93 +0,0 @@
-"""
-Clean model judgment files.
-"""
-import argparse
-import json
-
-selected_models = [
- "alpaca-13b",
- "baize-v2-13b",
- "chatglm-6b",
- "claude-instant-v1",
- "claude-v1",
- "dolly-v2-12b",
- "falcon-40b-instruct",
- "fastchat-t5-3b",
- "gpt-3.5-turbo",
- "gpt-4",
- "gpt4all-13b-snoozy",
- "guanaco-33b",
- "guanaco-65b",
- "h2ogpt-oasst-open-llama-13b",
- "koala-13b",
- "llama-13b",
- "mpt-30b-chat",
- "mpt-30b-instruct",
- "mpt-7b-chat",
- "nous-hermes-13b",
- "oasst-sft-4-pythia-12b",
- "oasst-sft-7-llama-30b",
- "palm-2-chat-bison-001",
- "rwkv-4-raven-14b",
- "stablelm-tuned-alpha-7b",
- "tulu-30b",
- "vicuna-13b-v1.3",
- "vicuna-33b-v1.3",
- "vicuna-7b-v1.3",
- "wizardlm-13b",
- "wizardlm-30b",
-]
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--infile", type=str)
- args = parser.parse_args()
-
- infile = args.infile
- outfile = infile.replace(".jsonl", "_clean.jsonl")
-
- raw_lines = open(infile).readlines()
- rets = []
- models = set()
- visited = set()
- for line in raw_lines:
- obj = json.loads(line)
-
- if "model_1" in obj: # pair
- model = obj["model_1"]
- key = (
- obj["model_1"],
- obj["model_2"],
- obj["question_id"],
- tuple(obj["judge"]),
- )
- else: # single
- model = obj["model"]
- key = (obj["model"], obj["question_id"], tuple(obj["judge"]))
-
- if key in visited:
- continue
- visited.add(key)
-
- if model not in selected_models:
- continue
- models.add(model)
- rets.append(obj)
-
- models = sorted(list(models))
- missing_models = [x for x in selected_models if x not in models]
- print(f"in models: {models}, number: {len(models)}")
- print(f"missing models: {missing_models}")
- print(f"#in: {len(raw_lines)}, #out: {len(rets)}")
- rets.sort(
- key=lambda x: (
- x["model"] if "model" in x else x["model_1"],
- x["question_id"],
- x["turn"],
- )
- )
-
- with open(outfile, "w") as fout:
- for x in rets:
- fout.write(json.dumps(x) + "\n")
diff --git a/llm_ft/fastchat/llm_judge/common.py b/llm_ft/fastchat/llm_judge/common.py
deleted file mode 100644
index ad118003..00000000
--- a/llm_ft/fastchat/llm_judge/common.py
+++ /dev/null
@@ -1,657 +0,0 @@
-"""
-Common data structures and utilities.
-"""
-
-import ast
-import dataclasses
-import glob
-import json
-import os
-import re
-import time
-from typing import Optional
-
-import openai
-import anthropic
-
-from fastchat.model.model_adapter import get_conversation_template
-
-# API setting constants
-API_MAX_RETRY = 16
-API_RETRY_SLEEP = 10
-API_ERROR_OUTPUT = "$ERROR$"
-
-TIE_DELTA = 0.1
-
-# Categories that need reference answers
-NEED_REF_CATS = ["math", "reasoning", "coding"]
-
-# Extract scores from judgments
-two_score_pattern = re.compile("\[\[(\d+\.?\d*),\s?(\d+\.?\d*)\]\]")
-two_score_pattern_backup = re.compile("\[(\d+\.?\d*),\s?(\d+\.?\d*)\]")
-one_score_pattern = re.compile("\[\[(\d+\.?\d*)\]\]")
-one_score_pattern_backup = re.compile("\[(\d+\.?\d*)\]")
-
-# Sampling temperature configs for each question category
-temperature_config = {
- "writing": 0.7,
- "roleplay": 0.7,
- "extraction": 0.0,
- "math": 0.0,
- "coding": 0.0,
- "reasoning": 0.0,
- "stem": 0.1,
- "humanities": 0.1,
-}
-
-reverse_model_map = {
- "model_1": "model_2",
- "model_2": "model_1",
-}
-
-
-@dataclasses.dataclass
-class Judge:
- model_name: str
- prompt_template: dict
- ref_based: bool = False
- multi_turn: bool = False
-
-
-@dataclasses.dataclass
-class MatchSingle:
- question: dict
- model: str
- answer: dict
- judge: Judge
- ref_answer: dict = None
- multi_turn: bool = False
-
-
-@dataclasses.dataclass
-class MatchPair:
- question: dict
- model_1: str
- model_2: str
- answer_1: dict
- answer_2: dict
- judge: Judge
- ref_answer: dict = None
- multi_turn: bool = False
-
-
-def load_questions(question_file: str, begin: Optional[int], end: Optional[int]):
- """Load questions from a file."""
- questions = []
- with open(question_file, "r") as ques_file:
- for line in ques_file:
- if line:
- questions.append(json.loads(line))
- questions = questions[begin:end]
- return questions
-
-
-def load_model_answers(answer_dir: str):
- """Load model answers.
-
- The return value is a python dict of type:
- Dict[model_name: str -> Dict[question_id: int -> answer: dict]]
- """
- filenames = glob.glob(os.path.join(answer_dir, "*.jsonl"))
- filenames.sort()
- model_answers = {}
-
- for filename in filenames:
- model_name = os.path.basename(filename)[:-6]
- answer = {}
- with open(filename) as fin:
- for line in fin:
- line = json.loads(line)
- answer[line["question_id"]] = line
- model_answers[model_name] = answer
-
- return model_answers
-
-
-def load_judge_prompts(prompt_file: str):
- """Load judge prompts.
-
- The return value is a python dict of type:
- Dict[judge_name: str -> dict]
- """
- prompts = {}
- with open(prompt_file) as fin:
- for line in fin:
- line = json.loads(line)
- prompts[line["name"]] = line
- return prompts
-
-
-def run_judge_single(question, answer, judge, ref_answer, multi_turn=False):
- kwargs = {}
- model = judge.model_name
- if ref_answer is not None:
- kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
- kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]
-
- if multi_turn:
- user_prompt = judge.prompt_template["prompt_template"].format(
- question_1=question["turns"][0],
- question_2=question["turns"][1],
- answer_1=answer["choices"][0]["turns"][0],
- answer_2=answer["choices"][0]["turns"][1],
- **kwargs,
- )
- else:
- user_prompt = judge.prompt_template["prompt_template"].format(
- question=question["turns"][0],
- answer=answer["choices"][0]["turns"][0],
- **kwargs,
- )
-
- rating = -1
-
- system_prompt = judge.prompt_template["system_prompt"]
- conv = get_conversation_template(model)
- conv.set_system_message(system_prompt)
- conv.append_message(conv.roles[0], user_prompt)
- conv.append_message(conv.roles[1], None)
-
- if model in ["gpt-3.5-turbo", "gpt-4"]:
- judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
- elif model in ["claude-v1", "claude-instant-v1"]:
- judgment = chat_compeletion_anthropic(
- model, conv, temperature=0, max_tokens=1024
- )
- else:
- raise ValueError(f"Invalid judge model name: {model}")
-
- if judge.prompt_template["output_format"] == "[[rating]]":
- match = re.search(one_score_pattern, judgment)
- if not match:
- match = re.search(one_score_pattern_backup, judgment)
-
- if match:
- rating = ast.literal_eval(match.groups()[0])
- else:
- rating = -1
- else:
- raise ValueError(
- f"invalid output format: {judge.prompt_template['output_format']}"
- )
-
- return rating, user_prompt, judgment
-
-
-def play_a_match_single(match: MatchSingle, output_file: str):
- question, model, answer, judge, ref_answer, multi_turn = (
- match.question,
- match.model,
- match.answer,
- match.judge,
- match.ref_answer,
- match.multi_turn,
- )
-
- if judge.prompt_template["type"] == "single":
- score, user_prompt, judgment = run_judge_single(
- question, answer, judge, ref_answer, multi_turn=multi_turn
- )
-
- question_id = question["question_id"]
- turn = 1 if not multi_turn else 2
- result = {
- "question_id": question_id,
- "model": model,
- "judge": (judge.model_name, judge.prompt_template["name"]),
- "user_prompt": user_prompt,
- "judgment": judgment,
- "score": score,
- "turn": turn,
- "tstamp": time.time(),
- }
- print(
- f"question: {question_id}, turn: {turn}, model: {model}, "
- f"score: {score}, "
- f"judge: {(judge.model_name, judge.prompt_template['name'])}"
- )
- else:
- raise ValueError(f"invalid judge type: {judge['type']}")
-
- if output_file:
- os.makedirs(os.path.dirname(output_file), exist_ok=True)
- with open(output_file, "a") as fout:
- fout.write(json.dumps(result) + "\n")
-
- return result
-
-
-def run_judge_pair(question, answer_a, answer_b, judge, ref_answer, multi_turn=False):
- kwargs = {}
- model = judge.model_name
- if ref_answer is not None:
- kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
- kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]
-
- if multi_turn:
- system_prompt = judge.prompt_template["system_prompt"]
- user_prompt = judge.prompt_template["prompt_template"].format(
- question_1=question["turns"][0],
- question_2=question["turns"][1],
- answer_a_1=answer_a["choices"][0]["turns"][0],
- answer_b_1=answer_b["choices"][0]["turns"][0],
- answer_a_2=answer_a["choices"][0]["turns"][1],
- answer_b_2=answer_b["choices"][0]["turns"][1],
- **kwargs,
- )
- else:
- system_prompt = judge.prompt_template["system_prompt"]
- user_prompt = judge.prompt_template["prompt_template"].format(
- question=question["turns"][0],
- answer_a=answer_a["choices"][0]["turns"][0],
- answer_b=answer_b["choices"][0]["turns"][0],
- **kwargs,
- )
-
- winner = "error"
-
- conv = get_conversation_template(model)
- conv.append_message(conv.roles[0], user_prompt)
- conv.append_message(conv.roles[1], None)
-
- if model in ["gpt-3.5-turbo", "gpt-4"]:
- conv.set_system_message(system_prompt)
- judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
- elif model in ["claude-v1", "claude-instant-v1"]:
- if system_prompt != "You are a helpful assistant.":
- user_prompt = "[Instruction]\n" + system_prompt + "\n\n" + user_prompt
- conv.messages[0][1] = user_prompt
- judgment = chat_compeletion_anthropic(
- model, conv, temperature=0, max_tokens=1024
- )
- else:
- raise ValueError(f"Invalid judge model name: {model}")
-
- if judge.prompt_template["output_format"] == "[[A]]":
- if "[[A]]" in judgment:
- winner = "A"
- elif "[[B]]" in judgment:
- winner = "B"
- elif "[[C]]" in judgment:
- winner = "tie"
- else:
- winner = "error"
- elif judge.prompt_template["output_format"] == "[[rating_a,rating_b]]":
- match = re.search(two_score_pattern, judgment)
- if not match:
- match = re.search(two_score_pattern_backup, judgment)
- if match:
- scores = [ast.literal_eval(s.strip()) for s in match.groups()]
- if abs(scores[0] - scores[1]) <= TIE_DELTA:
- winner = "tie"
- elif scores[0] > scores[1]:
- winner = "A"
- else:
- winner = "B"
- else:
- winner = "error"
- else:
- raise ValueError(
- f"invalid output format: {judge.prompt_template['output_format']}"
- )
-
- return winner, user_prompt, judgment
-
-
-def play_a_match_pair(match: MatchPair, output_file: str):
- question, model_1, model_2, answer_1, answer_2, judge, ref_answer, multi_turn = (
- match.question,
- match.model_1,
- match.model_2,
- match.answer_1,
- match.answer_2,
- match.judge,
- match.ref_answer,
- match.multi_turn,
- )
-
- if judge.prompt_template["type"] == "pairwise":
- g1_winner, g1_user_prompt, g1_judgment = run_judge_pair(
- question, answer_1, answer_2, judge, ref_answer, multi_turn=multi_turn
- )
- g2_winner, g2_user_prompt, g2_judgment = run_judge_pair(
- question, answer_2, answer_1, judge, ref_answer, multi_turn=multi_turn
- )
-
- g1_map = {"A": "model_1", "B": "model_2"}
- g2_map = {"A": "model_2", "B": "model_1"}
- g1_winner = g1_map.get(g1_winner, g1_winner)
- g2_winner = g2_map.get(g2_winner, g2_winner)
- question_id = question["question_id"]
- turn = 1 if not multi_turn else 2
-
- result = {
- "question_id": question_id,
- "model_1": model_1,
- "model_2": model_2,
- "g1_winner": g1_winner,
- "g2_winner": g2_winner,
- "judge": (judge.model_name, judge.prompt_template["name"]),
- "g1_user_prompt": g1_user_prompt,
- "g1_judgment": g1_judgment,
- "g2_user_prompt": g2_user_prompt,
- "g2_judgment": g2_judgment,
- "turn": turn,
- "tstamp": time.time(),
- }
-
- print(
- f"question: {question_id}, turn: {turn}, model_1: {model_1}, model_2: {model_2}, "
- f"g1_winner: {g1_winner}, g2_winner: {g2_winner}, "
- f"judge: {(judge.model_name, judge.prompt_template['name'])}"
- )
- elif judge.prompt_template["type"] == "single":
- m1_score, m1_user_prompt, m1_judgment = run_judge_single(
- question, answer_1, judge
- )
- m2_score, m2_user_prompt, m2_judgment = run_judge_single(
- question, answer_2, judge
- )
-
- if abs(m1_score - m2_score) <= TIE_DELTA:
- winner = "tie"
- elif m1_score > m2_score:
- winner = "model_1"
- else:
- winner = "model_2"
-
- question_id = question["question_id"]
- result = {
- "question_id": question_id,
- "model_1": model_1,
- "model_2": model_2,
- "g1_winner": winner,
- "g2_winner": winner,
- "judge": (judge.model_name, judge.prompt_template["name"]),
- "g1_user_prompt": m1_user_prompt,
- "g1_judgment": m1_judgment,
- "g2_user_prompt": m2_user_prompt,
- "g2_judgment": m2_judgment,
- "m1_score": m1_score,
- "m2_score": m2_score,
- "tstamp": time.time(),
- }
- print(
- f"question: {question_id}, model_1: {model_1}, model_2: {model_2}, "
- f"winner: {winner}, m1_score: {m1_score}, m2_score: {m2_score}, "
- f"judge: {(judge.model_name, judge.prompt_template['name'])}"
- )
- else:
- raise ValueError(f"invalid judge type: {judge['type']}")
-
- if output_file:
- os.makedirs(os.path.dirname(output_file), exist_ok=True)
- with open(output_file, "a") as fout:
- fout.write(json.dumps(result) + "\n")
-
- return result
-
-
-def chat_compeletion_openai(model, conv, temperature, max_tokens):
- output = API_ERROR_OUTPUT
- for _ in range(API_MAX_RETRY):
- try:
- messages = conv.to_openai_api_messages()
- response = openai.ChatCompletion.create(
- model=model,
- messages=messages,
- n=1,
- temperature=temperature,
- max_tokens=max_tokens,
- )
- output = response["choices"][0]["message"]["content"]
- break
- except openai.error.OpenAIError as e:
- print(type(e), e)
- time.sleep(API_RETRY_SLEEP)
-
- return output
-
-
-def chat_compeletion_anthropic(model, conv, temperature, max_tokens):
- output = API_ERROR_OUTPUT
- for _ in range(API_MAX_RETRY):
- try:
- c = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
- prompt = conv.get_prompt()
- response = c.completions.create(
- model=model,
- prompt=prompt,
- stop_sequences=[anthropic.HUMAN_PROMPT],
- max_tokens_to_sample=max_tokens,
- temperature=temperature,
- )
- output = response.completion
- break
- except anthropic.APIError as e:
- print(type(e), e)
- time.sleep(API_RETRY_SLEEP)
- return output.strip()
-
-
-def chat_compeletion_palm(chat_state, model, conv, temperature, max_tokens):
- from fastchat.serve.api_provider import init_palm_chat
-
- assert model == "palm-2-chat-bison-001"
-
- if chat_state is None:
- chat_state = init_palm_chat("chat-bison@001")
-
- parameters = {
- "temperature": temperature,
- "top_p": 0.8,
- "top_k": 40,
- "max_output_tokens": max_tokens,
- }
- output = API_ERROR_OUTPUT
- for _ in range(API_MAX_RETRY):
- try:
- response = chat_state.send_message(conv.messages[-2][1], **parameters)
- output = response.text
- break
- except Exception as e:
- print(type(e), e)
- time.sleep(API_RETRY_SLEEP)
- return chat_state, output
-
-
-def normalize_game_key_single(gamekey, result):
- """Make the model names sorted in a game key."""
- qid, model_1, model_2 = gamekey
- if model_1 < model_2:
- return gamekey, result
- else:
- new_gamekey = (qid, model_2, model_1)
- new_result = {
- "winners": tuple(reverse_model_map.get(x, x) for x in result["winners"]),
- "g1_judgment": result["g2_judgment"],
- "g2_judgment": result["g1_judgment"],
- }
- return new_gamekey, new_result
-
-
-def normalize_game_key_dict(judgment_dict):
- """Make the model names sorted in the game keys."""
- ret = {}
- for key, value in judgment_dict.items():
- new_key, new_value = normalize_game_key_single(key, value)
- ret[new_key] = new_value
- return ret
-
-
-def load_pairwise_model_judgments(filename: str):
- """Load model judgments.
-
- The return value is a dict of type:
- Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]
- """
- judge_dict = {}
-
- for line in open(filename):
- obj = json.loads(line)
- judge = tuple(obj["judge"])
- qid, model_1, model_2 = obj["question_id"], obj["model_1"], obj["model_2"]
-
- if judge not in judge_dict:
- judge_dict[judge] = {}
-
- if "winner" in obj:
- winner = obj["winner"]
- elif "g1_winner" in obj and "g2_winner" in obj:
- g1_winner, g2_winner = obj["g1_winner"], obj["g2_winner"]
- if g1_winner == g2_winner:
- winner = g1_winner
- else:
- winner = "inconsistent"
- else:
- raise ValueError(f"Invalid keys: {list(obj.keys())}")
-
- gamekey = (qid, model_1, model_2)
- winners = (winner,)
-
- judge_dict[judge][gamekey] = {
- "winners": winners,
- "g1_judgment": obj["g1_judgment"],
- "g2_judgment": obj["g2_judgment"],
- }
-
- # Make the model names sorted in the game keys
- normalized = {}
- for judge, value in judge_dict.items():
- normalized[judge] = normalize_game_key_dict(value)
- return normalized
-
-
-def load_single_model_judgments(filename: str):
- """Load model judgments.
-
- The return value is a dict of type:
- Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]
- """
- judge_dict = {}
-
- for line in open(filename):
- obj = json.loads(line)
- judge = tuple(obj["judge"])
- qid, model = obj["question_id"], obj["model"]
-
- if judge not in judge_dict:
- judge_dict[judge] = {}
-
- gamekey = (qid, model)
-
- judge_dict[judge][gamekey] = {
- "score": obj["score"],
- "judgment": obj["judgment"],
- }
- return judge_dict
-
-
-def resolve_pairwise_judgment_dict(
- question, model_judgments_normal, model_judgments_math, multi_turn=False
-):
- """Return the correct pairwise judge."""
- if multi_turn:
- if question["category"] in NEED_REF_CATS:
- return model_judgments_math[("gpt-4", "pair-math-v1-multi-turn")]
- return model_judgments_normal[("gpt-4", "pair-v2-multi-turn")]
-
- if question["category"] in NEED_REF_CATS:
- return model_judgments_math[("gpt-4", "pair-math-v1")]
- else:
- return model_judgments_normal[("gpt-4", "pair-v2")]
-
-
-def resolve_single_judgment_dict(
- question, model_judgments_normal, model_judgments_math, multi_turn=False
-):
- """Return the correct single answer grading judge."""
- if multi_turn:
- if question["category"] in NEED_REF_CATS:
- return model_judgments_math[("gpt-4", "single-math-v1-multi-turn")]
- return model_judgments_normal[("gpt-4", "single-v1-multi-turn")]
-
- if question["category"] in NEED_REF_CATS:
- return model_judgments_math[("gpt-4", "single-math-v1")]
- else:
- return model_judgments_normal[("gpt-4", "single-v1")]
-
-
-def get_pairwise_judge_explanation(gamekey, judgment_dict):
- """Get model judge explanation."""
- try:
- qid, model_1, model_2 = gamekey
- if model_1 < model_2:
- res = judgment_dict[gamekey]
- g1_judgment, g2_judgment = res["g1_judgment"], res["g2_judgment"]
- else:
- new_gamekey = (qid, model_2, model_1)
- res = judgment_dict[new_gamekey]
-
- model_1, model_2 = model_1, model_2
- g1_judgment, g2_judgment = res["g2_judgment"], res["g1_judgment"]
-
- return (
- f"**Game 1**. **A**: {model_1}, **B**: {model_2}\n\n"
- f"**Judgment**: {g1_judgment}"
- + f"\n\n`--------------------------`\n\n"
- + f"**Game 2**. **A**: {model_2}, **B**: {model_1}\n\n"
- f"**Judgment**: {g2_judgment}"
- )
- except KeyError:
- return "N/A"
-
-
-def get_single_judge_explanation(gamekey, judgment_dict):
- """Get model judge explanation."""
- try:
- qid, model = gamekey
-
- res = judgment_dict[gamekey]
-
- g1_judgment = res["judgment"]
- g1_score = res["score"]
-
- return (
- f"**Game 1**. **A**: {model}, **Score**: {g1_score}\n\n"
- f"**Judgment**: {g1_judgment}"
- )
- except KeyError:
- return "N/A"
-
-
-def check_data(questions, model_answers, ref_answers, models, judges):
- # check model answers
- for m in models:
- assert m in model_answers, f"Missing model answer for {m}"
- m_answer = model_answers[m]
- for q in questions:
- assert (
- q["question_id"] in m_answer
- ), f"Missing model {m}'s answer to Question {q['question_id']}"
- # check ref answers
- for jg in judges.values():
- if not jg.ref_based:
- continue
- for q in questions:
- if q["category"] not in NEED_REF_CATS:
- continue
- assert (
- q["question_id"] in ref_answers[jg.model_name]
- ), f"Missing reference answer to Question {q['question_id']} for judge {jg.model_name}"
-
-
-def get_model_list(answer_dir):
- file_paths = glob.glob(f"{answer_dir}/*.jsonl")
- file_names = [os.path.splitext(os.path.basename(f))[0] for f in file_paths]
- return file_names
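
Note on the judgment-loading helpers removed above: they return a nested dict keyed first by the judge tuple and then by a game key whose model names are kept in sorted order. The following sketch is not part of the repository; it uses a made-up judgment record to illustrate the structure that load_pairwise_model_judgments builds and what normalize_game_key_single does when the model names arrive unsorted.

    # Standalone illustration with hypothetical data (not taken from this diff).
    obj = {
        "judge": ["gpt-4", "pair-v2"],
        "question_id": 81,
        "model_1": "vicuna-13b",
        "model_2": "alpaca-13b",
        "g1_winner": "model_1",
        "g2_winner": "model_1",
        "g1_judgment": "Assistant A is more helpful ... [[A]]",
        "g2_judgment": "Assistant B is more helpful ... [[B]]",
    }
    judge = tuple(obj["judge"])  # ("gpt-4", "pair-v2")
    gamekey = (obj["question_id"], obj["model_1"], obj["model_2"])
    winner = obj["g1_winner"] if obj["g1_winner"] == obj["g2_winner"] else "inconsistent"
    judge_dict = {
        judge: {
            gamekey: {
                "winners": (winner,),
                "g1_judgment": obj["g1_judgment"],
                "g2_judgment": obj["g2_judgment"],
            }
        }
    }
    # Since "alpaca-13b" sorts before "vicuna-13b", normalization re-keys the game
    # as (81, "alpaca-13b", "vicuna-13b"), swaps g1_judgment and g2_judgment, and
    # maps each winner through reverse_model_map (model_1 <-> model_2).
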
diff --git a/llm_ft/fastchat/llm_judge/compute_agreement.py b/llm_ft/fastchat/llm_judge/compute_agreement.py
deleted file mode 100644
index 1b940bf5..00000000
--- a/llm_ft/fastchat/llm_judge/compute_agreement.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""
-Compute agreement among judges.
-
-Usage:
-python compute_agreement.py --judges gpt4-pair human --votefiles human_judgments.json gpt4_pair_judgments.json
-python compute_agreement.py --judges human human --votefiles human_judgments.json
-"""
-import argparse
-import json
-import os
-
-import numpy as np
-
-
-def get_judge_name(judge):
- if isinstance(judge, list) and judge[0] == "gpt-4" and judge[1].startswith("pair"):
- return "gpt4-pair"
- if judge.startswith("expert"):
- return "human"
- if judge.startswith("author"):
- return "author"
-
-
-def revert(vote):
- if vote == "model_a":
- return "model_b"
- elif vote == "model_b":
- return "model_a"
- return vote
-
-
-def get_mt_bench_votes_data(raw_votes):
- data = [{}, {}]
-
- for judge_votes in raw_votes:
- for vote in judge_votes:
- turn = vote["turn"] - 1
- if vote["model_a"] < vote["model_b"]:
- key = (vote["question_id"], vote["model_a"], vote["model_b"])
- winner = vote["winner"]
- else:
- key = (vote["question_id"], vote["model_b"], vote["model_a"])
- winner = revert(vote["winner"])
- judge = get_judge_name(vote["judge"])
- if key not in data[turn]:
- data[turn][key] = {}
- if judge not in data[turn][key]:
- data[turn][key][judge] = []
- data[turn][key][judge].append(winner)
-
- return data
-
-
-def convertvote(vote):
- if "tie" in vote:
- return "tie"
- return vote
-
-
-def equalvote(vote1, vote2):
- if "tie" in vote1 and "tie" in vote2:
- return True
- return vote1 == vote2
-
-
-# data: Dict[qid -> List[vote]]
-def get_mt_bench_agreement(data, judge1, judge2, ban):
- if judge1.startswith("gpt4") and judge2 == "human":
- stats = [0, 0]
- for votes in data.values():
- if judge1 not in votes or judge2 not in votes:
- continue
- assert len(votes[judge1]) == 1
- if convertvote(votes[judge1][0]) in ban:
- continue
- for v in votes[judge2]:
- if convertvote(v) in ban:
- continue
- stats[1] += 1
- stats[0] += equalvote(votes[judge1][0], v)
- return stats[0], stats[1]
- elif judge1 == "human" and judge2 == "human":
- stats = [0, 0]
- for votes in data.values():
- if "human" not in votes:
- continue
- for i in range(len(votes["human"]) - 1):
- for j in range(i + 1, len(votes["human"])):
- if (
- convertvote(votes["human"][i]) in ban
- or convertvote(votes["human"][j]) in ban
- ):
- continue
- stats[1] += 1
- stats[0] += equalvote(votes["human"][i], votes["human"][j])
- return stats[0], stats[1]
- else:
- raise Exception("Unsupported judges.")
-
-
-def run_mt_bench_agreement(judges, votefiles):
- # votes[i]: List of votes
- votes = []
- for filename in votefiles:
- with open(filename, "r") as f:
- data = json.load(f)
- votes.append(data)
-
- data = get_mt_bench_votes_data(votes)
-
- agree, total = get_mt_bench_agreement(data[0], judges[0], judges[1], ban=[])
- print(
- f"turn 1 with tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
- )
- agree, total = get_mt_bench_agreement(data[0], judges[0], judges[1], ban=["tie"])
- print(
- f"turn 1 without tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
- )
- agree, total = get_mt_bench_agreement(data[1], judges[0], judges[1], ban=[])
- print(
- f"turn 2 with tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
- )
- agree, total = get_mt_bench_agreement(data[1], judges[0], judges[1], ban=["tie"])
- print(
- f"turn 2 without tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
- )
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--judges", nargs=2, type=str, default=["gpt4-pair", "human"])
- parser.add_argument(
- "--votefiles",
- nargs="+",
- type=str,
- default=["gpt4_judgments.json", "human_judgments.json"],
- )
- args = parser.parse_args()
-
- run_mt_bench_agreement(args.judges, args.votefiles)
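
Note on the removed agreement script: for each pair of judges it reports agree / total over the vote pairs that survive the ban filter, treating any two tie variants as equal. The sketch below is a standalone illustration of that counting rule with hypothetical votes; it does not call the deleted functions.

    # One question, one GPT-4 vote and two human votes (hypothetical data).
    gpt4_vote = "model_a"
    human_votes = ["model_a", "tie (bothbad)"]

    def is_tie(v):  # mirrors convertvote(): collapse every tie variant
        return "tie" in v

    def same(v1, v2):  # mirrors equalvote(): ties only match other ties
        return (is_tie(v1) and is_tie(v2)) or v1 == v2

    total = len(human_votes)                               # 2 comparisons
    agree = sum(same(gpt4_vote, h) for h in human_votes)   # 1 agreement
    print(f"#total: {total}, #agree: {agree}, ratio: {agree / total:.2f}")  # 0.50
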
diff --git a/llm_ft/fastchat/llm_judge/data/judge_prompts.jsonl b/llm_ft/fastchat/llm_judge/data/judge_prompts.jsonl
deleted file mode 100644
index 4ec7524c..00000000
--- a/llm_ft/fastchat/llm_judge/data/judge_prompts.jsonl
+++ /dev/null
@@ -1,8 +0,0 @@
-{"name": "pair-v2", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "[User Question]\n{question}\n\n[The Start of Assistant A's Answer]\n{answer_a}\n[The End of Assistant A's Answer]\n\n[The Start of Assistant B's Answer]\n{answer_b}\n[The End of Assistant B's Answer]", "description": "Prompt for general questions", "category": "general", "output_format": "[[A]]"}
-{"name": "pair-v2-multi-turn", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user questions. You should choose the assistant that follows the user's instructions and answers the user's questions better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. You should focus on who provides a better answer to the second user question. Begin your evaluation by comparing the responses of the two assistants and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_a_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_a_2}\n\n<|The End of Assistant A's Conversation with User|>\n\n\n<|The Start of Assistant B's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant B:\n{answer_b_1}\n\n### User:\n{question_2}\n\n### Assistant B:\n{answer_b_2}\n\n<|The End of Assistant B's Conversation with User|>", "description": "Prompt for multi-turn general questions", "category": "general", "output_format": "[[A]]"}
-{"name": "pair-math-v1", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. Your evaluation should consider correctness and helpfulness. You will be given a reference answer, assistant A's answer, and assistant B's answer. Your job is to evaluate which assistant's answer is better. Begin your evaluation by comparing both assistants' answers with the reference answer. Identify and correct any mistakes. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "[User Question]\n{question}\n\n[The Start of Reference Answer]\n{ref_answer_1}\n[The End of Reference Answer]\n\n[The Start of Assistant A's Answer]\n{answer_a}\n[The End of Assistant A's Answer]\n\n[The Start of Assistant B's Answer]\n{answer_b}\n[The End of Assistant B's Answer]", "description": "Prompt for math questions", "category": "math", "output_format": "[[A]]"}
-{"name": "pair-math-v1-multi-turn", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user questions. Your evaluation should consider correctness and helpfulness. You will be given reference answers, the assistant A's answers, the assistant B's answers. Your job is to determine which assistant provides correct and helpful answers to the second user question. Begin your evaluation by comparing both assistants' answers with the reference answers. Identify and correct any mistakes. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_a_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_a_2}\n\n<|The End of Assistant A's Conversation with User|>\n\n\n<|The Start of Assistant B's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant B:\n{answer_b_1}\n\n### User:\n{question_2}\n\n### Assistant B:\n{answer_b_2}\n\n<|The End of Assistant B's Conversation with User|>", "description": "Prompt for multi-turn general questions", "category": "general", "output_format": "[[A]]"}
-{"name": "single-v1", "type": "single", "system_prompt": "You are a helpful assistant.", "prompt_template": "[Instruction]\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"}
-{"name": "single-math-v1", "type": "single", "system_prompt": "You are a helpful assistant.", "prompt_template": "[Instruction]\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n[Question]\n{question}\n\n[The Start of Reference Answer]\n{ref_answer_1}\n[The End of Reference Answer]\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"}
-{"name": "single-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. You evaluation should focus on the assistant's answer to the second user question. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"}
-{"name": "single-math-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. You evaluation should focus on the assistant's answer to the second question. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"}
diff --git a/llm_ft/fastchat/llm_judge/data/mt_bench/misc/radar.png b/llm_ft/fastchat/llm_judge/data/mt_bench/misc/radar.png
deleted file mode 100644
index d692b378766dc98793905d6c54d11e42bcc23e8c..0000000000000000000000000000000000000000
GIT binary patch
(base85-encoded PNG data for radar.png, 354,119 bytes, omitted)