From 3338e0672ac8cb67c09484e14d88565279c12066 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 9 Dec 2024 15:19:08 -0800 Subject: [PATCH 001/128] Add fsdp2 strategy Signed-off-by: Boxiang Wang --- .../pytorch/strategies/fsdp2_strategy.py | 273 ++++++++++++++++++ .../pytorch/strategies/megatron_strategy.py | 2 +- 2 files changed, 274 insertions(+), 1 deletion(-) create mode 100644 nemo/lightning/pytorch/strategies/fsdp2_strategy.py diff --git a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py new file mode 100644 index 000000000000..1f6ad2dd66a9 --- /dev/null +++ b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py @@ -0,0 +1,273 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +from collections import OrderedDict +from pathlib import Path +from typing import Any, Dict, Optional, Union + +import lightning.pytorch as pl +import torch +from lightning.fabric.plugins import CheckpointIO +from lightning.fabric.strategies.fsdp import _get_sharded_state_dict_context +from lightning.pytorch.strategies.model_parallel import ModelParallelStrategy as PLModelParallelStrategy +from lightning.pytorch.trainer.states import TrainerFn +from lightning.pytorch.utilities.types import STEP_OUTPUT +from megatron.core.transformer.transformer_layer import TransformerLayer +from torch.distributed.checkpoint.state_dict import ( # get_state_dict, + StateDictOptions, + get_optimizer_state_dict, + set_state_dict, +) +from torch.utils.data import DataLoader +from typing_extensions import override + +from nemo.lightning import io +from nemo.lightning.pytorch.strategies.utils import ( + ckpt_to_dir, + create_checkpoint_io, + fix_progress_bar, + init_model_parallel, + mcore_to_pyt_sharded_state_dict, + pyt_to_mcore_state_dict, + setup_data_sampler, + setup_parallel_ranks, +) + + +class FSDP2Strategy(PLModelParallelStrategy, io.IOMixin): + """Megatron plugin for Pytorch Lightning. + + This strategy implements FSDP 2 using PyTorch's native FSDP 2 methods. Comparing with + MegatronStrategy, FSDP2Strategy is designed to be more lightweight, with minimal + modifications over Lightning's ModelParallelStrategy which supports FSDP2 + TP + parallelization but preserves necessary features to be compatible with nemo and mcore. + By default, this strategy wraps FSDP2 per TransformerLayer. + + Note: + This strategy is designed to work with NVIDIA's Megatron-LM framework and requires + specific model implementations that are compatible with Megatron's parallelism techniques. + Note: + Due to the different optimizer structure (FSDP2 only uses torch native optimizers), + MegatronStrategy cannot resume training from checkpoints saved by FSDP2Strategy, and vice + versa. However, the model weights structure is made compatible, so switching strategy is + possible if users only need the weights not the optimizer states. (E.g. 
run pretrain with + megatron 4D parallelism and run SFT with FSDP2.) + """ + + def __init__( + self, + data_parallel_size: Union[Literal["auto"], int] = "auto", + tensor_parallel_size: Union[Literal["auto"], int] = "auto", + ckpt_load_optimizer: bool = True, + ckpt_save_optimizer: bool = True, + data_sampler=None, + **kwargs, + ): + super().__init__(data_parallel_size=data_parallel_size, tensor_parallel_size=tensor_parallel_size, **kwargs) + + self.data_sampler = data_sampler + self.ckpt_load_optimizer = ckpt_load_optimizer + self.ckpt_save_optimizer = ckpt_save_optimizer + + @override + def setup_environment(self) -> None: + setup_parallel_ranks(self) + super().setup_environment() + init_model_parallel(self.model) + + @override + def setup(self, trainer: pl.Trainer) -> None: + self.trainer = trainer + setup_data_sampler(self.trainer) + fix_progress_bar(trainer) + super().setup(trainer) + + def _get_loss_reduction(self, step_type: str): + for fn_name in [f"{step_type}_loss_reduction", "loss_reduction"]: + if hasattr(self.lightning_module, fn_name): + return getattr(self.lightning_module, fn_name) + return None + + def _step_proxy(self, step_type, batch, batch_idx=None): + method_name = f"{step_type}_step" + if self.model != self.lightning_module: + loss = self._forward_redirection(self.model, self.lightning_module, method_name, batch, batch_idx) + else: + loss = getattr(self.lightning_module, method_name)(batch, batch_idx) + + _loss_reduction = self._get_loss_reduction(step_type) + if _loss_reduction: + return _loss_reduction.forward(batch, loss) + return loss, {'avg': loss} + + @override + def training_step(self, batch, batch_idx=None) -> STEP_OUTPUT: + assert self.lightning_module is not None + assert self.model is not None + with self.precision_plugin.train_step_context(): + loss, reduced = self._step_proxy("training", batch, batch_idx) + + self.lightning_module.log( + 'global_step', + self.trainer.global_step, + prog_bar=True, + rank_zero_only=True, + batch_size=1, + ) + + self.lightning_module.log( + 'step', + self.trainer.global_step, + ) + self.lightning_module.log( + 'reduced_train_loss', reduced['avg'], prog_bar=True, rank_zero_only=True, batch_size=1 + ) + + # returns unreduced loss for backward + return loss + + @override + def validation_step(self, batch, batch_idx=None) -> Any: + assert self.lightning_module is not None + assert self.model is not None + with self.precision_plugin.val_step_context(): + loss, reduced = self._step_proxy("validation", batch, batch_idx) + self.lightning_module.log('val_loss', reduced['avg'], rank_zero_only=True, batch_size=1) + return loss + + @override + def test_step(self, batch, batch_idx=None) -> STEP_OUTPUT: + assert self.lightning_module is not None + assert self.model is not None + with self.precision_plugin.test_step_context(): + loss, reduced = self._step_proxy("test", batch, batch_idx) + self.lightning_module.log('test_loss', reduced['avg'], rank_zero_only=True, batch_size=1) + + return loss + + @override + def predict_step(self, batch, batch_idx=None) -> STEP_OUTPUT: + assert self.lightning_module is not None + assert self.model is not None + with self.precision_plugin.predict_step_context(): + loss, reduced = self._step_proxy("predict", batch, batch_idx) + return reduced + + @override + def process_dataloader(self, dataloader: DataLoader) -> DataLoader: + if self.data_sampler: + return self.data_sampler.transform_dataloader(dataloader) + + return dataloader + + @property + @override + def checkpoint_io(self) -> CheckpointIO: + if not 
self._checkpoint_io: + self._checkpoint_io = create_checkpoint_io() + + return self._checkpoint_io + + @checkpoint_io.setter + def checkpoint_io(self, io: CheckpointIO) -> None: + self._checkpoint_io = io + + @property + def current_epoch_step(self) -> int: + """ + Get the value of step within an epoch. + """ + return max( + self.trainer.fit_loop.epoch_loop.automatic_optimization.optim_progress.optimizer.step.current.completed, + self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.current.completed, + ) + + @override + def remove_checkpoint(self, filepath: Union[str, Path]) -> None: + # Taken from MegatronStrategy + ckpt = ckpt_to_dir(filepath) + if self.is_global_zero: + if os.path.islink(ckpt): + os.unlink(ckpt) + else: + shutil.rmtree(ckpt) + + @override + def save_checkpoint( + self, checkpoint: Dict[str, Any], filepath: Union[str, Path], storage_options: Optional[Any] = None + ) -> None: + """Converts PyT checkpoints to MCore format and save using MCore dist ckpt library.""" + checkpoint["sharded_state_dict"] = pyt_to_mcore_state_dict(checkpoint.pop("state_dict")) + checkpoint["state_dict"] = OrderedDict([]) + + if "optimizer_states" in checkpoint and self.trainer.state.fn == TrainerFn.FITTING: + # Clear the optimizer states. This handles the case where ckpt_save_optimizer=False + # Ideally, the optimizer state dicts should not be generated in this case + checkpoint["optimizer_states"] = {} + + ## replace unsharded optimizer_states with sharded dict. + ## note that if trainer.save_checkpoint(path, save_weights_only=True) is called, + ## the checkpoint will contain only model weights. Optimizer states will be omitted. + if self.ckpt_save_optimizer: + checkpoint['optimizer'] = get_optimizer_state_dict(self.model, self.optimizers) + pyt_to_mcore_state_dict(checkpoint['optimizer']['state'], prefix="optimizer.state.") + + self.checkpoint_io.save_checkpoint(checkpoint, filepath, storage_options=storage_options) + + @override + def load_checkpoint(self, checkpoint_path: str | Path) -> Dict[str, Any]: + """PTL method which we override to integrate distributed checkpoints for FSDP models. + Different from MegatronStrategy, both model and optimizer states are restore within + this method. + + The logic here is slightly more complicated: + 1. Obtain PyT state dicts (sharded & unflattened) for model and optim -> torch::ShardedTensor + 2. Convert to MCore state dicts -> mcore::ShardedTensor + 3. Load from checkpoint using MCore dist ckpt API -> torch::Tensor + 4. Convert to PyT state dicts (sharded & unflattened) -> torch::ShardedTensor + 5. Load into model and optim using PyT dist ckpt API + 6. Return the loaded checkpoint for lightning to load other metadata + """ + path = Path(self.broadcast(checkpoint_path)) + torch.cuda.empty_cache() + + # TODO: the elegant way to load both state dicts. 
Need pytorch 2.3.1 + # msd, osd = get_state_dict(self.model, self.optimizers, options=StateDictOptions(cpu_offload=True)) + sharded_state_dict = {} + with _get_sharded_state_dict_context(self.model): + msd = self.model.state_dict() + pyt_to_mcore_state_dict(msd) + sharded_state_dict["sharded_state_dict"] = msd + + if self.ckpt_load_optimizer and self.trainer.state.fn == TrainerFn.FITTING: + osd = get_optimizer_state_dict(self.model, self.optimizers, options=StateDictOptions(cpu_offload=True)) + pyt_to_mcore_state_dict(osd['state'], prefix="optimizer.state.") + sharded_state_dict["optimizer"] = osd + + checkpoint = self.checkpoint_io.load_checkpoint(path, sharded_state_dict=sharded_state_dict) + mcore_to_pyt_sharded_state_dict(checkpoint['sharded_state_dict'], msd) + + if self.ckpt_load_optimizer and self.trainer.state.fn == TrainerFn.FITTING: + mcore_to_pyt_sharded_state_dict(checkpoint['optimizer']['state'], osd['state']) + + set_state_dict( + self.model, + self.optimizers if self.ckpt_load_optimizer else [], + model_state_dict=checkpoint['sharded_state_dict'], + optim_state_dict=checkpoint['optimizer'] if self.ckpt_load_optimizer else None, + ) + + return checkpoint diff --git a/nemo/lightning/pytorch/strategies/megatron_strategy.py b/nemo/lightning/pytorch/strategies/megatron_strategy.py index bf50b3093161..b43885b4e251 100644 --- a/nemo/lightning/pytorch/strategies/megatron_strategy.py +++ b/nemo/lightning/pytorch/strategies/megatron_strategy.py @@ -115,7 +115,7 @@ class MegatronStrategy(DDPStrategy, io.IOMixin): across GPU ranks. Defaults to 1. virtual_pipeline_model_parallel_size (Optional[int]): Interleaved pipeline parallelism used to improve performance by reducing the pipeline bubble. Defaults to None. - microbatch_group_size_per_vp_stage(Optional[int]): the number of micro-batches that are executed + microbatch_group_size_per_vp_stage (Optional[int]): the number of micro-batches that are executed at a time for a given virtual stage (both forward and backward). Defaults to None and convert to pipeline_parallel_size. which specifies a depth-first schedule. context_parallel_size (int): Splits network input along sequence dimension across GPU ranks. From 4c9b5df38d25e8c1656b561d542c2431595f0e16 Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Mon, 9 Dec 2024 23:37:19 +0000 Subject: [PATCH 002/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- nemo/lightning/pytorch/strategies/fsdp2_strategy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py index 1f6ad2dd66a9..724e56f7cb0c 100644 --- a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py +++ b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py @@ -52,7 +52,7 @@ class FSDP2Strategy(PLModelParallelStrategy, io.IOMixin): This strategy implements FSDP 2 using PyTorch's native FSDP 2 methods. Comparing with MegatronStrategy, FSDP2Strategy is designed to be more lightweight, with minimal - modifications over Lightning's ModelParallelStrategy which supports FSDP2 + TP + modifications over Lightning's ModelParallelStrategy which supports FSDP2 + TP parallelization but preserves necessary features to be compatible with nemo and mcore. By default, this strategy wraps FSDP2 per TransformerLayer. 
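
For orientation between patches: the two patches above introduce FSDP2Strategy and its checkpoint handling, and the following patches wire up the package-level imports. A minimal usage sketch is given below. It is illustrative only: the constructor arguments follow PATCH 001 above, while the trainer settings (devices, max_steps) and the model/data-module pair are placeholders and are not defined anywhere in this PR.

    from nemo import lightning as nl
    from nemo.lightning.pytorch.strategies.fsdp2_strategy import FSDP2Strategy

    # Constructor arguments as defined in PATCH 001; "auto" defers sizing to the
    # parent Lightning ModelParallelStrategy.
    strategy = FSDP2Strategy(
        data_parallel_size="auto",
        tensor_parallel_size=1,      # set >1 to combine FSDP2 with tensor parallelism
        ckpt_save_optimizer=True,    # include optimizer state in saved checkpoints
        ckpt_load_optimizer=True,    # restore optimizer state when resuming
    )

    trainer = nl.Trainer(
        devices=8,                   # placeholder
        accelerator="gpu",
        strategy=strategy,
        max_steps=100,               # placeholder
    )
    # trainer.fit(model, datamodule)  # any NeMo 2.0 model / data module pair

As the class docstring above notes, checkpoints written this way share the model-weight layout with MegatronStrategy but not the optimizer layout, so switching strategies between runs is only possible when resuming from the weights alone.
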
From 11a4637360a8659039566f93a2158e66b07cf92e Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 9 Dec 2024 16:01:02 -0800 Subject: [PATCH 003/128] Add imports Signed-off-by: Boxiang Wang --- nemo/lightning/pytorch/strategies/__init__.py | 2 ++ nemo/lightning/pytorch/strategies/fsdp2_strategy.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nemo/lightning/pytorch/strategies/__init__.py b/nemo/lightning/pytorch/strategies/__init__.py index 9ef58bcc9023..cc7be27e505f 100644 --- a/nemo/lightning/pytorch/strategies/__init__.py +++ b/nemo/lightning/pytorch/strategies/__init__.py @@ -13,10 +13,12 @@ # limitations under the License. from nemo.lightning.pytorch.strategies.fsdp_strategy import FSDPStrategy +from nemo.lightning.pytorch.strategies.fsdp2_strategy import FSDP2Strategy from nemo.lightning.pytorch.strategies.megatron_strategy import MegatronStrategy __all__ = [ "FSDPStrategy", + "FSDP2Strategy", "MegatronStrategy", ] diff --git a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py index 724e56f7cb0c..7e33ffa37849 100644 --- a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py +++ b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py @@ -16,7 +16,7 @@ import shutil from collections import OrderedDict from pathlib import Path -from typing import Any, Dict, Optional, Union +from typing import Any, Dict, Literal, Optional, Union import lightning.pytorch as pl import torch From 5971cf4522787fd3c41f9b295522bad89d3e87ff Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Tue, 10 Dec 2024 00:02:09 +0000 Subject: [PATCH 004/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- nemo/lightning/pytorch/strategies/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nemo/lightning/pytorch/strategies/__init__.py b/nemo/lightning/pytorch/strategies/__init__.py index cc7be27e505f..b01ff14a10cc 100644 --- a/nemo/lightning/pytorch/strategies/__init__.py +++ b/nemo/lightning/pytorch/strategies/__init__.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from nemo.lightning.pytorch.strategies.fsdp_strategy import FSDPStrategy from nemo.lightning.pytorch.strategies.fsdp2_strategy import FSDP2Strategy +from nemo.lightning.pytorch.strategies.fsdp_strategy import FSDPStrategy from nemo.lightning.pytorch.strategies.megatron_strategy import MegatronStrategy - __all__ = [ "FSDPStrategy", "FSDP2Strategy", From 7c30f82b8bef5b6c9977358c95073b3c63573291 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Fri, 20 Dec 2024 16:18:49 -0800 Subject: [PATCH 005/128] Add init import Signed-off-by: Boxiang Wang --- nemo/lightning/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nemo/lightning/__init__.py b/nemo/lightning/__init__.py index e01a2d5e5765..f5ad7d7a5033 100644 --- a/nemo/lightning/__init__.py +++ b/nemo/lightning/__init__.py @@ -31,7 +31,7 @@ from nemo.lightning.pytorch.optim import LRSchedulerModule, MegatronOptimizerModule, OptimizerModule, lr_scheduler from nemo.lightning.pytorch.plugins import MegatronDataSampler, MegatronMixedPrecision from nemo.lightning.pytorch.plugins import data_sampler as _data_sampler -from nemo.lightning.pytorch.strategies import FSDPStrategy, MegatronStrategy +from nemo.lightning.pytorch.strategies import FSDPStrategy, FSDP2Strategy, MegatronStrategy from nemo.lightning.pytorch.strategies.utils import RestoreConfig from nemo.lightning.pytorch.trainer import Trainer, configure_no_restart_validation_training_loop from nemo.lightning.resume import AutoResume @@ -60,6 +60,7 @@ def _is_slurm_interactive_mode(): "MegatronMixedPrecision", "MegatronOptimizerModule", "FSDPStrategy", + "FSDP2Strategy", "RestoreConfig", "lr_scheduler", "NeMoLogger", From ef11d67f8495c9309a51beb68887dda4ef132dd6 Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Sat, 21 Dec 2024 00:19:51 +0000 Subject: [PATCH 006/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- nemo/lightning/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/lightning/__init__.py b/nemo/lightning/__init__.py index f5ad7d7a5033..9ad6822243a9 100644 --- a/nemo/lightning/__init__.py +++ b/nemo/lightning/__init__.py @@ -31,7 +31,7 @@ from nemo.lightning.pytorch.optim import LRSchedulerModule, MegatronOptimizerModule, OptimizerModule, lr_scheduler from nemo.lightning.pytorch.plugins import MegatronDataSampler, MegatronMixedPrecision from nemo.lightning.pytorch.plugins import data_sampler as _data_sampler -from nemo.lightning.pytorch.strategies import FSDPStrategy, FSDP2Strategy, MegatronStrategy +from nemo.lightning.pytorch.strategies import FSDP2Strategy, FSDPStrategy, MegatronStrategy from nemo.lightning.pytorch.strategies.utils import RestoreConfig from nemo.lightning.pytorch.trainer import Trainer, configure_no_restart_validation_training_loop from nemo.lightning.resume import AutoResume From 3d50f086430db9fe4ec67af7dc44b9b88c9c4103 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Kami=C5=84ski?= <67481570+Laplasjan107@users.noreply.github.com> Date: Tue, 10 Dec 2024 23:59:02 +0100 Subject: [PATCH 007/128] Fix mixtral export for NeMo 2.0 (#11532) * Initial commit Signed-off-by: Piotr Kaminski * Apply isort and black reformatting Signed-off-by: Laplasjan107 --------- Signed-off-by: Piotr Kaminski Signed-off-by: Laplasjan107 Co-authored-by: Piotr Kaminski Co-authored-by: Laplasjan107 --- nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py b/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py 
index a2d745864d92..f4ace00292f9 100644 --- a/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py +++ b/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py @@ -118,7 +118,9 @@ def load_scaling_factors(state_dict: dict, basename: str, size: int) -> Optional def filter_experts_extra_states(state_dict: dict): - pattern = r'model\.decoder\.layers\.mlp\.experts\.experts\.linear_fc\d+\._extra_state/shard_\d+\.\d+_\d+\.\d+' + pattern = ( + r'(model|module)\.decoder\.layers\.mlp\.experts\.experts\.linear_fc\d+\._extra_state/shard_\d+\.\d+_\d+\.\d+' + ) return {k: v for k, v in state_dict.items() if not re.fullmatch(pattern, k)} From 6ea60dee5b0efef083b95096e82f815ff9f68687 Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Date: Wed, 11 Dec 2024 00:37:45 -0800 Subject: [PATCH 008/128] Make HFDatasetDataModule a datasets.load_dataset wrapper (#11500) * Make HfDatasetDataModule a datasets.load_dataset wrapper Signed-off-by: Alexandros Koumparoulis * add logging Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * Update HFDatasetDataModule Signed-off-by: Alexandros Koumparoulis * refactor Signed-off-by: Alexandros Koumparoulis * refactor fixup Signed-off-by: Alexandros Koumparoulis * refactor fixup #2 Signed-off-by: Alexandros Koumparoulis * do not expand Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * doc Signed-off-by: Alexandros Koumparoulis * doc Signed-off-by: Alexandros Koumparoulis * add synonym Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * typo Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * Add train/val/test attributes Signed-off-by: Alexandros Koumparoulis * Add test for hf-datamodule Signed-off-by: Alexandros Koumparoulis * Import lazily to avoid breaking with older megatron versions Signed-off-by: Alexandros Koumparoulis * bot happy Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * bot happy2 Signed-off-by: Alexandros Koumparoulis * add doc-strings and collate-fn arg Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa --------- Signed-off-by: Alexandros Koumparoulis Signed-off-by: akoumpa Co-authored-by: akoumpa --- examples/llm/peft/hf.py | 14 +- nemo/collections/llm/gpt/data/api.py | 4 +- nemo/collections/llm/gpt/data/hf_dataset.py | 154 +++++++++++++++++- nemo/collections/llm/inference/base.py | 7 +- nemo/collections/llm/t5/model/t5.py | 3 +- .../llm/gpt/data/test_hf_datamodule.py | 114 +++++++++++++ 6 files changed, 276 insertions(+), 20 deletions(-) create mode 100644 tests/collections/llm/gpt/data/test_hf_datamodule.py diff --git a/examples/llm/peft/hf.py b/examples/llm/peft/hf.py index 357dc5a7bd17..c24c5958b388 100644 --- a/examples/llm/peft/hf.py +++ b/examples/llm/peft/hf.py @@ -18,7 +18,7 @@ from nemo.collections import llm -def mk_hf_dataset(tokenizer): +def make_squad_hf_dataset(tokenizer): EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN def formatting_prompts_func(examples): @@ -45,11 +45,9 @@ def formatting_prompts_func(examples): 'labels': tokens[1:] + [tokens[-1]], } - from datasets import 
load_dataset - - dataset = load_dataset("rajpurkar/squad", split="train") - dataset = dataset.map(formatting_prompts_func, batched=False, batch_size=2) - return dataset + datamodule = llm.HFDatasetDataModule("rajpurkar/squad", split="train", pad_token_id=tokenizer.eos_token_id) + datamodule.map(formatting_prompts_func, batched=False, batch_size=2) + return datamodule if __name__ == '__main__': @@ -80,9 +78,7 @@ def formatting_prompts_func(examples): llm.api.finetune( model=llm.HFAutoModelForCausalLM(args.model), - data=llm.HFDatasetDataModule( - mk_hf_dataset(tokenizer.tokenizer), pad_token_id=tokenizer.tokenizer.eos_token_id - ), + data=make_squad_hf_dataset(tokenizer.tokenizer), trainer=nl.Trainer( devices=args.devices, max_steps=args.max_steps, diff --git a/nemo/collections/llm/gpt/data/api.py b/nemo/collections/llm/gpt/data/api.py index 374bee83b8b2..b4e603186bf4 100644 --- a/nemo/collections/llm/gpt/data/api.py +++ b/nemo/collections/llm/gpt/data/api.py @@ -41,8 +41,8 @@ def dolly() -> pl.LightningDataModule: @run.cli.factory @run.autoconvert -def hf_dataset(dataset: str) -> pl.LightningDataModule: - return HFDatasetDataModule(dataset=dataset, global_batch_size=16, micro_batch_size=2) +def hf_dataset(path: str) -> pl.LightningDataModule: + return HFDatasetDataModule(path=path, global_batch_size=16, micro_batch_size=2) __all__ = ["mock", "squad", "dolly", "hf_dataset"] diff --git a/nemo/collections/llm/gpt/data/hf_dataset.py b/nemo/collections/llm/gpt/data/hf_dataset.py index 0f45ecf265b7..039e5b90b096 100644 --- a/nemo/collections/llm/gpt/data/hf_dataset.py +++ b/nemo/collections/llm/gpt/data/hf_dataset.py @@ -12,16 +12,108 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datasets.dataset_dict import lightning.pytorch as pl import torch +from datasets import load_dataset from torch.utils.data import DataLoader + from nemo.lightning.pytorch.plugins import MegatronDataSampler +from nemo.utils import logging + + +def make_dataset_splits(path, split, split_aliases, kwargs): + """ + Loads a dataset with datasets.load_dataset and + returns a dictionary containing all dataset splits. + + For example: + + ans = make_dataset_splits("dataset-id") + $ ds = load_dataset("dataset-id") + $ print(ds) + > DatasetDict({ + > train: Dataset({ + > features: ['id', 'title', 'context', 'question', 'answers'], + > num_rows: 87599 + > }) + > validation: Dataset({ + > features: ['id', 'title', 'context', 'question', 'answers'], + > num_rows: 10570 + > }) + > }) + + In this case the value of `ans` (returned value) will be: + $ print(ans) + > { + > "train": Dataset .. (with 87599 rows), + > "val": Dataset .. 
(with 10570 rows), + > } + """ + dataset = load_dataset(path, split=split, **kwargs) + + split_names = ['train', 'test', 'val'] + dataset_splits = {split: None for split in split_names} + + alias_to_split = {} + for split_name, _split_aliases in split_aliases.items(): + assert split_name in split_names + for alias in _split_aliases: + alias_to_split[alias] = split_name + + if isinstance(dataset, datasets.dataset_dict.DatasetDict): + dataset_split_names = dataset.keys() + logging.info(f"HF dataset has the following splits: {dataset_split_names}") + for alias_split_name, split in dataset.items(): + split_name = alias_to_split[alias_split_name] + assert dataset_splits[split_name] is None + dataset_splits[split_name] = split + elif isinstance(split, list): + logging.info(f"Loaded HF dataset will use " + str(split) + " splits.") + assert isinstance(dataset, list) + for i, alias_split_name in enumerate(split): + split_name = alias_to_split[alias_split_name] + assert dataset_splits[split_name] is None + dataset_splits[split_name] = dataset[i] + elif isinstance(split, str): + logging.info(f"Loaded HF dataset has a single split.") + assert not isinstance(dataset, list) + alias_split_name = split + if '+' in alias_split_name: + raise ValueError("Split concatenation not supported") + elif '[' in alias_split_name: + alias_split_name = alias_split_name.split('[')[0] + split_name = alias_to_split[alias_split_name] + assert dataset_splits[split_name] is None + dataset_splits[split_name] = dataset + else: + raise ValueError("Expected split name to be None, str or a list") + + assert ( + sum(map(lambda x: x is not None, dataset_splits.values())) > 0 + ), "Expected at least one dataset to have been initialized" + return dataset_splits class HFDatasetDataModule(pl.LightningDataModule): + """HFDatasetDataModule wraps HF's load_dataset (datasets library) + so that it can be used within NeMo. + Users can select whether to use an mcore-sampler via use_mcore_sampler arg. + + Usage examples: + + - loading a single split (train) from a dataset + llm.HFDatasetDataModule("rajpurkar/squad", split="train") + + - loading multiple splits (train, validation) from a dataset + llm.HFDatasetDataModule("rajpurkar/squad", split=["train", "validation"]) + """ + def __init__( self, - dataset, + path, + collate_fn=None, + split=None, num_workers=2, pin_memory=True, persistent_workers=True, @@ -31,11 +123,29 @@ def __init__( pad_token_id=0, use_mcore_sampler=False, mcore_dataloader_type='cyclic', + train_aliases=["train", "training"], + test_aliases=["test", "testing"], + val_aliases=["val", "validation", "valid", "eval"], + **kwargs, ) -> None: super().__init__() assert pad_token_id is not None - self.dataset = dataset + logging.info(f"Loading HF dataset from {path}") + + # A dataset usually will have several splits (e.g. train, val, test, etc). + # We map synonym names to canonical names (train, test, val). + # A synonym can be a prefix/suffixed word e.g. train <> training. + split_aliases = {'train': train_aliases, 'test': test_aliases, 'val': val_aliases} + + # self.dataset_splits will hold the actual dataset for each split. 
+ self.dataset_splits = make_dataset_splits(path, split, split_aliases, kwargs) + + if collate_fn is None: + self._collate_fn = lambda x: HFDatasetDataModule.collate_fn(x, pad_token_id=self.pad_token_id) + else: + self._collate_fn = collate_fn + self.num_workers = num_workers self.pin_memory = pin_memory self.persistent_workers = persistent_workers @@ -84,17 +194,51 @@ def setup(self, stage: str): dataloader_type=self.mcore_dataloader_type, ) - def train_dataloader(self, collate_fn=None): - from nemo.lightning.data import add_megatron_sampler + def _make_dataloader(self, dataset, collate_fn=None): + assert dataset is not None if collate_fn is None: collate_fn = lambda x: HFDatasetDataModule.collate_fn(x, pad_token_id=self.pad_token_id) return DataLoader( - self.dataset, + dataset, num_workers=self.num_workers, pin_memory=self.pin_memory, persistent_workers=self.persistent_workers, collate_fn=collate_fn, batch_size=self.micro_batch_size, ) + + @property + def train(self): + return self.dataset_splits['train'] + + @property + def val(self): + return self.dataset_splits['val'] + + @property + def test(self): + return self.dataset_splits['test'] + + def train_dataloader(self): + return self._make_dataloader(self.train, self._collate_fn) + + def val_dataloader(self): + return self._make_dataloader(self.val, self._collate_fn) + + def test_dataloader(self): + return self._make_dataloader(self.test, self._collate_fn) + + def map(self, function=None, split_names=None, **kwargs): + if isinstance(split_names, str): + dataset_splits = {split_names: self.dataset_splits[split_names]} + elif isinstance(split_names, list): + dataset_splits = {k: self.dataset_splits[k] for k in split_names} + else: + dataset_splits = self.dataset_splits + + for split_name, subset in dataset_splits.items(): + if subset is None: + continue + dataset_splits[split_name] = subset.map(function, **kwargs) diff --git a/nemo/collections/llm/inference/base.py b/nemo/collections/llm/inference/base.py index 795d6efadd3a..6c89a1b42b15 100644 --- a/nemo/collections/llm/inference/base.py +++ b/nemo/collections/llm/inference/base.py @@ -25,9 +25,6 @@ from megatron.core.inference.model_inference_wrappers.abstract_model_inference_wrapper import ( AbstractModelInferenceWrapper, ) -from megatron.core.inference.text_generation_controllers.encoder_decoder_text_generation_controller import ( - EncoderDecoderTextGenerationController, -) from megatron.core.inference.text_generation_controllers.simple_text_generation_controller import ( SimpleTextGenerationController, ) @@ -232,6 +229,10 @@ def generate( Returns: dict: A dictionary containing the generated results. 
""" + from megatron.core.inference.text_generation_controllers.encoder_decoder_text_generation_controller import ( + EncoderDecoderTextGenerationController, + ) + if encoder_prompts is not None: text_generation_controller = EncoderDecoderTextGenerationController( inference_wrapped_model=model, tokenizer=tokenizer diff --git a/nemo/collections/llm/t5/model/t5.py b/nemo/collections/llm/t5/model/t5.py index 940c0e51ee92..743d16f57c2b 100644 --- a/nemo/collections/llm/t5/model/t5.py +++ b/nemo/collections/llm/t5/model/t5.py @@ -20,7 +20,7 @@ import torch import torch.distributed from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import InferenceWrapperConfig -from megatron.core.inference.model_inference_wrappers.t5.t5_inference_wrapper import T5InferenceWrapper + from megatron.core.models.T5.t5_model import T5Model as MCoreT5Model from megatron.core.optimizer import OptimizerConfig from megatron.core.transformer.spec_utils import ModuleSpec @@ -319,6 +319,7 @@ def get_inference_wrapper(self, params_dtype, inference_batch_times_seqlen_thres inference_batch_times_seqlen_threshold=inference_batch_times_seqlen_threshold, padded_vocab_size=self.tokenizer.vocab_size, ) + from megatron.core.inference.model_inference_wrappers.t5.t5_inference_wrapper import T5InferenceWrapper model_inference_wrapper = T5InferenceWrapper(mcore_model, inference_wrapper_config) return model_inference_wrapper diff --git a/tests/collections/llm/gpt/data/test_hf_datamodule.py b/tests/collections/llm/gpt/data/test_hf_datamodule.py new file mode 100644 index 000000000000..a8d264701d39 --- /dev/null +++ b/tests/collections/llm/gpt/data/test_hf_datamodule.py @@ -0,0 +1,114 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nemo.collections import llm + +DATA_PATH = "/home/TestData/lite/hf_cache/squad/" + + +def test_load_single_split(): + ds = llm.HFDatasetDataModule( + path=DATA_PATH, + split='train', + seq_length=512, + micro_batch_size=2, + global_batch_size=2, + ) + from datasets.arrow_dataset import Dataset + + assert isinstance(ds.dataset_splits, dict) + assert len(ds.dataset_splits) == 3 + assert 'train' in ds.dataset_splits + assert ds.dataset_splits['train'] is not None + assert ds.train is not None + assert isinstance(ds.dataset_splits['train'], Dataset) + assert 'val' in ds.dataset_splits + assert ds.dataset_splits['val'] is None + assert ds.val is None + assert 'test' in ds.dataset_splits + assert ds.dataset_splits['test'] is None + assert ds.test is None + + +def test_load_nonexistent_split(): + exception_msg = '' + expected_msg = '''Unknown split "this_split_name_should_not_exist". 
Should be one of ['train', 'validation'].''' + try: + llm.HFDatasetDataModule( + path=DATA_PATH, + split='this_split_name_should_not_exist', + seq_length=512, + micro_batch_size=2, + global_batch_size=2, + ) + except ValueError as e: + exception_msg = str(e) + assert exception_msg == expected_msg, exception_msg + + +def test_load_multiple_split(): + ds = llm.HFDatasetDataModule( + path=DATA_PATH, + split=['train', 'validation'], + seq_length=512, + micro_batch_size=2, + global_batch_size=2, + ) + from datasets.arrow_dataset import Dataset + + assert isinstance(ds.dataset_splits, dict) + assert len(ds.dataset_splits) == 3 + assert 'train' in ds.dataset_splits + assert ds.dataset_splits['train'] is not None + assert ds.train is not None + assert isinstance(ds.dataset_splits['train'], Dataset) + assert isinstance(ds.train, Dataset) + assert 'val' in ds.dataset_splits + assert ds.dataset_splits['val'] is not None + assert ds.val is not None + assert isinstance(ds.dataset_splits['val'], Dataset) + assert isinstance(ds.val, Dataset) + assert 'test' in ds.dataset_splits + assert ds.dataset_splits['test'] is None + assert ds.test is None + + +def test_validate_dataset_asset_accessibility_file_does_not_exist(): + raised_exception = False + try: + llm.HFDatasetDataModule( + path="/this/path/should/not/exist/", + seq_length=512, + micro_batch_size=2, + global_batch_size=2, + ) + except FileNotFoundError: + raised_exception = True + + assert raised_exception == True, "Expected to raise a FileNotFoundError" + + +def test_validate_dataset_asset_accessibility_file_is_none(): # tokenizer, trainer): + raised_exception = False + try: + llm.HFDatasetDataModule( + path=None, + seq_length=512, + micro_batch_size=2, + global_batch_size=2, + ) + except TypeError: + raised_exception = True + + assert raised_exception == True, "Expected to raise a ValueError" From b81df9e58094c37218cfb66cd5da8884e287eca0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Wed, 11 Dec 2024 17:27:05 +0100 Subject: [PATCH 009/128] ci: Bump release workflow (#11544) Signed-off-by: Oliver Koenig --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 81db8e1160d9..dcaac34901cd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -28,7 +28,7 @@ on: jobs: release: - uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_release_library.yml@v0.15.0 + uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_release_library.yml@v0.15.1 with: release-ref: ${{ inputs.release-ref }} image-name: nemo_container From dfbb87f208d405712b43a9fe24a10a276230fb03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Wed, 11 Dec 2024 18:49:05 +0100 Subject: [PATCH 010/128] ci: Use SHA for cut-off (#11545) Signed-off-by: Oliver Koenig --- .github/workflows/release-freeze.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release-freeze.yml b/.github/workflows/release-freeze.yml index 70ecd73c2252..440f1b331819 100644 --- a/.github/workflows/release-freeze.yml +++ b/.github/workflows/release-freeze.yml @@ -9,6 +9,11 @@ on: options: - major - minor + freeze-commit: + type: string + description: Commit SHA to use for cut-off + required: false + default: main mcore_version: description: 'Version of MCore to use (must be a valid git ref)' required: true @@ -16,7 +21,7 @@ on: jobs: code-freeze: - uses: 
NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.8.0 + uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.16.0 with: library_name: NeMo-Toolkit python_package: nemo From 1fe0310c999d10087109a79df162e544a5f7dc9b Mon Sep 17 00:00:00 2001 From: Anna Shors Date: Wed, 11 Dec 2024 10:49:24 -0800 Subject: [PATCH 011/128] link to mcore documentation (#11538) Signed-off-by: ashors1 --- nemo/lightning/pytorch/strategies/megatron_strategy.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nemo/lightning/pytorch/strategies/megatron_strategy.py b/nemo/lightning/pytorch/strategies/megatron_strategy.py index 6560bebcb2cd..75a17d6ce30f 100644 --- a/nemo/lightning/pytorch/strategies/megatron_strategy.py +++ b/nemo/lightning/pytorch/strategies/megatron_strategy.py @@ -158,7 +158,8 @@ class MegatronStrategy(DDPStrategy, io.IOMixin): Defaults to True. ckpt_load_strictness (StrictHandling, optional): defines loading strictness. If not None, overwrites the `strict` flag passed to `load_checkpoint`. - Defaults to None. + Defaults to None. For a list of supported values, refer to the Megatron Core documentation: + https://github.com/NVIDIA/Megatron-LM/blob/d4e72c0d33edc0c53aeb624f617eb77cebce6ae9/megatron/core/dist_checkpointing/validation.py#L46 setup_optimizers (bool): Whether to call the trainer's setup_optimizers function to perform any necessary conversions of optimizer parameters and move optimizer parameters to the correct device. Defaults to True. From 02c2cdf3eec38245e9a7a0fc6b6112de9fd532d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Wed, 11 Dec 2024 21:02:59 +0100 Subject: [PATCH 012/128] ci: Adjust inputs for code-freeze workflow (#11550) Signed-off-by: Oliver Koenig --- .github/workflows/release-freeze.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release-freeze.yml b/.github/workflows/release-freeze.yml index 440f1b331819..6fd1e50f5c58 100644 --- a/.github/workflows/release-freeze.yml +++ b/.github/workflows/release-freeze.yml @@ -23,9 +23,10 @@ jobs: code-freeze: uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.16.0 with: - library_name: NeMo-Toolkit - python_package: nemo - type_of_release: ${{ inputs.type_of_release }} + library-name: NeMo-Toolkit + python-package: nemo + release-type: ${{ inputs.type_of_release }} + freeze-commit: ${{ inputs.freeze-commit }} secrets: SLACK_RELEASE_ENDPOINT: ${{ secrets.SLACK_RELEASE_ENDPOINT }} SLACK_WEBHOOK_ADMIN: ${{ secrets.SLACK_WEBHOOK_ADMIN }} From c37570a7bd7758b7e1bb86735bfbed36fb59f0c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Wed, 11 Dec 2024 21:21:01 +0100 Subject: [PATCH 013/128] ci: Bump release freeze (#11551) Signed-off-by: Oliver Koenig --- .github/workflows/release-freeze.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-freeze.yml b/.github/workflows/release-freeze.yml index 6fd1e50f5c58..ce46d4006d02 100644 --- a/.github/workflows/release-freeze.yml +++ b/.github/workflows/release-freeze.yml @@ -21,7 +21,7 @@ on: jobs: code-freeze: - uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.16.0 + uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.17.0 with: library-name: NeMo-Toolkit python-package: nemo From 37ee4326f404be99dae58b467f07a58aade9aa2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Wed, 11 Dec 2024 21:31:21 +0100 Subject: [PATCH 014/128] 
Ko3n1g/ci/commit sha for cutoff (#11553) * ci: Remove token from checkout Signed-off-by: Oliver Koenig * bump version Signed-off-by: Oliver Koenig --------- Signed-off-by: Oliver Koenig --- .github/workflows/release-freeze.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/release-freeze.yml b/.github/workflows/release-freeze.yml index ce46d4006d02..bb47db657f05 100644 --- a/.github/workflows/release-freeze.yml +++ b/.github/workflows/release-freeze.yml @@ -21,7 +21,7 @@ on: jobs: code-freeze: - uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.17.0 + uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.17.1 with: library-name: NeMo-Toolkit python-package: nemo @@ -42,7 +42,6 @@ jobs: fetch-depth: 0 fetch-tags: true ref: ${{ needs.code-freeze.outputs.release-branch }} - token: ${{ secrets.PAT }} - name: Pin branch name in Notebooks run: | From fc39d24cb6521f9a5aac144b60f48a47b27cf918 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Wed, 11 Dec 2024 21:39:45 +0100 Subject: [PATCH 015/128] ci: Bump code-freeze workflow (#11554) Signed-off-by: Oliver Koenig --- .github/workflows/release-freeze.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-freeze.yml b/.github/workflows/release-freeze.yml index bb47db657f05..115dd9790748 100644 --- a/.github/workflows/release-freeze.yml +++ b/.github/workflows/release-freeze.yml @@ -21,7 +21,7 @@ on: jobs: code-freeze: - uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.17.1 + uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.17.2 with: library-name: NeMo-Toolkit python-package: nemo From 2aff61610e8282c9d75d8a2609469886d0f21bfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Wed, 11 Dec 2024 21:47:46 +0100 Subject: [PATCH 016/128] ci: Bump code freeze workflow (#11557) Signed-off-by: Oliver Koenig --- .github/workflows/release-freeze.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-freeze.yml b/.github/workflows/release-freeze.yml index 115dd9790748..7ccd1158dae9 100644 --- a/.github/workflows/release-freeze.yml +++ b/.github/workflows/release-freeze.yml @@ -21,7 +21,7 @@ on: jobs: code-freeze: - uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.17.2 + uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_code_freeze.yml@v0.17.3 with: library-name: NeMo-Toolkit python-package: nemo From 726e50ac8b4722e2eaef935962a98f96448dde9b Mon Sep 17 00:00:00 2001 From: Hemil Desai Date: Wed, 11 Dec 2024 14:32:51 -0800 Subject: [PATCH 017/128] Fix deploy conflicts in llm.api (#11367) * Fix llm.deploy api Signed-off-by: Hemil Desai * fix Signed-off-by: Hemil Desai * fix Signed-off-by: Hemil Desai * fix Signed-off-by: Hemil Desai * fix Signed-off-by: Hemil Desai * fix Signed-off-by: Hemil Desai * Apply isort and black reformatting Signed-off-by: hemildesai * PR feedback Signed-off-by: Hemil Desai * fix Signed-off-by: Hemil Desai --------- Signed-off-by: Hemil Desai Signed-off-by: hemildesai Co-authored-by: hemildesai --- nemo/collections/llm/api.py | 29 ++++++++++++++----------- nemo/collections/llm/evaluation/base.py | 2 +- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/nemo/collections/llm/api.py b/nemo/collections/llm/api.py index a27829412fe3..d030eb88863c 100644 --- a/nemo/collections/llm/api.py +++ b/nemo/collections/llm/api.py @@ -41,7 +41,6 @@ from nemo.utils import 
logging from nemo.utils.get_rank import is_global_rank_zero - if TYPE_CHECKING: from megatron.core.inference.common_inference_params import CommonInferenceParams from megatron.core.inference.inference_request import InferenceRequest @@ -325,7 +324,7 @@ def ptq( def deploy( nemo_checkpoint: Path = None, model_type: str = "llama", - triton_model_name: str = 'triton_model', + triton_model_name: str = "triton_model", triton_model_version: Optional[int] = 1, triton_port: int = 8000, triton_http_address: str = "0.0.0.0", @@ -377,22 +376,22 @@ def deploy( output_generation_logits (bool): If True builds trtllm engine with gather_generation_logits set to True. generation_logits are used to compute the logProb of the output token. Default: True. """ - from nemo.collections.llm import deploy + from nemo.collections.llm.deploy.base import get_trtllm_deployable, unset_environment_variables from nemo.deploy import DeployPyTriton - deploy.unset_environment_variables() + unset_environment_variables() if start_rest_service: if triton_port == rest_service_port: logging.error("REST service port and Triton server port cannot use the same port.") return # Store triton ip, port and other args relevant for REST API as env vars to be accessible by rest_model_api.py - os.environ['TRITON_HTTP_ADDRESS'] = triton_http_address - os.environ['TRITON_PORT'] = str(triton_port) - os.environ['TRITON_REQUEST_TIMEOUT'] = str(triton_request_timeout) - os.environ['OPENAI_FORMAT_RESPONSE'] = str(openai_format_response) - os.environ['OUTPUT_GENERATION_LOGITS'] = str(output_generation_logits) + os.environ["TRITON_HTTP_ADDRESS"] = triton_http_address + os.environ["TRITON_PORT"] = str(triton_port) + os.environ["TRITON_REQUEST_TIMEOUT"] = str(triton_request_timeout) + os.environ["OPENAI_FORMAT_RESPONSE"] = str(openai_format_response) + os.environ["OUTPUT_GENERATION_LOGITS"] = str(output_generation_logits) - triton_deployable = deploy.get_trtllm_deployable( + triton_deployable = get_trtllm_deployable( nemo_checkpoint, model_type, triton_model_repository, @@ -513,7 +512,7 @@ def evaluate( from nemo.collections.llm import evaluation # Get tokenizer from nemo ckpt. This works only with NeMo 2.0 ckpt. 
- tokenizer = io.load_context(nemo_checkpoint_path + '/context', subpath="model").tokenizer + tokenizer = io.load_context(nemo_checkpoint_path + "/context", subpath="model.tokenizer") # Wait for rest service to be ready before starting evaluation evaluation.wait_for_rest_service(rest_url=f"{url}/v1/health") # Create an object of the NeMoFWLM which is passed as a model to evaluator.simple_evaluate @@ -521,10 +520,14 @@ def evaluate( model_name, url, tokenizer, max_tokens_to_generate, temperature, top_p, top_k, add_bos ) results = evaluator.simple_evaluate( - model=model, tasks=eval_task, limit=limit, num_fewshot=num_fewshot, bootstrap_iters=bootstrap_iters + model=model, + tasks=eval_task, + limit=limit, + num_fewshot=num_fewshot, + bootstrap_iters=bootstrap_iters, ) - print("score", results['results'][eval_task]) + print("score", results["results"][eval_task]) @run.cli.entrypoint(name="import", namespace="llm") diff --git a/nemo/collections/llm/evaluation/base.py b/nemo/collections/llm/evaluation/base.py index b1734d6f4d43..f8f6639e3f3c 100644 --- a/nemo/collections/llm/evaluation/base.py +++ b/nemo/collections/llm/evaluation/base.py @@ -167,7 +167,7 @@ def generate_until(self, inputs: list[Instance]): return results -def wait_for_rest_service(rest_url, max_retries=60, retry_interval=2): +def wait_for_rest_service(rest_url, max_retries=600, retry_interval=2): """ Wait for REST service to be ready. From f68208e2b733ef181ba0a61cd2b4c2104842682a Mon Sep 17 00:00:00 2001 From: malay-nagda <164242706+malay-nagda@users.noreply.github.com> Date: Thu, 12 Dec 2024 04:47:11 +0530 Subject: [PATCH 018/128] perf summary docs link (#11262) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Malay Nagda Co-authored-by: oliver könig --- README.md | 3 +- .../source/performance/performance_summary.md | 42 ------------------- 2 files changed, 2 insertions(+), 43 deletions(-) delete mode 100644 docs/source/performance/performance_summary.md diff --git a/README.md b/README.md index be7bd6a74f48..195b8293babd 100644 --- a/README.md +++ b/README.md @@ -167,7 +167,8 @@ Overall, these enhancements make NeMo 2.0 a powerful, scalable, and user-friendl All NeMo models are trained with [Lightning](https://github.com/Lightning-AI/lightning). Training is -automatically scalable to 1000s of GPUs. +automatically scalable to 1000s of GPUs. You can check the performance benchmarks using the +latest NeMo Framework container [here](https://docs.nvidia.com/nemo-framework/user-guide/latest/performance/performance_summary.html). When applicable, NeMo models leverage cutting-edge distributed training techniques, incorporating [parallelism diff --git a/docs/source/performance/performance_summary.md b/docs/source/performance/performance_summary.md deleted file mode 100644 index 98dae2dc0a78..000000000000 --- a/docs/source/performance/performance_summary.md +++ /dev/null @@ -1,42 +0,0 @@ - -# Performance Benchmarks - -## Large Language Models** - -### Pretraining - -- The results in the table below show pre-training performance for various tasks at FP8 precision. - - Container: [NeMo24.07](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/nemo/tags) - - System: DGX-H100 - -| Model | #-GPUs | GBS | MBS | Sequence Length| TP | PP | CP | VP | Tokens / sec / GPU | Model TFLOP / sec / GPU | ***Est. 
time to train in days (10T tokens, 1K GPUs)*** | -| ----- | ------ | --- | --- | ---------------| -- | -- | -- | -- | ------------------ | ----------------------- | ------------------------------------------------------ | -| GPT3-5B | 64 | 2048 | 4 | 2048 | 1 | 1 | 1 | 1 | 23406 | 765 | ***5*** | -| GPT3-20B | 64 | 256 | 2 | 2048 | 2 | 1 | 1 | 1 | 5851 | 750 | ***19*** | -| GPT3-175B | 128 | 256 | 1 | 2048 | 4 | 8 | 1 | 6 | 716 | 771 | **158** | -| GPT3-175B | 512 | 2048 | 2 | 2048 | 4 | 8 | 1 | 6 | 825 | [888](https://mlcommons.org/benchmarks/training/) | **137** | -| LLAMA2-7B | 8 | 128 | 1 | 4096 | 1 | 1 | 1 | 1 | 16934 | 780 | ***7*** | -| LLAMA2-13B | 16 | 128 | 1 | 4096 | 1 | 4 | 1 | 10 | 8715 | 760 | ***13*** | -| LLAMA2-70B | 64 | 128 | 1 | 4096 | 4 | 4 | 1 | 20 | 1728 | 768 | ***65*** | -| Nemotron-8B | 64 | 256 | 4 | 4096 | 2 | 1 | 1 | 1 | 12507 | 643 | ***9*** | -| Nemotron-22B | 64 | 256 | 2 | 4096 | 2 | 4 | 1 | 10 | 4312 | 562 | ***26*** | -| Nemotron-340B | 128 | 32 | 1 | 4096 | 8 | 8 | 1 | 12 | 326 | 686 | ***347*** | -| LLAMA3-8B | 8 | 128 | 1 | 8192 | 1 | 1 | 2 | 1 | 12273 | 711 | ***9*** | -| LLAMA3-70B | 64 | 128 | 1 | 8192 | 4 | 4 | 2 | 5 | 1524 | 734 | ***74*** | - -### Finetuning - -- The results in the table below show finetuning performance of LLAMA2 models with SFT (supervised fine-tuning), and LoRA (Low-rank adaptors) at FP8 precision. - - Container: [NeMo24.07](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/nemo/tags) - - System: DGX-H100 -- For fine-tuning, we use `SQuAD-v1.1 `__ dataset, and the inputs are packed to 4096 tokens. - - -| Model | Task | #-GPUs | GBS | MBS | Packed Sequence Length | TP | PP | Tokens / sec / GPU | Model TFLOP / sec / GPU | ***Est. time to finetune in mins (10M tokens)*** | -| ----- | ---- | --- | --- | --- | --------------- | -- | -- | ------------------ | ----------------------- | -------------------------------------------------- | -| LLAMA2-7B | SFT | 8 | 32 | 1 | 4096 | 1 | 1 | 16891 | 673 | ***1.2*** | -| LLAMA2-13B | SFT | 8 | 32 | 1 | 4096 | 1 | 4 | 10176 | 787 | ***2.0*** | -| LLAMA2-70B | SFT | 16 | 32 | 1 | 4096 | 4 | 4 | 1816 | 749 | ***5.7*** | -| LLAMA2-7B | LoRA | 8 | 32 | 1 | 4096 | 1 | 1 | 24824 | 663 | ***0.8*** | -| LLAMA2-13B | LoRA | 8 | 32 | 1 | 4096 | 1 | 1 | 14629 | 757 | ***1.4*** | -| LLAMA2-70B | LoRA | 8 | 32 | 1 | 4096 | 2 | 4 | 2621 | 722 | ***7.9*** | From 867dd0c1395f6844009664dfe92abc140d31335a Mon Sep 17 00:00:00 2001 From: Yu Yao <54727607+yaoyu-33@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:21:31 -0800 Subject: [PATCH 019/128] Add vlm nemo run scripts (#11394) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update recipe Signed-off-by: yaoyu-33 * fix mllama mock ds Signed-off-by: yaoyu-33 * update to use attention bias Signed-off-by: yaoyu-33 * remove example Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * fix docstring mock.py Signed-off-by: yaoyu-33 * fix docstring language.py Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * fix docstring language.py Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * fix docstring mllama/base.py Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * fix docstring mllama/language.py Signed-off-by: yaoyu-33 * bump mcore Signed-off-by: Oliver Koenig * Add scripts for mllama Signed-off-by: yaoyu-33 * fix 
Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * update script Signed-off-by: yaoyu-33 * fix pylint Signed-off-by: yaoyu-33 * revert Dockerfile.ci Signed-off-by: Yu Yao <54727607+yaoyu-33@users.noreply.github.com> * add scripts Signed-off-by: yaoyu-33 * add vlm training test in ci Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * fix docstring issues Signed-off-by: yaoyu-33 * update script match recipe Signed-off-by: yaoyu-33 * update recipes Signed-off-by: yaoyu-33 * Update mllama_train.py Signed-off-by: Yu Yao <54727607+yaoyu-33@users.noreply.github.com> * update mllama 90b recipe Signed-off-by: yaoyu-33 * update to use tmp in ci tests Signed-off-by: yaoyu-33 * update default llava config Signed-off-by: yaoyu-33 * add nemo run scripts Signed-off-by: yaoyu-33 * fix vpp issue Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * fix cicd Signed-off-by: yaoyu-33 * fix cicd Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * remove duplicated script Signed-off-by: yaoyu-33 * ci: Add HF cache Signed-off-by: oliver könig * update to use SP in recipe Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * fix Signed-off-by: yaoyu-33 * upgrade Signed-off-by: yaoyu-33 * Revert "upgrade" This reverts commit f6ad2cd76abcdd9258cb53a25c788fd658189150. * update neva api Signed-off-by: yaoyu-33 * update neva api Signed-off-by: yaoyu-33 * fix neva processing Signed-off-by: yaoyu-33 * fix lint Signed-off-by: yaoyu-33 * Apply isort and black reformatting Signed-off-by: yaoyu-33 * fix data fields Signed-off-by: yaoyu-33 * few fixes Signed-off-by: yaoyu-33 --------- Signed-off-by: yaoyu-33 Signed-off-by: yaoyu-33 Signed-off-by: Oliver Koenig Signed-off-by: Yu Yao <54727607+yaoyu-33@users.noreply.github.com> Signed-off-by: oliver könig Co-authored-by: yaoyu-33 Co-authored-by: Oliver Koenig --- .../multimodal/data/energon/base.py | 2 + nemo/collections/vlm/mllama/data/lazy.py | 2 + nemo/collections/vlm/mllama/data/mock.py | 2 + nemo/collections/vlm/neva/data/lazy.py | 4 +- nemo/collections/vlm/neva/data/mock.py | 2 + nemo/collections/vlm/neva/model/base.py | 11 ++-- nemo/collections/vlm/recipes/llava15_13b.py | 16 ++++++ nemo/collections/vlm/recipes/llava15_7b.py | 16 +++++- scripts/vlm/mllama_nemo_run.py | 54 ++++++++++++++++++ scripts/vlm/neva_finetune.py | 2 +- scripts/vlm/neva_nemo_run.py | 55 +++++++++++++++++++ 11 files changed, 157 insertions(+), 9 deletions(-) create mode 100644 scripts/vlm/mllama_nemo_run.py create mode 100644 scripts/vlm/neva_nemo_run.py diff --git a/nemo/collections/multimodal/data/energon/base.py b/nemo/collections/multimodal/data/energon/base.py index 4e90dce55c7a..8c7819c3d7dd 100644 --- a/nemo/collections/multimodal/data/energon/base.py +++ b/nemo/collections/multimodal/data/energon/base.py @@ -92,6 +92,8 @@ def __init__( self.decoder_seq_length = decoder_seq_length self.micro_batch_size = micro_batch_size self.global_batch_size = global_batch_size + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size self.num_workers = num_workers self.pin_memory = pin_memory self.multimodal_sample_config = multimodal_sample_config diff --git a/nemo/collections/vlm/mllama/data/lazy.py b/nemo/collections/vlm/mllama/data/lazy.py index 5069f8593377..eac29d081a34 100644 --- a/nemo/collections/vlm/mllama/data/lazy.py +++ b/nemo/collections/vlm/mllama/data/lazy.py @@ -205,6 +205,8 @@ def 
__init__( self.data_config = data_config self.seq_length = seq_length self.decoder_seq_length = decoder_seq_length + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size self.tokenizer = tokenizer self.image_processor = image_processor self.num_train_samples = num_train_samples diff --git a/nemo/collections/vlm/mllama/data/mock.py b/nemo/collections/vlm/mllama/data/mock.py index 4d078c745492..6c38a5021f30 100644 --- a/nemo/collections/vlm/mllama/data/mock.py +++ b/nemo/collections/vlm/mllama/data/mock.py @@ -66,6 +66,8 @@ def __init__( super().__init__() self.seq_length = seq_length self.decoder_seq_length = decoder_seq_length + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size self.num_train_samples = num_train_samples self.num_val_samples = num_val_samples self.num_test_samples = num_test_samples diff --git a/nemo/collections/vlm/neva/data/lazy.py b/nemo/collections/vlm/neva/data/lazy.py index 5bc2cbe0458e..066310867777 100644 --- a/nemo/collections/vlm/neva/data/lazy.py +++ b/nemo/collections/vlm/neva/data/lazy.py @@ -442,8 +442,6 @@ def collate_fn(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]: if media_type == 'image': media = [instance.pop('image') for instance in instances] media = torch.cat(media, dim=0) - if media.size(0) == 0: - media = None elif media_type == 'video': media = [instance.pop('video', None) for instance in instances] else: @@ -525,6 +523,8 @@ def __init__( self.data_config = data_config self.seq_length = seq_length self.decoder_seq_length = decoder_seq_length + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size self.tokenizer = tokenizer self.image_processor = image_processor self.num_train_samples = num_train_samples diff --git a/nemo/collections/vlm/neva/data/mock.py b/nemo/collections/vlm/neva/data/mock.py index 9e2308752641..7533bf56ac46 100644 --- a/nemo/collections/vlm/neva/data/mock.py +++ b/nemo/collections/vlm/neva/data/mock.py @@ -46,6 +46,8 @@ def __init__( super().__init__() self.seq_length = seq_length self.decoder_seq_len = decoder_seq_length + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size self.num_train_samples = num_train_samples self.num_val_samples = num_val_samples self.num_test_samples = num_test_samples diff --git a/nemo/collections/vlm/neva/model/base.py b/nemo/collections/vlm/neva/model/base.py index 750efd5b42dc..e7b40c1c0209 100644 --- a/nemo/collections/vlm/neva/model/base.py +++ b/nemo/collections/vlm/neva/model/base.py @@ -320,6 +320,7 @@ def configure_model(self, tokenizer) -> "MCoreNevaModel": self.language_transformer_config.pipeline_model_parallel_size = self.pipeline_model_parallel_size self.language_transformer_config.context_parallel_size = self.context_parallel_size + assert "NEVA `encoder_pipeline_model_parallel_size` has bug for now. Fix will come soon." if self.encoder_pipeline_model_parallel_size > 0: assert self.encoder_pipeline_model_parallel_size == 1, "ViT can only live on 1 pipeline stage." 
self.vision_transformer_config.pipeline_model_parallel_size = self.encoder_pipeline_model_parallel_size @@ -334,8 +335,7 @@ def configure_model(self, tokenizer) -> "MCoreNevaModel": model = MCoreNevaModel( config=self, tokenizer=tokenizer, - pre_process=ps.is_pipeline_first_stage() - or ps.get_pipeline_model_parallel_rank() == self.encoder_pipeline_model_parallel_size, + pre_process=ps.is_pipeline_first_stage(), post_process=ps.is_pipeline_last_stage(), add_encoder=ps.is_pipeline_first_stage(), add_decoder=ps.is_pipeline_last_stage() @@ -488,17 +488,19 @@ def forward( use_inference_kv_cache = ( inference_params is not None and "image_tokens_count" in inference_params.key_value_memory_dict ) - has_images = media.shape[0] > 0 + has_images = media is not None and media.shape[0] > 0 # If running inference, we can skip media token computation if they were computed already earlier for this sample. if use_inference_kv_cache: media_embeddings = None elif self.add_encoder and not has_images: + vision_param = next(self.vision_model.parameters()) # If no images provided, use an empty image embeddings tensor. - media_embeddings = torch.tensor([], dtype=media.dtype, device=media.device).reshape(0, 0, 0) + media_embeddings = torch.tensor([], dtype=vision_param.dtype, device=vision_param.device).reshape(0, 0, 0) elif self.add_encoder and has_images: # media is in shape of (num_images_in_mbs, c, h, w) # note num_images_in_mbs is not mbs but total images in this mbs. + media = media.to(next(self.vision_model.parameters()).dtype) if self.vision_model_from_hf: self.vision_model = self.vision_model.eval() media_embeddings = self.vision_model(media, output_hidden_states=True) @@ -507,7 +509,6 @@ def forward( ] # [num_images, img_seq_len, h_vision] else: # TODO(yuya): MCore Clip path not yet support taking a specific layer hidden states - media = media.to(next(self.vision_model.parameters()).dtype) media_embeddings = self.vision_model(media, num_unused_layers=-self.config.vision_feature_layer - 1) if self._drop_vision_class_token: class_token_len = getattr(self.vision_model, "class_token_len", 1) diff --git a/nemo/collections/vlm/recipes/llava15_13b.py b/nemo/collections/vlm/recipes/llava15_13b.py index 97b77b82d3de..d85ba6f2752b 100644 --- a/nemo/collections/vlm/recipes/llava15_13b.py +++ b/nemo/collections/vlm/recipes/llava15_13b.py @@ -18,6 +18,7 @@ import nemo_run as run import pytorch_lightning as pl import torch +from megatron.core.distributed import DistributedDataParallelConfig from nemo import lightning as nl from nemo.collections import llm, vlm @@ -26,6 +27,8 @@ from nemo.collections.llm.recipes.optim.adam import distributed_fused_adam_with_cosine_annealing from nemo.collections.llm.recipes.precision.mixed_precision import bf16_mixed from nemo.collections.vlm.neva.data.mock import MockDataModule +from nemo.lightning.pytorch.callbacks.megatron_comm_overlap import MegatronCommOverlapCallback +from nemo.utils.exp_manager import TimingCallback NAME = "llava15_13b" @@ -92,7 +95,16 @@ def finetune_recipe( tensor_model_parallel_size=1, pipeline_model_parallel_size=1, encoder_pipeline_model_parallel_size=0, + sequence_parallel=True, pipeline_dtype=torch.bfloat16, + ddp=run.Config( + DistributedDataParallelConfig, + check_for_nan_in_grad=True, + grad_reduce_in_fp32=True, + overlap_grad_reduce=True, + overlap_param_gather=True, + average_in_collective=True, + ), ) trainer = run.Config( @@ -107,6 +119,10 @@ def finetune_recipe( plugins=bf16_mixed(), strategy=strategy, val_check_interval=1000, + callbacks=[ + 
run.Config(TimingCallback), + run.Config(MegatronCommOverlapCallback, tp_comm_overlap=True), + ], ) recipe = run.Partial( diff --git a/nemo/collections/vlm/recipes/llava15_7b.py b/nemo/collections/vlm/recipes/llava15_7b.py index 04e6bd36f4d4..2abb50db6c11 100644 --- a/nemo/collections/vlm/recipes/llava15_7b.py +++ b/nemo/collections/vlm/recipes/llava15_7b.py @@ -18,6 +18,7 @@ import nemo_run as run import pytorch_lightning as pl import torch +from megatron.core.distributed import DistributedDataParallelConfig from nemo import lightning as nl from nemo.collections import llm, vlm @@ -26,6 +27,7 @@ from nemo.collections.llm.recipes.optim.adam import distributed_fused_adam_with_cosine_annealing from nemo.collections.llm.recipes.precision.mixed_precision import bf16_mixed from nemo.collections.vlm.neva.data.mock import MockDataModule +from nemo.lightning.pytorch.callbacks.megatron_comm_overlap import MegatronCommOverlapCallback from nemo.utils.exp_manager import TimingCallback NAME = "llava15_7b" @@ -93,7 +95,16 @@ def finetune_recipe( tensor_model_parallel_size=1, pipeline_model_parallel_size=1, encoder_pipeline_model_parallel_size=0, + sequence_parallel=True, pipeline_dtype=torch.bfloat16, + ddp=run.Config( + DistributedDataParallelConfig, + check_for_nan_in_grad=True, + grad_reduce_in_fp32=True, + overlap_grad_reduce=True, + overlap_param_gather=True, + average_in_collective=True, + ), ) trainer = run.Config( @@ -108,7 +119,10 @@ def finetune_recipe( plugins=bf16_mixed(), strategy=strategy, val_check_interval=1000, - callbacks=[run.Config(TimingCallback)], + callbacks=[ + run.Config(TimingCallback), + run.Config(MegatronCommOverlapCallback, tp_comm_overlap=True), + ], ) recipe = run.Partial( diff --git a/scripts/vlm/mllama_nemo_run.py b/scripts/vlm/mllama_nemo_run.py new file mode 100644 index 000000000000..9ee58a557f6b --- /dev/null +++ b/scripts/vlm/mllama_nemo_run.py @@ -0,0 +1,54 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import nemo_run as run + +from nemo.collections import vlm + + +def configure_recipe(nodes: int = 1, gpus_per_node: int = 1): + # pylint: disable=C0115,C0116 + recipe = vlm.mllama_11b.finetune_recipe( + dir="/checkpoints/mllama", # Path to store checkpoints + name="mllama", + num_nodes=nodes, + num_gpus_per_node=gpus_per_node, + peft_scheme="lora", + ) + recipe.trainer.max_steps = 100 + recipe.trainer.val_check_interval = 100 + return recipe + + +def local_executor_torchrun(nodes: int = 1, devices: int = 1) -> run.LocalExecutor: + # pylint: disable=C0115,C0116 + # Env vars for jobs are configured here + env_vars = {} + + executor = run.LocalExecutor(ntasks_per_node=devices, launcher="torchrun", env_vars=env_vars) + + return executor + + +def run_training(): + # pylint: disable=C0115,C0116 + recipe = configure_recipe() + executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) + + run.run(recipe, executor=executor) + + +# This condition is necessary for the script to be compatible with Python's multiprocessing module. +if __name__ == "__main__": + run_training() diff --git a/scripts/vlm/neva_finetune.py b/scripts/vlm/neva_finetune.py index 649dc854f2dd..6fc4e2de13b5 100644 --- a/scripts/vlm/neva_finetune.py +++ b/scripts/vlm/neva_finetune.py @@ -111,7 +111,7 @@ def main(args): ddp=DistributedDataParallelConfig( check_for_nan_in_grad=True, grad_reduce_in_fp32=True, - overlap_grad_reduce=True, + overlap_grad_reduce=False, overlap_param_gather=True, average_in_collective=True, ), diff --git a/scripts/vlm/neva_nemo_run.py b/scripts/vlm/neva_nemo_run.py new file mode 100644 index 000000000000..09d99688a517 --- /dev/null +++ b/scripts/vlm/neva_nemo_run.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import nemo_run as run + +from nemo.collections import vlm + + +def configure_recipe(nodes: int = 1, gpus_per_node: int = 8): + # pylint: disable=C0115,C0116 + recipe = vlm.llava15_7b.finetune_recipe( + dir="/checkpoints/llava", # Path to store checkpoints + name="llava_ft", + num_nodes=nodes, + num_gpus_per_node=gpus_per_node, + peft_scheme="none", + ) + recipe.trainer.max_steps = 100 + recipe.trainer.val_check_interval = 100 + recipe.model.config.freeze_vision_model = True + return recipe + + +def local_executor_torchrun(nodes: int = 1, devices: int = 8) -> run.LocalExecutor: + # pylint: disable=C0115,C0116 + # Env vars for jobs are configured here + env_vars = {} + + executor = run.LocalExecutor(ntasks_per_node=devices, launcher="torchrun", env_vars=env_vars) + + return executor + + +def run_training(): + # pylint: disable=C0115,C0116 + recipe = configure_recipe() + executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) + + run.run(recipe, executor=executor) + + +# This condition is necessary for the script to be compatible with Python's multiprocessing module. 
+if __name__ == "__main__": + run_training() From 3867529db38c7f9120c3c9fd799cb69bc72d7b4c Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Date: Wed, 11 Dec 2024 19:54:10 -0800 Subject: [PATCH 020/128] Add from_dict to HFDatasetDataModule (#11559) * Add from_dict method Signed-off-by: Alexandros Koumparoulis * add test_load_from_dict Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * add test_load_from_dict Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa --------- Signed-off-by: Alexandros Koumparoulis Signed-off-by: akoumpa Co-authored-by: akoumpa --- nemo/collections/llm/gpt/data/hf_dataset.py | 80 ++++++++++++------- .../llm/gpt/data/test_hf_datamodule.py | 36 +++++++-- 2 files changed, 79 insertions(+), 37 deletions(-) diff --git a/nemo/collections/llm/gpt/data/hf_dataset.py b/nemo/collections/llm/gpt/data/hf_dataset.py index 039e5b90b096..73b6444a6e9c 100644 --- a/nemo/collections/llm/gpt/data/hf_dataset.py +++ b/nemo/collections/llm/gpt/data/hf_dataset.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import datasets.dataset_dict import lightning.pytorch as pl import torch from datasets import load_dataset @@ -22,38 +21,40 @@ from nemo.utils import logging -def make_dataset_splits(path, split, split_aliases, kwargs): +def make_dataset_splits(dataset, split, split_aliases): """ - Loads a dataset with datasets.load_dataset and - returns a dictionary containing all dataset splits. + Given a dataset (e.g. from datasets.load_dataset or datasets.Dataset.from_dict) it + returns a dictionary containing the corresponding dataset splits. For example: - ans = make_dataset_splits("dataset-id") - $ ds = load_dataset("dataset-id") - $ print(ds) - > DatasetDict({ - > train: Dataset({ - > features: ['id', 'title', 'context', 'question', 'answers'], - > num_rows: 87599 - > }) - > validation: Dataset({ - > features: ['id', 'title', 'context', 'question', 'answers'], - > num_rows: 10570 - > }) - > }) - - In this case the value of `ans` (returned value) will be: + $ ds = load_dataset("dataset-id") + $ ans = make_dataset_splits(ds) + + # `ds` contains the following + $ print(ds) + > DatasetDict({ + > train: Dataset({ + > features: ['id', 'title', 'context', 'question', 'answers'], + > num_rows: 87599 + > }) + > validation: Dataset({ + > features: ['id', 'title', 'context', 'question', 'answers'], + > num_rows: 10570 + > }) + > }) + + # In this case the value of `ans` (returned value) will be: $ print(ans) > { > "train": Dataset .. (with 87599 rows), > "val": Dataset .. 
(with 10570 rows), > } """ - dataset = load_dataset(path, split=split, **kwargs) + from datasets import Dataset, DatasetDict split_names = ['train', 'test', 'val'] - dataset_splits = {split: None for split in split_names} + dataset_splits = {_split: None for _split in split_names} alias_to_split = {} for split_name, _split_aliases in split_aliases.items(): @@ -61,7 +62,10 @@ def make_dataset_splits(path, split, split_aliases, kwargs): for alias in _split_aliases: alias_to_split[alias] = split_name - if isinstance(dataset, datasets.dataset_dict.DatasetDict): + if isinstance(dataset, Dataset): + assert isinstance(split, str), "Expected split to be a string, but got " + str(type(split)) + dataset_splits[split] = dataset + elif isinstance(dataset, DatasetDict): dataset_split_names = dataset.keys() logging.info(f"HF dataset has the following splits: {dataset_split_names}") for alias_split_name, split in dataset.items(): @@ -89,9 +93,8 @@ def make_dataset_splits(path, split, split_aliases, kwargs): else: raise ValueError("Expected split name to be None, str or a list") - assert ( - sum(map(lambda x: x is not None, dataset_splits.values())) > 0 - ), "Expected at least one dataset to have been initialized" + num_init_splits = sum(map(lambda x: x is not None, dataset_splits.values())) + assert num_init_splits > 0, f"Expected at least one split to have been initialized {num_init_splits}" return dataset_splits @@ -111,9 +114,9 @@ class HFDatasetDataModule(pl.LightningDataModule): def __init__( self, - path, - collate_fn=None, + path_or_dataset, split=None, + collate_fn=None, num_workers=2, pin_memory=True, persistent_workers=True, @@ -130,8 +133,7 @@ def __init__( ) -> None: super().__init__() assert pad_token_id is not None - - logging.info(f"Loading HF dataset from {path}") + from datasets import Dataset, DatasetDict # A dataset usually will have several splits (e.g. train, val, test, etc). # We map synonym names to canonical names (train, test, val). @@ -139,7 +141,18 @@ def __init__( split_aliases = {'train': train_aliases, 'test': test_aliases, 'val': val_aliases} # self.dataset_splits will hold the actual dataset for each split. 
- self.dataset_splits = make_dataset_splits(path, split, split_aliases, kwargs) + if isinstance(path_or_dataset, str): + logging.info(f"Loading HF dataset from {path_or_dataset}") + dataset = load_dataset(path_or_dataset, split=split, **kwargs) + elif isinstance(path_or_dataset, Dataset) or isinstance(path_or_dataset, DatasetDict): + logging.info(f"Using passed HF dataset {str(path_or_dataset)}") + dataset = path_or_dataset + else: + raise ValueError( + "Expected `path_or_dataset` to be str, Dataset, DatasetDict, but got " + str(type(path_or_dataset)) + ) + + self.dataset_splits = make_dataset_splits(dataset, split, split_aliases) if collate_fn is None: self._collate_fn = lambda x: HFDatasetDataModule.collate_fn(x, pad_token_id=self.pad_token_id) @@ -157,6 +170,13 @@ def __init__( self.use_mcore_sampler = use_mcore_sampler self.mcore_dataloader_type = mcore_dataloader_type + @staticmethod + def from_dict(dataset_dict, split, **kwargs): + from datasets import Dataset + + dataset = Dataset.from_dict(dataset_dict) + return HFDatasetDataModule(path_or_dataset=dataset, split=split, **kwargs) + @staticmethod def collate_fn(batch, pad_token_id=0): def batchify(tensor): diff --git a/tests/collections/llm/gpt/data/test_hf_datamodule.py b/tests/collections/llm/gpt/data/test_hf_datamodule.py index a8d264701d39..58f7c02e091b 100644 --- a/tests/collections/llm/gpt/data/test_hf_datamodule.py +++ b/tests/collections/llm/gpt/data/test_hf_datamodule.py @@ -19,7 +19,7 @@ def test_load_single_split(): ds = llm.HFDatasetDataModule( - path=DATA_PATH, + path_or_dataset=DATA_PATH, split='train', seq_length=512, micro_batch_size=2, @@ -46,7 +46,7 @@ def test_load_nonexistent_split(): expected_msg = '''Unknown split "this_split_name_should_not_exist". Should be one of ['train', 'validation'].''' try: llm.HFDatasetDataModule( - path=DATA_PATH, + path_or_dataset=DATA_PATH, split='this_split_name_should_not_exist', seq_length=512, micro_batch_size=2, @@ -59,7 +59,7 @@ def test_load_nonexistent_split(): def test_load_multiple_split(): ds = llm.HFDatasetDataModule( - path=DATA_PATH, + path_or_dataset=DATA_PATH, split=['train', 'validation'], seq_length=512, micro_batch_size=2, @@ -88,7 +88,7 @@ def test_validate_dataset_asset_accessibility_file_does_not_exist(): raised_exception = False try: llm.HFDatasetDataModule( - path="/this/path/should/not/exist/", + path_or_dataset="/this/path/should/not/exist/", seq_length=512, micro_batch_size=2, global_batch_size=2, @@ -103,12 +103,34 @@ def test_validate_dataset_asset_accessibility_file_is_none(): # tokenizer, trai raised_exception = False try: llm.HFDatasetDataModule( - path=None, + path_or_dataset=None, seq_length=512, micro_batch_size=2, global_batch_size=2, ) - except TypeError: - raised_exception = True + except ValueError as e: + raised_exception = ( + str(e) == "Expected `path_or_dataset` to be str, Dataset, DatasetDict, but got " + ) assert raised_exception == True, "Expected to raise a ValueError" + + +def test_load_from_dict(): + data = {'text': "Below is an instruction that describes a task, paired with an input that "} + + datamodule = llm.HFDatasetDataModule.from_dict( + {"text": [data['text'] for _ in range(101)]}, + split='train', + global_batch_size=4, + micro_batch_size=1, + ) + assert datamodule is not None + assert isinstance(datamodule, llm.HFDatasetDataModule) + assert hasattr(datamodule, 'train') + assert datamodule.train is not None + assert len(datamodule.train) == 101 + assert hasattr(datamodule, 'val') + assert datamodule.val is None + assert 
hasattr(datamodule, 'test') + assert datamodule.test is None From 8a44926f3f4d8ac6dc3bfbf0c0322e9389684ca2 Mon Sep 17 00:00:00 2001 From: Ao Tang Date: Thu, 12 Dec 2024 09:44:09 -0500 Subject: [PATCH 021/128] Prevent llama3.1 from using Linear interpolation (#11548) * prevent llama3.1 from using linear interpolation * Apply isort and black reformatting Signed-off-by: suiyoubi --------- Signed-off-by: suiyoubi Co-authored-by: suiyoubi --- scripts/checkpoint_converters/convert_llama_hf_to_nemo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/checkpoint_converters/convert_llama_hf_to_nemo.py b/scripts/checkpoint_converters/convert_llama_hf_to_nemo.py index 44de38497b44..b963578ed874 100644 --- a/scripts/checkpoint_converters/convert_llama_hf_to_nemo.py +++ b/scripts/checkpoint_converters/convert_llama_hf_to_nemo.py @@ -108,7 +108,7 @@ def load_config(args, llama_config): rope_type = llama_config['rope_scaling'].get('rope_type') if rope_type is None: rope_type = llama_config['rope_scaling'].get('type') - if rope_type in ('linear', 'llama3'): + if rope_type in ('linear',): nemo_config['seq_len_interpolation_factor'] = llama_config['rope_scaling']['factor'] else: raise ValueError("Only linear rope scaling type is supported now") From c1bb9506e1716211975a683bb405a9eb03e2c724 Mon Sep 17 00:00:00 2001 From: Ryan Langman Date: Thu, 12 Dec 2024 07:05:41 -0800 Subject: [PATCH 022/128] [TTS] Add audio and mel codec HF models to docs (#11526) Signed-off-by: Ryan --- docs/source/tts/data/models_codec.csv | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/source/tts/data/models_codec.csv b/docs/source/tts/data/models_codec.csv index e209df8e687d..b8bd3d5c1be1 100644 --- a/docs/source/tts/data/models_codec.csv +++ b/docs/source/tts/data/models_codec.csv @@ -4,4 +4,8 @@ mel_codec_22khz_medium,LibriVox and Common Voice,22050Hz,nemo.collections.tts.mo mel_codec_44khz_medium,LibriVox and Common Voice,44100Hz,nemo.collections.tts.models.AudioCodecModel,`mel_codec_44khz_medium `_,``https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mel_codec_44khz_medium/versions/v1/files/mel_codec_44khz_medium.nemo`` mel_codec_22khz_fullband_medium,LibriVox and Common Voice,22050Hz,nemo.collections.tts.models.AudioCodecModel,`mel_codec_22khz_fullband_medium `_,``https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mel_codec_22khz_fullband_medium/versions/v1/files/mel_codec_22khz_fullband_medium.nemo`` mel_codec_44khz_fullband_medium,LibriVox and Common Voice,44100Hz,nemo.collections.tts.models.AudioCodecModel,`mel_codec_44khz_fullband_medium `_,``https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mel_codec_44khz_fullband_medium/versions/v1/files/mel_codec_44khz_fullband_medium.nemo`` -nvidia/low-frame-rate-speech-codec-22khz,LibriVox and Common Voice,22050Hz,nemo.collections.tts.models.AudioCodecModel,`audio_codec_low_frame_rate_22khz `_,``https://huggingface.co/nvidia/low-frame-rate-speech-codec-22khz/resolve/main/low-frame-rate-speech-codec-22khz.nemo`` \ No newline at end of file +nvidia/low-frame-rate-speech-codec-22khz,LibriVox and Common Voice,22050Hz,nemo.collections.tts.models.AudioCodecModel,`audio_codec_low_frame_rate_22khz `_,``https://huggingface.co/nvidia/low-frame-rate-speech-codec-22khz/resolve/main/low-frame-rate-speech-codec-22khz.nemo`` +nvidia/audio-codec-22khz,LibriVox and Common Voice,22050Hz,nemo.collections.tts.models.AudioCodecModel,`audio-codec-22khz `_,``https://huggingface.co/nvidia/audio-codec-22khz/resolve/main/audio-codec-22khz.nemo`` 
+nvidia/audio-codec-44khz,LibriVox and Common Voice,44100Hz,nemo.collections.tts.models.AudioCodecModel,`audio-codec-44khz `_,``https://huggingface.co/nvidia/audio-codec-44khz/resolve/main/audio-codec-44khz.nemo`` +nvidia/mel-codec-22khz,LibriVox and Common Voice,22050Hz,nemo.collections.tts.models.AudioCodecModel,`mel-codec-22khz `_,``https://huggingface.co/nvidia/mel-codec-22khz/resolve/main/mel-codec-22khz.nemo`` +nvidia/mel-codec-44khz,LibriVox and Common Voice,44100Hz,nemo.collections.tts.models.AudioCodecModel,`mel-codec-44khz `_,``https://huggingface.co/nvidia/mel-codec-44khz/resolve/main/mel-codec-44khz.nemo`` \ No newline at end of file From 4017f426d68fd62f8b099aab14d80bdf9b6fdc76 Mon Sep 17 00:00:00 2001 From: "He Huang (Steve)" <105218074+stevehuang52@users.noreply.github.com> Date: Thu, 12 Dec 2024 10:45:48 -0500 Subject: [PATCH 023/128] Update for NEST release (#11537) * update for nest release Signed-off-by: stevehuang52 * make pylint happier Signed-off-by: stevehuang52 * fix for lhotse dataloader Signed-off-by: stevehuang52 * update yaml Signed-off-by: stevehuang52 * minor refactor Signed-off-by: stevehuang52 * clean up Signed-off-by: stevehuang52 * clean up Signed-off-by: stevehuang52 --------- Signed-off-by: stevehuang52 --- .../conf/ssl/nest/nest_fast-conformer.yaml | 33 +- examples/asr/speech_pretraining/README.md | 2 + .../conformer_transformer_large_bpe.yaml | 2 +- .../fastconformer_transformer_large_bpe.yaml | 2 +- nemo/collections/asr/models/ssl_models.py | 9 + nemo/collections/asr/modules/__init__.py | 2 + .../asr/modules/ssl_modules/__init__.py | 4 + .../modules/ssl_modules/multi_layer_feat.py | 10 +- .../common/parts/preprocessing/collections.py | 12 +- nemo/core/classes/modelPT.py | 15 +- scripts/ssl/extract_features.py | 285 ++++++++++++++++++ 11 files changed, 347 insertions(+), 29 deletions(-) create mode 100644 scripts/ssl/extract_features.py diff --git a/examples/asr/conf/ssl/nest/nest_fast-conformer.yaml b/examples/asr/conf/ssl/nest/nest_fast-conformer.yaml index 2124e6e6f7f1..58be77b5177b 100644 --- a/examples/asr/conf/ssl/nest/nest_fast-conformer.yaml +++ b/examples/asr/conf/ssl/nest/nest_fast-conformer.yaml @@ -2,22 +2,22 @@ # # Here are the recommended configs for different variants of FastConformer, other parameters are the same as in this config file. 
# -# +--------------+---------+---------+----------+----------------+--------------+------------+ -# | Model | d_model | n_heads | n_layers |conv_kernel_size| weight_decay | xscaling | -# +==============+=========+========+===========+================+==============+============+ -# | Small (14M) | 176 | 4 | 16 | 9 | 0.0 | True | -# +--------------+---------+--------+-----------+----------------+--------------+------------+ -# | Medium (32M) | 256 | 4 | 16 | 9 | 1e-3 | True | -# +--------------+---------+--------+-----------+----------------+--------------+------------+ -# | Large (120M) | 512 | 8 | 17 | 9 | 1e-3 | True | -# +--------------+---------+--------+-----------+----------------+--------------+------------+ -# | XLarge (616M)| 1024 | 8 | 24 | 9 | 1e-3 | False | -# +--------------+---------+--------+-----------+----------------+--------------+------------+ -# | XXLarge(1.2B)| 1024 | 8 | 42 | 9 | 1e-3 | False | -# +--------------------------------------------------------------+--------------+------------+ - - -name: "NEST-FastConformer-SSL" +# +--------------+---------+---------+----------+----------------+--------------+------------+---------+ +# | Model | d_model | n_heads | n_layers |conv_kernel_size| weight_decay | xscaling | use_bias| +# +==============+=========+========+===========+================+==============+============+=========+ +# | Small (14M) | 176 | 4 | 16 | 9 | 0.0 | True | True | +# +--------------+---------+--------+-----------+----------------+--------------+------------+---------+ +# | Medium (32M) | 256 | 4 | 16 | 9 | 1e-3 | True | True | +# +--------------+---------+--------+-----------+----------------+--------------+------------+---------+ +# | Large (120M) | 512 | 8 | 17 | 9 | 1e-3 | True | True | +# +--------------+---------+--------+-----------+----------------+--------------+------------+---------+ +# | XLarge (616M)| 1024 | 8 | 24 | 9 | 1e-3 | False | False | +# +--------------+---------+--------+-----------+----------------+--------------+------------+---------+ +# | XXLarge(1.2B)| 1024 | 8 | 42 | 5 | 1e-3 | False | False | +# +--------------------------------------------------------------+--------------+------------+---------+ + + +name: "SSL-NEST-FastConformer" model: sample_rate: 16000 @@ -242,6 +242,7 @@ exp_manager: monitor: "val_loss" mode: "min" save_top_k: 1 + always_save_nemo: True # saves the checkpoints as nemo files instead of PTL checkpoints # you need to set these two to True to continue the training resume_if_exists: true diff --git a/examples/asr/speech_pretraining/README.md b/examples/asr/speech_pretraining/README.md index 777ea0602789..e78fb5209d0f 100644 --- a/examples/asr/speech_pretraining/README.md +++ b/examples/asr/speech_pretraining/README.md @@ -6,6 +6,8 @@ There are two main types of supported self-supervised learning methods: - [Wav2vec-BERT](https://arxiv.org/abs/2108.06209): `speech_pre_training.py` - [NEST](https://arxiv.org/abs/2408.13106): `masked_token_pred_pretrain.py` - For downstream tasks that use NEST as multi-layer feature extractor, please refer to `./downstream/speech_classification_mfa_train.py` + - For extracting multi-layer features from NEST, please refer to `/scripts/ssl/extract_features.py` + - For using NEST as weight initialization for downstream tasks, please refer to the usage of [maybe_init_from_pretrained_checkpoint](https://github.com/NVIDIA/NeMo/blob/main/nemo/core/classes/modelPT.py#L1242). 
For their corresponding usage, please refer to the example yaml config: diff --git a/examples/slu/speech_intent_slot/configs/conformer_transformer_large_bpe.yaml b/examples/slu/speech_intent_slot/configs/conformer_transformer_large_bpe.yaml index 5d309f3cd193..c28db50463f8 100644 --- a/examples/slu/speech_intent_slot/configs/conformer_transformer_large_bpe.yaml +++ b/examples/slu/speech_intent_slot/configs/conformer_transformer_large_bpe.yaml @@ -130,7 +130,7 @@ model: ffn_dropout: 0.0 classifier: - _target_: nemo.collections.common.parts.MultiLayerPerceptron + _target_: nemo.collections.asr.parts.submodules.token_classifier.TokenClassifier hidden_size: ${model.encoder.d_model} num_classes: -1 num_layers: 1 diff --git a/examples/slu/speech_intent_slot/configs/fastconformer_transformer_large_bpe.yaml b/examples/slu/speech_intent_slot/configs/fastconformer_transformer_large_bpe.yaml index 6b700e9001f7..c69b6283bf2f 100644 --- a/examples/slu/speech_intent_slot/configs/fastconformer_transformer_large_bpe.yaml +++ b/examples/slu/speech_intent_slot/configs/fastconformer_transformer_large_bpe.yaml @@ -140,7 +140,7 @@ model: ffn_dropout: 0.0 classifier: - _target_: nemo.collections.common.parts.MultiLayerPerceptron + _target_: nemo.collections.asr.parts.submodules.token_classifier.TokenClassifier hidden_size: ${model.encoder.d_model} num_classes: -1 num_layers: 1 diff --git a/nemo/collections/asr/models/ssl_models.py b/nemo/collections/asr/models/ssl_models.py index 9150da7bf7c2..d02be3f9fa4e 100644 --- a/nemo/collections/asr/models/ssl_models.py +++ b/nemo/collections/asr/models/ssl_models.py @@ -609,6 +609,10 @@ def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0): class EncDecMaskedTokenPredModel(SpeechEncDecSelfSupervisedModel): + """ + Speech self-supervised model that performs masked token prediction on the encoder output. + """ + def transfer_batch_to_device(self, batch: Any, device: torch.device, dataloader_idx: int) -> Any: """ PTL hook: https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#transfer-batch-to-device @@ -806,6 +810,11 @@ def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0): class EncDecDenoiseMaskedTokenPredModel(EncDecMaskedTokenPredModel): + """ + Model class that performs denoising and masked token prediction for speech self-supervised learning. 
+ Please refer to the NEST paper for more details: https://arxiv.org/abs/2408.13106 + """ + def __init__(self, cfg: DictConfig, trainer: Trainer = None): super().__init__(cfg, trainer) diff --git a/nemo/collections/asr/modules/__init__.py b/nemo/collections/asr/modules/__init__.py index 940eb079ae27..14abdd0d2776 100644 --- a/nemo/collections/asr/modules/__init__.py +++ b/nemo/collections/asr/modules/__init__.py @@ -45,6 +45,8 @@ ) from nemo.collections.asr.modules.squeezeformer_encoder import SqueezeformerEncoder, SqueezeformerEncoderAdapter from nemo.collections.asr.modules.ssl_modules import ( + ConformerMultiLayerFeatureExtractor, + ConformerMultiLayerFeaturePreprocessor, ConvFeatureMaksingWrapper, MultiSoftmaxDecoder, RandomBlockMasking, diff --git a/nemo/collections/asr/modules/ssl_modules/__init__.py b/nemo/collections/asr/modules/ssl_modules/__init__.py index 0f31d4055df1..c9b7e970fe2c 100644 --- a/nemo/collections/asr/modules/ssl_modules/__init__.py +++ b/nemo/collections/asr/modules/ssl_modules/__init__.py @@ -17,5 +17,9 @@ SpeakerNoiseAugmentation, ) from nemo.collections.asr.modules.ssl_modules.masking import ConvFeatureMaksingWrapper, RandomBlockMasking +from nemo.collections.asr.modules.ssl_modules.multi_layer_feat import ( + ConformerMultiLayerFeatureExtractor, + ConformerMultiLayerFeaturePreprocessor, +) from nemo.collections.asr.modules.ssl_modules.multi_softmax_decoder import MultiSoftmaxDecoder from nemo.collections.asr.modules.ssl_modules.quantizers import RandomProjectionVectorQuantizer diff --git a/nemo/collections/asr/modules/ssl_modules/multi_layer_feat.py b/nemo/collections/asr/modules/ssl_modules/multi_layer_feat.py index e38e3abb6774..b1ff1c1cc74b 100644 --- a/nemo/collections/asr/modules/ssl_modules/multi_layer_feat.py +++ b/nemo/collections/asr/modules/ssl_modules/multi_layer_feat.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Tuple +from typing import Callable, List, Optional, Tuple import torch import torch.distributed @@ -82,7 +82,7 @@ def forward( class ConformerMultiLayerFeatureExtractor(NeuralModule, Exportable): - def __init__(self, encoder, aggregator, layer_idx_list): + def __init__(self, encoder, aggregator: Optional[Callable] = None, layer_idx_list: Optional[List[int]] = None): """ Args: encoder: ConformerEncoder instance. @@ -145,7 +145,8 @@ def forward( layer_lengths = total_registry[f"interctc/layer_length_{layer_idx}"] except KeyError: raise RuntimeError( - f"Intermediate layer {layer_idx} was not captured! Check the layer index and the number of ConformerEncoder layers." + f"Intermediate layer {layer_idx} was not captured! Check the layer index and the number of " + "ConformerEncoder layers." 
) if len(layer_outputs) > 1 or len(layer_lengths) > 1: raise RuntimeError("Make sure encoder.forward is called exactly one time") @@ -153,7 +154,8 @@ def forward( encoded_len_list.append(layer_lengths[0]) # [B] self.encoder.reset_registry() - + if self.aggregator is None: + return encoded_list, encoded_len_list return self.aggregator(encoded_list, encoded_len_list) diff --git a/nemo/collections/common/parts/preprocessing/collections.py b/nemo/collections/common/parts/preprocessing/collections.py index d54c807f2637..12f5a9b3ecff 100644 --- a/nemo/collections/common/parts/preprocessing/collections.py +++ b/nemo/collections/common/parts/preprocessing/collections.py @@ -135,6 +135,7 @@ def __init__( """ output_type = self.OUTPUT_TYPE + all_has_duration = True data, duration_filtered, num_filtered, total_duration = [], 0.0, 0, 0.0 if index_by_file_id: self.mapping = {} @@ -142,13 +143,15 @@ def __init__( for id_, audio_file, duration, offset, text, speaker, orig_sr, token_labels, lang in zip( ids, audio_files, durations, offsets, texts, speakers, orig_sampling_rates, token_labels, langs ): + if duration is None: + all_has_duration = False # Duration filters. - if min_duration is not None and duration < min_duration: + if duration is not None and min_duration is not None and duration < min_duration: duration_filtered += duration num_filtered += 1 continue - if max_duration is not None and duration > max_duration: + if duration is not None and max_duration is not None and duration > max_duration: duration_filtered += duration num_filtered += 1 continue @@ -175,7 +178,7 @@ def __init__( num_filtered += 1 continue - total_duration += duration + total_duration += duration if duration is not None else 0.0 data.append(output_type(id_, audio_file, duration, text_tokens, offset, text, speaker, orig_sr, lang)) if index_by_file_id: @@ -196,7 +199,8 @@ def __init__( logging.info("Dataset loaded with %d files totalling %.2f hours", len(data), total_duration / 3600) logging.info("%d files were filtered totalling %.2f hours", num_filtered, duration_filtered / 3600) - + if not all_has_duration: + logging.info(f"Not all audios have duration information, the total number of hours is inaccurate.") super().__init__(data) diff --git a/nemo/core/classes/modelPT.py b/nemo/core/classes/modelPT.py index 88ff47caf8c2..1e7ef0c3a9b5 100644 --- a/nemo/core/classes/modelPT.py +++ b/nemo/core/classes/modelPT.py @@ -902,7 +902,10 @@ def setup(self, stage: Optional[str] = None): and self._cfg.train_ds is not None and self._cfg.train_ds.get('defer_setup', False) ) - if self.train_dataloader() is None and train_deferred_setup: + no_train_dataloader = self.train_dataloader() is None or ( + isinstance(self.train_dataloader(), list) and len(self.train_dataloader()) == 0 + ) + if no_train_dataloader and train_deferred_setup: self.setup_training_data(self._cfg.train_ds) if stage in ('fit', 'validate'): @@ -911,7 +914,10 @@ def setup(self, stage: Optional[str] = None): and self._cfg.validation_ds is not None and self._cfg.validation_ds.get('defer_setup', False) ) - if self.val_dataloader() is None and val_deferred_setup: + no_val_dataloader = self.val_dataloader() is None or ( + isinstance(self.val_dataloader(), list) and len(self.val_dataloader()) == 0 + ) + if no_val_dataloader and val_deferred_setup: self.setup_multiple_validation_data(val_data_config=self._cfg.validation_ds) if stage == 'test': @@ -920,7 +926,10 @@ def setup(self, stage: Optional[str] = None): and self._cfg.test_ds is not None and 
self._cfg.test_ds.get('defer_setup', False) ) - if self.test_dataloader() is None and test_deferred_setup: + no_test_dataloader = self.test_dataloader() is None or ( + isinstance(self.test_dataloader(), list) and len(self.test_dataloader()) == 0 + ) + if no_test_dataloader and test_deferred_setup: self.setup_multiple_test_data(test_data_config=self._cfg.test_ds) def train_dataloader(self): diff --git a/scripts/ssl/extract_features.py b/scripts/ssl/extract_features.py new file mode 100644 index 000000000000..d9475d46925b --- /dev/null +++ b/scripts/ssl/extract_features.py @@ -0,0 +1,285 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script is designed to extract features from different layers of a pretrained SSL model. +The extracted features will be in *.npy format, and in the shape of [L, D, T], where L is the +number of layers, D is the feature dimension, and T is the time dimension. + +Example usage: + +python extract_features.py \ + --model_path="nvidia/ssl_en_nest_large_v1.0" \ + --input= \ + --output= \ + --layers="all" \ + --batch_size=8 \ + --workers=8 \ + --max_cache=1000 # save features every 1000 samples to avoid OOM in system memory +""" + + +import argparse +import os +import tempfile +from pathlib import Path +from typing import List + +import lightning.pytorch as pl +import numpy as np +import torch +from tqdm import tqdm + +from nemo.collections.asr.data.audio_to_text_dataset import get_char_dataset +from nemo.collections.asr.models import EncDecDenoiseMaskedTokenPredModel +from nemo.collections.asr.modules import ConformerMultiLayerFeatureExtractor +from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest +from nemo.collections.common.data.utils import move_data_to_device +from nemo.collections.common.parts.preprocessing.manifest import get_full_path +from nemo.core.classes.common import typecheck +from nemo.utils import logging + +typecheck.set_typecheck_enabled(enabled=False) + +parser = argparse.ArgumentParser(description="Extract audio features using an SSL model") +parser.add_argument( + "--model_path", + type=str, + required=True, + help="Path to the .nemo model file or a pretrained model name from the NGC/HF model hub", +) +parser.add_argument( + "-i", + "--input", + type=str, + required=True, + help="Path to the input audio file, or list of files, directory or jsonl manifest", +) +parser.add_argument( + "-o", "--output", type=str, required=True, help="Path to the output directory that contains .npy file" +) +parser.add_argument( + "-l", + "--layers", + type=str, + default="all", + help="Layers to extract features from, use 'all' to extract from all layer, 'last' for last layer, " + "or comma-separated indices of the target layers (e.g. 
'0,1,2')", +) +parser.add_argument("-b", "--batch_size", type=int, default=8, help="Batch size for feature extraction") +parser.add_argument("-w", "--workers", type=int, default=8, help="Number of workers for feature extraction") +parser.add_argument("-d", "--device", type=str, default="cuda", help="Device to use for feature extraction") +parser.add_argument("-t", "--type", type=str, default="wav", help="audio file type, only needed for directory input") +parser.add_argument("--use_amp", action="store_true", help="Use automatic mixed precision") +parser.add_argument( + "--amp_dtype", + type=str, + default="float16", + choices=["float16", "bfloat16"], + help="Data type for automatic mixed precision", +) +parser.add_argument("-mc", "--max_cache", type=int, default=-1, help="Max cache size before saving features") +args = parser.parse_args() + + +def get_input_manifest(input: str) -> List[dict]: + """ + Build manifest from input path or directory + """ + if input.endswith(".json") or input.endswith(".jsonl") and os.path.isfile(input): + logging.info(f"Reading manifest from: {input}") + manifest = [ + {"audio_filepath": str(get_full_path(item["audio_filepath"], input)), "duration": None, "text": "-"} + for item in read_manifest(input) + ] + elif os.path.isdir(input): + logging.info(f"Creating manifest from directory: {input}") + manifest = [ + {"audio_filepath": str(p), "duration": None, "text": "-"} for p in Path(input).rglob(f"*.{args.type}") + ] + logging.info(f"Found {len(manifest)} items of {args.type} files") + elif os.path.isfile(input): + logging.info(f"Reading single file: {input}") + manifest = [{"audio_filepath": Path(input).absolute.as_posix(), "duration": None, "text": "-"}] + else: + raise ValueError(f"Invalid input: {input}") + return manifest + + +def load_model(model_path): + """ + Load SSL model from local or pretrained + """ + if model_path.endswith(".nemo") and os.path.isfile(model_path): + logging.info(f"Loading model from local: {model_path}") + model = EncDecDenoiseMaskedTokenPredModel.restore_from(model_path) + else: + logging.info(f"Loading model from pretrained: {model_path}") + model = EncDecDenoiseMaskedTokenPredModel.from_pretrained(model_name=model_path) + return model + + +class FeatureExtractor(pl.LightningModule): + """ + Wrapper class for extracting features from SSL model + """ + + def __init__(self, ssl_model: EncDecDenoiseMaskedTokenPredModel, layer: str = "all"): + super().__init__() + self.preprocessor = ssl_model.preprocessor + self.encoder = ssl_model.encoder + self.layer_idx_list = None + self.sample_rate = ssl_model.cfg.sample_rate + if layer == "all": + self.layer_idx_list = None + elif layer == "last": + self.layer_idx_list = [len(self.encoder.layers) - 1] + else: + try: + self.layer_idx_list = [int(l) for l in layer.split(",")] + except Exception as e: + raise ValueError(f"Invalid layer argument: {layer}. 
Error: {e}") + self.feature_extractor = ConformerMultiLayerFeatureExtractor( + self.encoder, aggregator=None, layer_idx_list=self.layer_idx_list + ) + + def forward( + self, + input_signal=None, + input_signal_length=None, + processed_signal=None, + processed_signal_length=None, + ): + """ + Forward pass to extract features, same input interface as EncDecDenoiseMaskedTokenPredModel.forward + """ + has_input_signal = input_signal is not None and input_signal_length is not None + has_processed_signal = processed_signal is not None and processed_signal_length is not None + if (has_input_signal ^ has_processed_signal) == False: + raise ValueError( + f"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive " + " with ``processed_signal`` and ``processed_signal_len`` arguments." + ) + if not has_processed_signal: + processed_signal, processed_signal_length = self.preprocessor( + input_signal=input_signal, + length=input_signal_length, + ) + encoded, encoded_len = self.feature_extractor(audio_signal=processed_signal, length=processed_signal_length) + return encoded, encoded_len + + +def maybe_save_features(output_dir, results, max_cache, manifest): + """ + Check if the cache is full and save features to disk + """ + if len(results) == 0 or max_cache < 0 or len(results) < max_cache: + return + os.makedirs(output_dir, exist_ok=True) + logging.info(f"Saving {len(results)} features to {output_dir}") + + for sample_id, audio_file, features_np in tqdm(results, desc="Saving features", total=len(results)): + filename = str(audio_file).replace("/", "_").replace(".", "_") + if len(filename) > 256: + filename = filename[-256:] + output_path = os.path.join(output_dir, f"{filename}.npy") + np.save(output_path, features_np) + manifest[sample_id]["feature_path"] = output_path + + logging.info(f"Saved {len(results)} features to {output_dir}") + results.clear() + + +def extract_features(args): + """ + Main function to extract and save features from SSL model + """ + + logging.info(f"Extracting features using params: {vars(args)}") + + # Load model + model = load_model(args.model_path) + feature_extractor = FeatureExtractor(model, args.layers) + device = torch.device(args.device) + feature_extractor.to(device) + + # Load data + logging.info(f"Building dataset from input: {args.input}") + tmp_manifest = tempfile.NamedTemporaryFile(mode="w", delete=False) + manifest = get_input_manifest(args.input) + write_manifest(tmp_manifest.name, manifest) + total_num_samples = len(manifest) + + # Build dataloader + config = { + "manifest_filepath": tmp_manifest.name, + "sample_rate": feature_extractor.sample_rate, + "return_sample_id": True, + } + dataset = get_char_dataset(config) + logging.info(f"Built dataset with {len(dataset)} samples") + dataloader = torch.utils.data.DataLoader( + dataset=dataset, + collate_fn=dataset.collate_fn, + batch_size=args.batch_size, + shuffle=False, + num_workers=args.workers, + pin_memory=True, + drop_last=False, + ) + + # Extract features + indices = set() + results = [] + amp_dtype = torch.float16 if args.amp_dtype == "float16" else torch.bfloat16 + logging.info(f"Extracting features using AMP: {args.use_amp}, dtype: {amp_dtype}") + with torch.amp.autocast('cuda' if torch.cuda.is_available() else 'cpu', dtype=amp_dtype, enabled=args.use_amp): + with torch.inference_mode(): + for batch in tqdm(dataloader, desc="Extracting features"): + batch = move_data_to_device(batch, device) + audio_signal, audio_signal_len, _, _, sample_id = batch + features, features_len = 
feature_extractor( + input_signal=audio_signal, input_signal_length=audio_signal_len + ) + batch_size = features[0].size(0) + num_layers = len(features) + for i in range(batch_size): + sid_i = sample_id[i] + if sid_i in indices: + logging.warning(f"Skipping duplicated sample_id: {sample_id}") + continue + + feat_i_len = features_len[0][i] + feat_i = [] + for j in range(num_layers): + feat_i.append(features[j][i][:, :feat_i_len]) + + feat_i_np = torch.stack(feat_i, dim=0).cpu().numpy() + + indices.add(sid_i) + results.append((sid_i, manifest[sid_i]['audio_filepath'], feat_i_np)) + + maybe_save_features(args.output, results, args.max_cache, manifest) + + maybe_save_features(args.output, results, 0, manifest) + + output_manifest = Path(args.output) / "features.json" + write_manifest(output_manifest, manifest) + os.remove(tmp_manifest.name) + logging.info(f"Extracted features from {total_num_samples} samples to {args.output}") + logging.info(f"Manifest saved to: {output_manifest}") + + +if __name__ == "__main__": + extract_features(args) From 4b714b31cecd1887133fb8be83614ab83b775398 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20=C5=BBelasko?= Date: Thu, 12 Dec 2024 12:05:32 -0500 Subject: [PATCH 024/128] Merging SpeechLLM development branch (#11462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Port changes related to SFT text+speech dataloading Signed-off-by: Piotr Żelasko * Revert changes from Canary(nonLLM) code Signed-off-by: Piotr Żelasko * Add joint text/audio dataloading capability to speechllm Signed-off-by: Piotr Żelasko * include text-only into fprop of training and eval; TODO: text-only predict Signed-off-by: zhehuaichen * Actually working forward step Signed-off-by: Piotr Żelasko * Support for source-target text file pair training for MT+speech Signed-off-by: Piotr Żelasko * Include supervision text tokens in audio example's num tokens Signed-off-by: Piotr Żelasko * Disable conformer seq len NCCL sync Signed-off-by: Piotr Żelasko * Preliminary sampler fusion stragies support: mux/zip/round_robin/randomized_round_robin Signed-off-by: Piotr Żelasko * Working V2 version of multimodal dataloading. Each modality gets its own batch settings that can be merged with zip sampler to enjoy max batch sizes for both modalities in a single training step. Each modality runs fwd+bwd in turn to save GPU memory (instead of running fwd separately and bwd together). Signed-off-by: Piotr Żelasko * Add missing config Signed-off-by: Piotr Żelasko * Revert multimodal grad accum and fix mask padding issue Signed-off-by: Piotr Żelasko * Add modality weights support via cfg.model.modality_weights Signed-off-by: Piotr Żelasko * Fix for V2 dataloader shuffling CRITICAL Signed-off-by: Piotr Żelasko * Restore multimodal grad accum Signed-off-by: Piotr Żelasko * Fix unit tests for multi-sampler configurations Signed-off-by: Piotr Żelasko * Apply isort and black reformatting Signed-off-by: pzelasko * nemo gemma to hf conversion (#9629) * adding script for gemma nemo to hf Signed-off-by: Krishna Puvvada * adding verification for convert_gemma_nemo_to_hf Signed-off-by: Krishna Puvvada * Apply isort and black reformatting Signed-off-by: krishnacpuvvada --------- Signed-off-by: Krishna Puvvada Signed-off-by: krishnacpuvvada Co-authored-by: Krishna Puvvada Co-authored-by: krishnacpuvvada * support FSDP (thank Yifan for early trying) (#10062) Note: as of now, this is still not fully working on the cluster. See above doc for details. 
Signed-off-by: zhehuaichen * Fix unit tests after rebasing on recent main Signed-off-by: Piotr Żelasko * support megatron_amp_O2 and tp (#10599) * Port changes related to SFT text+speech dataloading Signed-off-by: Piotr Żelasko * Revert changes from Canary(nonLLM) code Signed-off-by: Piotr Żelasko * Add joint text/audio dataloading capability to speechllm Signed-off-by: Piotr Żelasko * include text-only into fprop of training and eval; TODO: text-only predict Signed-off-by: zhehuaichen * Actually working forward step Signed-off-by: Piotr Żelasko * Support for source-target text file pair training for MT+speech Signed-off-by: Piotr Żelasko * Include supervision text tokens in audio example's num tokens Signed-off-by: Piotr Żelasko * Disable conformer seq len NCCL sync Signed-off-by: Piotr Żelasko * Preliminary sampler fusion stragies support: mux/zip/round_robin/randomized_round_robin Signed-off-by: Piotr Żelasko * Working V2 version of multimodal dataloading. Each modality gets its own batch settings that can be merged with zip sampler to enjoy max batch sizes for both modalities in a single training step. Each modality runs fwd+bwd in turn to save GPU memory (instead of running fwd separately and bwd together). Signed-off-by: Piotr Żelasko * Add missing config Signed-off-by: Piotr Żelasko * Revert multimodal grad accum and fix mask padding issue Signed-off-by: Piotr Żelasko * Add modality weights support via cfg.model.modality_weights Signed-off-by: Piotr Żelasko * Fix for V2 dataloader shuffling CRITICAL Signed-off-by: Piotr Żelasko * Restore multimodal grad accum Signed-off-by: Piotr Żelasko * Fix unit tests for multi-sampler configurations Signed-off-by: Piotr Żelasko * Apply isort and black reformatting Signed-off-by: pzelasko * nemo gemma to hf conversion (#9629) * adding script for gemma nemo to hf Signed-off-by: Krishna Puvvada * adding verification for convert_gemma_nemo_to_hf Signed-off-by: Krishna Puvvada * Apply isort and black reformatting Signed-off-by: krishnacpuvvada --------- Signed-off-by: Krishna Puvvada Signed-off-by: krishnacpuvvada Co-authored-by: Krishna Puvvada Co-authored-by: krishnacpuvvada * support FSDP (thank Yifan for early trying) Signed-off-by: zhehuaichen * debug TP deadlock Signed-off-by: zhehuaichen * some fixes for fsdp and tp /lustre/fsw/portfolios/llmservice/users/zhehuaic/results/canary-v0_speechllm/prompt_lhmerge5_p2b_oci_FC-GPT_llama_canaryset_b6s4kf-sunolong_noCC_langtemp0.5_dsettemp0.5_lr1e-4wd1e-3_CosineAnnealing_warmup2500_minlr1e-6_gbs2048_mbs16_ep200/error-1417621-0.out /lustre/fsw/portfolios/llmservice/users/zhehuaic/results/canary-v0_speechllm/prompt_lhmerge5_p2b_tp_oci_FC-GPT_llama_canaryset_b6s4kf-sunolong_noCC_langtemp0.5_dsettemp0.5_lr1e-4wd1e-3_CosineAnnealing_warmup2500_minlr1e-6_gbs128_mbs16_ep200/error-1421103-3.out Signed-off-by: zhehuaichen * nit fix Signed-off-by: zhehuaichen * fix for llama3.1 Signed-off-by: zhehuaichen * for llama3.1 Signed-off-by: zhehuaichen * fix for inference Signed-off-by: zhehuaichen * fix inference Signed-off-by: zhehuaichen * fix grad accu Signed-off-by: zhehuaichen * fix inference Signed-off-by: zhehuaichen * initial impl to support megatron_amp_O2 in salm, bestow, salm-t5 Signed-off-by: zhehuaichen --------- Signed-off-by: Piotr Żelasko Signed-off-by: zhehuaichen Signed-off-by: Piotr Żelasko Signed-off-by: pzelasko Signed-off-by: Krishna Puvvada Signed-off-by: krishnacpuvvada Co-authored-by: Piotr Żelasko Co-authored-by: Piotr Żelasko Co-authored-by: pzelasko Co-authored-by: Krishna Puvvada 
<93558329+krishnacpuvvada@users.noreply.github.com> Co-authored-by: Krishna Puvvada Co-authored-by: krishnacpuvvada * minor change in dataloader (#10601) * Speechllm dataset basic unit test (#10631) * Basic unit test for speechllm lhotse dataset Signed-off-by: Piotr Żelasko * cleanup Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko * Unit test for existing speechllm dataset with llama2 prompt format (#10634) Signed-off-by: Piotr Żelasko * [speechllm] Replace TextProcessing with PromptFormatter (#10639) * [speechllm] Replace TextProcessing with PromptFormatter Signed-off-by: Piotr Żelasko * Test for tokens_to_generate Signed-off-by: Piotr Żelasko * Padding optimization for speechlm dataset Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko * Multimodal conversation format dataloading (#10683) * Draft implementation of NeMo Multimodal Conversation format Signed-off-by: Piotr Żelasko * Fully working data parsing and iteration Signed-off-by: Piotr Żelasko * Fully working dataloading with tokenization + prompting Signed-off-by: Piotr Żelasko * Collapse consecutive user turns into single turn Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko * a few fixes for the new prompt template based dataloader and lora+distributed fused adam (#10701) * Draft implementation of NeMo Multimodal Conversation format Signed-off-by: Piotr Żelasko * Fully working data parsing and iteration Signed-off-by: Piotr Żelasko * Fully working dataloading with tokenization + prompting Signed-off-by: Piotr Żelasko * Collapse consecutive user turns into single turn Signed-off-by: Piotr Żelasko * compatible with previous expts Signed-off-by: zhehuaichen * support gemma Signed-off-by: zhehuaichen * handle the case max_seq_length is smaller than input_id length Signed-off-by: zhehuaichen * fix max seq case Signed-off-by: zhehuaichen * fix lora ckpt storing and loading Signed-off-by: zhehuaichen * temp fix for distributed fused adam Signed-off-by: zhehuaichen * revert changes in nemo_adapters.py Signed-off-by: zhehuaichen * Fix tokenize_with_prompt Signed-off-by: Piotr Żelasko Signed-off-by: zhehuaichen --------- Signed-off-by: Piotr Żelasko Signed-off-by: zhehuaichen Signed-off-by: Piotr Żelasko Co-authored-by: Piotr Żelasko * Mechanism to insert BOS/EOS at the beginning/end of dialog (#10923) * Mechanism to insert BOS/EOS at the beginning/end of dialog Signed-off-by: Piotr Żelasko * Fix Gemma prompt formatter test Signed-off-by: Piotr Żelasko * Add a test specifically for multiturn insertion of bos/eos Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko * Add options to override default map/iterable dataset style selection in lhotse dataloader Signed-off-by: Piotr Żelasko * Feature/conversations tarred (#11086) * Multimodal conversation tarring script Signed-off-by: Piotr Żelasko * Fix sharding logic Signed-off-by: Piotr Żelasko * Fix dir creation Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko * EMMeTT support in SpeechLLM + tutorial for Lhotse Multimodal Dataloading (#10927) * Preliminary support for oomptimizer Signed-off-by: Piotr Żelasko * OOMptimizer for SpeechLLM Signed-off-by: Piotr Żelasko * Initial version of estimate token bins script Signed-off-by: Piotr Żelasko * Initial support for multimodal 2d bucketing Signed-off-by: Piotr Żelasko * Extend to text-to-text oomptimizer Signed-off-by: Piotr Żelasko * Preliminary support for Llama2 prompt format in ast+mt Signed-off-by: Piotr Żelasko * Support for 1D estimate token bins 
Signed-off-by: Piotr Żelasko * Support for 1D estimate token bins Signed-off-by: Piotr Żelasko * Fix Signed-off-by: Piotr Żelasko * Fix Signed-off-by: Piotr Żelasko * Minor tweaks Signed-off-by: Piotr Żelasko * Add min/max tokens filter Signed-off-by: Piotr Żelasko * Change to bisect_left for bucket idx selection Signed-off-by: Piotr Żelasko * Add reconfigure_num_microbatches_calculator at the start of train epoch for modular models Signed-off-by: Piotr Żelasko * Update lhotse multi-sampler config and make validation datasets finite Signed-off-by: Piotr Żelasko * Initial implementation of text+audio training for T5 modular models Signed-off-by: Piotr Żelasko * megatron t5 nmt prompt formatter Signed-off-by: Piotr Żelasko * Fixes for MT+AST T5 oomptimizer and training Signed-off-by: Piotr Żelasko * configs, fixes, token-per-token filtering * Support text modality in predict_step Signed-off-by: Piotr Żelasko * Support text data in val/test dl Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * fix infinite Signed-off-by: Piotr Żelasko * prompt format fixes Signed-off-by: Piotr Żelasko * Fixes in audio supervision Signed-off-by: Piotr Żelasko * remove superficial padding Signed-off-by: Piotr Żelasko * test config and prompt context fetching fixes Signed-off-by: Piotr Żelasko * support text-only decoding for salm/bestow Signed-off-by: Piotr Żelasko * Add unit tests for EMMETT / refactor prompt_format_fn Signed-off-by: Piotr Żelasko * make t5nmt prompt formatter auto discoverable Signed-off-by: Piotr Żelasko * include token count / tpt filtering in estimate_token_bins Signed-off-by: Piotr Żelasko * fix max token filter Signed-off-by: Piotr Żelasko * some fixes Signed-off-by: Piotr Żelasko * custom mixin for text adapters Signed-off-by: Piotr Żelasko * Warmup in oomptimizer-speechlm Signed-off-by: Piotr Żelasko * Move oomptimizer-speechllm to separate directory Signed-off-by: Piotr Żelasko * Initial cleanup Signed-off-by: Piotr Żelasko * Refactoring of prompt format fn and length measurement and filtering for data types; improved unit test coverage Signed-off-by: Piotr Żelasko * Refactor sampler constraints / filters into sampling.py Signed-off-by: Piotr Żelasko * Tests and support for sampler length measurement of multimodal conversations Signed-off-by: Piotr Żelasko * Update estimate_token_bins.py Signed-off-by: Piotr Żelasko * Move estimate_token_bins.py to speech_llm scripts Signed-off-by: Piotr Żelasko * Minor tweaks Signed-off-by: Piotr Żelasko * Fixes for SpeechLLM dataset Signed-off-by: Piotr Żelasko * Apply isort and black reformatting Signed-off-by: pzelasko * Add missing emmett tests Signed-off-by: Piotr Żelasko * Add tutorial about multimodal lhotse dataloading Signed-off-by: Piotr Żelasko * Updated documentation for multimodal dataloading Signed-off-by: Piotr Żelasko * Prompt Formatter tutorial Signed-off-by: Piotr Żelasko * Review comments Signed-off-by: Piotr Żelasko * Fixes for sampling filters None values Signed-off-by: Piotr Żelasko * Changes requested by Steve: moving some args to main config namespace in multi config sampler Signed-off-by: Piotr Żelasko * fix Signed-off-by: Piotr Żelasko * Update default configs to the modified config schema Signed-off-by: Piotr Żelasko * Fix omegaconf use issue Signed-off-by: Piotr 
Żelasko * Update the docs to the modified multi config format Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko Signed-off-by: Piotr Żelasko Signed-off-by: pzelasko Co-authored-by: pzelasko * Remove old TODO comments Signed-off-by: Piotr Żelasko * Remove prompts/fn.py Signed-off-by: Piotr Żelasko * Copyright notices Signed-off-by: Piotr Żelasko * Make linter happy Signed-off-by: Piotr Żelasko * Make linter happy Signed-off-by: Piotr Żelasko * Fix megatron test Signed-off-by: Piotr Żelasko * Fix megatron test Signed-off-by: Piotr Żelasko * Disable plugin for high entropy strings in secrets detector Signed-off-by: Piotr Żelasko * Fix CodeQL errors Signed-off-by: Piotr Żelasko * fix unit tests Signed-off-by: Piotr Żelasko * fix another unit test Signed-off-by: Piotr Żelasko * Fix multimodal tests Signed-off-by: Piotr Żelasko * Apply isort and black reformatting Signed-off-by: pzelasko * fixes after merging canary2 pr to main Signed-off-by: Piotr Żelasko * fix headers Signed-off-by: Piotr Żelasko * fix canary integration test + formatting Signed-off-by: Piotr Żelasko * Address reviews - add sync_max_audio_length flag for conformer encoder Signed-off-by: Piotr Żelasko * Revert change in secrets detector Signed-off-by: Piotr Żelasko * Revert change in secrets detector Signed-off-by: Piotr Żelasko * Revert change in secrets detector Signed-off-by: Piotr Żelasko * Address code review Signed-off-by: Piotr Żelasko * Address Steve's review Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko Signed-off-by: zhehuaichen Signed-off-by: Piotr Żelasko Signed-off-by: pzelasko Signed-off-by: Krishna Puvvada Signed-off-by: krishnacpuvvada Co-authored-by: zhehuaichen Co-authored-by: pzelasko Co-authored-by: Krishna Puvvada <93558329+krishnacpuvvada@users.noreply.github.com> Co-authored-by: Krishna Puvvada Co-authored-by: krishnacpuvvada Co-authored-by: zhehuaichen <139396994+zhehuaichen@users.noreply.github.com> --- docs/source/asr/datasets.rst | 279 ++++- ...r_audio_gpt_config_cross_llama_lhotse.yaml | 1 + ...o_gpt_config_cross_llama_lhotse_multi.yaml | 365 ++++++ .../conf/modular_audio_gpt_config_eval.yaml | 2 +- ...modular_audio_gpt_config_llama_lhotse.yaml | 1 + .../conf/salm/modular_audio_t5_config.yaml | 1 + .../salm/modular_audio_t5_multi_config.yaml | 343 ++++++ .../asr/data/audio_to_text_lhotse.py | 32 +- .../asr/data/audio_to_text_lhotse_prompted.py | 19 +- .../asr/models/aed_multitask_models.py | 9 +- .../asr/modules/conformer_encoder.py | 8 +- nemo/collections/common/data/__init__.py | 2 + .../common/data/lhotse/__init__.py | 13 +- nemo/collections/common/data/lhotse/cutset.py | 207 +++- .../common/data/lhotse/dataloader.py | 590 +++++----- .../common/data/lhotse/sampling.py | 317 ++++++ .../common/data/lhotse/text_adapters.py | 531 ++++++++- nemo/collections/common/data/prompt_fn.py | 91 ++ nemo/collections/common/prompts/__init__.py | 3 +- nemo/collections/common/prompts/canary.py | 118 +- nemo/collections/common/prompts/canary2.py | 120 +- nemo/collections/common/prompts/fn.py | 52 - nemo/collections/common/prompts/formatter.py | 21 +- nemo/collections/common/prompts/gemma.py | 23 + nemo/collections/common/prompts/llama.py | 69 +- nemo/collections/common/prompts/plain.py | 56 + nemo/collections/common/prompts/t5nmt.py | 106 ++ .../common/tokenizers/aggregate_tokenizer.py | 28 + .../speech_llm/data/audio_text_dataset.py | 2 +- .../speech_llm/data/build_dataset.py | 43 +- .../speech_llm/data/lhotse_dataset.py | 148 +-- .../speech_llm/models/modular_models.py | 430 
++++++- .../speech_llm/models/modular_t5_models.py | 418 ++++--- .../common/audio_text_generation_utils.py | 20 + .../speech_llm/modules/perception_modules.py | 41 +- .../speech_llm/parts/mixins/adapter_mixin.py | 18 + .../speech_llm/parts/utils/data_utils.py | 81 +- .../language_modeling/megatron_base_model.py | 12 +- .../language_modeling/megatron_gpt_model.py | 6 +- .../megatron_t5_adapter_model.py | 8 +- .../convert_gemma_nemo_to_hf.py | 342 ++++++ scripts/speech_llm/estimate_token_bins.py | 328 ++++++ .../speech_llm/export_conversations_to_tar.py | 41 + scripts/speech_llm/oomptimizer.py | 592 ++++++++++ .../estimate_duration_bins_2d.py | 5 +- scripts/speech_recognition/oomptimizer.py | 12 +- .../asr/test_asr_multitask_model_bpe.py | 22 +- .../common/prompt_formatters/conftest.py | 2 + .../test_gemma_prompt_formatter.py | 31 +- .../test_mistral_prompt_formatter.py | 2 +- .../common/test_2d_bucketing_constraint.py | 2 +- .../common/test_lhotse_dataloading.py | 494 +++++++- .../test_lhotse_multimodal_dataloading.py | 442 +++++++ .../test_lhotse_prompt_format_data_types.py | 297 +++++ .../common/test_lhotse_seqlen_filters.py | 184 +++ tests/collections/multimodal/test_emmett.py | 259 +++++ .../multimodal/test_speechllm_dataset.py | 395 +++++++ .../Multimodal Lhotse Dataloading.ipynb | 1014 +++++++++++++++++ .../Prompt Formatter Tutorial.ipynb | 458 ++++++++ 59 files changed, 8605 insertions(+), 951 deletions(-) create mode 100644 examples/multimodal/speech_llm/conf/bestow/modular_audio_gpt_config_cross_llama_lhotse_multi.yaml create mode 100644 examples/multimodal/speech_llm/conf/salm/modular_audio_t5_multi_config.yaml create mode 100644 nemo/collections/common/data/lhotse/sampling.py create mode 100644 nemo/collections/common/data/prompt_fn.py delete mode 100644 nemo/collections/common/prompts/fn.py create mode 100644 nemo/collections/common/prompts/plain.py create mode 100644 nemo/collections/common/prompts/t5nmt.py create mode 100644 scripts/checkpoint_converters/convert_gemma_nemo_to_hf.py create mode 100644 scripts/speech_llm/estimate_token_bins.py create mode 100644 scripts/speech_llm/export_conversations_to_tar.py create mode 100755 scripts/speech_llm/oomptimizer.py create mode 100644 tests/collections/common/test_lhotse_multimodal_dataloading.py create mode 100644 tests/collections/common/test_lhotse_prompt_format_data_types.py create mode 100644 tests/collections/common/test_lhotse_seqlen_filters.py create mode 100644 tests/collections/multimodal/test_emmett.py create mode 100644 tests/collections/multimodal/test_speechllm_dataset.py create mode 100644 tutorials/multimodal/Multimodal Lhotse Dataloading.ipynb create mode 100644 tutorials/multimodal/Prompt Formatter Tutorial.ipynb diff --git a/docs/source/asr/datasets.rst b/docs/source/asr/datasets.rst index 6868c4aaec72..8298567ff7cc 100644 --- a/docs/source/asr/datasets.rst +++ b/docs/source/asr/datasets.rst @@ -746,53 +746,266 @@ The final weight is the product of outer and inner weight: source_lang: pl target_lang: en -Configuring multi-modal dataloading +Configuring multimodal dataloading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Our configuration format supports specifying data sources from other modalities than just audio. -At this time, this support is extended to text-only data. We provide the following parser types: +At this time, this support is extended to audio and text modalities. We provide the following parser types: -* ``txt`` for raw text files, sharded or unsharded. 
This can represent, for example, language modeling data. -* ``txt_pair`` for pairs of raw text files, sharded or unsharded. This can represent, for example, machine translation data. +**Raw text files.** Simple text files where each line is an individual text example. This can represent standard language modeling data. +This parser is registered under ``type: txt``. -The key strength of this approach is that we can easily combine audio datasets and text datasets, -and benefit from every other technique we described above such as dynamic data mixing, data weighting, dynamic bucketing, and so on. -To enable multimodal dataloading, we provide several configuration options: +Data format examples:: -* ``use_multimodal_sampling`` when set to True, we'll discard the settings of ``batch_duration`` and ``quadratic_duration`` and consider the settings below instead. + # file: document_0.txt + This is a language modeling example. + Wall Street is expecting major news tomorrow. -* ``batch_tokens`` is the maximum number of tokens we want to find inside a mini-batch. Similarly to ``batch_duration``, this number does consider padding tokens too, therefore enabling bucketing is recommended to maximize the ratio of real vs padding tokens. + # file: document_1.txt + Invisible bats have stormed the city. + What an incredible event! -* ``token_equivalent_duration`` is used to be able to measure audio examples in the number of "tokens". For example, if we're using fbank with 0.01s frame shift and an acoustic model that has a subsampling factor of 0.08, then a reasonable setting for this could be 0.08 (which means every subsampled frame counts as one token). Calibrate this value to fit your needs. Note that this value acts as a "balancer" between how much audio data vs text data gets sampled into a mini-batch. +Dataloading configuration example:: -* ``quadratic_factor`` works the same way as ``quadratic_duration``, but is defined in the number of tokens. + input_cfg: + - type: txt + paths: /path/to/document_{0..1}.txt + language: en # optional -Example 3. Combine an ASR (audio-text) dataset with an MT (text-only) dataset so that mini-batches have some examples from both datasets. Provide a custom prompt field for both datasets (to be leveraged by a relevant dataset class): +Python object example:: -.. code-block:: yaml + from nemo.collections.common.data.lhotse.text_adapters import TextExample + + example = TextExample( + text="This is a language modeling example.", + language="en", # optional + ) + +Python dataloader instantiation example:: + + from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config + + dl = get_lhotse_dataloader_from_config({ + "input_cfg": [ + {"type": "txt", "paths": "/path/to/document_{0..1}.txt", "language": "en"}, + ], + "use_multimodal_dataloading": True, + "batch_size": 4, + }, + global_rank=0, + world_size=1, + dataset=MyDatasetClass(), # converts CutSet -> dict[str, Tensor] + tokenizer=my_tokenizer, + ) + +**Raw text file pairs.** Pairs of raw text files with corresponding lines. This can represent machine translation data. +This parser is registered under ``type: txt_pair``. + +Data format examples:: + + # file: document_en_0.txt + This is a machine translation example. + Wall Street is expecting major news tomorrow. + + # file: document_pl_0.txt + To jest przykład tłumaczenia maszynowego. + Wall Street spodziewa się jutro ważnych wiadomości. 
+ +Dataloading configuration example:: - use_multimodal_sampling: true - batch_tokens: 1024 - token_equivalent_duration: 0.08 # 0.01 frame shift * 8 subsampling factor - quadratic_factor: 50 - num_buckets: 30 - use_bucketing: true input_cfg: - - type: nemo_tarred - manifest_filepath: /path/to/manifest__OP_0..512_CL_.json - tarred_audio_filepath: /path/to/tarred_audio/audio__OP_0..512_CL_.tar - weight: 0.5 - tags: - lang: en - prompt: "Given the following recording, transcribe what the person is saying:" - type: txt_pair - source_path: /path/to/en__OP_0..512_CL_.txt - target_path: /path/to/pl__OP_0..512_CL_.txt - source_language: en - target_language: pl - weight: 0.5 - tags: - prompt: "Translate the following text to Polish:" + source_path: /path/to/document_en_{0..N}.txt + target_path: /path/to/document_pl_{0..N}.txt + source_language: en # optional + target_language: pl # optional + +Python object example:: + + from nemo.collections.common.data.lhotse.text_adapters import SourceTargetTextExample, TextExample + + example = SourceTargetTextExample( + source=TextExample( + text="This is a machine translation example.", + language="en", # optional + ), + target=TextExample( + text="To jest przykład tłumaczenia maszynowego.", + language="pl", # optional + ), + ) + +Python dataloader instantiation example:: + + from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config + + dl = get_lhotse_dataloader_from_config({ + "input_cfg": [ + { + "type": "txt_pair", + "source_path": "/path/to/document_en_{0..N}.txt", + "target_path": "/path/to/document_pl_{0..N}.txt", + "source_language": "en", + "target_language": "pl", + }, + ], + "use_multimodal_dataloading": True, + "prompt_format": "t5nmt", + "batch_size": 4, + }, + global_rank=0, + world_size=1, + dataset=MyDatasetClass(), # converts CutSet -> dict[str, Tensor] + tokenizer=my_tokenizer, + ) + +**NeMo multimodal conversations.** A JSON-Lines (JSONL) file that defines multi-turn conversations with mixed text and audio turns. +This parser is registered under ``type: multimodal_conversation``.
+ +Data format examples:: + + # file: chat_0.jsonl + {"id": "conv-0", "conversations": [{"from": "user", "value": "speak to me", "type": "text"}, {"from": "assistant", "value": "/path/to/audio.wav", "duration": 17.1, "type": "audio"}]} + +Dataloading configuration example:: + + token_equivalent_duration: 0.08 + input_cfg: + - type: multimodal_conversation + manifest_filepath: /path/to/chat_{0..N}.jsonl + audio_locator_tag: "[audio]" + +Python object example:: + + from lhotse import Recording + from nemo.collections.common.data.lhotse.text_adapters import NeMoMultimodalConversation, TextTurn, AudioTurn + + conversation = NeMoMultimodalConversation( + id="conv-0", + turns=[ + TextTurn(value="speak to me", role="user"), + AudioTurn(cut=Recording.from_file("/path/to/audio.wav").to_cut(), role="assistant", audio_locator_tag="[audio]"), + ], + token_equivalent_duration=0.08, # this value will be auto-inserted by the dataloader + ) + +Python dataloader instantiation example:: + + from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config + + dl = get_lhotse_dataloader_from_config({ + "input_cfg": [ + { + "type": "multimodal_conversation", + "manifest_filepath": "/path/to/chat_{0..N}.jsonl", + "audio_locator_tag": "[audio]", + }, + ], + "use_multimodal_dataloading": True, + "token_equivalent_duration": 0.08, + "prompt_format": "llama2", + "batch_size": 4, + }, + global_rank=0, + world_size=1, + dataset=MyDatasetClass(), # converts CutSet -> dict[str, Tensor] + tokenizer=my_tokenizer, + ) + +**Dataloading and bucketing of text and multimodal data.** When dataloading text or multimodal data, pay attention to the following config options (we provide example values for convenience; a combined sketch follows this list): + +* ``use_multimodal_sampling: true`` tells Lhotse to switch from measuring audio duration to measuring token counts; required for text. + +* ``prompt_format: "prompt-name"`` will apply a specified PromptFormatter during data sampling to accurately reflect its token counts. + +* ``measure_total_length: true`` customizes length measurement for decoder-only and encoder-decoder models. Decoder-only models consume a linear sequence of context + answer, so we should measure the total length (``true``). On the other hand, encoder-decoder models deal with two different sequence lengths: input (context) sequence length for the encoder, and output (answer) sequence length for the decoder. For such models, set this to ``false``. + +* ``min_tokens: 1``/``max_tokens: 4096`` filter examples based on their token count (after applying the prompt format). + +* ``min_tpt: 0.1``/``max_tpt: 10`` filter examples based on their output-token-per-input-token ratio. For example, a ``max_tpt: 10`` means we'll filter every example that has more than 10 output tokens per 1 input token. Very useful for removing sequence length outliers that lead to OOM. Use ``estimate_token_bins.py`` to view token count distributions for calibrating this value. + +* (multimodal-only) ``token_equivalent_duration: 0.08`` is used to be able to measure audio examples in the number of "tokens". For example, if we're using fbank with 0.01s frame shift and an acoustic model that has a subsampling factor of 8, then a reasonable setting for this could be 0.08 (which means every subsampled frame counts as one token). Calibrate this value to fit your needs.
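+
+For convenience, here is a minimal sketch showing how the options above fit together in a single dataloader config. It is illustrative only: the option names come from the list above, while the concrete values, ``MyDatasetClass``, and ``my_tokenizer`` are placeholders you should calibrate and replace for your own setup::
+
+    from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config
+
+    dl = get_lhotse_dataloader_from_config({
+        "input_cfg": [
+            {
+                "type": "multimodal_conversation",
+                "manifest_filepath": "/path/to/chat_{0..N}.jsonl",
+                "audio_locator_tag": "[audio]",
+            },
+        ],
+        # Options explained in the list above; the values are examples to calibrate.
+        "use_multimodal_sampling": True,
+        "prompt_format": "llama2",
+        "measure_total_length": True,  # decoder-only LLM; set to False for encoder-decoder models
+        "min_tokens": 1,
+        "max_tokens": 4096,
+        "min_tpt": 0.1,
+        "max_tpt": 10,
+        "token_equivalent_duration": 0.08,  # multimodal-only: measures audio length in "tokens"
+        "batch_size": 4,
+    },
+        global_rank=0,
+        world_size=1,
+        dataset=MyDatasetClass(),  # converts CutSet -> dict[str, Tensor]
+        tokenizer=my_tokenizer,
+    )
+
+The same keys can equally be set in the YAML ``train_ds`` configuration, as shown in the example at the end of this section.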
+ +**Text/multimodal bucketing and OOMptimizer.** Analogous to bucketing for audio data, we provide two scripts to support efficient bucketing: + +* ``scripts/speech_llm/estimate_token_bins.py`` which estimates 1D or 2D buckets based on the input config, tokenizer, and prompt format. It also estimates input/output token count distribution and suggested ``max_tpt`` (token-per-token) filtering values. + +* (experimental) ``scripts/speech_llm/oomptimizer.py`` which works with SALM/BESTOW GPT/T5 models and estimates the optimal ``bucket_batch_size`` for a given model config and bucket bins value. Given the complexity of Speech LLM, some configurations may not be supported yet at the time of writing (e.g., model parallelism). + +To enable bucketing, set ``batch_size: null`` and use the following options: + +* ``use_bucketing: true`` + +* ``bucket_duration_bins`` - the output of ``estimate_token_bins.py``. If ``null``, it will be estimated at the start of training at the cost of some run time (not recommended). + +* (oomptimizer-only) ``bucket_batch_size`` - the output of OOMptimizer. + +* (non-oomptimizer-only) ``batch_tokens`` is the maximum number of tokens we want to find inside a mini-batch. Similarly to ``batch_duration``, this number does consider padding tokens too, therefore enabling bucketing is recommended to maximize the ratio of real vs padding tokens. Note that it's just a heuristic for determining the optimal batch sizes for different buckets, and may be less efficient than using OOMptimizer. + +* (non-oomptimizer-only) ``quadratic_factor`` is a quadratic penalty to equalize the GPU memory usage between buckets of short and long sequence lengths for models with quadratic memory usage. It is only a heuristic and may not be as efficient as using OOMptimizer. + +**Joint dataloading of text/audio/multimodal data.** The key strength of this approach is that we can easily combine audio datasets and text datasets, +and benefit from every other technique we described in this doc, such as dynamic data mixing, data weighting, dynamic bucketing, and so on. + +This approach is described in the `EMMeTT`_ paper. There's also a notebook tutorial called Multimodal Lhotse Dataloading. We construct a separate sampler (with its own batching settings) for each modality, +and specify how the samplers should be fused together via the option ``sampler_fusion``: + +* ``sampler_fusion: "round_robin"`` will iterate a single sampler per step, taking turns. For example: step 0 - audio batch, step 1 - text batch, step 2 - audio batch, etc. + +* ``sampler_fusion: "randomized_round_robin"`` is similar, but at each step chooses a sampler randomly using ``sampler_weights: [w0, w1]`` (weights can be unnormalized). + +* ``sampler_fusion: "zip"`` will draw a mini-batch from each sampler at every step, and merge them into a single ``CutSet``. This approach combines well with multimodal gradient accumulation (run forward+backward for one modality, then the other, then the update step). + +.. _EMMeTT: https://arxiv.org/abs/2409.13523 + +Example. Combine an ASR (audio-text) dataset with an MT (text-only) dataset so that mini-batches have some examples from both datasets: + +.. code-block:: yaml + + model: + ... + train_ds: + multi_config: true + sampler_fusion: zip + shuffle: true + num_workers: 4 + + audio: + prompt_format: t5nmt + use_bucketing: true + min_duration: 0.5 + max_duration: 30.0 + max_tps: 12.0 + bucket_duration_bins: [[3.16, 10], [3.16, 22], [5.18, 15], ...] + bucket_batch_size: [1024, 768, 832, ...]
+ input_cfg: + - type: nemo_tarred + manifest_filepath: /path/to/manifest__OP_0..512_CL_.json + tarred_audio_filepath: /path/to/tarred_audio/audio__OP_0..512_CL_.tar + weight: 0.5 + tags: + context: "Translate the following to English" + + text: + prompt_format: t5nmt + use_multimodal_sampling: true + min_tokens: 1 + max_tokens: 256 + min_tpt: 0.333 + max_tpt: 3.0 + measure_total_length: false + use_bucketing: true + bucket_duration_bins: [[10, 4], [10, 26], [15, 10], ...] + bucket_batch_size: [512, 128, 192, ...] + input_cfg: + - type: txt_pair + source_path: /path/to/en__OP_0..512_CL_.txt + target_path: /path/to/pl__OP_0..512_CL_.txt + source_language: en + target_language: pl + weight: 0.5 + tags: + question: "Translate the following to Polish" .. caution:: We strongly recommend to use multiple shards for text files as well so that different nodes and dataloading workers are able to randomize the order of text iteration. Otherwise, multi-GPU training has a high risk of duplication of text examples. diff --git a/examples/multimodal/speech_llm/conf/bestow/modular_audio_gpt_config_cross_llama_lhotse.yaml b/examples/multimodal/speech_llm/conf/bestow/modular_audio_gpt_config_cross_llama_lhotse.yaml index 6145a1a4c462..38f7dcbd80f9 100644 --- a/examples/multimodal/speech_llm/conf/bestow/modular_audio_gpt_config_cross_llama_lhotse.yaml +++ b/examples/multimodal/speech_llm/conf/bestow/modular_audio_gpt_config_cross_llama_lhotse.yaml @@ -166,6 +166,7 @@ model: modality_adapter: _target_: nemo.collections.asr.modules.ConformerEncoder + sync_max_audio_length: false feat_in: 1024 feat_out: -1 # you may set it if you need different output size other than the default d_model n_layers: 2 diff --git a/examples/multimodal/speech_llm/conf/bestow/modular_audio_gpt_config_cross_llama_lhotse_multi.yaml b/examples/multimodal/speech_llm/conf/bestow/modular_audio_gpt_config_cross_llama_lhotse_multi.yaml new file mode 100644 index 000000000000..124ab715c162 --- /dev/null +++ b/examples/multimodal/speech_llm/conf/bestow/modular_audio_gpt_config_cross_llama_lhotse_multi.yaml @@ -0,0 +1,365 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This configuration is similar to modular_audio_gpt_config_cross_llama_lhotse.yaml, +# with the difference being in how it performs multimodal sampling. +# The changes are in model.data.train_ds section. +# You'll notice that it defines two sub-sections: audio and text. +# Their names are arbitrary in the sense that you may define more subsections as you like, also with repeated modalities. +# We still set up a single dataloader, but each sub-section produces its own sampler with its own batch size related settings. +# That means each sub-section may decide about its own static/dynamic batch sizes, bucketing, etc. 
+# These different samplers are later combined into a single sampler using one of three available sampler fusion strategies: +# round_robin (taking turns), randomized_round_robin (at each step select a sampler according to weights), +# or zip (sample mini-batch from each and combine them). +name: megatron_audio_gpt_bestow_lhotse_multi_sampler + +# Note: This config has been updated to work with PromptFormatter API. +# If you used an older version that defined a `train_ds.prompt_template` field, +# you should specify the prompt format using `train_ds..prompt_format` now instead. + +trainer: + devices: 1 + accelerator: gpu + num_nodes: 1 + precision: bf16-mixed + logger: False # logger provided by exp_manager + enable_checkpointing: False + use_distributed_sampler: False + max_epochs: 9999 + max_steps: 1000000 # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches + limit_train_batches : 1000 + log_every_n_steps: 10 # frequency with which training steps are logged + val_check_interval: 1000 # If is an int n > 1, will run val every n training steps, if a float 0.0 - 1.0 will run val every epoch fraction, e.g. 0.25 will run val every quarter epoch + gradient_clip_val: 1.0 + accumulate_grad_batches: 1 + +model_target: nemo.collections.multimodal.speech_llm.models.modular_models.CrossAttendModularAudioGPTModel + +exp_manager: + # explicit_log_dir: null + exp_dir: null + name: ${name} + create_wandb_logger: False + wandb_logger_kwargs: + project: null + name: null + resume_if_exists: True + resume_ignore_no_checkpoint: True + create_checkpoint_callback: True + checkpoint_callback_params: + monitor: validation_${model.data.validation_ds.metric.name} + save_top_k: 1 + mode: min + save_nemo_on_train_end: True + filename: '${name}--{${exp_manager.checkpoint_callback_params.monitor}:.3f}-{step}-{epoch}' + model_parallel_size: ${model.tensor_model_parallel_size} + always_save_nemo: False + save_best_model: True + create_early_stopping_callback: False + early_stopping_callback_params: + monitor: "val_loss" + mode: "min" + min_delta: 0.001 + patience: 10 + verbose: True + strict: False # Should be False to avoid a runtime error where EarlyStopping says monitor is unavailable, which sometimes happens with resumed training. + + +model: + seed: 1234 + tensor_model_parallel_size: 1 # intra-layer model parallelism + pipeline_model_parallel_size: 1 # inter-layer model parallelism + + pretrained_audio_model: stt_en_fastconformer_transducer_large + freeze_llm: True + freeze_audio_encoder: False + freeze_modality_adapter: False + load_audio_encoder: True + + global_batch_size: 128 + micro_batch_size: 4 + restore_from_path: ??? # Path to an existing .nemo model you wish to add new tasks to or run inference with + resume_from_checkpoint: null # The path to a checkpoint file to continue the training, restores the whole state including the epoch, step, LR schedulers, apex, etc. + save_nemo_on_validation_end: False # Saves an inference ready .nemo file every time a checkpoint is saved during training. + sync_batch_comm: False + megatron_amp_O2: False + + ## Sequence Parallelism + # Makes tensor parallelism more memory efficient for LLMs (20B+) by parallelizing layer norms and dropout sequentially + # See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details. 
+ sequence_parallel: False + + ## Activation Checkpoint + activations_checkpoint_granularity: null # 'selective' or 'full' + activations_checkpoint_method: null # 'uniform', 'block', not used with 'selective' + # 'uniform' divides the total number of transformer layers and checkpoints the input activation + # of each chunk at the specified granularity + # 'block' checkpoints the specified number of layers per pipeline stage at the specified granularity + activations_checkpoint_num_layers: null # not used with 'selective' + activations_checkpoint_layers_per_pipeline: null + answer_only_loss: True + gradient_as_bucket_view: False + + hidden_dropout: 0.0 + attention_dropout: 0.0 + ffn_dropout: 0.0 + + # use_am_tokenizer: True + # override_vocab_size: 1024 + + peft: + peft_scheme: "lora" # can be either lora, adapter, ia3 or ptuning + restore_from_path: null + + # Used for adapter peft training + adapter_tuning: + type: 'parallel_adapter' # this should be either 'parallel_adapter' or 'linear_adapter' + adapter_dim: 32 + adapter_dropout: 0.0 + norm_position: 'pre' # This can be set to 'pre', 'post' or null, 'pre' is normally what is used. + column_init_method: 'xavier' # IGNORED if linear_adapter is used, options: xavier, zero or normal + row_init_method: 'zero' # IGNORED if linear_adapter is used, options: xavier, zero or normal + norm_type: 'mixedfusedlayernorm' # IGNORED if layer_adapter is used, options are ['layernorm', 'mixedfusedlayernorm'] + layer_selection: null # selects in which layers to add adapters, e.g. [1,12] will add adapters to layer 1 (lowest) and 12. null will apply adapters to all layers + weight_tying: False + position_embedding_strategy: null # used only when weight_tying is True + + lora_tuning: + target_modules: ['attention_qkv','attention_dense','mlp_fc1','mlp_fc2'] # this can either be 'attention_qkv','attention_dense','mlp_fc1','mlp_fc2', attention (qkv & dense), mlp (fc1 & fc2) + adapter_dim: 32 + alpha: ${model.peft.lora_tuning.adapter_dim} + adapter_dropout: 0.0 + column_init_method: 'xavier' # IGNORED if linear_adapter is used, options: xavier, zero or normal + row_init_method: 'zero' # IGNORED if linear_adapter is used, options: xavier, zero or normal + layer_selection: null # selects in which layers to add lora adapters. e.g. [1,12] will add lora to layer 1 (lowest) and 12. 
null will apply adapters to all layers + weight_tying: False + position_embedding_strategy: null # used only when weight_tying is True + + # Used for p-tuning peft training + p_tuning: + virtual_tokens: 10 # The number of virtual tokens the prompt encoder should add at the start of the sequence + bottleneck_dim: 1024 # the size of the prompt encoder mlp bottleneck + embedding_dim: 1024 # the size of the prompt encoder embeddings + init_std: 0.023 + + perception: + target: nemo.collections.multimodal.speech_llm.modules.perception_modules.AudioPerceptionModule + use_multi_layer_feat: false + xattn: + target: nemo.collections.multimodal.speech_llm.modules.perception_modules.TransformerCrossAttention + num_attention_heads: 8 + attn_score_dropout: 0.1 + attn_layer_dropout: 0.1 + ffn_dropout: 0.1 + hidden_act: "relu" + pre_ln: true + pre_ln_final_layer_norm: true + + multi_layer_feat: + layer_idx_list: [0,16] # layer indices to extract features from + aggregator: + mode: "cat" # ways to combine features from different layers, choices=['cat','sum','mean', 'max', 'min'], default to concat ('cat') + pooling: "avg" # ways to pool features if they have different temporal lengths and align_mode=min, choices=['mean', 'max', 'min'] + align_mode: "min" # if features have different temporal lengths, set `min` to pool to the shortest length or `max` to repeat to the longest. + + modality_adapter: + _target_: nemo.collections.asr.modules.ConformerEncoder + sync_max_audio_length: false + feat_in: 1024 + feat_out: -1 # you may set it if you need different output size other than the default d_model + n_layers: 2 + d_model: 512 + + # Sub-sampling parameters + subsampling: dw_striding # vggnet, striding, stacking or stacking_norm, dw_striding + subsampling_factor: 8 # must be power of 2 for striding and vggnet + subsampling_conv_channels: 256 # set to -1 to make it equal to the d_model + causal_downsampling: false + + # Reduction parameters: Can be used to add another subsampling layer at a given position. + # Having a 2x reduction will speedup the training and inference speech while keeping similar WER. + # Adding it at the end will give the best WER while adding it at the beginning will give the best speedup. 
+ reduction: null # pooling, striding, or null + reduction_position: null # Encoder block index or -1 for subsampling at the end of encoder + reduction_factor: 1 + + # Feed forward module's params + ff_expansion_factor: 4 + + # Multi-headed Attention Module's params + self_attention_model: rel_pos # rel_pos or abs_pos + n_heads: 8 # may need to be lower for smaller d_models + # [left, right] specifies the number of steps to be seen from left and right of each step in self-attention + att_context_size: [-1, -1] # -1 means unlimited context + att_context_style: regular # regular or chunked_limited + xscaling: true # scales up the input embeddings by sqrt(d_model) + untie_biases: true # unties the biases of the TransformerXL layers + pos_emb_max_len: 5000 + + # Convolution module's params + conv_kernel_size: 9 + conv_norm_type: 'batch_norm' # batch_norm or layer_norm or groupnormN (N specifies the number of groups) + # conv_context_size can be"causal" or a list of two integers while conv_context_size[0]+conv_context_size[1]+1==conv_kernel_size + # null means [(kernel_size-1)//2, (kernel_size-1)//2], and 'causal' means [(kernel_size-1), 0] + conv_context_size: null + + ### regularization + dropout: 0.1 # The dropout used in most of the Conformer Modules + dropout_pre_encoder: 0.1 # The dropout used before the encoder + dropout_emb: 0.0 # The dropout used for embeddings + dropout_att: 0.1 # The dropout for multi-headed attention modules + + # set to non-zero to enable stochastic depth + stochastic_depth_drop_prob: 0.0 + stochastic_depth_mode: linear # linear or uniform + stochastic_depth_start_layer: 1 + + spec_augment: + _target_: nemo.collections.asr.modules.SpectrogramAugmentation + freq_masks: 2 # set to zero to disable it + time_masks: 10 # set to zero to disable it + freq_width: 27 + time_width: 0.05 + + # the following are read from the pretrained AM: + # output_dim: null + # encoder: null + # preprocessor: null + + data: + end_string: "[EOG]" + train_ds: + use_lhotse: true + seed: 0 + shard_seed: "trng" + num_workers: 4 + shuffle: true + + multi_config: true + sampler_fusion: randomized_round_robin + sampler_weights: + audio: 0.5 + text: 0.5 + + audio: + input_cfg: ??? + batch_size: null + batch_duration: 360 + quadratic_factor: 15 + use_bucketing: true + num_buckets: 30 + bucket_buffer_size: 20000 + prompt_format: llama2 + text: + input_cfg: ??? + use_multimodal_sampling: true + batch_tokens: 8000 + quadratic_factor: 192 + use_bucketing: true + num_buckets: 30 + bucket_buffer_size: 20000 + prompt_format: llama2 + + global_batch_size: ${model.global_batch_size} + micro_batch_size: ${model.micro_batch_size} + max_seq_length: 2048 + min_seq_length: 1 + context_key: 'context' + answer_key: 'answer' + add_eos: True + # add_eos: False + end_string: ${model.data.end_string} + add_sep: False + add_bos: False + separate_prompt_and_response_with_newline: False + truncation_field: "context" # Options: ['context', 'answer'] + index_mapping_dir: null # Path to a directory to write index mapping files. + + validation_ds: + manifest_filepath: ??? # Path to a list of JSONL files corresponding to the source data. Data format is identical to train_ds. 
+ force_finite: true # workaround to allow using input_cfg + global_batch_size: ${model.global_batch_size} + micro_batch_size: ${model.micro_batch_size} + shuffle: False + num_workers: 1 + pin_memory: True + max_seq_length: 2048 + min_seq_length: 1 + drop_last: False + context_key: ${model.data.train_ds.context_key} + answer_key: ${model.data.train_ds.answer_key} + add_eos: ${model.data.train_ds.add_eos} + end_string: ${model.data.end_string} + add_sep: ${model.data.train_ds.add_sep} + add_bos: ${model.data.train_ds.add_bos} + separate_prompt_and_response_with_newline: ${model.data.train_ds.separate_prompt_and_response_with_newline} + write_predictions_to_file: False + output_file_path_prefix: null # Prefix of the file to write predictions to. + truncation_field: "context" # Options: ['context', 'answer'] + index_mapping_dir: null # Path to a directory to write index mapping files. + tokens_to_generate: 128 + # ASR configs + sample_rate: 16000 #${model.audio_encoder.preprocessor.sample_rate} + + log_every_n_steps: 10 + metric: + name: "wer" # Name of the evaluation metric to use. Options: ['exact_string_match', 'loss'] + average: null # Average the metric over the dataset. Options: ['macro', 'micro']. Works only for 'F1', 'accuracy' etc. Refer to torchmetrics for metrics where this is supported. + num_classes: null + + test_ds: + manifest_filepath: null # Path to a list of JSONL files corresponding to the source data. Data format is identical to train_ds. + force_finite: true # workaround to allow using input_cfg + names: null # Names of the corresponding datasets used to log metrics. + global_batch_size: ${model.global_batch_size} + micro_batch_size: ${model.micro_batch_size} + shuffle: False + num_workers: 1 + pin_memory: True + max_seq_length: 2048 + min_seq_length: 1 + drop_last: False + context_key: ${model.data.train_ds.context_key} + answer_key: ${model.data.train_ds.answer_key} + add_eos: ${model.data.train_ds.add_eos} + end_string: ${model.data.end_string} + add_sep: ${model.data.train_ds.add_sep} + add_bos: ${model.data.train_ds.add_bos} + separate_prompt_and_response_with_newline: ${model.data.train_ds.separate_prompt_and_response_with_newline} + write_predictions_to_file: False + output_file_path_prefix: null # Prefix of the file to write predictions to. + truncation_field: "context" # Options: ['context', 'answer'] + index_mapping_dir: null # Path to a directory to write index mapping files. + # ASR configs + sample_rate: 16000 #${model.audio_encoder.preprocessor.sample_rate} + + # metric: + # name: "loss" # Name of the evaluation metric to use. Options: ['exact_string_match', 'loss'] + # average: null # Average the metric over the dataset. Options: ['macro', 'micro']. Works only for 'F1', 'accuracy' etc. Refer to torchmetrics for metrics where this is supported. 
+ # num_classes: null + + optim: + name: fused_adam + lr: 1e-4 + weight_decay: 0.01 + betas: + - 0.9 + - 0.98 + sched: + name: CosineAnnealing + warmup_steps: 50 + min_lr: 0.0 # min_lr must be 0.0 for prompt learning when pipeline parallel > 1 + constant_steps: 0 # Constant steps should also be 0 when min_lr=0 + monitor: val_loss + reduce_on_plateau: false diff --git a/examples/multimodal/speech_llm/conf/modular_audio_gpt_config_eval.yaml b/examples/multimodal/speech_llm/conf/modular_audio_gpt_config_eval.yaml index 62b9030b4708..658485aa6807 100644 --- a/examples/multimodal/speech_llm/conf/modular_audio_gpt_config_eval.yaml +++ b/examples/multimodal/speech_llm/conf/modular_audio_gpt_config_eval.yaml @@ -104,7 +104,7 @@ model: prompt_template: ${data.train_ds.prompt_template} # don't change, let hydra resolve from saved config tokens_to_generate: 512 log_every_n_steps: 1 - sample_rate: ${data.train_ds.sample_rate} # don't change, let hydra resolve from saved config + sample_rate: 16000 # don't change, let hydra resolve from saved config audio_locator: null # set it to allow multiple audios in a sample, e.g. '|audio|', and use it in the context field of manifest to specify the locations of audios (`audio_filepath` is a list of audios). metric: diff --git a/examples/multimodal/speech_llm/conf/salm/modular_audio_gpt_config_llama_lhotse.yaml b/examples/multimodal/speech_llm/conf/salm/modular_audio_gpt_config_llama_lhotse.yaml index cc848562f70e..c10e3403b702 100644 --- a/examples/multimodal/speech_llm/conf/salm/modular_audio_gpt_config_llama_lhotse.yaml +++ b/examples/multimodal/speech_llm/conf/salm/modular_audio_gpt_config_llama_lhotse.yaml @@ -154,6 +154,7 @@ model: modality_adapter: _target_: nemo.collections.asr.modules.ConformerEncoder + sync_max_audio_length: false feat_in: 1024 feat_out: -1 # you may set it if you need different output size other than the default d_model n_layers: 2 diff --git a/examples/multimodal/speech_llm/conf/salm/modular_audio_t5_config.yaml b/examples/multimodal/speech_llm/conf/salm/modular_audio_t5_config.yaml index a76de9e312e2..88ab42ba4949 100644 --- a/examples/multimodal/speech_llm/conf/salm/modular_audio_t5_config.yaml +++ b/examples/multimodal/speech_llm/conf/salm/modular_audio_t5_config.yaml @@ -139,6 +139,7 @@ model: modality_adapter: _target_: nemo.collections.asr.modules.ConformerEncoder + sync_max_audio_length: false feat_in: 1024 feat_out: -1 # you may set it if you need different output size other than the default d_model n_layers: 2 diff --git a/examples/multimodal/speech_llm/conf/salm/modular_audio_t5_multi_config.yaml b/examples/multimodal/speech_llm/conf/salm/modular_audio_t5_multi_config.yaml new file mode 100644 index 000000000000..88a5dbe26cec --- /dev/null +++ b/examples/multimodal/speech_llm/conf/salm/modular_audio_t5_multi_config.yaml @@ -0,0 +1,343 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# This configuration is similar to modular_audio_t5_multi_config.yaml, +# with the difference being in how it performs multimodal sampling. +# The changes are in model.data.train_ds section. +# You'll notice that it defines two sub-sections: audio and text. +# Their names are arbitrary in the sense that you may define more subsections as you like, also with repeated modalities. +# We still set up a single dataloader, but each sub-section produces its own sampler with its own batch size related settings. +# That means each sub-section may decide about its own static/dynamic batch sizes, bucketing, etc. +# These different samplers are later combined into a single sampler using one of three available sampler fusion strategies: +# round_robin (taking turns), randomized_round_robin (at each step select a sampler according to weights), +# or zip (sample mini-batch from each and combine them). +name: megatron_audio_t5_salm_lhotse_multi_sampler + +# Note: This config has been updated to work with PromptFormatter API. +# If you used an older version that defined a `train_ds.prompt_template` field, +# you should specify the prompt format using `train_ds..prompt_format` now instead. + +trainer: + devices: 1 + accelerator: gpu + num_nodes: 1 + precision: bf16-mixed + logger: False # logger provided by exp_manager + enable_checkpointing: False + use_distributed_sampler: False + max_epochs: 9999 + max_steps: 1000000 # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches + limit_train_batches : 1000 + log_every_n_steps: 10 # frequency with which training steps are logged + val_check_interval: 1000 # If is an int n > 1, will run val every n training steps, if a float 0.0 - 1.0 will run val every epoch fraction, e.g. 0.25 will run val every quarter epoch + gradient_clip_val: 1.0 + accumulate_grad_batches: 1 + +model_target: nemo.collections.multimodal.speech_llm.models.modular_t5_models.ModularizedAudioT5Model + +exp_manager: + # explicit_log_dir: null + exp_dir: null + name: ${name} + create_wandb_logger: False + wandb_logger_kwargs: + project: null + name: null + resume_if_exists: True + resume_ignore_no_checkpoint: True + create_checkpoint_callback: True + checkpoint_callback_params: + monitor: validation_${model.data.validation_ds.metric.name} + save_top_k: 1 + mode: min + save_nemo_on_train_end: True + filename: '${name}--{${exp_manager.checkpoint_callback_params.monitor}:.3f}-{step}-{epoch}' + model_parallel_size: ${model.tensor_model_parallel_size} + always_save_nemo: False + save_best_model: True + create_early_stopping_callback: False + early_stopping_callback_params: + monitor: "val_loss" + mode: "min" + min_delta: 0.001 + patience: 10 + verbose: True + strict: False # Should be False to avoid a runtime error where EarlyStopping says monitor is unavailable, which sometimes happens with resumed training. + + +model: + virtual_prompt_style: 'no-prompts' # make cls happy + audio_prompt_first: False + seed: 1234 + tensor_model_parallel_size: 1 # intra-layer model parallelism + pipeline_model_parallel_size: 1 # inter-layer model parallelism + + pretrained_audio_model: stt_en_fastconformer_transducer_large + freeze_llm: True + freeze_audio_encoder: False + freeze_modality_adapter: False + load_audio_encoder: True + + global_batch_size: 128 + micro_batch_size: 4 + language_model_path: ??? 
# Path to an existing .nemo model you wish to add new tasks to or run inference with + resume_from_checkpoint: null # The path to a checkpoint file to continue the training, restores the whole state including the epoch, step, LR schedulers, apex, etc. + save_nemo_on_validation_end: False # Saves an inference ready .nemo file every time a checkpoint is saved during training. + sync_batch_comm: False + megatron_amp_O2: False + + ## Sequence Parallelism + # Makes tensor parallelism more memory efficient for LLMs (20B+) by parallelizing layer norms and dropout sequentially + # See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details. + sequence_parallel: False + + ## Activation Checkpoint + activations_checkpoint_granularity: null # 'selective' or 'full' + activations_checkpoint_method: null # 'uniform', 'block', not used with 'selective' + # 'uniform' divides the total number of transformer layers and checkpoints the input activation + # of each chunk at the specified granularity + # 'block' checkpoints the specified number of layers per pipeline stage at the specified granularity + activations_checkpoint_num_layers: null # not used with 'selective' + activations_checkpoint_layers_per_pipeline: null + answer_only_loss: True + gradient_as_bucket_view: True + + hidden_dropout: 0.0 + attention_dropout: 0.0 + ffn_dropout: 0.0 + + # use_am_tokenizer: True + # override_vocab_size: 1024 + + lora_tuning: + kqv_adapter_dim: 128 + kv_adapter_dim: 64 + q_adapter_dim: 32 + adapter_dropout: 0.0 + column_init_method: 'xavier' # IGNORED if linear_adapter is used, options: xavier, zero or normal + row_init_method: 'zero' # IGNORED if linear_adapter is used, options: xavier, zero or normal + + peft: + peft_scheme: "adapter" # can be either adapter,ia3, or ptuning + restore_from_path: null + + # Used for adapter peft training + adapter_tuning: + type: 'parallel_adapter' # this should be either 'parallel_adapter' or 'linear_adapter' + adapter_dim: 32 + adapter_dropout: 0.0 + norm_position: 'pre' # This can be set to 'pre', 'post' or null, 'pre' is normally what is used. 
+ column_init_method: 'xavier' # IGNORED if linear_adapter is used, options: xavier, zero or normal + row_init_method: 'zero' # IGNORED if linear_adapter is used, options: xavier, zero or normal + norm_type: 'mixedfusedlayernorm' # IGNORED if layer_adapter is used, options are ['layernorm', 'mixedfusedlayernorm'] + + # Used for p-tuning peft training + p_tuning: + virtual_tokens: 10 # The number of virtual tokens the prompt encoder should add at the start of the sequence + bottleneck_dim: 1024 # the size of the prompt encoder mlp bottleneck + embedding_dim: 1024 # the size of the prompt encoder embeddings + init_std: 0.023 + + perception: + target: nemo.collections.multimodal.speech_llm.modules.perception_modules.AudioPerceptionModule + use_multi_layer_feat: false + + modality_adapter: + _target_: nemo.collections.asr.modules.ConformerEncoder + sync_max_audio_length: false + feat_in: 1024 + feat_out: -1 # you may set it if you need different output size other than the default d_model + n_layers: 2 + d_model: 512 + + # Sub-sampling parameters + subsampling: dw_striding # vggnet, striding, stacking or stacking_norm, dw_striding + subsampling_factor: 8 # must be power of 2 for striding and vggnet + subsampling_conv_channels: 256 # set to -1 to make it equal to the d_model + causal_downsampling: false + + # Reduction parameters: Can be used to add another subsampling layer at a given position. + # Having a 2x reduction will speedup the training and inference speech while keeping similar WER. + # Adding it at the end will give the best WER while adding it at the beginning will give the best speedup. + reduction: null # pooling, striding, or null + reduction_position: null # Encoder block index or -1 for subsampling at the end of encoder + reduction_factor: 1 + + # Feed forward module's params + ff_expansion_factor: 4 + + # Multi-headed Attention Module's params + self_attention_model: rel_pos # rel_pos or abs_pos + n_heads: 8 # may need to be lower for smaller d_models + # [left, right] specifies the number of steps to be seen from left and right of each step in self-attention + att_context_size: [-1, -1] # -1 means unlimited context + att_context_style: regular # regular or chunked_limited + xscaling: true # scales up the input embeddings by sqrt(d_model) + untie_biases: true # unties the biases of the TransformerXL layers + pos_emb_max_len: 5000 + + # Convolution module's params + conv_kernel_size: 9 + conv_norm_type: 'batch_norm' # batch_norm or layer_norm or groupnormN (N specifies the number of groups) + # conv_context_size can be"causal" or a list of two integers while conv_context_size[0]+conv_context_size[1]+1==conv_kernel_size + # null means [(kernel_size-1)//2, (kernel_size-1)//2], and 'causal' means [(kernel_size-1), 0] + conv_context_size: null + + ### regularization + dropout: 0.1 # The dropout used in most of the Conformer Modules + dropout_pre_encoder: 0.1 # The dropout used before the encoder + dropout_emb: 0.0 # The dropout used for embeddings + dropout_att: 0.1 # The dropout for multi-headed attention modules + + # set to non-zero to enable stochastic depth + stochastic_depth_drop_prob: 0.0 + stochastic_depth_mode: linear # linear or uniform + stochastic_depth_start_layer: 1 + + spec_augment: + _target_: nemo.collections.asr.modules.SpectrogramAugmentation + freq_masks: 2 # set to zero to disable it + time_masks: 10 # set to zero to disable it + freq_width: 27 + time_width: 0.05 + + # the following are read from the pretrained AM: + # output_dim: null + # encoder: null + # 
preprocessor: null + + data: + train_ds: + use_lhotse: true + seed: 0 + shard_seed: "trng" + num_workers: 4 + shuffle: true + + multi_config: true + sampler_fusion: randomized_round_robin + sampler_weights: + audio: 0.5 + text: 0.5 + + audio: + input_cfg: ??? + prompt_format: t5nmt + batch_size: null + batch_duration: 360 + quadratic_factor: 15 + use_bucketing: true + num_buckets: 30 + bucket_buffer_size: 20000 + text: + input_cfg: ??? + prompt_format: t5nmt + use_multimodal_sampling: true + batch_tokens: 8000 + quadratic_factor: 192 + use_bucketing: true + num_buckets: 30 + bucket_buffer_size: 20000 + + global_batch_size: ${model.global_batch_size} + micro_batch_size: ${model.micro_batch_size} + max_seq_length: 2048 + min_seq_length: 1 + context_key: 'context' + answer_key: 'answer' + add_eos: True + # add_eos: False + add_sep: True + add_bos: False + separate_prompt_and_response_with_newline: False + truncation_field: "context" # Options: ['context', 'answer'] + index_mapping_dir: null # Path to a directory to write index mapping files. + + validation_ds: + force_finite: true # workaround to allow using input_cfg + prompt_format: t5nmt + global_batch_size: ${model.global_batch_size} + micro_batch_size: ${model.micro_batch_size} + shuffle: False + num_workers: 1 + pin_memory: True + max_seq_length: 2048 + min_seq_length: 1 + drop_last: False + context_key: ${model.data.train_ds.context_key} + answer_key: ${model.data.train_ds.answer_key} + add_eos: ${model.data.train_ds.add_eos} + add_sep: ${model.data.train_ds.add_sep} + add_bos: ${model.data.train_ds.add_bos} + separate_prompt_and_response_with_newline: ${model.data.train_ds.separate_prompt_and_response_with_newline} + write_predictions_to_file: False + output_file_path_prefix: null # Prefix of the file to write predictions to. + truncation_field: "context" # Options: ['context', 'answer'] + index_mapping_dir: null # Path to a directory to write index mapping files. + tokens_to_generate: 128 + # ASR configs + sample_rate: 16000 + + log_every_n_steps: 10 + metric: + name: "wer" # Name of the evaluation metric to use. Options: ['exact_string_match', 'loss'] + average: null # Average the metric over the dataset. Options: ['macro', 'micro']. Works only for 'F1', 'accuracy' etc. Refer to torchmetrics for metrics where this is supported. + num_classes: null + + test_ds: + manifest_filepath: null # Path to a list of JSONL files corresponding to the source data. Data format is identical to train_ds. + force_finite: true # workaround to allow using input_cfg + prompt_format: t5nmt + names: null # Names of the corresponding datasets used to log metrics. + global_batch_size: ${model.global_batch_size} + micro_batch_size: ${model.micro_batch_size} + shuffle: False + num_workers: 1 + pin_memory: True + max_seq_length: 2048 + min_seq_length: 1 + drop_last: False + context_key: ${model.data.train_ds.context_key} + answer_key: ${model.data.train_ds.answer_key} + add_eos: ${model.data.train_ds.add_eos} + add_sep: ${model.data.train_ds.add_sep} + add_bos: ${model.data.train_ds.add_bos} + separate_prompt_and_response_with_newline: ${model.data.train_ds.separate_prompt_and_response_with_newline} + write_predictions_to_file: False + output_file_path_prefix: null # Prefix of the file to write predictions to. + truncation_field: "context" # Options: ['context', 'answer'] + index_mapping_dir: null # Path to a directory to write index mapping files. + # ASR configs + sample_rate: 16000 + + # metric: + # name: "loss" # Name of the evaluation metric to use. 
Options: ['exact_string_match', 'loss'] + # average: null # Average the metric over the dataset. Options: ['macro', 'micro']. Works only for 'F1', 'accuracy' etc. Refer to torchmetrics for metrics where this is supported. + # num_classes: null + + optim: + name: fused_adam + lr: 1e-4 + weight_decay: 0.01 + betas: + - 0.9 + - 0.98 + sched: + name: CosineAnnealing + warmup_steps: 50 + min_lr: 0.0 # min_lr must be 0.0 for prompt learning when pipeline parallel > 1 + constant_steps: 0 # Constant steps should also be 0 when min_lr=0 + monitor: val_loss + reduce_on_plateau: false diff --git a/nemo/collections/asr/data/audio_to_text_lhotse.py b/nemo/collections/asr/data/audio_to_text_lhotse.py index 0ae3059a9296..0cec70174bc8 100644 --- a/nemo/collections/asr/data/audio_to_text_lhotse.py +++ b/nemo/collections/asr/data/audio_to_text_lhotse.py @@ -18,7 +18,7 @@ from lhotse.dataset import AudioSamples from lhotse.dataset.collation import collate_vectors -from nemo.collections.common.tokenizers.aggregate_tokenizer import AggregateTokenizer +from nemo.collections.common.tokenizers.aggregate_tokenizer import TokenizerWrapper from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType @@ -43,7 +43,7 @@ def output_types(self) -> Optional[Dict[str, NeuralType]]: 'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True), } - def __init__(self, tokenizer, return_cuts=False): + def __init__(self, tokenizer: TokenizerSpec, return_cuts: bool = False): super().__init__() self.tokenizer = TokenizerWrapper(tokenizer) self.load_audio = AudioSamples(fault_tolerant=True) @@ -66,31 +66,3 @@ def __getitem__(self, cuts) -> Tuple[torch.Tensor, ...]: if self.return_cuts: return audio, audio_lens, tokens, token_lens, cuts.drop_in_memory_data() return audio, audio_lens, tokens, token_lens - - -class TokenizerWrapper: - """ - Provide a unified interface for NeMo Tokenizer, AggregateTokenizer, and (char) Parser. - """ - - def __init__(self, tokenizer): - self._tokenizer = tokenizer - if isinstance(tokenizer, AggregateTokenizer): - self._impl = self._call_agg_tokenizer - elif isinstance(tokenizer, TokenizerSpec): - self._impl = self._call_tokenizer - else: - self._impl = self._call_parser - - def __call__(self, text: str, lang: str | None = None): - return self._impl(text, lang) - - def _call_agg_tokenizer(self, text: str, lang: str | None = None): - assert lang is not None, "Expected 'lang' to be set for AggregateTokenizer." 
- return self._tokenizer.text_to_ids(text, lang) - - def _call_tokenizer(self, text: str, lang: str | None = None): - return self._tokenizer.text_to_ids(text) - - def _call_parser(self, text: str, lang: str | None = None): - return self._tokenizer(text) diff --git a/nemo/collections/asr/data/audio_to_text_lhotse_prompted.py b/nemo/collections/asr/data/audio_to_text_lhotse_prompted.py index 35adc3130843..f40dffb79467 100644 --- a/nemo/collections/asr/data/audio_to_text_lhotse_prompted.py +++ b/nemo/collections/asr/data/audio_to_text_lhotse_prompted.py @@ -20,6 +20,8 @@ from lhotse.dataset import AudioSamples from lhotse.dataset.collation import collate_vectors +from nemo.collections.common.data import apply_prompt_format_fn +from nemo.collections.common.prompts import CanaryPromptFormatter, PromptFormatter from nemo.collections.common.tokenizers import TokenizerSpec @@ -64,28 +66,27 @@ class PromptedAudioToTextLhotseDataset(torch.utils.data.Dataset): def __init__( self, tokenizer: TokenizerSpec, - prompt_format_fn: Callable[ - [CutSet, TokenizerSpec], tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor]] - ], + prompt: PromptFormatter, ): super().__init__() self.tokenizer = tokenizer self.load_audio = AudioSamples(fault_tolerant=True) self.padding_value = self.tokenizer.pad - self.prompt_format_fn = prompt_format_fn + self.prompt = prompt def __getitem__(self, cuts: CutSet) -> PromptedAudioToTextMiniBatch: audio, audio_lens, cuts = self.load_audio(cuts) # Fast-path: the tokenization and prompt formatting was already done before sampling. - attrs = ("tokenized_prompt", "tokenized_transcript", "tokenized_prompted_transcript") + attrs = ("input_ids", "context_ids", "answer_ids") pre_formatted = all(hasattr(c, a) for c in cuts for a in attrs) if pre_formatted: - prompts_with_answers, prompts, answers = zip( - *((c.tokenized_prompted_transcript, c.tokenized_prompt, c.tokenized_transcript) for c in cuts) - ) + prompts_with_answers, prompts, answers = zip(*((c.input_ids, c.context_ids, c.answer_ids) for c in cuts)) else: - prompts_with_answers, prompts, answers = self.prompt_format_fn(cuts, self.tokenizer) + formatted = [apply_prompt_format_fn(cut, self.prompt) for cut in cuts] + prompts_with_answers = [ex["input_ids"] for ex in formatted] + prompts = [ex["context_ids"] for ex in formatted] + answers = [ex["answer_ids"] for ex in formatted] transcript, transcript_lens = self._collate_tokens(answers) prompts_with_answers, prompts_with_answers_lens = self._collate_tokens(prompts_with_answers) diff --git a/nemo/collections/asr/models/aed_multitask_models.py b/nemo/collections/asr/models/aed_multitask_models.py index 7121f93e7e14..a609eeaccf9e 100644 --- a/nemo/collections/asr/models/aed_multitask_models.py +++ b/nemo/collections/asr/models/aed_multitask_models.py @@ -43,10 +43,10 @@ from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis from nemo.collections.common import tokenizers from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config +from nemo.collections.common.data.prompt_fn import get_prompt_format_fn from nemo.collections.common.metrics import GlobalAverageLossMetric from nemo.collections.common.parts import transformer_weights_init from nemo.collections.common.parts.preprocessing.manifest import get_full_path -from nemo.collections.common.prompts.fn import get_prompt_format_fn from nemo.collections.common.prompts.formatter import PromptFormatter from nemo.core.classes.common import typecheck from nemo.core.neural_types import ( @@ 
-133,15 +133,14 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None): self.prompt_format = cfg.prompt_format self.sample_rate = cfg.sample_rate self._setup_tokenizer(cfg.tokenizer) - - super().__init__(cfg=cfg, trainer=trainer) - prompt_cls = PromptFormatter.resolve(self.prompt_format) self.prompt = prompt_cls( tokenizer=self.tokenizer, defaults=OmegaConf.to_container(pd) if (pd := cfg.get("prompt_defaults")) is not None else None, ) + super().__init__(cfg=cfg, trainer=trainer) + # Setup audio preprocessor self.preprocessor = EncDecMultiTaskModel.from_config_dict(self.cfg.preprocessor) # Setup audio encoder @@ -537,7 +536,7 @@ def _setup_dataloader_from_config(self, config: Optional[Dict]): world_size=world_size, dataset=PromptedAudioToTextLhotseDataset( tokenizer=self.tokenizer, - prompt_format_fn=get_prompt_format_fn(self.prompt_format), + prompt=self.prompt, ), tokenizer=self.tokenizer, ) diff --git a/nemo/collections/asr/modules/conformer_encoder.py b/nemo/collections/asr/modules/conformer_encoder.py index 27d0cde33f8c..e6b415eab5ae 100644 --- a/nemo/collections/asr/modules/conformer_encoder.py +++ b/nemo/collections/asr/modules/conformer_encoder.py @@ -151,6 +151,10 @@ class ConformerEncoder(NeuralModule, StreamingEncoder, Exportable, AccessMixin): Defaults to False. use_pytorch_sdpa_backends (list[str]): list of backend names to use in sdpa. None or empty list means all backends. e.g. ["MATH"] Defaults to None + sync_max_audio_length (bool): when true, performs NCCL all_reduce to allocate the same amount of memory for + positional encoding buffers on all GPUs. Disabling this setting may help with deadlocks in certain + scenarios such as model parallelism, or generally when this module is not being ran on some GPUs + as a part of the training step. """ @@ -301,6 +305,7 @@ def __init__( global_attn_separate: bool = False, use_pytorch_sdpa: bool = False, use_pytorch_sdpa_backends=None, + sync_max_audio_length: bool = True, ): super().__init__() d_ff = d_model * ff_expansion_factor @@ -319,6 +324,7 @@ def __init__( if use_pytorch_sdpa_backends is None: use_pytorch_sdpa_backends = [] self.use_pytorch_sdpa_backends = use_pytorch_sdpa_backends + self.sync_max_audio_length = sync_max_audio_length # Setting up the att_context_size ( @@ -672,7 +678,7 @@ def forward_internal( def update_max_seq_length(self, seq_length: int, device): # Find global max audio length across all nodes - if torch.distributed.is_initialized(): + if self.sync_max_audio_length and torch.distributed.is_initialized(): global_max_len = torch.tensor([seq_length], dtype=torch.float32, device=device) # Update across all ranks in the distributed system diff --git a/nemo/collections/common/data/__init__.py b/nemo/collections/common/data/__init__.py index ecc67ef05ea5..d4b43d2b4edc 100644 --- a/nemo/collections/common/data/__init__.py +++ b/nemo/collections/common/data/__init__.py @@ -13,3 +13,5 @@ # limitations under the License. from nemo.collections.common.data.dataset import CodeSwitchedDataset, ConcatDataset, ConcatMapDataset +from nemo.collections.common.data.lhotse import * +from nemo.collections.common.data.prompt_fn import apply_prompt_format_fn, get_prompt_format_fn diff --git a/nemo/collections/common/data/lhotse/__init__.py b/nemo/collections/common/data/lhotse/__init__.py index 6bbe9e991236..95f0d01db297 100644 --- a/nemo/collections/common/data/lhotse/__init__.py +++ b/nemo/collections/common/data/lhotse/__init__.py @@ -13,4 +13,15 @@ # limitations under the License. 
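With the dataset and model changes above, PromptedAudioToTextLhotseDataset is now constructed from a PromptFormatter object rather than a raw formatting callable, and the AED model builds that object itself from cfg.prompt_format. A minimal construction sketch; `tokenizer` is a placeholder for an existing Canary-style tokenizer (it must expose a `pad` id), and the "canary" format name is illustrative:

from nemo.collections.asr.data.audio_to_text_lhotse_prompted import PromptedAudioToTextLhotseDataset
from nemo.collections.common.prompts import PromptFormatter

# Resolve a registered prompt formatter by name and bind it to the tokenizer,
# mirroring what EncDecMultiTaskModel.__init__ does with cfg.prompt_format.
prompt = PromptFormatter.resolve("canary")(tokenizer=tokenizer)

# The dataset takes the formatter object directly instead of a prompt_format_fn.
dataset = PromptedAudioToTextLhotseDataset(tokenizer=tokenizer, prompt=prompt)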
from nemo.collections.common.data.lhotse.cutset import read_cutset_from_config -from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config +from nemo.collections.common.data.lhotse.dataloader import ( + LhotseDataLoadingConfig, + get_lhotse_dataloader_from_config, + get_lhotse_sampler_from_config, +) +from nemo.collections.common.data.lhotse.nemo_adapters import LazyNeMoIterator, LazyNeMoTarredIterator +from nemo.collections.common.data.lhotse.text_adapters import ( + NeMoMultimodalConversation, + NeMoSFTExample, + SourceTargetTextExample, + TextExample, +) diff --git a/nemo/collections/common/data/lhotse/cutset.py b/nemo/collections/common/data/lhotse/cutset.py index 50a2d71aae2e..63e93d8cf860 100644 --- a/nemo/collections/common/data/lhotse/cutset.py +++ b/nemo/collections/common/data/lhotse/cutset.py @@ -17,7 +17,7 @@ from functools import partial from itertools import repeat from pathlib import Path -from typing import Mapping, Sequence, Tuple, Union +from typing import KeysView, Mapping, Sequence, Tuple, Union import omegaconf from lhotse import CutSet, Features, Recording @@ -30,38 +30,93 @@ LazyNeMoTarredIterator, expand_sharded_filepaths, ) -from nemo.collections.common.data.lhotse.text_adapters import LhotseTextAdapter, LhotseTextPairAdapter +from nemo.collections.common.data.lhotse.text_adapters import ( + LhotseTextAdapter, + LhotseTextPairAdapter, + NeMoMultimodalConversationJsonlAdapter, + NeMoSFTJsonlAdapter, +) from nemo.collections.common.parts.preprocessing.manifest import get_full_path -def read_cutset_from_config(config: DictConfig) -> Tuple[CutSet, bool]: +def read_cutset_from_config(config: DictConfig | dict) -> Tuple[CutSet, bool]: """ Reads NeMo configuration and creates a CutSet either from Lhotse or NeMo manifests. Returns a tuple of ``CutSet`` and a boolean indicating whether the data is tarred (True) or not (False). """ # First, check if the dataset is specified in the new configuration format and use it if possible. + if not isinstance(config, DictConfig): + config = DictConfig(config) if config.get("input_cfg") is not None: return read_dataset_config(config) # Now, we'll figure out if we should read Lhotse manifest or NeMo manifest. use_nemo_manifest = all(config.get(opt) is None for opt in ("cuts_path", "shar_path")) if use_nemo_manifest: - assert ( - config.get("manifest_filepath") is not None - ), "You must specify either: manifest_filepath, cuts_path, or shar_path" - is_tarred = config.get("tarred_audio_filepaths") is not None - else: - is_tarred = config.get("shar_path") is not None - if use_nemo_manifest: - # Read NeMo manifest -- use the right wrapper depending on tarred/non-tarred. - cuts = read_nemo_manifest(config, is_tarred) + if config.get("manifest_filepath") is None: + raise IncompleteConfigError("You must specify either: manifest_filepath, cuts_path, or shar_path") + cuts, is_tarred = read_nemo_manifest(config) else: - # Read Lhotse manifest (again handle both tarred(shar)/non-tarred). - cuts = read_lhotse_manifest(config, is_tarred) + cuts, is_tarred = read_lhotse_manifest(config) return cuts, is_tarred -KNOWN_DATASET_CONFIG_TYPES = frozenset(("nemo", "nemo_tarred", "lhotse", "lhotse_shar", "txt", "txt_pair", "group")) +class IncompleteConfigError(RuntimeError): + pass + + +KNOWN_DATA_CONFIG_TYPES = {} + + +def get_known_config_data_types() -> KeysView[str]: + """ + Return the names of all registered data type parsers. + + Example: + + >>> get_known_config_data_types() + ["nemo", "nemo_tarred", "lhotse", ...] 
+ """ + return KNOWN_DATA_CONFIG_TYPES.keys() + + +def get_parser_fn(data_type_name: str): + """ + Return the parsing function for a given data type name. + Parsing function reads a dataloading config and returns a tuple + of lhotse ``CutSet`` and boolean indicating whether we should use + iterable dataset (True) or map dataset (False) mechanism ("is tarred"). + """ + return KNOWN_DATA_CONFIG_TYPES[data_type_name] + + +def data_type_parser(name: str | list[str]): + """ + Decorator used to register data type parser functions. + Parsing function reads a dataloading config and returns a tuple + of lhotse ``CutSet`` and boolean indicating whether we should use + iterable dataset (True) or map dataset (False) mechanism ("is tarred"). + + Example: + + >>> @data_type_parser("my_new_format") + ... def my_new_format(config): + ... return CutSet(read_my_format(**config)), True + ... + ... fn = get_parser_fn("my_new_format") + ... cuts, is_tarred = fn({"my_arg_0": ..., "my_arg_1": ..., ...}) + """ + + def _decorator(fn): + global KNOWN_DATA_CONFIG_TYPES + if isinstance(name, str): + KNOWN_DATA_CONFIG_TYPES[name] = fn + else: + for n in name: + KNOWN_DATA_CONFIG_TYPES[n] = fn + return fn + + return _decorator def read_dataset_config(config) -> tuple[CutSet, bool]: @@ -127,14 +182,15 @@ def read_dataset_config(config) -> tuple[CutSet, bool]: tgt_lang: en """ propagate_attrs = { - "shuffle": config.shuffle, - "shard_seed": config.shard_seed, - "text_field": config.text_field, - "lang_field": config.lang_field, - "metadata_only": config.metadata_only, - "force_finite": config.force_finite, - "max_open_streams": config.max_open_streams, - "tarred_random_access": config.tarred_random_access, + "shuffle": config.get("shuffle", False), + "shard_seed": config.get("shard_seed", "trng"), + "text_field": config.get("text_field", "text"), + "lang_field": config.get("lang_field", "lang"), + "metadata_only": config.get("metadata_only", False), + "force_finite": config.get("force_finite", False), + "max_open_streams": config.get("max_open_streams", None), + "token_equivalent_duration": config.get("token_equivalent_duration", None), + "tarred_random_access": config.get("tarred_random_access", False), } input_cfg = config.input_cfg if isinstance(input_cfg, (str, Path)): @@ -145,65 +201,89 @@ def read_dataset_config(config) -> tuple[CutSet, bool]: def parse_group(grp_cfg: DictConfig, propagate_attrs: dict) -> [CutSet, bool]: - assert grp_cfg.type in KNOWN_DATASET_CONFIG_TYPES, f"Unknown item type in dataset config list: {grp_cfg.type=}" - if grp_cfg.type == "nemo_tarred": - is_tarred = True - cuts = read_nemo_manifest(grp_cfg, is_tarred=is_tarred) - elif grp_cfg.type == "nemo": - is_tarred = False - cuts = read_nemo_manifest(grp_cfg, is_tarred=is_tarred) - elif grp_cfg.type == "lhotse_shar": - is_tarred = True - cuts = read_lhotse_manifest(grp_cfg, is_tarred=is_tarred) - elif grp_cfg.type == "lhotse": - is_tarred = False - cuts = read_lhotse_manifest(grp_cfg, is_tarred=is_tarred) - # Note: "txt" and "txt_pair" have "is_tarred" set to True. - # The main reason is to enable combination of tarred audio and text dataloading, - # since we don't allow combination of tarred and non-tarred datasets. - # We choose to treat text as-if it was tarred, which also tends to be more + assert grp_cfg.type in get_known_config_data_types(), f"Unknown item type in dataset config list: {grp_cfg.type=}" + + # Note: Text data types will return is_tarred=True. 
+ # We choose to treat text as-if it was tarred, which tends to be more # efficient as it moves the text file iteration into dataloading subprocess. - elif grp_cfg.type == "txt": - is_tarred = True - cuts = read_txt_paths(grp_cfg) - elif grp_cfg.type == "txt_pair": - is_tarred = True - cuts = read_txt_pair_paths(grp_cfg) - elif grp_cfg.type == "group": + if grp_cfg.type != "group": + parser_fn = get_parser_fn(grp_cfg.type) + cuts, is_tarred = parser_fn(grp_cfg) + else: cuts, is_tarred = parse_and_combine_datasets( grp_cfg.input_cfg, propagate_attrs=propagate_attrs, ) - else: - raise ValueError(f"Unrecognized group: {grp_cfg.type}") # Attach extra tags to every utterance dynamically, if provided. if (extra_tags := grp_cfg.get("tags")) is not None: cuts = cuts.map(partial(attach_tags, tags=extra_tags), apply_fn=None) return cuts, is_tarred -def read_txt_paths(config: DictConfig) -> CutSet: - return CutSet( +@data_type_parser("txt") +def read_txt_paths(config: DictConfig) -> tuple[CutSet, bool]: + cuts = CutSet( LhotseTextAdapter( paths=config.paths, language=config.language, shuffle_shards=config.shuffle, shard_seed=config.shard_seed, ) - ).repeat() + ) + if not config.get("force_finite", False): + cuts = cuts.repeat() + return cuts, True -def read_txt_pair_paths(config: DictConfig) -> CutSet: - return CutSet( +@data_type_parser("txt_pair") +def read_txt_pair_paths(config: DictConfig) -> tuple[CutSet, bool]: + cuts = CutSet( LhotseTextPairAdapter( source_paths=config.source_paths, target_paths=config.target_paths, - source_language=config.source_language, - target_language=config.target_language, + source_language=config.get("source_language"), + target_language=config.get("target_language"), + questions_path=config.get("questions_path"), + questions_language=config.get("questions_language"), + shuffle_shards=config.shuffle, + shard_seed=config.shard_seed, + ) + ) + if not config.get("force_finite", False): + cuts = cuts.repeat() + return cuts, True + + +@data_type_parser("nemo_sft_jsonl") +def read_nemo_sft_jsonl(config: DictConfig) -> tuple[CutSet, bool]: + cuts = CutSet( + NeMoSFTJsonlAdapter( + paths=config.paths, + language=config.get("language"), + shuffle_shards=config.shuffle, + shard_seed=config.shard_seed, + ) + ) + if not config.get("force_finite", False): + cuts = cuts.repeat() + return cuts, True + + +@data_type_parser("multimodal_conversation") +def read_multimodal_conversation_jsonl(config: DictConfig) -> tuple[CutSet, bool]: + cuts = CutSet( + NeMoMultimodalConversationJsonlAdapter( + manifest_filepath=config.manifest_filepath, + tarred_audio_filepaths=config.get("tarred_audio_filepaths"), + audio_locator_tag=config.audio_locator_tag, + token_equivalent_duration=config.get("token_equivalent_duration"), shuffle_shards=config.shuffle, shard_seed=config.shard_seed, ) - ).repeat() + ) + if not config.get("force_finite", False): + cuts = cuts.repeat() + return cuts, True def attach_tags(cut, tags: dict): @@ -212,6 +292,7 @@ def attach_tags(cut, tags: dict): return cut +@data_type_parser("group") def parse_and_combine_datasets( config_list: Union[list[DictConfig], ListConfig], propagate_attrs: dict ) -> tuple[CutSet, bool]: @@ -257,7 +338,9 @@ def parse_and_combine_datasets( return cuts, tarred_status[0] -def read_lhotse_manifest(config, is_tarred: bool) -> CutSet: +@data_type_parser(["lhotse", "lhotse_shar"]) +def read_lhotse_manifest(config) -> tuple[CutSet, bool]: + is_tarred = config.get("shar_path") is not None if is_tarred: # Lhotse Shar is the equivalent of NeMo's native 
"tarred" dataset. # The combination of shuffle_shards, and repeat causes this to @@ -343,7 +426,7 @@ def read_lhotse_manifest(config, is_tarred: bool) -> CutSet: # Regular Lhotse manifest points to individual audio files (like native NeMo manifest). path = config.cuts_path cuts = CutSet.from_file(path).map(partial(resolve_relative_paths, manifest_path=path)) - return cuts + return cuts, is_tarred def _resolve_shar_inputs(path: str | Path, only_metadata: bool) -> dict: @@ -401,7 +484,8 @@ def resolve_array(value): return cut -def read_nemo_manifest(config, is_tarred: bool) -> CutSet: +@data_type_parser(["nemo", "nemo_tarred"]) +def read_nemo_manifest(config) -> tuple[CutSet, bool]: common_kwargs = { "text_field": config.text_field, "lang_field": config.lang_field, @@ -418,6 +502,7 @@ def read_nemo_manifest(config, is_tarred: bool) -> CutSet: notar_kwargs = {"metadata_only": config.metadata_only} metadata_only = config.metadata_only force_finite = config.force_finite + is_tarred = config.get("tarred_audio_filepaths") is not None if isinstance(config.manifest_filepath, (str, Path)): logging.info(f"Initializing Lhotse CutSet from a single NeMo manifest (tarred): '{config.manifest_filepath}'") if is_tarred and not metadata_only: @@ -507,7 +592,7 @@ def read_nemo_manifest(config, is_tarred: bool) -> CutSet: seed=config.shard_seed, force_finite=force_finite or metadata_only, ) - return cuts + return cuts, is_tarred def mux( diff --git a/nemo/collections/common/data/lhotse/dataloader.py b/nemo/collections/common/data/lhotse/dataloader.py index 2592e27e76ea..bad866e6dac9 100644 --- a/nemo/collections/common/data/lhotse/dataloader.py +++ b/nemo/collections/common/data/lhotse/dataloader.py @@ -11,36 +11,50 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import bisect import os import random import warnings from dataclasses import dataclass from functools import partial -from typing import Any, Optional, Sequence, TypeVar, Union +from typing import Any, Optional, Sequence import numpy as np import torch from lhotse import CutSet, RecordingSet from lhotse.cut import Cut -from lhotse.cut.text import TextExample, TextPairExample from lhotse.dataset import ( CutConcatenate, DynamicBucketingSampler, DynamicCutSampler, IterableDatasetWrapper, ReverbWithImpulseResponse, + RoundRobinSampler, + ZipSampler, make_worker_init_fn, ) from lhotse.dataset.dataloading import resolve_seed -from lhotse.dataset.sampling.base import SamplingConstraint, TimeConstraint, TokenConstraint -from lhotse.dataset.sampling.dynamic_bucketing import FixedBucketBatchSizeConstraint +from lhotse.dataset.sampling.base import CutSampler, TimeConstraint from lhotse.lazy import LazyFlattener from lhotse.utils import fastcopy, fix_random_seed from omegaconf import DictConfig, OmegaConf -from nemo.collections.common.data.lhotse.cutset import guess_parse_cutset, read_cutset_from_config -from nemo.collections.common.prompts.fn import get_prompt_format_fn +from nemo.collections.common.data.lhotse.cutset import ( + IncompleteConfigError, + guess_parse_cutset, + read_cutset_from_config, +) +from nemo.collections.common.data.lhotse.sampling import ( + DurationFilter, + FixedBucketBatchSizeConstraint2D, + MultimodalFixedBucketBatchSizeConstraint2D, + MultimodalSamplingConstraint, + TokenCountFilter, + TokenPerSecondFilter, + TokenPerTokenFilter, +) +from nemo.collections.common.data.prompt_fn import apply_prompt_format_fn +from nemo.collections.common.prompts import PromptFormatter +from nemo.collections.common.tokenizers.aggregate_tokenizer import TokenizerWrapper from nemo.utils import logging @@ -83,6 +97,12 @@ class LhotseDataLoadingConfig: shard_seed: int | str = "trng" max_open_streams: int | None = None cuda_expandable_segments: bool = True + # e. Multi-config related options. + # Setting multi_config=True will scan the config for keys with DictConfig values, + # create a separate sampler for each, and fuse the samplers according to sampler_fusion. + multi_config: bool = False + sampler_fusion: str = "round_robin" # round_robin | randomized_round_robin | zip + sampler_weights: dict[str, float] | None = None # only applicable to randomized_round_robin # 2.1 Multimodal sampling override options pretokenize: bool = True # should we apply tokenizer before data sampling @@ -92,17 +112,28 @@ class LhotseDataLoadingConfig: batch_tokens: int | None = None quadratic_factor: float | None = None + # 2.2 Filters on sequence lengths. + # * Speech input + min_duration: float | None = -1 + max_duration: float | None = float("inf") + min_tps: int = -1 # allowed tokens per second (audio-only) + max_tps: float = float("inf") + # * Text input + min_tokens: int | None = None + max_tokens: int | None = None + # When true, combine context+answer lengths into a total length; otherwise report context length. + # For 2D bucketing it's always false, as we report a tuple of (context_len, answer_len). + measure_total_length: bool = True + min_tpt: int = -1 # allowed tokens per token (text-only) + max_tpt: float = float("inf") + # 3. Supported existing NeMo options. 
shuffle: bool = False sample_rate: int = 16000 - min_duration: float | None = -1 - max_duration: float | None = float("inf") seed: int | str = 0 num_workers: int = 0 pin_memory: bool = False channel_selector: int | str | None = None - min_tps: int = -1 # allowed tokens per second - max_tps: float = float("inf") # 4. Optional Lhotse data augmentation. # a. On-the-fly noise/audio mixing. @@ -175,7 +206,7 @@ def determine_use_iterable_dataset(use_iterable_dataset: bool, config: DictConfi def get_lhotse_dataloader_from_config( - config: DictConfig, + config: dict | DictConfig, global_rank: int, world_size: int, dataset: torch.utils.data.Dataset, @@ -198,20 +229,220 @@ def get_lhotse_dataloader_from_config( The ``tokenizer`` is used both for audio and text datasets for on-the-fly tokenization. This allows us to stratify the bucketing by the count of input/output tokens (depending on modality). If "prompt_format" is additionally provided in the config, we will also apply a prompt formatter. + Note that ``tokenizer`` can be any tokenizer type (e.g. both SentencePiece and Aggregate tokenizers work). + """ + if not isinstance(config, DictConfig): + config = OmegaConf.create(config) + + # Providing default value because we haven't filled the config defaults yet. + maybe_set_cuda_expandable_segments(enabled=config.get("cuda_expandable_segments", True)) + + if config.get("multi_config", False): + return get_lhotse_dataloader_from_multi_config( + top_level_config=config, + global_rank=global_rank, + world_size=world_size, + dataset=dataset, + tokenizer=tokenizer, + ) + else: + return get_lhotse_dataloader_from_single_config( + config=config, global_rank=global_rank, world_size=world_size, dataset=dataset, tokenizer=tokenizer + ) + + +def get_lhotse_dataloader_from_single_config( + config: DictConfig, + global_rank: int, + world_size: int, + dataset: torch.utils.data.Dataset, + tokenizer=None, +) -> torch.utils.data.DataLoader: + """ + Set up a Lhotse training dataloder. + + Expects a typical NeMo dataset configuration format, with additional fields: "use_lhotse=True". + Some fields in the original NeMo configuration may be ignored. + + The ``dataset`` parameter should be an instance of a Lhotse-compatible PyTorch Dataset class. + It only needs to define the following method ``__getitem__(self, cuts: CutSet) -> Dict[str, torch.Tensor]``. + This dataset is not expected to hold a reference to any actual data; it may be interpreted as a function + mapping a Lhotse CutSet into a mini-batch of tensors. + + For an example, see: :class:`nemo.collections.asr.data.audio_to_text_lhotse.LhotseSpeechToTextBpeDataset`, + which is constructed from just a tokenizer and essentially loads and collates audio and tokenizes the transcript. + + The ``tokenizer`` is used when text-only datasets are included in dataloading. + In these cases we will tokenize ``TextExample``s before sampling mini-batches so that + we can account for their number of tokens. + Note: this behaviour might eventually be extended to audio datasets too. + Note that ``tokenizer`` can be any tokenizer type (e.g. both SentencePiece and Aggregate tokenizers work). """ logging.info("We will be using a Lhotse DataLoader.") config = make_structured_with_schema_warnings(config) - maybe_set_cuda_expandable_segments(enabled=config.cuda_expandable_segments) - # First, resolve the random seed in case a string value was provided. 
- seed = resolve_seed(config.seed) - fix_random_seed(seed) + config.seed = resolve_seed(config.seed) + fix_random_seed(config.seed) + + sampler, use_iterable_dataset = get_lhotse_sampler_from_config( + config=config, global_rank=global_rank, world_size=world_size, tokenizer=tokenizer + ) + # 4. Creating dataloader. + if use_iterable_dataset: + # Wrapper here is necessary when using NeMo tarred data or Lhotse Shar data, + # because then I/O happens upon sampler iteration. Normally, the sampler resides + # in the training loop process, but when we use iterable dataset, we can move it to + # the dataloading worker process. + # We use lhotse's own worker_init_fn which leverages information such as rank, world_size, + # worker_id, etc. to set a different random seed for each (node, worker) combination. + # This together with infinite datasets removes the need to split data across nodes/workers. + dloader_kwargs = dict( + dataset=IterableDatasetWrapper(dataset=dataset, sampler=sampler), + worker_init_fn=make_worker_init_fn(rank=global_rank, world_size=world_size, seed=config.seed), + persistent_workers=config.num_workers > 0, # helps Lhotse Shar maintain shuffling state + ) + else: + # For non-tarred data, the sampler resides in the training loop process and + # reads only light-weight JSON objects; it samples mini-batches and passes + # the meta-data to Dataset, which performs the actual I/O inside its __getitem__ method. + dloader_kwargs = dict(dataset=dataset, sampler=sampler) + dloader = torch.utils.data.DataLoader( + **dloader_kwargs, + batch_size=None, + num_workers=config.num_workers, + pin_memory=config.pin_memory, + ) + + return dloader + + +def get_lhotse_dataloader_from_multi_config( + top_level_config: DictConfig, + global_rank: int, + world_size: int, + dataset: torch.utils.data.Dataset, + tokenizer=None, +) -> torch.utils.data.DataLoader: + """ + Set up a Lhotse training dataloder. + + It works similarly to :func:`get_lhotse_dataloader_from_config`, except that you can provide multiple configs + to set up different sampling, batching, and augmentation settings for every dataset and decide how to merge them. + + The expected format is that the ``configs`` is a dict of group name -> actual config. + + The first config is treated as a "main" config that determines the RNG, CUDA allocator, and sampler fusion settings. + """ + + def gather_shared_opts(): + """ + In multi-config setting, the top-level config defines several attributes that overwrite + the ones present in sub-configs. + """ + assert all( + k in top_level_config for k in ["seed", "shard_seed", "shuffle"] + ), "In a multi-config setting (multi_config=True), the top-level namespace (typically train_ds) must define at least 'seed', 'shard_seed', and 'shuffle' keys that will be shared by all sub-configs." 
+ overwriting_opts = [ + "seed", + "shard_seed", + "num_workers", + "pin_memory", + "shuffle", + "sampler_fusion", + "sampler_weights", + "multi_config", + "metadata_only", + "force_finite", + ] + defaults = OmegaConf.structured(LhotseDataLoadingConfig) + top_level_config["seed"] = resolve_seed(top_level_config["seed"]) + return OmegaConf.create({k: top_level_config.get(k, defaults[k]) for k in overwriting_opts}) + + shared_opts = gather_shared_opts() + fix_random_seed(shared_opts.seed) + + configs = { + name: c + for name, c in top_level_config.items() + if isinstance(c, DictConfig) and name not in ("sampler_weights",) # exclude dict opts + } + + source_samplers, source_use_iterable_dataset = {}, [] + for name, config in configs.items(): + try: + expanded_config = make_structured_with_schema_warnings(config) + for k, v in shared_opts.items(): + expanded_config[k] = v + s, t = get_lhotse_sampler_from_config( + config=expanded_config, global_rank=global_rank, world_size=world_size, tokenizer=tokenizer + ) + except IncompleteConfigError as e: + raise IncompleteConfigError( + f"Cannot create a sampler for one of the sub-configs in a multi_config setup. The problematic config is under key={name} and has the following contents: {config}" + ) from e + source_samplers[name] = s + source_use_iterable_dataset.append(t) + + assert all(st == source_use_iterable_dataset[0] for st in source_use_iterable_dataset[1:]), ( + "When using multiple input_cfg sources ensure they are all tarred or non-tarred (can't mix). " + "You can provide force_iterable_dataset=True to each namespace to fix." + ) + use_iterable_dataset = all(source_use_iterable_dataset) + if shared_opts.sampler_fusion == "zip": + sampler = ZipSampler(*source_samplers.values()) + elif shared_opts.sampler_fusion == "round_robin": + sampler = RoundRobinSampler(*source_samplers.values()) + elif shared_opts.sampler_fusion == "randomized_round_robin": + _samplers, _weights = [], [] + for key in source_samplers.keys(): + _samplers.append(source_samplers[key]) + if shared_opts.sampler_weights is not None: + _weights.append(shared_opts.sampler_weights[key]) + sampler = RoundRobinSampler( + *_samplers, + randomize=_weights if len(_weights) > 0 else True, + seed=shared_opts.seed, + ) + else: + raise RuntimeError(f"Unsupported sampler fusion strategy: {shared_opts.sampler_fusion}") + + # 4. Creating dataloader. + if use_iterable_dataset: + # Wrapper here is necessary when using NeMo tarred data or Lhotse Shar data, + # because then I/O happens upon sampler iteration. Normally, the sampler resides + # in the training loop process, but when we use iterable dataset, we can move it to + # the dataloading worker process. + # We use lhotse's own worker_init_fn which leverages information such as rank, world_size, + # worker_id, etc. to set a different random seed for each (node, worker) combination. + # This together with infinite datasets removes the need to split data across nodes/workers. + dloader_kwargs = dict( + dataset=IterableDatasetWrapper(dataset=dataset, sampler=sampler), + worker_init_fn=make_worker_init_fn(rank=global_rank, world_size=world_size, seed=shared_opts.seed), + persistent_workers=shared_opts.num_workers > 0, # helps Lhotse Shar maintain shuffling state + ) + else: + # For non-tarred data, the sampler resides in the training loop process and + # reads only light-weight JSON objects; it samples mini-batches and passes + # the meta-data to Dataset, which performs the actual I/O inside its __getitem__ method. 
+ dloader_kwargs = dict(dataset=dataset, sampler=sampler) + dloader = torch.utils.data.DataLoader( + **dloader_kwargs, + batch_size=None, + num_workers=shared_opts.num_workers, + pin_memory=shared_opts.pin_memory, + ) + + return dloader + + +def get_lhotse_sampler_from_config(config, global_rank, world_size, tokenizer=None) -> tuple[CutSampler, bool]: # 1. Load a manifest as a Lhotse CutSet. cuts, use_iterable_dataset = read_cutset_from_config(config) use_iterable_dataset = determine_use_iterable_dataset(use_iterable_dataset, config) + # Apply channel selector if config.channel_selector is not None: logging.info('Using channel selector %s.', config.channel_selector) @@ -223,9 +454,13 @@ def get_lhotse_dataloader_from_config( # Expands cuts if multiple translations are provided. cuts = CutSet(LazyFlattener(cuts.map(_flatten_alt_text, apply_fn=None))) - if tokenizer is not None and config.pretokenize: - from nemo.collections.asr.data.audio_to_text_lhotse import TokenizerWrapper + if config.use_multimodal_sampling: + assert tokenizer is not None, ( + "You must pass a tokenizer to `get_lhotse_dataloader_from_config` in order to read text-only datasets " + "(enabled via use_multimodal_dataloading)" + ) + if tokenizer is not None and config.pretokenize: if not use_iterable_dataset: logging.warning( "You are using a non-tarred dataset and requested tokenization during data sampling (pretokenize=True). " @@ -243,6 +478,7 @@ def get_lhotse_dataloader_from_config( tokenizer = TokenizerWrapper(tokenizer) cuts = cuts.map(partial(tokenize, tokenizer=tokenizer), apply_fn=None) cuts = cuts.filter(TokenPerSecondFilter(config.min_tps, config.max_tps)) + cuts = cuts.filter(TokenPerTokenFilter(config.min_tpt, config.max_tpt)) # 2. Optional augmentations. # 2.a. Noise mixing. @@ -287,40 +523,14 @@ def get_lhotse_dataloader_from_config( # Duration filtering, same as native NeMo dataloaders. # We can filter after the augmentations because they are applied only when calling load_audio(). cuts = cuts.filter(DurationFilter(config.min_duration, config.max_duration)) + cuts = cuts.filter( + TokenCountFilter(config.min_tokens, config.max_tokens, measure_total_length=config.measure_total_length) + ) + # Select the strategy customizing Lhotse sampler behaviour. + # Provides support for dynamic batch sizes, multimodal dataloading, 2D bucketing, etc. bucket_duration_bins = determine_bucket_duration_bins(config) - if config.use_multimodal_sampling: - if config.bucket_batch_size is not None: - assert ( - bucket_duration_bins is not None - ), "Cannot use bucket_batch_size option if bucket_duration_bins are not provided." - constraint = MultimodalFixedBucketBatchSizeConstraint2D( - max_seq_len_buckets=bucket_duration_bins, - batch_sizes=config.bucket_batch_size, - token_equivalent_duration=config.token_equivalent_duration, - ) - else: - constraint = MultimodalSamplingConstraint( - token_equivalent_duration=config.token_equivalent_duration, - batch_size=config.batch_size, - batch_tokens=config.batch_tokens, - quadratic_factor=config.quadratic_factor, - ) - else: - if config.bucket_batch_size is not None: - assert ( - bucket_duration_bins is not None - ), "Cannot use bucket_batch_size option if bucket_duration_bins are not provided." 
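For context, the multi-config path above builds one sampler per DictConfig-valued key in the top-level namespace and fuses them according to sampler_fusion, which is what the train_ds section earlier in this patch relies on. A rough usage sketch; my_dataset, my_tokenizer, and the two input_cfg paths are placeholders, and the numeric settings are illustrative:

from omegaconf import OmegaConf

from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config

config = OmegaConf.create(
    {
        "multi_config": True,
        "sampler_fusion": "randomized_round_robin",
        "sampler_weights": {"audio": 0.5, "text": 0.5},
        # Options shared by all sub-configs; seed, shard_seed, and shuffle are required at the top level.
        "seed": 0,
        "shard_seed": "trng",
        "shuffle": True,
        "num_workers": 4,
        # Every DictConfig-valued key below becomes its own sampler.
        "audio": {"input_cfg": "audio_sources.yaml", "batch_duration": 360, "use_bucketing": True, "num_buckets": 30},
        "text": {
            "input_cfg": "text_sources.yaml",
            "use_multimodal_sampling": True,
            "batch_tokens": 8000,
            "use_bucketing": True,
            "num_buckets": 30,
        },
    }
)

dloader = get_lhotse_dataloader_from_config(
    config, global_rank=0, world_size=1, dataset=my_dataset, tokenizer=my_tokenizer
)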
- constraint = FixedBucketBatchSizeConstraint2D( - max_seq_len_buckets=bucket_duration_bins, - batch_sizes=config.bucket_batch_size, - ) - else: - constraint = TimeConstraint( - max_cuts=config.batch_size, - max_duration=config.batch_duration, - quadratic_duration=config.quadratic_duration, - ) + constraint = determine_sampling_constraint(bucket_duration_bins, config) # 3. The sampler. if config.use_bucketing: @@ -344,6 +554,7 @@ def get_lhotse_dataloader_from_config( duration_bins=determine_bucket_duration_bins(config), num_cuts_for_bins_estimate=config.num_cuts_for_bins_estimate, buffer_size=config.bucket_buffer_size, + concurrent=config.concurrent_bucketing, rank=0 if use_iterable_dataset else global_rank, world_size=1 if use_iterable_dataset else world_size, ) @@ -390,40 +601,66 @@ def get_lhotse_dataloader_from_config( ReverbWithImpulseResponse( rir_recordings=RecordingSet.from_file(config.rir_path) if config.rir_path is not None else None, p=config.rir_prob, - randgen=random.Random(seed), + randgen=random.Random(config.seed), ) ) - # 4. Creating dataloader. - if use_iterable_dataset and not config.tarred_random_access: - # Wrapper here is necessary when using NeMo tarred data or Lhotse Shar data, - # because then I/O happens upon sampler iteration. Normally, the sampler resides - # in the training loop process, but when we use iterable dataset, we can move it to - # the dataloading worker process. - # We use lhotse's own worker_init_fn which leverages information such as rank, world_size, - # worker_id, etc. to set a different random seed for each (node, worker) combination. - # This together with infinite datasets removes the need to split data across nodes/workers. - dloader_kwargs = dict( - dataset=IterableDatasetWrapper(dataset=dataset, sampler=sampler), - worker_init_fn=make_worker_init_fn(rank=global_rank, world_size=world_size, seed=seed), - persistent_workers=config.num_workers > 0, # helps Lhotse Shar maintain shuffling state - ) - else: - # For non-tarred data, the sampler resides in the training loop process and - # reads only light-weight JSON objects; it samples mini-batches and passes - # the meta-data to Dataset, which performs the actual I/O inside its __getitem__ method. - dloader_kwargs = dict(dataset=dataset, sampler=sampler) - dloader = torch.utils.data.DataLoader( - **dloader_kwargs, - batch_size=None, - num_workers=config.num_workers, - pin_memory=config.pin_memory, - ) + return sampler, use_iterable_dataset - return dloader + +def determine_sampling_constraint(bucket_duration_bins, config): + """ + Select an appropriate sampling strategy (constraint) for Lhotse samplers based on the configuration. + Sampling constraint affects the batch size (static/dynamic) and bucketing behaviour (1D/2D). + It is the appropriate customization point to introduce support of other modalities, + as it defines a method for example sequence length measurement (audio duration, text tokens, etc.). + + Lhotse's default is :class:`TimeConstraint` for regular audio data, other available options are + multimodal constraints (joint text + audio) and their 2D bucketing extensions. + """ + if config.use_multimodal_sampling: + if config.bucket_batch_size is not None: + assert ( + bucket_duration_bins is not None + ), "Cannot use bucket_batch_size option if bucket_duration_bins are not provided." 
+ constraint = MultimodalFixedBucketBatchSizeConstraint2D( + max_seq_len_buckets=bucket_duration_bins, + batch_sizes=config.bucket_batch_size, + token_equivalent_duration=config.token_equivalent_duration, + ) + else: + constraint = MultimodalSamplingConstraint( + token_equivalent_duration=config.token_equivalent_duration, + batch_size=config.batch_size, + batch_tokens=config.batch_tokens, + quadratic_factor=config.quadratic_factor, + ) + else: + if config.bucket_batch_size is not None: + assert ( + bucket_duration_bins is not None + ), "Cannot use bucket_batch_size option if bucket_duration_bins are not provided." + constraint = FixedBucketBatchSizeConstraint2D( + max_seq_len_buckets=bucket_duration_bins, + batch_sizes=config.bucket_batch_size, + ) + else: + constraint = TimeConstraint( + max_cuts=config.batch_size, + max_duration=config.batch_duration, + quadratic_duration=config.quadratic_duration, + ) + return constraint def determine_bucket_duration_bins(config): + """ + Returns appropriate bucket bins based on configuration. + If user provided them explicitly, we just pass them along; + otherwise, we try to create provisional bins when min/max duration is available. + We might return None if it's impossible to determine the bins without computing data statistics, + in which case it will be automatically done at the start of training (but may take a few minutes). + """ if config.bucket_duration_bins is not None: # Bucket duration bins are provided: just use them. ans = OmegaConf.to_container(config.bucket_duration_bins) @@ -451,13 +688,15 @@ def determine_bucket_duration_bins(config): return None -def make_structured_with_schema_warnings(config: DictConfig) -> DictConfig: +def make_structured_with_schema_warnings(config: DictConfig | dict) -> DictConfig: """ Checks the schema and fills missing default option values. Warns the user if any of the fields are not supported by the current schema but does not raise exceptions. """ default = OmegaConf.structured(LhotseDataLoadingConfig) + if not isinstance(config, DictConfig): + config = DictConfig(config) # Remove unsupported keys and warn about them. supported_keys = set(OmegaConf.to_container(default).keys()) @@ -473,148 +712,32 @@ def make_structured_with_schema_warnings(config: DictConfig) -> DictConfig: return OmegaConf.merge(default, config) -@dataclass -class MultimodalSamplingConstraint(SamplingConstraint): - # how many seconds of audio is a text token worth; balances audio to text ratio in a mini-batch - token_equivalent_duration: float - - # defines maximum batch size (may be lower than that if batch_length is also specified) - batch_size: int | None = None - - # defines the total number of tokens in a mini-batch - # setting this enables dynamic batch sizes - # we will use ``token_equivalent_duration`` to convert audio examples to token sizes - batch_tokens: int | None = None - - # when specified, this value is inversely proportional to the penalty we assign - # to longer examples when measuring their length/duration; - # i.e. 
large quadratic factor is a small penalty, small quadratic factor is a large penalty - # tweaking this helps equalize the GPU memory usage for dynamic batch sizes when using bucketing - quadratic_factor: float | None = None - - _internal = None - - def __post_init__(self): - self._internal = TokenConstraint( - max_tokens=self.batch_tokens, - max_examples=self.batch_size, - quadratic_length=self.quadratic_factor, - ) - - def add(self, example: Any) -> None: - if isinstance(example, Cut): - num_tokens = self.measure_length(example) - example.num_tokens = num_tokens - self._internal.add(example) - - def exceeded(self) -> bool: - return self._internal.exceeded() - - def close_to_exceeding(self) -> bool: - return self._internal.close_to_exceeding() - - def reset(self) -> None: - self._internal.reset() - - def measure_length(self, example: Any) -> float: - if isinstance(example, Cut): - return example.duration / self.token_equivalent_duration - if isinstance(example, (TextExample, TextPairExample)): - return example.num_tokens - raise RuntimeError(f"Unsupported example type: {type(example)}") - - -@dataclass -class FixedBucketBatchSizeConstraint2D(FixedBucketBatchSizeConstraint): - @property - def bucketing_2d_enabled(self) -> bool: - return isinstance(self.max_seq_len_buckets[0], Sequence) and len(self.max_seq_len_buckets[0]) == 2 - - def measure_length(self, example: Any) -> tuple[float, float]: - if self.bucketing_2d_enabled: - return example.duration, _measure_tokens(example) - else: - return example.duration - - def select_bucket(self, buckets: Any, example: Any = None, example_len: Any = None) -> int: - if not self.bucketing_2d_enabled: - return super().select_bucket(buckets=buckets, example=example, example_len=example_len) - if example_len is None: - example_len = self.measure_length(example) - bucket_idx = bisect.bisect_right(buckets, example_len) - # For 2D bucketing we have to refine the initially found bucket_idx, as bisect - # looks primarily at the first index of a tuple (i.e. duration). - # For example, with buckets [(1, 1), (1, 2), (2, 2), (2, 4)] and example (1.5, 3) - # bisect would allocate it to bucket_idx=2 instead of bucket_idx=3. - # To refine, we'll try to push the example to as many buckets to the right as possible, - # as long as they have the same dim0 length (e.g. audio duration) and the example's dim1 - # is smaller than the bin's dim1 (e.g., output token sequence length). - bin_dim0, bin_dim1 = self.max_seq_len_buckets[bucket_idx] - num_buckets = len(self.max_seq_len_buckets) - while ( - (next_idx := bucket_idx + 1) < num_buckets # There is a next bucket - and (bin := self.max_seq_len_buckets[next_idx])[0] == bin_dim0 # The next bucket has the same 1st dim. - # The example's 2nd dim is between that of the current and the next bucket; or, - # the next bucket's 2nd dim is still smaller than example. - and (bin_dim1 < example_len[1] <= bin[1] or bin[1] < example_len[1]) - ): - bucket_idx = next_idx - bin_dim0, bin_dim1 = self.max_seq_len_buckets[bucket_idx] - return bucket_idx - - -@dataclass -class MultimodalFixedBucketBatchSizeConstraint2D(FixedBucketBatchSizeConstraint2D): - token_equivalent_duration: float | None = None - - def measure_length(self, example: Any) -> float: - assert not self.bucketing_2d_enabled, "2D bucketing for multimodal sampling is not yet supported." 
- if hasattr(example, "num_tokens"): - return example.num_tokens - if isinstance(example, Cut): - assert ( - self.token_equivalent_duration is not None - ), "Cannot use MultimodalFixedBucketBatchSizeConstraint with speech data when token_equivalent_duration was not specified." - return example.duration / self.token_equivalent_duration - raise RuntimeError(f"Unsupported example type: {type(example)}") - - -def is_text(example) -> bool: - return isinstance(example, (TextExample, TextPairExample)) - - -Example = TypeVar("Example", bound=Union[Cut, TextExample, TextPairExample]) +def determine_use_iterable_dataset(use_iterable_dataset: bool, config: DictConfig) -> bool: + assert not ( + config.force_map_dataset and config.force_iterable_dataset + ), "Conflicting options: force_map_dataset=True and force_iterable_dataset=True" + use_iterable_dataset = (use_iterable_dataset or config.force_iterable_dataset) and not config.force_map_dataset + return use_iterable_dataset -def tokenize(example: Example, tokenizer) -> Example: +def tokenize(example, tokenizer): if isinstance(example, Cut): for s in example.supervisions: if s.text is not None: s.tokens = np.asarray(tokenizer(s.text, s.language)) - elif isinstance(example, TextExample): - example.tokens = np.asarray(tokenizer(example.text, example.language)) - elif isinstance(example, TextPairExample): - example.source.tokens = np.asarray(tokenizer(example.source.text, example.source.language)) - example.target.tokens = np.asarray(tokenizer(example.source.text, example.target.language)) + elif hasattr(example, "tokenize") and callable(example.tokenize): + example = example.tokenize(tokenizer) else: raise RuntimeError(f"Unsupported type of example: {type(example)}") return example -def tokenize_with_prompt(example: Example, tokenizer, prompt_format: str) -> Example: - # TODO(pzelasko): This mechanism makes it possible to measure the actual output sequence length - # for prompted models such as AED MultiTask (Canary), which includes the transcript and the prompt. - # We intend to extend it for text modality in follow-up work. - if isinstance(example, Cut): - prompt_format_fn = get_prompt_format_fn(prompt_format) - (tokenized_prompted_transcript,), (tokenized_prompt,), (tokenized_transcript,) = prompt_format_fn( - CutSet([example]), tokenizer - ) - example.tokenized_prompted_transcript = tokenized_prompted_transcript - example.tokenized_prompt = tokenized_prompt - example.tokenized_transcript = tokenized_transcript - else: - raise RuntimeError(f"Currently we only support tokenization + prompting during sampling for audio modality.") +def tokenize_with_prompt(example, tokenizer, prompt_format: str | PromptFormatter): + if isinstance(prompt_format, str): + prompt_format = PromptFormatter.resolve(prompt_format)(tokenizer) + encoded = apply_prompt_format_fn(example, prompt_format) + for key, value in encoded.items(): + setattr(example, key, value) return example @@ -624,55 +747,6 @@ def tokenize_with_prompt(example: Example, tokenizer, prompt_format: str) -> Exa # to support pickling lambdas if its ever truly necessary. 
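The slimmed-down tokenize_with_prompt above delegates to apply_prompt_format_fn and attaches its outputs to the example, which is what PromptedAudioToTextLhotseDataset's pre-formatted fast path reads back during collation. A sketch, assuming cut is an existing Lhotse Cut, tokenizer a compatible tokenizer, and "canary" an illustrative prompt format name:

from nemo.collections.common.data.lhotse.dataloader import tokenize_with_prompt

cut = tokenize_with_prompt(cut, tokenizer, "canary")  # a PromptFormatter instance also works

# Fields attached by the formatter and consumed downstream during sampling/collation:
prompted_ids, context_ids, answer_ids = cut.input_ids, cut.context_ids, cut.answer_ids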
-class DurationFilter: - """Callable, returns ``True`` if a cut's duration is in range [d_min, d_max] and ``False`` otherwise.""" - - def __init__(self, d_min: float, d_max: float) -> None: - self.d_min = d_min if d_min is not None else -1.0 - self.d_max = d_max if d_max is not None else float("inf") - - def __call__(self, example) -> bool: - if isinstance(example, Cut): - return self.d_min <= example.duration <= self.d_max - else: - return True # does not apply to text etc. - - -class TokenPerSecondFilter: - """ - Callable, returns ``True`` if a cut's num_tokens (sum of len(tokens) for each supervision) - is in range [tps_min, tps_max] and ``False`` otherwise. - """ - - def __init__(self, tps_min: float, tps_max: float) -> None: - assert tps_min <= tps_max - self.tps_min = tps_min - self.tps_max = tps_max - self.enabled = tps_min > 0 or tps_max < float("inf") - - def __call__(self, example) -> bool: - if not isinstance(example, Cut) or not self.enabled: - return True # pass-through for non-audio examples. - tps = _measure_tps(example) - return self.tps_min <= tps <= self.tps_max - - -def _measure_tokens(cut: Cut) -> int: - if hasattr(cut, "tokenized_prompted_transcript"): - return len(cut.tokenized_prompted_transcript) # tokenized with prompt formatter - supervisions_with_tokens = [s for s in cut.supervisions if hasattr(s, "tokens")] - assert len(supervisions_with_tokens) > 0, ( - "Cannot measure tokens-per-second with untokenized supervisions. " - "Did you forget to provide the tokenizer argument to get_lhotse_dataloader_from_config() method?" - ) - return sum(len(s.tokens) for s in supervisions_with_tokens) - - -def _measure_tps(cut: Cut) -> float: - num_tokens = _measure_tokens(cut) - return num_tokens / cut.duration - - def _normalize_loudness(cuts: CutSet, db_norm: float) -> CutSet: return cuts.normalize_loudness(target=db_norm, mix_first=False) diff --git a/nemo/collections/common/data/lhotse/sampling.py b/nemo/collections/common/data/lhotse/sampling.py new file mode 100644 index 000000000000..d645e3816300 --- /dev/null +++ b/nemo/collections/common/data/lhotse/sampling.py @@ -0,0 +1,317 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=C0116 +import bisect +import logging +import math +from dataclasses import dataclass +from typing import Any, Sequence + +from lhotse.cut import Cut +from lhotse.dataset import SamplingConstraint, TokenConstraint +from lhotse.dataset.sampling.dynamic_bucketing import FixedBucketBatchSizeConstraint +from lhotse.utils import ifnone + +from nemo.collections.common.data.lhotse.text_adapters import Formattable + + +@dataclass +class MultimodalSamplingConstraint(SamplingConstraint): + """ + Sampling strategy that customizes Lhotse samplers to measure sequence lengths as token counts. + It provides a unified interface for audio and text examples - audio duration is converted to + an equivalent token count. 
+ """ + + # How many seconds of audio is a text token worth; balances audio to text ratio in a mini-batch. + # Generally set this to frame_shift * total_subsampling_factor of your audio encoder. + token_equivalent_duration: float | None = None + + # Defines maximum batch size (may be lower than that if batch_length is also specified). + batch_size: int | None = None + + # Defines the total number of tokens in a mini-batch. + # Setting this enables dynamic batch sizes. + # We will use ``token_equivalent_duration`` to convert audio examples to token sizes. + batch_tokens: int | None = None + + # When specified, this value is inversely proportional to the penalty we assign + # to longer examples when measuring their length/duration; + # i.e. large quadratic factor is a small penalty, small quadratic factor is a large penalty. + # Tweaking this helps equalize the GPU memory usage for dynamic batch sizes when using bucketing. + quadratic_factor: float | None = None + + # When False (default), we only consider the input part of the example to determine its length, + # e.g. for a Cut that means its audio duration converted to tokens, for text that means len(context_ids), etc. + # When True, we consider the sum of input and output lengths together (useful mostly for decoder-only models). + measure_total_length: bool = False + + _internal = None + + def __post_init__(self): + self._internal = TokenConstraint( + max_tokens=self.batch_tokens, + max_examples=self.batch_size, + quadratic_length=self.quadratic_factor, + ) + + def add(self, example: Any) -> None: + num_tokens = self.measure_length(example) + example.num_tokens = num_tokens + self._internal.add(example) + + def exceeded(self) -> bool: + return self._internal.exceeded() + + def close_to_exceeding(self) -> bool: + return self._internal.close_to_exceeding() + + def reset(self) -> None: + self._internal.reset() + + def measure_length(self, example: Any) -> float: + if isinstance(example, Cut): + audio_len_in_tokens = math.ceil(example.duration / self.token_equivalent_duration) + if self.measure_total_length: + # Total length of a Cut (audio+text example) is counted as the sum of: + # * num_tokens in each supervision segment ("utterance") in the Cut + # * num_frames of audio (frame=token) given a token-equivalent-duration (basically a frame shift) + text_tokens = 0 + for s in example.supervisions: + if s.has_custom("tokens"): + text_tokens += len(s.tokens) + return audio_len_in_tokens + text_tokens + else: + return audio_len_in_tokens + elif isinstance(example, Formattable): + try: + return example.total_length if self.measure_total_length else example.input_length + except (AttributeError, AssertionError) as e: + raise RuntimeError( + "Couldn't determine the length of a text example; " + "have you provided both prompt_format and tokenizer when instantiating the dataloader?" + ) from e + raise RuntimeError(f"Unsupported example type: {type(example)}") + + +@dataclass +class FixedBucketBatchSizeConstraint2D(FixedBucketBatchSizeConstraint): + """ + Sampling strategy that customizes Lhotse samplers to support 2D bucket selection (it also supports 1D). + It is intended only for audio examples (i.e., Lhotse Cut objects). 
+ """ + + @property + def bucketing_2d_enabled(self) -> bool: + return isinstance(self.max_seq_len_buckets[0], Sequence) and len(self.max_seq_len_buckets[0]) == 2 + + def measure_length(self, example: Cut) -> tuple[float, float] | float: + if self.bucketing_2d_enabled: + return example.duration, _measure_tokens(example) + else: + return example.duration + + def select_bucket(self, buckets: Any, example: Any = None, example_len: Any = None) -> int: + if not self.bucketing_2d_enabled: + return super().select_bucket(buckets=buckets, example=example, example_len=example_len) + if example_len is None: + example_len = self.measure_length(example) + bucket_idx = bisect.bisect_left(buckets, example_len) + # For 2D bucketing we have to refine the initially found bucket_idx, as bisect + # looks primarily at the first index of a tuple (i.e. duration). + # For example, with buckets [(1, 1), (1, 2), (2, 2), (2, 4)] and example (1.5, 3) + # bisect would allocate it to bucket_idx=2 instead of bucket_idx=3. + # To refine, we'll try to push the example to as many buckets to the right as possible, + # as long as they have the same dim0 length (e.g. audio duration) and the example's dim1 + # is smaller than the bin's dim1 (e.g., output token sequence length). + bin_dim0, bin_dim1 = self.max_seq_len_buckets[bucket_idx] + num_buckets = len(self.max_seq_len_buckets) + while ( + (next_idx := bucket_idx + 1) < num_buckets # There is a next bucket + and (bin := self.max_seq_len_buckets[next_idx])[0] == bin_dim0 # The next bucket has the same 1st dim. + # The example's 2nd dim is between that of the current and the next bucket; or, + # the next bucket's 2nd dim is still smaller than example. + and (bin_dim1 < example_len[1] <= bin[1] or bin[1] < example_len[1]) + ): + bucket_idx = next_idx + bin_dim0, bin_dim1 = self.max_seq_len_buckets[bucket_idx] + + if example_len[0] > bin_dim0 or example_len[1] > bin_dim1: + logging.warning( + f"Data sample exceeds 2D bucket specification: lengths={example_len} bucket=({bin_dim0}, {bin_dim1}) " + f"(there is no larger bucket that would fit this example). " + f"We will keep it but expect OutOfMemoryError to happen during the training. " + f"You can fix this by stricter filtering with max_duration, max_tokens, max_tps, max_tpt; " + f"or re-estimating your bucket bins to match the actual data length distribution. " + f"Details: {example=}" + ) + + return bucket_idx + + +@dataclass +class MultimodalFixedBucketBatchSizeConstraint2D(FixedBucketBatchSizeConstraint2D): + """ + Sampling strategy that customizes Lhotse samplers to support both multimodal sampling and 2D bucket selection. + It combines the capabilities of :class:`FixedBucketBatchSizeConstraint2D` and :class:`MultimodalSamplingConstraint` + """ + + # How many seconds of audio is a text token worth; balances audio to text ratio in a mini-batch. + # Generally set this to frame_shift * total_subsampling_factor of your audio encoder. + token_equivalent_duration: float | None = None + + # When False (default), we only consider the input part of the example to determine its length, + # e.g. for a Cut that means its audio duration converted to tokens, for text that means len(context_ids), etc. + # When True, we consider the sum of input and output lengths together (useful mostly for decoder-only models). 
+ measure_total_length: bool = False + + def measure_length(self, example: Any) -> float | tuple[float, float]: + if isinstance(example, Cut): + # Total length of a Cut (audio+text example) is counted as the sum of: + # * num_tokens in each supervision segment ("utterance") in the Cut + # * num_frames of audio (frame=token) given a token-equivalent-duration (basically a frame shift) + audio_len_in_tokens = math.ceil(example.duration / self.token_equivalent_duration) + text_tokens = _measure_tokens(example) + + if self.bucketing_2d_enabled: + return audio_len_in_tokens, text_tokens + + else: + if self.measure_total_length: + return audio_len_in_tokens + text_tokens + else: + return audio_len_in_tokens + + elif isinstance(example, Formattable): + if self.bucketing_2d_enabled: + return example.input_length, example.output_length + else: + return example.total_length if self.measure_total_length else example.input_length + + raise RuntimeError(f"Unsupported example type: {type(example)}") + + +class DurationFilter: + """ + Callable, returns ``True`` if a cut's duration is in range [d_min, d_max] and ``False`` otherwise. + Acts as a pass-through for objects of other type than Cut. + """ + + def __init__(self, d_min: float | None, d_max: float | None) -> None: + self.d_min = ifnone(d_min, -1) + self.d_max = ifnone(d_max, float("inf")) + + def __call__(self, example) -> bool: + if isinstance(example, Cut): + return self.d_min <= example.duration <= self.d_max + else: + return True # does not apply to text etc. + + +class TokenCountFilter: + """ + Callable, returns ``True`` if an example's number of tokens is in range [t_min, t_max] and ``False`` otherwise. + + It is only applicable to data types that derive from class ``Formattable`` and lhotse ``Cut`` objects. + Acts as a passthrough for Cuts. + Raises exception if a non-Formattable and non-Cut data are provided. + + The ``measure_total_length`` option allows to select whether we should filter on context_ids length (=False) + or input_ids length (=True). + The difference is that for decoder-only models, we collapse input and output into a single sequence, + so we should measure the example length using input_ids (measure_total_length=True). + However, for models which have separate inputs and outputs such as encoder-decoder models, + we want to measure the input lengths only here (measure_total_length=False), + and enable ``TokenPerTokenFilter`` for additional filtering on the output sequence length. + """ + + def __init__(self, t_min: float | None, t_max: float | None, measure_total_length: bool) -> None: + self.t_min = ifnone(t_min, -1) + self.t_max = ifnone(t_max, float("inf")) + self.measure_total_length = measure_total_length + self.enabled = self.t_min > 0 or self.t_max < float("inf") + + def __call__(self, example) -> bool: + if not self.enabled or isinstance(example, Cut): + return True # does not apply to Cuts + assert isinstance(example, Formattable), ( + f"TokenCountFilter can only be applied to data examples that derive Formattable class. " + f"Formattable objects define properties input_length, output_length, and total_length that " + f"allow us to select the right sequence length for filtering. We got: {example}" + ) + try: + length = example.total_length if self.measure_total_length else example.input_length + except (AttributeError, AssertionError) as e: + raise RuntimeError( + f"Cannot measure token count for example: {example} " + f"-- did you forget to apply prompt formatting? 
If instantiating Lhotse dataloader, "
+            f"make sure you provided 'prompt_format' option and passed the tokenizer."
+        ) from e
+        return self.t_min <= length <= self.t_max
+
+
+class TokenPerSecondFilter:
+    """
+    Callable, returns ``True`` if a cut's tokens-per-second rate, i.e. its num_tokens
+    (sum of len(tokens) for each supervision) divided by its duration, is in range
+    [tps_min, tps_max] and ``False`` otherwise.
+    Acts as a pass-through for objects of other type than Cut.
+    """
+
+    def __init__(self, tps_min: float | None, tps_max: float | None) -> None:
+        self.tps_min = ifnone(tps_min, -1)
+        self.tps_max = ifnone(tps_max, float("inf"))
+        assert self.tps_min <= self.tps_max, f"{tps_min=} {tps_max=}"
+        self.enabled = self.tps_min > 0 or self.tps_max < float("inf")
+
+    def __call__(self, example) -> bool:
+        if not isinstance(example, Cut) or not self.enabled:
+            return True  # pass-through for non-audio examples.
+        tps = _measure_tps(example)
+        return self.tps_min <= tps <= self.tps_max
+
+
+class TokenPerTokenFilter:
+    """
+    Callable, returns ``True`` if a text example's output-to-input token ratio
+    (len(answer_ids) / len(context_ids)) is in range [tpt_min, tpt_max] and ``False`` otherwise.
+    Acts as a pass-through for audio examples (Cuts).
+    """
+
+    def __init__(self, tpt_min: float | None, tpt_max: float | None) -> None:
+        self.tpt_min = ifnone(tpt_min, -1)
+        self.tpt_max = ifnone(tpt_max, float("inf"))
+        assert self.tpt_min <= self.tpt_max, f"{tpt_min=} {tpt_max=}"
+        self.enabled = self.tpt_min > 0 or self.tpt_max < float("inf")
+
+    def __call__(self, example) -> bool:
+        if isinstance(example, Cut) or not self.enabled:
+            return True  # pass-through for non-text examples.
+        tpt = example.answer_ids.shape[0] / example.context_ids.shape[0]
+        return self.tpt_min <= tpt <= self.tpt_max
+
+
+def _measure_tokens(cut: Cut) -> int:
+    if hasattr(cut, "input_ids"):
+        return len(cut.input_ids)  # tokenized with prompt formatter
+    supervisions_with_tokens = [s for s in cut.supervisions if hasattr(s, "tokens")]
+    assert len(supervisions_with_tokens) > 0, (
+        "Cannot measure the number of tokens with untokenized supervisions. "
+        "Did you forget to provide the tokenizer argument to the get_lhotse_dataloader_from_config() method?"
+    )
+    return sum(len(s.tokens) for s in supervisions_with_tokens)
+
+
+def _measure_tps(cut: Cut) -> float:
+    num_tokens = _measure_tokens(cut)
+    return num_tokens / cut.duration
diff --git a/nemo/collections/common/data/lhotse/text_adapters.py b/nemo/collections/common/data/lhotse/text_adapters.py
index aa53c27cc90e..ae75c50d0e98 100644
--- a/nemo/collections/common/data/lhotse/text_adapters.py
+++ b/nemo/collections/common/data/lhotse/text_adapters.py
@@ -11,17 +11,88 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
- +import math import random +from collections import deque from dataclasses import dataclass +from itertools import groupby from pathlib import Path -from typing import Iterator, Literal, Union +from typing import Iterator, Literal, Optional, Union -from lhotse.cut.text import TextExample, TextPairExample +import numpy as np +import torch +from lhotse import Recording +from lhotse.custom import CustomFieldMixin +from lhotse.cut import Cut from lhotse.dataset.dataloading import resolve_seed -from lhotse.utils import Pathlike +from lhotse.serialization import load_jsonl +from lhotse.shar import AudioTarWriter, JsonlShardWriter, TarIterator +from lhotse.utils import Pathlike, is_valid_url from nemo.collections.common.data.lhotse.nemo_adapters import expand_sharded_filepaths +from nemo.collections.common.data.prompt_fn import apply_prompt_format_fn, registered_prompt_format_fn +from nemo.collections.common.parts.preprocessing.manifest import get_full_path +from nemo.collections.common.tokenizers.aggregate_tokenizer import TokenizerWrapper + +""" +Formattable: mixin class with data fields for prompt formatter outputs and method for +applying prompt formatters to derived data types. +""" + + +class Formattable: + def __init__(self): + self.input_ids: np.ndarray | torch.Tensor | None = None + self.context_ids: np.ndarray | torch.Tensor | None = None + self.answer_ids: np.ndarray | torch.Tensor | None = None + self.mask: np.ndarray | torch.Tensor | None = None + + @property + def input_length(self) -> int | None: + if self.context_ids is None: + return None + return self.context_ids.shape[0] + + @property + def output_length(self) -> int | None: + if self.answer_ids is None: + return None + return self.answer_ids.shape[0] + + @property + def total_length(self) -> int | None: + if self.input_ids is None: + return None + return self.input_ids.shape[0] + + def apply_prompt_format(self, prompt) -> "Formattable": + ans = apply_prompt_format_fn(self, prompt) + self.input_ids = ans["input_ids"] + self.context_ids = ans["context_ids"] + self.answer_ids = ans["answer_ids"] + self.mask = ans["mask"] + return self + + +""" +TextExample: data types, file parser, default prompt formatting logic. +""" + + +@dataclass +class TextExample(Formattable, CustomFieldMixin): + """ + Represents a single text example. Useful e.g. for language modeling. + """ + + text: str + language: str | None = None + tokens: Optional[np.ndarray] = None + custom: dict = None + + def tokenize(self, tokenizer: TokenizerWrapper) -> "TextExample": + self.tokens = np.asarray(tokenizer(self.text, self.language)) + return self @dataclass @@ -47,10 +118,45 @@ def __iter__(self) -> Iterator[TextExample]: for path in paths: with open(path) as f: for line in f: - example = TextExample(line) - if self.language is not None: - example.language = self.language - yield example + yield TextExample(line, language=self.language) + + +@registered_prompt_format_fn(TextExample) +def default_text_example_prompt_format_fn(example: TextExample, prompt): + # It doesn't really make sense to prompt format a single line text example, + # but we implement some default logic for the sake of completeness. + # The default logic here is to treat the whole example as an assistant turn, + # so that the mask is all set to true for the training loss. + return prompt.encode_dialog( + [ + {"role": prompt.OUTPUT_ROLE, "slots": {"message": example.text}}, + ] + ) + + +""" +SourceTargetTextExample: data types, file parser, default prompt formatting logic. 
+""" + + +@dataclass +class SourceTargetTextExample(Formattable, CustomFieldMixin): + """ + Represents a pair of text examples. Useful e.g. for sequence-to-sequence tasks. + Supports a ``question`` field, used as the prompt for LLM. + """ + + source: TextExample + target: TextExample + question: TextExample | None = None + custom: dict = None + + def tokenize(self, tokenizer: TokenizerWrapper) -> "SourceTargetTextExample": + self.source = self.source.tokenize(tokenizer) + self.target = self.target.tokenize(tokenizer) + if self.question is not None: + self.question = self.question.tokenize(tokenizer) + return self @dataclass @@ -60,12 +166,16 @@ class LhotseTextPairAdapter: (e.g., a pair of files with translations in different languages) and wrap them in a ``TextExample`` object to enable dataloading with Lhotse together with training examples in audio modality. + + Provide ``questions_path`` to enable randomly sampling lines with questions. """ source_paths: Union[Pathlike, list[Pathlike]] target_paths: Union[Pathlike, list[Pathlike]] source_language: str | None = None target_language: str | None = None + questions_path: Pathlike = None + questions_language: str = None shuffle_shards: bool = False shard_seed: Union[int, Literal["trng", "randomized"]] = "trng" @@ -81,17 +191,404 @@ def __post_init__(self): self.source_paths = expand_sharded_filepaths(self.source_paths) self.target_paths = expand_sharded_filepaths(self.target_paths) - def __iter__(self) -> Iterator[TextPairExample]: + def __iter__(self) -> Iterator[SourceTargetTextExample]: + seed = resolve_seed(self.shard_seed) + rng = random.Random(seed) paths = list(zip(self.source_paths, self.target_paths)) if self.shuffle_shards: - seed = resolve_seed(self.shard_seed) - random.Random(seed).shuffle(paths) + rng.shuffle(paths) + questions = None + if self.questions_path is not None: + with open(self.questions_path) as f: + questions = [q.strip() for q in f] for source_path, target_path in paths: with open(source_path) as fs, open(target_path) as ft: for ls, lt in zip(fs, ft): - example = TextPairExample(source=TextExample(ls.strip()), target=TextExample(lt.strip())) - if self.source_language is not None: - example.source.language = self.source_language - if self.target_language is not None: - example.target.language = self.target_language - yield example + yield SourceTargetTextExample( + source=TextExample(ls.strip(), language=self.source_language), + target=TextExample(lt.strip(), language=self.target_language), + question=( + TextExample(rng.choice(questions), language=self.questions_language) + if questions is not None + else None + ), + ) + + +@registered_prompt_format_fn(SourceTargetTextExample) +def default_src_tgt_prompt_format_fn(example: SourceTargetTextExample, prompt): + if example.question is not None: + ctx = f"{example.question.text} {example.source.text}" + else: + ctx = example.source.text + return prompt.encode_dialog( + [ + {"role": "user", "slots": {"message": ctx}}, + {"role": prompt.OUTPUT_ROLE, "slots": {"message": example.target.text}}, + ] + ) + + +""" +NeMoSFTExample: data types, file parser, default prompt formatting logic. 
+""" + + +@dataclass +class NeMoSFTExample(Formattable, CustomFieldMixin): + data: dict + language: str | None = None + metadata: dict | None = None + custom: dict = None + + +@registered_prompt_format_fn(NeMoSFTExample) +def default_sft_prompt_format_fn(example: NeMoSFTExample, prompt): + if "system" in example.data and example.data["system"]: + raise RuntimeError( + f"Default prompt format for NeMoSFTExample doesn't support 'system' prompt. " + f"Please specialize the prompt_format_fn for PromptFormatter of type {prompt}" + ) + return prompt.encode_dialog( + [ + {"role": "user" if turn["from"] == "User" else prompt.OUTPUT_ROLE, "slots": {"message": turn["value"]}} + for turn in example.data["conversations"] + ] + ) + + +@dataclass +class NeMoSFTJsonlAdapter: + """ + ``NeMoSFTJsonlAdapter`` is used to read a NeMo LM SFT Chat JSONL file and yield objects of type + ``NeMoSFTExample`` that can be sampled with Lhotse. + + We expect the following schema (contained in a single line per example):: + + { + "conversations": [ + { + "value": str, + "from": "User" | "Assistant", + "canonical_form": str, + "label": str | null + }, + ... + ], + "mask": "User" | "Assistant", + "system": str, + "dataset": str, + "category": str, + } + """ + + paths: Union[Pathlike, list[Pathlike]] + language: str | None = None + shuffle_shards: bool = False + shard_seed: Union[int, Literal["trng", "randomized"]] = "trng" + + def __post_init__(self): + self.paths = expand_sharded_filepaths(self.paths) + + def __iter__(self) -> Iterator[NeMoSFTExample]: + paths = self.paths + if self.shuffle_shards: + seed = resolve_seed(self.shard_seed) + random.Random(seed).shuffle(paths) + for path in paths: + for data in load_jsonl(path): + yield NeMoSFTExample(data, language=self.language) + + +""" +NeMoMultimodalConversation: data types, file parser, default prompt formatting logic. +""" + + +@dataclass +class TextTurn: + value: str + role: str + + def to_dict(self): + return {"type": "text", "from": self.role.title(), "value": self.value} + + +@dataclass +class AudioTurn: + cut: Cut + role: str + audio_locator_tag: str + + def to_dict(self): + assert self.cut.has_recording and self.cut.recording.sources[0].type not in { + "shar", + "memory", + }, "Cannot serialize AudioTurn to dict because it doesn't reference an audio file (the audio is stored in memory)." 
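+        # Roles are serialized in title case ("User"/"Assistant") and the audio is referenced by its
+        # source path, matching the JSONL schema read by NeMoMultimodalConversationJsonlAdapter below.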
+ return { + "type": "audio", + "from": self.role.title(), + "duration": self.cut.duration, + "value": self.cut.recording.sources[0].source, + } + + +@dataclass +class NeMoMultimodalConversation(Formattable, CustomFieldMixin): + id: str + turns: list[TextTurn | AudioTurn] + token_equivalent_duration: float = None + custom: dict = None + + @property + def input_length(self) -> int | None: + if self.context_ids is None: + return None + extra = _compute_num_audio_tokens(self, "context") + return self.context_ids.shape[0] + extra + + @property + def output_length(self) -> int | None: + if self.answer_ids is None: + return None + extra = _compute_num_audio_tokens(self, "answer") + return self.answer_ids.shape[0] + extra + + @property + def total_length(self) -> int | None: + if self.input_ids is None: + return None + extra = _compute_num_audio_tokens(self, "all") + return self.input_ids.shape[0] + extra + + @property + def has_audio_turns(self) -> bool: + return any(isinstance(t, AudioTurn) for t in self.turns) + + @property + def has_text_turns(self) -> bool: + return any(isinstance(t, TextTurn) for t in self.turns) + + def to_dict(self): + return {"id": self.id, "conversations": [t.to_dict() for t in self.turns]} + + def list_cuts(self) -> list[Cut]: + return [turn.cut for turn in self.turns if isinstance(turn, AudioTurn)] + + +def _compute_num_audio_tokens(example: NeMoMultimodalConversation, mode: Literal["context", "answer", "all"]) -> int: + if not example.has_audio_turns: + return 0 + assert example.token_equivalent_duration is not None, ( + "Cannot compute the length of a NeMoMultimodalConversation: " + "token_equivalent_duration must be set in order to estimate the number of tokens equivalent to audio turns. " + "Did you forget to set token_equivalent_duration option in your dataloading config? " + "Tip: generally it should be set to frame_shift * total_subsampling_factor of your audio encoder model." + ) + if mode == "context": + turns = example.turns[:-1] + elif mode == "answer": + turns = example.turns[-1:] + elif mode == "all": + turns = example.turns + else: + raise RuntimeError(f"invalid mode for number of audio token computation: {mode}") + return sum( + [ + # subtract 1 for each audio locator tag as its token will be replaced + math.ceil(turn.cut.duration / example.token_equivalent_duration) - 1 + for turn in turns + if isinstance(turn, AudioTurn) + ] + ) + + +@registered_prompt_format_fn(NeMoMultimodalConversation) +def default_multimodal_conversation_prompt_format_fn(example: NeMoMultimodalConversation, prompt): + # Collapse consecutive same-role turns into single turn for proper prompt formatting. + turns = groupby( + [ + { + "role": turn.role, + "slots": {"message": turn.value if isinstance(turn, TextTurn) else turn.audio_locator_tag}, + } + for turn in example.turns + ], + key=lambda turn: turn["role"], + ) + turns = [ + {"role": role, "slots": {"message": " ".join(t["slots"]["message"] for t in turn_grp)}} + for role, turn_grp in turns + ] + return prompt.encode_dialog(turns) + + +@dataclass +class NeMoMultimodalConversationJsonlAdapter: + """ + ``NeMoMultimodalConversationJsonlAdapter`` is used to read a NeMo multimodal conversation JSONL + and yield objects of type ``NeMoMultimodalConversation`` that can be sampled with Lhotse. 
+ + We expect the following schema (contained in a single line per example):: + + { + "id": str, + "conversations": [ + { + "value": str, # text message or path to audio + "from": "User" | "Assistant", + "type": "text" | "audio", + "duration": float, # only for audio + }, + ... + ], + } + """ + + manifest_filepath: str | list[str] + audio_locator_tag: str + tarred_audio_filepaths: str | list[str] = None + token_equivalent_duration: float = None + shuffle_shards: bool = False + shard_seed: Union[int, Literal["trng", "randomized"]] = "trng" + + def __post_init__(self): + self.manifest_filepath = expand_sharded_filepaths(self.manifest_filepath) + if self.tarred_audio_filepaths is not None: + self.tarred_audio_filepaths = expand_sharded_filepaths(self.tarred_audio_filepaths) + assert len(self.manifest_filepath) == len( + self.tarred_audio_filepaths + ), f"{len(self.manifest_filepath)} != {len(self.tarred_audio_filepaths)}" + + def __iter__(self) -> Iterator[NeMoMultimodalConversation]: + if self.tarred_audio_filepaths is not None: + yield from self._iter_tar() + else: + yield from self._iter_jsonl() + + def _iter_tar(self): + paths = list(zip(self.manifest_filepath, self.tarred_audio_filepaths)) + if self.shuffle_shards: + seed = resolve_seed(self.shard_seed) + random.Random(seed).shuffle(paths) + for jsonl_path, tar_path in paths: + tar = iter(TarIterator(tar_path)) + for data in load_jsonl(jsonl_path): + audio_turns = [t for t in data["conversations"] if t["type"] == "audio"] + cuts = [] + for turn in audio_turns: + recording, audio_path = next(tar) + audio_path = str(audio_path) + cut = recording.to_cut() + assert ( + audio_path == turn['value'] + ), f"Mismatch between JSONL and tar. JSONL defines audio path={turn['value']} but we got the following from tar {audio_path=}" + assert ( + cut.duration == turn["duration"] + ), f"Mismatch between JSONL and tar. 
JSONL defines audio duration={turn['duration']} but we got the following from tar {cut.duration=}" + cuts.append(cut) + cuts = deque(cuts) + yield NeMoMultimodalConversation( + id=data["id"], + turns=[ + ( + TextTurn( + value=turn["value"], + role=turn[ + "from" + ].lower(), # prompt formatter role's are typically lowercase: user/assistant + ) + if turn["type"] == "text" + else AudioTurn( + cut=cuts.popleft(), + role=turn[ + "from" + ].lower(), # prompt formatter role's are typically lowercase: user/assistant + audio_locator_tag=self.audio_locator_tag, + ) + ) + for turn in data["conversations"] + ], + ) + + def _iter_jsonl(self): + paths = self.manifest_filepath + if self.shuffle_shards: + seed = resolve_seed(self.shard_seed) + random.Random(seed).shuffle(paths) + for path in paths: + for data in load_jsonl(path): + yield NeMoMultimodalConversation( + id=data["id"], + turns=[ + ( + TextTurn( + value=turn["value"], + role=turn[ + "from" + ].lower(), # prompt formatter role's are typically lowercase: user/assistant + ) + if turn["type"] == "text" + else AudioTurn( + cut=Recording.from_file(get_full_path(turn["value"], path)).to_cut(), + role=turn[ + "from" + ].lower(), # prompt formatter role's are typically lowercase: user/assistant + audio_locator_tag=self.audio_locator_tag, + ) + ) + for turn in data["conversations"] + ], + token_equivalent_duration=self.token_equivalent_duration, + ) + + +class NeMoMultimodalConversationTarWriter: + def __init__(self, output_dir: str, shard_size: int = 100): + self.output_dir = output_dir + self.shard_size = shard_size + self._reset() + self._setup_writers() + + def write(self, example: NeMoMultimodalConversation): + self._maybe_increment_shard() + serialized = example.to_dict() + for turn in serialized["conversations"]: + if turn["type"] == "audio": + turn["value"] = Path(turn["value"]).with_suffix(".flac").name + self.manifest_writer.write(serialized) + for cut in example.list_cuts(): + assert ( + cut.has_recording + ), f"Cannot serialize multimodal conversation with cuts that have no recordings. We got: {cut}" + self.tar_writer.write(cut.recording.id, cut.load_audio(), cut.sampling_rate, cut.recording) + self.item_cntr += 1 + + def close(self): + self.manifest_writer.close() + self.tar_writer.close() + + def __enter__(self): + self._reset() + self.manifest_writer.__enter__() + self.tar_writer.__enter__() + return self + + def __exit__(self, *args, **kwargs): + self.close() + + def _maybe_increment_shard(self): + if self.item_cntr > 0 and self.item_cntr % self.shard_size == 0: + self.item_cntr = 0 + self.shard_idx += 1 + self._setup_writers() + + def _reset(self): + self.item_cntr = 0 + self.shard_idx = 0 + + def _setup_writers(self): + if not is_valid_url(self.output_dir): # skip dir creation for URLs + Path(self.output_dir).mkdir(exist_ok=True) + self.manifest_writer = JsonlShardWriter(f"{self.output_dir}/manifest_{self.shard_idx}.jsonl", shard_size=None) + self.tar_writer = AudioTarWriter(f"{self.output_dir}/audio_{self.shard_idx}.tar", shard_size=None) diff --git a/nemo/collections/common/data/prompt_fn.py b/nemo/collections/common/data/prompt_fn.py new file mode 100644 index 000000000000..2c80f3f14db5 --- /dev/null +++ b/nemo/collections/common/data/prompt_fn.py @@ -0,0 +1,91 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Callable, Type
+
+import torch
+
+
+PromptFormatFnReturnType = dict[str, list[torch.Tensor]]
+PromptFormatSignature = Callable[[object, object], PromptFormatFnReturnType]
+PROMPT_FORMAT_FNS: dict[tuple[Type, Type] | Type, PromptFormatSignature] = {}
+
+
+def registered_prompt_format_fn(example_type: Type, formatter_type: Type = None):
+    """
+    Decorator for registering prompt format functions.
+    It selects the right prompt formatting function based on the types of the
+    example and the prompt formatter, allowing different strategies for formatting different
+    types of data with different prompt formats.
+
+    When ``formatter_type`` is None, it registers a default prompt format function for the given data type.
+
+    Example::
+
+        >>> @registered_prompt_format_fn(SourceTargetTextExample, Llama2PromptFormatter)
+        ... def my_src_tgt_text_prompt(example, formatter):
+        ...     pass
+        ...
+        ... @registered_prompt_format_fn(Cut, Llama2PromptFormatter)
+        ... def my_audio_prompt(example, formatter):
+        ...     pass
+        ...
+        ... prompt_fn = get_prompt_format_fn(SourceTargetTextExample, Llama2PromptFormatter)
+    """
+
+    def _decorator(prompt_fn: Callable[[object, object], dict[str, list[torch.Tensor]]]):
+        global PROMPT_FORMAT_FNS
+        if formatter_type is None:
+            PROMPT_FORMAT_FNS[example_type] = prompt_fn
+        else:
+            PROMPT_FORMAT_FNS[(example_type, formatter_type)] = prompt_fn
+        return prompt_fn
+
+    return _decorator
+
+
+def get_prompt_format_fn(example: Type | object, prompt: Type | object = None) -> PromptFormatSignature:
+    """See the documentation of ``registered_prompt_format_fn`` above."""
+
+    # If the user provided objects, resolve their types.
+    if not isinstance(example, type):
+        example = type(example)
+    if not isinstance(prompt, type):
+        prompt = type(prompt)
+
+    # For the example type, first try to match it directly, then fall back to its parent classes.
+    for example_subtype in example.mro():
+
+        # First check the match for a specific example type and a specific prompt formatter type,
+        # including all parent types of that prompt formatter type.
+        for prompt_subtype in prompt.mro():
+            if (example_subtype, prompt_subtype) in PROMPT_FORMAT_FNS:
+                return PROMPT_FORMAT_FNS[(example_subtype, prompt_subtype)]
+
+        # Then, for the same example type, fall back to its default prompt format implementation.
+        if example_subtype in PROMPT_FORMAT_FNS:
+            return PROMPT_FORMAT_FNS[example_subtype]
+
+    raise ValueError(
+        f"Unknown prompt format function for ({example}, {prompt}). "
+        f"Available choices are: {list(PROMPT_FORMAT_FNS.keys())}"
+    )
+
+
+def apply_prompt_format_fn(example: object | Type, prompt: object | Type) -> PromptFormatFnReturnType:
+    """
+    Utility for resolving the prompt format function and applying it to an example in one go.
+    See the documentation of ``registered_prompt_format_fn`` above.
+ """ + fn = get_prompt_format_fn(example, prompt) + return fn(example, prompt) diff --git a/nemo/collections/common/prompts/__init__.py b/nemo/collections/common/prompts/__init__.py index 1697c49fdb2f..8b243b49496d 100644 --- a/nemo/collections/common/prompts/__init__.py +++ b/nemo/collections/common/prompts/__init__.py @@ -1,6 +1,5 @@ from nemo.collections.common.prompts.canary import CanaryPromptFormatter from nemo.collections.common.prompts.canary2 import Canary2PromptFormatter -from nemo.collections.common.prompts.fn import get_prompt_format_fn, registered_prompt_format_fn from nemo.collections.common.prompts.formatter import PromptFormatter from nemo.collections.common.prompts.gemma import GemmaPromptFormatter from nemo.collections.common.prompts.llama import Llama2PromptFormatter, Llama3PromptFormatter @@ -10,3 +9,5 @@ Phi2CodePromptFormatter, Phi2QAPromptFormatter, ) +from nemo.collections.common.prompts.plain import PlainPromptFormatter +from nemo.collections.common.prompts.t5nmt import T5NMTPromptFormatter diff --git a/nemo/collections/common/prompts/canary.py b/nemo/collections/common/prompts/canary.py index 3197bf7efe2c..663d2c1e95d5 100644 --- a/nemo/collections/common/prompts/canary.py +++ b/nemo/collections/common/prompts/canary.py @@ -15,13 +15,12 @@ from typing import Any import torch -from lhotse import CutSet, MonoCut -from lhotse.cut import MixedCut +from lhotse import MonoCut +from lhotse.cut import Cut, MixedCut from lhotse.utils import ifnone -from nemo.collections.common.prompts.fn import registered_prompt_format_fn +from nemo.collections.common.data.prompt_fn import registered_prompt_format_fn from nemo.collections.common.prompts.formatter import Modality, PromptFormatter -from nemo.collections.common.tokenizers import TokenizerSpec from nemo.collections.common.tokenizers.canary_tokenizer import ( CANARY_BOS, CANARY_EOS, @@ -108,10 +107,8 @@ def map_manifest_values_to_special_tokens(slot_values: dict[str, str]) -> dict[s return slot_values -@registered_prompt_format_fn -def canary( - cuts: CutSet, tokenizer: TokenizerSpec -) -> tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor]]: +@registered_prompt_format_fn(Cut, CanaryPromptFormatter) +def canary(cut: Cut, prompt: CanaryPromptFormatter) -> dict[str, torch.Tensor]: """ Prepend and append control tokens to the token sequence as per Canary format. @@ -134,62 +131,51 @@ def canary( (i.e., spoken language in the recording) and the second occurrence is for the "target" language (i.e., the language in which we are going to output the text). """ - formatter = CanaryPromptFormatter(tokenizer) - - prompts_with_answers, prompts, answers = [], [], [] - for cut in cuts: - if isinstance(cut, MixedCut): - cut = cut._first_non_padding_cut - if not isinstance(cut, MonoCut): - raise TypeError( - f"Expected input audio to have a single channel (required MonoCut/MixedCut, but we received: {cut=})" - ) - - # first, validate the utterance - expected_slots = set(formatter.get_slots("user")) - missing_keys = expected_slots - set(cut.custom) - if "task" in missing_keys and "taskname" in cut.custom: - # Compatibility with "old" Canary manifest format. - # For compatbility with inference options, this slot is now called "task". - cut.custom["task"] = cut.custom["taskname"] - missing_keys.remove("task") - if missing_keys: - raise RuntimeError( - f"We found cut with ID {cut.id} that is missing the following keys: {missing_keys}" - f"Please ensure that every utterance in the input manifests contains these keys." 
- ) - - turns = [ - dict( - role="user", - slots={ - **{slot: cut.custom[slot] for slot in expected_slots}, - formatter.PROMPT_LANGUAGE_SLOT: CANARY_SPECIAL_TOKENIZER, - }, - ) - ] - # If data has no transcript, create empty response with only. - text = ' '.join(s.text for s in cut.supervisions if s.text is not None) - turns.append( - dict( - role="assistant", - slots={ - "text": text, - formatter.PROMPT_LANGUAGE_SLOT: ifnone( - cut.supervisions[0].language, cut.custom.get("target_lang") - ), - }, - ), + if isinstance(cut, MixedCut): + cut = cut._first_non_padding_cut + if not isinstance(cut, MonoCut): + raise TypeError( + f"Expected input audio to have a single channel (required MonoCut/MixedCut, but we received: {cut=})" ) - encoded = formatter.encode_dialog(turns) - prompts_with_answers.append(encoded["input_ids"]) - prompts.append(encoded["context_ids"]) - if "answer_ids" in encoded: - assert ( - encoded["answer_ids"][-1].item() == formatter.tokenizer.eos - ), f"Expected the last token in answer_ids to be EOS, but we got {encoded['answer_ids']=}" - answers.append(encoded["answer_ids"][:-1]) # Strip Canary's EOS - else: - answers.append([]) - - return prompts_with_answers, prompts, answers + + # first, validate the utterance + expected_slots = set(prompt.get_slots("user")) + missing_keys = expected_slots - set(cut.custom) + if "task" in missing_keys and "taskname" in cut.custom: + # Compatibility with "old" Canary manifest format. + # For compatbility with inference options, this slot is now called "task". + cut.custom["task"] = cut.custom["taskname"] + missing_keys.remove("task") + if missing_keys: + raise RuntimeError( + f"We found cut with ID {cut.id} that is missing the following keys: {missing_keys}" + f"Please ensure that every utterance in the input manifests contains these keys." + ) + + turns = [ + dict( + role="user", + slots={ + **{slot: cut.custom[slot] for slot in expected_slots}, + prompt.PROMPT_LANGUAGE_SLOT: CANARY_SPECIAL_TOKENIZER, + }, + ) + ] + # If data has no transcript, create empty response with only. + text = ' '.join(s.text for s in cut.supervisions if s.text is not None) + turns.append( + dict( + role="assistant", + slots={ + "text": text, + prompt.PROMPT_LANGUAGE_SLOT: ifnone(cut.supervisions[0].language, cut.custom.get("target_lang")), + }, + ), + ) + + ans = prompt.encode_dialog(turns) + assert ( + ans["answer_ids"][-1].item() == prompt.tokenizer.eos + ), f"Expected the last token in answer_ids to be EOS, but we got {ans['answer_ids']}" + ans["answer_ids"] = ans["answer_ids"][:-1] # Strip Canary's EOS + return ans diff --git a/nemo/collections/common/prompts/canary2.py b/nemo/collections/common/prompts/canary2.py index 97facac7b1bc..3aed7a3bfa10 100644 --- a/nemo/collections/common/prompts/canary2.py +++ b/nemo/collections/common/prompts/canary2.py @@ -11,14 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=C0115 +# pylint: disable=C0116 +# pylint: disable=C0301 import torch -from lhotse import CutSet, MonoCut -from lhotse.cut import MixedCut +from lhotse import MonoCut +from lhotse.cut import Cut, MixedCut from lhotse.utils import ifnone -from nemo.collections.common.prompts.fn import registered_prompt_format_fn +from nemo.collections.common.data.prompt_fn import registered_prompt_format_fn from nemo.collections.common.prompts.formatter import Modality, PromptFormatter -from nemo.collections.common.tokenizers import TokenizerSpec from nemo.collections.common.tokenizers.canary_tokenizer import ( CANARY2_BOCTX, CANARY_BOS, @@ -145,71 +147,57 @@ def map_manifest_values_to_special_tokens(slot_values: dict[str, str]) -> dict[s return slot_values -@registered_prompt_format_fn -def canary2( - cuts: CutSet, tokenizer: TokenizerSpec -) -> tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor]]: +@registered_prompt_format_fn(Cut, Canary2PromptFormatter) +def canary2(cut: Cut, prompt: Canary2PromptFormatter) -> dict[str, torch.Tensor]: """ Prepend and append control tokens to the token sequence as per Canary 2.0 format. The prompt format syntax is defined in :class:`Canary2PromptFormatter` """ - formatter = Canary2PromptFormatter(tokenizer) - - prompts_with_answers, prompts, answers = [], [], [] - for cut in cuts: - if isinstance(cut, MixedCut): - cut = cut._first_non_padding_cut - if not isinstance(cut, MonoCut): - raise TypeError( - f"Expected input audio to have a single channel (required MonoCut/MixedCut, but we received: {cut=})" - ) - - # first, validate the utterance - expected_slots = {"source_lang", "target_lang"} - missing_keys = expected_slots - set(cut.custom) - if missing_keys: - raise RuntimeError( - f"We found cut with ID {cut.id} that is missing the following keys: {missing_keys}" - f"Please ensure that every utterance in the input manifests contains these keys." - ) - - optional_slots = { - "decodercontext": "", - "emotion": "<|emo:undefined|>", - "itn": "<|noitn|>", - "timestamp": "<|notimestamp|>", - "diarize": "<|nodiarize|>", - "pnc": "<|pnc|>", # consistent with canary1 - } - slots = {slot: cut.custom[slot] for slot in expected_slots} - slots[formatter.PROMPT_LANGUAGE_SLOT] = CANARY_SPECIAL_TOKENIZER - for k, v in optional_slots.items(): - slots[k] = cut.custom[k] if k in cut.custom else v - - turns = [dict(role="user", slots=slots)] - # If data has no transcript, create empty response with only. 
- text = ' '.join(s.text for s in cut.supervisions if s.text is not None) - turns.append( - dict( - role="assistant", - slots={ - "text": text, - formatter.PROMPT_LANGUAGE_SLOT: ifnone( - cut.supervisions[0].language, cut.custom.get("target_lang") - ), - }, - ), + if isinstance(cut, MixedCut): + cut = cut._first_non_padding_cut + if not isinstance(cut, MonoCut): + raise TypeError( + f"Expected input audio to have a single channel (required MonoCut/MixedCut, but we received: {cut=})" ) - encoded = formatter.encode_dialog(turns) - prompts_with_answers.append(encoded["input_ids"]) - prompts.append(encoded["context_ids"]) - if "answer_ids" in encoded: - assert ( - encoded["answer_ids"][-1].item() == formatter.tokenizer.eos - ), f"Expected the last token in answer_ids to be EOS, but we got {encoded['answer_ids']=}" - answers.append(encoded["answer_ids"][:-1]) # Strip Canary's EOS - else: - answers.append([]) - - return prompts_with_answers, prompts, answers + + # first, validate the utterance + expected_slots = {"source_lang", "target_lang"} + missing_keys = expected_slots - set(cut.custom) + if missing_keys: + raise RuntimeError( + f"We found cut with ID {cut.id} that is missing the following keys: {missing_keys}" + f"Please ensure that every utterance in the input manifests contains these keys." + ) + + optional_slots = { + "decodercontext": "", + "emotion": "<|emo:undefined|>", + "itn": "<|noitn|>", + "timestamp": "<|notimestamp|>", + "diarize": "<|nodiarize|>", + "pnc": "<|pnc|>", # consistent with canary1 + } + slots = {slot: cut.custom[slot] for slot in expected_slots} + slots[prompt.PROMPT_LANGUAGE_SLOT] = CANARY_SPECIAL_TOKENIZER + for k, v in optional_slots.items(): + slots[k] = cut.custom[k] if k in cut.custom else v + + turns = [dict(role="user", slots=slots)] + # If data has no transcript, create empty response with only. + text = ' '.join(s.text for s in cut.supervisions if s.text is not None) + turns.append( + dict( + role="assistant", + slots={ + "text": text, + prompt.PROMPT_LANGUAGE_SLOT: ifnone(cut.supervisions[0].language, cut.custom.get("target_lang")), + }, + ), + ) + ans = prompt.encode_dialog(turns) + assert ( + ans["answer_ids"][-1].item() == prompt.tokenizer.eos + ), f"Expected the last token in answer_ids to be EOS, but we got {ans['answer_ids']}" + ans["answer_ids"] = ans["answer_ids"][:-1] # Strip Canary's EOS + return ans diff --git a/nemo/collections/common/prompts/fn.py b/nemo/collections/common/prompts/fn.py deleted file mode 100644 index a93dabb01f6a..000000000000 --- a/nemo/collections/common/prompts/fn.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import Callable, Sequence - -import torch -from lhotse import CutSet - -from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec - -PROMPT_FORMAT_FNS = {} - - -def registered_prompt_format_fn( - prompt_fn: Callable[[CutSet, TokenizerSpec], tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor]]] -): - """ - Decorator for registering prompt functions under a name. - - Example:: - - >>> @registered_prompt_format_fn - ... def my_prompt(cuts, tokenizer): - ... pass - ... - ... prompt_fn = get_prompt_format_fn("my_prompt") - """ - global PROMPT_FORMAT_FNS - - PROMPT_FORMAT_FNS[prompt_fn.__name__] = prompt_fn - return prompt_fn - - -def get_prompt_format_fn( - name: str, -) -> Callable[[CutSet, TokenizerSpec], tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor]]]: - if name not in PROMPT_FORMAT_FNS: - raise ValueError( - f"Unknown prompt format function name: {name} " f"(must be one of: {list(PROMPT_FORMAT_FNS.keys())}" - ) - return PROMPT_FORMAT_FNS[name] diff --git a/nemo/collections/common/prompts/formatter.py b/nemo/collections/common/prompts/formatter.py index 9fb499254068..91fdab767ace 100644 --- a/nemo/collections/common/prompts/formatter.py +++ b/nemo/collections/common/prompts/formatter.py @@ -13,7 +13,6 @@ # limitations under the License. from abc import ABC -from enum import Enum from functools import lru_cache from typing import Any, Type @@ -156,6 +155,15 @@ class PromptFormatter(ABC): # PromptFormatter.encode_dialog() ends with this role, it indicates a training example. OUTPUT_ROLE = None + # When set to true, we will insert BOS/EOS symbol at the very beginning/end of the dialog + # (i.e., not before/after every turn). + # This is intended specifically for LLMs that use sentencepiece tokenizers with BOS/EOS + # that don't normally exist in the tokenizer's vocab (i.e., no string input generates them + # and you must insert them programmatically); + # see: https://github.com/google/sentencepiece/issues/102#issuecomment-397150427 + INSERT_BOS = False + INSERT_EOS = False + # Internal reserved field. _REGISTERED_FORMATTERS = {} @@ -255,6 +263,11 @@ def encode_dialog(self, turns: list[dict]) -> dict[str, torch.Tensor]: turn_token_counts = [] turn_mask_values = [] + if self.INSERT_BOS: + turn_tokens.append(self.tokenizer.bos) + turn_token_counts.append(1) + turn_mask_values.append(False) + if "preamble" in self.TEMPLATE: preamble_turns = [idx for idx, t in enumerate(turns) if t["role"] == "preamble"] if not preamble_turns: @@ -281,6 +294,12 @@ def encode_dialog(self, turns: list[dict]) -> dict[str, torch.Tensor]: turn_token_counts.append(len(tokens)) turn_mask_values.append(role == self.OUTPUT_ROLE) + # Insert EOS only when the last turn comes from the OUTPUT_ROLE. + if self.INSERT_EOS and turns[-1]["role"] == self.OUTPUT_ROLE: + turn_tokens.append(self.tokenizer.eos) + turn_token_counts[-1] += 1 + turn_mask_values.append(True) + ans = {"input_ids": torch.tensor(turn_tokens, dtype=torch.long)} if turn_mask_values[-1]: # The last turn comes from OUTPUT_ROLE, i.e. it's a response from the system. 
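For illustration, a minimal sketch of how the prompt-format registry added in prompt_fn.py resolves these per-(example type, formatter type) registrations at runtime; the imports below come from this patch, while `tokenizer` and the example strings are assumed placeholders rather than part of the change:

    from nemo.collections.common.data.lhotse.text_adapters import SourceTargetTextExample, TextExample
    from nemo.collections.common.data.prompt_fn import apply_prompt_format_fn, get_prompt_format_fn
    from nemo.collections.common.prompts import Llama2PromptFormatter

    # Assumption: `tokenizer` is any NeMo TokenizerSpec compatible with the Llama 2 prompt format.
    formatter = Llama2PromptFormatter(tokenizer)

    example = SourceTargetTextExample(
        source=TextExample("guten morgen", language="de"),
        target=TextExample("good morning", language="en"),
    )

    # Resolution order: (SourceTargetTextExample, Llama2PromptFormatter), then parent formatter classes,
    # then the default function registered for SourceTargetTextExample alone.
    fn = get_prompt_format_fn(example, formatter)
    encoded = fn(example, formatter)  # dict with input_ids, context_ids, answer_ids (and mask)
    # Equivalent one-call form:
    encoded = apply_prompt_format_fn(example, formatter)
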
diff --git a/nemo/collections/common/prompts/gemma.py b/nemo/collections/common/prompts/gemma.py index 22b5ac5c13b7..6784ffe08013 100644 --- a/nemo/collections/common/prompts/gemma.py +++ b/nemo/collections/common/prompts/gemma.py @@ -16,6 +16,9 @@ Implemented following the guide at https://www.promptingguide.ai/models/gemma#gemma-7b-prompt-format """ +from lhotse.cut import Cut, MixedCut + +from nemo.collections.common.data.prompt_fn import registered_prompt_format_fn from nemo.collections.common.prompts.formatter import Modality, PromptFormatter GEMMA_BOS = "" @@ -26,6 +29,8 @@ class GemmaPromptFormatter(PromptFormatter): NAME = "gemma" OUTPUT_ROLE = "assistant" + INSERT_BOS = True + INSERT_EOS = True TEMPLATE = { "user": { "template": f"{GEMMA_BOS}user\n|message|{GEMMA_END_OF_TURN}\n{GEMMA_BOS}model\n", @@ -41,3 +46,21 @@ class GemmaPromptFormatter(PromptFormatter): }, }, } + + +@registered_prompt_format_fn(Cut, GemmaPromptFormatter) +def gemma1(cut: Cut, prompt: GemmaPromptFormatter): + if isinstance(cut, MixedCut): + cut = cut.first_non_padding_cut + if cut.has_custom("context"): + context = cut.context + elif cut.has_custom("question"): + context = cut.question + else: + context = cut.default_context + + turns = [{"role": "user", "slots": {"message": context}}] + if (answer := cut.supervisions[0].text) is not None: + turns.append({"role": "assistant", "slots": {"message": answer}}) + + return prompt.encode_dialog(turns) diff --git a/nemo/collections/common/prompts/llama.py b/nemo/collections/common/prompts/llama.py index affbc94da904..3c235756aa0a 100644 --- a/nemo/collections/common/prompts/llama.py +++ b/nemo/collections/common/prompts/llama.py @@ -11,7 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import torch +from lhotse.cut import Cut, MixedCut +from nemo.collections.common.data.lhotse.text_adapters import NeMoSFTExample, SourceTargetTextExample +from nemo.collections.common.data.prompt_fn import registered_prompt_format_fn from nemo.collections.common.prompts.formatter import BOS_SLOT, EOS_SLOT, Modality, PromptFormatter @@ -32,7 +36,7 @@ class Llama2PromptFormatter(PromptFormatter): }, }, "user": { - "template": "|bos|[INST] |message| [/INST]", + "template": f"{BOS_SLOT}[INST] |message| [/INST]", "slots": { "message": Modality.Text, }, @@ -46,6 +50,69 @@ class Llama2PromptFormatter(PromptFormatter): } +@registered_prompt_format_fn(Cut, Llama2PromptFormatter) +def llama2(cut: Cut, prompt: Llama2PromptFormatter) -> dict[str, torch.Tensor]: + if isinstance(cut, MixedCut): + cut = cut.first_non_padding_cut + if cut.has_custom("context"): + context = cut.context + elif cut.has_custom("question"): + context = cut.question + else: + context = cut.default_context + + turns = [] + if cut.has_custom("system_prompt"): + turns.append({"role": "system_and_user", "slots": {"system": cut.system_prompt, "message": context}}) + else: + turns.append({"role": "user", "slots": {"message": context}}) + if (answer := cut.supervisions[0].text) is not None: + turns.append({"role": "assistant", "slots": {"message": answer}}) + return prompt.encode_dialog(turns) + + +@registered_prompt_format_fn(SourceTargetTextExample, Llama2PromptFormatter) +def llama2_src_tgt_text_example(example: SourceTargetTextExample, prompt: Llama2PromptFormatter): + if example.question is not None: + user_turn = { + "role": "system_and_user", + "slots": {"system": example.question.text, "message": example.source.text}, + } + else: + user_turn = { + "role": "user", + "slots": {"message": example.source.text}, + } + return prompt.encode_dialog( + [ + user_turn, + {"role": prompt.OUTPUT_ROLE, "slots": {"message": example.target.text}}, + ] + ) + + +@registered_prompt_format_fn(NeMoSFTExample, Llama2PromptFormatter) +def llama2_sft_text_example(example: NeMoSFTExample, prompt: Llama2PromptFormatter): + first_user_turn = example.data["conversations"][0]["value"] + if "system" in example.data and example.data["system"]: + first_turn = { + "role": "system_and_user", + "slots": {"system": example.data["system"], "message": first_user_turn}, + } + else: + first_turn = { + "role": "user", + "slots": {"message": first_user_turn}, + } + return prompt.encode_dialog( + [first_turn] + + [ + {"role": "user" if turn["from"] == "User" else prompt.OUTPUT_ROLE, "slots": {"message": turn["value"]}} + for turn in example.data["conversations"][1:] + ] + ) + + LLAMA3_BOS = "<|begin_of_text|>" LLAMA3_HEADER_BEGIN = "<|start_header_id|>" LLAMA3_HEADER_END = "<|end_header_id|>" diff --git a/nemo/collections/common/prompts/plain.py b/nemo/collections/common/prompts/plain.py new file mode 100644 index 000000000000..ed33d955702d --- /dev/null +++ b/nemo/collections/common/prompts/plain.py @@ -0,0 +1,56 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from lhotse.cut import Cut, MixedCut + +from nemo.collections.common.data.prompt_fn import registered_prompt_format_fn +from nemo.collections.common.prompts.formatter import Modality, PromptFormatter + + +class PlainPromptFormatter(PromptFormatter): + """ + Plain prompt formatter: there is nothing being added to the user and assistants turns. + """ + + NAME = "plain" + OUTPUT_ROLE = "assistant" + TEMPLATE = { + "user": { + "template": f"|message|", + "slots": { + "message": Modality.Text, + }, + }, + OUTPUT_ROLE: { + "template": f"|message|", + "slots": { + "message": Modality.Text, + }, + }, + } + + +@registered_prompt_format_fn(Cut, PlainPromptFormatter) +def plain(cut: Cut, prompt: PlainPromptFormatter): + if isinstance(cut, MixedCut): + cut = cut.first_non_padding_cut + if cut.has_custom("context"): + ctx = cut.context + else: + ctx = "" + + turns = [{"role": "user", "slots": {"message": ctx}}] + if (answer := cut.supervisions[0].text) is not None: + turns.append({"role": "assistant", "slots": {"message": answer}}) + + return prompt.encode_dialog(turns) diff --git a/nemo/collections/common/prompts/t5nmt.py b/nemo/collections/common/prompts/t5nmt.py new file mode 100644 index 000000000000..d0f90574bc81 --- /dev/null +++ b/nemo/collections/common/prompts/t5nmt.py @@ -0,0 +1,106 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=C0116 +# pylint: disable=C0301 +from collections import defaultdict + +import torch +from lhotse import MonoCut +from lhotse.cut import Cut, MixedCut + +from nemo.collections.common.data.lhotse.text_adapters import SourceTargetTextExample +from nemo.collections.common.data.prompt_fn import registered_prompt_format_fn +from nemo.collections.common.prompts.formatter import Modality, PromptFormatter + + +class T5NMTPromptFormatter(PromptFormatter): + """ + The default prompt format for Megatron T5 based neural machine translation models. + Based on: https://github.com/NVIDIA/NeMo/blob/ad5ef750e351edbb5eeb7eb6df2d0c804819600f/nemo/collections/nlp/models/machine_translation/megatron_nmt_model.py#L790 + """ + + NAME = "t5nmt" + OUTPUT_ROLE = "assistant" + TEMPLATE = { + "user": { + "template": f"|target_lang| |message|", + "slots": { + "target_lang": Modality.Text, + "message": Modality.Text, + }, + }, + OUTPUT_ROLE: { + "template": f"|message|", + "slots": { + "message": Modality.Text, + }, + }, + } + + def encode_turn(self, prompt_template: str, expected_slots: dict, slot_values: dict) -> list[int]: + # Automatically adds "<" and ">" to target lang token for T5 NMT. 
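+        # E.g. a "de" slot value becomes "<de>"; values already wrapped in angle brackets are left unchanged.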
+ # Based on: https://github.com/NVIDIA/NeMo/blob/ad5ef750e351edbb5eeb7eb6df2d0c804819600f/nemo/collections/nlp/models/machine_translation/mt_enc_dec_model.py#L307 + if (val := slot_values.get("target_lang")) is not None: + if not val.startswith("<") or not val.endswith(">"): + slot_values["target_lang"] = f"<{val}>" + return super().encode_turn( + prompt_template=prompt_template, expected_slots=expected_slots, slot_values=slot_values + ) + + +@registered_prompt_format_fn(Cut, T5NMTPromptFormatter) +def t5nmt(cut: Cut, prompt: T5NMTPromptFormatter) -> dict[str, torch.Tensor]: + ans = defaultdict(list) + if isinstance(cut, MixedCut): + cut = cut._first_non_padding_cut + if not isinstance(cut, MonoCut): + raise TypeError( + f"Expected input audio to have a single channel (required MonoCut/MixedCut, but we received: {cut=})" + ) + + if hasattr(cut, "context"): + context = cut.context + elif hasattr(cut, "default_context"): + context = cut.default_context + else: + raise RuntimeError("Missing context/default_context custom field in cut: {cut}") + + turns = [ + dict( + role="user", + # "message" slot is the audio portion of the cut; currently it is populated inside model's forward. + slots={"target_lang": context, "message": ""}, + ), + ] + if len(cut.supervisions) > 0 and cut.supervisions[0].text is not None: + turns.append( + dict( + role=prompt.OUTPUT_ROLE, + slots={"message": cut.supervisions[0].text}, + ) + ) + return prompt.encode_dialog(turns) + + +@registered_prompt_format_fn(SourceTargetTextExample, T5NMTPromptFormatter) +def t5nmt_src_tgt_text_example(example: SourceTargetTextExample, prompt: T5NMTPromptFormatter): + ctx = f"<{example.target.language}>" + if example.has_custom("extra_prompt"): + ctx = f"{ctx} {example.extra_prompt}" + return prompt.encode_dialog( + [ + {"role": "user", "slots": {"message": example.source.text, "target_lang": ctx}}, + {"role": prompt.OUTPUT_ROLE, "slots": {"message": example.target.text}}, + ] + ) diff --git a/nemo/collections/common/tokenizers/aggregate_tokenizer.py b/nemo/collections/common/tokenizers/aggregate_tokenizer.py index 66ec28ebda4d..1867276d0305 100644 --- a/nemo/collections/common/tokenizers/aggregate_tokenizer.py +++ b/nemo/collections/common/tokenizers/aggregate_tokenizer.py @@ -238,3 +238,31 @@ def vocab(self): @property def langs(self): return list(self.tokenizers_dict.keys()) + + +class TokenizerWrapper: + """ + Provide a unified interface for NeMo Tokenizer, AggregateTokenizer, and (char) Parser. + """ + + def __init__(self, tokenizer): + self._tokenizer = tokenizer + if isinstance(tokenizer, AggregateTokenizer): + self._impl = self._call_agg_tokenizer + elif isinstance(tokenizer, TokenizerSpec): + self._impl = self._call_tokenizer + else: + self._impl = self._call_parser + + def __call__(self, text: str, lang: str | None = None): + return self._impl(text, lang) + + def _call_agg_tokenizer(self, text: str, lang: str | None = None): + assert lang is not None, "Expected 'lang' to be set for AggregateTokenizer." 
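+        # AggregateTokenizer routes the text to the sub-tokenizer registered for this language.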
+ return self._tokenizer.text_to_ids(text, lang) + + def _call_tokenizer(self, text: str, lang: str | None = None): + return self._tokenizer.text_to_ids(text) + + def _call_parser(self, text: str, lang: str | None = None): + return self._tokenizer(text) diff --git a/nemo/collections/multimodal/speech_llm/data/audio_text_dataset.py b/nemo/collections/multimodal/speech_llm/data/audio_text_dataset.py index ee194b74f993..def0aa7e2874 100644 --- a/nemo/collections/multimodal/speech_llm/data/audio_text_dataset.py +++ b/nemo/collections/multimodal/speech_llm/data/audio_text_dataset.py @@ -70,7 +70,7 @@ def _audio_collate_fn(audio_signals, audio_lengths): """ max_audio_len = 0 - has_audio = audio_lengths[0] is not None + has_audio = len(audio_lengths) > 0 and audio_lengths[0] is not None if has_audio: max_audio_len = max(audio_lengths).item() diff --git a/nemo/collections/multimodal/speech_llm/data/build_dataset.py b/nemo/collections/multimodal/speech_llm/data/build_dataset.py index 698a01836169..8d64632210a4 100644 --- a/nemo/collections/multimodal/speech_llm/data/build_dataset.py +++ b/nemo/collections/multimodal/speech_llm/data/build_dataset.py @@ -14,6 +14,7 @@ import copy from pathlib import Path +import omegaconf import torch from megatron.core import parallel_state from omegaconf.omegaconf import OmegaConf @@ -25,7 +26,7 @@ get_tarred_audio_text_dataset_from_config, ) from nemo.collections.multimodal.speech_llm.data.lhotse_dataset import LhotseAudioQuestionAnswerDataset -from nemo.collections.multimodal.speech_llm.parts.utils.data_utils import TextProcessing +from nemo.collections.multimodal.speech_llm.parts.utils.data_utils import PromptFormatterTextProcessing from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset from nemo.collections.nlp.data.language_modeling.megatron.megatron_batch_samplers import ( MegatronPretrainingBatchSampler, @@ -54,28 +55,11 @@ def build_speechllm_dataset(model_instance, data_cfg, is_train): # Notably, the data weights are controlled by either bucketing_weights # or concat_sampling_probabilities depending on the dataset type. if data_cfg.get("use_lhotse"): - tp = TextProcessing( + tp = PromptFormatterTextProcessing( model_instance.tokenizer, - max_seq_length=data_cfg["max_seq_length"], - min_seq_length=data_cfg["min_seq_length"], - add_bos=data_cfg.get('add_bos', False), - add_eos=data_cfg.get('add_eos', False), - add_sep=data_cfg.get('add_sep', False), - sep_id=model_instance.sep_id, - seed=data_cfg.get('seed', 1234), - separate_prompt_and_response_with_newline=data_cfg.get('separate_prompt_and_response_with_newline', True), - answer_only_loss=model_instance.cfg.get('answer_only_loss', True), - truncation_field=data_cfg.get('truncation_field', 'context'), - pad_to_max_length=data_cfg.get('pad_to_max_length', False), - prompt_template=data_cfg.get('prompt_template', None), - virtual_tokens=model_instance.virtual_tokens, - tokens_to_generate=data_cfg.get( - 'tokens_to_generate', 0 - ), # used at inference time to allocate tensor positions for tokens that will be generated by inf procedure. 
- context_key=data_cfg.get('context_key', 'context'), - answer_key=data_cfg.get('answer_key', 'answer'), - end_string=data_cfg.get('end_string', None), - sample_alpha=data_cfg.get('sample_alpha', None), + prompt_format=data_cfg.get("prompt_format", "plain"), + audio_locator=data_cfg.get("audio_locator"), + max_seq_length=data_cfg.get("max_seq_length", 8192), ) return LhotseAudioQuestionAnswerDataset( tp, @@ -122,11 +106,12 @@ def build_speechllm_dataloader(dataset, data_cfg, consumed_samples=0, is_predict global_rank=parallel_state.get_data_parallel_rank(), world_size=parallel_state.get_data_parallel_world_size(), dataset=dataset, + tokenizer=dataset.text_processor.tokenizer, ) # for eval, we need to create separate dataset so as to report splitted numbers else: dls = [] - if hasattr(data_cfg, 'manifest_filepath'): + if data_cfg.get('manifest_filepath') is not None: manifest_filepath = data_cfg.manifest_filepath for cur_manifest_filepath in manifest_filepath: conf = copy.deepcopy(data_cfg) @@ -137,6 +122,7 @@ def build_speechllm_dataloader(dataset, data_cfg, consumed_samples=0, is_predict global_rank=parallel_state.get_data_parallel_rank(), world_size=parallel_state.get_data_parallel_world_size(), dataset=dataset, + tokenizer=dataset.text_processor.tokenizer, ) ) else: @@ -147,16 +133,25 @@ def build_speechllm_dataloader(dataset, data_cfg, consumed_samples=0, is_predict assert len(input_cfg) == 1, "Only one dataset with multiple manifest paths is supported for eval" data_cfg.input_cfg = input_cfg # for getting names - manifest_filepath = [ic.manifest_filepath for ic in input_cfg[0].input_cfg] + manifest_filepath = [] + for ic in input_cfg[0].input_cfg: + if hasattr(ic, "manifest_filepath"): + manifest_filepath.append(ic.manifest_filepath) + else: + assert ic.type == "txt_pair" + manifest_filepath.append(ic.target_paths) for cur_input_cfg in input_cfg[0].input_cfg: conf = copy.deepcopy(data_cfg) conf.input_cfg[0].input_cfg = [cur_input_cfg] + OmegaConf.set_struct(conf, False) + conf.force_finite = True dls.append( get_lhotse_dataloader_from_config( conf, global_rank=parallel_state.get_data_parallel_rank(), world_size=parallel_state.get_data_parallel_world_size(), dataset=dataset, + tokenizer=dataset.text_processor.tokenizer, ) ) diff --git a/nemo/collections/multimodal/speech_llm/data/lhotse_dataset.py b/nemo/collections/multimodal/speech_llm/data/lhotse_dataset.py index 204a92e5b7ab..c9886fbd72f0 100644 --- a/nemo/collections/multimodal/speech_llm/data/lhotse_dataset.py +++ b/nemo/collections/multimodal/speech_llm/data/lhotse_dataset.py @@ -12,14 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Union +from typing import Optional, Union import torch.utils.data +from lhotse.cut import Cut, CutSet from lhotse.dataset import AudioSamples from lhotse.dataset.collation import collate_vectors as collate_vectors_lhotse +from nemo.collections.common.data.lhotse.text_adapters import NeMoSFTExample, SourceTargetTextExample from nemo.collections.multimodal.speech_llm.parts.utils.data_utils import ( - TextProcessing, + PromptFormatterTextProcessing, build_loss_mask, ceil_to_nearest, ) @@ -60,7 +62,7 @@ class LhotseAudioQuestionAnswerDataset(torch.utils.data.Dataset): def __init__( self, - text_processor: TextProcessing, + text_processor: PromptFormatterTextProcessing, default_context: str, tokens_to_generate: int, pad_to_max_length: bool, @@ -79,54 +81,70 @@ def __init__( self.context_key = context_key self.default_context_key = default_context_key - def __getitem__(self, cuts) -> dict[str, Union[torch.Tensor, list[str], dict]]: - cuts = cuts.sort_by_duration() - - audio, audio_lens, cuts = self.load_audio(cuts) - - return_batch = {} - audio_ratio = [] - for id, cut in enumerate(cuts): - audio_ratio.append(1.0) - - for _, cut in enumerate(cuts): - if hasattr(cut, self.context_key): - cut.context = getattr(cut, self.context_key) - elif hasattr(cut, self.default_context_key): - cut.context = getattr(cut, self.default_context_key) - else: - cut.context = self.default_context - - metadata = [] - for id, cut in enumerate(cuts): - metadata.append({'audio_filepath': cut.id + '.wav'}) - - collated_text_data = collate_text_data( - cuts=cuts, - default_context=self.default_context, - text_processor=self.text_processor, - tokens_to_generate=self.tokens_to_generate, - pad_to_max_length=self.pad_to_max_length, - max_seq_length=self.max_seq_length, - ) - return_batch.update( - { - "sample_ids": list(cuts.ids), - "audio_signal": audio, - "audio_signal_length": audio_lens, - "audio_ratio": torch.FloatTensor(audio_ratio), - "metadata": metadata, - **collated_text_data, - } - ) - - return return_batch + def __getitem__(self, all_cuts: CutSet) -> dict[str, torch.Tensor | list[str] | dict]: + ans = {} + + # convert audio cuts to mini-batch + cuts = all_cuts.filter(lambda c: isinstance(c, Cut)) + if cuts: + audio, audio_lens, cuts = self.load_audio(cuts) + + return_batch = {} + audio_ratio = [1.0] * len(cuts) + for _, cut in enumerate(cuts): + if hasattr(cut, self.context_key): + cut.context = getattr(cut, self.context_key) + elif hasattr(cut, self.default_context_key): + cut.context = getattr(cut, self.default_context_key) + else: + cut.context = self.default_context + + metadata = [] + for id, cut in enumerate(cuts): + metadata.append({'audio_filepath': cut.id + '.wav'}) + + collated_text_data = collate_text_data( + cuts=cuts, + default_context=self.default_context, + text_processor=self.text_processor, + tokens_to_generate=self.tokens_to_generate, + pad_to_max_length=self.pad_to_max_length, + max_seq_length=self.max_seq_length, + ) + return_batch.update( + { + "sample_ids": list(cuts.ids), + "audio_signal": audio, + "audio_signal_length": audio_lens, + "audio_ratio": torch.FloatTensor(audio_ratio), + "metadata": metadata, + **collated_text_data, + } + ) + ans.update(return_batch) + + # convert text examples to tensors + text_examples = all_cuts.filter(lambda c: isinstance(c, (SourceTargetTextExample, NeMoSFTExample))) + if text_examples: + pad_id = self.text_processor.pad_id + text_minibatch = dict( + text_input_ids=collate_vectors_lhotse([e.input_ids for e in text_examples], 
padding_value=pad_id), + text_input_lens=torch.tensor([len(e.input_ids) for e in text_examples], dtype=torch.int64), + text_answer_ids=collate_vectors_lhotse([e.answer_ids for e in text_examples], padding_value=pad_id), + text_answer_lens=torch.tensor([len(e.answer_ids) for e in text_examples], dtype=torch.int64), + text_context_ids=collate_vectors_lhotse([e.context_ids for e in text_examples], padding_value=pad_id), + text_context_lens=torch.tensor([len(e.context_ids) for e in text_examples], dtype=torch.int64), + text_masks=collate_vectors_lhotse([e.mask for e in text_examples], padding_value=0), + ) + ans.update(text_minibatch) + + return ans def collate_text_data( cuts, default_context: str, - text_processor: TextProcessing, + text_processor: PromptFormatterTextProcessing, tokens_to_generate: int, pad_to_max_length: bool, max_seq_length: int, @@ -134,47 +152,37 @@ def collate_text_data( """Perform text collation equivalent to nemo/collections/multimodal/data/audio_text_qa_dataset.py:121""" batch_size = len(cuts) pad_id = text_processor.pad_id - examples = [ - { - k: torch.as_tensor(v) - for k, v in text_processor._process_example( - context=cut.context, - output=cut.supervisions[0].text, - ).items() - } - for cut in cuts - ] + examples = [{k: torch.as_tensor(v) for k, v in text_processor._process_example(cut).items()} for cut in cuts] fields = as_dict(examples) def get_max_len(input_list): return max([len(x) for x in input_list]) - max_length = tokens_to_generate + max( - get_max_len(fields["input_ids"]), get_max_len(fields["context_ids"]), get_max_len(fields["answer_ids"]) - ) - # increase max length to nearest multiple of 4 or 8 + input_id_maxlen = get_max_len(fields["input_ids"]) + context_id_maxlen = tokens_to_generate + get_max_len(fields["context_ids"]) + answer_id_maxlen = get_max_len(fields["answer_ids"]) if pad_to_max_length: - max_length = max_seq_length - else: - max_length = min(max_seq_length, ceil_to_nearest(max_length, 8)) + input_id_maxlen = max_seq_length + context_id_maxlen = max_seq_length + answer_id_maxlen = max_seq_length - all_tokens = collate_vectors(fields["input_ids"], max_length=max_length, padding_value=pad_id) + all_tokens = collate_vectors(fields["input_ids"], max_length=input_id_maxlen, padding_value=pad_id) full_lengths = torch.LongTensor([len(item) for item in fields["input_ids"]]) - assert max_length <= max_seq_length, f"{max_length=} <= {max_seq_length=}" + assert input_id_maxlen <= max_seq_length, f"{input_id_maxlen=} <= {max_seq_length=}" return { "tokens": all_tokens[:, :-1], "tokens_length": full_lengths - 1, "labels": all_tokens[:, 1:], "loss_mask": collate_vectors( - [torch.as_tensor(build_loss_mask(item)) for item in examples], max_length=max_length, padding_value=0 + [torch.as_tensor(build_loss_mask(item)) for item in examples], max_length=input_id_maxlen, padding_value=0 )[:, 1:], - "position_ids": torch.arange(max_length, dtype=torch.long).repeat(batch_size, 1), - "contexts": collate_vectors(fields["context_ids"], max_length=max_length, padding_value=pad_id), + "position_ids": torch.arange(input_id_maxlen, dtype=torch.long).repeat(batch_size, 1), + "contexts": collate_vectors(fields["context_ids"], max_length=context_id_maxlen, padding_value=pad_id), "context_lengths": torch.LongTensor([len(seq) for seq in fields["context_ids"]]), - "answers": collate_vectors(fields["answer_ids"], max_length=max_length, padding_value=pad_id), - "max_length": torch.LongTensor([max_length] * batch_size), + "answers": collate_vectors(fields["answer_ids"], 
max_length=answer_id_maxlen, padding_value=pad_id), + "max_length": torch.LongTensor([input_id_maxlen] * batch_size), } diff --git a/nemo/collections/multimodal/speech_llm/models/modular_models.py b/nemo/collections/multimodal/speech_llm/models/modular_models.py index aab27cf2d908..a9ee87e9a9de 100644 --- a/nemo/collections/multimodal/speech_llm/models/modular_models.py +++ b/nemo/collections/multimodal/speech_llm/models/modular_models.py @@ -15,12 +15,14 @@ import itertools import json import os +from functools import partial from typing import List, Optional, Union import hydra import sacrebleu import torch from hydra.utils import get_class +from lightning.pytorch.loops.fetchers import _DataFetcherWrapper from lightning.pytorch.trainer.trainer import Trainer from lightning.pytorch.utilities import rank_zero_only from omegaconf import ListConfig @@ -47,6 +49,7 @@ from nemo.collections.nlp.modules.common.megatron.utils import ( average_losses_across_data_parallel_group, build_position_ids, + get_iterator_k_split, ) from nemo.collections.nlp.modules.common.text_generation_utils import get_computeprob_response from nemo.collections.nlp.parts.peft_config import PEFT_CONFIG_MAP @@ -54,28 +57,33 @@ from nemo.core.classes import ModelPT from nemo.core.classes.common import PretrainedModelInfo from nemo.core.classes.mixins import adapter_mixins +from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, MaskType, NeuralType from nemo.utils import AppState, logging, model_utils from nemo.utils.model_utils import inject_model_parallel_rank try: from megatron.core import InferenceParams, parallel_state, tensor_parallel from megatron.core.models.gpt import GPTModel as MCoreGPTModel + from megatron.core.pipeline_parallel.schedules import get_forward_backward_func HAVE_MEGATRON_CORE = True except (ImportError, ModuleNotFoundError): HAVE_MEGATRON_CORE = False - try: - from megatron.core.num_microbatches_calculator import get_num_microbatches, reconfigure_num_microbatches_calculator + from megatron.core.num_microbatches_calculator import ( + get_micro_batch_size, + get_num_microbatches, + reconfigure_num_microbatches_calculator, + ) except (ImportError, ModuleNotFoundError): logging.warning("Megatron num_microbatches_calculator not found, using Apex version.") from apex.transformer.pipeline_parallel.utils import ( _reconfigure_microbatch_calculator as reconfigure_num_microbatches_calculator, ) - from apex.transformer.pipeline_parallel.utils import get_num_microbatches + from apex.transformer.pipeline_parallel.utils import get_micro_batch_size, get_num_microbatches __all__ = ["ModularAudioGPTModel", "CrossAttendModularAudioGPTModel"] @@ -107,22 +115,32 @@ def __init__(self, cfg: DictConfig, trainer: Trainer): # print out params in more details self.summarize(max_depth=2) - def parameters(self): + def parameters(self, requires_grad_only=False): # override the same method in MegatronGPT model to include parameters ouside of LM all_names = [] all_params = [] for name, param in self.named_parameters(recurse=True): + if requires_grad_only: + if not param.requires_grad: + continue all_names.append(name) all_params.append(param) if isinstance(self.model, list): for module in self.model: for name, param in module.named_parameters(recurse=True): + if requires_grad_only: + if not param.requires_grad: + continue all_names.append(name) all_params.append(param) return itertools.chain(all_params) + def configure_optimizers(self): + self.setup_optimizer_param_groups() + return 
super().configure_optimizers() + def setup_optimizer_param_groups(self): """ Override parent method to setup optimizer groups for training/freezing different parts of the model. @@ -268,8 +286,12 @@ def inject_perception_input( ): """Inject audio features into the text input and return the final input embeddings to LLM.""" # [b, t, c] + if self.cfg.get('megatron_amp_O2', False): + base_module = self.model.module + else: + base_module = self.model lm_embedding = ( - self.model.language_model.embedding if hasattr(self.model, 'language_model') else self.model.embedding + base_module.language_model.embedding if hasattr(base_module, 'language_model') else base_module.embedding ) input_embeds = lm_embedding.word_embeddings(input_ids) if isinstance(encoded, torch.Tensor): @@ -311,8 +333,12 @@ def _shift_labels_by_emb_len(self, labels, label_lens, emb_lens, max_len, pad_to def _get_text_embeddings(self, text_tokens, position_ids): """Get text embeddings for the input text tokens.""" + if self.cfg.get('megatron_amp_O2', False): + base_module = self.model.module + else: + base_module = self.model lm_embedding = ( - self.model.language_model.embedding if hasattr(self.model, 'language_model') else self.model.embedding + base_module.language_model.embedding if hasattr(base_module, 'language_model') else base_module.embedding ) text_embeddings = lm_embedding.word_embeddings(text_tokens) # (batch_size, seq_len, hidden_size) if hasattr(lm_embedding, 'position_embeddings'): @@ -364,43 +390,176 @@ def prepare_llm_input(self, audio_batch): return encoder_input, attention_mask, labels, loss_mask, encoder_length - def forward( - self, - audio_batch, - checkpoint_activations_all_layers, + def _gpt_forward( + self, input_ids, position_ids, encoder_input, attention_mask, labels, checkpoint_activations_all_layers ): - """ - Forward pass of the model. We prepend audio embeddings to the instruction and label text tokens as the LLM input. - """ - if 'audio_ratio' in audio_batch: - self.log( - 'local_batch_size', - audio_batch['audio_ratio'].shape[0], - prog_bar=True, - batch_size=1, - rank_zero_only=False, - ) - - encoder_input, attention_mask, labels, loss_mask, _ = self.prepare_llm_input(audio_batch) + """Forward pass of the GPT model.""" + if self.megatron_amp_O2: + encoder_input = encoder_input.type(self.model.module.embedding.word_embeddings.weight.dtype) if self.mcore_gpt: output = self.model( - input_ids=None, - position_ids=None, + input_ids=input_ids, + position_ids=position_ids, decoder_input=encoder_input, attention_mask=attention_mask, labels=labels, ) else: output = self.model( - input_ids=None, - position_ids=None, + input_ids=input_ids, + position_ids=position_ids, encoder_input=encoder_input, attention_mask=attention_mask, labels=labels, checkpoint_activations_all_layers=checkpoint_activations_all_layers, ) + return output + + def forward( + self, + batch, + checkpoint_activations_all_layers, + ): + """ + Forward pass of the model. We prepend audio embeddings to the instruction and label text tokens as the LLM input. 
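+        The incoming batch may mix modalities: keys prefixed with "text_" form a text-only
+        mini-batch, while the remaining keys (e.g. "audio_signal") form the audio-text mini-batch;
+        each modality that is present gets its own GPT forward pass, collected in multimodal_output.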
+ """ + audio_batch = {k: v for k, v in batch.items() if not k.startswith("text_")} + text_batch = {k: v for k, v in batch.items() if k.startswith("text_")} + + output, loss_mask = None, None + + multimodal_output = {} + if 'audio_signal' in audio_batch: + encoder_input, attention_mask, labels, loss_mask, _ = self.prepare_llm_input(audio_batch) + output = self._gpt_forward( + None, None, encoder_input, attention_mask, labels, checkpoint_activations_all_layers + ) + multimodal_output['audio_text'] = (output, loss_mask) + if text_batch: + input_ids = text_batch["text_input_ids"][:, :-1] + labels = text_batch["text_input_ids"][:, 1:] + attention_mask = self._create_attention_mask(input_ids) + loss_mask = text_batch["text_masks"][:, 1:] + output = self._gpt_forward( + input_ids, None, None, attention_mask, labels, checkpoint_activations_all_layers + ) + multimodal_output['text'] = (output, loss_mask) + if not audio_batch and not text_batch: + raise ValueError("No input data found for the model.") + + return multimodal_output + + def fwd_bwd_step(self, dataloader_iter, forward_only, first_val_step=None): + """ + Copy of megatron_gpt_sft_model.py function with the same name. + Modified not to assume certain fields like 'tokens' are always available in the mini-batch, + since we have mixed text/audio dataloading and sometimes one of the modalities might be missing. + """ + # Return only batch if batch, batch_idx, dataloder_idx are extracted as a tuple in the previous func + # call like validation_step otherwise return tuple (in which case dataloader_iter is still a PTL _DataFetcherWrapper object) + if isinstance(dataloader_iter, _DataFetcherWrapper): + batch, _, _ = next(dataloader_iter) + else: + batch = next(dataloader_iter) + + audio_batch = {k: v for k, v in batch.items() if not k.startswith("text_")} + text_batch = {k: v for k, v in batch.items() if k.startswith("text_")} + + # Note: We want to perform full fwd+bwd separately for each modality, + # as it allows us to save GPU memory. Otherwise, we'd have to + # hold the activations from one modality in memory while running + # forward for the other. 
+ batch_losses = [] + for batch in (audio_batch, text_batch): + if not batch: + continue + + # Pass only torch.Tensor to prevent errors when process get_iterator_k_split() + batch = {k: v for k, v in batch.items() if isinstance(v, torch.Tensor)} + + if 'tokens' in batch and 'text_input_ids' in batch: + seq_length = max(batch['tokens'].shape[1], batch['text_input_ids'].shape[1]) + elif 'tokens' in batch: + seq_length = batch['tokens'].shape[1] + elif 'text_input_ids' in batch: + seq_length = batch['text_input_ids'].shape[1] + else: + seq_length = None + + data_iter = get_iterator_k_split(batch, get_num_microbatches()) + + # handle asynchronous grad reduction + no_sync_func = None + grad_sync_func = None + param_sync_func = None + if not forward_only and self.with_distributed_adam: + no_sync_func = partial( + self._optimizer.no_sync, + greedy_grad_copy=self.megatron_amp_O2, + ) + grad_sync_func = self.reduce_overlap_gradients + param_sync_func = self.sync_overlap_parameters + + for module in self.get_model_module_list(): + module.config.no_sync_func = no_sync_func + module.config.grad_sync_func = grad_sync_func + module.config.param_sync_func = param_sync_func + + fwd_bwd_function = get_forward_backward_func() + + losses_reduced_per_micro_batch = fwd_bwd_function( + forward_step_func=self.get_forward_output_and_loss_func(tuning=True, validation_step=forward_only), + data_iterator=self._make_data_iterator_list(data_iter), + model=self.model, + num_microbatches=get_num_microbatches(), + forward_only=forward_only, + seq_length=seq_length, + micro_batch_size=get_micro_batch_size(), + first_val_step=first_val_step, + ) + + non_loss_tensors = {} + # only the last stages of the pipeline return losses + if losses_reduced_per_micro_batch: + for item in losses_reduced_per_micro_batch: + for k, v in item.items(): + if k != 'avg': + av = non_loss_tensors.get(k, []) + av.append(v) + non_loss_tensors[k] = av + if (not forward_only) or self.cfg.data.get('validation_drop_last', True): + # average loss across micro batches + loss_tensors_list = [loss_reduced['avg'] for loss_reduced in losses_reduced_per_micro_batch] + loss_tensor = torch.concat(loss_tensors_list) + loss_mean = loss_tensor.mean() + else: + # Get the total loss since micro batches sizes are not uniform + loss_sum_tensors_list = [ + loss_sum['loss_sum_and_ub_size'] + for loss_sum in losses_reduced_per_micro_batch + if loss_sum['loss_sum_and_ub_size'][1] > 0 + ] + loss_mean = ( + torch.vstack(loss_sum_tensors_list).sum(axis=0) + if len(loss_sum_tensors_list) > 0 + else torch.tensor([0.0, 0.0]).cuda() + ) + else: + # we're not on the last pipeline stage so no losses + if forward_only: + loss_mean = [] + else: + loss_mean = torch.tensor(0.0).cuda() + batch_losses.append(loss_mean.unsqueeze(0)) + + loss_mean = torch.cat(batch_losses).mean() - return output, loss_mask + # if forward_only: + # return loss_mean + if non_loss_tensors: # TODO: need a nicer way to do this via inheritance (@adithyare) + return loss_mean, non_loss_tensors + else: + return loss_mean def get_forward_output_only_func(self): def fwd_output_only_func(dataloader_iter, model): @@ -440,6 +599,8 @@ def fwd_output_only_func(dataloader_iter, model): ): attention_mask = None + if self.megatron_amp_O2: + input_embeddings = input_embeddings.type(self.model.module.embedding.word_embeddings.weight.dtype) output_tensor = model( input_ids=None, position_ids=None, @@ -486,18 +647,41 @@ def fwd_output_and_loss_func(dataloader_iter, model, checkpoint_activations_all_ if not self.mcore_gpt: 
batch['checkpoint_activations_all_layers'] = checkpoint_activations_all_layers - output_tensor, loss_mask = self.forward( + multimodal_output = self.forward( batch, checkpoint_activations_all_layers=checkpoint_activations_all_layers ) - batch['loss_mask'] = loss_mask - def loss_func(output_tensor): + def loss_func(multimodal_output): # Loss for a micro-batch (ub) - loss_for_ub = self.loss_func(batch['loss_mask'], batch['num_valid_tokens_in_ub'], output_tensor) + loss_for_ub = 0 + + modality_weights = self.cfg.get("modality_loss_weights") + + for key, (output, loss_mask) in multimodal_output.items(): + cur_loss = self.loss_func(loss_mask.contiguous(), loss_mask.sum(), output.contiguous()) + if modality_weights is not None: + assert ( + key in modality_weights + ), f"Expected cfg.modality_loss_weights={modality_weights} to contain key {key}" + cur_loss = cur_loss * modality_weights[key] + loss_for_ub += cur_loss + self.log( + f'{key}_loss', + cur_loss.mean(), + prog_bar=True, + batch_size=1, + rank_zero_only=False, + ) + self.log( + f'{key}_batch_size', + loss_mask.shape[0], + prog_bar=True, + batch_size=1, + rank_zero_only=False, + ) + cp_size = self.cfg.get('context_parallel_size', 1) - if self.cfg.data.get( - "return_output_tensors", False - ): # TODO: need a better way to check if loss_func is returning more stuff than just loss... (@adithyare) + if self.cfg.data.get("return_output_tensors", False): loss_for_ub, q_hs, d_hs, pos_cs, neg_cs, diff_cs = loss_for_ub reduced_loss = average_losses_across_data_parallel_group([loss_for_ub]) pos_cs = average_losses_across_data_parallel_group([pos_cs]) @@ -537,7 +721,7 @@ def loss_func(output_tensor): reduced_loss = average_losses_across_data_parallel_group([loss_for_ub]) return loss_for_ub * cp_size, {'avg': reduced_loss} - return output_tensor, loss_func + return multimodal_output, loss_func return fwd_output_and_loss_func @@ -831,8 +1015,8 @@ def merge_inference_cfg( """ if pretrained_model_cfg: model_cfg = pretrained_model_cfg - elif cfg.model.peft.restore_from_path: - if cfg.model.peft.restore_from_path.endswith(".nemo"): + elif cfg.model.peft.restore_from_path or cfg.model.peft.restore_from_ckpt.checkpoint_dir: + if cfg.model.peft.restore_from_path and cfg.model.peft.restore_from_path.endswith(".nemo"): model_cfg = ModularAudioGPTModel.restore_from( restore_path=cfg.model.peft.restore_from_path, trainer=trainer, @@ -1005,6 +1189,16 @@ def load_state_dict(self, state_dict, strict: bool = True): else: super(MegatronGPTModel, self).load_state_dict(state_dict, strict=strict) + def on_train_epoch_start(self) -> None: + app_state = AppState() + reconfigure_num_microbatches_calculator( + rank=app_state.global_rank, + rampup_batch_size=None, + global_batch_size=self.cfg.data.train_ds.global_batch_size, + micro_batch_size=self.cfg.data.train_ds.micro_batch_size, + data_parallel_size=parallel_state.get_data_parallel_world_size(), + ) + def on_load_checkpoint(self, checkpoint) -> None: """LightningModule hook: https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-load-checkpoint @@ -1063,11 +1257,17 @@ def inference_step(self, dataloader_iter, mode): """ Used for validation and test steps, added postprocessing after calling self.predict_step(). 
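+        For mixed-modality evaluation, the context/answer tensors are taken either from the
+        audio keys ("contexts", "answers") or from the "text_"-prefixed keys handled below.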
""" + # Evaluation of multimodal data follows the same pattern as training except predict_step batch, batch_idx, dataloader_idx = next(dataloader_iter) data_cfg = self.cfg.data.validation_ds if mode == 'validation' else self.cfg.data.test_ds - self._reconfigure_and_process_inference_batch(batch, data_cfg) - # Meta data from dataset - metadata = batch.get('metadata', [{}] * len(batch['tokens'])) + if "tokens" in batch: + self._reconfigure_and_process_inference_batch(batch, data_cfg) + metadata = batch.get('metadata', [{}] * len(batch['tokens'])) + else: + batch["tokens"] = batch["text_input_ids"] + self._reconfigure_and_process_inference_batch(batch, data_cfg) + metadata = batch.get('metadata', [{}] * len(batch['tokens'])) + batch.pop("tokens") loss = super(MegatronGPTSFTModel, self).validation_step(itertools.chain([batch]), dataloader_idx) # We need _inference_config to get generation params @@ -1080,12 +1280,22 @@ def inference_step(self, dataloader_iter, mode): output = self.predict_step(batch, batch_idx, dataloader_idx) - inputs_text = [self.tokenizer.ids_to_text(c.tolist()) for c in batch['contexts']] - labels_text = [self.tokenizer.ids_to_text(a.tolist()) for a in batch['answers']] - preds_text = [ - self.tokenizer.ids_to_text(t[l.item() :][: data_cfg.get('tokens_to_generate')]) - for t, l in zip(output['token_ids'], batch['context_lengths']) - ] + audio_batch = {k: v for k, v in batch.items() if not k.startswith("text_")} + text_batch = {k: v for k, v in batch.items() if k.startswith("text_")} + if audio_batch: + inputs_text = [self.tokenizer.ids_to_text(c.tolist()) for c in audio_batch['contexts']] + labels_text = [self.tokenizer.ids_to_text(a.tolist()) for a in audio_batch['answers']] + preds_text = [ + self.tokenizer.ids_to_text(t[l.item() :][: data_cfg.get('tokens_to_generate')]) + for t, l in zip(output['token_ids'], audio_batch['context_lengths']) + ] + else: + inputs_text = [self.tokenizer.ids_to_text(c.tolist()) for c in text_batch['text_context_ids']] + labels_text = [self.tokenizer.ids_to_text(a.tolist()) for a in text_batch['text_answer_ids']] + preds_text = [ + self.tokenizer.ids_to_text(t[l.item() :][: data_cfg.get('tokens_to_generate')]) + for t, l in zip(output['token_ids'], text_batch['text_context_lens']) + ] if data_cfg.get("end_string", None): # sometimes data_cfg.end_string != self.tokenizer.ids_to_text(self.tokenizer.text_to_ids(data_cfg.end_string)) @@ -1178,6 +1388,12 @@ def predict_step(self, batch: dict, batch_idx: int, dataloader_idx: Optional[int # for megatron_gpt_eval.py if isinstance(batch, list): inference_config['inputs'] = batch + elif "text_context_ids" in batch: + # Text mini-batch + inference_config['inputs'] = ( + batch['text_context_ids'].cuda(), + batch['text_context_lens'].cuda(), + ) elif 'num_audios' in batch: # peft_eval.py inference_config['inputs'] = ( @@ -1208,7 +1424,8 @@ def predict_step(self, batch: dict, batch_idx: int, dataloader_idx: Optional[int ) # add audio offsets to context lengths for properly decoding only the response - batch['context_lengths'] = batch['context_lengths'].cuda() + response['audio_feat_lens'] + if 'context_lengths' in batch: + batch['context_lengths'] = batch['context_lengths'].cuda() + response['audio_feat_lens'] return response @@ -1481,6 +1698,110 @@ def list_available_models(cls) -> Optional[PretrainedModelInfo]: results.append(model) return results + def configure_sharded_model(self): + """Modified version from MegatronBaseModel. + + 1. exclude self.model.embedding + 2. 
include speech encoder and modality adapter. + """ + + def find_frozen_submodules(model): + frozen_submodules = [] + frozen_submodule_names = [] + for name, module in model.named_modules(): + if ( + isinstance(module, torch.nn.Module) + and list(module.parameters()) + and all(not param.requires_grad for param in module.parameters()) + ): + frozen_submodule_names.append(name) + frozen_submodules.append(module) + return frozen_submodule_names, frozen_submodules + + if self.use_fsdp: + """Top-evel FSDP model sharding""" + # Shard the top-level model hierarchically. We shard the strategy-unwrapped model not + # to lose the structure of non-FSDP wrapped parameters (e.g, embedding) + # TODO: Currently the main parameter data type is kept in fp32 (when O2=False). This needs to be + # extended to support lower precision main parameters. + frozen_submodule_names, frozen_submodules = find_frozen_submodules(self.model) + self.trainer.strategy.kwargs['ignored_states'] = frozen_submodules + # Exclude embedding layer to avoid errors in inject_perception_input + self.trainer.strategy.kwargs['ignored_states'].append(self.model.embedding) + # FSDP requires uniform status of require_grads + # Diffusion models like SD has frozen parts and needs to be added to 'ignored_states' from sharding for FSDP to work + self.model = self.trainer.strategy._setup_model(self.model) + # Move the CPU-initialized model (with `use_cpu_initialization=True`) to GPU, which is to avoid + # out-of-memory carash before sharding. In case of GPU-initialized model, this is no-op. + self.model = self.model.cuda(torch.cuda.current_device()) + + # Shard perception module + frozen_submodule_names, frozen_submodules = find_frozen_submodules(self.perception) + self.trainer.strategy.kwargs['ignored_states'].extend(frozen_submodules) + self.perception = self.trainer.strategy._setup_model(self.perception) + self.perception = self.perception.cuda(torch.cuda.current_device()) + + def oomptimizer_schema(self, schema: str = "audio") -> dict: + """ + Return a typing schema for optimal batch size calibration for various + sequence lengths using OOMptimizer. 
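+        The schema mirrors the mini-batch layout produced by the lhotse dataset: "audio" describes
+        the audio_signal/tokens keys, "text" the text_-prefixed keys, e.g. self.oomptimizer_schema("text").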
+ """ + + if schema == "audio": + return { + "cls": dict, + "inputs": [ + {"name": "audio_signal", "type": NeuralType(("B", "T"), AudioSignal()), "seq_length": "input"}, + {"name": "audio_signal_length", "type": NeuralType(("B",), LengthsType()), "seq_length": "input"}, + { + "name": "tokens", + "type": NeuralType(("B", "T"), LabelsType()), + "seq_length": "output", + "vocab_size": self.tokenizer.vocab_size, + }, + { + "name": "tokens_length", + "type": NeuralType(("B",), LengthsType()), + "seq_length": "output", + }, + { + "name": "labels", + "type": NeuralType(("B", "T"), LabelsType()), + "seq_length": "output", + "vocab_size": self.tokenizer.vocab_size, + }, + { + "name": "loss_mask", + "type": NeuralType(("B", "T"), MaskType()), + "seq_length": "output", + }, + { + "name": "context_start_idx", + "type": "constant", + "value": 0, + }, + ], + } + elif schema == "text": + return { + "cls": dict, + "inputs": [ + { + "name": "text_input_ids", + "type": NeuralType(("B", "T"), LabelsType()), + "seq_length": "input", + "vocab_size": self.tokenizer.vocab_size, + }, + { + "name": "text_masks", + "type": NeuralType(("B", "T"), MaskType()), + "seq_length": "input", + }, + ], + } + else: + raise RuntimeError(f"Unknown schema type for oomptimizer of class {type(self)}: '{schema}'") + class CrossAttendModularAudioGPTModel(ModularAudioGPTModel): """Modularized speech GPT model.""" @@ -1519,7 +1840,6 @@ def prepare_llm_input(self, audio_batch): encoder_input, extra_outputs = self.perception_cross_attn( encoded, encoded_len, input_embeds, input_lengths=input_length, return_mems=True ) - # TODO: need separate speech and text methods for inference if 'audio_ratio' in audio_batch: audio_ratio = audio_batch['audio_ratio'][..., None, None] encoder_input = encoder_input * audio_ratio + input_embeds * (1 - audio_ratio) @@ -1553,3 +1873,15 @@ def state_dict(self, destination=None, prefix=None, keep_vars=False): return return_state_dict else: return super().state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars) + + def configure_sharded_model(self): + """Modified version from MegatronBaseModel. + + 1. exclude self.model.embedding + 2. include speech encoder and modality adapter. + """ + super().configure_sharded_model() + + if self.use_fsdp: + self.perception_cross_attn = self.trainer.strategy._setup_model(self.perception_cross_attn) + self.perception_cross_attn = self.perception_cross_attn.cuda(torch.cuda.current_device()) diff --git a/nemo/collections/multimodal/speech_llm/models/modular_t5_models.py b/nemo/collections/multimodal/speech_llm/models/modular_t5_models.py index a99f5c346831..c16950fb0800 100644 --- a/nemo/collections/multimodal/speech_llm/models/modular_t5_models.py +++ b/nemo/collections/multimodal/speech_llm/models/modular_t5_models.py @@ -49,6 +49,7 @@ from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector from nemo.collections.nlp.parts.utils_funcs import get_last_rank from nemo.core.classes.mixins import adapter_mixins +from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, MaskType, NeuralType from nemo.utils import AppState, logging, model_utils try: @@ -153,7 +154,10 @@ def init_model(self, cfg: DictConfig, trainer: Trainer): self.hidden_size = self.frozen_model.cfg.hidden_size # Handle this when moving GPT prompt learning to the base class. 
- self.word_embeddings = self.frozen_model.enc_dec_model.encoder_embedding.word_embeddings + if self.megatron_amp_O2: + self.word_embeddings = self.frozen_model.enc_dec_model.module.encoder_embedding + else: + self.word_embeddings = self.frozen_model.enc_dec_model.encoder_embedding self._reduced_loss_buffer = [] self._inference_config = None @@ -287,7 +291,7 @@ def _concat_embs(embs1, emb1_lens, embs2, emb2_lens): return concat_emb, concat_len # [b, t, c] - lm_embedding = self.frozen_model.enc_dec_model.encoder_embedding + lm_embedding = self.word_embeddings input_embeds = lm_embedding.word_embeddings(input_ids) if self.cfg.audio_prompt_first: encoder_input, encoder_length = _concat_embs(encoded, encoded_len, input_embeds, input_length) @@ -340,13 +344,8 @@ def prepare_llm_input(self, audio_batch): input_signal = audio_batch['audio_signal'] input_signal_length = audio_batch['audio_signal_length'] - - input_ids, input_length, labels, loss_mask = ( - audio_batch['contexts'], - audio_batch['context_lengths'], - audio_batch['labels'], - audio_batch['loss_mask'], - ) + input_ids = audio_batch['contexts'] + input_length = audio_batch['context_lengths'] # [b, t, c] encoded, encoded_len = self.perception( @@ -364,7 +363,7 @@ def prepare_llm_input(self, audio_batch): def forward( self, - audio_batch, + batch, checkpoint_activations_all_layers, ): """Forward pass of the model. @@ -372,39 +371,60 @@ def forward( We prepend audio embeddings to the instruction and label text tokens as the LLM input. """ - if 'audio_ratio' in audio_batch: - self.log( - 'audio_ratio', audio_batch['audio_ratio'].mean(), prog_bar=True, batch_size=1, rank_zero_only=False + + audio_batch = {k: v for k, v in batch.items() if not k.startswith("text_")} + text_batch = {k: v for k, v in batch.items() if k.startswith("text_")} + + multimodal_output = {} + + if 'audio_signal' in audio_batch: + encoder_input, attention_mask, enc_mask = self.prepare_llm_input(audio_batch) + # enc_input = speech and text prompt + # dec_input and label = text output label + b = audio_batch['answers'].shape[0] + labels = audio_batch['answers'] + dec_input = torch.cat([torch.full([b, 1], self.bos_id, device=labels.device), labels[:, :-1]], dim=-1) + dec_mask = (dec_input != self.tokenizer.pad_id).long().contiguous() + output = self.frozen_model.enc_dec_model( + enc_input_ids=None, + enc_attn_mask=enc_mask, + dec_input_ids=dec_input, + dec_attn_mask=dec_mask, + token_type_ids=None, + labels=labels, + output_enc_hidden_only=False, + enc_input=encoder_input, ) - self.log( - 'local_batch_size', - audio_batch['audio_ratio'].shape[0], - prog_bar=True, - batch_size=1, - rank_zero_only=False, + loss_mask = dec_mask + multimodal_output['audio_text'] = (output, loss_mask) + + if text_batch: + b = text_batch['text_answer_ids'].shape[0] + encoder_input_ids = text_batch["text_context_ids"] + enc_mask = (encoder_input_ids != self.tokenizer.pad_id).long().contiguous() + decoder_input_ids = torch.cat( + [ + torch.full([b, 1], self.bos_id, device=encoder_input_ids.device), + text_batch["text_answer_ids"][:, :-1], + ], + dim=-1, ) + labels = text_batch["text_answer_ids"] + dec_mask = (decoder_input_ids != self.tokenizer.pad_id).long().contiguous() + loss_mask = dec_mask + output = self.frozen_model.enc_dec_model( + enc_input_ids=encoder_input_ids, + enc_attn_mask=enc_mask, + dec_input_ids=decoder_input_ids, + dec_attn_mask=dec_mask, + token_type_ids=None, + labels=labels, + output_enc_hidden_only=False, + enc_input=None, + ) + multimodal_output['text'] = (output, 
loss_mask) - encoder_input, attention_mask, enc_mask = self.prepare_llm_input(audio_batch) - # enc_input = speech and text prompt - # dec_input and label = text output label - b = audio_batch['answers'].shape[0] - device = audio_batch['answers'].device - dec_input = audio_batch['masked_answer_ids'] if 'masked_answer_ids' in audio_batch else audio_batch['answers'] - dec_input = torch.cat([torch.full([b, 1], self.bos_id, device=device), dec_input[:, :-1]], dim=-1) - labels = audio_batch['answers'] - dec_mask = (dec_input != self.tokenizer.pad_id).long().contiguous() - output = self.frozen_model.enc_dec_model( - enc_input_ids=None, - enc_attn_mask=enc_mask, - dec_input_ids=dec_input, - dec_attn_mask=dec_mask, - token_type_ids=None, - labels=labels, - output_enc_hidden_only=False, - enc_input=encoder_input, - ) - loss_mask = dec_mask - return output, loss_mask + return multimodal_output def get_forward_output_only_func(self): def fwd_output_only_func(dataloader_iter, model): @@ -446,21 +466,42 @@ def get_forward_output_and_loss_func(self, validation_step=False): def fwd_output_and_loss_func(dataloader_iter, model, checkpoint_activations_all_layers=None): batch = next(dataloader_iter) batch = {key: val.cuda(non_blocking=True) for key, val in batch.items()} - output_tensor, loss_mask = self.forward( + multimodal_output = self.forward( batch, checkpoint_activations_all_layers=checkpoint_activations_all_layers ) - def loss_func(output_tensor): + def loss_func(multimodal_output): # Loss for a micro-batch (ub) - if 'audio_ratio' in batch: - text_loss_weight = self.cfg.get('text_loss_weight', 1.0) - audio_ratio = batch['audio_ratio'] - scaled_loss_mask = loss_mask * torch.unsqueeze( - (1 * audio_ratio + text_loss_weight * (1 - audio_ratio)), 1 + loss_for_ub = None + + modality_weights = self.cfg.get("modality_loss_weights") + + for key, (output, loss_mask) in multimodal_output.items(): + cur_loss = self.loss_func(loss_mask.contiguous(), output.contiguous()) + if modality_weights is not None: + assert ( + key in modality_weights + ), f"Expected cfg.modality_loss_weights={modality_weights} to contain key {key}" + cur_loss = cur_loss * modality_weights[key] + if loss_for_ub is None: + loss_for_ub = cur_loss + else: + loss_for_ub += cur_loss + self.log( + f'{key}_loss', + cur_loss.mean(), + prog_bar=True, + batch_size=1, + rank_zero_only=False, ) - loss_for_ub = self.loss_func(scaled_loss_mask, output_tensor) - else: - loss_for_ub = self.loss_func(loss_mask, output_tensor) + self.log( + f'{key}_batch_size', + loss_mask.shape[0], + prog_bar=True, + batch_size=1, + rank_zero_only=False, + ) + if validation_step and not self.cfg.data.get('validation_drop_last', True): num_valid_tokens_in_ub = batch['loss_mask'].sum() if loss_for_ub.isnan(): @@ -484,10 +525,20 @@ def loss_func(output_tensor): reduced_loss = average_losses_across_data_parallel_group([loss_for_ub]) return loss_for_ub, {'avg': reduced_loss} - return output_tensor, loss_func + return multimodal_output, loss_func return fwd_output_and_loss_func + def on_train_epoch_start(self) -> None: + app_state = AppState() + reconfigure_num_microbatches_calculator( + rank=app_state.global_rank, + rampup_batch_size=None, + global_batch_size=self.cfg.data.train_ds.global_batch_size, + micro_batch_size=self.cfg.data.train_ds.micro_batch_size, + data_parallel_size=parallel_state.get_data_parallel_world_size(), + ) + def _build_dataset(self, data_cfg, is_train=True): return build_speechllm_dataset(self, data_cfg, is_train) @@ -873,9 +924,14 @@ def 
_validation_step_internal( def inference_step(self, dataloader_iter, mode, dataloader_idx=0): batch, batch_idx, dataloader_idx = next(dataloader_iter) data_cfg = self.cfg.data.validation_ds if mode == 'validation' else self.cfg.data.test_ds - self._reconfigure_and_process_inference_batch(batch, data_cfg) - # Meta data from dataset - metadata = batch.get('metadata', [{}] * len(batch['tokens'])) + if "tokens" in batch: + self._reconfigure_and_process_inference_batch(batch, data_cfg) + metadata = batch.get('metadata', [{}] * len(batch['tokens'])) + else: + batch["tokens"] = batch["text_context_ids"] + self._reconfigure_and_process_inference_batch(batch, data_cfg) + metadata = batch.get('metadata', [{}] * len(batch['tokens'])) + batch.pop("tokens") loss = self._validation_step_internal(itertools.chain([batch]), batch_idx, dataloader_idx, result_mode=mode) # We need _inference_config to get generation params @@ -888,8 +944,8 @@ def inference_step(self, dataloader_iter, mode, dataloader_idx=0): output = self.predict_step(batch, batch_idx, dataloader_idx) - inputs_text = [self.tokenizer.ids_to_text(c.tolist()) for c in batch['contexts']] - labels_text = [self.tokenizer.ids_to_text(a.tolist()) for a in batch['answers']] + inputs_text = output["input_text"] + labels_text = output["labels_text"] preds_text = output['preds_text'] if data_cfg.get("log_every_n_steps", None) is not None: if batch_idx % data_cfg.log_every_n_steps == 0: @@ -920,25 +976,42 @@ def inference_step(self, dataloader_iter, mode, dataloader_idx=0): return outputs def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any: + # the following supports STT (audio-text) inference batch = move_data_to_device(batch, device=self.device) - encoder_input, attention_mask, enc_mask = self.prepare_llm_input(batch) - # enc_input = speech and text prompt - # dec_input and label = text output label - predicted_token_ids, log_probs = self.frozen_model.decode( - tokens_enc=None, - enc_mask=enc_mask, - num_tokens_to_generate=self._inference_config['tokens_to_generate'], - encoder_input=encoder_input, - tokenizer=self.tokenizer, - bos_id=self.bos_id, - ) + audio_batch = {k: v for k, v in batch.items() if not k.startswith("text_")} + text_batch = {k: v for k, v in batch.items() if k.startswith("text_")} + assert ( + audio_batch or text_batch and not (audio_batch and text_batch) + ), f"Expecting only text or audio batch, got {len(text_batch)=} and {len(audio_batch)=}" + + if audio_batch: + input_text = audio_batch['contexts'] + labels = audio_batch['answers'] + encoder_input, attention_mask, enc_mask = self.prepare_llm_input(audio_batch) + predicted_token_ids, log_probs = self.frozen_model.decode( + tokens_enc=None, + enc_mask=enc_mask, + num_tokens_to_generate=self._inference_config['tokens_to_generate'], + encoder_input=encoder_input, + tokenizer=self.tokenizer, + bos_id=self.bos_id, + ) + if text_batch: + input_text = text_batch['text_context_ids'] + labels = text_batch["text_answer_ids"] + enc_mask = (input_text != self.tokenizer.pad_id).long().contiguous() + predicted_token_ids, log_probs = self.frozen_model.decode( + tokens_enc=input_text, + enc_mask=enc_mask, + num_tokens_to_generate=self._inference_config['tokens_to_generate'], + tokenizer=self.tokenizer, + bos_id=self.bos_id, + ) # Special ids to text function to handle stripping and special tokens with sentencepiece tokenizers. 
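+        # input_text and labels were already selected per modality above, so only decoding to text remains.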
- input_text = batch['contexts'] preds_text = MegatronT5SFTModel.ids_to_text(predicted_token_ids, self.tokenizer) input_text = MegatronT5SFTModel.ids_to_text(input_text, self.tokenizer) - labels = batch['answers'] if labels is not None: labels_text = MegatronT5SFTModel.ids_to_text(labels, self.tokenizer) @@ -1172,68 +1245,97 @@ def fwd_bwd_step(self, dataloader_iter, batch_idx, forward_only): batch = next(dataloader_iter) # Pass only torch.Tensor to prevent errors when process get_iterator_k_split() batch = {k: v for k, v in batch.items() if isinstance(v, torch.Tensor)} - _, seq_length = batch['tokens'].shape - # handle the case where the batch size from dynamic bucketting is not divisible in lhotse - data_iter = get_iterator_k_split(batch, get_num_microbatches(), enforce_divisible_batch=False) - - # handle asynchronous grad reduction - no_sync_func = None - grad_sync_func = None - param_sync_func = None - if not forward_only and self.with_distributed_adam: - no_sync_func = partial( - self._optimizer.no_sync, - greedy_grad_copy=self.megatron_amp_O2, - ) - grad_sync_func = self.reduce_overlap_gradients - param_sync_func = self.sync_overlap_parameters - - self.model.config.no_sync_func = no_sync_func - self.model.config.grad_sync_func = grad_sync_func - self.model.config.param_sync_func = param_sync_func - - fwd_bwd_function = get_forward_backward_func() - - dec_seq_length = batch['answers'].shape[1] - - losses_reduced_per_micro_batch = fwd_bwd_function( - forward_step_func=self.get_forward_output_and_loss_func(), - data_iterator=data_iter, - model=[self.model], - num_microbatches=get_num_microbatches(), - forward_only=forward_only, - seq_length=seq_length, - micro_batch_size=get_micro_batch_size(), - decoder_seq_length=dec_seq_length, - ) - # only the last stages of the pipeline return losses - if losses_reduced_per_micro_batch: - if (not forward_only) or self.cfg.data.get('validation_drop_last', True): - # average loss across micro batches - loss_tensors_list = [loss_reduced['avg'] for loss_reduced in losses_reduced_per_micro_batch] - loss_tensor = torch.concat(loss_tensors_list) - loss_mean = loss_tensor.mean() + audio_batch = {k: v for k, v in batch.items() if not k.startswith("text_")} + text_batch = {k: v for k, v in batch.items() if k.startswith("text_")} + + # Note: We want to perform full fwd+bwd separately for each modality, + # as it allows us to save GPU memory. Otherwise, we'd have to + # hold the activations from one modality in memory while running + # forward for the other. 
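+        # As in the GPT variant, each modality runs its own micro-batches; the per-modality mean
+        # losses collected in batch_losses are averaged at the end, with dec_seq_length set per modality.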
+ batch_losses = [] + for batch in (audio_batch, text_batch): + if not batch: + continue + + # Pass only torch.Tensor to prevent errors when process get_iterator_k_split() + batch = {k: v for k, v in batch.items() if isinstance(v, torch.Tensor)} + + if 'tokens' in batch and 'text_input_ids' in batch: + seq_length = max(batch['tokens'].shape[1], batch['text_input_ids'].shape[1]) + dec_seq_length = max(batch['answers'].shape[1], batch['text_answer_ids'].shape[1]) + elif 'tokens' in batch: + seq_length = batch['tokens'].shape[1] + dec_seq_length = batch['answers'].shape[1] + elif 'text_input_ids' in batch: + seq_length = batch['text_input_ids'].shape[1] + dec_seq_length = batch['text_answer_ids'].shape[1] else: - # Get the total loss since micro batches sizes are not uniform - loss_sum_tensors_list = [ - loss_sum['loss_sum_and_ub_size'] - for loss_sum in losses_reduced_per_micro_batch - if loss_sum['loss_sum_and_ub_size'][1] > 0 - ] - loss_sum = ( - torch.vstack(loss_sum_tensors_list).sum(axis=0) - if len(loss_sum_tensors_list) > 0 - else torch.tensor([0.0, 0.0]).cuda() + seq_length = None + dec_seq_length = None + + # handle the case where the batch size from dynamic bucketting is not divisible in lhotse + data_iter = get_iterator_k_split(batch, get_num_microbatches(), enforce_divisible_batch=False) + + # handle asynchronous grad reduction + no_sync_func = None + grad_sync_func = None + param_sync_func = None + if not forward_only and self.with_distributed_adam: + no_sync_func = partial( + self._optimizer.no_sync, + greedy_grad_copy=self.megatron_amp_O2, ) - return loss_sum - else: - # we're not on the last pipeline stage so no losses - if forward_only: - loss_mean = [] + grad_sync_func = self.reduce_overlap_gradients + param_sync_func = self.sync_overlap_parameters + + self.model.config.no_sync_func = no_sync_func + self.model.config.grad_sync_func = grad_sync_func + self.model.config.param_sync_func = param_sync_func + + fwd_bwd_function = get_forward_backward_func() + + losses_reduced_per_micro_batch = fwd_bwd_function( + forward_step_func=self.get_forward_output_and_loss_func(validation_step=forward_only), + data_iterator=data_iter, + model=[self.model], + num_microbatches=get_num_microbatches(), + forward_only=forward_only, + seq_length=seq_length, + micro_batch_size=get_micro_batch_size(), + decoder_seq_length=dec_seq_length, + ) + + # only the last stages of the pipeline return losses + if losses_reduced_per_micro_batch: + if (not forward_only) or self.cfg.data.get('validation_drop_last', True): + # average loss across micro batches + loss_tensors_list = [loss_reduced['avg'] for loss_reduced in losses_reduced_per_micro_batch] + loss_tensor = torch.concat(loss_tensors_list) + loss_mean = loss_tensor.mean() + else: + # Get the total loss since micro batches sizes are not uniform + loss_sum_tensors_list = [ + loss_sum['loss_sum_and_ub_size'] + for loss_sum in losses_reduced_per_micro_batch + if loss_sum['loss_sum_and_ub_size'][1] > 0 + ] + loss_mean = ( + torch.vstack(loss_sum_tensors_list).sum(axis=0) + if len(loss_sum_tensors_list) > 0 + else torch.tensor([0.0, 0.0]).cuda() + ) else: - loss_mean = torch.tensor(0.0).cuda() + # we're not on the last pipeline stage so no losses + if forward_only: + loss_mean = [] + else: + loss_mean = torch.tensor(0.0).cuda() + if loss_mean.ndim == 0: + loss_mean = loss_mean.unsqueeze(0) + batch_losses.append(loss_mean) + loss_mean = torch.cat(batch_losses).mean() return loss_mean def loss_func(self, loss_mask, output_tensor): @@ -1260,7 +1362,12 @@ def 
test_step(self, dataloader_iter, dataloader_idx=0): return self.inference_step(dataloader_iter, 'test') def training_step(self, dataloader_iter): - batch, batch_idx, dataloader_idx = next(dataloader_iter) + ans = next(dataloader_iter) + if isinstance(ans, tuple) and len(ans) == 3: + batch, batch_idx, dataloader_idx = ans + else: + batch = ans + batch_idx = 0 return super().training_step(itertools.chain([batch]), batch_idx=batch_idx) def setup_mcore_distributed_parallel(self): @@ -1268,6 +1375,63 @@ def setup_mcore_distributed_parallel(self): if self.with_distributed_adam and self.use_mcore_dist_optim: raise ValueError("T5 does not support both distributed adam and mcore distributed data parallel.") + def oomptimizer_schema(self, schema: str = "audio") -> dict: + """ + Return a typing schema for optimal batch size calibration for various + sequence lengths using OOMptimizer. + """ + + if schema == "audio": + return { + "cls": dict, + "inputs": [ + {"name": "audio_signal", "type": NeuralType(("B", "T"), AudioSignal()), "seq_length": "input"}, + {"name": "audio_signal_length", "type": NeuralType(("B",), LengthsType()), "seq_length": "input"}, + { + "name": "contexts", + "type": NeuralType(("B", "T"), LabelsType()), + "seq_length": "output", + "vocab_size": self.tokenizer.vocab_size, + }, + { + "name": "context_lengths", + "type": NeuralType(("B",), LengthsType()), + "seq_length": "output", + }, + { + "name": "answers", + "type": NeuralType(("B", "T"), LabelsType()), + "seq_length": "output", + "vocab_size": self.tokenizer.vocab_size, + }, + { + "name": "loss_mask", + "type": NeuralType(("B", "T"), MaskType()), + "seq_length": "output", + }, + ], + } + elif schema == "text": + return { + "cls": dict, + "inputs": [ + { + "name": "text_context_ids", + "type": NeuralType(("B", "T"), LabelsType()), + "seq_length": "input", + "vocab_size": self.tokenizer.vocab_size, + }, + { + "name": "text_answer_ids", + "type": NeuralType(("B", "T"), LabelsType()), + "seq_length": "output", + "vocab_size": self.tokenizer.vocab_size, + }, + ], + } + else: + raise RuntimeError(f"Unknown schema type for oomptimizer of class {type(self)}: '{schema}'") + class DecoderTextPromptModularizedAudioT5Model(ModularizedAudioT5Model): """Modularized speech GPT model.""" diff --git a/nemo/collections/multimodal/speech_llm/modules/common/audio_text_generation_utils.py b/nemo/collections/multimodal/speech_llm/modules/common/audio_text_generation_utils.py index 4399c4174dd3..8394042515e5 100644 --- a/nemo/collections/multimodal/speech_llm/modules/common/audio_text_generation_utils.py +++ b/nemo/collections/multimodal/speech_llm/modules/common/audio_text_generation_utils.py @@ -349,6 +349,25 @@ def generate( num_audios = None context_start_idx = None audio_signal, audio_signal_length = None, None + if isinstance(inputs, tuple) and len(inputs) == 2: + context_tokens_tensor, context_length_tensor = inputs + elif isinstance(inputs, tuple) and len(inputs) == 4: + context_tokens_tensor, context_length_tensor, audio_signal, audio_signal_length = inputs + elif isinstance(inputs, tuple) and len(inputs) == 6: # multi-audio + has_multi_audios = True + ( + context_tokens_tensor, + context_length_tensor, + audio_signal, + audio_signal_length, + num_audios, + context_start_idx, + ) = inputs + else: + context_tokens_tensor, context_length_tensor = inference_strategy.tokenize_batch( + inputs, tokens_to_generate, add_BOS + ) + """ to unblock TP inference if torch.distributed.get_rank() == text_generation_utils.get_model_parallel_src_rank(): if 
isinstance(inputs, tuple) and len(inputs) == 2: context_tokens_tensor, context_length_tensor = inputs @@ -406,6 +425,7 @@ def generate( num_audios, context_start_idx, ) = receive_generate_info(has_multi_audios) + """ output = synced_generate( model, diff --git a/nemo/collections/multimodal/speech_llm/modules/perception_modules.py b/nemo/collections/multimodal/speech_llm/modules/perception_modules.py index 20c478825946..a133cb35eaf3 100644 --- a/nemo/collections/multimodal/speech_llm/modules/perception_modules.py +++ b/nemo/collections/multimodal/speech_llm/modules/perception_modules.py @@ -486,23 +486,26 @@ def forward( decoder_mems_list=None, return_mems=False, ): - assert input_embeds.shape[-1] == encoder_states.shape[-1] - enc_mask = lens_to_mask(encoded_len, encoder_states.shape[1]).to(encoder_states.dtype) - dec_mask = lens_to_mask(input_lengths, input_embeds.shape[1]).to(input_lengths.dtype) - y = self.xattn_decoder( - decoder_states=self.input_proj1(input_embeds), - decoder_mask=dec_mask, - encoder_states=self.input_proj2(encoder_states), - encoder_mask=enc_mask, - decoder_mems_list=decoder_mems_list, - return_mems=return_mems, - return_mems_as_list=False, + assert input_embeds.shape[-1] == encoder_states.shape[-1], ( + f"Last dimension of the following shapes must be equal: " f"{input_embeds.shape=} {encoder_states.shape=}" ) - if return_mems: - extra_outpus = {'decoder_mems_list': y} - y = y[-1][:, -input_embeds.shape[1] :] - else: - extra_outpus = {} - y = self.output_proj(y) + input_embeds - assert y.shape == input_embeds.shape - return y, extra_outpus + with torch.autocast(device_type="cuda"): # megatron_amp_O2 friendly + enc_mask = lens_to_mask(encoded_len, encoder_states.shape[1]).to(encoder_states.dtype) + dec_mask = lens_to_mask(input_lengths, input_embeds.shape[1]).to(input_lengths.dtype) + y = self.xattn_decoder( + decoder_states=self.input_proj1(input_embeds), + decoder_mask=dec_mask, + encoder_states=self.input_proj2(encoder_states), + encoder_mask=enc_mask, + decoder_mems_list=decoder_mems_list, + return_mems=return_mems, + return_mems_as_list=False, + ) + if return_mems: + extra_outpus = {'decoder_mems_list': y} + y = y[-1][:, -input_embeds.shape[1] :] + else: + extra_outpus = {} + y = self.output_proj(y) + input_embeds + assert y.shape == input_embeds.shape, f"{y.shape=} != {input_embeds.shape=}" + return y, extra_outpus diff --git a/nemo/collections/multimodal/speech_llm/parts/mixins/adapter_mixin.py b/nemo/collections/multimodal/speech_llm/parts/mixins/adapter_mixin.py index 6071bda87057..4cdce4ac59c4 100644 --- a/nemo/collections/multimodal/speech_llm/parts/mixins/adapter_mixin.py +++ b/nemo/collections/multimodal/speech_llm/parts/mixins/adapter_mixin.py @@ -72,4 +72,22 @@ def load_adapters( logging.warning( f"Unexpected keys found in state_dict: {set(state_dict.keys()) - target_keys}, missing keys in state_dict: {target_keys - set(state_dict.keys())}" ) + # compatible with legacy checkpoints without get_peft_state_dict overwrite below + for i in self.get_peft_state_dict().keys(): + if i not in state_dict: + i_no_model = i.replace("model.", "") + if i_no_model in state_dict: + logging.warning(f"Key {i} not found in state_dict, trying {i_no_model}") + state_dict[i] = state_dict[i_no_model] + del state_dict[i_no_model] + super(MegatronGPTModel, self).load_state_dict(state_dict, strict=False) + + def get_peft_state_dict(self): + """ + Gets the keys associated with the adapters only. + Add prefix "model." to the keys. 
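+        The prefix matches the key layout used by load_adapters() above, which also remaps
+        legacy checkpoints saved without it.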
+ """ + peft_state_dict = super().get_peft_state_dict() + peft_state_dict_with_prefix = {"model." + k: v for k, v in peft_state_dict.items()} + return peft_state_dict_with_prefix diff --git a/nemo/collections/multimodal/speech_llm/parts/utils/data_utils.py b/nemo/collections/multimodal/speech_llm/parts/utils/data_utils.py index d638281950b4..494667c5bfb1 100644 --- a/nemo/collections/multimodal/speech_llm/parts/utils/data_utils.py +++ b/nemo/collections/multimodal/speech_llm/parts/utils/data_utils.py @@ -16,6 +16,10 @@ import numpy as np import torch +from lhotse.cut import Cut + +from nemo.collections.common.data.prompt_fn import get_prompt_format_fn +from nemo.collections.common.prompts import PromptFormatter from nemo.utils import logging, logging_mode @@ -253,7 +257,7 @@ def __init__( else: self.eos_id = None - if hasattr(tokenizer, "pad_id") and tokenizer.pad_id > 0: + if hasattr(tokenizer, "pad_id") and tokenizer.pad_id != None and tokenizer.pad_id > 0: self.pad_id = tokenizer.pad_id else: self.pad_id = self.eos_id if self.eos_id is not None else 0 @@ -312,7 +316,7 @@ def _process_example(self, context: str, output: str): else: pre_pad = [] answer_text = text[len(context) :] - answer_ids = pre_pad + self.tokenizer.text_to_ids(answer_text, self.sample_alpha) + answer_ids = pre_pad + self.tokenizer.text_to_ids(answer_text) if self.end_string: answer_ids += self.tokenizer.text_to_ids(self.end_string) @@ -380,3 +384,76 @@ def _process_example(self, context: str, output: str): } return processed_example + + +class PromptFormatterTextProcessing: + """ + Text processing pipeline for speech_llm data loader. + This class was initially adapted from the one used in nemo/collections/nlp/data/language_modeling/megatron/gpt_sft_dataset.py + and later refactored to use the new PromptFormatter API. + + Args: + tokenizer: text tokenizer object + prompt_format (Optional[str]): name of the prompt formatter to be applied. 
+ """ + + def __init__( + self, + tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec', + prompt_format: Optional[str] = None, + audio_locator: Optional[str] = None, + max_seq_length: Optional[int] = 8192, + ): + self.prompt = PromptFormatter.resolve(prompt_format)(tokenizer) + self.prompt_format_fn = get_prompt_format_fn(Cut, self.prompt) + self.tokenizer = tokenizer + self.audio_locator = audio_locator + self.max_seq_length = max_seq_length + self.audio_locator_id = ( + torch.as_tensor(self.tokenizer.text_to_ids(audio_locator)) if audio_locator is not None else None + ) + if hasattr(tokenizer, "pad_id") and tokenizer.pad_id != None and tokenizer.pad_id > 0: + self.pad_id = tokenizer.pad_id + else: + self.pad_id = ( + self.tokenizer.eos_id if self.tokenizer.eos_id is not None and self.tokenizer.eos_id > 0 else 0 + ) + + def _process_example(self, cut: Cut): + ans = self.prompt_format_fn(cut, self.prompt) + context_start_idx = [0] + if self.audio_locator_id is not None: + if len(self.audio_locator_id) == 1: # fast case, special "insert audio" token + context_start_idx = (ans["context_ids"] == self.audio_locator_id).nonzero().flatten() + else: # slow case, no dedicated token, got tokenized into multiple tokens; substring search + context_start_idx = _find_substring_indices(ans["context_ids"], self.audio_locator_id) + if len(ans["input_ids"]) > self.max_seq_length: + truncation_length = len(ans["input_ids"]) - self.max_seq_length + logging.warning( + f'Input ids length {len(ans["input_ids"])} exceed max sequence length {self.max_seq_length}' + ) + ans["input_ids"] = ans["input_ids"][: self.max_seq_length] + if truncation_length < len(ans["answer_ids"]): + ans["answer_ids"] = ans["answer_ids"][:-truncation_length] + else: + ans["answer_ids"] = ans["answer_ids"][: -min(truncation_length, len(ans["answer_ids"]))] + ans["context_ids"] = ans["context_ids"][: -min(truncation_length, len(ans["context_ids"]))] + return { + 'input_ids': ans["input_ids"], + 'answer_start_idx': len(ans["context_ids"]), + 'context_ids': ans["context_ids"], + 'context_length': len(ans["context_ids"]), + 'answer_ids': ans["answer_ids"], + 'context_start_idx': context_start_idx, + } + + +def _find_substring_indices(string: torch.Tensor, substring: torch.Tensor) -> torch.Tensor: + string_len = string.size(0) + substring_len = substring.size(0) + if substring_len > string_len: + return torch.tensor([], dtype=torch.long) + windows = string.unfold(0, substring_len, 1) + matches = (windows == substring).all(dim=1) + indexes = matches.nonzero().flatten() + return indexes diff --git a/nemo/collections/nlp/models/language_modeling/megatron_base_model.py b/nemo/collections/nlp/models/language_modeling/megatron_base_model.py index 37ec8a82cef1..330f6ffee05b 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_base_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_base_model.py @@ -892,7 +892,17 @@ def configure_optimizers(self): ] for bucket in buckets: self._optimizer.init_params_bucket(bucket) - self._optimizer.init_params_bucket(self.parameters()) + try: + # We first attempt to only get the parameters that require grad. + # This is to support multimodal training in child classes + # where some modules might be pretrained and frozen. 
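# Note: the stock torch.nn.Module.parameters() only accepts `recurse`, so model classes
# without a custom `requires_grad_only` override raise TypeError below and we fall back
# to bucketing all parameters.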
+ params = self.parameters(requires_grad_only=True) + except TypeError as e: + if "unexpected keyword argument 'requires_grad_only'" in str(e): + params = self.parameters() + else: + raise + self._optimizer.init_params_bucket(params) if hasattr(self, 'distributed_adam_buckets'): del self.distributed_adam_buckets diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index a4b8242e0185..fb45344eaff3 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -672,7 +672,9 @@ def configure_optimizers(self): # bucket. def make_parameter_bucket(module: torch.nn.Module) -> List[torch.nn.Parameter]: bucket = [ - param for param in module.parameters() if not getattr(param, '_disable_overlap_grad_sync', False) + param + for param in module.parameters() + if not getattr(param, '_disable_overlap_grad_sync', False) and param.requires_grad ] if any(is_float8tensor(param) for param in bucket): bucket = list(filter(is_float8tensor, bucket)) @@ -693,7 +695,7 @@ def make_parameter_bucket(module: torch.nn.Module) -> List[torch.nn.Parameter]: buckets.extend(make_parameter_bucket(layer) for layer in layers) buckets.reverse() used_params = set(itertools.chain.from_iterable(buckets)) - buckets[-1].extend(p for p in self.parameters() if p not in used_params) + buckets[-1].extend(p for p in self.parameters() if p not in used_params and p.requires_grad) self.distributed_adam_buckets = buckets return super().configure_optimizers() diff --git a/nemo/collections/nlp/models/language_modeling/megatron_t5_adapter_model.py b/nemo/collections/nlp/models/language_modeling/megatron_t5_adapter_model.py index a6e6afc8b7eb..0919c9a2de54 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_t5_adapter_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_t5_adapter_model.py @@ -466,8 +466,12 @@ def __init__(self, cfg: DictConfig, trainer: Trainer): self.frozen_model.freeze() logging.info(f'Before adding adapters:\n{self.frozen_model.summarize()}') - encoder = self.frozen_model.enc_dec_model.enc_dec_model.encoder - decoder = self.frozen_model.enc_dec_model.enc_dec_model.decoder + if self.megatron_amp_O2: + encoder = self.frozen_model.enc_dec_model.module.enc_dec_model.encoder + decoder = self.frozen_model.enc_dec_model.module.enc_dec_model.decoder + else: + encoder = self.frozen_model.enc_dec_model.enc_dec_model.encoder + decoder = self.frozen_model.enc_dec_model.enc_dec_model.decoder if encoder: encoder_cfg = self._get_component_cfg('encoder', frozen_model_cfg, cfg) diff --git a/scripts/checkpoint_converters/convert_gemma_nemo_to_hf.py b/scripts/checkpoint_converters/convert_gemma_nemo_to_hf.py new file mode 100644 index 000000000000..f1267d511728 --- /dev/null +++ b/scripts/checkpoint_converters/convert_gemma_nemo_to_hf.py @@ -0,0 +1,342 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from argparse import ArgumentParser +from collections import OrderedDict + +import torch +from omegaconf import open_dict +from pytorch_lightning import Trainer +from transformers import AutoModelForCausalLM, GemmaTokenizer, GemmaTokenizerFast, convert_slow_tokenizer + +from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel +from nemo.collections.nlp.modules.common.megatron.utils import get_ltor_masks_and_position_ids +from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy +from nemo.utils import logging + +""" +Script to convert a gemma checkpoint in nemo (mcore path) into a HuggingFace checkpoint. +This script can be used to 1) generate only the HF weights, or 2) generate an entire HF model folder. +This script is adapted from convert_llama_nemo_to_hf.py + +1) Generate only HF weights from a nemo file: + + python convert_gemma_nemo_to_hf.py \ + --input_name_or_path /workspace/pretrained/HF_TO_NEMO/gemma-2b-it \ + --output_path /workspace/pretrained/NEMO_TO_HF/gemma-2b-it/pytorch_model.bin + +2) Generate the full HF model folder + + python convert_gemma_nemo_to_hf.py \ + --input_name_or_path /workspace/pretrained/HF_TO_NEMO/gemma-2b-it \ + --output_path /workspace/pretrained/NEMO_TO_HF/gemma-2b-it/pytorch_model.bin \ + --hf_input_path /workspace/pretrained/HF_MODELS/gemma-2b-it \ + --hf_output_path /workspace/pretrained/NEMO_TO_HF/gemma-2b-it \ + --input_tokenizer /workspace/pretrained/HF_MODELS/gemma-2b-it \ + --hf_output_tokenizer /workspace/pretrained/NEMO_TO_HF/gemma-2b-it \ + --precision 32 + + Use the --cpu-only flag if the model cannot fit in the GPU (e.g. Llama2 70b). + However this option makes the conversion script significantly slower. +""" + + +def get_args(): + parser = ArgumentParser() + parser.add_argument( + "--input_name_or_path", + type=str, + default=None, + required=True, + help="Path to .nemo file or extracted folder", + ) + parser.add_argument("--output_path", type=str, default=None, required=True, help="Path to HF .bin file") + parser.add_argument( + "--hf_input_path", + type=str, + default=None, + help="A HF model path, " "e.g. a folder containing https://huggingface.co/meta-llama/Llama-2-7b-hf/tree/main", + ) + parser.add_argument( + "--hf_output_path", + type=str, + default=None, + help="Output HF model path, " "with the same format as above but user's own weights", + ) + parser.add_argument( + "--input_tokenizer", + type=str, + default=None, + help="Path to tokenizer used for the input nemo model. (need to extract the .nemo file first)", + ) + parser.add_argument( + "--hf_output_tokenizer", + type=str, + default=None, + help="Path to save the tokenizer used for the output HF model.", + ) + parser.add_argument( + "--precision", + type=str, + default=None, + help="Precision of output weights." + "Defaults to precision of the input nemo weights (model.cfg.trainer.precision)", + ) + parser.add_argument( + "--cpu-only", + action="store_true", + help="Load model in cpu only. 
Useful if the model cannot fit in GPU memory, " + "but this option makes the conversion script significantly slower.", + ) + args = parser.parse_args() + return args + + +def verify_forward(model_path, tokenizer_path, model_string): + logging.info(f"=" * 100) + logging.info(f"Verifying forward pass for {model_string}") + + input_texts = [ + 'query: how much protein should an adult eat', + ] + logging.info(f"Running verifications {input_texts} ...") + + tokenizer = GemmaTokenizer.from_pretrained(tokenizer_path, local_files_only=True) + tokenizer.pad_token = tokenizer.eos_token + batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors="pt") + batch_dict_cuda = {k: v.cuda() for k, v in batch_dict.items()} + + if model_string == "hf": + model = AutoModelForCausalLM.from_pretrained(model_path, local_files_only=True) + model = model.cuda().eval() + outputs = model(**batch_dict_cuda, output_hidden_states=True) + next_token = outputs.logits[0, -1].argmax() + elif model_string == 'nemo': + dummy_trainer = Trainer(devices=1, accelerator='auto', strategy=NLPDDPStrategy()) + model_config = MegatronGPTModel.restore_from(model_path, trainer=dummy_trainer, return_config=True) + model_config.tensor_model_parallel_size = 1 + model_config.pipeline_model_parallel_size = 1 + model = MegatronGPTModel.restore_from( + model_path, trainer=dummy_trainer, override_config_path=model_config, map_location=None + ) + + ids = batch_dict_cuda['input_ids'] + id_tensors = [torch.unsqueeze(torch.LongTensor(id_list), dim=0) for id_list in ids.cpu()] + masks_and_position_ids = [ + get_ltor_masks_and_position_ids(id_tensor, tokenizer.eos_token, False, False, False) + for id_tensor in id_tensors + ] + + for tokens, attn_mask_and_pos_ids in zip(id_tensors, masks_and_position_ids): + attn_mask, _, pos_ids = attn_mask_and_pos_ids + + outputs = model( + tokens=tokens, text_position_ids=pos_ids.cuda(), attention_mask=attn_mask.cuda(), labels=None + ) + next_token = outputs.squeeze()[-1].argmax() + else: + raise ValueError(f"Model string {model_string} not recognized.") + + logging.info(f"{model_string} predicted next token is: '{tokenizer.convert_ids_to_tokens([next_token])}'.") + logging.info(f"=" * 100) + + +def convert(input_nemo_file, output_hf_file, precision=None, cpu_only=False) -> None: + """ + Convert NeMo weights to HF weights + """ + dummy_trainer = Trainer(devices=1, accelerator='cpu', strategy=NLPDDPStrategy()) + model_config = MegatronGPTModel.restore_from(input_nemo_file, trainer=dummy_trainer, return_config=True) + model_config.tensor_model_parallel_size = 1 + model_config.pipeline_model_parallel_size = 1 + if cpu_only: + map_location = torch.device('cpu') + model_config.use_cpu_initialization = True + else: + map_location = None + + if cpu_only: + logging.info("******** Loading model on CPU. 
This will take a significant amount of time.") + model = MegatronGPTModel.restore_from( + input_nemo_file, trainer=dummy_trainer, override_config_path=model_config, map_location=map_location + ) + if precision is None: + precision = model.cfg.precision + if precision in [32, "32"]: + dtype = torch.float32 + elif precision in [16, "16", "16-mixed"]: + dtype = torch.float16 + elif precision in ["bf16", "bf16-mixed"]: + dtype = torch.bfloat16 + else: + logging.warning(f"Precision string {precision} is not recognized, falling back to fp32") + dtype = torch.float32 # fallback + logging.info(f"Using precision {dtype}") + + param_to_weights = lambda param: param.to(dtype) + checkpoint = OrderedDict() + + hidden_size = model.cfg.hidden_size + head_num = model.cfg.num_attention_heads + num_layers = model.cfg.num_layers + ffn_hidden_size = model.cfg.ffn_hidden_size + num_query_groups = model.cfg.get("num_query_groups", head_num) # different num_query_groups for 70B + + head_size = hidden_size // head_num + heads_per_group = head_num // num_query_groups + qkv_total_dim = head_num + 2 * num_query_groups + + # Embedding + embed_weight = model.state_dict()[f'model.embedding.word_embeddings.weight'] + embed_weights_base_name = f'model.embed_tokens.weight' + checkpoint[embed_weights_base_name] = param_to_weights(embed_weight) + for l in range(int(num_layers)): + print(f"converting layer {l}") + + qkv_weights = model.state_dict()[f'model.decoder.layers.{l}.self_attention.linear_qkv.weight'] + qkv_weights = qkv_weights.reshape([qkv_total_dim, head_size, hidden_size]) + + q_slice = torch.cat( + [ + torch.arange((heads_per_group + 2) * i, (heads_per_group + 2) * i + heads_per_group) + for i in range(num_query_groups) + ] + ) + k_slice = torch.arange(heads_per_group, qkv_total_dim, (heads_per_group + 2)) + v_slice = torch.arange(heads_per_group + 1, qkv_total_dim, (heads_per_group + 2)) + ## Example of slices + ## 7b: num_query_groups = head_num = 32, + ## q_slice = [0, 3, 6, 9 , ... 90, 93] + ## k_slice = [1, 4, 7, 10, ... 91, 94] + ## v_slice = [2, 5, 8, 11, ... 92, 95] + ## 70b (with GQA): num_query_groups = 8, head_num = 64 + ## q_slice = [0, 1, .. 6, 7, 10, 11, .. 16, 17, 20, 21, .. 67, 70, ... 76, 77] + ## k_slice = [8, 18, 28, ... 68, 78] + ## v_slice = [9, 19, 29, ... 
69, 79] + + q_weights_base_name = f'model.layers.{l}.self_attn.q_proj.weight' + k_weights_base_name = f'model.layers.{l}.self_attn.k_proj.weight' + v_weights_base_name = f'model.layers.{l}.self_attn.v_proj.weight' + + checkpoint[q_weights_base_name] = param_to_weights(qkv_weights[q_slice].reshape(-1, hidden_size)) + checkpoint[k_weights_base_name] = param_to_weights(qkv_weights[k_slice].reshape(-1, hidden_size)) + checkpoint[v_weights_base_name] = param_to_weights(qkv_weights[v_slice].reshape(-1, hidden_size)) + + # attention dense + o_weight = model.state_dict()[f'model.decoder.layers.{l}.self_attention.linear_proj.weight'] + o_weight_base_name = f'model.layers.{l}.self_attn.o_proj.weight' + checkpoint[o_weight_base_name] = param_to_weights(o_weight) + + # mlp + mlp_weights = model.state_dict()[f'model.decoder.layers.{l}.mlp.linear_fc1.weight'] + mlp_down_proj_weight = mlp_weights[:ffn_hidden_size, :] + mlp_gate_proj_weight = mlp_weights[ffn_hidden_size:, :] + + mlp_down_proj_base_name = f'model.layers.{l}.mlp.gate_proj.weight' + mlp_gate_proj_base_name = f'model.layers.{l}.mlp.up_proj.weight' + + checkpoint[mlp_down_proj_base_name] = param_to_weights(mlp_down_proj_weight) + checkpoint[mlp_gate_proj_base_name] = param_to_weights(mlp_gate_proj_weight) + + mlp_up_proj_weight = model.state_dict()[f'model.decoder.layers.{l}.mlp.linear_fc2.weight'] + mlp_up_proj_base_name = f'model.layers.{l}.mlp.down_proj.weight' + checkpoint[mlp_up_proj_base_name] = param_to_weights(mlp_up_proj_weight) + + # layernorm + input_ln_weight = model.state_dict()[f'model.decoder.layers.{l}.self_attention.linear_qkv.layer_norm_weight'] + input_ln_base_name = f'model.layers.{l}.input_layernorm.weight' + checkpoint[input_ln_base_name] = param_to_weights(input_ln_weight - 1.0) + + post_attn_ln_weight = model.state_dict()[f'model.decoder.layers.{l}.mlp.linear_fc1.layer_norm_weight'] + post_attn_ln_base_name = f'model.layers.{l}.post_attention_layernorm.weight' + checkpoint[post_attn_ln_base_name] = param_to_weights(post_attn_ln_weight - 1.0) + + print(f"done layer {l}") + + final_ln_weight = model.state_dict()[f'model.decoder.final_layernorm.weight'] + final_ln_base_name = f'model.norm.weight' + checkpoint[final_ln_base_name] = param_to_weights(final_ln_weight - 1.0) + + # NOTE: Gemmas uses weight tying + output_layer_weight = model.state_dict()[ + f'model.embedding.word_embeddings.weight' + ] # model.state_dict()[f'model.output_layer.weight'] + output_layer_base_name = f'lm_head.weight' + checkpoint[output_layer_base_name] = param_to_weights(output_layer_weight) + + os.makedirs(os.path.dirname(output_hf_file), exist_ok=True) + torch.save(checkpoint, output_hf_file) + logging.info(f"Weights saved to {output_hf_file}") + + return dtype + + +def replace_hf_weights_and_tokenizer( + weights_file, + dtype, + input_hf_path, + output_hf_path, + tokenizer_path, + output_hf_tokenizer, +): + model = AutoModelForCausalLM.from_pretrained( + input_hf_path, + local_files_only=True, + torch_dtype=dtype, + ) + nemo_exported = torch.load(weights_file) + + if tokenizer_path: + tokenizer = GemmaTokenizer.from_pretrained( + tokenizer_path, + local_files_only=True, + legacy=False, + ) + tmp_tokenizer = convert_slow_tokenizer.convert_slow_tokenizer(tokenizer) + fast_tokenizer = GemmaTokenizerFast(tokenizer_object=tmp_tokenizer) + tokenizer_length = len(fast_tokenizer) + model.resize_token_embeddings(tokenizer_length) + + model.load_state_dict(nemo_exported) + model.save_pretrained(output_hf_path) + logging.info(f"Full HF model saved to 
{output_hf_path}") + + if tokenizer_path: + fast_tokenizer.save_pretrained(output_hf_tokenizer) + tokenizer.save_pretrained(output_hf_tokenizer) + logging.info(f"Tokenizer saved to {output_hf_tokenizer}") + + +if __name__ == '__main__': + args = get_args() + if not args.hf_output_tokenizer and args.hf_output_path: + args.hf_output_tokenizer = args.hf_output_path + # dtype = convert(args.input_name_or_path, args.output_path, precision=args.precision, cpu_only=args.cpu_only) + if args.hf_input_path and args.hf_output_path: + """ + replace_hf_weights_and_tokenizer( + args.output_path, + dtype, + args.hf_input_path, + args.hf_output_path, + args.input_tokenizer, + args.hf_output_tokenizer, + ) + """ + verify_forward(args.input_name_or_path, args.hf_output_tokenizer, "nemo") + verify_forward(args.hf_output_path, args.hf_output_tokenizer, "hf") + else: + logging.info("`hf_input_path` and/or `hf_output_path` not provided, not generating full HF model.") + logging.info(f".bin file is saved to {args.output_path}") diff --git a/scripts/speech_llm/estimate_token_bins.py b/scripts/speech_llm/estimate_token_bins.py new file mode 100644 index 000000000000..fe7cc78b1f7a --- /dev/null +++ b/scripts/speech_llm/estimate_token_bins.py @@ -0,0 +1,328 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=C0115 +# pylint: disable=C0116 +# pylint: disable=C0301 + +import argparse +import ast +import math +from functools import partial +from itertools import islice +from typing import Callable, Iterable + +import numpy as np +import pandas as pd +from lhotse.cut import Cut +from omegaconf import OmegaConf + +from nemo.collections.asr.data.audio_to_text_lhotse import TokenizerWrapper +from nemo.collections.common.data.lhotse.cutset import read_cutset_from_config +from nemo.collections.common.data.lhotse.dataloader import LhotseDataLoadingConfig, tokenize, tokenize_with_prompt +from nemo.collections.common.data.lhotse.sampling import ( + MultimodalFixedBucketBatchSizeConstraint2D, + MultimodalSamplingConstraint, + TokenCountFilter, + TokenPerTokenFilter, +) +from nemo.collections.common.prompts.formatter import PromptFormatter +from nemo.collections.common.tokenizers import AggregateTokenizer, SentencePieceTokenizer + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Estimate token bins for Lhotse dynamic bucketing using a sample of the input dataset. " + "The dataset is read either from one or more manifest files and supports data weighting. " + "Unlike estimate_duration_bins.py, this script is intended for text data only. " + "It supports 2D bucketing. ", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "input", + help='Path to a data input configuration YAML file. ' + 'This is the only type of input specification supported for text data.', + ) + parser.add_argument( + "-t", + "--tokenizer", + nargs="+", + required=True, + help="Path to one or more SPE tokenizers. 
More than one means we'll use AggregateTokenizer and --langs argument must also be used. When provided, we'll estimate a 2D distribution for input and output sequence lengths.", + ) + parser.add_argument( + "-a", "--langs", nargs="+", help="Language names for each of AggregateTokenizer sub-tokenizers." + ) + parser.add_argument( + "-b", + "--buckets", + type=int, + default=30, + help="The desired number of buckets (dim0 => covers input sequence length / audio duration).", + ) + parser.add_argument( + "-s", + "--sub-buckets", + type=int, + default=None, + help="The desired number of sub-buckets (dim1 => covers output sequence length / num_tokens). " + "If not provided, we'll only perform 1D bucketing. ", + ) + parser.add_argument( + "-n", + "--num_examples", + type=int, + default=-1, + help="The number of examples (utterances) to estimate the bins. -1 means use all data " + "(be careful: it could be iterated over infinitely).", + ) + parser.add_argument( + "-l", + "--min_tokens", + type=float, + default=-float("inf"), + help="If specified, we'll filter out examples with less tokens than this number.", + ) + parser.add_argument( + "-u", + "--max_tokens", + type=float, + default=float("inf"), + help="If specified, we'll filter out examples with more tokens than this number.", + ) + parser.add_argument( + "--max_tpt", + type=float, + default=float("inf"), + help="If specified, we'll filter out examples with more output tokens per input token than this. ", + ) + parser.add_argument( + "-q", "--quiet", type=bool, default=False, help="When specified, only print the estimated duration bins." + ) + parser.add_argument( + "-f", + "--prompt-format", + type=str, + help="When specified, we'll use a prompt formatter in addition to the tokenizer for the purpose of estimating token count bins. " + "This is useful for accurate 2D bucket estimation with models such as EncDecMultiTaskModel (Canary-1B), " + "or any model where the label sequence consists of a user prompt and a model's response.", + ) + parser.add_argument( + "-p", + "--prompt", + type=str, + help="Prompt slots provided as a Python list of dicts. It is used together with --prompt-format option." + "For example, with Canary-1B you may use: [{'role':'user','slots':{'source_lang':'en','target_lang':'en','task':'asr','pnc':'yes'}]", + ) + parser.add_argument( + "-m", + "--measure-total-length", + type=bool, + default=False, + help="When specified, we'll measure the total length (context+answer, i.e. input_ids) instead of context-only length. Total length is more suitable for decoder-only models while context-only length is more suitable for encoder-decoder models.", + ) + return parser.parse_args() + + +def estimate_token_buckets( + cuts: Iterable[Cut], + num_buckets: int, + num_subbuckets: int | None, + quiet: bool, +) -> list[tuple[float, float]]: + """ + This function is based on lhotse.dataset.sampling.dynamic_bucketing.estimate_duration_buckets. + It extends it to a 2D bucketing case. + """ + assert num_buckets > 1 + is_2d = num_subbuckets is not None + + if is_2d: + constraint = MultimodalFixedBucketBatchSizeConstraint2D([(0.0, 0.0)], [0], measure_total_length=False) + else: + constraint = MultimodalSamplingConstraint(measure_total_length=True) + + # Gather the duration and token count statistics for the dataset. 
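# Sketch of what the loop below collects (illustrative numbers only): with the 2D constraint,
# measure_length(cut) returns an (input_tokens, output_tokens) pair, e.g. a cut with 120
# context tokens and a 30-token answer yields (120, 30); with the 1D constraint the total
# length is measured instead, i.e. 150 for the same cut.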
+ num_input_tokens = [] + num_output_tokens = [] + for c in cuts: + ans = constraint.measure_length(c) + if is_2d: + itoks, otoks = ans + num_input_tokens.append(itoks) + num_output_tokens.append(otoks) + else: + num_input_tokens.append(ans) + num_input_tokens = np.array(num_input_tokens, dtype=np.int32) + if is_2d: + num_output_tokens = np.array(num_output_tokens, dtype=np.int32) + joint = np.rec.fromarrays([num_input_tokens, num_output_tokens]) + joint.sort() + num_input_tokens = joint.f0 + num_output_tokens = joint.f1 + else: + num_input_tokens.sort() + + # We are building buckets with equal duration (empirically leads to more even bucket exhaustion over time). + # We need to determine how much duration to allocate per bucket. + size_per_bucket = num_input_tokens.sum() / num_buckets + + if not quiet: + print("Duration distribution:") + print(pd.Series(num_input_tokens).describe(percentiles=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99])) + max_input_tokens = num_input_tokens[-1] + + if is_2d: + tpt = num_output_tokens / num_input_tokens + if not quiet: + print("Output tokens per input token distribution:") + print(pd.Series(tpt).describe(percentiles=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99])) + max_tpt = tpt.max() + del tpt + + bins = [] + bin_indexes = [0] + tot = 0.0 + + def _estimate_output_token_buckets(max_bucket_duration): + # Since this is 2D bucketing, apply the same bin creation logic + # for the second dimension (i.e. token count) as for the first dimension (duration). + # That means we aim to have each bucket contain roughly the same number of tokens. + # Note that this estimation is biased towards more padding if you have + # a lot of zero-token examples (e.g. non-speech). + nonlocal bins + num_tokens_bucket = num_output_tokens[bin_indexes[-1] : binidx] + num_tokens_bucket.sort() + tokens_per_subbucket = num_tokens_bucket.sum() / num_subbuckets + tot_toks = 0 + # Iterate over token counts, and whenever we hit tokens_per_subbucket, create a new 2D bucket bin. + for num_toks in num_tokens_bucket: + # Threshold hit: we are creating a new (max_duration, max_num_tokens) bin. + if tot_toks > tokens_per_subbucket: + bins.append((max_bucket_duration, num_toks)) + tot_toks = 0 + tot_toks += num_toks + bins.append((size, math.ceil(size * max_tpt))) + + # Iterate over data, and whenever we hit size_per_bucket, create a new bucket bin. + for binidx, size in enumerate(num_input_tokens): + if tot > size_per_bucket: + # Threshold hit: we are creating a new duration bin (multiplied by number of token bins). + if is_2d: + _estimate_output_token_buckets(max_bucket_duration=size) + else: + bins.append(size) + tot = 0.0 + tot += size + + # Estimate an extra 2D bin set for global max duration. 
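# Note: the loop above only emits a bin when the running total crosses the per-bucket
# threshold, so the longest inputs after the final crossing would otherwise get no
# sub-bucket boundaries; the call below covers them up to the global maximum input length.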
+ if num_subbuckets is not None: + _estimate_output_token_buckets(max_bucket_duration=max_input_tokens) + + return bins + + +def load_tokenizer(paths: list[str], langs: list[str] = None) -> TokenizerWrapper: + if len(paths) == 1: + tok = SentencePieceTokenizer(paths[0]) + else: + assert langs is not None and len(paths) == len( + langs + ), f"Cannot create AggregateTokenizer; each tokenizer must have assigned a language via --langs option (we got --tokenizers={paths} and --langs={langs})" + tok = AggregateTokenizer({lang: SentencePieceTokenizer(p) for lang, p in zip(langs, paths)}) + return TokenizerWrapper(tok) + + +def apply_tokenizer(cut, tokenizer=None, prompt: PromptFormatter = None): + if prompt is not None: + cut = tokenize_with_prompt(cut, tokenizer, prompt) + elif tokenizer is not None: + cut = tokenize(cut, tokenizer) + return cut + + +class RejectionsCounter: + def __init__(self, predicate: Callable, message: str): + self.predicate = predicate + self.message = message + self.total = 0 + self.rejected = 0 + + def __call__(self, example) -> bool: + ans = self.predicate(example) + self.total += 1 + if not ans: + self.rejected += 1 + return ans + + def print_report(self) -> None: + if self.rejected: + print(f"{self.message} | Rejected {self.rejected}/{self.total} examples.") + + +def main(): + args = parse_args() + + if not args.quiet: + pd.set_option('display.float_format', lambda x: '%.2f' % x) + + tokenizer = None + prompt = None + if args.tokenizer is not None: + tokenizer = load_tokenizer(args.tokenizer, args.langs) + if args.prompt_format is not None: + prompt_defaults = None + if args.prompt is not None: + prompt_defaults = ast.literal_eval(args.prompt) + prompt = PromptFormatter.resolve(args.prompt_format)(tokenizer._tokenizer, defaults=prompt_defaults) + + assert args.input.endswith(".yaml") + config = OmegaConf.merge( + OmegaConf.structured(LhotseDataLoadingConfig), + OmegaConf.from_dotlist([f"input_cfg={args.input}"]), + ) + cuts, _ = read_cutset_from_config(config) + cuts = cuts.map(partial(apply_tokenizer, tokenizer=tokenizer, prompt=prompt), apply_fn=None) + if hasattr(cuts, "prefetch"): + cuts = cuts.prefetch() # to be released in lhotse 1.27 + token_filter = RejectionsCounter( + TokenCountFilter(args.min_tokens, args.max_tokens, args.measure_total_length), "Token count filtering" + ) + cuts = cuts.filter(token_filter) + tpt_filter = RejectionsCounter(TokenPerTokenFilter(-1, args.max_tpt), "Output tokens per input token filtering") + cuts = cuts.filter(tpt_filter) + if (N := args.num_examples) > 0: + cuts = islice(cuts, N) + + token_bins = estimate_token_buckets( + cuts, + num_buckets=args.buckets, + num_subbuckets=args.sub_buckets, + quiet=args.quiet, + ) + if args.sub_buckets is not None: + token_bins = "[" + ','.join(f"[{b:d},{sb:d}]" for b, sb in token_bins) + "]" + else: + token_bins = "[" + ','.join(f"{b:d}" for b in token_bins) + "]" + if args.quiet: + print(token_bins) + return + token_filter.print_report() + tpt_filter.print_report() + print("Use the following options in your config:") + print(f"\tnum_buckets={args.buckets}") + print(f"\tbucket_duration_bins={token_bins}") + + +if __name__ == "__main__": + main() diff --git a/scripts/speech_llm/export_conversations_to_tar.py b/scripts/speech_llm/export_conversations_to_tar.py new file mode 100644 index 000000000000..df1757cecce0 --- /dev/null +++ b/scripts/speech_llm/export_conversations_to_tar.py @@ -0,0 +1,41 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from random import Random + +import click +from lhotse import CutSet + +from nemo.collections.common.data.lhotse.text_adapters import ( + NeMoMultimodalConversationJsonlAdapter, + NeMoMultimodalConversationTarWriter, +) + + +@click.command() +@click.argument("manifest", type=click.Path()) +@click.argument("output_dir", type=click.Path()) +@click.option("-n", "--shard_size", type=int, default=100, help="Number of conversations per shard.") +@click.option("--shuffle/--no-shuffle", default=False, help="Shuffle conversations.") +@click.option("-s", "--seed", type=int, default=42, help="Random seed.") +def export(manifest: str, output_dir: str, shard_size: int, shuffle: bool, seed: int): + with NeMoMultimodalConversationTarWriter(output_dir, shard_size=shard_size) as writer: + source = NeMoMultimodalConversationJsonlAdapter(manifest, audio_locator_tag="") + if shuffle: + source = CutSet(source).shuffle(buffer_size=50000, rng=Random(seed)) + for item in source: + writer.write(item) + + +if __name__ == '__main__': + export() diff --git a/scripts/speech_llm/oomptimizer.py b/scripts/speech_llm/oomptimizer.py new file mode 100755 index 000000000000..6c30676bec04 --- /dev/null +++ b/scripts/speech_llm/oomptimizer.py @@ -0,0 +1,592 @@ +#!/usr/bin/env python +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=C0116 +# pylint: disable=C0301 +import importlib +import math +import sys +from numbers import Number + +import click +import pytorch_lightning as pl +import torch +from lhotse import compute_num_samples +from omegaconf import OmegaConf + +from nemo.collections.asr.models.asr_model import ASRModel +from nemo.collections.nlp.parts.megatron_trainer_builder import MegatronLMPPTrainerBuilder +from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, MaskType, NeuralType +from nemo.utils import logging + + +class ProfilingBatchGenerator: + """ + ProfilingBatchGenerator is used to generate artificial mini-batches for model training + and tracking the progress of batch size optimization. + + The high-level usage API is the following:: + + >>> gen = ProfilingBatchGenerator(schema) + ... finished = False + ... while not finished: + ... batch = gen(input_seq_len, output_seq_len) + ... try: + ... training_step(model, batch) + ... oom = False + ... except torch.cuda.OutOfMemoryError: + ... oom = True + ... finished = gen.advance(oom) + ... 
solution = gen.max_batch_size # The solution of the search problem. + ... gen.reset() # Can re-use for other sequence lengths now. + + The search terminates once the difference between max working batch size and min OOM batch size + divided by the latter is smaller than ``rel_gap_thresh`` that difference amounts to a single element. + For example, a max working batch size is 96 and min OOM batch size is 100 indicates a gap of 0.04, + which would terminate the search with threshold of 0.05. + + In order to generate mini-batches compatible with a given model, the generator: + + * accepts a ``schema`` argument in its constructor, and + + * accepts input/output sequence lengths in each call to generate a mini-batch. + + ``schema`` has the following structure:: + + + >>> { + ... "cls": tuple | MyBatchType, + ... "inputs": [ + ... { + ... "type": NeuralType(...) | Literal["dummy"], + ... "seq_length": Literal["input", "output"], + ... "vocab_size": int, # optional, required only for LabelsType + ... "name": str, # optional, indicates kwarg + ... }, + ... ..., + ... ] + ... } + + ``cls`` indicates how we should construct the mini-batch. Typically you can just use ``tuple`` for most + batch schemas. However, if the model expects a specific, e.g., dataclass, you can tell ``ProfilingBatchGenerator`` + to use it. The mini-batch object will be constructed using the items in ``inputs``. + + Each element of ``inputs`` specifies a NeMo NeuralType which needs to have a defined ``elements_type``. + The supported types are ``AudioSignal``, ``LengthsType`` and ``LabelsType``. + If "type" is not a NeuralType, we interpret that as a placeholder tensor that's not relevant but expected + by the model/batch constructor. In addition, ``"seq_length"`` key is used to determine whether we should apply + input or output sequence length to a given tensor. + + Optional keys: + + * ``vocab_size`` is required for ``LabelsType`` so that we can generate proper label values. + + * ``name`` is required if objects of ``cls`` have to be constructed using keyword arguments. + + A simple schema example for a model using audio/lengths tensor pair (unsupervised/self-supervised):: + + >>> { + ... "cls": tuple, + ... "inputs": [ + ... {"type": NeuralType(("B", "T"), AudioSignal()), "seq_length": "input"}, + ... {"type": NeuralType(("B"), LengthsType()), "seq_length": "input"}, + ... ] + ... 
} + + """ + + def __init__( + self, + schema: dict, + start_batch_size: int = 32, + rel_gap_thresh: float = 0.05, + device: str = "cuda", + ): + self.schema = schema + self.start_batch_size = start_batch_size + self.rel_gap_thresh = rel_gap_thresh + self.device = device + self.reset() + + def __call__(self, input_seq_length: int, output_seq_length: int): + B = self._current + select_seq_length = {"input": input_seq_length, "output": output_seq_length} + batch = [] + names = [] + for item in self.schema["inputs"]: + nt = item["type"] + if isinstance(nt, str) and nt == "constant": + if isinstance(val := item["value"], str) and val == "batch": + tnsr = torch.tensor([B], dtype=torch.long, device=self.device) + else: + tnsr = torch.tensor([val], dtype=torch.long, device=self.device) + elif not isinstance(nt, NeuralType): # placeholder + tnsr = torch.tensor([]) + elif isinstance(nt.elements_type, AudioSignal): + seq_length = select_seq_length[item["seq_length"]] + tnsr = torch.randn(B, seq_length, dtype=torch.float32, device=self.device) + elif isinstance(nt.elements_type, LengthsType): + seq_length = select_seq_length[item["seq_length"]] + tnsr = torch.ones(B, dtype=torch.long, device=self.device) * seq_length + elif isinstance(nt.elements_type, MaskType): + seq_length = select_seq_length[item["seq_length"]] + tnsr = torch.ones(B, seq_length, device=self.device) + elif isinstance(nt.elements_type, LabelsType): + seq_length = select_seq_length[item["seq_length"]] + tnsr = torch.randint(0, item["vocab_size"], size=(B, seq_length), device=self.device) + else: + raise RuntimeError("Unexpected item in oomptimizer schema: {item}") + batch.append(tnsr) + names.append(item.get("name")) + args = [elem for name, elem in zip(names, batch) if name is None] + kwargs = {name: elem for name, elem in zip(names, batch) if name is not None} + if not kwargs and self.schema["cls"] == tuple: + return tuple(args) + return self.schema["cls"](*args, **kwargs) + + @property + def max_batch_size(self) -> int | None: + """ + Return the solution of the batch size search problem. + It will keep returning None until the search is done. + """ + if ( + self._max_ok is not None + and self._min_err is not None + and (self.current_rel_gap <= self.rel_gap_thresh or self._min_err - self._max_ok <= 1) + ): + return self._max_ok + return None + + @property + def current_rel_gap(self) -> float | None: + """ + Return the current gap between the largest batch that works and the smallest batch that triggers OOM. + The gap is defined as the batch size difference divided by the larger element. + E.g., if the best found batch size is 95 and the smallest that triggers OOM is 100, the gap is 0.05. + """ + if self._min_err is None or self._max_ok is None: + return None + return (self._min_err - self._max_ok) / self._min_err + + def reset(self): + """Reset the generator to prepare it for a new search.""" + self._current = self.start_batch_size + self._max_ok = None # max batch size that works + self._min_err = None # min batch size that doesn't work + + def advance(self, oom: bool) -> bool: + """ + Adjusts the current batch size based on the outcome. + Returns a bool indicating whether the calibration is complete. + """ + if self.max_batch_size is not None: + return True + + if oom: + # Training step failed with OOM. + # Update the minimum known batch size that causes an error. 
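# Worked example with hypothetical numbers: if _max_ok is 96 and a new OOM occurs at
# batch size 104, _min_err becomes 104; the relative gap (104 - 96) / 104 ~= 0.077 is
# still above the default 0.05 threshold, so the next probe is round((96 + 104) / 2) = 100.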
+ self._min_err = min(float("inf") if self._min_err is None else self._min_err, self._current) + # Training step failed on OOM + if self._max_ok is None: + # We haven't found a batch size that works yet, keep going 2x down. + self._current = round(self._current / 2) + else: + # Try the middle-point between the known extremes. + self._current = round((self._max_ok + self._min_err) / 2) + else: + # Training step successful. + # Update the maximum known batch size that works. + self._max_ok = max(-1 if self._max_ok is None else self._max_ok, self._current) + if self._min_err is None: + # We haven't found a batch size that causes an error yet, keep going 2x higher + self._current *= 2 + else: + # Try the middle-point between the known extremes. + self._current = round((self._max_ok + self._min_err) / 2) + + if self._current == 0: + raise RuntimeError( + "We diverged and arrived batch_size=0. Perhaps the input is too large for this model and hardware." + ) + + return False + + +class FloatList(click.Option): + """Support passing bucket duration bins as [1.1,2.5,5.6,...]""" + + name = "list[float]" + + def type_cast_value(self, ctx, value): + if isinstance(value, list) and all(isinstance(v, float) for v in value): + return value + try: + import ast + + ans = ast.literal_eval(value) + if isinstance(ans[0], list): + ans = [tuple(item) for item in ans] + return ans + except ValueError: + raise click.BadParameter(value) + + +@click.command(context_settings={'show_default': True}) +@click.option( + "-n", + "--pretrained-name", + type=str, + default=None, + help="Name of a pretrained model to use, e.g. 'nvidia/canary-1b'.", +) +@click.option( + "-m", + "--module-name", + type=str, + default=None, + help="Full path to NeMo's module corresponding to CONFIG_PATH, e.g. 'nemo.collections.asr.models.EncDecMultiTaskModel'.", +) +@click.option( + "-c", "--config-path", type=str, default=None, help="Path to the training configuration file for MODULE_NAME." +) +@click.option( + "--schema", + type=str, + default="audio", + help="Which schema to use (typically used for choosing the modality, i.e., 'audio' / 'text'", +) +@click.option( + "-b", + "--buckets", + cls=FloatList, + default=[5.0, 10.0, 15.0, 20.0, 25.0, 30.0], + help="List of upper-bound bucket bins (i.e. first bucket is [0.0 - item0), second bucket is [item0 - item1), etc.). " + "We also support a nested list for 2D bucketing, e.g. [[2.0, 10],[2.0,20],[4.5,15],[4.5,30],...], " + "where each item is a pair of (max_input_seq_len, max_output_seq_len) for a given bucket.", +) +@click.option( + "-t", + "--threshold", + type=float, + default=0.05, + help="Search stopping criterion in range [0, 1], lower is more precise. Interpret as the uncerainty gap, i.e. (min_oom_batch_size - max_ok_batch_size) / min_oom_batch_size.", +) +@click.option("-s", "--start-batch-size", type=int, default=32, help="Initial batch size to start the search from.") +@click.option( + "-r", + "--ratio", + type=int, + default=12, # conservative estimate towards longer transcripts + help="The output_sequence_length to input_sequence_length ratio for the purpose of determing the maximum output sequence lengths. " + "The interpretation depends on input and output modalities. Examples: for audio->text it's tokens per second. " + "For text->audio it's seconds per token. For audio->audio it's output seconds per input second. " + "For text->text it's output tokens per input token. " + "In general larger ratio means longer output sequences and increased memory consumption. 
" + "The default value is set adequately for automatic speech recognition. " + "This argument is ignored when 2D buckets are provided to --buckets option. " + "For GPT-style models, use --ratio=1 ", +) +@click.option( + "-f", + "--memory-fraction", + type=float, + default=0.9, + help="Limits the use of CUDA memory for this process to MEMORY_FRACTION of the total device memory. " + "By default we force 5% memory to be unused to account for non-training-loop related CUDA memory usage" + "in actual training scripts.", +) +@click.option( + "-d", + "--device", + default="cuda:0", + help="Device string to be passed to torch.device; due to MEMORY_FRACTION option, " + "it must specify the device index (e.g. cuda:0). " + "You can also leave the default index and select a specific GPU using env var CUDA_VISIBLE_DEVICES=", +) +@click.option( + "-y", + "--dtype", + default="bfloat16", + help="Float precision to use for computation (used together with autocast).", +) +@click.option( + "--ddp/--no-ddp", + type=bool, + default=True, + help="Whether we should simulate DDP GPU RAM usage. Stores an extra copy of the model in GPU memory. Enabled by default.", +) +def oomptimizer( + pretrained_name: str | None, + module_name: str | None, + config_path: str | None, + schema: str, + buckets: list[float], + threshold: float, + start_batch_size: int, + ratio: int, + memory_fraction: float, + device: str, + dtype: str, + ddp: bool, +): + """ + OOMptimizer finds the optimal batch sizes for training your model with bucketing dataloading. + It performs a search over batch sizes until it converges by measuring the GPU memory usage for + a model's training step and optimizer update. + + \b + There are two main usage patterns: for using a pretrained model or an untrained model configuration. + The latter is more flexible but requires the user to provide two separate arguments. Examples: + * python oomptimizer.py --pretrained-name nvidia/canary-1b + * python oomptimizer.py --module-name nemo.collections.asr.models.EncDecMultiTaskModel \ + --config-path examples/asr/conf/speech_multitask/fast-conformer_aed.yaml + + Dynamic bucketing is notoriously difficult to tune as you risk running into CUDA OOM many steps into the training. + In order to simplify finding the optimal settings, OOMptimizer scans each bucket to find the maximum possible + batch size that doesn't trigger a CUDA OOM. + + \b + The suggested workflow is the following: + 1) Run scripts/speech_recognition/estimate_duration_bins.py to get the duration distribution of your data. + (consider running estimate_duration_bins_2d.py for models with a strong dependency on output sequence length + such as attention-encoder-decoder models). + 2) Run OOMptimizer to find the optimal batch sizes for your specific model, optimizer, and GPU. + 3) Use these optimal settings in your actual training script and enjoy optimal GPU utilization OOM-free. + + In the unlikely event that OOMptimizer bucket batch sizes are still leading to OOMs, + please try a lower setting of the MEMORY_FRACTION option, e.g. 0.75 (75% of GPU memory). + This may be required in very complex setups where there are additional GPU RAM loads that can't be anticipated + through the combination of training_step and optimizer update. 
+ """ + if all(opt is None for opt in (pretrained_name, module_name, config_path)): + click.secho( + "You need to provide either PRETRAINED_NAME or the pair of MODULE_NAME and CONFIG_PATH.", fg="yellow" + ) + sys.exit(1) + logging.setLevel(logging.CRITICAL) + torch.cuda.set_per_process_memory_fraction(memory_fraction, device) + + model_clones = [] + for _ in range(2 if ddp else 1): + if pretrained_name is not None: + assert ( + config_path is None and module_name is None + ), "--pretrained-name cannot be used together with --module-name/--config-path" + click.echo(f"Intializing ASR model from pretrained checkpoint {pretrained_name}.") + trainer = pl.Trainer(barebones=True) + trainer.log_every_n_steps = 1000000 + model = ASRModel.from_pretrained(pretrained_name, trainer=trainer).to(device) + else: + assert config_path is not None, "--module-name requires --config-path to be specified as well." + assert module_name is not None, "--config-path requires --module-name to be specified as well." + cfg = OmegaConf.load(config_path) + trainer = MegatronLMPPTrainerBuilder(cfg).create_trainer() + trainer.log_every_n_steps = 1000000 + namespace, name = module_name.rsplit('.', maxsplit=1) + model_cls = getattr(importlib.import_module(namespace), name) + model = model_cls.restore_from_pretrained_models(cfg, trainer=trainer).to(device) + model.log = lambda *args, **kwargs: None + model_clones.append(model) + model = model_clones[-1] + model.init_consumed_samples = 0 + model._compute_consumed_samples_after_training_step = lambda *args, **kwargs: 1 + + from megatron.core.parallel_state import initialize_model_parallel + from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo + + initialize_model_parallel_for_nemo( + world_size=1, global_rank=0, local_rank=0, micro_batch_size=16, global_batch_size=16 + ) + torch.distributed.init_process_group("nccl", world_size=1, rank=0) + initialize_model_parallel() + + if not hasattr(model, "oomptimizer_schema"): + click.secho( + f"We read model of type {type(model)} which doesn't seem to support OOMptimizer " + f"(we could not find the property .oomptimizer_schema).", + fg="red", + ) + sys.exit(1) + + schema = model.oomptimizer_schema(schema) + + click.echo("Setting up the optimizers.") + optimizer = model.configure_optimizers() + if isinstance(optimizer, tuple): + optimizer = optimizer[0][0] + + # warmup - preallocate model/optimizer memory for all modality modules + for sch_ in ("text", "audio"): + gen_ = ProfilingBatchGenerator(model.oomptimizer_schema(sch_), start_batch_size=1) + with torch.autocast("cuda", getattr(torch, dtype)): + if sch_ == "audio": + batch_ = gen_(17519, 13) + else: + batch_ = gen_(9, 7) + optimizer.zero_grad() + out = model.training_step(iter([batch_])) + optimizer.step() + + is_2d_bucketing = all( + isinstance(item, (list, tuple)) and len(item) == 2 and all(isinstance(v, Number) for v in item) + for item in buckets + ) + # Determine modality for input and output. 
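# A direction is treated as "text" when any schema entry for it carries LabelsType
# (string-typed "constant"/placeholder entries are skipped), otherwise it is assumed to be
# audio; e.g. the "audio" schema defined earlier in this patch resolves to ("audio", "text"):
# raw audio in, label tokens out.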
+ modalities = [ + ( + "text" + if any( + isinstance(item["type"].elements_type, LabelsType) and item["seq_length"] == direction + for item in schema["inputs"] + if not isinstance(item["type"], str) + ) + else "audio" + ) + for direction in ("input", "output") + ] + + def get_max_seq_lens(buckets): + + def _determine_lens_for_bucket(bin): + if is_2d_bucketing: + input_len, output_len = bin + else: + input_len = bin + output_len = math.ceil(ratio * input_len) + sampling_rate = getattr( + model, "sample_rate", 16000 + ) # TODO: may need to extend schema for broader model coverage + match modalities: + case "audio", "audio": + return ( + compute_num_samples(input_len, sampling_rate=sampling_rate), + compute_num_samples(output_len, sampling_rate=sampling_rate), + ) + case "audio", "text": + return (compute_num_samples(input_len, sampling_rate=sampling_rate), output_len) + case "text", "audio": + return ( + input_len, + compute_num_samples(output_len, sampling_rate=sampling_rate), + ) + case "text", "text": + return input_len, output_len + case _: + raise RuntimeError(f"Unexpected modality combination: {_}") + + return [_determine_lens_for_bucket(bin) for bin in buckets] + + click.echo("Starting profiling.") + max_seq_lens = get_max_seq_lens(buckets) + gen = ProfilingBatchGenerator(schema=schema, start_batch_size=start_batch_size, rel_gap_thresh=threshold) + profile = {} + + # Iterate buckets from the largest to the smallest sequences. This usually ends up creating + # a tiny bit smaller batches, likely due to worse memory fragmentation. + with torch.autocast("cuda", getattr(torch, dtype)): + for bucket, (seq_len_in, seq_len_out) in reversed(list(zip(buckets, max_seq_lens))): + click.echo(f"The current sequence lengths are: input={seq_len_in} output={seq_len_out}.") + gen.reset() + batch_idx = 0 + + def step(): + click.echo( + f"\t[BEGIN step] [CUDA RAM CURRENT: {torch.cuda.memory_allocated() / (1024 * 1024):.1f}MB] [CUDA RAM MAX: {torch.cuda.max_memory_allocated() / (1024*1024):.1f}MB]" + ) + batch = gen(seq_len_in, seq_len_out) + oom = False + try: + click.echo( + f"\tCurrent settings | batch_size={gen._current} | gap: {gen.current_rel_gap}... ", nl=False + ) + optimizer.zero_grad() + # In SpeechLLM training_step performs both forward and backward; no need for manual backward + out = model.training_step(iter([batch])) + optimizer.step() + except torch.cuda.OutOfMemoryError as e: + click.secho(f"OOM!", fg="yellow") + oom = True + except RuntimeError as e: + if "cuFFT error: CUFFT_INTERNAL_ERROR" not in str(e): + raise + click.secho(f"OOM!", fg="yellow") + oom = True + else: + click.secho(f"OK!", fg="green") + finally: + click.echo( + f"\t[END step] [CUDA RAM CURRENT: {torch.cuda.memory_allocated() / (1024 * 1024):.1f}MB] [CUDA RAM MAX: {torch.cuda.max_memory_allocated() / (1024*1024):.1f}MB]" + ) + del batch + # Note: We could call empty_cache() to free up some more memory on the GPU, + # but we have found out empirically that this causes a mismatched condition + # between OOMptimizer and the actual training. During training, there is some + # degree of memory fragmentation and it's better to simulate that in OOMptimizer. 
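# The peak-allocation counter is still reset below so that the "[CUDA RAM MAX]" readout
# printed in the next step() call reflects only the most recent attempt.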
+ # torch.cuda.memory.empty_cache() + torch.cuda.reset_max_memory_allocated() + return oom + + oom = step() + while not (finished := gen.advance(oom)): + click.echo("\t" + "=" * 80) + oom = step() + + click.secho( + f"=> Optimal setting for bucket={bucket} (input={seq_len_in} output={seq_len_out}) is max_batch_size={gen.max_batch_size}", + fg="green", + ) + profile[(bucket, seq_len_in, seq_len_out)] = gen.max_batch_size + gen.start_batch_size = gen.max_batch_size * 2 + + # Reverse the profile to be ascendingly sorted again. + profile = dict(reversed(list(profile.items()))) + + click.echo("The 1st stage profile is:") + for (bucket, seq_len_in, seq_len_out), bs in profile.items(): + click.echo(f"Bucket={bucket} (input={seq_len_in} output={seq_len_out}) => max_batch_size={bs}") + + if is_2d_bucketing: + # 2D bucketing doesn't support bucket merging. + final_profile = [["[" + ",".join(map(str, b)) + "]", bs] for (b, _, __), bs in profile.items()] + max_input_len, max_output_len = buckets[-1] + ratio = max_output_len / max_input_len + else: + click.echo("Bucket merging stage...") + final_profile = [] + for idx, ((bucket, seq_len_in, seq_len_out), bs) in enumerate(profile.items()): + if idx == 0: + final_profile.append([bucket, bs]) + continue + if bs == final_profile[-1][1]: + click.echo(f"Merging bucket {idx} with bucket {idx-1} due to identical batch sizes.") + final_profile[-1][0] = bucket + continue + final_profile.append([bucket, bs]) + max_input_len = final_profile[-1][0] + + click.secho(f"The profile was created with the following settings:") + click.secho(f"* using {memory_fraction:.1%} of available GPU RAM.") + click.secho(f"* {'' if ddp else 'not '}simulating DDP memory overhead.") + click.secho(f"* using AMP with dtype={dtype}.") + click.secho("The final profile is:", bold=True) + click.secho("\tbucket_duration_bins=[" + ",".join(str(seqlen) for seqlen, bs in final_profile) + "]", bold=True) + click.secho("\tbucket_batch_size=[" + ",".join(str(bs) for seqlen, bs in final_profile) + "]", bold=True) + click.secho("\t(The following flags are suitable for ASR/speech-to-text models):") + click.secho(f"\tmax_tps={ratio}", bold=True) + click.secho(f"\tmax_duration={max_input_len}", bold=True) + + +if __name__ == "__main__": + oomptimizer() diff --git a/scripts/speech_recognition/estimate_duration_bins_2d.py b/scripts/speech_recognition/estimate_duration_bins_2d.py index 52d5b3620a2a..0f4a021e09cc 100644 --- a/scripts/speech_recognition/estimate_duration_bins_2d.py +++ b/scripts/speech_recognition/estimate_duration_bins_2d.py @@ -27,12 +27,11 @@ from nemo.collections.asr.data.audio_to_text_lhotse import TokenizerWrapper from nemo.collections.common.data.lhotse.cutset import read_cutset_from_config -from nemo.collections.common.data.lhotse.dataloader import ( +from nemo.collections.common.data.lhotse.dataloader import LhotseDataLoadingConfig, tokenize +from nemo.collections.common.data.lhotse.sampling import ( DurationFilter, FixedBucketBatchSizeConstraint2D, - LhotseDataLoadingConfig, TokenPerSecondFilter, - tokenize, ) from nemo.collections.common.prompts.formatter import PromptFormatter from nemo.collections.common.tokenizers import AggregateTokenizer, SentencePieceTokenizer diff --git a/scripts/speech_recognition/oomptimizer.py b/scripts/speech_recognition/oomptimizer.py index 8d215cbc14eb..b44c2c46c629 100755 --- a/scripts/speech_recognition/oomptimizer.py +++ b/scripts/speech_recognition/oomptimizer.py @@ -26,7 +26,7 @@ from omegaconf import OmegaConf from 
nemo.collections.asr.models.asr_model import ASRModel -from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType +from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, MaskType, NeuralType from nemo.utils import logging @@ -125,7 +125,12 @@ def __call__(self, input_seq_length: int, output_seq_length: int): names = [] for item in self.schema["inputs"]: nt = item["type"] - if not isinstance(nt, NeuralType): # placeholder + if isinstance(nt, str) and nt == "constant": + if isinstance(val := item["value"], str) and val == "batch": + tnsr = torch.tensor([B], dtype=torch.long, device=self.device) + else: + tnsr = torch.tensor([val], dtype=torch.long, device=self.device) + elif not isinstance(nt, NeuralType): # placeholder tnsr = torch.tensor([]) elif isinstance(nt.elements_type, AudioSignal): seq_length = select_seq_length[item["seq_length"]] @@ -136,6 +141,9 @@ def __call__(self, input_seq_length: int, output_seq_length: int): elif isinstance(nt.elements_type, LabelsType): seq_length = select_seq_length[item["seq_length"]] tnsr = torch.randint(0, item["vocab_size"], size=(B, seq_length), device=self.device) + elif isinstance(nt.elements_type, MaskType): + seq_length = select_seq_length[item["seq_length"]] + tnsr = torch.ones(B, seq_length, device=self.device) else: raise RuntimeError("Unexpected item in oomptimizer schema: {item}") batch.append(tnsr) diff --git a/tests/collections/asr/test_asr_multitask_model_bpe.py b/tests/collections/asr/test_asr_multitask_model_bpe.py index c1800e94af42..5ee2d8279cf2 100644 --- a/tests/collections/asr/test_asr_multitask_model_bpe.py +++ b/tests/collections/asr/test_asr_multitask_model_bpe.py @@ -31,7 +31,7 @@ from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchMultiTaskAED from nemo.collections.common.prompts.canary import CanaryPromptFormatter, canary -from nemo.collections.common.prompts.canary2 import canary2 +from nemo.collections.common.prompts.canary2 import Canary2PromptFormatter, canary2 from nemo.collections.common.tokenizers import CanaryTokenizer @@ -238,7 +238,9 @@ def test_training_step(self, deterministic_rng, asr_model): c.target_lang = "en" c.task = "asr" c.pnc = "no" - dataset = PromptedAudioToTextLhotseDataset(tokenizer=asr_model.tokenizer, prompt_format_fn=canary) + dataset = PromptedAudioToTextLhotseDataset( + tokenizer=asr_model.tokenizer, prompt=CanaryPromptFormatter(asr_model.tokenizer) + ) batch = dataset[cuts] ans = asr_model.training_step(batch, batch_nb=0) @@ -282,7 +284,9 @@ def test_validation_step(self, deterministic_rng, asr_model): c.target_lang = "en" c.task = "asr" c.pnc = "no" - dataset = PromptedAudioToTextLhotseDataset(tokenizer=asr_model.tokenizer, prompt_format_fn=canary) + dataset = PromptedAudioToTextLhotseDataset( + tokenizer=asr_model.tokenizer, prompt=CanaryPromptFormatter(asr_model.tokenizer) + ) batch = dataset[cuts] with torch.no_grad(): @@ -512,7 +516,9 @@ def test_predict_step(self, asr_model, test_data_dir): c.target_lang = "en" c.task = "asr" c.pnc = "no" - dataset = PromptedAudioToTextLhotseDataset(tokenizer=asr_model.tokenizer, prompt_format_fn=canary) + dataset = PromptedAudioToTextLhotseDataset( + tokenizer=asr_model.tokenizer, prompt=CanaryPromptFormatter(asr_model.tokenizer) + ) batch = dataset[cuts] # Numpy array test @@ -544,7 +550,9 @@ def test_FrameBatchMultiTaskAED(self, asr_model, test_data_dir): @pytest.mark.unit def test_prompted_dataset(asr_model): - dataset = 
PromptedAudioToTextLhotseDataset(tokenizer=asr_model.tokenizer, prompt_format_fn=canary) + dataset = PromptedAudioToTextLhotseDataset( + tokenizer=asr_model.tokenizer, prompt=CanaryPromptFormatter(asr_model.tokenizer) + ) cuts = DummyManifest(CutSet, begin_id=0, end_id=3, with_data=True) @@ -647,7 +655,9 @@ def canary2_tokenizer(asr_model, tmp_path): @pytest.mark.unit def test_prompted_dataset_canary2(canary2_tokenizer): - dataset = PromptedAudioToTextLhotseDataset(tokenizer=canary2_tokenizer, prompt_format_fn=canary2) + dataset = PromptedAudioToTextLhotseDataset( + tokenizer=canary2_tokenizer, prompt=Canary2PromptFormatter(canary2_tokenizer) + ) cuts = DummyManifest(CutSet, begin_id=0, end_id=3, with_data=True) diff --git a/tests/collections/common/prompt_formatters/conftest.py b/tests/collections/common/prompt_formatters/conftest.py index e22d8849fa83..739f51a6ea2f 100644 --- a/tests/collections/common/prompt_formatters/conftest.py +++ b/tests/collections/common/prompt_formatters/conftest.py @@ -56,6 +56,8 @@ def bpe_tokenizer(tmp_path_factory): do_lower_case=False, output_dir=str(tmpdir), remove_extra_whitespaces=True, + bos=True, + eos=True, ) return SentencePieceTokenizer(str(tmpdir / "tokenizer.model")) diff --git a/tests/collections/common/prompt_formatters/test_gemma_prompt_formatter.py b/tests/collections/common/prompt_formatters/test_gemma_prompt_formatter.py index 1a75f8d48147..712604ac32a6 100644 --- a/tests/collections/common/prompt_formatters/test_gemma_prompt_formatter.py +++ b/tests/collections/common/prompt_formatters/test_gemma_prompt_formatter.py @@ -28,7 +28,7 @@ def test_gemma_prompt_formatter_training(bpe_tokenizer): assert bpe_tokenizer.ids_to_text(ans["input_ids"].tolist()) == 'user TEST model TEST' assert bpe_tokenizer.ids_to_text(ans["context_ids"].tolist()) == 'user TEST model' assert bpe_tokenizer.ids_to_text(ans["answer_ids"].tolist()) == 'TEST' - assert ans["mask"].tolist() == [False] * 36 + [True] * 13 + assert ans["mask"].tolist() == [False] * 37 + [True] * 14 # fmt: on @@ -44,3 +44,32 @@ def test_gemma_prompt_formatter_inference(bpe_tokenizer): assert ans["input_ids"].tolist() == ans["context_ids"].tolist() assert bpe_tokenizer.ids_to_text(ans["input_ids"].tolist()) == 'user TEST model' # fmt: on + + +def test_gemma_prompt_formatter_training_bos_eos_inserted_only_once_in_multiturn(bpe_tokenizer): + formatter = GemmaPromptFormatter(bpe_tokenizer) + ans = formatter.encode_dialog( + [ + {"role": "user", "slots": {"message": "TEST"}}, + {"role": "assistant", "slots": {"message": "TEST"}}, + {"role": "user", "slots": {"message": "TEST"}}, + {"role": "assistant", "slots": {"message": "TEST"}}, + {"role": "user", "slots": {"message": "TEST"}}, + {"role": "assistant", "slots": {"message": "TEST"}}, + {"role": "user", "slots": {"message": "TEST"}}, + {"role": "assistant", "slots": {"message": "TEST"}}, + ] + ) + + assert (ans["input_ids"] == bpe_tokenizer.bos).sum() == 1 + assert (ans["input_ids"] == bpe_tokenizer.eos).sum() == 1 + assert ans["input_ids"][0] == bpe_tokenizer.bos + assert ans["input_ids"][-1] == bpe_tokenizer.eos + + assert (ans["context_ids"] == bpe_tokenizer.bos).sum() == 1 + assert (ans["context_ids"] == bpe_tokenizer.eos).sum() == 0 + assert ans["context_ids"][0] == bpe_tokenizer.bos + + assert (ans["answer_ids"] == bpe_tokenizer.bos).sum() == 0 + assert (ans["answer_ids"] == bpe_tokenizer.eos).sum() == 1 + assert ans["answer_ids"][-1] == bpe_tokenizer.eos diff --git a/tests/collections/common/prompt_formatters/test_mistral_prompt_formatter.py 
b/tests/collections/common/prompt_formatters/test_mistral_prompt_formatter.py index 610800a84690..d907f0713ccd 100644 --- a/tests/collections/common/prompt_formatters/test_mistral_prompt_formatter.py +++ b/tests/collections/common/prompt_formatters/test_mistral_prompt_formatter.py @@ -28,7 +28,7 @@ def test_mistral_prompt_formatter_training(bpe_tokenizer): assert bpe_tokenizer.ids_to_text(ans["input_ids"].tolist()) == ' [INST] TEST [/INST] TEST' assert bpe_tokenizer.ids_to_text(ans["context_ids"].tolist()) == ' [INST] TEST [/INST]' assert bpe_tokenizer.ids_to_text(ans["answer_ids"].tolist()) == 'TEST' - assert ans["mask"].tolist() == [False] * 18 + [True] * 7 + assert ans["mask"].tolist() == [False] * 18 + [True] * 8 # fmt: on diff --git a/tests/collections/common/test_2d_bucketing_constraint.py b/tests/collections/common/test_2d_bucketing_constraint.py index 1bef5cf14ff7..36cb9825ac5b 100644 --- a/tests/collections/common/test_2d_bucketing_constraint.py +++ b/tests/collections/common/test_2d_bucketing_constraint.py @@ -17,7 +17,7 @@ from lhotse import CutSet, Seconds, SupervisionSegment from lhotse.dataset import DynamicBucketingSampler from lhotse.testing.dummies import DummyManifest, dummy_cut -from nemo.collections.common.data.lhotse.dataloader import FixedBucketBatchSizeConstraint2D +from nemo.collections.common.data.lhotse.sampling import FixedBucketBatchSizeConstraint2D @pytest.fixture diff --git a/tests/collections/common/test_lhotse_dataloading.py b/tests/collections/common/test_lhotse_dataloading.py index 605c05008bc9..1038094804b2 100644 --- a/tests/collections/common/test_lhotse_dataloading.py +++ b/tests/collections/common/test_lhotse_dataloading.py @@ -21,17 +21,17 @@ import numpy as np import pytest import torch -from lhotse import CutSet, MonoCut, NumpyFilesWriter, Recording, SupervisionSegment, compute_num_samples +from lhotse import CutSet, MonoCut, NumpyFilesWriter, Recording, compute_num_samples from lhotse.audio import AudioLoadingError from lhotse.cut import Cut, MixedCut, PaddingCut -from lhotse.cut.text import TextPairExample +from lhotse.dataset import RoundRobinSampler, ZipSampler from lhotse.shar import JsonlShardWriter from lhotse.testing.dummies import dummy_recording +from lhotse.testing.random import deterministic_rng from omegaconf import OmegaConf -from nemo.collections.asr.data.audio_to_text_lhotse import TokenizerWrapper from nemo.collections.common.data.lhotse import get_lhotse_dataloader_from_config -from nemo.collections.common.data.lhotse.text_adapters import TextExample +from nemo.collections.common.data.lhotse.text_adapters import SourceTargetTextExample, TextExample from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer, create_spt_model @@ -1314,6 +1314,14 @@ def txt_es_path(tmp_path_factory): return es_path +@pytest.fixture(scope="session") +def questions_path(tmp_path_factory) -> str: + tmpdir = tmp_path_factory.mktemp("questions") + qp = tmpdir / "questions.txt" + qp.write_text("translate the following to spanish") + return str(qp) + + def test_text_file_input(txt_en_path, txt_es_path): config = OmegaConf.create( { @@ -1349,7 +1357,7 @@ def test_text_file_input(txt_en_path, txt_es_path): assert all(c.language == "en" for c in b) -def test_text_file_pairs_input(txt_en_path, txt_es_path): +def test_text_file_pairs_input(txt_en_path, txt_es_path, questions_path): config = OmegaConf.create( { "input_cfg": [ @@ -1357,8 +1365,10 @@ def test_text_file_pairs_input(txt_en_path, txt_es_path): "type": "txt_pair", 
"source_paths": txt_en_path, "target_paths": txt_es_path, + "questions_path": questions_path, "source_language": "en", "target_language": "es", + "questions_language": "en", }, ], "shuffle": True, @@ -1377,13 +1387,13 @@ def test_text_file_pairs_input(txt_en_path, txt_es_path): b = batches[0] assert isinstance(b, lhotse.CutSet) - assert all(isinstance(c, TextPairExample) for c in b) + assert all(isinstance(c, SourceTargetTextExample) for c in b) assert all(c.source.language == "en" for c in b) assert all(c.target.language == "es" for c in b) b = batches[1] assert isinstance(b, lhotse.CutSet) - assert all(isinstance(c, TextPairExample) for c in b) + assert all(isinstance(c, SourceTargetTextExample) for c in b) assert all(c.source.language == "en" for c in b) assert all(c.target.language == "es" for c in b) @@ -1403,7 +1413,7 @@ def txt_pair_paths_shards(tmp_path_factory, txt_en_path, txt_es_path): return f"{tmp_path}/en__OP_0..1_CL_.txt", f"{tmp_path}/es__OP_0..1_CL_.txt" -def test_text_file_pairs_shards_input(txt_pair_paths_shards: tuple[str, str]): +def test_text_file_pairs_shards_input(txt_pair_paths_shards: tuple[str, str], questions_path): en_paths, es_paths = txt_pair_paths_shards config = OmegaConf.create( @@ -1413,8 +1423,10 @@ def test_text_file_pairs_shards_input(txt_pair_paths_shards: tuple[str, str]): "type": "txt_pair", "source_paths": en_paths, "target_paths": es_paths, + "questions_path": questions_path, "source_language": "en", "target_language": "es", + "questions_language": "en", }, ], "shuffle": True, @@ -1433,33 +1445,35 @@ def test_text_file_pairs_shards_input(txt_pair_paths_shards: tuple[str, str]): b = batches[0] assert isinstance(b, lhotse.CutSet) - assert all(isinstance(c, TextPairExample) for c in b) + assert all(isinstance(c, SourceTargetTextExample) for c in b) assert all(c.source.language == "en" for c in b) assert all(c.target.language == "es" for c in b) b = batches[1] assert isinstance(b, lhotse.CutSet) - assert all(isinstance(c, TextPairExample) for c in b) + assert all(isinstance(c, SourceTargetTextExample) for c in b) assert all(c.source.language == "en" for c in b) assert all(c.target.language == "es" for c in b) @pytest.fixture(scope="session") -def en_es_tokenizer(tmp_path_factory, txt_en_path, txt_es_path) -> TokenizerWrapper: +def en_es_tokenizer(tmp_path_factory, txt_en_path, txt_es_path) -> SentencePieceTokenizer: tmpdir = tmp_path_factory.mktemp("en_es_tokenizer") text_path = tmpdir / "text.txt" text_path.write_text(txt_en_path.read_text() + "\n" + txt_es_path.read_text()) create_spt_model(text_path, vocab_size=128, sample_size=-1, do_lower_case=False, output_dir=str(tmpdir)) - return TokenizerWrapper(SentencePieceTokenizer(str(tmpdir / "tokenizer.model"))) + return SentencePieceTokenizer(str(tmpdir / "tokenizer.model")) def test_multimodal_text_audio_dataloading( txt_pair_paths_shards: tuple[str, str], nemo_tarred_manifest_path_multi: tuple[str, str], - en_es_tokenizer: TokenizerWrapper, + en_es_tokenizer: SentencePieceTokenizer, + questions_path: str, ): en_paths, es_paths = txt_pair_paths_shards manifest_filepath, tarred_audio_filepaths = nemo_tarred_manifest_path_multi + QF, BT = 50, 1024 config = OmegaConf.create( { "input_cfg": [ @@ -1469,6 +1483,8 @@ def test_multimodal_text_audio_dataloading( "target_paths": es_paths, "source_language": "en", "target_language": "es", + "questions_path": questions_path, + "questions_language": "en", "tags": { "modality": "text", }, @@ -1485,7 +1501,8 @@ def test_multimodal_text_audio_dataloading( "shuffle": 
True, "num_workers": 0, "use_multimodal_sampling": True, - "batch_tokens": 1024, + "prompt_format": "plain", + "batch_tokens": BT, # How to set token equivalent duration in actual training? # assuming fbank frames: 0.01 is the base due to frame shift; # + subsampling x8 gives us 0.08 @@ -1493,9 +1510,112 @@ def test_multimodal_text_audio_dataloading( # we'd get 0.02 # in this test we'll just use 0.1 for simplicity "token_equivalent_duration": 0.1, - "quadratic_factor": 50, + "quadratic_factor": QF, + "seed": 0, + "shard_seed": 0, + } + ) + + dl = get_lhotse_dataloader_from_config( + config=config, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=en_es_tokenizer, + ) + + b = next(iter(dl)) + assert isinstance(b, lhotse.CutSet) + assert len(b) + assert any(isinstance(ex, Cut) for ex in b) + assert any(isinstance(ex, SourceTargetTextExample) for ex in b) + # Batch tokens is not exceeded after applying the quadratic factor correction + assert sum(ex.num_tokens**2 / QF for ex in b) <= BT + for ex in b: + if isinstance(ex, Cut): + assert ex.modality == "audio" + assert isinstance(ex.load_audio(), np.ndarray) + assert isinstance(ex.supervisions[0].text, str) + if isinstance(ex, SourceTargetTextExample): + assert ex.modality == "text" + assert ex.source.language == "en" + assert ex.target.language == "es" + assert isinstance(ex.source.text, str) + assert isinstance(ex.target.text, str) + assert isinstance(ex.question.text, str) + assert torch.is_tensor(ex.input_ids) + assert torch.is_tensor(ex.context_ids) + assert torch.is_tensor(ex.answer_ids) + assert torch.is_tensor(ex.mask) + + +def test_multimodal_text_audio_dataloading_zip_strategy( + txt_pair_paths_shards: tuple[str, str], + nemo_tarred_manifest_path_multi: tuple[str, str], + en_es_tokenizer: SentencePieceTokenizer, + questions_path: str, +): + en_paths, es_paths = txt_pair_paths_shards + manifest_filepath, tarred_audio_filepaths = nemo_tarred_manifest_path_multi + QF, BT = 50, 64 + config = OmegaConf.create( + { + "multi_config": True, + "sampler_fusion": "zip", # <---- !!! this option is being tested here !!! "seed": 0, "shard_seed": 0, + "shuffle": True, + "num_workers": 0, + "audio": { + "input_cfg": [ + { + "type": "nemo_tarred", + "manifest_filepath": manifest_filepath, + "tarred_audio_filepaths": tarred_audio_filepaths, + "tags": { + "modality": "audio", + }, + }, + ], + "prompt_format": "plain", + "use_multimodal_sampling": True, + "batch_tokens": BT, + # How to set token equivalent duration in actual training? + # assuming fbank frames: 0.01 is the base due to frame shift; + # + subsampling x8 gives us 0.08 + # assuming discrete audio tokens, with frame rate 50Hz, + # we'd get 0.02 + # in this test we'll just use 0.1 for simplicity + "token_equivalent_duration": 0.1, + "quadratic_factor": QF, + }, + "text": { + "input_cfg": [ + { + "type": "txt_pair", + "source_paths": en_paths, + "target_paths": es_paths, + "source_language": "en", + "target_language": "es", + "questions_path": questions_path, + "questions_language": "en", + "tags": { + "modality": "text", + }, + }, + ], + "use_multimodal_sampling": True, + "prompt_format": "plain", + "batch_tokens": 64, + # How to set token equivalent duration in actual training? 
+ # assuming fbank frames: 0.01 is the base due to frame shift; + # + subsampling x8 gives us 0.08 + # assuming discrete audio tokens, with frame rate 50Hz, + # we'd get 0.02 + # in this test we'll just use 0.1 for simplicity + "token_equivalent_duration": 0.1, + "quadratic_factor": 50, + }, } ) @@ -1507,52 +1627,312 @@ def test_multimodal_text_audio_dataloading( tokenizer=en_es_tokenizer, ) + assert isinstance(dl.dataset.sampler, ZipSampler) + # Note: we use islice here because the dataloader will be infinite. batches = [batch for batch in islice(dl, 2)] b = batches[0] assert isinstance(b, lhotse.CutSet) - assert len(b) == 48 - assert sum(ex.num_tokens for ex in b) == pytest.approx(574.0) - assert min(ex.num_tokens for ex in b) == pytest.approx(10) - assert max(ex.num_tokens for ex in b) == pytest.approx(16) - assert sum(isinstance(ex, Cut) for ex in b) == 29 - assert sum(isinstance(ex, TextPairExample) for ex in b) == 19 + assert len(b) + assert any(isinstance(ex, Cut) for ex in b) + assert any(isinstance(ex, SourceTargetTextExample) for ex in b) + # Batch tokens is not exceeded after applying the quadratic factor correction + # Note: zip samples stitches together two batches hence * 2 + assert sum(ex.num_tokens**2 / QF for ex in b) <= BT * 2 for ex in b: if isinstance(ex, Cut): assert ex.modality == "audio" assert isinstance(ex.load_audio(), np.ndarray) assert isinstance(ex.supervisions[0].text, str) - if isinstance(ex, TextPairExample): + if isinstance(ex, SourceTargetTextExample): assert ex.modality == "text" assert ex.source.language == "en" assert ex.target.language == "es" - assert isinstance(ex.source.text, str) - assert isinstance(ex.target.text, str) - assert isinstance(ex.source.tokens, np.ndarray) - assert isinstance(ex.target.tokens, np.ndarray) + assert torch.is_tensor(ex.input_ids) + assert torch.is_tensor(ex.context_ids) + assert torch.is_tensor(ex.answer_ids) + assert torch.is_tensor(ex.mask) b = batches[1] assert isinstance(b, lhotse.CutSet) - assert len(b) == 48 - assert sum(ex.num_tokens for ex in b) == pytest.approx(614.0) - assert min(ex.num_tokens for ex in b) == pytest.approx(10) - assert max(ex.num_tokens for ex in b) == pytest.approx(16) - assert sum(isinstance(ex, Cut) for ex in b) == 21 - assert sum(isinstance(ex, TextPairExample) for ex in b) == 27 + assert len(b) + assert any(isinstance(ex, Cut) for ex in b) + assert any(isinstance(ex, SourceTargetTextExample) for ex in b) + # Batch tokens is not exceeded after applying the quadratic factor correction + # Note: zip samples stitches together two batches hence * 2 + assert sum(ex.num_tokens**2 / QF for ex in b) <= BT * 2 for ex in b: if isinstance(ex, Cut): assert ex.modality == "audio" assert isinstance(ex.load_audio(), np.ndarray) assert isinstance(ex.supervisions[0].text, str) - if isinstance(ex, TextPairExample): + if isinstance(ex, SourceTargetTextExample): assert ex.modality == "text" assert ex.source.language == "en" assert ex.target.language == "es" - assert isinstance(ex.source.text, str) - assert isinstance(ex.target.text, str) - assert isinstance(ex.source.tokens, np.ndarray) - assert isinstance(ex.target.tokens, np.ndarray) + assert torch.is_tensor(ex.input_ids) + assert torch.is_tensor(ex.context_ids) + assert torch.is_tensor(ex.answer_ids) + assert torch.is_tensor(ex.mask) + + +def test_multimodal_text_audio_dataloading_round_robin_strategy( + txt_pair_paths_shards: tuple[str, str], + nemo_tarred_manifest_path_multi: tuple[str, str], + en_es_tokenizer: SentencePieceTokenizer, + questions_path: 
str, +): + en_paths, es_paths = txt_pair_paths_shards + manifest_filepath, tarred_audio_filepaths = nemo_tarred_manifest_path_multi + QF, BT = 50, 64 + config = OmegaConf.create( + { + "multi_config": True, + "sampler_fusion": "round_robin", # <---- !!! this option is being tested here !!! + "seed": 0, + "shard_seed": 0, + "shuffle": True, + "num_workers": 0, + "audio": { + "input_cfg": [ + { + "type": "nemo_tarred", + "manifest_filepath": manifest_filepath, + "tarred_audio_filepaths": tarred_audio_filepaths, + "tags": { + "modality": "audio", + }, + }, + ], + "use_multimodal_sampling": True, + "prompt_format": "plain", + "batch_tokens": BT, + # How to set token equivalent duration in actual training? + # assuming fbank frames: 0.01 is the base due to frame shift; + # + subsampling x8 gives us 0.08 + # assuming discrete audio tokens, with frame rate 50Hz, + # we'd get 0.02 + # in this test we'll just use 0.1 for simplicity + "token_equivalent_duration": 0.1, + "quadratic_factor": QF, + }, + "text": { + "input_cfg": [ + { + "type": "txt_pair", + "source_paths": en_paths, + "target_paths": es_paths, + "source_language": "en", + "target_language": "es", + "questions_path": questions_path, + "questions_language": "en", + "tags": { + "modality": "text", + }, + }, + ], + "prompt_format": "plain", + "use_multimodal_sampling": True, + "batch_tokens": BT, + # How to set token equivalent duration in actual training? + # assuming fbank frames: 0.01 is the base due to frame shift; + # + subsampling x8 gives us 0.08 + # assuming discrete audio tokens, with frame rate 50Hz, + # we'd get 0.02 + # in this test we'll just use 0.1 for simplicity + "token_equivalent_duration": 0.1, + "quadratic_factor": QF, + }, + } + ) + + dl = get_lhotse_dataloader_from_config( + config=config, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=en_es_tokenizer, + ) + + assert isinstance(dl.dataset.sampler, RoundRobinSampler) + + # Note: we use islice here because the dataloader will be infinite. 
+ batches = [batch for batch in islice(dl, 2)] + + # Batch 0 is audio-only + b = batches[0] + assert isinstance(b, lhotse.CutSet) + assert len(b) + assert all(isinstance(ex, Cut) for ex in b) + # Batch tokens is not exceeded after applying the quadratic factor correction + assert sum(ex.num_tokens**2 / QF for ex in b) <= BT + for ex in b: + assert ex.modality == "audio" + assert isinstance(ex.load_audio(), np.ndarray) + assert isinstance(ex.supervisions[0].text, str) + + # Batch 1 is text-only + b = batches[1] + assert isinstance(b, lhotse.CutSet) + assert len(b) + assert all(isinstance(ex, SourceTargetTextExample) for ex in b) + # Batch tokens is not exceeded after applying the quadratic factor correction + assert sum(ex.num_tokens**2 / QF for ex in b) <= BT + for ex in b: + assert ex.modality == "text" + assert ex.source.language == "en" + assert ex.target.language == "es" + assert torch.is_tensor(ex.input_ids) + assert torch.is_tensor(ex.context_ids) + assert torch.is_tensor(ex.answer_ids) + assert torch.is_tensor(ex.mask) + + +def test_multimodal_text_audio_dataloading_randomized_round_robin_strategy( + deterministic_rng, + txt_pair_paths_shards: tuple[str, str], + nemo_tarred_manifest_path_multi: tuple[str, str], + en_es_tokenizer: SentencePieceTokenizer, + questions_path: str, +): + en_paths, es_paths = txt_pair_paths_shards + manifest_filepath, tarred_audio_filepaths = nemo_tarred_manifest_path_multi + QF, BT = 50, 64 + config = OmegaConf.create( + { + "multi_config": True, + "sampler_fusion": "randomized_round_robin", # <---- !!! this option is being tested here !!! + "sampler_weights": { + "audio": 0.5, + "text": 0.5, + }, + "seed": 0, + "shard_seed": 0, + "shuffle": True, + "num_workers": 0, + "audio": { + "input_cfg": [ + { + "type": "nemo_tarred", + "manifest_filepath": manifest_filepath, + "tarred_audio_filepaths": tarred_audio_filepaths, + "tags": { + "modality": "audio", + }, + }, + ], + "use_multimodal_sampling": True, + "prompt_format": "plain", + "batch_tokens": BT, + # How to set token equivalent duration in actual training? + # assuming fbank frames: 0.01 is the base due to frame shift; + # + subsampling x8 gives us 0.08 + # assuming discrete audio tokens, with frame rate 50Hz, + # we'd get 0.02 + # in this test we'll just use 0.1 for simplicity + "token_equivalent_duration": 0.1, + "quadratic_factor": QF, + }, + "text": { + "input_cfg": [ + { + "type": "txt_pair", + "source_paths": en_paths, + "target_paths": es_paths, + "source_language": "en", + "target_language": "es", + "questions_path": questions_path, + "questions_language": "en", + "tags": { + "modality": "text", + }, + }, + ], + "prompt_format": "plain", + "use_multimodal_sampling": True, + "batch_tokens": BT, + # How to set token equivalent duration in actual training? + # assuming fbank frames: 0.01 is the base due to frame shift; + # + subsampling x8 gives us 0.08 + # assuming discrete audio tokens, with frame rate 50Hz, + # we'd get 0.02 + # in this test we'll just use 0.1 for simplicity + "token_equivalent_duration": 0.1, + "quadratic_factor": QF, + }, + } + ) + + dl = get_lhotse_dataloader_from_config( + config=config, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=en_es_tokenizer, + ) + + assert isinstance(dl.dataset.sampler, RoundRobinSampler) + + # Note: we use islice here because the dataloader will be infinite. 
+ batches = [batch for batch in islice(dl, 2)] + + # Batch 0 is audio-only + b = batches[0] + assert isinstance(b, lhotse.CutSet) + assert len(b) + assert all(isinstance(ex, Cut) for ex in b) + # Batch tokens is not exceeded after applying the quadratic factor correction + assert sum(ex.num_tokens**2 / QF for ex in b) <= BT + for ex in b: + assert ex.modality == "audio" + assert isinstance(ex.load_audio(), np.ndarray) + assert isinstance(ex.supervisions[0].text, str) + + # Batch 1 is text-only + b = batches[1] + assert isinstance(b, lhotse.CutSet) + assert len(b) + assert all(isinstance(ex, SourceTargetTextExample) for ex in b) + # Batch tokens is not exceeded after applying the quadratic factor correction + assert sum(ex.num_tokens**2 / QF for ex in b) <= BT + for ex in b: + assert ex.modality == "text" + assert ex.source.language == "en" + assert ex.target.language == "es" + assert torch.is_tensor(ex.input_ids) + assert torch.is_tensor(ex.context_ids) + assert torch.is_tensor(ex.answer_ids) + assert torch.is_tensor(ex.mask) + + +def test_dataloader_with_noise_nemo_json(cutset_path: Path, nemo_manifest_path: Path): + config = OmegaConf.create( + { + "cuts_path": str(cutset_path), + "noise_path": str(nemo_manifest_path), + "noise_mix_prob": 1.0, + "noise_snr": [-5.0, 5.0], + "batch_size": 2, + "seed": 0, + "shard_seed": 0, + } + ) + dl = get_lhotse_dataloader_from_config( + config=config, + global_rank=0, + world_size=1, + dataset=Identity(), + ) + batch = next(iter(dl)) + assert isinstance(batch, CutSet) + assert len(batch) == 2 + cut = batch[0] + assert isinstance(cut, MixedCut) + assert -5.0 < cut.tracks[1].snr < 5.0 + cut = batch[1] + assert isinstance(cut, MixedCut) + assert -5.0 < cut.tracks[1].snr < 5.0 def test_dataloader_with_noise_nemo_json(cutset_path: Path, nemo_manifest_path: Path): @@ -2012,6 +2392,46 @@ def test_dataloader_from_tarred_nemo_manifest_with_offset(nemo_tarred_manifest_p ) +def test_force_iterable_dataset(cutset_path: Path): + config = OmegaConf.create({"cuts_path": cutset_path, "batch_size": 2, "num_workers": 2}) + dl = get_lhotse_dataloader_from_config(config=config, global_rank=0, world_size=1, dataset=Identity()) + batches_map = [b for b in dl] + + config = OmegaConf.create( + {"cuts_path": cutset_path, "batch_size": 2, "num_workers": 2, "force_iterable_dataset": True} + ) + dl = get_lhotse_dataloader_from_config(config=config, global_rank=0, world_size=1, dataset=Identity()) + batches_iter = [b for b in dl] + + # 2x duplicated data due to iterable dataset lack of deduplication + assert len(batches_iter) == 2 * len(batches_map) + # assertion that this is in fact the same data (same ids) + assert set(c.id for b in batches_iter for c in b) == set(c.id for b in batches_map for c in b) + + +def test_force_map_dataset(cutset_shar_path: Path): + config = OmegaConf.create({"shar_path": cutset_shar_path, "batch_size": 2, "num_workers": 2, "force_finite": True}) + dl = get_lhotse_dataloader_from_config(config=config, global_rank=0, world_size=1, dataset=Identity()) + batches_iter = [b for b in dl] + + config = OmegaConf.create( + { + "shar_path": cutset_shar_path, + "batch_size": 2, + "num_workers": 2, + "force_map_dataset": True, + "force_finite": True, + } + ) + dl = get_lhotse_dataloader_from_config(config=config, global_rank=0, world_size=1, dataset=Identity()) + batches_map = [b for b in dl] + + # 2x duplicated data due to iterable dataset lack of deduplication + assert len(batches_iter) == 2 * len(batches_map) + # assertion that this is in fact the same data 
(same ids) + assert set(c.id for b in batches_iter for c in b) == set(c.id for b in batches_map for c in b) + + def test_dataloader_from_tarred_nemo_subset_manifest(nemo_tarred_manifest_subset_path: tuple[str, str]): json_mft, tar_mft, subset_items = nemo_tarred_manifest_subset_path config = OmegaConf.create( diff --git a/tests/collections/common/test_lhotse_multimodal_dataloading.py b/tests/collections/common/test_lhotse_multimodal_dataloading.py new file mode 100644 index 000000000000..9d2a8e572f8e --- /dev/null +++ b/tests/collections/common/test_lhotse_multimodal_dataloading.py @@ -0,0 +1,442 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import lhotse +import numpy as np +import pytest +import torch +from lhotse.testing.dummies import dummy_recording +from omegaconf import OmegaConf + +from nemo.collections.common.data.lhotse import get_lhotse_dataloader_from_config +from nemo.collections.common.data.lhotse.sampling import ( + MultimodalFixedBucketBatchSizeConstraint2D, + MultimodalSamplingConstraint, +) +from nemo.collections.common.data.lhotse.text_adapters import ( + AudioTurn, + NeMoMultimodalConversation, + NeMoMultimodalConversationJsonlAdapter, + NeMoMultimodalConversationTarWriter, + TextTurn, +) +from nemo.collections.common.prompts import Llama2PromptFormatter +from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer, create_spt_model + + +class Identity(torch.utils.data.Dataset): + def __getitem__(self, cuts: lhotse.CutSet) -> lhotse.CutSet: + return cuts + + +@pytest.fixture(scope="session") +def multimodal_conversations_path(tmp_path_factory): + tmp_path = tmp_path_factory.mktemp("text_data") + en_path = tmp_path / "manifest.json" + data = [ + { + "id": "convo_1", + "conversations": [ + { + "value": "Can you help summarize the following?", + "from": "User", + "type": "text", + }, + { + "value": "123.wav", + "from": "User", + "type": "audio", + "duration": 5.73, + }, + { + "value": "I'm glad to assist you with your request. 
Here's a summary:", + "from": "Assistant", + "type": "text", + }, + { + "value": "123_answer.wav", + "from": "Assistant", + "type": "audio", + "duration": 7.11, + }, + { + "value": "Can you further shorten it?", + "from": "User", + "type": "text", + }, + { + "value": "Of course!", + "from": "Assistant", + "type": "text", + }, + ], + } + ] + lhotse.serialization.save_to_jsonl(data, en_path) + dummy_recording(0, 5.73, with_data=True).to_cut().save_audio(tmp_path / "123.wav") + dummy_recording(0, 7.11, with_data=True).to_cut().save_audio(tmp_path / "123_answer.wav") + return en_path + + +def test_multimodal_conversation_input(multimodal_conversations_path): + + config = OmegaConf.create( + { + "input_cfg": [ + { + "type": "multimodal_conversation", + "manifest_filepath": multimodal_conversations_path, + "audio_locator_tag": "[audio]", + }, + ], + "force_finite": True, + "shuffle": True, + "num_workers": 0, + "batch_size": 1, + "seed": 0, + "shard_seed": 0, + } + ) + + # Note: this test does not need to pass a tokenizer because we use static batch sizes + dl = get_lhotse_dataloader_from_config(config=config, global_rank=0, world_size=1, dataset=Identity()) + batches = [batch for batch in dl] + assert len(batches) == 1 + + b = batches[0] + assert isinstance(b, lhotse.CutSet) + assert len(b) == 1 + ex = b[0] + assert isinstance(ex, NeMoMultimodalConversation) + assert ex.id == "convo_1" + assert len(ex.turns) == 6 + t = ex.turns[0] + assert isinstance(t, TextTurn) + assert t.role == "user" + assert t.value == "Can you help summarize the following?" + t = ex.turns[1] + assert isinstance(t, AudioTurn) + assert t.role == "user" + assert t.audio_locator_tag == "[audio]" + assert t.cut.duration == 5.73 + assert t.cut.load_audio().shape == (1, 91680) + t = ex.turns[2] + assert isinstance(t, TextTurn) + assert t.role == "assistant" + assert t.value == "I'm glad to assist you with your request. Here's a summary:" + t = ex.turns[3] + assert isinstance(t, AudioTurn) + assert t.role == "assistant" + assert t.audio_locator_tag == "[audio]" + assert t.cut.duration == 7.11 + assert t.cut.load_audio().shape == (1, 113760) + t = ex.turns[4] + assert isinstance(t, TextTurn) + assert t.role == "user" + assert t.value == "Can you further shorten it?" + t = ex.turns[5] + assert isinstance(t, TextTurn) + assert t.role == "assistant" + assert t.value == "Of course!" 
+ + +@pytest.fixture +def tokenizer(tmp_path_factory, multimodal_conversations_path): + tmpdir = tmp_path_factory.mktemp("multi_convo_tokenizer") + text_path = tmpdir / "text.txt" + text_path.write_text( + "\n".join( + turn["value"] + for item in lhotse.serialization.load_jsonl(multimodal_conversations_path) + for turn in item["conversations"] + ) + ) + create_spt_model( + text_path, + vocab_size=128, + sample_size=-1, + do_lower_case=False, + output_dir=str(tmpdir), + bos=True, + eos=True, + user_defined_symbols=["[INST]", "[/INST]", "<>", "<>", "[audio]"], + remove_extra_whitespaces=True, + ) + return SentencePieceTokenizer(str(tmpdir / "tokenizer.model")) + + +def test_multimodal_conversation_input_with_prompt(multimodal_conversations_path, tokenizer): + + config = OmegaConf.create( + { + "input_cfg": [ + { + "type": "multimodal_conversation", + "manifest_filepath": multimodal_conversations_path, + "audio_locator_tag": "[audio]", + }, + ], + "prompt_format": "llama2", + "force_finite": True, + "shuffle": True, + "num_workers": 0, + "batch_size": 1, + "seed": 0, + "shard_seed": 0, + } + ) + + dl = get_lhotse_dataloader_from_config( + config=config, global_rank=0, world_size=1, dataset=Identity(), tokenizer=tokenizer + ) + batches = [batch for batch in dl] + assert len(batches) == 1 + + b = batches[0] + assert isinstance(b, lhotse.CutSet) + assert len(b) == 1 + ex = b[0] + assert isinstance(ex, NeMoMultimodalConversation) + + assert torch.is_tensor(ex.input_ids) + assert ex.input_ids.shape == (105,) + assert ( + tokenizer.ids_to_text(ex.input_ids) + == "[INST] Can you help summarize the following? [audio] [/INST] I'm glad to assist you with your request. Here's a summary: [audio] [INST] Can you further shorten it? [/INST] Of course!" + ) + + assert torch.is_tensor(ex.context_ids) + assert ex.context_ids.shape == (95,) + assert ( + tokenizer.ids_to_text(ex.context_ids) + == "[INST] Can you help summarize the following? [audio] [/INST] I'm glad to assist you with your request. Here's a summary: [audio] [INST] Can you further shorten it? [/INST]" + ) + + assert torch.is_tensor(ex.answer_ids) + assert ex.answer_ids.shape == (10,) + assert tokenizer.ids_to_text(ex.answer_ids) == "Of course!" 
+ + assert torch.is_tensor(ex.mask) + assert ex.mask.shape == (105,) + assert (ex.mask[:30] == False).all() # user turn + assert (ex.mask[30:72] == True).all() # assistant turn + assert (ex.mask[72:95] == False).all() # user turn + assert (ex.mask[95:] == True).all() # assistant turn + + +def test_text_only_conversation_length_measurement(tokenizer): + convo = NeMoMultimodalConversation( + id="textonly-1", + turns=[ + TextTurn("hello", "user"), + TextTurn("hi", "assistant"), + ], + ) + convo = convo.apply_prompt_format(Llama2PromptFormatter(tokenizer)) + assert tokenizer.ids_to_text(convo.input_ids) == "[INST] hello [/INST] hi" + assert tokenizer.ids_to_text(convo.context_ids) == "[INST] hello [/INST]" + assert tokenizer.ids_to_text(convo.answer_ids) == "hi" + + assert convo.input_length == len(convo.context_ids) == 10 + assert convo.output_length == len(convo.answer_ids) == 4 + assert convo.total_length == len(convo.input_ids) == 14 + + constr = MultimodalSamplingConstraint(measure_total_length=False) + assert constr.measure_length(convo) == 10 + + constr = MultimodalSamplingConstraint(measure_total_length=True) + assert constr.measure_length(convo) == 14 + + constr = MultimodalFixedBucketBatchSizeConstraint2D( + max_seq_len_buckets=[5, 10, 15], batch_sizes=[3, 2, 1], measure_total_length=True + ) + assert constr.measure_length(convo) == 14 + assert constr.select_bucket(constr.max_seq_len_buckets, convo) == 2 + + constr = MultimodalFixedBucketBatchSizeConstraint2D( + max_seq_len_buckets=[(5, 2), (5, 5), (15, 3), (15, 6), (15, 10)], + batch_sizes=[5, 4, 3, 2, 1], + measure_total_length=False, + ) + assert constr.measure_length(convo) == (10, 4) + assert constr.select_bucket(constr.max_seq_len_buckets, convo) == 3 + + +def test_audio_only_conversation_length_measurement(tokenizer, tmp_path_factory): + audio_dir = tmp_path_factory.mktemp("audio") + c1 = dummy_recording(0, duration=7.16, with_data=True).to_cut().save_audio(audio_dir / "1.wav") + c2 = dummy_recording(1, duration=15.96, with_data=True).to_cut().save_audio(audio_dir / "2.wav") + convo = NeMoMultimodalConversation( + id="audioonly-1", + turns=[ + AudioTurn(c1, "user", "[audio]"), + AudioTurn(c2, "assistant", "[audio]"), + ], + token_equivalent_duration=0.1, # 10ms frame_shift * 10x subsampling for easy testing + ) + convo = convo.apply_prompt_format(Llama2PromptFormatter(tokenizer)) + assert tokenizer.ids_to_text(convo.input_ids) == "[INST] [audio] [/INST] [audio]" + assert tokenizer.ids_to_text(convo.context_ids) == "[INST] [audio] [/INST]" + assert tokenizer.ids_to_text(convo.answer_ids) == "[audio]" + + # NOTE: Unlike text-only, len(context_ids) != convo.input_length! The same is true for answer and input ids. + # 7.16s with 100ms frame is 72 tokens, we have 7 context tokens, but replace 1 audio locator tag. + assert len(convo.context_ids) == 7 + assert convo.input_length == 78 + + # 15.96s with 100ms frame is 160 tokens, we have 3 answer tokens, but replace 1 audio locator tag. 
+ assert len(convo.answer_ids) == 3 + assert convo.output_length == 162 + + assert len(convo.input_ids) == 10 + assert convo.total_length == 162 + 78 + + constr = MultimodalSamplingConstraint(measure_total_length=False) + assert constr.measure_length(convo) == 78 + + constr = MultimodalSamplingConstraint(measure_total_length=True) + assert constr.measure_length(convo) == 162 + 78 + + constr = MultimodalFixedBucketBatchSizeConstraint2D( + max_seq_len_buckets=[100, 200, 300, 400], batch_sizes=[3, 2, 1, 1], measure_total_length=True + ) + assert constr.measure_length(convo) == 162 + 78 + assert constr.select_bucket(constr.max_seq_len_buckets, convo) == 2 + + constr = MultimodalFixedBucketBatchSizeConstraint2D( + max_seq_len_buckets=[ + (50, 50), + (50, 100), + (50, 200), + (100, 50), + (100, 150), + (100, 200), + (100, 300), + (400, 400), + ], + batch_sizes=[8, 7, 6, 5, 4, 3, 2, 1], + measure_total_length=False, + ) + assert constr.measure_length(convo) == (78, 162) + assert constr.select_bucket(constr.max_seq_len_buckets, convo) == 5 + + +def test_multimodal_conversation_length_measurement(tokenizer, tmp_path_factory): + audio_dir = tmp_path_factory.mktemp("audio") + c1 = dummy_recording(0, duration=7.16, with_data=True).to_cut().save_audio(audio_dir / "1.wav") + c2 = dummy_recording(1, duration=15.96, with_data=True).to_cut().save_audio(audio_dir / "2.wav") + convo = NeMoMultimodalConversation( + id="multimodal-1", + turns=[ + TextTurn("listen to this and tell me your opinion", "user"), + AudioTurn(c1, "user", "[audio]"), + TextTurn("its fine", "assistant"), + TextTurn("remove the noise", "user"), + TextTurn("sure", "assistant"), + AudioTurn(c2, "assistant", "[audio]"), + ], + token_equivalent_duration=0.1, # 10ms frame_shift * 10x subsampling for easy testing + ) + convo = convo.apply_prompt_format(Llama2PromptFormatter(tokenizer)) + print(convo) + assert ( + tokenizer.ids_to_text(convo.input_ids) + == "[INST] listen to this and tell me your opinion [audio] [/INST] its fine [INST] remove the noise [/INST] sure [audio]" + ) + assert ( + tokenizer.ids_to_text(convo.context_ids) + == "[INST] listen to this and tell me your opinion [audio] [/INST] its fine [INST] remove the noise [/INST]" + ) + assert tokenizer.ids_to_text(convo.answer_ids) == "sure [audio]" + + assert len(convo.context_ids) == 66 + assert convo.input_length == 66 + 72 - 1 == 137 + + # 15.96s with 100ms frame is 160 tokens, we have 3 answer tokens, but replace 1 audio locator tag. 
+    assert len(convo.answer_ids) == 7
+    assert convo.output_length == 7 + 160 - 1 == 166
+
+    assert len(convo.input_ids) == 73
+    assert convo.total_length == 137 + 166 == 303
+
+    constr = MultimodalSamplingConstraint(measure_total_length=False)
+    assert constr.measure_length(convo) == 137
+
+    constr = MultimodalSamplingConstraint(measure_total_length=True)
+    assert constr.measure_length(convo) == 303
+
+    constr = MultimodalFixedBucketBatchSizeConstraint2D(
+        max_seq_len_buckets=[100, 200, 300, 400], batch_sizes=[3, 2, 1, 1], measure_total_length=True
+    )
+    assert constr.measure_length(convo) == 303
+    assert constr.select_bucket(constr.max_seq_len_buckets, convo) == 3
+
+    constr = MultimodalFixedBucketBatchSizeConstraint2D(
+        max_seq_len_buckets=[
+            (50, 50),
+            (50, 100),
+            (50, 200),
+            (100, 50),
+            (100, 150),
+            (100, 200),
+            (100, 300),
+            (400, 400),
+        ],
+        batch_sizes=[8, 7, 6, 5, 4, 3, 2, 1],
+        measure_total_length=False,
+    )
+    assert constr.measure_length(convo) == (137, 166)
+    assert constr.select_bucket(constr.max_seq_len_buckets, convo) == 7
+
+
+def test_multimodal_conversation_tarred_format(multimodal_conversations_path, tmp_path_factory):
+    (conversation,) = list(NeMoMultimodalConversationJsonlAdapter(multimodal_conversations_path, "[audio]"))
+    tar_dir = tmp_path_factory.mktemp("multi_convo_tarred")
+    with NeMoMultimodalConversationTarWriter(tar_dir) as writer:
+        writer.write(conversation)
+
+    (restored_conversation,) = list(
+        NeMoMultimodalConversationJsonlAdapter(
+            manifest_filepath=tar_dir / "manifest_0.jsonl",
+            audio_locator_tag="[audio]",
+            tarred_audio_filepaths=tar_dir / "audio_0.tar",
+        )
+    )
+    assert conversation.id == restored_conversation.id
+    assert len(conversation.turns) == len(restored_conversation.turns)
+    for lhs, rhs in zip(conversation.turns, restored_conversation.turns):
+        assert type(lhs) == type(rhs)
+        assert lhs.role == rhs.role
+        if isinstance(lhs, TextTurn):
+            assert lhs.value == rhs.value
+        else:
+            assert lhs.audio_locator_tag == rhs.audio_locator_tag
+            assert lhs.cut.id == rhs.cut.id
+            np.testing.assert_allclose(lhs.cut.load_audio(), rhs.cut.load_audio())
+
+
+def test_multimodal_conversation_tarred_format_sharding_works(multimodal_conversations_path, tmp_path_factory):
+    (conversation,) = list(NeMoMultimodalConversationJsonlAdapter(multimodal_conversations_path, "[audio]"))
+    tar_dir = tmp_path_factory.mktemp("multi_convo_tarred")
+    with NeMoMultimodalConversationTarWriter(tar_dir, shard_size=10) as writer:
+        for i in range(30):
+            writer.write(conversation)
+
+    loader = NeMoMultimodalConversationJsonlAdapter(
+        manifest_filepath=tar_dir / "manifest_{0..2}.jsonl",
+        audio_locator_tag="[audio]",
+        tarred_audio_filepaths=tar_dir / "audio_{0..2}.tar",
+    )
+    restored = list(loader)
+    assert len(restored) == 30
+    assert all(c == restored[0] for c in restored[1:])
diff --git a/tests/collections/common/test_lhotse_prompt_format_data_types.py b/tests/collections/common/test_lhotse_prompt_format_data_types.py
new file mode 100644
index 000000000000..5d1bdc600aeb
--- /dev/null
+++ b/tests/collections/common/test_lhotse_prompt_format_data_types.py
@@ -0,0 +1,297 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import lhotse.serialization +import pytest +from lhotse import CutSet, SupervisionSegment +from lhotse.cut import Cut +from lhotse.testing.dummies import dummy_cut + +from nemo.collections.common.data import ( + NeMoSFTExample, + SourceTargetTextExample, + TextExample, + get_lhotse_dataloader_from_config, +) +from nemo.collections.common.tokenizers import SentencePieceTokenizer +from nemo.collections.common.tokenizers.sentencepiece_tokenizer import create_spt_model + + +@pytest.fixture +def tokenizer(tmp_path_factory): + tmpdir = tmp_path_factory.mktemp("tok") + text_path = tmpdir / "text.txt" + text_path.write_text("\n".join(chr(i) for i in range(256))) + create_spt_model( + text_path, + vocab_size=512, + sample_size=-1, + do_lower_case=False, + output_dir=str(tmpdir), + bos=True, + eos=True, + user_defined_symbols=[ + "[INST]", + "[/INST]", + "<>", + "<>", + "[audio]", + "", + "", + ], + remove_extra_whitespaces=True, + ) + return SentencePieceTokenizer(str(tmpdir / "tokenizer.model")) + + +@pytest.fixture +def cuts_path(tmp_path_factory): + tmp_path = tmp_path_factory.getbasetemp() / "cuts.jsonl" + c = dummy_cut(0, duration=1.0, supervisions=[SupervisionSegment("", "", 0, 1.0, text="dummy text")]) + c.context = "dummy context" + CutSet([c]).to_file(tmp_path) + return tmp_path + + +@pytest.fixture +def src_tgt_example(tmp_path_factory): + d = tmp_path_factory.mktemp("src_tgt") + (d / "src.txt").write_text("an example") + (d / "tgt.txt").write_text("elpmaxe na") + return (d / "src.txt"), (d / "tgt.txt") + + +@pytest.fixture +def nemo_sft_example(tmp_path_factory): + tmp_path = tmp_path_factory.getbasetemp() / "nemo_sft.jsonl" + lhotse.serialization.save_to_jsonl( + [ + { + "system": "", + "mask": "User", + "dataset": "", + "conversations": [ + { + "from": "User", + "value": "Hi, how are you?", + }, + { + "from": "Assistant", + "value": "Good day, I'm a useful assistant.", + }, + ], + } + ], + tmp_path, + ) + return tmp_path + + +class Identity: + def __getitem__(self, item): + return item + + +def test_prompt_format_cut(cuts_path, tokenizer): + dl = get_lhotse_dataloader_from_config( + { + "cuts_path": cuts_path, + "batch_size": 1, + "prompt_format": "llama2", + "min_duration": 0, + "max_duration": 10, + }, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=tokenizer, + ) + + batch = next(iter(dl)) + ex = batch[0] + assert isinstance(ex, Cut) + assert tokenizer.ids_to_text(ex.input_ids) == "[INST] dummy context [/INST] dummy text" + assert tokenizer.ids_to_text(ex.context_ids) == "[INST] dummy context [/INST]" + assert tokenizer.ids_to_text(ex.answer_ids) == "dummy text" + + +def test_prompt_format_cut_filtered_out(cuts_path, tokenizer): + dl = get_lhotse_dataloader_from_config( + { + "cuts_path": cuts_path, + "batch_size": 1, + "prompt_format": "llama2", + "min_duration": 0, + "max_duration": 0.5, + }, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=tokenizer, + ) + with pytest.raises(StopIteration): + next(iter(dl)) + + +def test_prompt_format_cut_max_tokens_has_no_filtering_effect(cuts_path, tokenizer): + dl = 
get_lhotse_dataloader_from_config( + { + "cuts_path": cuts_path, + "batch_size": 1, + "prompt_format": "llama2", + "use_multimodal_dataloading": True, + "token_equivalent_duration": 0.1, + "min_tokens": 1, + "max_tokens": 2, + "use_total_length": True, + }, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=tokenizer, + ) + + batch = next(iter(dl)) + ex = batch[0] + assert isinstance(ex, Cut) + + +def test_prompt_format_src_tgt(src_tgt_example, tokenizer): + dl = get_lhotse_dataloader_from_config( + { + "input_cfg": [ + {"type": "txt_pair", "source_paths": src_tgt_example[0], "target_paths": src_tgt_example[1]} + ], + "batch_size": 1, + "force_finite": True, + "prompt_format": "llama2", + "use_multimodal_dataloading": True, + "min_tokens": 1, + "max_tokens": 50, + "use_total_length": True, + }, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=tokenizer, + ) + + batch = next(iter(dl)) + ex = batch[0] + assert isinstance(ex, SourceTargetTextExample) + assert tokenizer.ids_to_text(ex.input_ids) == "[INST] an example [/INST] elpmaxe na" + assert tokenizer.ids_to_text(ex.context_ids) == "[INST] an example [/INST]" + assert tokenizer.ids_to_text(ex.answer_ids) == "elpmaxe na" + + +def test_prompt_format_src_tgt_filtered_out(src_tgt_example, tokenizer): + dl = get_lhotse_dataloader_from_config( + { + "input_cfg": [ + {"type": "txt_pair", "source_paths": src_tgt_example[0], "target_paths": src_tgt_example[1]} + ], + "batch_size": 1, + "force_finite": True, + "prompt_format": "llama2", + "use_multimodal_dataloading": True, + "min_tokens": 1, + "max_tokens": 10, + "use_total_length": True, + }, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=tokenizer, + ) + with pytest.raises(StopIteration): + batch = next(iter(dl)) + + +def test_prompt_format_src_tgt_2d(src_tgt_example, tokenizer): + dl = get_lhotse_dataloader_from_config( + { + "input_cfg": [ + { + "type": "txt_pair", + "source_paths": src_tgt_example[0], + "target_paths": src_tgt_example[1], + "target_language": "reversed", + } + ], + "batch_size": 1, + "force_finite": True, + "prompt_format": "t5nmt", + "use_multimodal_dataloading": True, + "min_tokens": 1, + "max_tokens": 50, + "use_total_length": False, + }, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=tokenizer, + ) + + batch = next(iter(dl)) + ex = batch[0] + assert isinstance(ex, SourceTargetTextExample) + assert tokenizer.ids_to_text(ex.input_ids) == " an example elpmaxe na" + assert tokenizer.ids_to_text(ex.context_ids) == " an example" + assert tokenizer.ids_to_text(ex.answer_ids) == "elpmaxe na" + + +def test_prompt_format_nemo_sft(nemo_sft_example, tokenizer): + dl = get_lhotse_dataloader_from_config( + { + "input_cfg": [{"type": "nemo_sft_jsonl", "paths": nemo_sft_example}], + "batch_size": 1, + "force_finite": True, + "prompt_format": "llama2", + "use_multimodal_dataloading": True, + "min_tokens": 1, + "max_tokens": 100, + "use_total_length": True, + }, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=tokenizer, + ) + + batch = next(iter(dl)) + ex = batch[0] + assert isinstance(ex, NeMoSFTExample) + assert tokenizer.ids_to_text(ex.input_ids) == "[INST] Hi, how are you? [/INST] Good day, I'm a useful assistant." + assert tokenizer.ids_to_text(ex.context_ids) == "[INST] Hi, how are you? [/INST]" + assert tokenizer.ids_to_text(ex.answer_ids) == "Good day, I'm a useful assistant." 
+ + +def test_prompt_format_nemo_sft_filtered_out(nemo_sft_example, tokenizer): + dl = get_lhotse_dataloader_from_config( + { + "input_cfg": [{"type": "nemo_sft_jsonl", "paths": nemo_sft_example}], + "batch_size": 1, + "force_finite": True, + "prompt_format": "llama2", + "use_multimodal_dataloading": True, + "min_tokens": 1, + "max_tokens": 5, + "use_total_length": True, + }, + global_rank=0, + world_size=1, + dataset=Identity(), + tokenizer=tokenizer, + ) + with pytest.raises(StopIteration): + batch = next(iter(dl)) diff --git a/tests/collections/common/test_lhotse_seqlen_filters.py b/tests/collections/common/test_lhotse_seqlen_filters.py new file mode 100644 index 000000000000..b16887555800 --- /dev/null +++ b/tests/collections/common/test_lhotse_seqlen_filters.py @@ -0,0 +1,184 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from copy import deepcopy + +import numpy as np +import pytest +from lhotse import SupervisionSegment +from lhotse.testing.dummies import dummy_cut + +from nemo.collections.common.data.lhotse.sampling import ( + DurationFilter, + TokenCountFilter, + TokenPerSecondFilter, + TokenPerTokenFilter, +) +from nemo.collections.common.data.lhotse.text_adapters import NeMoSFTExample, SourceTargetTextExample, TextExample + + +@pytest.fixture +def cut(): + c = dummy_cut(0, duration=1.0, supervisions=[SupervisionSegment("", "", 0, 1.0, text="dummy")]) + c.supervisions[0].tokens = [1, 37, 12, 2] + return c + + +def test_cut_duration_filter(cut): + f = DurationFilter(0, 10) + assert f(cut) == True + + f = DurationFilter(0, 0.5) + assert f(cut) == False + + f = DurationFilter(1.5, 2.0) + assert f(cut) == False + + +def test_cut_token_per_second_filter(cut): + f = TokenPerSecondFilter(tps_min=0.0, tps_max=5.0) + assert f(cut) == True + + f = TokenPerSecondFilter(tps_min=0.0, tps_max=1.0) + assert f(cut) == False + + f = TokenPerSecondFilter(tps_min=10.0, tps_max=12.0) + assert f(cut) == False + + +def test_cut_passes_by_token_count_and_tpt_filter(cut): + assert TokenCountFilter(1, 10, measure_total_length=True)(cut) == True + assert TokenPerTokenFilter(1, 10)(cut) == True + + +def test_cut_passes_by_token_count_and_tpt_filter(cut): + assert TokenCountFilter(1, 10, measure_total_length=True)(cut) == True + assert TokenPerTokenFilter(1, 10)(cut) == True + + +@pytest.fixture +def src_tgt_example(): + return SourceTargetTextExample( + source=TextExample("", tokens=np.array([1, 37, 12, 2])), + target=TextExample("", tokens=np.array([1, 1823, 1245, 2446, 1038, 2])), + ) + + +def test_src_tgt_token_filter_requires_prompt_formatting(src_tgt_example): + with pytest.raises(RuntimeError): + TokenCountFilter(0, 1, True)(src_tgt_example) + + +def test_src_tgt_passes_by_duration_filter(src_tgt_example): + assert DurationFilter(1, 10)(src_tgt_example) == True + assert TokenPerSecondFilter(1, 10)(src_tgt_example) == True + + +def test_src_tgt_token_filter(src_tgt_example): + example = deepcopy(src_tgt_example) + example.input_ids = 
np.concatenate((example.source.tokens, example.target.tokens)) + example.context_ids = example.source.tokens + example.answer_ids = example.target.tokens + + """ + Input length measurement / encoder-decoder models / 2D bucketing + """ + f = TokenCountFilter(1, 5, measure_total_length=False) + assert f(example) == True + + f = TokenCountFilter(1, 3, measure_total_length=False) + assert f(example) == False + + f = TokenCountFilter(10, 30, measure_total_length=False) + assert f(example) == False + + """ + Total length measurement / decoder-only models / 1D bucketing + """ + f = TokenCountFilter(1, 5, measure_total_length=True) + assert f(example) == False + + f = TokenCountFilter(1, 20, measure_total_length=True) + assert f(example) == True + + f = TokenCountFilter(1, 3, measure_total_length=True) + assert f(example) == False + + f = TokenCountFilter(20, 30, measure_total_length=True) + assert f(example) == False + + +@pytest.fixture +def nemo_sft_example(): + example = NeMoSFTExample( + data={ + "system": "", + "mask": "User", + "dataset": "", + "conversations": [ + { + "from": "User", + "value": "Hi, how are you?", + }, + { + "from": "Assistant", + "value": "Good day, I'm a useful assistant.", + }, + ], + }, + ) + return example + + +def test_nemo_sft_token_filter_requires_prompt_formatting(nemo_sft_example): + with pytest.raises(RuntimeError): + TokenCountFilter(0, 1, True)(nemo_sft_example) + + +def test_nemo_sft_passes_by_duration_filter(nemo_sft_example): + assert DurationFilter(1, 10)(nemo_sft_example) == True + assert TokenPerSecondFilter(1, 10)(nemo_sft_example) == True + + +def test_nemo_sft_token_filter(nemo_sft_example): + example = deepcopy(nemo_sft_example) + example.input_ids = np.array([1, 123, 3425, 123, 2345, 324, 54, 2]) + example.context_ids = np.array([1, 123, 3425]) + example.answer_ids = np.array([123, 2345, 324, 54, 2]) + + """ + Input length measurement / encoder-decoder models / 2D bucketing + """ + f = TokenCountFilter(1, 5, measure_total_length=False) + assert f(example) == True + + f = TokenCountFilter(1, 2, measure_total_length=False) + assert f(example) == False + + f = TokenCountFilter(10, 30, measure_total_length=False) + assert f(example) == False + + """ + Total length measurement / decoder-only models / 1D bucketing + """ + f = TokenCountFilter(1, 5, measure_total_length=True) + assert f(example) == False + + f = TokenCountFilter(1, 20, measure_total_length=True) + assert f(example) == True + + f = TokenCountFilter(1, 3, measure_total_length=True) + assert f(example) == False + + f = TokenCountFilter(10, 30, measure_total_length=True) + assert f(example) == False diff --git a/tests/collections/multimodal/test_emmett.py b/tests/collections/multimodal/test_emmett.py new file mode 100644 index 000000000000..6be0c6b9ea6e --- /dev/null +++ b/tests/collections/multimodal/test_emmett.py @@ -0,0 +1,259 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import pytest +import torch +from lhotse import CutSet, MonoCut, SupervisionSegment +from lhotse.testing.dummies import dummy_recording +from omegaconf import OmegaConf + +from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config +from nemo.collections.common.data.lhotse.text_adapters import SourceTargetTextExample +from nemo.collections.common.tokenizers import SentencePieceTokenizer +from nemo.collections.common.tokenizers.sentencepiece_tokenizer import create_spt_model +from nemo.collections.multimodal.speech_llm.data.lhotse_dataset import LhotseAudioQuestionAnswerDataset +from nemo.collections.multimodal.speech_llm.parts.utils.data_utils import PromptFormatterTextProcessing + + +class Identity(torch.utils.data.Dataset): + def __getitem__(self, cuts): + return cuts + + +@pytest.fixture +def tokenizer(capsys, tmp_path_factory): + TOKENIZER_TRAIN_TEXT = """ + Example system message. + Example user message. + Example assistant message. + TEST + [INST] + [/INST] + + + <> + <> + User: Assistant: + user model + Instruct Output + \n\n + + <| + |> + <|en|> <|de|> <|fr|> <|es|> <|transcribe|> <|translate|> <|pnc|> <|nopnc|> <|startoftranscript|> <|endoftext|> + Feel free to add new tokens for your own tests!? + But know that if you do so, you may need to update the token IDs in the existing tests! + So, it might be a good idea to create a new tokenizer instead when adding new prompt formats. + """ + tmpdir = tmp_path_factory.mktemp("bpe_tokenizer") + text_path = tmpdir / "text.txt" + text_path.write_text(TOKENIZER_TRAIN_TEXT) + with capsys.disabled(): + create_spt_model( + str(text_path), + vocab_size=512, + sample_size=-1, + do_lower_case=False, + output_dir=str(tmpdir), + remove_extra_whitespaces=True, + ) + return SentencePieceTokenizer(str(tmpdir / "tokenizer.model")) + + +""" +TEST FOR AUDIO DATALOADING WITH EMMETT +""" + + +@pytest.fixture +def cuts(): + return CutSet( + [ + MonoCut( + id="ex0", + start=0, + duration=5.0, + channel=0, + supervisions=[ + SupervisionSegment( + id="ex0", + recording_id="dummy-recording-0000", + start=0, + duration=5.0, + text="some transcription", + language="en", + ) + ], + recording=dummy_recording(0, duration=5.0, with_data=True), + custom={ + "context": "", + "answer": "some desired answer", + }, + ), + ] + ) + + +@pytest.fixture +def cuts_path(tmp_path_factory, cuts): + tmp_path = tmp_path_factory.mktemp("data") + p = tmp_path / "cuts.jsonl.gz" + pa = tmp_path / "audio" + cuts.save_audios(pa).to_file(p) + return p + + +def test_audio_example_with_prompt_emmett_t5(cuts_path, tokenizer): + config = OmegaConf.create( + { + "input_cfg": [ + { + "type": "lhotse", + "cuts_path": cuts_path, + }, + ], + "prompt_format": "t5nmt", + "force_finite": True, + "shuffle": True, + "num_workers": 0, + "batch_size": 1, + "seed": 0, + "shard_seed": 0, + } + ) + + # First test that sampling is correct and tokenizer + prompt formatter is applied there + + dl = get_lhotse_dataloader_from_config( + config=config, global_rank=0, world_size=1, dataset=Identity(), tokenizer=tokenizer + ) + batches = [batch for batch in dl] + assert len(batches) == 1 + + b = batches[0] + assert isinstance(b, CutSet) + assert len(b) == 1 + ex = b[0] + assert isinstance(ex, MonoCut) + + assert ex.has_custom("context_ids") + assert torch.is_tensor(ex.context_ids) + assert tokenizer.ids_to_text(ex.context_ids) == "" + + assert ex.has_custom("answer_ids") + assert torch.is_tensor(ex.answer_ids) + assert tokenizer.ids_to_text(ex.answer_ids) == "some transcription" + + 
assert ex.has_custom("input_ids") + assert torch.is_tensor(ex.input_ids) + assert tokenizer.ids_to_text(ex.input_ids) == " some transcription" + + # Test that speechlm dataset processes the example correctly + + text_processor = PromptFormatterTextProcessing(tokenizer=tokenizer, prompt_format="t5nmt") + dataset = LhotseAudioQuestionAnswerDataset( + text_processor=text_processor, + default_context="", + tokens_to_generate=0, + pad_to_max_length=False, + max_seq_length=64, + ) + + batch = dataset[batches[0]] + assert tokenizer.ids_to_text(batch["tokens"][0]) == " some transcriptio" + assert tokenizer.ids_to_text(batch["labels"][0]) == "en> some transcription" + assert tokenizer.ids_to_text(batch["contexts"][0]) == "" + assert tokenizer.ids_to_text(batch["answers"][0]) == "some transcription" + + +""" +TEST FOR TEXT DATALOADING WITH EMMETT +""" + + +@pytest.fixture +def nmt_paths(tmp_path_factory): + tmp_path = tmp_path_factory.mktemp("nmtdata") + src = tmp_path / "src.txt" + tgt = tmp_path / "tgt.txt" + q = tmp_path / "q.txt" + src.write_text("fake german") + tgt.write_text("real english") + q.write_text("") + return src, tgt, q + + +def test_text_example_with_prompt_emmett_t5(nmt_paths, tokenizer): + src, tgt, q = nmt_paths + config = OmegaConf.create( + { + "input_cfg": [ + { + "type": "txt_pair", + "source_paths": src, + "target_paths": tgt, + "source_language": "de", + "target_language": "en", + "questions_path": q, + "questions_language": "en", + }, + ], + "prompt_format": "t5nmt", + "force_finite": True, + "shuffle": True, + "num_workers": 0, + "batch_size": 1, + "seed": 0, + "shard_seed": 0, + } + ) + + # First test that sampling is correct and tokenizer + prompt formatter is applied there + + dl = get_lhotse_dataloader_from_config( + config=config, global_rank=0, world_size=1, dataset=Identity(), tokenizer=tokenizer + ) + batches = [batch for batch in dl] + assert len(batches) == 1 + + b = batches[0] + assert isinstance(b, CutSet) + assert len(b) == 1 + ex = b[0] + assert isinstance(ex, SourceTargetTextExample) + + assert torch.is_tensor(ex.context_ids) + assert tokenizer.ids_to_text(ex.context_ids) == " fake german" + + assert torch.is_tensor(ex.answer_ids) + assert tokenizer.ids_to_text(ex.answer_ids) == "real english" + + assert torch.is_tensor(ex.input_ids) + assert tokenizer.ids_to_text(ex.input_ids) == " fake german real english" + + # Test that speechlm dataset processes the example correctly + + text_processor = PromptFormatterTextProcessing(tokenizer=tokenizer, prompt_format="t5nmt") + dataset = LhotseAudioQuestionAnswerDataset( + text_processor=text_processor, + default_context="", + tokens_to_generate=0, + pad_to_max_length=False, + max_seq_length=64, + ) + + batch = dataset[batches[0]] + + assert tokenizer.ids_to_text(batch["text_input_ids"][0]) == " fake german real english" + assert tokenizer.ids_to_text(batch["text_context_ids"][0]) == " fake german" + assert tokenizer.ids_to_text(batch["text_answer_ids"][0]) == "real english" diff --git a/tests/collections/multimodal/test_speechllm_dataset.py b/tests/collections/multimodal/test_speechllm_dataset.py new file mode 100644 index 000000000000..e018079f7ee8 --- /dev/null +++ b/tests/collections/multimodal/test_speechllm_dataset.py @@ -0,0 +1,395 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest +import torch +from lhotse import CutSet, MonoCut, SupervisionSegment +from lhotse.testing.dummies import dummy_recording +from torch import tensor + +from nemo.collections.common.tokenizers import SentencePieceTokenizer +from nemo.collections.common.tokenizers.sentencepiece_tokenizer import create_spt_model +from nemo.collections.multimodal.speech_llm.data.lhotse_dataset import LhotseAudioQuestionAnswerDataset +from nemo.collections.multimodal.speech_llm.parts.utils.data_utils import PromptFormatterTextProcessing + + +@pytest.fixture +def tokenizer(capsys, tmp_path_factory): + TOKENIZER_TRAIN_TEXT = """ + Example system message. + Example user message. + Example assistant message. + TEST + [INST] + [/INST] + + + <> + <> + User: Assistant: + user model + Instruct Output + \n\n + + <| + |> + <|en|> <|de|> <|fr|> <|es|> <|transcribe|> <|translate|> <|pnc|> <|nopnc|> <|startoftranscript|> <|endoftext|> + Feel free to add new tokens for your own tests!? + But know that if you do so, you may need to update the token IDs in the existing tests! + So, it might be a good idea to create a new tokenizer instead when adding new prompt formats. + """ + tmpdir = tmp_path_factory.mktemp("bpe_tokenizer") + text_path = tmpdir / "text.txt" + text_path.write_text(TOKENIZER_TRAIN_TEXT) + with capsys.disabled(): + create_spt_model( + str(text_path), + vocab_size=512, + sample_size=-1, + do_lower_case=False, + output_dir=str(tmpdir), + remove_extra_whitespaces=True, + ) + return SentencePieceTokenizer(str(tmpdir / "tokenizer.model")) + + +@pytest.fixture +def cuts(): + return CutSet( + [ + MonoCut( + id="ex0", + start=0, + duration=5.0, + channel=0, + supervisions=[ + SupervisionSegment( + id="ex0", + recording_id="dummy-recording-0000", + start=0, + duration=5.0, + text="some transcription", + language="en", + ) + ], + recording=dummy_recording(0, duration=5.0, with_data=True), + custom={ + "context": "non default prompt context", + "answer": "some desired answer", + "system_prompt": "Please answer the following based on the previous speech feature.", + }, + ), + ] + ) + + +def test_speechllm_dataset(tokenizer, cuts): + text_processor = PromptFormatterTextProcessing(tokenizer=tokenizer, prompt_format="plain") + dataset = LhotseAudioQuestionAnswerDataset( + text_processor=text_processor, + default_context="do this task", + tokens_to_generate=0, + pad_to_max_length=True, + max_seq_length=64, + ) + + batch = dataset[cuts] + + expected_keys = { + "sample_ids", + "audio_signal", + "audio_signal_length", + "audio_ratio", + "metadata", + "tokens", + "tokens_length", + "labels", + "loss_mask", + "position_ids", + "contexts", + "context_lengths", + "max_length", + "answers", + } + missing_keys = expected_keys - set(batch) + unexpected_keys = set(batch) - expected_keys + assert not missing_keys and not unexpected_keys, f"{missing_keys=} {unexpected_keys=}" + + assert batch["sample_ids"] == ["ex0"] + assert batch["metadata"] == [{'audio_filepath': 'ex0.wav'}] + torch.testing.assert_close(batch["audio_ratio"], tensor([1.0])) + torch.testing.assert_close(batch["max_length"], tensor([64])) + + assert 
torch.is_tensor(batch["audio_signal"]) + assert torch.is_floating_point(batch["audio_signal"]) + assert batch["audio_signal"].shape == (1, 80000) + torch.testing.assert_close(batch["audio_signal_length"], tensor([80000], dtype=torch.int32)) + + # fmt: off + expected = tensor([[ 1, 78, 9, 1, 64, 80, 5, 75, 15, 6, 1, 12, 24, 14, + 23, 6, 1, 27, 14, 9, 6, 63, 6, 76, 14, 73, 2, 1, + 56, 100, 41, 14, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0]]) + torch.testing.assert_close(batch["tokens"], expected) + torch.testing.assert_close(batch["tokens_length"], tensor([32])) + assert tokenizer.ids_to_text(expected[0, :33].tolist()) == "non default prompt context some transcription" + + expected = tensor([[1, 78, 9, 1, 64, 80, 5, 75, 15, 6, 1, 12, 24, 14, 23, 6, 1, 27, + 14, 9, 6, 63, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) + torch.testing.assert_close(batch["contexts"], expected) + torch.testing.assert_close(batch["context_lengths"], tensor([23])) + assert tokenizer.ids_to_text(expected[0, :23].tolist()) == "non default prompt context" + + expected = tensor([[76, 14, 73, 2, 1, 56, 100, 41, 14, 9, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0]]) + torch.testing.assert_close(batch["answers"], expected) + assert tokenizer.ids_to_text(expected[0, :10].tolist()) == "some transcription" + + expected = tensor([[78, 9, 1, 64, 80, 5, 75, 15, 6, 1, 12, 24, 14, 23, + 6, 1, 27, 14, 9, 6, 63, 6, 76, 14, 73, 2, 1, 56, + 100, 41, 14, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0]]) + torch.testing.assert_close(batch["labels"], expected) + assert tokenizer.ids_to_text(expected[0, :32].tolist()) == "non default prompt context some transcription" + + torch.testing.assert_close( + batch["position_ids"], + tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63]]) + ) + + torch.testing.assert_close( + batch["loss_mask"], + tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0.]]) + ) + # fmt: on + + +@pytest.fixture +def llama_tokenizer(capsys, tmp_path_factory): + TOKENIZER_TRAIN_TEXT = """ + a b c d e f g h i j k l m n o p q r s t u v x y z + A B C D E F G H I J K L M N O P Q R S T U V X Y Z + [EOG] + Example system message. + Example user message. + Example assistant message. + TEST + [INST] + [/INST] + + + <> + <> + User: Assistant: + user model + Instruct Output + \n\n + + <| + |> + <|en|> <|de|> <|fr|> <|es|> <|transcribe|> <|translate|> <|pnc|> <|nopnc|> <|startoftranscript|> <|endoftext|> + Feel free to add new tokens for your own tests!? + But know that if you do so, you may need to update the token IDs in the existing tests! + So, it might be a good idea to create a new tokenizer instead when adding new prompt formats. 
+ """ + tmpdir = tmp_path_factory.mktemp("bpe_tokenizer") + text_path = tmpdir / "text.txt" + text_path.write_text(TOKENIZER_TRAIN_TEXT) + with capsys.disabled(): + create_spt_model( + str(text_path), + vocab_size=512, + sample_size=-1, + do_lower_case=False, + output_dir=str(tmpdir), + bos=True, + eos=True, + user_defined_symbols=["[INST]", "[/INST]", "<>", "<>", "[EOG]"], + remove_extra_whitespaces=True, + ) + return SentencePieceTokenizer(str(tmpdir / "tokenizer.model")) + + +def test_speechllm_dataset_prompt_template(llama_tokenizer, cuts): + tokenizer = llama_tokenizer + text_processor = PromptFormatterTextProcessing(tokenizer=tokenizer, prompt_format="llama2") + dataset = LhotseAudioQuestionAnswerDataset( + text_processor=text_processor, + default_context="do this task", + tokens_to_generate=0, + pad_to_max_length=True, + max_seq_length=128, + ) + + batch = dataset[cuts] + print(batch) + + expected_keys = { + "sample_ids", + "audio_signal", + "audio_signal_length", + "audio_ratio", + "metadata", + "tokens", + "tokens_length", + "labels", + "loss_mask", + "position_ids", + "contexts", + "context_lengths", + "max_length", + "answers", + } + missing_keys = expected_keys - set(batch) + unexpected_keys = set(batch) - expected_keys + assert not missing_keys and not unexpected_keys, f"{missing_keys=} {unexpected_keys=}" + + assert batch["sample_ids"] == ["ex0"] + assert batch["metadata"] == [{'audio_filepath': 'ex0.wav'}] + torch.testing.assert_close(batch["audio_ratio"], tensor([1.0])) + torch.testing.assert_close(batch["max_length"], tensor([128])) + + assert torch.is_tensor(batch["audio_signal"]) + assert torch.is_floating_point(batch["audio_signal"]) + assert batch["audio_signal"].shape == (1, 80000) + torch.testing.assert_close(batch["audio_signal_length"], tensor([80000], dtype=torch.int32)) + + for k in ("tokens", "contexts", "answers", "labels"): + print(f"batch['{k}']=", tokenizer.ids_to_text(batch[k][0])) + # fmt: off + expected = tensor([[ 1, 8, 3, 8, 5, 8, 105, 18, 9, 12, 17, 9, 41, 14, + 17, 22, 125, 43, 9, 117, 19, 18, 18, 79, 48, 15, 92, 12, + 17, 9, 42, 8, 19, 14, 43, 9, 85, 21, 9, 114, 45, 19, + 86, 17, 72, 20, 9, 9, 32, 46, 117, 9, 123, 69, 9, 25, + 8, 6, 8, 93, 14, 8, 74, 88, 12, 86, 18, 13, 85, 21, + 19, 27, 13, 116, 19, 14, 13, 78, 13, 8, 4, 72, 19, 84, + 9, 8, 65, 120, 45, 19, 14, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2]]) + torch.testing.assert_close(batch["tokens"], expected) + torch.testing.assert_close(batch["tokens_length"], tensor([91])) + assert tokenizer.ids_to_text(expected[0, :91].tolist()) == "[INST] <> Please answer the following based on the previous speech feature. <> non default prompt context [/INST] some transcription" + + expected = tensor([[ 1, 8, 3, 8, 5, 8, 105, 18, 9, 12, 17, 9, 41, 14, + 17, 22, 125, 43, 9, 117, 19, 18, 18, 79, 48, 15, 92, 12, + 17, 9, 42, 8, 19, 14, 43, 9, 85, 21, 9, 114, 45, 19, + 86, 17, 72, 20, 9, 9, 32, 46, 117, 9, 123, 69, 9, 25, + 8, 6, 8, 93, 14, 8, 74, 88, 12, 86, 18, 13, 85, 21, + 19, 27, 13, 116, 19, 14, 13, 78, 13, 8, 4, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2]]) + torch.testing.assert_close(batch["contexts"], expected) + torch.testing.assert_close(batch["context_lengths"], tensor([81])) + assert tokenizer.ids_to_text(expected[0, :81].tolist()) == "[INST] <> Please answer the following based on the previous speech feature. 
<> non default prompt context [/INST]" + + expected = tensor([[ 72, 19, 84, 9, 8, 65, 120, 45, 19, 14, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2]]) + torch.testing.assert_close(batch["answers"], expected) + assert tokenizer.ids_to_text(expected[0, :11].tolist()) == "some transcription" + + expected = tensor([[ 8, 3, 8, 5, 8, 105, 18, 9, 12, 17, 9, 41, 14, 17, + 22, 125, 43, 9, 117, 19, 18, 18, 79, 48, 15, 92, 12, 17, + 9, 42, 8, 19, 14, 43, 9, 85, 21, 9, 114, 45, 19, 86, + 17, 72, 20, 9, 9, 32, 46, 117, 9, 123, 69, 9, 25, 8, + 6, 8, 93, 14, 8, 74, 88, 12, 86, 18, 13, 85, 21, 19, + 27, 13, 116, 19, 14, 13, 78, 13, 8, 4, 72, 19, 84, 9, + 8, 65, 120, 45, 19, 14, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2]]) + torch.testing.assert_close(batch["labels"], expected) + assert tokenizer.ids_to_text(expected[0, :90].tolist()) == "[INST] <> Please answer the following based on the previous speech feature. <> non default prompt context [/INST] some transcription" + + torch.testing.assert_close( + batch["position_ids"], + tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127]]) + ) + + torch.testing.assert_close( + batch["loss_mask"], + tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0.]]) + ) + # fmt: on + + +def test_speechllm_dataset_tokens_to_generate_increases_seq_len(llama_tokenizer, cuts): + tokenizer = llama_tokenizer + text_processor = PromptFormatterTextProcessing(tokenizer=tokenizer, prompt_format="llama2") + + dataset = LhotseAudioQuestionAnswerDataset( + text_processor=text_processor, + default_context="do this task", + tokens_to_generate=0, + pad_to_max_length=False, + max_seq_length=512, + ) + batch = dataset[cuts] + assert batch["tokens"].shape == (1, 91) + assert batch["labels"].shape == (1, 91) + assert batch["contexts"].shape == (1, 81) # was 92 before padding optimization + assert batch["answers"].shape == (1, 11) # was 92 before padding optimization + assert batch["position_ids"].shape == (1, 92) + + dataset = LhotseAudioQuestionAnswerDataset( + text_processor=text_processor, + default_context="do this task", + tokens_to_generate=256, + pad_to_max_length=False, + max_seq_length=512, + ) + batch = dataset[cuts] + assert 
batch["tokens"].shape == (1, 91)
+    assert batch["labels"].shape == (1, 91)
+    assert batch["contexts"].shape == (1, 337)
+    assert batch["answers"].shape == (1, 11)
+    assert batch["position_ids"].shape == (1, 92)
diff --git a/tutorials/multimodal/Multimodal Lhotse Dataloading.ipynb b/tutorials/multimodal/Multimodal Lhotse Dataloading.ipynb
new file mode 100644
index 000000000000..b9ddf350cdca
--- /dev/null
+++ b/tutorials/multimodal/Multimodal Lhotse Dataloading.ipynb
@@ -0,0 +1,1014 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "e930b0c5f0cffbce",
+   "metadata": {},
+   "source": [
+    "# Multimodal Lhotse Dataloading\n",
+    "\n",
+    "This tutorial explains how NeMo uses Lhotse for multimodal dataloading.\n",
+    "The modalities supported as of the time of writing are audio and text.\n",
+    "The intended audience of this tutorial is NeMo developers and people who build or modify NeMo models.\n",
+    "After finishing this tutorial, you should have an understanding of how to use the various Lhotse building blocks in NeMo for designing the kind of model you want.\n",
+    "\n",
+    "We cover the following topics:\n",
+    "* What are data types?\n",
+    "* What data types are available in NeMo?\n",
+    "* How do we read them from files?\n",
+    "* How to apply prompt formatting to various data types?\n",
+    "* How to create tensors for training with these examples?\n",
+    "* How to optimize training by stratifying data sampling on sequence lengths, and how these lengths are measured for different examples and models.\n",
+    "* How to train on multiple data types together?"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "72bd180c65992eba",
+   "metadata": {},
+   "source": [
+    "## Data types\n",
+    "\n",
+    "A data type represents examples of your training data: speech recordings, text sentences, text sentence pairs, conversations, etc.\n",
+    "\n",
+    "A data type consists of:\n",
+    "* a class that represents a single sample\n",
+    "  * includes properties allowing sequence length measurement for sampling purposes\n",
+    "* a parser class that's initialized with a config (e.g. paths to data) and acts as an iterator of examples\n",
+    "* extension functions that define how to apply prompt formatting to a given data type\n",
+    "\n",
+    "NeMo uses Lhotse Cuts as a basic data type for audio, and defines several data types for text. 
We'll go over them below.\n", + "\n", + "External references:\n", + "* [Lhotse documentation](https://lhotse.readthedocs.io/en/latest/getting-started.html)\n", + "* [Lhotse in NeMo documentation](https://docs.nvidia.com/nemo-framework/user-guide/latest/nemotoolkit/asr/datasets.html#lhotse-dataloading)" + ] + }, + { + "cell_type": "markdown", + "id": "cf32bf3ea5a9cb17", + "metadata": {}, + "source": [ + "### Audio examples (Lhotse cuts)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "d2d747f6b32d5942", + "metadata": { + "jupyter": { + "is_executing": true + } + }, + "outputs": [], + "source": [ + "from lhotse import MonoCut, Recording, SupervisionSegment, AudioSource\n", + "from lhotse.testing.dummies import dummy_cut\n", + "\n", + "\n", + "# A basic audio example: recording with transcription\n", + "cut = MonoCut(\n", + " id=\"utt-0\",\n", + " start=0.0,\n", + " duration=10.0,\n", + " channel=0,\n", + " supervisions=[SupervisionSegment(id=\"utt-0\", recording_id=\"rec-0\", start=0.0, duration=10.0, text=\"Welcome to Lhotse!\")],\n", + " recording=Recording(\n", + " id=\"rec-0\",\n", + " sources=[AudioSource(type=\"file\", channels=[0], source=\"/path/to/recording.wav\")],\n", + " sampling_rate=16000,\n", + " duration=10.0,\n", + " num_samples=160000,\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "9b121afd920bdab2", + "metadata": {}, + "source": [ + "## Single text examples " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "41b0c148e0d7ac1c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TextExample(text='This is a single sentence, which may be used in language modeling.', language='en', tokens=None, custom=None)\n" + ] + } + ], + "source": [ + "from nemo.collections.common.data.lhotse.text_adapters import TextExample\n", + "\n", + "# A basic text example: single line of text.\n", + "text = TextExample(\n", + " text=\"This is a single sentence, which may be used in language modeling.\",\n", + " language=\"en\"\n", + ")\n", + "print(text)" + ] + }, + { + "cell_type": "markdown", + "id": "2abb821b69f71a91", + "metadata": {}, + "source": [ + "## Pairs of text examples" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "282560cc3df9174a", + "metadata": {}, + "outputs": [], + "source": [ + "from nemo.collections.common.data.lhotse.text_adapters import SourceTargetTextExample\n", + "\n", + "# A pair of text examples, usable e.g. 
in machine translation.\n", + "text_pair = SourceTargetTextExample(\n", + " source=TextExample(\n", + " text=\"Some machine translation example.\",\n", + " language=\"en\",\n", + " ),\n", + " target=TextExample(\n", + " text=\"Algunos ejemplos de traducción automática.\",\n", + " language=\"es\",\n", + " ),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "858d6cb6abb1ccd6", + "metadata": {}, + "source": [ + "## Conversations: text, audio, and multimodal" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e5bd8caee40100b1", + "metadata": {}, + "outputs": [], + "source": [ + "from nemo.collections.common.data.lhotse.text_adapters import NeMoMultimodalConversation, TextTurn, AudioTurn\n", + "\n", + "# A text-only conversation, useful for chat LLM training.\n", + "text_conversation = NeMoMultimodalConversation(\n", + " id=\"convo-text-0\",\n", + " turns=[\n", + " TextTurn(value=\"Is this a text-only conversation?\", role=\"user\"),\n", + " TextTurn(value=\"Yes, but we can do more than that.\", role=\"assistant\"),\n", + " TextTurn(value=\"Tell me more.\", role=\"user\"),\n", + " TextTurn(value=\"Of course! Let's move on to the next example.\", role=\"assistant\"),\n", + " ]\n", + ")\n", + "\n", + "# An audio-only conversation, useful for chat speech LLM training.\n", + "# We'll explain [audio] tag and token_equivalent_duration later in this tutorial.\n", + "audio_conversation = NeMoMultimodalConversation(\n", + " id=\"convo-audio-0\",\n", + " turns=[\n", + " AudioTurn(cut=dummy_cut(0, duration=7.18, with_data=True), role=\"user\", audio_locator_tag=\"[audio]\"),\n", + " AudioTurn(cut=dummy_cut(0, duration=21.64, with_data=True), role=\"assistant\", audio_locator_tag=\"[audio]\"),\n", + " ],\n", + " token_equivalent_duration=0.08,\n", + ")\n", + "\n", + "# A multimodal conversation.\n", + "multimodal_conversation = NeMoMultimodalConversation(\n", + " id=\"convo-multimodal-0\",\n", + " turns=[\n", + " TextTurn(value=\"Is this a text-only conversation?\", role=\"user\"),\n", + " TextTurn(value=\"No, feel free to speak to me.\", role=\"assistant\"),\n", + " AudioTurn(cut=dummy_cut(0, duration=5.87, with_data=True), role=\"user\", audio_locator_tag=\"[audio]\"),\n", + " TextTurn(value=\"Should I respond in voice too?\", role=\"assistant\"),\n", + " TextTurn(value=\"Yes\", role=\"user\"),\n", + " TextTurn(value=\"Certainly!\", role=\"assistant\"),\n", + " AudioTurn(cut=dummy_cut(0, duration=14.62, with_data=True), role=\"assistant\", audio_locator_tag=\"[audio]\"),\n", + " ],\n", + " token_equivalent_duration=0.08,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "b21e0e5e84904d89", + "metadata": {}, + "source": [ + "As you can see, these data structures serve as a complete description of training examples of different types, \n", + "as they contain both the data (audio) and various metadata." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9198210580be10bf",
+   "metadata": {},
+   "source": [
+    "## Parsing data types from files\n",
+    "\n",
+    "Related: for an overview of the NeMo data configuration format, please see these docs:\n",
+    "* [Extended multi-dataset configuration format](https://docs.nvidia.com/nemo-framework/user-guide/latest/nemotoolkit/asr/datasets.html#extended-multi-dataset-configuration-format)\n",
+    "* [Configuring multi-modal dataloading](https://docs.nvidia.com/nemo-framework/user-guide/latest/nemotoolkit/asr/datasets.html#configuring-multi-modal-dataloading)\n",
+    "\n",
+    "The goal of a data type parser is to read a configuration specifying where the data is located / how to read it,\n",
+    "create an iterable over the corresponding data type, and wrap it into a Lhotse CutSet.\n",
+    "\n",
+    "Adding support for a new data type parser requires two components:\n",
+    "* An adapter/iterator class dedicated to your data type.\n",
+    "* A function that instantiates this adapter/iterator, registered with a `@data_type_parser(\"name\")` decorator to make it auto-detectable by NeMo.\n",
+    "\n",
+    "We'll take a deeper look at how parsing of source-target text example pairs is implemented. We'll implement a custom parser for `SourceTargetTextExample` that reads them from JSON Lines files."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "f0e35b53c7ac77b4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from lhotse.serialization import load_jsonl\n",
+    "import random\n",
+    "from typing import Literal, Iterator\n",
+    "from dataclasses import dataclass\n",
+    "\n",
+    "from lhotse import CutSet\n",
+    "from lhotse.dataset.dataloading import resolve_seed\n",
+    "from omegaconf import DictConfig\n",
+    "from nemo.collections.common.data.lhotse.nemo_adapters import expand_sharded_filepaths\n",
+    "from nemo.collections.common.data.lhotse.cutset import data_type_parser\n",
+    "\n",
+    "\n",
+    "@dataclass\n",
+    "class LhotseTextPairAdapterFromJsonl:\n",
+    "    manifest_path: str | list[str]\n",
+    "    shuffle_shards: bool = False\n",
+    "    shard_seed: int | Literal[\"trng\", \"randomized\"] = \"trng\"\n",
+    "\n",
+    "    def __post_init__(self):\n",
+    "        self.manifest_path = expand_sharded_filepaths(self.manifest_path)\n",
+    "\n",
+    "    def __iter__(self) -> Iterator[SourceTargetTextExample]:\n",
+    "        seed = resolve_seed(self.shard_seed)\n",
+    "        rng = random.Random(seed)\n",
+    "        paths = self.manifest_path\n",
+    "        if self.shuffle_shards:\n",
+    "            rng.shuffle(paths)\n",
+    "        for p in paths:\n",
+    "            for item in load_jsonl(p):\n",
+    "                yield SourceTargetTextExample(\n",
+    "                    source=TextExample(item[\"source\"], item.get(\"source_lang\")),\n",
+    "                    target=TextExample(item[\"target\"], item.get(\"target_lang\")),\n",
+    "                    question=(\n",
+    "                        TextExample(item[\"prompt\"], language=item.get(\"prompt_lang\"))\n",
+    "                        if \"prompt\" in item\n",
+    "                        else None\n",
+    "                    ),\n",
+    "                )\n",
+    "\n",
+    "\n",
+    "@data_type_parser(\"txt_pair_jsonl\")\n",
+    "def read_txt_pair_paths(config: DictConfig) -> tuple[CutSet, bool]:\n",
+    "    cuts = CutSet(\n",
+    "        LhotseTextPairAdapterFromJsonl(\n",
+    "            manifest_path=config.manifest_path,\n",
+    "            shuffle_shards=config.shuffle,\n",
+    "            shard_seed=config.shard_seed,\n",
+    "        )\n",
+    "    )\n",
+    "    if not config.get(\"force_finite\", False):\n",
+    "        cuts = cuts.repeat()\n",
+    "    return cuts, True"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "64367e6596754ee6",
+   "metadata": {},
+   "source": [
+    "Note that there is a bit of boilerplate (`expand_sharded_filepaths`, `force_finite`, 
`shuffle_shards`, `shard_seed`) - we might reduce the amount of necessary boilerplate in the future, but for now it is required.\n", + "\n", + "Let's test that it works. We'll first create two JSONL files (shards) with one entry each, and later use NeMo's path expansion mechanism to provide them as the input configuration.\n", + "\n", + "Then, we'll read it using the high-level API `read_cutset_from_config` that's actually used by NeMo+Lhotse dataloader to show that the auto-registration mechanism works as expected." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7987fce8db39b008", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[NeMo W 2024-10-18 14:12:16 nemo_logging:349] /Users/pzelasko/miniforge3/envs/nemo/lib/python3.10/site-packages/pydub/utils.py:170: RuntimeWarning: Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work\n", + " warn(\"Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work\", RuntimeWarning)\n", + " \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom=None)\n" + ] + } + ], + "source": [ + "!echo '{\"source\": \"A\", \"target\": \"B\"}' >> _tutorial_nmt_0.jsonl\n", + "!echo '{\"source\": \"C\", \"target\": \"D\"}' >> _tutorial_nmt_1.jsonl\n", + "\n", + "from nemo.collections.common.data.lhotse.cutset import read_cutset_from_config\n", + "\n", + "data, use_iterable_dataset = read_cutset_from_config(\n", + " {\n", + " \"input_cfg\": [\n", + " {\n", + " \"type\": \"txt_pair_jsonl\", \n", + " \"manifest_path\": \"_tutorial_nmt__OP_0..1_CL_.jsonl\", \n", + " }\n", + " ]\n", + " }\n", + ")\n", + "\n", + "example = next(iter(data))\n", + "assert isinstance(example, SourceTargetTextExample)\n", + "assert example.source.text == \"A\"\n", + "assert example.target.text == \"B\"\n", + "print(example)" + ] + }, + { + "cell_type": "markdown", + "id": "be48872625d1a2e0", + "metadata": {}, + "source": [ + "## Prompt formatting and conversion of data types to tensors\n", + "\n", + "Since we now understand how data types are read, let's see how to convert them to actual training examples.\n", + "Because this tutorial is focused on multimodal LLM / speech LLM training, we'll be using prompt templates adequate for various LLMs to prepare the training data. In this example, we'll use Llama2 prompt template to format each data type.\n", + "\n", + " We'll need to initialize a prompt formatter and a tokenizer; we'll just train a dummy BPE tokenizer for the purpose of the tutorial." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "6e1d296be0d363d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[NeMo I 2024-10-18 14:12:19 sentencepiece_tokenizer:333] tokenizer model _tutorial_spt/tokenizer.model already exists\n" + ] + } + ], + "source": [ + "import string\n", + "import shlex\n", + "from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer, create_spt_model\n", + "from nemo.collections.common.prompts.formatter import PromptFormatter\n", + "\n", + "!echo {shlex.quote(' '.join(string.printable))} > _tutorial_train_text.txt\n", + "\n", + "tok_path, vocab_path = create_spt_model(\n", + " data_file=\"_tutorial_train_text.txt\", \n", + " output_dir=\"_tutorial_spt\",\n", + " vocab_size=512, \n", + " sample_size=-1, \n", + " do_lower_case=False, \n", + " bos=True, \n", + " eos=True, \n", + " pad=True, \n", + " user_defined_symbols=[\"[INST]\", \"[/INST]\", \"<>\", \"<>\", \"[audio]\"]\n", + ")\n", + "\n", + "tokenizer = SentencePieceTokenizer(tok_path)\n", + "prompt = PromptFormatter.resolve(\"llama2\")(tokenizer)" + ] + }, + { + "cell_type": "markdown", + "id": "6988777c9dc1653b", + "metadata": {}, + "source": [ + "Now, we'll convert the data types to a training/inference friendly format. Specifically, we want to have 4 tensors:\n", + "* `context_ids`: token IDs that serve as the input for LLM (e.g. user query, conversation history, etc.)\n", + "* `answer_ids`: token IDs that serve as the answer for LLM (assistant response)\n", + "* `input_ids`: concatenated `context_ids` and `answer_ids`\n", + "* `mask`: loss mask that's only set to `True` for each token belonging to each of assistant's turns. Same length as `input_ids`.\n", + "\n", + "Let's first go through Cut, SourceTargetTextExample, and NeMoMultimodalConversation to see what happens with them." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "5f8c0a54189e443d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cut:\n", + "\t* input_ids [INST] Repeat after me: [/INST] Welcome to Lhotse!\n", + "\t* context_ids [INST] Repeat after me: [/INST]\n", + "\t* answer_ids Welcome to Lhotse!\n", + "loss mask tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True])\n", + "\n", + "SourceTargetTextExample:\n", + "\t* input_ids [INST] Some machine translation example. [/INST] Algunos ejemplos de traducci ⁇ n autom ⁇ tica.\n", + "\t* context_ids [INST] Some machine translation example. 
[/INST]\n", + "\t* answer_ids Algunos ejemplos de traducci ⁇ n autom ⁇ tica.\n", + "loss mask tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True])\n", + "\n", + "NeMoMultimodalConversation:\n", + "\t* input_ids [INST] Is this a text-only conversation? [/INST] No, feel free to speak to me. [INST] [audio] [/INST] Should I respond in voice too? [INST] Yes [/INST] Certainly! [audio]\n", + "\t* context_ids [INST] Is this a text-only conversation? [/INST] No, feel free to speak to me. [INST] [audio] [/INST] Should I respond in voice too? [INST] Yes [/INST]\n", + "\t* answer_ids Certainly! [audio]\n", + "loss mask tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " False, False, False, False, False, False, False, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, False,\n", + " False, False, False, False, False, False, False, False, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True])\n", + "\n" + ] + } + ], + "source": [ + "from nemo.collections.common.data.prompt_fn import apply_prompt_format_fn\n", + "\n", + "cut.context = \"Repeat after me:\"\n", + "print(\"Cut:\")\n", + "formatted = apply_prompt_format_fn(cut, prompt)\n", + "for name in [\"input_ids\", \"context_ids\", \"answer_ids\"]:\n", + " print(\"\\t*\", name, tokenizer.ids_to_text(formatted[name]))\n", + "print(\"loss mask\", formatted[\"mask\"])\n", + "print()\n", + "\n", + "print(\"SourceTargetTextExample:\")\n", + "formatted = apply_prompt_format_fn(text_pair, prompt)\n", + "for name in [\"input_ids\", \"context_ids\", \"answer_ids\"]:\n", + " print(\"\\t*\", name, tokenizer.ids_to_text(formatted[name]))\n", + "print(\"loss mask\", formatted[\"mask\"])\n", + "print()\n", + "\n", + "print(\"NeMoMultimodalConversation:\")\n", + "formatted = apply_prompt_format_fn(multimodal_conversation, prompt)\n", + "for name in [\"input_ids\", \"context_ids\", \"answer_ids\"]:\n", + " print(\"\\t*\", name, tokenizer.ids_to_text(formatted[name]))\n", + "print(\"loss mask\", formatted[\"mask\"])\n", + "print()" + ] + }, + { + "cell_type": "markdown", + "id": "e1b50937e5f75d10", + "metadata": {}, + "source": [ + "Note how each example got converted into the same prompt format. \n", + "\n", + "For multimodal conversation we have a special mechanism that replaces audio turns with an `audio_locator_tag`. 
\n", + "We expect that the tokenizer contains this tag as a special token.\n", + "The user will later replace these special tokens with audio representations (tokenized, or not) in the training step of the model. \n", + "\n", + "If you create a new prompt format, or a new data type, or want to specialize how a given data type is formatted with a given prompt, it is easily customizable by defining a single function with `@registered_prompt_format_fn(DataType, PromptFormatterType)` decorator. For example, if we created a new data type called `TextTriplet`, and added a default prompt format function, and another one specialized for Llama2:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "108b3593a5f16444", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "input_ids tensor([ 1, 9, 4, 9, 6, 9, 42, 9, 7, 9, 43, 9, 5, 9, 44, 2])\n", + "context_ids tensor([ 1, 9, 4, 9, 6, 9, 42, 9, 7, 9, 43, 9, 5])\n", + "answer_ids tensor([ 9, 44, 2])\n", + "mask tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, True, True, True])\n" + ] + } + ], + "source": [ + "from nemo.collections.common.prompts import Llama2PromptFormatter\n", + "from nemo.collections.common.data.prompt_fn import registered_prompt_format_fn\n", + "from nemo.collections.common.data.lhotse.text_adapters import Formattable, CustomFieldMixin\n", + "\n", + "\n", + "@dataclass\n", + "class TextTriplet(Formattable, CustomFieldMixin):\n", + " # Note: we will explain Formattable and CustomFieldMixin in the next sections.\n", + " text1: str\n", + " text2: str\n", + " text3: str\n", + "\n", + "\n", + "@registered_prompt_format_fn(TextTriplet)\n", + "def text_triplets_generic(example: TextTriplet, prompt: PromptFormatter):\n", + " return prompt.encode_dialog(turns=[\n", + " {\"role\": \"user\", \"slots\": {\"message\": f\"{example.text1} {example.text2}\"}},\n", + " {\"role\": \"assistant\", \"slots\": {\"message\": f\"{example.text3}\"}},\n", + " ])\n", + "\n", + " \n", + "@registered_prompt_format_fn(TextTriplet, Llama2PromptFormatter)\n", + "def text_triplets_llama2(example: TextTriplet, prompt: Llama2PromptFormatter):\n", + " return prompt.encode_dialog(turns=[\n", + " {\"role\": \"system_and_user\", \"slots\": {\"system\": example.text1 , \"message\": example.text2}},\n", + " {\"role\": \"assistant\", \"slots\": {\"message\": example.text3}},\n", + " ])\n", + "\n", + "\n", + "formatted = apply_prompt_format_fn(TextTriplet(\"A\", \"B\", \"C\"), prompt)\n", + "for k, v in formatted.items():\n", + " print(k, v)" + ] + }, + { + "cell_type": "markdown", + "id": "9565bef14a863465", + "metadata": {}, + "source": [ + "If we also created a data type parser for `TextTriplet` like we did for `SourceTargetTextExample` in the section before, we have a complete new data type support for dataloading. " + ] + }, + { + "cell_type": "markdown", + "id": "6ac39c8fcbcf5860", + "metadata": {}, + "source": [ + "## Support for sequence length stratification / dynamic bucketing\n", + "\n", + "References: \n", + "* [EMMeTT: Efficient Multimodal Machine Translation Training](https://arxiv.org/abs/2409.13523) \n", + "\n", + "We found that by using dynamic bucketing with [OOMptimizer](https://github.com/NVIDIA/NeMo/blob/main/docs/source/asr/datasets.rst#pushing-gpu-utilization-to-the-limits-with-bucketing-and-oomptimizer) can significantly accelerate multimodal LLM training. 
\n", + "In order to ensure that all data types can benefit from this acceleration, we introduced the `Formattable` concept.\n", + "It indicates that a given data type supports prompt formatting and provides properties to measure input and output sequence length.\n", + "\n", + "Let's see this in action with the previously formatted data types:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f5ca38ea137f8210", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SourceTargetTextPair:\n", + "\t* input_length 39\n", + "\t* output_length 44\n", + "\t* total_length 83\n", + "\t* len(context_ids) 39\n", + "\t* len(answer_ids) 44\n", + "\t* len(input_ids) 83\n", + "NeMoMultimodalConversation\n", + "\t* input_length 191\n", + "\t* output_length 196\n", + "\t* total_length 387\n", + "\t* len(context_ids) 118\n", + "\t* len(answer_ids) 14\n", + "\t* len(input_ids) 132\n" + ] + } + ], + "source": [ + "print(\"SourceTargetTextPair:\")\n", + "text_pair = text_pair.apply_prompt_format(prompt)\n", + "print(\"\\t*\", \"input_length\", text_pair.input_length)\n", + "print(\"\\t*\", \"output_length\", text_pair.output_length)\n", + "print(\"\\t*\", \"total_length\", text_pair.total_length)\n", + "print(\"\\t*\", \"len(context_ids)\", len(text_pair.context_ids))\n", + "print(\"\\t*\", \"len(answer_ids)\", len(text_pair.answer_ids))\n", + "print(\"\\t*\", \"len(input_ids)\", len(text_pair.input_ids))\n", + "\n", + "print(\"NeMoMultimodalConversation\")\n", + "text_pair = multimodal_conversation.apply_prompt_format(prompt)\n", + "print(\"\\t*\", \"input_length\", multimodal_conversation.input_length)\n", + "print(\"\\t*\", \"output_length\", multimodal_conversation.output_length)\n", + "print(\"\\t*\", \"total_length\", multimodal_conversation.total_length)\n", + "print(\"\\t*\", \"len(context_ids)\", len(multimodal_conversation.context_ids))\n", + "print(\"\\t*\", \"len(answer_ids)\", len(multimodal_conversation.answer_ids))\n", + "print(\"\\t*\", \"len(input_ids)\", len(multimodal_conversation.input_ids))\n" + ] + }, + { + "cell_type": "markdown", + "id": "ecca372c2a0cad6e", + "metadata": {}, + "source": [ + "Note that for `NeMoMultimodalConversation` the length is much greater that the number of text tokens. \n", + "This is where `token_equivalent_duration` comes in: we want to factor in the audio turns into sequence lengths.\n", + "Since we know what is the duration of audio, we only need to know how much duration should be covered by each audio \"token\" or \"frame\".\n", + "A typical setup would be with NeMo FastConformer as an audio encoder, which uses 10ms frames at the input and subsamples them by a factor of 8 in the output. \n", + "The resulting `token_equivalent_duration` is therefore `0.08`, i.e., a single token created from audio is worth 80ms of duration. 
\n", + "For length computation, we sum the number of text tokens and the equivalent number of audio tokens.\n", + "\n", + "We can see that lhotse's `DynamicBucketingSampler` is able to process this data using NeMo multimodal sampling strategies:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "6e295cfbfe8ff69b", + "metadata": {}, + "outputs": [], + "source": [ + "from lhotse.dataset import DynamicBucketingSampler\n", + "from nemo.collections.common.data.lhotse.sampling import MultimodalFixedBucketBatchSizeConstraint2D\n", + "\n", + "cuts = CutSet([multimodal_conversation]).repeat() # repeat makes iterable infinite\n", + "sampler = DynamicBucketingSampler(\n", + " cuts, \n", + " constraint=MultimodalFixedBucketBatchSizeConstraint2D(\n", + " max_seq_len_buckets=[32, 64, 128, 256, 512, 1024, 1536, 2048],\n", + " batch_sizes=[8, 7, 6, 5, 4, 3, 2, 1],\n", + " token_equivalent_duration=0.08, \n", + " measure_total_length=True,\n", + " ),\n", + " buffer_size=10,\n", + ")\n", + "\n", + "batch = next(iter(sampler))\n", + "assert len(batch) == 4 \n", + "# Our conversation example fell into bucket number 4 (min: 256, max: 512) with an assigned batch size of 4" + ] + }, + { + "cell_type": "markdown", + "id": "4ff5baae-0771-4ac9-aa68-c3faee5aa261", + "metadata": {}, + "source": [ + "## Putting it all together to configure joint audio, text, and conversation dataloading\n", + "\n", + "We'll showcase some higher level APIs here. First, we'll create data examples on disk for three distinct types: audio to text, text to text, and multimodal conversations." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "5a0e5433-3e63-4ab2-9290-001159a9b8e0", + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "from lhotse.serialization import save_to_jsonl\n", + "from lhotse.testing.dummies import dummy_recording\n", + "\n", + "# Prepare dummy ASR data\n", + "d = Path(\"_tutorial_data\")\n", + "!mkdir -p {d}/asr_shar\n", + "cut = dummy_recording(0, duration=17.11, with_data=True).to_cut()\n", + "cut.supervisions = [SupervisionSegment(id=cut.id, recording_id=cut.id, start=0.0, duration=cut.duration, text=\"Welcome to Lhotse!\")]\n", + "cut.context = \"Repeat after me\"\n", + "CutSet([cut.save_audio(d / \"rec.flac\")]).to_shar(d / \"asr_shar\", fields={\"recording\": \"flac\"})\n", + "\n", + "# Prepare dummy translation data\n", + "(d / \"src.txt\").write_text(\"A\")\n", + "(d / \"tgt.txt\").write_text(\"B\")\n", + "\n", + "# Prepare dummy multimodal conversation\n", + "save_to_jsonl(\n", + " [\n", + " {\n", + " \"id\": \"convo-1\",\n", + " \"conversations\": [\n", + " {\"from\": \"user\", \"value\": \"tell me what you hear\", \"type\": \"text\"},\n", + " {\"from\": \"user\", \"value\": str(d / \"rec.flac\"), \"duration\": cut.duration, \"type\": \"audio\"},\n", + " {\"from\": \"assistant\", \"value\": \"somebody just welcomed me to a himalayan mountain\", \"type\": \"text\"},\n", + " ]\n", + " }\n", + " ],\n", + " d / \"conv.jsonl\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3a4d669b-f816-4522-a491-ba31bfbf689c", + "metadata": {}, + "source": [ + "Now we'll configure a Lhotse dataloader to yield mini-batches with different data types in a round-robin fashion." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "c4a7364e-c00f-4f60-9d72-9e7d228121cb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[NeMo I 2024-10-18 14:12:19 dataloader:481] Creating a Lhotse DynamicBucketingSampler (max_batch_duration=None max_batch_size=None)\n", + "[NeMo I 2024-10-18 14:12:19 dataloader:481] Creating a Lhotse DynamicBucketingSampler (max_batch_duration=None max_batch_size=None)\n", + "[NeMo I 2024-10-18 14:12:19 dataloader:481] Creating a Lhotse DynamicBucketingSampler (max_batch_duration=None max_batch_size=None)\n" + ] + } + ], + "source": [ + "import torch\n", + "from omegaconf import OmegaConf\n", + "from nemo.collections.common.data.lhotse.dataloader import get_lhotse_dataloader_from_config\n", + "\n", + "# This configuration is typically present in NeMo training configs under `model.train_ds` key.\n", + "cfg = OmegaConf.create({\n", + " # Note that we have several sampler groups under keys: \"asr\", \"nmt\", and \"chat\".\n", + " # Each group has its own data source and sampling settings, i.e., you can define\n", + " # completely different batch sizes, sequence length filters, etc. for each type of data.\n", + " # To enable this behaviour, set multi_config to True.\n", + " \"multi_config\": True,\n", + " \n", + " # The following fields are shared by all groups.\n", + " # sampler_fusion key determines how to yield batches from different samplers:\n", + " # * \"round_robin\" will just yield one type at a time\n", + " # * \"zip\" will sample a batch for each type and concatenate them, yielding a larger multimodal batch\n", + " # * \"randomized_round_robin\" expects an extra \"sampler_weights\" option which will define sampling probs for each group.:\n", + " \"sampler_fusion\": \"round_robin\",\n", + " \"shuffle\": True,\n", + " \"num_workers\": 0,\n", + " \"seed\": 0,\n", + " \"shard_seed\": \"trng\",\n", + " \n", + " \"asr\": {\n", + " \"input_cfg\": [\n", + " {\n", + " \"type\": \"lhotse_shar\", \n", + " \"shar_path\": d / \"asr_shar\"\n", + " }\n", + " ],\n", + " \"min_duration\": 0.5,\n", + " \"max_duration\": 40,\n", + " \"use_bucketing\": True,\n", + " \"bucket_duration_bins\": [5, 10, 20, 40],\n", + " \"bucket_batch_size\": [4, 3, 2, 1],\n", + " \"prompt_format\": \"llama2\",\n", + "\n", + " # Simplified settings for quick tutorial running (don't use those in real applciations).\n", + " \"concurrent_bucketing\": False,\n", + " \"bucket_buffer_size\": 50,\n", + " \"shuffle_buffer_size\": 50,\n", + " },\n", + "\n", + " \"nmt\": {\n", + " \"input_cfg\": [\n", + " {\n", + " \"type\": \"txt_pair\", \n", + " \"source_paths\": d / \"src.txt\", \n", + " \"target_paths\": d / \"tgt.txt\"\n", + " }\n", + " ],\n", + " \"use_multimodal_sampling\": True, # will count tokens instead of seconds\n", + " \"min_tokens\": 1,\n", + " \"max_tokens\": 32,\n", + " \"measure_total_length\": False, # filters by input length instead of total length\n", + " \"use_bucketing\": True,\n", + " \"bucket_duration_bins\": [[16, 16], [16, 32], [32, 16], [32, 32]], # 2D buckets\n", + " \"bucket_batch_size\": [4, 3, 2, 1],\n", + " \"prompt_format\": \"llama2\",\n", + " \n", + " # Simplified settings for quick tutorial running (don't use those in real applciations).\n", + " \"concurrent_bucketing\": False,\n", + " \"bucket_buffer_size\": 50,\n", + " \"shuffle_buffer_size\": 50,\n", + " },\n", + "\n", + " \"chat\": {\n", + " \"input_cfg\": [\n", + " {\n", + " \"type\": \"multimodal_conversation\", \n", + " 
\"manifest_filepath\": d / \"conv.jsonl\", \n", + " \"audio_locator_tag\": \"[audio]\"\n", + " }\n", + " ],\n", + " \"use_multimodal_sampling\": True, # will count tokens instead of seconds\n", + " \"min_tokens\": 1,\n", + " \"max_tokens\": 1024,\n", + " \"measure_total_length\": True,\n", + " \"token_equivalent_duration\": 0.08,\n", + " \"use_bucketing\": True,\n", + " \"bucket_duration_bins\": [128, 256, 512, 1024],\n", + " \"bucket_batch_size\": [4, 3, 2, 1],\n", + " \"prompt_format\": \"llama2\",\n", + "\n", + " # Simplified settings for quick tutorial running (don't use those in real applciations).\n", + " \"concurrent_bucketing\": False,\n", + " \"bucket_buffer_size\": 50,\n", + " \"shuffle_buffer_size\": 50,\n", + " },\n", + "})\n", + "\n", + "\n", + "# A no-op PyTorch Dataset class that will just return the data structures.\n", + "# In a real training setup, you'll want to implement conversion of a list of examples to a tensor mini-batch\n", + "# that is adequate for your model. \n", + "# Note that you can handle multiple types of examples to create appropriate mini-batch schema for each.\n", + "class Identity(torch.utils.data.Dataset):\n", + " def __getitem__(self, examples: CutSet):\n", + " return examples\n", + "\n", + "dloader = get_lhotse_dataloader_from_config(cfg, global_rank=0, world_size=1, dataset=Identity(), tokenizer=tokenizer)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "e8768e28-663b-4d69-bb31-fbd6b80c0389", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Step 0. Examples:\n", + "\t* MonoCut(id='dummy-recording-0000_repeat10', start=0, duration=17.11, channel=0, supervisions=[SupervisionSegment(id='dummy-recording-0000', recording_id='dummy-recording-0000', start=0.0, duration=17.11, channel=0, text='Welcome to Lhotse!', language=None, speaker=None, gender=None, custom=None, alignment=None)], features=None, recording=Recording(id='rec', sources=[AudioSource(type='memory', channels=[0], source='')], sampling_rate=16000, num_samples=273760, duration=17.11, channel_ids=[0], transforms=None), custom={'context': 'Repeat after me', 'shard_origin': PosixPath('_tutorial_data/asr_shar/cuts.000000.jsonl.gz'), 'shar_epoch': 10, 'input_ids': tensor([ 1, 9, 4, 9, 59, 78, 89, 78, 74, 93, 9, 74, 79, 93, 78, 91, 9, 86,\n", + " 78, 9, 5, 9, 64, 78, 85, 76, 88, 86, 78, 9, 93, 88, 9, 53, 81, 88,\n", + " 93, 92, 78, 10, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 59, 78, 89, 78, 74, 93, 9, 74, 79, 93, 78, 91, 9, 86,\n", + " 78, 9, 5]), 'answer_ids': tensor([ 9, 64, 78, 85, 76, 88, 86, 78, 9, 93, 88, 9, 53, 81, 88, 93, 92, 78,\n", + " 10, 2]), 'mask': tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* MonoCut(id='dummy-recording-0000_repeat41', start=0, duration=17.11, channel=0, supervisions=[SupervisionSegment(id='dummy-recording-0000', recording_id='dummy-recording-0000', start=0.0, duration=17.11, channel=0, text='Welcome to Lhotse!', language=None, speaker=None, gender=None, custom=None, alignment=None)], features=None, recording=Recording(id='rec', sources=[AudioSource(type='memory', channels=[0], source='')], sampling_rate=16000, num_samples=273760, duration=17.11, 
channel_ids=[0], transforms=None), custom={'context': 'Repeat after me', 'shard_origin': PosixPath('_tutorial_data/asr_shar/cuts.000000.jsonl.gz'), 'shar_epoch': 41, 'input_ids': tensor([ 1, 9, 4, 9, 59, 78, 89, 78, 74, 93, 9, 74, 79, 93, 78, 91, 9, 86,\n", + " 78, 9, 5, 9, 64, 78, 85, 76, 88, 86, 78, 9, 93, 88, 9, 53, 81, 88,\n", + " 93, 92, 78, 10, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 59, 78, 89, 78, 74, 93, 9, 74, 79, 93, 78, 91, 9, 86,\n", + " 78, 9, 5]), 'answer_ids': tensor([ 9, 64, 78, 85, 76, 88, 86, 78, 9, 93, 88, 9, 53, 81, 88, 93, 92, 78,\n", + " 10, 2]), 'mask': tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\n", + "Step 1. Examples:\n", + "\t* SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom={'input_ids': tensor([ 1, 9, 4, 9, 42, 9, 5, 9, 43, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 42, 9, 5]), 'answer_ids': tensor([ 9, 43, 2]), 'mask': tensor([False, False, False, False, False, False, False, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom={'input_ids': tensor([ 1, 9, 4, 9, 42, 9, 5, 9, 43, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 42, 9, 5]), 'answer_ids': tensor([ 9, 43, 2]), 'mask': tensor([False, False, False, False, False, False, False, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom={'input_ids': tensor([ 1, 9, 4, 9, 42, 9, 5, 9, 43, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 42, 9, 5]), 'answer_ids': tensor([ 9, 43, 2]), 'mask': tensor([False, False, False, False, False, False, False, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom={'input_ids': tensor([ 1, 9, 4, 9, 42, 9, 5, 9, 43, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 42, 9, 5]), 'answer_ids': tensor([ 9, 43, 2]), 'mask': tensor([False, False, False, False, False, False, False, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\n", + "Step 2. 
Examples:\n", + "\t* NeMoMultimodalConversation(id='convo-1_repeat0', turns=[TextTurn(value='tell me what you hear', role='user'), AudioTurn(cut=MonoCut(id='rec', start=0.0, duration=17.11, channel=0, supervisions=[], features=None, recording=Recording(id='rec', sources=[AudioSource(type='file', channels=[0], source='_tutorial_data/rec.flac')], sampling_rate=16000, num_samples=273760, duration=17.11, channel_ids=[0], transforms=None), custom=None), role='user', audio_locator_tag='[audio]'), TextTurn(value='somebody just welcomed me to a himalayan mountain', role='assistant')], token_equivalent_duration=0.08, custom={'input_ids': tensor([ 1, 9, 4, 9, 93, 78, 85, 85, 9, 86, 78, 9, 96, 81, 74, 93, 9, 98,\n", + " 88, 94, 9, 81, 78, 74, 91, 9, 8, 9, 5, 9, 92, 88, 86, 78, 75, 88,\n", + " 77, 98, 9, 83, 94, 92, 93, 9, 96, 78, 85, 76, 88, 86, 78, 77, 9, 86,\n", + " 78, 9, 93, 88, 9, 74, 9, 81, 82, 86, 74, 85, 74, 98, 74, 87, 9, 86,\n", + " 88, 94, 87, 93, 74, 82, 87, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 93, 78, 85, 85, 9, 86, 78, 9, 96, 81, 74, 93, 9, 98,\n", + " 88, 94, 9, 81, 78, 74, 91, 9, 8, 9, 5]), 'answer_ids': tensor([ 9, 92, 88, 86, 78, 75, 88, 77, 98, 9, 83, 94, 92, 93, 9, 96, 78, 85,\n", + " 76, 88, 86, 78, 77, 9, 86, 78, 9, 93, 88, 9, 74, 9, 81, 82, 86, 74,\n", + " 85, 74, 98, 74, 87, 9, 86, 88, 94, 87, 93, 74, 82, 87, 2]), 'mask': tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* NeMoMultimodalConversation(id='convo-1_repeat1', turns=[TextTurn(value='tell me what you hear', role='user'), AudioTurn(cut=MonoCut(id='rec', start=0.0, duration=17.11, channel=0, supervisions=[], features=None, recording=Recording(id='rec', sources=[AudioSource(type='file', channels=[0], source='_tutorial_data/rec.flac')], sampling_rate=16000, num_samples=273760, duration=17.11, channel_ids=[0], transforms=None), custom=None), role='user', audio_locator_tag='[audio]'), TextTurn(value='somebody just welcomed me to a himalayan mountain', role='assistant')], token_equivalent_duration=0.08, custom={'input_ids': tensor([ 1, 9, 4, 9, 93, 78, 85, 85, 9, 86, 78, 9, 96, 81, 74, 93, 9, 98,\n", + " 88, 94, 9, 81, 78, 74, 91, 9, 8, 9, 5, 9, 92, 88, 86, 78, 75, 88,\n", + " 77, 98, 9, 83, 94, 92, 93, 9, 96, 78, 85, 76, 88, 86, 78, 77, 9, 86,\n", + " 78, 9, 93, 88, 9, 74, 9, 81, 82, 86, 74, 85, 74, 98, 74, 87, 9, 86,\n", + " 88, 94, 87, 93, 74, 82, 87, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 93, 78, 85, 85, 9, 86, 78, 9, 96, 81, 74, 93, 9, 98,\n", + " 88, 94, 9, 81, 78, 74, 91, 9, 8, 9, 5]), 'answer_ids': tensor([ 9, 92, 88, 86, 78, 75, 88, 77, 98, 9, 83, 94, 92, 93, 9, 96, 78, 85,\n", + " 76, 88, 86, 78, 77, 9, 86, 78, 9, 93, 88, 9, 74, 9, 81, 82, 86, 74,\n", + " 85, 74, 98, 74, 87, 9, 86, 88, 94, 87, 93, 74, 82, 87, 2]), 'mask': tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, 
False, False, False, False, False, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* NeMoMultimodalConversation(id='convo-1_repeat2', turns=[TextTurn(value='tell me what you hear', role='user'), AudioTurn(cut=MonoCut(id='rec', start=0.0, duration=17.11, channel=0, supervisions=[], features=None, recording=Recording(id='rec', sources=[AudioSource(type='file', channels=[0], source='_tutorial_data/rec.flac')], sampling_rate=16000, num_samples=273760, duration=17.11, channel_ids=[0], transforms=None), custom=None), role='user', audio_locator_tag='[audio]'), TextTurn(value='somebody just welcomed me to a himalayan mountain', role='assistant')], token_equivalent_duration=0.08, custom={'input_ids': tensor([ 1, 9, 4, 9, 93, 78, 85, 85, 9, 86, 78, 9, 96, 81, 74, 93, 9, 98,\n", + " 88, 94, 9, 81, 78, 74, 91, 9, 8, 9, 5, 9, 92, 88, 86, 78, 75, 88,\n", + " 77, 98, 9, 83, 94, 92, 93, 9, 96, 78, 85, 76, 88, 86, 78, 77, 9, 86,\n", + " 78, 9, 93, 88, 9, 74, 9, 81, 82, 86, 74, 85, 74, 98, 74, 87, 9, 86,\n", + " 88, 94, 87, 93, 74, 82, 87, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 93, 78, 85, 85, 9, 86, 78, 9, 96, 81, 74, 93, 9, 98,\n", + " 88, 94, 9, 81, 78, 74, 91, 9, 8, 9, 5]), 'answer_ids': tensor([ 9, 92, 88, 86, 78, 75, 88, 77, 98, 9, 83, 94, 92, 93, 9, 96, 78, 85,\n", + " 76, 88, 86, 78, 77, 9, 86, 78, 9, 93, 88, 9, 74, 9, 81, 82, 86, 74,\n", + " 85, 74, 98, 74, 87, 9, 86, 88, 94, 87, 93, 74, 82, 87, 2]), 'mask': tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\n", + "Step 3. 
Examples:\n", + "\t* MonoCut(id='dummy-recording-0000_repeat67', start=0, duration=17.11, channel=0, supervisions=[SupervisionSegment(id='dummy-recording-0000', recording_id='dummy-recording-0000', start=0.0, duration=17.11, channel=0, text='Welcome to Lhotse!', language=None, speaker=None, gender=None, custom=None, alignment=None)], features=None, recording=Recording(id='rec', sources=[AudioSource(type='memory', channels=[0], source='')], sampling_rate=16000, num_samples=273760, duration=17.11, channel_ids=[0], transforms=None), custom={'context': 'Repeat after me', 'shard_origin': PosixPath('_tutorial_data/asr_shar/cuts.000000.jsonl.gz'), 'shar_epoch': 67, 'input_ids': tensor([ 1, 9, 4, 9, 59, 78, 89, 78, 74, 93, 9, 74, 79, 93, 78, 91, 9, 86,\n", + " 78, 9, 5, 9, 64, 78, 85, 76, 88, 86, 78, 9, 93, 88, 9, 53, 81, 88,\n", + " 93, 92, 78, 10, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 59, 78, 89, 78, 74, 93, 9, 74, 79, 93, 78, 91, 9, 86,\n", + " 78, 9, 5]), 'answer_ids': tensor([ 9, 64, 78, 85, 76, 88, 86, 78, 9, 93, 88, 9, 53, 81, 88, 93, 92, 78,\n", + " 10, 2]), 'mask': tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* MonoCut(id='dummy-recording-0000_repeat16', start=0, duration=17.11, channel=0, supervisions=[SupervisionSegment(id='dummy-recording-0000', recording_id='dummy-recording-0000', start=0.0, duration=17.11, channel=0, text='Welcome to Lhotse!', language=None, speaker=None, gender=None, custom=None, alignment=None)], features=None, recording=Recording(id='rec', sources=[AudioSource(type='memory', channels=[0], source='')], sampling_rate=16000, num_samples=273760, duration=17.11, channel_ids=[0], transforms=None), custom={'context': 'Repeat after me', 'shard_origin': PosixPath('_tutorial_data/asr_shar/cuts.000000.jsonl.gz'), 'shar_epoch': 16, 'input_ids': tensor([ 1, 9, 4, 9, 59, 78, 89, 78, 74, 93, 9, 74, 79, 93, 78, 91, 9, 86,\n", + " 78, 9, 5, 9, 64, 78, 85, 76, 88, 86, 78, 9, 93, 88, 9, 53, 81, 88,\n", + " 93, 92, 78, 10, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 59, 78, 89, 78, 74, 93, 9, 74, 79, 93, 78, 91, 9, 86,\n", + " 78, 9, 5]), 'answer_ids': tensor([ 9, 64, 78, 85, 76, 88, 86, 78, 9, 93, 88, 9, 53, 81, 88, 93, 92, 78,\n", + " 10, 2]), 'mask': tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\n", + "Step 4. 
Examples:\n", + "\t* SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom={'input_ids': tensor([ 1, 9, 4, 9, 42, 9, 5, 9, 43, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 42, 9, 5]), 'answer_ids': tensor([ 9, 43, 2]), 'mask': tensor([False, False, False, False, False, False, False, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom={'input_ids': tensor([ 1, 9, 4, 9, 42, 9, 5, 9, 43, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 42, 9, 5]), 'answer_ids': tensor([ 9, 43, 2]), 'mask': tensor([False, False, False, False, False, False, False, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom={'input_ids': tensor([ 1, 9, 4, 9, 42, 9, 5, 9, 43, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 42, 9, 5]), 'answer_ids': tensor([ 9, 43, 2]), 'mask': tensor([False, False, False, False, False, False, False, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\t* SourceTargetTextExample(source=TextExample(text='A', language=None, tokens=None, custom=None), target=TextExample(text='B', language=None, tokens=None, custom=None), question=None, custom={'input_ids': tensor([ 1, 9, 4, 9, 42, 9, 5, 9, 43, 2]), 'context_ids': tensor([ 1, 9, 4, 9, 42, 9, 5]), 'answer_ids': tensor([ 9, 43, 2]), 'mask': tensor([False, False, False, False, False, False, False, True, True, True]), 'dataloading_info': {'rank': 0, 'world_size': 1, 'worker_id': None}})\n", + "\n" + ] + } + ], + "source": [ + "for idx, batch in enumerate(dloader):\n", + " if idx == 5:\n", + " break\n", + " print(f\"Step {idx}. 
Examples:\")\n", + " for item in batch:\n", + " print(\"\\t*\", item)\n", + " print()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "704c44f5-bcce-4b4f-828b-fa1e18de8d71", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tutorials/multimodal/Prompt Formatter Tutorial.ipynb b/tutorials/multimodal/Prompt Formatter Tutorial.ipynb new file mode 100644 index 000000000000..85f220115e13 --- /dev/null +++ b/tutorials/multimodal/Prompt Formatter Tutorial.ipynb @@ -0,0 +1,458 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "cd408a7a-d4b6-4f33-83d3-c607dbc5f580", + "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + } + }, + "source": [ + "# Prompt Formatter Tutorial\n", + "\n", + "This tutorial introduces NeMo's PromptFormatter API available in module `nemo.collections.common.prompts`.\n", + "After finishing this tutorial you will be familiar with the existing prompt formatters, how to use them, and how to build your own.\n", + "\n", + "We cover the following topics:\n", + "\n", + "* Using existing prompt formatters with Llama2 as an example.\n", + "\n", + "* Defining your own prompt formatter.\n", + "\n", + "We also support applying prompt formatters for multimodal data and Lhotse-compatible data types. To learn more, see our other tutorial: [Multimodal Lhotse Dataloading](./Multimodal Lhotse Dataloading.ipynb)" + ] + }, + { + "cell_type": "markdown", + "id": "3f87f30c-79c0-41e8-b126-283ff5436465", + "metadata": {}, + "source": [ + "### Pre-requsite: building a dummy tokenizer\n", + "\n", + "We're going to need a tokenizer to work with prompt formatters - we'll just build a dummy one for the purpose of this tutorial." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "e91ebef5-9a25-4eb1-8211-d0f5990f7c37", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/pzelasko/miniforge3/envs/nemo/lib/python3.10/site-packages/transformers/utils/generic.py:441: FutureWarning: `torch.utils._pytree._register_pytree_node` is deprecated. 
Please use `torch.utils._pytree.register_pytree_node` instead.\n", + " _torch_pytree._register_pytree_node(\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[NeMo I 2024-10-23 11:26:41 sentencepiece_tokenizer:333] tokenizer model _tutorial_spt/tokenizer.model already exists\n" + ] + } + ], + "source": [ + "import string\n", + "import shlex\n", + "from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer, create_spt_model\n", + "\n", + "!echo {shlex.quote(' '.join(string.printable))} > _tutorial_train_text.txt\n", + "\n", + "tok_path, vocab_path = create_spt_model(\n", + " data_file=\"_tutorial_train_text.txt\", \n", + " output_dir=\"_tutorial_spt\",\n", + " vocab_size=512, \n", + " sample_size=-1, \n", + " do_lower_case=False, \n", + " bos=True, \n", + " eos=True, \n", + " pad=True, \n", + " user_defined_symbols=[\"[INST]\", \"[/INST]\", \"<>\", \"<>\", \"[audio]\"]\n", + ")\n", + "\n", + "tokenizer = SentencePieceTokenizer(tok_path)\n", + "\n", + "def display(encoded_chat, with_mask=False):\n", + " \"\"\"Utility for printing prompt formatted chats.\"\"\"\n", + " for key, val in encoded_chat.items():\n", + " if key.endswith(\"_ids\"):\n", + " print(key, '--', tokenizer.ids_to_text(val), '\\n')\n", + " if key == \"mask\" and with_mask:\n", + " print(key, '--', val)" + ] + }, + { + "cell_type": "markdown", + "id": "4c5c6c88-c882-4305-8757-585fec3eab46", + "metadata": {}, + "source": [ + "## Using an existing PromptFormatter: Llama2\n", + "\n", + "\n", + "**Instanting the prompt formatter.** Let's start with a simple example of Llama2 prompt format use." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "c77a993e-453f-474e-8912-fd35c7fc39ba", + "metadata": {}, + "outputs": [], + "source": [ + "from nemo.collections.common.prompts.llama import Llama2PromptFormatter\n", + "from pprint import pprint\n", + "\n", + "prompt = Llama2PromptFormatter(tokenizer)" + ] + }, + { + "cell_type": "markdown", + "id": "92054a0f-5b97-4178-94b8-a27e62acf97b", + "metadata": {}, + "source": [ + "**Chat example.** We'll define a multi-turn conversation between the user and assistant below:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "c5eabe5e-4160-41d7-ad85-a4df596de38b", + "metadata": {}, + "outputs": [], + "source": [ + "chat = [\n", + " {\"role\": \"user\", \"slots\": {\"message\": \"Do you know something about electronics?\"}},\n", + " {\"role\": \"assistant\", \"slots\": {\"message\": \"Sure, ask away.\"}},\n", + " {\"role\": \"user\", \"slots\": {\"message\": \"How to build my own audio amplifier?\"}},\n", + " {\"role\": \"assistant\", \"slots\": {\"message\": \"In order to build your own audio amplifier, start with ...\"}},\n", + "]" + ] + }, + { + "cell_type": "markdown", + "id": "eff61b98-c7be-4345-ac97-15573d1a9533", + "metadata": {}, + "source": [ + "**Prompt formatter outputs.** Now, we apply prompt formatter to that conversation to obtain four tensors useful for training:\n", + "* `context_ids` encode the whole dialog history up to the last response of the assistant;\n", + "* `answer_ids` encode the last response of the assistant;\n", + "* `input_ids` encode the full conversation;\n", + "* `mask` is a boolean training loss mask that's set to `True` for every token belonging to assistant's turns.\n", + "\n", + "Since the token IDs are meaningless, we'll apply reverse tokenizer for displaying the prompt formatted example." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a10216b3-2bbe-4a2f-8ca8-557c3b9056be", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "input_ids -- [INST] Do you know something about electronics? [/INST] Sure, ask away. [INST] How to build my own audio amplifier? [/INST] In order to build your own audio amplifier, start with ... \n", + "\n", + "context_ids -- [INST] Do you know something about electronics? [/INST] Sure, ask away. [INST] How to build my own audio amplifier? [/INST] \n", + "\n", + "answer_ids -- In order to build your own audio amplifier, start with ... \n", + "\n", + "mask -- tensor([False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, False, False, False, False, False,\n", + " False, False, False, False, False, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True, True, True, True, True, True,\n", + " True, True, True, True, True])\n" + ] + } + ], + "source": [ + "encoded = prompt.encode_dialog(chat)\n", + "display(encoded, with_mask=True)" + ] + }, + { + "cell_type": "markdown", + "id": "e181618e-6df8-44b2-b986-15660133e486", + "metadata": {}, + "source": [ + "**System prompt.** We also support the system prompt. Since it affects the prompt format in a non-trivial way, it is defined as a separate role `\"system_and_user\"`, which has two slots `\"system\"` and `\"message\"`. We'll omit printing the mask for brevity." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2c3476a4-b301-4f35-9520-90d4b919363d", + "metadata": {}, + "outputs": [], + "source": [ + "chat_with_system = [\n", + " {\"role\": \"system_and_user\", \"slots\": {\"system\": \"You are a sales rep in an electronics store.\", \"message\": \"Do you know something about electronics?\"}},\n", + " {\"role\": \"assistant\", \"slots\": {\"message\": \"Sure, ask away.\"}},\n", + " {\"role\": \"user\", \"slots\": {\"message\": \"How to build my own audio amplifier?\"}},\n", + " {\"role\": \"assistant\", \"slots\": {\"message\": \"In order to build your own audio amplifier, start with ...\"}},\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5c8c329d-f8b3-48cb-b664-baed0fcd90ab", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "input_ids -- [INST] <> You are a sales rep in an electronics store. <> Do you know something about electronics? [/INST] Sure, ask away. [INST] How to build my own audio amplifier? [/INST] In order to build your own audio amplifier, start with ... 
\n", + "\n", + "context_ids -- [INST] <> You are a sales rep in an electronics store. <> Do you know something about electronics? [/INST] Sure, ask away. [INST] How to build my own audio amplifier? [/INST] \n", + "\n", + "answer_ids -- In order to build your own audio amplifier, start with ... \n", + "\n" + ] + } + ], + "source": [ + "encoded = prompt.encode_dialog(chat_with_system)\n", + "display(encoded)" + ] + }, + { + "cell_type": "markdown", + "id": "a453345a-6456-43ed-a663-0554c459fddb", + "metadata": {}, + "source": [ + "**Constructing inference-time prompts.** During inference, we don't know what's the last turn of the assistant - we only want to construct the ``context_ids`` tensor. In those cases, just omit the last assistant's turn. The prompt formatter will return the ``context_ids`` tensor (with ``input_ids`` alias for it too)." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "4ede7100-9d28-4cf0-ab75-bfede9936218", + "metadata": {}, + "outputs": [], + "source": [ + "inference_chat = [\n", + " {\"role\": \"system_and_user\", \"slots\": {\"system\": \"You are a sales rep in an electronics store.\", \"message\": \"Do you know something about electronics?\"}},\n", + " {\"role\": \"assistant\", \"slots\": {\"message\": \"Sure, ask away.\"}},\n", + " {\"role\": \"user\", \"slots\": {\"message\": \"How to build my own audio amplifier?\"}},\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "61bf8e77-0630-4a84-bd30-ca4c27f8d898", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "input_ids -- [INST] <> You are a sales rep in an electronics store. <> Do you know something about electronics? [/INST] Sure, ask away. [INST] How to build my own audio amplifier? [/INST] \n", + "\n", + "context_ids -- [INST] <> You are a sales rep in an electronics store. <> Do you know something about electronics? [/INST] Sure, ask away. [INST] How to build my own audio amplifier? [/INST] \n", + "\n" + ] + } + ], + "source": [ + "encoded = prompt.encode_dialog(inference_chat)\n", + "display(encoded)" + ] + }, + { + "cell_type": "markdown", + "id": "a334e00a-9530-4333-98de-5cb8fb08eb47", + "metadata": {}, + "source": [ + "### How is Llama2 PromptFormatter built\n", + "\n", + "`Llama2PromptFormatter` is a small class with prompt definition that inherits `PromptFormatter`, which implements the logic for applying prompt format and tokenization to multi-turn conversations. 
\n", + "\n", + "Let's take a look at `Llama2PromptFormatter` definition:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f29fbf2f-3caa-4b27-86ca-5012d9fc6ba5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "class Llama2PromptFormatter(PromptFormatter):\n", + " \"\"\"\n", + " This template has been validated to provide identical tokenized results to the official code\n", + " in https://github.com/meta-llama/llama/blob/main/llama/generation.py\n", + " \"\"\"\n", + "\n", + " NAME = \"llama2\"\n", + " OUTPUT_ROLE = \"assistant\"\n", + " TEMPLATE = {\n", + " \"system_and_user\": {\n", + " \"template\": f\"{BOS_SLOT}[INST] <>\\n|system|\\n<>\\n\\n|message| [/INST]\",\n", + " \"slots\": {\n", + " \"system\": Modality.Text,\n", + " \"message\": Modality.Text,\n", + " },\n", + " },\n", + " \"user\": {\n", + " \"template\": f\"{BOS_SLOT}[INST] |message| [/INST]\",\n", + " \"slots\": {\n", + " \"message\": Modality.Text,\n", + " },\n", + " },\n", + " OUTPUT_ROLE: {\n", + " \"template\": f\"|message| {EOS_SLOT}\",\n", + " \"slots\": {\n", + " \"message\": Modality.Text,\n", + " },\n", + " },\n", + " }\n", + "\n" + ] + } + ], + "source": [ + "import inspect\n", + "print(inspect.getsource(Llama2PromptFormatter))" + ] + }, + { + "cell_type": "markdown", + "id": "b24e9310-b8ed-4e35-9dda-d24aa62cfb6a", + "metadata": {}, + "source": [ + "As you can see, the definition consist of the following key components:\n", + "* Derives `PromptFormatter` parent class.\n", + "* Specifies `NAME`, which is used for dynamic resolution of string to class via `cls = PromptFormatter.resolve(name)`.\n", + "* Specifies `OUTPUT_ROLE`, which is the name for the role with assistant's responses (typically `\"assistant\"`).\n", + "* Specifies `TEMPLATE` which defines the dialog structure and how user-provided values (slots) are applied to prompts. Notably:\n", + " * The slots are wrapped into pipe operators `\"|\"` in the prompt template definition, and substituted with user provided values before tokenization.\n", + " * `\"system_and_user`\" role has two slots, `\"system\"` and `\"message\"`, and a template that wraps them with Llama2 special tokens.\n", + " * We use `BOS_SLOT` and `EOS_SLOT` to insert sentencepiece tokenizer's `bos_id` and `eos_id` in the right places (remember that sentencepiece won't tokenize them from text, they need to be inserted programmatically).\n", + " * The slots have a type, currently supported types are `Modality.Text` and `Modality.TextLiteral(value1, value2, ...)` that allows to restrict the set of slots values." + ] + }, + { + "cell_type": "markdown", + "id": "8cbdca6c-6c0f-42a9-a4a7-b936684c6e12", + "metadata": {}, + "source": [ + "## Defining your own prompt formatter" + ] + }, + { + "cell_type": "markdown", + "id": "25a9b6d2-d004-4f7f-8b24-4fd6d4eae244", + "metadata": {}, + "source": [ + "Generally you can follow the definition of existing prompt formatters to define your own. \n", + "We have several prompt formats implemented for Llama, Gemma, Phi, etc. 
\n", + "\n", + "We'll define a custom simple prompt format that has no system prompt below as an illustration:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "b69f6532-24d8-4419-b1da-42184c3d72de", + "metadata": {}, + "outputs": [], + "source": [ + "from nemo.collections.common.prompts.formatter import PromptFormatter, Modality\n", + "\n", + "class MyPrompt(PromptFormatter):\n", + " NAME = \"myprompt\"\n", + " OUTPUT_ROLE = \"assistant\"\n", + " TEMPLATE = {\n", + " \"user\": {\n", + " \"template\": \"User: |message|\\n\",\n", + " \"slots\": {\"message\": Modality.Text},\n", + " },\n", + " \"assistant\": {\n", + " \"template\": \"Assistant: |message|\\n\",\n", + " \"slots\": {\"message\": Modality.Text},\n", + " },\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "a97c6589-1303-446c-952f-d2b4007ca7e9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "input_ids -- User: Do you know something about electronics? Assistant: Sure, ask away. User: How to build my own audio amplifier? Assistant: In order to build your own audio amplifier, start with ... \n", + "\n", + "context_ids -- User: Do you know something about electronics? Assistant: Sure, ask away. User: How to build my own audio amplifier? \n", + "\n", + "answer_ids -- Assistant: In order to build your own audio amplifier, start with ... \n", + "\n" + ] + } + ], + "source": [ + "my_prompt_cls = PromptFormatter.resolve(\"myprompt\") # it is auto-registered\n", + "my_prompt = my_prompt_cls(tokenizer)\n", + "display(my_prompt.encode_dialog(chat))" + ] + }, + { + "cell_type": "markdown", + "id": "30f9c96a-6cf8-4cd3-b0e8-6b461c86100f", + "metadata": {}, + "source": [ + "## Applying prompt formatter to multimodal data\n", + "\n", + "We refer the reader to our other tutorial, [Multimodal Lhotse Dataloading](./Multimodal Lhotse Dataloading.ipynb), where this is discussed in detail." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 9c264b797d7f7b7222fe83bef4e73e68a357d700 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20=C5=BBelasko?= Date: Thu, 12 Dec 2024 13:07:34 -0500 Subject: [PATCH 025/128] Sync validation metrics for ASRModel (#11533) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Sync validation metrics for ASRModel Signed-off-by: Piotr Żelasko * support sync for single-dataloader case Signed-off-by: Piotr Żelasko --------- Signed-off-by: Piotr Żelasko --- nemo/collections/asr/models/asr_model.py | 2 +- nemo/core/classes/modelPT.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nemo/collections/asr/models/asr_model.py b/nemo/collections/asr/models/asr_model.py index a54a37fe5371..ca0634dfaa69 100644 --- a/nemo/collections/asr/models/asr_model.py +++ b/nemo/collections/asr/models/asr_model.py @@ -201,7 +201,7 @@ def on_validation_epoch_end(self) -> Optional[dict[str, dict[str, torch.Tensor]] EncDecRNNTModel.decoding.decoding is the inference class with CUDA graphs. 
""" WithOptionalCudaGraphs.disable_cuda_graphs_recursive(self, attribute_path="decoding.decoding") - return super().on_validation_epoch_end() + return super().on_validation_epoch_end(sync_metrics=True) def on_test_epoch_start(self) -> None: """ diff --git a/nemo/core/classes/modelPT.py b/nemo/core/classes/modelPT.py index 1e7ef0c3a9b5..24b2b20b81be 100644 --- a/nemo/core/classes/modelPT.py +++ b/nemo/core/classes/modelPT.py @@ -950,7 +950,7 @@ def test_dataloader(self): return self._test_dl - def on_validation_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]: + def on_validation_epoch_end(self, sync_metrics: bool = False) -> Optional[Dict[str, Dict[str, torch.Tensor]]]: """ Default DataLoader for Validation set which automatically supports multiple data loaders via `multi_validation_epoch_end`. @@ -980,7 +980,7 @@ def on_validation_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]] output_dict = self.multi_validation_epoch_end(self.validation_step_outputs, dataloader_idx=0) if output_dict is not None and 'log' in output_dict: - self.log_dict(output_dict.pop('log'), on_epoch=True) + self.log_dict(output_dict.pop('log'), on_epoch=True, sync_dist=sync_metrics) self.validation_step_outputs.clear() # free memory return output_dict @@ -1041,7 +1041,7 @@ def on_validation_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]] self.validation_step_outputs[dataloader_idx].clear() # free memory if 'log' in output_dict: - self.log_dict(output_dict.pop('log'), on_epoch=True) + self.log_dict(output_dict.pop('log'), on_epoch=True, sync_dist=sync_metrics) # return everything else return output_dict From 4972cc3d8418b2230436000973841d6d31c357ed Mon Sep 17 00:00:00 2001 From: Onur Yilmaz <35306097+oyilmaz-nvidia@users.noreply.github.com> Date: Thu, 12 Dec 2024 13:18:47 -0500 Subject: [PATCH 026/128] NeMo 2.0 In-framework deployment support (#11523) * nemo 2 support Signed-off-by: Onur Yilmaz * Remove unwanted params in DDP init in Megatron Parallel Signed-off-by: Hemil Desai * nemo2 working with query Signed-off-by: Onur Yilmaz * Apply isort and black reformatting Signed-off-by: oyilmaz-nvidia * multigpu deployment with nemo2 works Signed-off-by: Onur Yilmaz * Apply isort and black reformatting Signed-off-by: oyilmaz-nvidia * add max output lenght Signed-off-by: Onur Yilmaz * Remove prints Signed-off-by: Onur Yilmaz * Fix merge conflicts Signed-off-by: Onur Yilmaz * readded this file Signed-off-by: Onur Yilmaz --------- Signed-off-by: Onur Yilmaz Signed-off-by: Hemil Desai Signed-off-by: oyilmaz-nvidia Co-authored-by: Hemil Desai Co-authored-by: oyilmaz-nvidia --- docs/source/tts/checkpoints.rst | 2 +- nemo/deploy/nlp/__init__.py | 2 +- nemo/deploy/nlp/megatronllm_deployable.py | 171 ++++++++++++++++-- nemo/deploy/nlp/query_llm.py | 26 ++- nemo/deploy/utils.py | 19 ++ .../deploy/nlp/deploy_inframework_triton.py | 69 ++++--- scripts/deploy/nlp/query_inframework.py | 6 +- 7 files changed, 245 insertions(+), 50 deletions(-) diff --git a/docs/source/tts/checkpoints.rst b/docs/source/tts/checkpoints.rst index 0b59c0facbe2..acbc6ee1a7aa 100644 --- a/docs/source/tts/checkpoints.rst +++ b/docs/source/tts/checkpoints.rst @@ -158,4 +158,4 @@ Codec models .. 
csv-table:: :file: data/models_codec.csv :align: left - :header-rows: 1 + :header-rows: 1 \ No newline at end of file diff --git a/nemo/deploy/nlp/__init__.py b/nemo/deploy/nlp/__init__.py index 633544e300ed..cd9ef54a6035 100644 --- a/nemo/deploy/nlp/__init__.py +++ b/nemo/deploy/nlp/__init__.py @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -from nemo.deploy.nlp.megatronllm_deployable import MegatronLLMDeployable +from nemo.deploy.nlp.megatronllm_deployable import MegatronLLMDeploy, MegatronLLMDeployable from nemo.deploy.nlp.query_llm import NemoQueryLLM, NemoQueryLLMPyTorch diff --git a/nemo/deploy/nlp/megatronllm_deployable.py b/nemo/deploy/nlp/megatronllm_deployable.py index 0ce5991cdc95..703ad0742a17 100644 --- a/nemo/deploy/nlp/megatronllm_deployable.py +++ b/nemo/deploy/nlp/megatronllm_deployable.py @@ -19,9 +19,13 @@ import numpy as np import torch +import torch.distributed import wrapt -from lightning.pytorch.trainer.trainer import Trainer +from megatron.core.inference.common_inference_params import CommonInferenceParams +from pytorch_lightning.trainer.trainer import Trainer +import nemo.lightning as nl +from nemo.collections.llm import inference from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel from nemo.collections.nlp.modules.common.text_generation_utils import ( OutputType, @@ -31,7 +35,7 @@ from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, SamplingParam from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy from nemo.deploy import ITritonDeployable -from nemo.deploy.utils import cast_output, str_ndarray2list +from nemo.deploy.utils import NEMO2, cast_output, nemo_checkpoint_version, str_ndarray2list try: from megatron.core.dist_checkpointing.validation import StrictHandling @@ -99,6 +103,152 @@ def to_long_tensor(self): return torch.tensor([self], dtype=torch.long, device='cuda') +class MegatronLLMDeploy: + + @staticmethod + def get_deployable( + nemo_checkpoint_filepath: str = None, + num_devices: int = 1, + num_nodes: int = 1, + tensor_model_parallel_size: int = 1, + pipeline_model_parallel_size: int = 1, + context_parallel_size: int = 1, + ): + + if nemo_checkpoint_version(nemo_checkpoint_filepath) == NEMO2: + return MegatronLLMDeployableNemo2( + nemo_checkpoint_filepath=nemo_checkpoint_filepath, + num_devices=num_devices, + num_nodes=num_nodes, + tensor_model_parallel_size=tensor_model_parallel_size, + pipeline_model_parallel_size=pipeline_model_parallel_size, + context_parallel_size=context_parallel_size, + ) + else: + return MegatronLLMDeployable( + nemo_checkpoint_filepath=nemo_checkpoint_filepath, + num_devices=num_devices, + num_nodes=num_nodes, + ) + + +class MegatronLLMDeployableNemo2(ITritonDeployable): + """Triton inference server compatible deploy class for a .nemo model file""" + + def __init__( + self, + nemo_checkpoint_filepath: str = None, + num_devices: int = 1, + num_nodes: int = 1, + tensor_model_parallel_size: int = 1, + pipeline_model_parallel_size: int = 1, + context_parallel_size: int = 1, + params_dtype: torch.dtype = torch.bfloat16, + inference_batch_times_seqlen_threshold: int = 1000, + ): + self.nemo_checkpoint_filepath = nemo_checkpoint_filepath + + strategy = nl.MegatronStrategy( + tensor_model_parallel_size=tensor_model_parallel_size, + pipeline_model_parallel_size=pipeline_model_parallel_size, + context_parallel_size=context_parallel_size, + sequence_parallel=False, + 
setup_optimizers=False, + store_optimizer_states=False, + ) + + trainer = nl.Trainer( + accelerator="gpu", + devices=num_devices, + num_nodes=num_nodes, + strategy=strategy, + plugins=nl.MegatronMixedPrecision( + precision="bf16-mixed", + params_dtype=torch.bfloat16, + pipeline_dtype=torch.bfloat16, + autocast_enabled=False, + grad_reduce_in_fp32=False, + ), + ) + + self.inference_wrapped_model, self.mcore_tokenizer = inference.setup_model_and_tokenizer( + path=Path(nemo_checkpoint_filepath), + trainer=trainer, + params_dtype=params_dtype, + inference_batch_times_seqlen_threshold=inference_batch_times_seqlen_threshold, + ) + + @property + def get_triton_input(self): + inputs = ( + Tensor(name="prompts", shape=(-1,), dtype=bytes), + Tensor(name="max_length", shape=(-1,), dtype=np.int_, optional=True), + Tensor(name="max_batch_size", shape=(-1,), dtype=np.int_, optional=True), + Tensor(name="top_k", shape=(-1,), dtype=np.int_, optional=True), + Tensor(name="top_p", shape=(-1,), dtype=np.single, optional=True), + Tensor(name="temperature", shape=(-1,), dtype=np.single, optional=True), + Tensor(name="random_seed", shape=(-1,), dtype=np.int_, optional=True), + Tensor(name="max_length", shape=(-1,), dtype=np.int_, optional=True), + Tensor(name="compute_logprob", shape=(-1,), dtype=np.bool_, optional=True), + ) + return inputs + + @property + def get_triton_output(self): + return ( + Tensor(name="sentences", shape=(-1,), dtype=bytes), + Tensor(name="log_probs", shape=(-1,), dtype=np.single), + ) + + @batch + def triton_infer_fn(self, **inputs: np.ndarray): + output_infer = {} + try: + prompts = str_ndarray2list(inputs.pop("prompts")) + max_batch_size = inputs.pop("max_batch_size")[0][0] if "max_batch_size" in inputs else 32 + random_seed = inputs.pop("random_seed")[0][0] if "random_seed" in inputs else None + temperature = inputs.pop("temperature")[0][0] if "temperature" in inputs else 1.0 + top_k = inputs.pop("top_k")[0][0] if "top_k" in inputs else 1 + top_p = inputs.pop("top_p")[0][0] if "top_k" in inputs else 0.0 + num_tokens_to_generate = inputs.pop("max_length")[0][0] if "max_length" in inputs else 256 + log_probs = inputs.pop("compute_logprob")[0][0] if "compute_logprob" in inputs else False + text_only = True + + inference_params = CommonInferenceParams( + temperature=temperature, + top_k=top_k, + top_p=top_p, + num_tokens_to_generate=num_tokens_to_generate, + return_log_probs=log_probs, + ) + + results = inference.generate( + model=self.inference_wrapped_model, + tokenizer=self.mcore_tokenizer, + prompts=prompts, + max_batch_size=max_batch_size, + random_seed=random_seed, + inference_params=inference_params, + ) + + output_texts = [r.generated_text if text_only else r for r in results] + output_infer = {"sentences": cast_output(output_texts, np.bytes_)} + if log_probs: + output_log_probs = [] + for r in results: + lp = r.generated_log_probs.cpu().detach().numpy() + if len(lp) == 0: + output_log_probs.append([0]) + else: + output_log_probs.append(lp) + output_infer["log_probs"] = np.array(output_log_probs) + except Exception as error: + err_msg = "An error occurred: {0}".format(str(error)) + output_infer["sentences"] = cast_output([err_msg], np.bytes_) + + return output_infer + + class MegatronLLMDeployable(ITritonDeployable): """Triton inference server compatible deploy class for a .nemo model file""" @@ -127,8 +277,6 @@ def __init__( self._load_from_nemo_checkpoint(nemo_checkpoint_filepath, num_devices, num_nodes) self.model.eval() - # helper threads spawned by torch.multiprocessing 
should loop inside this helper function - self._helper_thread_evaluation_loop() def _load_from_nemo_checkpoint(self, nemo_checkpoint_filepath: str, num_devices: int, num_nodes: int): if Path(nemo_checkpoint_filepath).exists(): @@ -167,15 +315,6 @@ def _load_from_nemo_checkpoint(self, nemo_checkpoint_filepath: str, num_devices: nemo_checkpoint_filepath, trainer=trainer, override_config_path=custom_config ) - def _helper_thread_evaluation_loop(self): - # only deploy the server on main thread, other threads enter this evaluation loop - if torch.distributed.is_initialized() and torch.distributed.get_rank() != 0: - while True: - wait_value = ServerSync.WAIT.to_long_tensor() - torch.distributed.broadcast(wait_value, 0) - if wait_value.item() == ServerSync.SIGNAL: - self.model.generate(inputs=[""], length_params=None) - _INPUT_PARAMETER_FIELDS = { "prompts": (-1, bytes, False), } @@ -199,12 +338,6 @@ def get_triton_input(self): Tensor(name=name, shape=(shape,), dtype=dtype, optional=optional) for name, (shape, dtype, optional) in self._INPUT_PARAMETER_FIELDS.items() ) - ''' - in theory, would like to use typedict2tensor() function to generate Tensors, but it purposely ignores 1D arrays - asked JakubK why on 2024-04-26, but he doesn't know who owns the code - sampling_parameters = typedict2tensor(SamplingParam) - length_parameters = typedict2tensor(LengthParam) - ''' default_sampling_params: SamplingParam = get_default_sampling_params() sampling_parameters = tuple( Tensor( diff --git a/nemo/deploy/nlp/query_llm.py b/nemo/deploy/nlp/query_llm.py index e1d21bb54b76..a88d3b610cda 100644 --- a/nemo/deploy/nlp/query_llm.py +++ b/nemo/deploy/nlp/query_llm.py @@ -123,7 +123,31 @@ def query_llm( with ModelClient(self.url, self.model_name, init_timeout_s=init_timeout) as client: result_dict = client.infer_batch(**inputs) - return result_dict + output_type = client.model_config.outputs[0].dtype + + log_probs_output = None + if "log_probs" in result_dict.keys(): + log_probs_output = result_dict["log_probs"] + + if output_type == np.bytes_: + if "sentences" in result_dict.keys(): + output = result_dict["sentences"] + else: + return "Unknown output keyword." + + sentences = np.char.decode(output.astype("bytes"), "utf-8") + openai_response = { + "id": f"cmpl-{int(time.time())}", + "object": "text_completion", + "created": int(time.time()), + "model": self.model_name, + "choices": [{"text": str(sentences)}], + } + if log_probs_output is not None: + openai_response["log_probs"] = log_probs_output + return openai_response + else: + return result_dict["sentences"] class NemoQueryLLM(NemoQueryLLMBase): diff --git a/nemo/deploy/utils.py b/nemo/deploy/utils.py index 650770e77152..29533cbe148f 100644 --- a/nemo/deploy/utils.py +++ b/nemo/deploy/utils.py @@ -12,13 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os import typing +from pathlib import Path import numpy as np import torch from PIL import Image from pytriton.model_config import Tensor +from nemo.export.tarutils import TarPath + +NEMO2 = "NEMO 2.0" +NEMO1 = "NEMO 1.0" + def typedict2tensor( typedict_class, @@ -53,6 +60,18 @@ def _get_tensor_params(type_): ) +def nemo_checkpoint_version(path: str) -> str: + if os.path.isdir(path): + path = Path(path) + else: + path = TarPath(path) + + if (path / "context").exists() and (path / "weights").exists(): + return NEMO2 + else: + return NEMO1 + + def str_list2numpy(str_list: typing.List[str]) -> np.ndarray: str_ndarray = np.array(str_list)[..., np.newaxis] return np.char.encode(str_ndarray, "utf-8") diff --git a/scripts/deploy/nlp/deploy_inframework_triton.py b/scripts/deploy/nlp/deploy_inframework_triton.py index b698e4cbacfd..c5b391ab2c1e 100755 --- a/scripts/deploy/nlp/deploy_inframework_triton.py +++ b/scripts/deploy/nlp/deploy_inframework_triton.py @@ -15,6 +15,7 @@ import argparse import logging import sys +import torch from nemo.deploy import DeployPyTriton @@ -22,7 +23,7 @@ megatron_llm_supported = True try: - from nemo.deploy.nlp import MegatronLLMDeployable + from nemo.deploy.nlp import MegatronLLMDeploy except Exception as e: LOGGER.warning(f"Cannot import MegatronLLMDeployable, it will not be available. {type(e).__name__}: {e}") megatron_llm_supported = False @@ -43,6 +44,10 @@ def get_args(argv): "-tha", "--triton_http_address", default="0.0.0.0", type=str, help="HTTP address for the Triton server" ) parser.add_argument("-ng", "--num_gpus", default=1, type=int, help="Number of GPUs for the deployment") + parser.add_argument("-nn", "--num_nodes", default=1, type=int, help="Number of GPUs for the deployment") + parser.add_argument("-tps", "--tensor_parallelism_size", default=1, type=int, help="Tensor parallelism size") + parser.add_argument("-pps", "--pipeline_parallelism_size", default=1, type=int, help="Pipeline parallelism size") + parser.add_argument("-cps", "--context_parallel_size", default=1, type=int, help="Pipeline parallelism size") parser.add_argument("-mbs", "--max_batch_size", default=8, type=int, help="Max batch size of the model") parser.add_argument("-dm", "--debug_mode", default=False, action='store_true', help="Enable debug mode") args = parser.parse_args(argv) @@ -53,7 +58,14 @@ def get_nemo_deployable(args): if args.nemo_checkpoint is None: raise ValueError("In-Framework deployment requires a .nemo checkpoint") - return MegatronLLMDeployable(args.nemo_checkpoint, args.num_gpus) + return MegatronLLMDeploy.get_deployable( + nemo_checkpoint_filepath=args.nemo_checkpoint, + num_devices=args.num_gpus, + num_nodes=args.num_nodes, + tensor_model_parallel_size=args.tensor_parallelism_size, + pipeline_model_parallel_size=args.pipeline_parallelism_size, + context_parallel_size=args.context_parallel_size, + ) def nemo_deploy(argv): @@ -72,31 +84,34 @@ def nemo_deploy(argv): raise ValueError("MegatronLLMDeployable is not supported in this environment.") triton_deployable = get_nemo_deployable(args) - try: - nm = DeployPyTriton( - model=triton_deployable, - triton_model_name=args.triton_model_name, - triton_model_version=args.triton_model_version, - max_batch_size=args.max_batch_size, - port=args.triton_port, - address=args.triton_http_address, - ) - - LOGGER.info("Triton deploy function will be called.") - nm.deploy() - except Exception as error: - LOGGER.error("Error message has occurred during deploy function. 
Error message: " + str(error)) - return - - try: - LOGGER.info("Model serving on Triton is will be started.") - nm.serve() - except Exception as error: - LOGGER.error("Error message has occurred during deploy function. Error message: " + str(error)) - return - - LOGGER.info("Model serving will be stopped.") - nm.stop() + if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0: + try: + nm = DeployPyTriton( + model=triton_deployable, + triton_model_name=args.triton_model_name, + triton_model_version=args.triton_model_version, + max_batch_size=args.max_batch_size, + port=args.triton_port, + address=args.triton_http_address, + ) + + LOGGER.info("Triton deploy function will be called.") + nm.deploy() + except Exception as error: + LOGGER.error("Error message has occurred during deploy function. Error message: " + str(error)) + return + + try: + LOGGER.info("Model serving on Triton is will be started.") + nm.serve() + except Exception as error: + LOGGER.error("Error message has occurred during deploy function. Error message: " + str(error)) + return + + LOGGER.info("Model serving will be stopped.") + nm.stop() + + torch.distributed.barrier() if __name__ == '__main__': diff --git a/scripts/deploy/nlp/query_inframework.py b/scripts/deploy/nlp/query_inframework.py index a62e09fa071d..9af4c034210e 100644 --- a/scripts/deploy/nlp/query_inframework.py +++ b/scripts/deploy/nlp/query_inframework.py @@ -33,6 +33,7 @@ def get_args(argv): parser.add_argument("-tpp", "--top_p", default=0.0, type=float, help="top_p") parser.add_argument("-t", "--temperature", default=1.0, type=float, help="temperature") parser.add_argument("-it", "--init_timeout", default=60.0, type=float, help="init timeout for the triton server") + parser.add_argument("-clp", "--compute_logprob", default=None, action='store_true', help="Returns log_probs") args = parser.parse_args(argv) return args @@ -46,6 +47,7 @@ def query_llm( top_k=1, top_p=0.0, temperature=1.0, + compute_logprob=None, init_timeout=60.0, ): nemo_query = NemoQueryLLMPyTorch(url, model_name) @@ -55,6 +57,7 @@ def query_llm( top_k=top_k, top_p=top_p, temperature=temperature, + compute_logprob=compute_logprob, init_timeout=init_timeout, ) @@ -74,9 +77,10 @@ def query(argv): top_k=args.top_k, top_p=args.top_p, temperature=args.temperature, + compute_logprob=args.compute_logprob, init_timeout=args.init_timeout, ) - print(outputs["sentences"][0][0]) + print(outputs) if __name__ == '__main__': From 73181b44c04d871df116620e1720f0d9457596d9 Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Date: Thu, 12 Dec 2024 10:31:44 -0800 Subject: [PATCH 027/128] Add SFT/PEFT HF tests (#11519) * Add SFT/PEFT HF tests Signed-off-by: Alexandros Koumparoulis * move hf examples to examples dir Signed-off-by: Alexandros Koumparoulis * bot Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * use mini_squad Signed-off-by: Alexandros Koumparoulis * use mini_squad Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * add 2gpu DDP Signed-off-by: Alexandros Koumparoulis * refactor Signed-off-by: Alexandros Koumparoulis * use labels as passed by the user Signed-off-by: Alexandros Koumparoulis * update samples/ tests Signed-off-by: Alexandros Koumparoulis * rm unused imports Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting 
Signed-off-by: akoumpa * fix Signed-off-by: Alexandros Koumparoulis * Add tests with subset split names, e.g. train[:100] Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * add --disable-ckpt Signed-off-by: Alexandros Koumparoulis * use self-hosted-azure-gpus-1 for single-gpu test Signed-off-by: Alexandros Koumparoulis * Add TRANSFORMERS_OFFLINE=1 to hf tests Signed-off-by: Alexandros Koumparoulis --------- Signed-off-by: Alexandros Koumparoulis Signed-off-by: akoumpa Co-authored-by: akoumpa --- .github/workflows/cicd-main.yml | 53 ++++++- examples/llm/peft/hf.py | 17 ++- nemo/collections/llm/gpt/data/hf_dataset.py | 27 ++-- .../gpt/model/hf_auto_model_for_causal_lm.py | 64 +++++---- .../llm/gpt/data/test_hf_datamodule.py | 82 +++++++---- tests/collections/llm/hf/peft.py | 110 +++++++++++++++ tests/collections/llm/hf/sft.py | 131 ++++++++++++++++++ 7 files changed, 406 insertions(+), 78 deletions(-) create mode 100644 tests/collections/llm/hf/peft.py create mode 100755 tests/collections/llm/hf/sft.py diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 8b6d2c0251bd..37d8b903afa4 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -3574,17 +3574,60 @@ jobs: inference.outfile_path=/tmp/nlp_mcore_t5_lora_tuning_tp2/out.jsonl + L2_HF_Transformer_PEFT: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_PEFT') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure-gpus-1 + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/peft.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --disable-ckpt + AFTER_SCRIPT: | + rm -rf nemo_experiments + + L2_HF_Transformer_PEFT_2gpu: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_PEFT_2gpu') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/peft.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --devices 2 --strategy ddp --disable-ckpt + AFTER_SCRIPT: | + rm -rf nemo_experiments + + L2_HF_Transformer_SFT_2gpu: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_SFT_2gpu') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --devices 2 --strategy ddp + AFTER_SCRIPT: | + rm -rf nemo_experiments + + L2_HF_Transformer_SFT: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_SFT') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure-gpus-1 + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 + AFTER_SCRIPT: | + rm -rf nemo_experiments + L2_HF_Transformer_SFT_TE_Acceleration: needs: [ 
cicd-test-container-setup ] uses: ./.github/workflows/_test_template.yml if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_SFT_TE_Acceleration') || needs.cicd-test-container-setup.outputs.all == 'true' with: - RUNNER: self-hosted-azure + RUNNER: self-hosted-azure-gpus-1 SCRIPT: | - python examples/llm/sft/hf.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --model-accelerator te + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --model-accelerator te --max-steps 10 AFTER_SCRIPT: | rm -rf nemo_experiments - # L2: Megatron Mock Data Generation L2_Megatron_Mock_Data_Generation_MockGPTDataset: @@ -4685,6 +4728,10 @@ jobs: - L2_NeMo_2_llama3_pretraining_recipe - L2_NeMo_2_llama3_fault_tolerance_plugin - L2_NeMo_2_llama3_straggler_detection + - L2_HF_Transformer_PEFT + - L2_HF_Transformer_PEFT_2gpu + - L2_HF_Transformer_SFT + - L2_HF_Transformer_SFT_2gpu - L2_HF_Transformer_SFT_TE_Acceleration - L2_NeMo_2_SSM_Pretraining - L2_NeMo_2_SSM_Finetuning diff --git a/examples/llm/peft/hf.py b/examples/llm/peft/hf.py index c24c5958b388..3a0930732e87 100644 --- a/examples/llm/peft/hf.py +++ b/examples/llm/peft/hf.py @@ -39,14 +39,17 @@ def formatting_prompts_func(examples): output = output[0] text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN ans = tokenizer(text) - tokens = ans['input_ids'] - return { - 'tokens': tokens, - 'labels': tokens[1:] + [tokens[-1]], - } + ans['labels'] = ans['input_ids'] + return ans - datamodule = llm.HFDatasetDataModule("rajpurkar/squad", split="train", pad_token_id=tokenizer.eos_token_id) - datamodule.map(formatting_prompts_func, batched=False, batch_size=2) + tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) + datamodule = llm.HFDatasetDataModule("rajpurkar/squad", split="train[:100]", pad_token_id=tokenizer.eos_token_id) + datamodule.map( + formatting_prompts_func, + batched=False, + batch_size=2, + remove_columns=["id", "title", "context", "question", 'answers'], + ) return datamodule diff --git a/nemo/collections/llm/gpt/data/hf_dataset.py b/nemo/collections/llm/gpt/data/hf_dataset.py index 73b6444a6e9c..7880e26cf6b1 100644 --- a/nemo/collections/llm/gpt/data/hf_dataset.py +++ b/nemo/collections/llm/gpt/data/hf_dataset.py @@ -14,13 +14,19 @@ import lightning.pytorch as pl import torch -from datasets import load_dataset +from datasets import Dataset, DatasetDict, load_dataset from torch.utils.data import DataLoader from nemo.lightning.pytorch.plugins import MegatronDataSampler from nemo.utils import logging +def clean_split(name): + if '[' in name: + return name.split('[')[0] + return name + + def make_dataset_splits(dataset, split, split_aliases): """ Given a dataset (e.g. from datasets.load_dataset or datasets.Dataset.from_dict) it @@ -51,19 +57,18 @@ def make_dataset_splits(dataset, split, split_aliases): > "val": Dataset .. 
(with 10570 rows), > } """ - from datasets import Dataset, DatasetDict - - split_names = ['train', 'test', 'val'] - dataset_splits = {_split: None for _split in split_names} + valid_split_names = ['train', 'test', 'val'] + dataset_splits = {_split: None for _split in valid_split_names} alias_to_split = {} for split_name, _split_aliases in split_aliases.items(): - assert split_name in split_names + assert split_name in valid_split_names for alias in _split_aliases: alias_to_split[alias] = split_name if isinstance(dataset, Dataset): assert isinstance(split, str), "Expected split to be a string, but got " + str(type(split)) + split = clean_split(split) dataset_splits[split] = dataset elif isinstance(dataset, DatasetDict): dataset_split_names = dataset.keys() @@ -75,7 +80,7 @@ def make_dataset_splits(dataset, split, split_aliases): elif isinstance(split, list): logging.info(f"Loaded HF dataset will use " + str(split) + " splits.") assert isinstance(dataset, list) - for i, alias_split_name in enumerate(split): + for i, alias_split_name in enumerate(map(clean_split, split)): split_name = alias_to_split[alias_split_name] assert dataset_splits[split_name] is None dataset_splits[split_name] = dataset[i] @@ -93,6 +98,7 @@ def make_dataset_splits(dataset, split, split_aliases): else: raise ValueError("Expected split name to be None, str or a list") + assert set(valid_split_names) == set(dataset_splits.keys()), dataset_splits.keys() num_init_splits = sum(map(lambda x: x is not None, dataset_splits.values())) assert num_init_splits > 0, f"Expected at least one split to have been initialized {num_init_splits}" return dataset_splits @@ -133,8 +139,6 @@ def __init__( ) -> None: super().__init__() assert pad_token_id is not None - from datasets import Dataset, DatasetDict - # A dataset usually will have several splits (e.g. train, val, test, etc). # We map synonym names to canonical names (train, test, val). # A synonym can be a prefix/suffixed word e.g. train <> training. 
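# Illustrative usage sketch for the subset-split handling added above (a minimal
# example, assuming the Hugging Face `datasets` library is available and the
# squad dataset is reachable; `pad_token_id=0` is a placeholder value, not taken
# from this patch):
#
#     from nemo.collections import llm
#
#     # "train[:100]" loads only the first 100 rows; clean_split() maps the name
#     # back to the canonical "train" bucket, so the val/test splits remain None.
#     dm = llm.HFDatasetDataModule("rajpurkar/squad", split="train[:100]", pad_token_id=0)
#     assert dm.train is not None and dm.val is None and dm.test is None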
@@ -172,8 +176,6 @@ def __init__( @staticmethod def from_dict(dataset_dict, split, **kwargs): - from datasets import Dataset - dataset = Dataset.from_dict(dataset_dict) return HFDatasetDataModule(path_or_dataset=dataset, split=split, **kwargs) @@ -191,7 +193,6 @@ def pad_within_micro(batch, pad_token_id): max_len = max(map(len, batch)) return [item + [pad_token_id] * (max_len - len(item)) for item in batch] - keys = list(filter(lambda x: x in batch[0], ['tokens', 'labels', 'position_ids', 'loss_mask'])) return { key: batchify( torch.LongTensor( @@ -201,7 +202,7 @@ def pad_within_micro(batch, pad_token_id): ) ) ) - for key in keys + for key in batch[0].keys() } def setup(self, stage: str): diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 481dd9a0e187..a51bbffdd6b6 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -31,6 +31,19 @@ def masked_cross_entropy(logits, targets, mask=None): return F.cross_entropy(logits, targets) +def align_labels(logits, labels): + logits = logits.float() + n_cls = logits.shape[-1] + if logits.shape[-2] == labels.shape[-1]: + logits = logits[..., :-1, :].contiguous() + labels = labels[..., 1:].contiguous() + elif logits.shape[-2] == labels.shape[-1] + 1: + logits = logits[..., :-1, :].contiguous() + else: + raise ValueError("Mismatched labels and logits shapes (" + str(labels.shape) + " " + str(logits.shape)) + return logits.view(-1, n_cls), labels.view(-1) + + class HFAutoModelForCausalLM(pl.LightningModule, io.IOMixin, fn.FNMixin): def __init__( self, @@ -91,41 +104,34 @@ def configure_model(self): self.model.train() - def forward(self, input_ids, attention_mask=None, labels=None, loss_mask=None): - outputs = self.model( - input_ids=input_ids.to(self.model.device), - attention_mask=attention_mask, - ) - labels = labels.to(self.model.device) - if loss_mask is not None: - loss_mask = loss_mask.to(self.model.device).view(-1) - n_cls = outputs.logits.shape[-1] - outputs.loss = self.loss_fn(outputs.logits.view(-1, n_cls), labels.view(-1), loss_mask) - return outputs + def forward(self, batch): + return self.model(**batch) def training_step(self, batch): - tokens = batch['tokens'] - labels = batch['labels'] - loss_mask = batch.get('loss_mask', None) - output = self.forward( - input_ids=tokens, - labels=labels, - loss_mask=loss_mask, - ) - - loss = output.loss + labels = batch.pop('labels').to(self.model.device) + loss_mask = batch.pop('loss_mask', None) + + outputs = self.forward(batch) + + # Prepare for loss calculation + logits, labels = align_labels(outputs.logits.float(), labels) + assert logits.shape[-2] == labels.shape[-1] + + loss = self.loss_fn(logits, labels, loss_mask) self.log('train_log', loss, on_step=True, on_epoch=True, prog_bar=True) return loss + @torch.no_grad def validation_step(self, batch, batch_idx): - tokens = batch['tokens'] - labels = batch['labels'] - output = self.forward( - input_ids=tokens, - labels=labels, - ) - - loss = output.loss + labels = batch.pop('labels').to(self.model.device) + loss_mask = batch.pop('loss_mask', None) + + outputs = self.forward(**batch) + + logits, labels = align_labels(outputs.logits.float(), labels) + assert logits.shape[-2] == labels.shape[-1] + loss = self.loss_fn(logits, labels, loss_mask) + self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True) def save_pretrained(self, path): diff --git 
a/tests/collections/llm/gpt/data/test_hf_datamodule.py b/tests/collections/llm/gpt/data/test_hf_datamodule.py index 58f7c02e091b..af035d91034d 100644 --- a/tests/collections/llm/gpt/data/test_hf_datamodule.py +++ b/tests/collections/llm/gpt/data/test_hf_datamodule.py @@ -41,6 +41,30 @@ def test_load_single_split(): assert ds.test is None +def test_load_single_split_with_subset(): + ds = llm.HFDatasetDataModule( + path_or_dataset=DATA_PATH, + split='train[:10]', + seq_length=512, + micro_batch_size=2, + global_batch_size=2, + ) + from datasets.arrow_dataset import Dataset + + assert isinstance(ds.dataset_splits, dict) + assert len(ds.dataset_splits) == 3 + assert 'train' in ds.dataset_splits + assert ds.dataset_splits['train'] is not None + assert ds.train is not None + assert isinstance(ds.dataset_splits['train'], Dataset) + assert 'val' in ds.dataset_splits + assert ds.dataset_splits['val'] is None + assert ds.val is None + assert 'test' in ds.dataset_splits + assert ds.dataset_splits['test'] is None + assert ds.test is None + + def test_load_nonexistent_split(): exception_msg = '' expected_msg = '''Unknown split "this_split_name_should_not_exist". Should be one of ['train', 'validation'].''' @@ -84,6 +108,33 @@ def test_load_multiple_split(): assert ds.test is None +def test_load_multiple_split_with_subset(): + ds = llm.HFDatasetDataModule( + path_or_dataset=DATA_PATH, + split=['train[:100]', 'validation'], + seq_length=512, + micro_batch_size=2, + global_batch_size=2, + ) + from datasets.arrow_dataset import Dataset + + assert isinstance(ds.dataset_splits, dict) + assert len(ds.dataset_splits) == 3 + assert 'train' in ds.dataset_splits + assert ds.dataset_splits['train'] is not None + assert ds.train is not None + assert isinstance(ds.dataset_splits['train'], Dataset) + assert isinstance(ds.train, Dataset) + assert 'val' in ds.dataset_splits + assert ds.dataset_splits['val'] is not None + assert ds.val is not None + assert isinstance(ds.dataset_splits['val'], Dataset) + assert isinstance(ds.val, Dataset) + assert 'test' in ds.dataset_splits + assert ds.dataset_splits['test'] is None + assert ds.test is None + + def test_validate_dataset_asset_accessibility_file_does_not_exist(): raised_exception = False try: @@ -99,8 +150,9 @@ def test_validate_dataset_asset_accessibility_file_does_not_exist(): assert raised_exception == True, "Expected to raise a FileNotFoundError" -def test_validate_dataset_asset_accessibility_file_is_none(): # tokenizer, trainer): - raised_exception = False +def test_validate_dataset_asset_accessibility_file_is_none(): + exception_msg = '' + expected_msg = "Expected `path_or_dataset` to be str, Dataset, DatasetDict, but got " try: llm.HFDatasetDataModule( path_or_dataset=None, @@ -109,28 +161,6 @@ def test_validate_dataset_asset_accessibility_file_is_none(): # tokenizer, trai global_batch_size=2, ) except ValueError as e: - raised_exception = ( - str(e) == "Expected `path_or_dataset` to be str, Dataset, DatasetDict, but got " - ) - - assert raised_exception == True, "Expected to raise a ValueError" - - -def test_load_from_dict(): - data = {'text': "Below is an instruction that describes a task, paired with an input that "} + exception_msg = str(e) - datamodule = llm.HFDatasetDataModule.from_dict( - {"text": [data['text'] for _ in range(101)]}, - split='train', - global_batch_size=4, - micro_batch_size=1, - ) - assert datamodule is not None - assert isinstance(datamodule, llm.HFDatasetDataModule) - assert hasattr(datamodule, 'train') - assert datamodule.train is not 
None - assert len(datamodule.train) == 101 - assert hasattr(datamodule, 'val') - assert datamodule.val is None - assert hasattr(datamodule, 'test') - assert datamodule.test is None + assert exception_msg == expected_msg, exception_msg diff --git a/tests/collections/llm/hf/peft.py b/tests/collections/llm/hf/peft.py new file mode 100644 index 000000000000..018774280946 --- /dev/null +++ b/tests/collections/llm/hf/peft.py @@ -0,0 +1,110 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fiddle as fdl +from lightning.pytorch.loggers import WandbLogger +from nemo import lightning as nl +from nemo.collections import llm + +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' + + +def make_squad_hf_dataset(data_path, tokenizer): + EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN + + def formatting_prompts_func(examples): + alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. + + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""" + instruction = examples["context"] + input = examples["question"] + output = examples["answers"]['text'] + if isinstance(output, list): + output = output[0] + text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN + ans = tokenizer(text) + ans['labels'] = ans['input_ids'] + return ans + + tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) + datamodule = llm.HFDatasetDataModule(data_path, split="train[:100]", pad_token_id=tokenizer.eos_token_id) + + datamodule.map( + formatting_prompts_func, + batched=False, + batch_size=2, + remove_columns=["id", "title", "context", "question", 'answers'], + ) + + return datamodule + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) + parser.add_argument('--devices', default=1) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--max-steps', type=int, default=100) + parser.add_argument('--wandb-project', type=str, default=None) + parser.add_argument('--disable-ckpt', action='store_false') + args = parser.parse_args() + + wandb = None + if args.wandb_project is not None: + model = '_'.join(args.model.split('/')[-2:]) + wandb = WandbLogger( + project=args.wandb_project, + name=f'{model}_dev{args.devices}_strat_{args.strategy}', + ) + grad_clip = 0.5 + if args.strategy == 'fsdp': + # See: https://github.com/Lightning-AI/pytorch-lightning/blob/8ad3e29816a63d8ce5c00ac104b14729a4176f4f/src/lightning/pytorch/plugins/precision/fsdp.py#L81 + grad_clip = None + use_dist_samp = False + tokenizer = llm.HFAutoModelForCausalLM.configure_tokenizer(args.model) + + llm.api.finetune( + model=llm.HFAutoModelForCausalLM(args.model), + data=make_squad_hf_dataset(DATA_PATH, tokenizer), + 
trainer=nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator=args.accelerator, + strategy=args.strategy, + log_every_n_steps=1, + limit_val_batches=0.0, + num_sanity_val_steps=0, + accumulate_grad_batches=10, + gradient_clip_val=grad_clip, + use_distributed_sampler=use_dist_samp, + logger=wandb, + enable_checkpointing=args.disable_ckpt, + ), + optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), + log=None, + peft=llm.peft.LoRA( + target_modules=['*_proj'], + dim=32, + ), + ) diff --git a/tests/collections/llm/hf/sft.py b/tests/collections/llm/hf/sft.py new file mode 100755 index 000000000000..44b0dabbb2d0 --- /dev/null +++ b/tests/collections/llm/hf/sft.py @@ -0,0 +1,131 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fiddle as fdl +from lightning.pytorch.loggers import WandbLogger + +from nemo import lightning as nl +from nemo.collections import llm +from nemo.lightning.pytorch.accelerate.transformer_engine import is_te_accelerated + + +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' + + +def make_squad_hf_dataset(data_path, tokenizer): + EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN + + def formatting_prompts_func(examples): + alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. 
+ + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""" + instruction = examples["context"] + input = examples["question"] + output = examples["answers"]['text'] + if isinstance(output, list): + output = output[0] + text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN + ans = tokenizer(text) + ans['labels'] = ans['input_ids'] + return ans + + tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) + datamodule = llm.HFDatasetDataModule(data_path, split="train[:100]", pad_token_id=tokenizer.eos_token_id) + + datamodule.map( + formatting_prompts_func, + batched=False, + batch_size=2, + remove_columns=["id", "title", "context", "question", 'answers'], + ) + + return datamodule + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) + parser.add_argument('--devices', default=1) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--model-accelerator', default=None, choices=['te']) + parser.add_argument('--max-steps', type=int, default=100) + parser.add_argument("--fp8-autocast", default=False, action='store_true') + parser.add_argument('--wandb-project', type=str, default=None) + parser.add_argument('--model-save-path', type=str, default=None) + args = parser.parse_args() + + wandb = None + if args.wandb_project is not None: + model = '_'.join(args.model.split('/')[-2:]) + wandb = WandbLogger( + project=args.wandb_project, + name=f'{model}_dev{args.devices}_strat_{args.strategy}', + ) + grad_clip = 0.5 + if args.strategy == 'fsdp': + # See: https://github.com/Lightning-AI/pytorch-lightning/blob/8ad3e29816a63d8ce5c00ac104b14729a4176f4f/src/lightning/pytorch/plugins/precision/fsdp.py#L81 + grad_clip = None + use_dist_samp = False + + model_accelerator = None + if args.model_accelerator == "te": + from functools import partial + from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate + + model_accelerator = partial(te_accelerate, fp8_autocast=args.fp8_autocast) + + from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate + + model = llm.HFAutoModelForCausalLM(model_name=args.model, model_accelerator=model_accelerator) + tokenizer = model.tokenizer + + llm.api.finetune( + model=model, + data=make_squad_hf_dataset(DATA_PATH, tokenizer), + trainer=nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator=args.accelerator, + strategy=args.strategy, + log_every_n_steps=1, + limit_val_batches=0.0, + num_sanity_val_steps=0, + accumulate_grad_batches=10, + gradient_clip_val=grad_clip, + use_distributed_sampler=use_dist_samp, + callbacks=[], + logger=wandb, + ), + optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), + log=None, + ) + + if args.model_accelerator: + if args.model_accelerator == "te": + te_acc = is_te_accelerated(model.model) + assert te_acc, "Transformer Engine acceleration was unsuccessful" + print("TE Accelerated: ", te_acc) + + if args.model_save_path is not None: + model.save_pretrained(args.model_save_path) From e32ded19d8f9f16b5e6c4ea3a72212a8090421f3 Mon Sep 17 00:00:00 2001 From: Ananth Subramaniam Date: Thu, 12 Dec 2024 11:11:00 -0800 Subject: [PATCH 028/128] Fix typo: LocalNonpersitentObject -> LocalNonpersistentObject (#11546) Signed-off-by: Ananth Subramaniam --- docs/source/checkpoints/dist_ckpt.rst | 2 +- 
.../multimodal/models/multimodal_llm/neva/neva_model.py | 4 ++-- .../nlp/models/language_modeling/megatron_gpt_model.py | 4 ++-- nemo/collections/nlp/parts/nlp_overrides.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/source/checkpoints/dist_ckpt.rst b/docs/source/checkpoints/dist_ckpt.rst index 2774a46262f5..ba5e9defc321 100644 --- a/docs/source/checkpoints/dist_ckpt.rst +++ b/docs/source/checkpoints/dist_ckpt.rst @@ -213,7 +213,7 @@ A sharded state dict is a (possibly nested) Python dictionary or list with the f a. ShardedTensor b. ShardedObject c. ShardedTensorFactory -2. LocalNonpersitentObject +2. LocalNonpersistentObject 3. Arbitrary object diff --git a/nemo/collections/multimodal/models/multimodal_llm/neva/neva_model.py b/nemo/collections/multimodal/models/multimodal_llm/neva/neva_model.py index 5d19b8544305..ed489cf8c547 100644 --- a/nemo/collections/multimodal/models/multimodal_llm/neva/neva_model.py +++ b/nemo/collections/multimodal/models/multimodal_llm/neva/neva_model.py @@ -91,7 +91,7 @@ try: from megatron.core import InferenceParams, dist_checkpointing, parallel_state, tensor_parallel from megatron.core.dist_checkpointing.dict_utils import dict_list_map_inplace - from megatron.core.dist_checkpointing.mapping import LocalNonpersitentObject, ShardedObject + from megatron.core.dist_checkpointing.mapping import LocalNonpersistentObject, ShardedObject from megatron.core.models.gpt import GPTModel as MCoreGPTModel from megatron.core.pipeline_parallel.schedules import get_forward_backward_func from megatron.core.transformer.utils import make_sharded_tensors_for_checkpoint @@ -112,7 +112,7 @@ def skip_fp8_load(x): if isinstance(x, ShardedObject) and 'fused_attention' in x.key and '_extra_state' in x.key: - x = LocalNonpersitentObject(x.data) # use the FP8 state from initialization, not from ckpt + x = LocalNonpersistentObject(x.data) # use the FP8 state from initialization, not from ckpt return x diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index fb45344eaff3..a8ed1ee7d28f 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -84,7 +84,7 @@ from megatron.core.datasets.gpt_dataset import GPTDataset, GPTDatasetConfig, MockGPTDataset from megatron.core.datasets.utils import get_blend_from_list from megatron.core.dist_checkpointing.dict_utils import dict_list_map_inplace - from megatron.core.dist_checkpointing.mapping import LocalNonpersitentObject, ShardedObject + from megatron.core.dist_checkpointing.mapping import LocalNonpersistentObject, ShardedObject from megatron.core.distributed import DistributedDataParallel as McoreDDP from megatron.core.distributed import DistributedDataParallelConfig, finalize_model_grads @@ -2028,7 +2028,7 @@ def sharded_state_dict(self, prefix: str = '') -> Dict[str, Any]: # WAR: This is a temporary fix to skip loading FP8 parameters for Dot Product Attention def skip_fp8_load(x): if isinstance(x, ShardedObject) and 'fused_attention' in x.key and '_extra_state' in x.key: - x = LocalNonpersitentObject(x.data) # use the FP8 state from initialization, not from ckpt + x = LocalNonpersistentObject(x.data) # use the FP8 state from initialization, not from ckpt return x if self.cfg.get('skip_fp8_attention_checkpoint_load', True): diff --git a/nemo/collections/nlp/parts/nlp_overrides.py 
b/nemo/collections/nlp/parts/nlp_overrides.py index 431c7ab84bb7..144583db249a 100644 --- a/nemo/collections/nlp/parts/nlp_overrides.py +++ b/nemo/collections/nlp/parts/nlp_overrides.py @@ -97,7 +97,7 @@ try: from megatron.core import dist_checkpointing, parallel_state from megatron.core.dist_checkpointing.dict_utils import dict_list_map_outplace - from megatron.core.dist_checkpointing.mapping import LocalNonpersitentObject + from megatron.core.dist_checkpointing.mapping import LocalNonpersistentObject from megatron.core.dist_checkpointing.optimizer import ( get_param_id_to_sharded_param_map, make_sharded_optimizer_tensor, @@ -515,7 +515,7 @@ def _fix_param_groups( ) if expert_index: # Temporary empty params so that loading doesn't fail - model_param_groups.insert(expert_index, {'params': LocalNonpersitentObject([]), 'is_expert': True}) + model_param_groups.insert(expert_index, {'params': LocalNonpersistentObject([]), 'is_expert': True}) if 'optimizer' in sharded_state_dict['optimizer_states'][0]: sharded_state_dict['optimizer_states'][0]['optimizer']['param_groups'] = model_param_groups else: From 81729e388afc8c9d0d1bb15ce5239d55e1863593 Mon Sep 17 00:00:00 2001 From: tomlifu Date: Thu, 12 Dec 2024 13:16:49 -0800 Subject: [PATCH 029/128] =?UTF-8?q?Adding=20documentation=20for=20packed?= =?UTF-8?q?=20dataset=20preparation=20with=20context=20para=E2=80=A6=20(#1?= =?UTF-8?q?1564)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * adding documentation for packed dataset preparation with context parallel Signed-off-by: Lifu Zhang * addressing Anna Shor's comment Signed-off-by: Lifu Zhang --------- Signed-off-by: Lifu Zhang --- .../prepare_packed_ft_dataset.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/scripts/nlp_language_modeling/prepare_packed_ft_dataset.py b/scripts/nlp_language_modeling/prepare_packed_ft_dataset.py index 9b0a2cb60131..d77c4f20f2e1 100644 --- a/scripts/nlp_language_modeling/prepare_packed_ft_dataset.py +++ b/scripts/nlp_language_modeling/prepare_packed_ft_dataset.py @@ -50,6 +50,16 @@ +output_dir=/path/to/output_folder \ +pack_sizes=[2048,4096,8192] +when using context parallelism (CP) with packed dataset, CP size needs to be set in the command: + +python scripts/nlp_language_modeling/prepare_packed_ft_dataset.py \ + model.data.train_ds.file_names=[/path/to/training.jsonl] \ + model.data.train_ds.max_seq_length=4096 \ + ++model.context_parallel_size=2 \ + +tokenizer_path= \ + +output_dir=/path/to/output_folder \ + +pack_sizes=[4096] + Note: - Tokenizer path supports SentencePiece tokenizer and HF tokenizer. For SentencePiece tokenizer, specify the file /path/to/tokenizer.model @@ -63,6 +73,10 @@ to the size of packed sequence (``pack_size``). ``max_seq_length`` should be set to the same value as unpacked data, and can be determined by examining the distribution of sequence lengths in the dataset. + - ``model.context_parallel_size`` is the CP size the model uses in SFT. The default value is 1 (no context parallelism) + if not specified. This argument is necessary to make each individual sequence length in a packed sequence a multiple of CP*2 + when CP is enabled in SFT. + - ``pack_sizes`` is a list of packed sequence lengths. In this example, there will be three output files, one for each pack size. The output files are named ``/packed_{pack_size}_seed{seed}.npy``. 
This argument is a list because you will likely want to experiment with a few ``pack_sizes`` to find out which length From 729d2eed0ce768859221d148dfef2de636033f33 Mon Sep 17 00:00:00 2001 From: Yashaswi Karnati <144376261+yashaswikarnati@users.noreply.github.com> Date: Thu, 12 Dec 2024 15:27:59 -0800 Subject: [PATCH 030/128] have micro_batch_size and global_batch_size as class attributes in mock datamodule (#11563) --- nemo/collections/vlm/llava_next/data/mock.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nemo/collections/vlm/llava_next/data/mock.py b/nemo/collections/vlm/llava_next/data/mock.py index f61df7336e6f..596b5ad93a15 100644 --- a/nemo/collections/vlm/llava_next/data/mock.py +++ b/nemo/collections/vlm/llava_next/data/mock.py @@ -77,6 +77,8 @@ def __init__( self.num_workers = num_workers self.pin_memory = pin_memory self.persistent_workers = persistent_workers + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size if tokenizer is None or image_processor is None: logging.warning( From bd6d6ff2c0c15554a04436c65db29cf9c9a22c24 Mon Sep 17 00:00:00 2001 From: Anna Shors Date: Thu, 12 Dec 2024 16:59:22 -0800 Subject: [PATCH 031/128] Revert "Fix the names of two sets of weight and bias in mcore_to_nemo_mapping" (#11560) * Revert "Fix the names of two sets of weight and bias in mcore_to_nemo_mapping (#9628)" This reverts commit 6784db56a03f19f37bc4f37bdf87dabb3fc1acee. * keep underscores Signed-off-by: ashors1 --------- Signed-off-by: ashors1 --- scripts/checkpoint_converters/convert_gpt_nemo_to_mcore.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/checkpoint_converters/convert_gpt_nemo_to_mcore.py b/scripts/checkpoint_converters/convert_gpt_nemo_to_mcore.py index 61443a3bcb28..a1f2692d959f 100644 --- a/scripts/checkpoint_converters/convert_gpt_nemo_to_mcore.py +++ b/scripts/checkpoint_converters/convert_gpt_nemo_to_mcore.py @@ -158,8 +158,8 @@ def build_key_mapping(nemo_cfg): for wb in ('weight', 'bias') if has_layernorm_bias else ('weight',): mcore_to_nemo_mapping.update( { - f"{mcore_prefix}.{i}.input_layernorm.{wb}": f"{nemo_prefix}.{i}.input_layernorm.{wb}", - f"{mcore_prefix}.{i}.pre_mlp_layernorm.{wb}": f"{nemo_prefix}.{i}.post_attention_layernorm.{wb}", + f"{mcore_prefix}.{i}.self_attention.linear_qkv.layer_norm_{wb}": f"{nemo_prefix}.{i}.input_layernorm.{wb}", + f"{mcore_prefix}.{i}.mlp.linear_fc1.layer_norm_{wb}": f"{nemo_prefix}.{i}.post_attention_layernorm.{wb}", } ) From 820b3ecf70f17f6652fad4cf7ad41bcbb1d9c611 Mon Sep 17 00:00:00 2001 From: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Date: Fri, 13 Dec 2024 14:14:27 +0200 Subject: [PATCH 032/128] add huggingface-based tokenizer support for mixtral HF -> .nemo (#11572) * add huggingface-based tokenizer support Signed-off-by: dimapihtar * Apply isort and black reformatting Signed-off-by: dimapihtar --------- Signed-off-by: dimapihtar Signed-off-by: dimapihtar Co-authored-by: dimapihtar --- .../convert_mixtral_hf_to_nemo.py | 47 ++++++++++++++++--- 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/scripts/checkpoint_converters/convert_mixtral_hf_to_nemo.py b/scripts/checkpoint_converters/convert_mixtral_hf_to_nemo.py index a75c6876e70a..7a7bb93fb2ca 100644 --- a/scripts/checkpoint_converters/convert_mixtral_hf_to_nemo.py +++ b/scripts/checkpoint_converters/convert_mixtral_hf_to_nemo.py @@ -18,6 +18,7 @@ python3 convert_mixtral_hf_to_nemo.py \ --input_name_or_path \ --output_path \ + --tokenizer_type \ --precision=bf16 """ @@ 
-62,6 +63,14 @@ def get_args(): parser.add_argument( "--precision", type=str, default="bf16", choices=valid_precision_values, help="Model precision" ) + parser.add_argument( + "--tokenizer_type", + type=str, + default="sentencepiece", + choices=["sentencepiece", "huggingface"], + help="Tokenizer type", + ) + parser.add_argument("--tokenizer_path", type=str, default=None, help="Path to tokenizer model") parser.add_argument('--low-ram', action='store_true') parser.add_argument('--tmp-dir', default='/tmp/mixtral_ckpt_parts/') args = parser.parse_args() @@ -108,7 +117,7 @@ def restore_model_from_checkpoint(cls, checkpoint, strict, **kwargs): return model -def load_config(mixtral_config, tokenizer_path): +def load_config(mixtral_config, tokenizer_path, tokenizer_type): nemo_config = OmegaConf.load( os.path.join(os.path.dirname(__file__), '../../examples/nlp/language_modeling/conf/megatron_llama_config.yaml') ).model @@ -147,6 +156,11 @@ def load_config(mixtral_config, tokenizer_path): base //= 2 nemo_config.make_vocab_size_divisible_by = base + if tokenizer_type == "huggingface": + nemo_config.tokenizer.library = "huggingface" + nemo_config.tokenizer.type = tokenizer_path + nemo_config.tokenizer.model = f"{tokenizer_path}/tokenizer.json" + return nemo_config @@ -165,7 +179,11 @@ def load_mixtral_ckpt(in_dir, load_model=True): model = AutoModelForCausalLM.from_pretrained(in_dir, torch_dtype='auto') ckpt = model.state_dict() - tokenizer = AutoTokenizer.from_pretrained(in_dir) + if args.tokenizer_path: + tokenizer_path = args.tokenizer_path + else: + tokenizer_path = in_dir + tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) assert tokenizer.vocab_size == model_args['vocab_size'] return model_args, ckpt, tokenizer @@ -185,7 +203,7 @@ def parse_precision(precision): def make_trainer(args, nemo_config): model_args, ckpt, tokenizer = load_mixtral_ckpt(args.input_name_or_path, load_model=False) - nemo_config = load_config(model_args, tokenizer.vocab_file) + nemo_config = load_config(model_args, get_tokenizer_path(args, tokenizer), args.tokenizer_type) precision = parse_precision(args.precision) plugins = [] @@ -223,11 +241,26 @@ def make_trainer(args, nemo_config): return trainer, dtype +def get_tokenizer_path(args, tokenizer, make_spm=True): + if args.tokenizer_type == "sentencepiece": + if make_spm: + tokenizer.vocab_file = make_sentencepiece_tokenizer(tokenizer) + tokenizer_path = tokenizer.vocab_file + elif args.tokenizer_type == "huggingface": + if args.tokenizer_path: + tokenizer_path = args.tokenizer_path + else: + tokenizer_path = args.input_name_or_path + + return tokenizer_path + + def convert(args): logging.info(f"loading checkpoint {args.input_name_or_path}") model_args, ckpt, tokenizer = load_mixtral_ckpt(args.input_name_or_path) - nemo_config = load_config(model_args, tokenizer.vocab_file) + + nemo_config = load_config(model_args, get_tokenizer_path(args, tokenizer), args.tokenizer_type) hidden_size = nemo_config.hidden_size head_num = nemo_config.num_attention_heads @@ -488,9 +521,9 @@ def save_to_nemo(args, checkpoint): logging.info(f"loading checkpoint {args.input_name_or_path}") model_args, ckpt, tokenizer = load_mixtral_ckpt(args.input_name_or_path, load_model=False) - if tokenizer.vocab_file is None: - tokenizer.vocab_file = make_sentencepiece_tokenizer(tokenizer) - nemo_config = load_config(model_args, tokenizer.vocab_file) + + make_spm = True if args.tokenizer_type == "sentencepiece" else False + nemo_config = load_config(model_args, get_tokenizer_path(args, tokenizer, 
make_spm=make_spm), args.tokenizer_type) nemo_config.precision = parse_precision(args.precision) nemo_config.megatron_amp_O2 = True trainer, dtype = make_trainer(args, nemo_config) From ad1282ede62bf8c0e2c88250be69a89039ac47d8 Mon Sep 17 00:00:00 2001 From: Yashaswi Karnati <144376261+yashaswikarnati@users.noreply.github.com> Date: Fri, 13 Dec 2024 09:09:45 -0800 Subject: [PATCH 033/128] Github Actions tests for Llava Next and modify pretrain recipe to have language model path (#11424) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * modified pretrain recipe to have language_model_from_pretrained * ci test for llava next * fixed indent/lint issue in cicd yml file * fix lint issues * Apply isort and black reformatting Signed-off-by: yashaswikarnati * Update .github/workflows/cicd-main.yml Co-authored-by: oliver könig Signed-off-by: Yashaswi Karnati <144376261+yashaswikarnati@users.noreply.github.com> * Update .github/workflows/cicd-main.yml Co-authored-by: oliver könig Signed-off-by: Yashaswi Karnati <144376261+yashaswikarnati@users.noreply.github.com> --------- Signed-off-by: yashaswikarnati Signed-off-by: Yashaswi Karnati <144376261+yashaswikarnati@users.noreply.github.com> Co-authored-by: yashaswikarnati Co-authored-by: oliver könig --- .github/workflows/cicd-main.yml | 16 ++ nemo/collections/vlm/llava_next/data/mock.py | 12 +- nemo/collections/vlm/recipes/llava_next_7b.py | 3 +- scripts/vlm/llava_next_nemo_run.py | 13 +- .../collections/vlm/test_llava_next_train.py | 157 ++++++++++++++++++ 5 files changed, 188 insertions(+), 13 deletions(-) create mode 100644 tests/collections/vlm/test_llava_next_train.py diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 37d8b903afa4..d3098db1701c 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -4607,6 +4607,21 @@ jobs: AFTER_SCRIPT: | rm -rf /tmp/nemo2_ckpt rm -rf /tmp/nemo2_ptq_engine + + L2_NeMo_2_LLAVA_NEXT_MOCK_TRAINING: + needs: [cicd-test-container-setup] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_NeMo_2_LLAVA_NEXT_MOCK_TRAINING') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure-gpus-1 + SCRIPT: | + python tests/collections/vlm/test_llava_next_train.py \ + --devices=1 \ + --max-steps=5 \ + --experiment-dir=/tmp/nemo2_llava_next_results/${{ github.run_id }} + + AFTER_SCRIPT: | + rm -rf /tmp/nemo2_llava_next_results Nemo_CICD_Test: needs: @@ -4771,6 +4786,7 @@ jobs: - L2_Megatron_GPT_Reranker - L2_NeMo_2_NeMo_Mcore_Mixtral_bitexact - L2_NeMo_2_PTQ_Llama2_FP8 + - L2_NeMo_2_LLAVA_NEXT_MOCK_TRAINING if: always() runs-on: ubuntu-latest steps: diff --git a/nemo/collections/vlm/llava_next/data/mock.py b/nemo/collections/vlm/llava_next/data/mock.py index 596b5ad93a15..e7cda60585ce 100644 --- a/nemo/collections/vlm/llava_next/data/mock.py +++ b/nemo/collections/vlm/llava_next/data/mock.py @@ -20,7 +20,9 @@ from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS from torch.utils import data from torch.utils.data import DataLoader, Dataset +from transformers import AutoProcessor +from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.vlm.neva.data.multimodal_tokens import IMAGE_TOKEN_INDEX from nemo.lightning.pytorch.plugins import MegatronDataSampler from nemo.utils import logging @@ -79,20 +81,18 @@ def __init__( 
self.persistent_workers = persistent_workers self.micro_batch_size = micro_batch_size self.global_batch_size = global_batch_size - + model_name = '' + processor = None if tokenizer is None or image_processor is None: logging.warning( f"Processor or tokenizer are not provided! Fall back to `llava-hf/llava-v1.6-vicuna-7b-hf`." ) - from transformers import AutoProcessor - - from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer model_name = "llava-hf/llava-v1.6-vicuna-7b-hf" processor = AutoProcessor.from_pretrained(model_name) - self.tokenizer = tokenizer or AutoTokenizer(model_name) - self.image_processor = image_processor or processor.image_processor + self.tokenizer = tokenizer or AutoTokenizer(model_name) + self.image_processor = image_processor or processor.image_processor self.data_sampler = MegatronDataSampler( seq_len=self.seq_length, decoder_seq_len=self.decoder_seq_len, diff --git a/nemo/collections/vlm/recipes/llava_next_7b.py b/nemo/collections/vlm/recipes/llava_next_7b.py index c483ff788f26..d23159125823 100644 --- a/nemo/collections/vlm/recipes/llava_next_7b.py +++ b/nemo/collections/vlm/recipes/llava_next_7b.py @@ -163,7 +163,7 @@ def pretrain_recipe( name: str = "default", num_nodes: int = 1, num_gpus_per_node: int = 8, - peft_scheme: Optional[str] = 'none', + language_model_from_pretrained: Optional[str] = None, ) -> run.Partial: """ Create a Pre-training recipe for Llava1.6 7B model. @@ -223,6 +223,7 @@ def pretrain_recipe( freeze_language_model=True, freeze_vision_model=True, freeze_vision_projection=False, + language_model_from_pretrained=language_model_from_pretrained, ) ), trainer=trainer, diff --git a/scripts/vlm/llava_next_nemo_run.py b/scripts/vlm/llava_next_nemo_run.py index 3193b05e10fc..fd6a79a586b7 100644 --- a/scripts/vlm/llava_next_nemo_run.py +++ b/scripts/vlm/llava_next_nemo_run.py @@ -17,7 +17,7 @@ from nemo.collections import vlm -def configure_recipe(nodes: int = 1, gpus_per_node: int = 8, pretrain=False): +def configure_recipe(nodes: int = 1, gpus_per_node: int = 8, pretrain=False, language_model_from_pretrained=None): """Configure the recipe""" if pretrain: recipe = vlm.llava_next_7b.pretrain_recipe( @@ -25,6 +25,7 @@ def configure_recipe(nodes: int = 1, gpus_per_node: int = 8, pretrain=False): name="llava_pretrain", num_nodes=nodes, num_gpus_per_node=gpus_per_node, + language_model_from_pretrained=language_model_from_pretrained, ) else: recipe = vlm.llava_next_7b.finetune_recipe( @@ -33,8 +34,8 @@ def configure_recipe(nodes: int = 1, gpus_per_node: int = 8, pretrain=False): num_nodes=nodes, num_gpus_per_node=gpus_per_node, ) - recipe.trainer.max_steps = 100 - recipe.trainer.val_check_interval = 100 + recipe.trainer.max_steps = 20 + recipe.trainer.val_check_interval = 20 recipe.model.config.freeze_vision_model = True return recipe @@ -49,9 +50,9 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 8) -> run.LocalExecut return executor -def run_pretraining(): +def run_pretraining(language_model_from_pretrained=None): # pylint: disable=C0115,C0116 - recipe = configure_recipe(pretrain=True) + recipe = configure_recipe(pretrain=True, language_model_from_pretrained=language_model_from_pretrained) executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) run.run(recipe, executor=executor) @@ -67,5 +68,5 @@ def run_finetuning(): # This condition is necessary for the script to be compatible with Python's multiprocessing module. 
if __name__ == "__main__": - run_pretraining() + run_pretraining(language_model_from_pretrained='/root/.cache/nemo/models/lmsys/vicuna-7b-v1.5/') # run_finetuning() diff --git a/tests/collections/vlm/test_llava_next_train.py b/tests/collections/vlm/test_llava_next_train.py new file mode 100644 index 000000000000..2d3ce529b619 --- /dev/null +++ b/tests/collections/vlm/test_llava_next_train.py @@ -0,0 +1,157 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## NOTE: This script is present for github-actions testing only. +## There are no guarantees that this script is up-to-date with latest NeMo. + +import argparse + +import torch +from megatron.core.optimizer import OptimizerConfig +from pytorch_lightning.loggers import TensorBoardLogger +from transformers import AutoProcessor + +from nemo import lightning as nl +from nemo.collections import llm, vlm +from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer +from nemo.collections.llm.api import train +from nemo.lightning import AutoResume, NeMoLogger +from nemo.lightning.pytorch.callbacks import ModelCheckpoint, ParameterDebugger +from nemo.lightning.pytorch.optim.megatron import MegatronOptimizerModule + + +def get_args(): + # pylint: disable=C0115,C0116 + parser = argparse.ArgumentParser(description='Train a small Llava Next model using NeMo 2.0') + parser.add_argument('--devices', type=int, default=1, help="Number of devices to use for training") + parser.add_argument('--max-steps', type=int, default=5, help="Number of steps to train for") + parser.add_argument( + '--experiment-dir', type=str, default=None, help="directory to write results and checkpoints to" + ) + + return parser.parse_args() + + +if __name__ == '__main__': + + args = get_args() + + gbs = 2 + mbs = 2 + decoder_seq_length = 1024 + processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-vicuna-7b-hf") + tokenizer = AutoTokenizer("llava-hf/llava-v1.6-vicuna-7b-hf") + + data = vlm.LlavaNextMockDataModule( + seq_length=decoder_seq_length, + tokenizer=tokenizer, + image_processor=processor.image_processor, + global_batch_size=gbs, + micro_batch_size=mbs, + num_workers=0, + ) + + # Transformer configurations + language_transformer_config = llm.Llama2Config7B(seq_length=decoder_seq_length, num_layers=2) + + vision_transformer_config = vlm.HFCLIPVisionConfig( + pretrained_model_name_or_path="openai/clip-vit-large-patch14-336" + ) + vision_projection_config = vlm.MultimodalProjectorConfig( + projector_type="mlp2x_gelu", + input_size=1024, + hidden_size=4096, + ffn_hidden_size=4096, + ) + + # Llava Next model configuration + neva_config = vlm.LlavaNextConfig( + language_transformer_config=language_transformer_config, + vision_transformer_config=vision_transformer_config, + vision_projection_config=vision_projection_config, + freeze_language_model=True, + freeze_vision_model=True, + ) + + model = vlm.LlavaNextModel(neva_config, tokenizer=data.tokenizer) + + strategy = 
nl.MegatronStrategy( + tensor_model_parallel_size=1, + pipeline_model_parallel_size=1, + encoder_pipeline_model_parallel_size=0, + pipeline_dtype=torch.bfloat16, + ) + checkpoint_callback = ModelCheckpoint( + every_n_train_steps=5000, + save_optim_on_train_end=True, + ) + + def create_verify_precision(precision: torch.dtype): + def verify_precision(tensor: torch.Tensor) -> None: + assert tensor.dtype == precision + + return verify_precision + + debugger = ParameterDebugger( + param_fn=create_verify_precision(torch.bfloat16), + grad_fn=create_verify_precision(torch.float32), + log_on_hooks=["on_train_start", "on_train_end"], + ) + callbacks = [checkpoint_callback, debugger] + + loggers = [] + tensorboard_logger = TensorBoardLogger( + save_dir='dummy', ## NOTE: this gets overwritten by default + ) + loggers.append(tensorboard_logger) + + opt_config = OptimizerConfig( + optimizer='adam', + lr=6e-4, + min_lr=6e-5, + use_distributed_optimizer=False, + bf16=True, + ) + opt = MegatronOptimizerModule(config=opt_config) + + trainer = nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator="gpu", + strategy=strategy, + logger=loggers, + callbacks=callbacks, + log_every_n_steps=1, + limit_val_batches=2, + plugins=nl.MegatronMixedPrecision(precision="bf16-mixed"), + ) + + nemo_logger = NeMoLogger( + log_dir=args.experiment_dir, + ) + + resume = AutoResume( + resume_if_exists=True, + resume_ignore_no_checkpoint=True, + ) + + train( + model=model, + data=data, + trainer=trainer, + log=nemo_logger, + resume=resume, + tokenizer='data', + optim=opt, + ) From f11585ed503e3b025f874644acfbb142e09c2bbf Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Date: Fri, 13 Dec 2024 10:23:51 -0800 Subject: [PATCH 034/128] Fix SingleDeviceStrategy support in Nsys callback (#11574) * fix for SingleDeviceStrategy Signed-off-by: Alexandros Koumparoulis * mini refactor Signed-off-by: Alexandros Koumparoulis * typo Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa --------- Signed-off-by: Alexandros Koumparoulis Signed-off-by: akoumpa Co-authored-by: akoumpa --- nemo/lightning/pytorch/callbacks/nsys.py | 60 +++++++++++++++--------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/nemo/lightning/pytorch/callbacks/nsys.py b/nemo/lightning/pytorch/callbacks/nsys.py index 2a6bc3668b94..13b059011426 100644 --- a/nemo/lightning/pytorch/callbacks/nsys.py +++ b/nemo/lightning/pytorch/callbacks/nsys.py @@ -21,6 +21,18 @@ from nemo.utils.get_rank import get_rank +def get_current_epoch_step(trainer) -> int: + """ + Get the value of step within an epoch. + """ + if hasattr(trainer.strategy, 'current_epoch_step'): + return trainer.strategy.current_epoch_step + return max( + trainer.fit_loop.epoch_loop.automatic_optimization.optim_progress.optimizer.step.current.completed, + trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.current.completed, + ) + + class NsysCallback(Callback): """ A PyTorch Lightning callback for NVIDIA Nsight Systems (Nsys) profiling. @@ -67,39 +79,41 @@ def __init__( f'and end_step: {self._nsys_profile_end_step}' ) + def _rank_is_active(self, trainer): + # TODO(@akoumparouli): is this function cache-able? 
+ from lightning.pytorch.strategies import SingleDeviceStrategy + + if isinstance(trainer.strategy, SingleDeviceStrategy): + return True + if not torch.distributed.is_initialized(): + return True + return get_rank() in self._nsys_profile_ranks + def on_train_batch_start(self, trainer, pl_module, batch, batch_idx: int) -> Optional[int]: """PyTorch Lightning hook: https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-train-batch-start We use it here to enable nsys profiling. """ + if not self._rank_is_active(trainer) or trainer.strategy.root_device.type != 'cuda': + return - device = trainer.strategy.root_device - try: - # Not all strategies have this. e.g.: - # AttributeError: 'SingleDeviceStrategy' object has no attribute 'current_epoch_step' - current_step = trainer.strategy.current_epoch_step - except AttributeError: - current_step = self._nsys_profile_start_step - if device.type == 'cuda': - if current_step == self._nsys_profile_start_step and get_rank() in self._nsys_profile_ranks: - torch.cuda.cudart().cudaProfilerStart() - if self._nsys_profile_gen_shape: - torch.autograd.profiler.emit_nvtx(record_shapes=True).__enter__() - else: - torch.autograd.profiler.emit_nvtx().__enter__() + current_step = get_current_epoch_step(trainer) + if current_step == self._nsys_profile_start_step: + torch.cuda.cudart().cudaProfilerStart() + if self._nsys_profile_gen_shape: + torch.autograd.profiler.emit_nvtx(record_shapes=True).__enter__() + else: + torch.autograd.profiler.emit_nvtx().__enter__() def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx: int) -> None: """PyTorch Lightning hook: https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-train-batch-end We use it here to enable nsys profiling. 
""" + if not self._rank_is_active(trainer) or trainer.strategy.root_device.type != 'cuda': + return - device = trainer.strategy.root_device - try: - current_step = trainer.strategy.current_epoch_step - except AttributeError: - current_step = self._nsys_profile_end_step - if device.type == 'cuda': - if current_step == self._nsys_profile_end_step and get_rank() in self._nsys_profile_ranks: - torch.cuda.cudart().cudaProfilerStop() - torch.autograd.profiler.emit_nvtx().__exit__(None, None, None) + current_step = get_current_epoch_step(trainer) + if current_step == self._nsys_profile_end_step: + torch.cuda.cudart().cudaProfilerStop() + torch.autograd.profiler.emit_nvtx().__exit__(None, None, None) From fd4c302ddf89ad439a01dec2a6def8df9c9b6e80 Mon Sep 17 00:00:00 2001 From: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Date: Fri, 13 Dec 2024 20:57:09 +0200 Subject: [PATCH 035/128] remove dialogue scripts and docs (#11577) * remove deprecated scripts Signed-off-by: dimapihtar * remove deprecated docs Signed-off-by: dimapihtar --------- Signed-off-by: dimapihtar --- docs/source/nlp/dialogue.rst | 143 ------------ docs/source/nlp/dialogue_UML.png | Bin 1682802 -> 0 bytes .../dialogue/analyse_prediction_results.py | 112 ---------- .../nlp/dialogue/conf/dialogue_config.yaml | 205 ------------------ examples/nlp/dialogue/dialogue.py | 158 -------------- ...marco_samples_without_wellFormedAnswers.py | 54 ----- 6 files changed, 672 deletions(-) delete mode 100644 docs/source/nlp/dialogue.rst delete mode 100644 docs/source/nlp/dialogue_UML.png delete mode 100644 examples/nlp/dialogue/analyse_prediction_results.py delete mode 100644 examples/nlp/dialogue/conf/dialogue_config.yaml delete mode 100644 examples/nlp/dialogue/dialogue.py delete mode 100644 examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py diff --git a/docs/source/nlp/dialogue.rst b/docs/source/nlp/dialogue.rst deleted file mode 100644 index 157aaa714b16..000000000000 --- a/docs/source/nlp/dialogue.rst +++ /dev/null @@ -1,143 +0,0 @@ -.. _dialogue: - -Dialogue tasks -====================================== - -This module consists of various tasks that are related to dialogue. - -**Module Design** - -We decided to group dialogue tasks into a common module instead of having a module for each because they share many things in common, meaning that there can be more re-use of code. -This design can also support easier extension of this module, as developers can work on components of their interest while utilizing other components of dialogue pipeline. -In particular, we wanted to decouple the task-dependent, model-independent components of DataProcessor and InputExample from the model-dependent, task-independent components of Model and Dataset. - -.. image:: dialogue_UML.png - :alt: Dialogue-UML - :width: 800px - -**Supported Tasks** - -Supported tasks fall into broad categories of intent / domain classification with slot filling, intent classification as well as sequence generation. - -For each category of tasks, there exists several Data Processors to convert raw data from various sources into a common format as well as Dialogue Models that approachs the task in various ways. 
- -Currently, the supported task categories are: - -+----------------------------------------------------------+----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| **Task Category** | **Tasks** | **Models** | **Supported Options for model.language_model.pretrained_model_name** | **Supported options for model.library** | -+----------------------------------------------------------+----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| Domain / Intent Classification | Schema Guided Dialogue | Dialogue GPT Classification Model | gpt2, gpt2-{medium, large, xl}, microsoft/DialoGPT-{small, medium} | Huggingface, Megatron | -+ with slot filling +----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| | Assistant | SGDQA (BERT-Based Schema Guided Dialogue Question Answering model) | bert-base-cased | Megatron | -+ +----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| | | Intent Slot Classification Model | bert-base-uncased | Megatron | -+----------------------------------------------------------+----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| Intent Classification | Zero Shot Food Ordering | Dialogue GPT Classification Model | gpt2, gpt2-{medium, large, xl}, microsoft/DialoGPT-{small, medium} | Huggingface, Megatron | -+ +----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| | Omniverse Design | Dialogue Nearest Neighbour Model | sentence-transformers/* | Huggingface | -+ +----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| | | Dialogue Zero Shot Intent Model (Based on MNLI pretraining) | bert-base-uncased | Huggingface, Megatron | -+----------------------------------------------------------+----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| Sequence Generation | Schema Guided Dialogue Generation| Dialogue GPT Generation Model | gpt2, gpt2-{medium, large, xl}, microsoft/DialoGPT-{small, medium} | Huggingface, Megatron | -+ +----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+ -| | 
MS Marco NLGen                   | Dialogue S2S Generation Model                                                    | facebook/bart-{base, large}, t5-{small, base, large, 3b, 11b}        | Huggingface, Megatron                    |
-+----------------------------------------------------------+----------------------------------+----------------------------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------+
-
-**Configuration**
-
-An example model configuration file for training can be found at: `NeMo/examples/nlp/dialogue/conf/dialogue_config.yaml `__.
-
-Because the Dialogue module contains a wide variety of models and tasks, there are a large number of configuration parameters to adjust (some of which apply only to certain models or tasks).
-
-In the configuration file, define the parameters of the training run and the model; most of the default values work well.
-For each task-model combination, only a restricted set of config args applies. Please read the comments in the configuration file to see which config args are needed for each model and task.
-
-The configuration can be roughly grouped into a few categories:
-
-- Parameters that describe the training process, such as how many GPUs to use: **trainer**
-- Parameters that describe the model: **model**
-- Parameters that describe optimization: **model.optim**
-- Parameters that describe the task: **model.dataset**
-- Parameters that describe the dataloaders: **model.train_ds**, **model.validation_ds**, **model.test_ds**
-- Parameters that describe the experiment manager that logs the training process: **exp_manager**
-
-
-Arguments that commonly need to be edited for all models and tasks:
-
-- :code:`do_training`: perform training or only testing
-- :code:`trainer.devices`: number of GPUs (int) or list of GPUs, e.g. [0, 1, 3]
-- :code:`model.dataset.task`: task to work on [sgd, assistant, zero_shot, ms_marco, sgd_generation, design, mellon_qa]
-- :code:`model.dataset.data_dir`: the dataset directory
-- :code:`model.dataset.dialogues_example_dir`: the directory to store prediction files
-- :code:`model.dataset.debug_mode`: whether to run in debug mode with a very small number of samples [True, False]
-- :code:`model.language_model.pretrained_model_name`: language model to use, which determines which Dialogue Model is loaded (see the table above for the options in each model class)
-- :code:`model.library`: library to load the language model from [huggingface or megatron]
-- :code:`model.language_model.lm_checkpoint`: path to a trained checkpoint (.bin / .ckpt / .nemo). The only exception is DialogueZeroShotIntentModel, which is configured with :code:`model.original_nemo_checkpoint` instead. For trained checkpoints, see :code:`list_available_models()` for each model class and download the file to a local directory.
-
-**Obtaining data**
-
-Task: Schema Guided Dialogue (SGD) / SGD Generation
-
-:code:`git clone https://github.com/google-research-datasets/dstc8-schema-guided-dialogue.git`
-
-Task: MS Marco
-
-Please download the files below and unzip them into a common folder (used as :code:`model.dataset.data_dir`):
-
-https://msmarco.blob.core.windows.net/msmarco/train_v2.1.json.gz
-https://msmarco.blob.core.windows.net/msmarco/dev_v2.1.json.gz
-https://msmarco.blob.core.windows.net/msmarco/eval_v2.1_public.json.gz
-
-Then remove unused samples (optional; otherwise this step requires significantly more CPU RAM, ~25 GB):
-
-:code:`python ../NeMo/examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py --filename train_v2.1.json`
-:code:`python ../NeMo/examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py --filename dev_v2.1.json`
-
-Task: Assistant
-
-:code:`git clone https://github.com/xliuhw/NLU-Evaluation-Data`
-
-Then unzip it.
-
-Finally, convert the dataset into the required format:
-
-.. code::
-
-    python examples/nlp/intent_slot_classification/data/import_datasets.py \
-    --source_data_dir=`source_data_dir` \
-    --target_data_dir=`target_data_dir` \
-    --dataset_name='assistant'
-
-- :code:`source_data_dir`: the directory location of your dataset
-- :code:`target_data_dir`: the directory location where the converted dataset should be saved
-
-
-Unfortunately, the other datasets are currently not publicly available.
-
-**Training/Testing a model**
-
-
-Please try the example Dialogue model in a Jupyter notebook (it can run on `Google's Colab `__).
-
-
-Connect to an instance with a GPU (**Runtime** -> **Change runtime type** -> select **GPU** for the hardware accelerator).
-
-An example script for training the model can be found here: `NeMo/examples/nlp/dialogue/dialogue.py `__.
-
-The following is an example of the command for training the model:
-
-
-Code for training a model with the three public datasets above is available in the Jupyter/Colab notebook (`Google's Colab `__).
-
-
-.. code::
-
-    python examples/nlp/dialogue/dialogue.py \
-    do_training=True \
-    model.dataset.task=sgd \
-    model.dataset.debug_mode=True \
-    model.language_model.pretrained_model_name=gpt2 \
-    model.data_dir= \
-    model.dataset.dialogues_example_dir= \
-    trainer.devices=[0] \
-    trainer.accelerator='gpu'
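The same script can also be reused to test an already trained checkpoint by disabling training. The command below is a minimal sketch assembled only from the config args documented above; the angle-bracket values (<data_dir>, <dialogues_example_dir>, <lm_checkpoint_path>) are placeholders for your own paths, and the exact set of applicable options still depends on the task-model combination (see the comments in dialogue_config.yaml).

.. code::

    python examples/nlp/dialogue/dialogue.py \
    do_training=False \
    model.dataset.task=sgd \
    model.dataset.data_dir=<data_dir> \
    model.dataset.dialogues_example_dir=<dialogues_example_dir> \
    model.language_model.pretrained_model_name=gpt2 \
    model.library=huggingface \
    model.language_model.lm_checkpoint=<lm_checkpoint_path> \
    trainer.devices=[0] \
    trainer.accelerator='gpu'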
diff --git a/docs/source/nlp/dialogue_UML.png b/docs/source/nlp/dialogue_UML.png
deleted file mode 100644
index 5bcc4c01c9daf5ab1c2b3ee58effa3b7d1397afb..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
zq0g82i!6i{6!_A9tF5<$6rv_aa;qG{3Tac*BmaCP9eT*2>7M)UOWY#s8@ug_^3$MK zL5ISR7szsyJpJS|n0(kL{r&IvrulL9#W`pH0b480i7yr2OZ)x&7wN|9uCv%~kCi2x zWAb4^oFzfMJXG$MoBxb0yv|Se{q0_~?aeeVws_)}T|4c#qgj3pXHZ;%Nt9fb*l6+iY-{4pGo#-eYz3S;8y6VlhP zieWRXc9|cmK^OszD%A^PMQ&CSv&>Zr(heav6qd( z*jg9|vmXZgA2su?0uvNCB>TdPFTz$^6VoXtpPD}Y39O)C(_n(zSDk_FqyF{$b7{kk zHpU^`+or`Z(ZNLeV~_qbord#1uEZE$aQ+1`(e}Nx=2~k)b-()`eP+rLv?f>velKfm zy-sfZM5`ELW2*OX{r%8gB&!X~L5qQ2XUp;VcN?VF__ysRUXm(ZA9Kf)n4@0$zpBaXLE1pc zI=R{xpLD5oK2c*$nrNFo4SfH}R!pEuvGu+7smXPzben25Ytm8U=NOUz>4CzK%$Q)Q z)D`~{HPDS{oDOxZ8!2bv=w}}+x+ut9{DI7X~L_%K(8~; zp>V*sJsOgT`i)$&s>3Cw*FH2tfHI~3WvoV!Ze^Zsl;uFQFF40C3rk1vEQ2+6)Gq&> zNd2{6s}AC0&Ebj(k*Y`vAq9kiUT4nK*w4X{wjk-uC9gUpFPZmu{ktA-arH*p9p;3$`w>1CWE_bcURth1>RH&yZ zWV0k4H9|$Y0OGCfaubquy8L&m#MOuxXVF&x@KhZdt%X_u*DqcxZG1Tcy`t^bK5O-N ze`(zSIwQLDRYaN#L@+_O1=L|;`Hx>Xd#ZwhREi@v8&zGy)&Ig<07(gf(%N5T$ z_sb*UvXut{hH|{;Zl}OKgQieAvW~C1E1w`Q6|lf zV$|PKp;DS^>6A*ZvK}9Cl7lI?(gY;yrN(dc(^EiKa5NKck8j14<3l0C%oGiuSg@ zL^4WdD=J)PT8cQRD_l~0gL@1Z=gfiU!$-z`x zQDp?Fbk3M7=GH1B6y9P_{DqQzA>OSz)XOIcmEx$SQ{weecnwg`vBH!`S*8ta-+84= zrKu?WQ*}mqfeSz>`XTGN|B3l;J%6}fwIR2xUUt-%U}-bPd<>TX+i~ zb|RSAe!%AN^!9(%?>dybk!Z?byuhUiw^4vBR0gI72k_)!GN*z-0F55%29``gV&hy- zVU;JP-JsZoNY>o2!faM**2Q<~-V`D07a%3ZY>&v&TG}5-0cUKIQCVaGSd20P^tyyM zGq*)BLKzTk%%I~lpQ$rABeH7qn{dZoZ(VY^%(|Kzan!>#OZ>SBy;_D^VyTC1_cn~%x zAI6V=EdBY zoU_l7UU=z6yuAN=dK2enEVkI<>E@enPP4$yW}9t_!?5=WGTW_@`CltyC`VV0R@Nto2!q3Q4U{VnZ}b1lY?ACF0- zxp2Vsve;f}=`;*08=iXR8BEGNjLDeg;QOR>!wom#rT=_sYfP$azx@to;+0ol!>W=? zpnG(h7ZVgeg>JSx+E?4gjUAUxz)Fedo_iKsOYN7|TklJ@0)>ry1>16+d)|3=;5y^9 z#!x4Tr#bQGa`m#MaHn%W=$`>@@o1U+Ilw*YJdJ;&!>;A__4Ih&by>lFplWo z%P+s0F2;El#~pi&tx92^ag_~k$s6NP^hFk31QQg$OBZv3;*HnsOpcF#3=xvapp-cz5HUj=bpb}d$WI9cdfI|I_Z1=@jdu88!i`g zkae;iFmveGsM5FK;g6IChYHY@NA=8W*CBC`!~cb~0HBc!`@ZCsJkxRir4aQ&k9lO7 zX#x@zUH@uVE{oX$PzFhS1o6+*0aw0IGoDNkLoRvi;&RK94BusPSz)V@EEVkMJ7IRD zM8g@WfXN;7wwAh4ikvfLi)|4aDH$+W|2xT4W*K6dKlKGKS(G{c){I6tw?_75CbZYI zgnCh5fNTeap@bix^!|uM3UW+jd8!kcFw-FRTUNvZx^k#o59NFMLsGeHi#~GH;X#yS z^s(S*tEmiBccyY(Wtq~G(oxDfh=uC0L)%P+A%m(nCG^>n_Z%ym5Flf^xh|ADVMnz@VAW`0X`q91#lZN z&$Z>6(krN>@QGZBBR8rLE zTSu=!NEP4Bq%Ln)2L;pn`&aH&`{fF8v(#v|idQ}EbmQT5@#nHFpd%xo+kBvq;t#mI zETQep>W!Or-PI zDuZqVi~Z?*{-A347Ic|KSC|_CWkQGrbQRZ&E)r2G{h{%k{m6~UHDfK;kBNekmGcX6vJ|-|JLK{M-CI$HSF|IMp12eOokOxZoO<8S9~9=B7UoC=U?rpuz8NQ^ zF{zV{x8L&z2&EIjACwgJ1u%^@>*67+dxtSue~2k}POb{u(>B=$gAdgLRJ($vsSAu{ zY>OEs+RGX=Wpi83)6Z$LD4T^|`K9aatZkwyl(z*kpLNGRAKC>bwR zYndvxLM=zu(;)}f%o%lPWYcvPalN{AtJKU=m&4R|I)FUEgXwC2)LT6wL|y7zdDFwK z#uP%;(d~FqT*UpPm~0dk0K^-9Mntl~#v{f;4G6Sf@fVrYn)jL~iV?*vo@em{&Iq{T^2_n1{MhswUW)T&`mD3g!rhkAq)Bh$Am<6T z1=KcMZ<9X1#^1W7jgGsP%*{@HS zn11%NpW+4m9cg}?ZNQh?YkYo9Y#Y_SoweSRqpk`$CWcD$o@84^{dAw7A>Lm} z!-maj931`6JoPMMdkt2)j7?8I^>mtj_Sw?x@PW@uld$^XtvBDo#Lj{?akTzdHozGa zJm_1QiN18 zYB3Sd#Brha8raE9J}8NI6sIB2{uWW7{14+`nj_7%x-Su*;m$p_-mf5M<|SD?$UWHV zs#J%}e}J;?^_a^(n93h%%1V+T;BNZ^#{gv0KuwDOKzB*A6wFa3+MkLs!bw`VSvJemC-~bzC4{}SR75Xt#N|8ySuwia1U<5J-7vzf#4Dd z!QBb&?(V^YYjAf7FfeoHJNKUZZ=UXcrgwGks_w4ZYrT)5m0wguIsW6Hjv{eua zE>;p){o`0n_eC>rwZ(XKarjf}tDLI?Bm zsbKd;bx3k<3}wfcM7ZKVL>?G|%HNCWOgBElm(##3rB!fVn^b;@$f0vMUC0UR5J>!p zw@tla-{)BQ5GoMhl%dk%A9<9bD!F+@!NulQWo71pOT(1xXVzzbwG04oF#y9f0B6YxQT238v9g9N_R4^C8N zU7S<7Sbnip9Q@K)IM#25Ge*?X7<-N-#S+Q;f5h z7Z15xUDsQem$mol!x_ASG7Y1DH6Y`kH~q19okiOXRkUl$>5V40Nw+NtjYipO6$yLG zeFvm8%L0STc@y>3<5gC*&p))qGD{_3CdtQ8kQ9vUxa%?CB__&NMBooga|W9xhmio6 zYZnG$T6AbgNVC+~Wt69F=Hba+2e)3G%6#a-H5bRd6aMR5DILEA529r#OX!0JF15o- zCzp33(>5=TpR=sH9lF5z2m_XV`m|Iil#EcvxRkHHnX%skd!z$CE+WZvSI0Cj=Uk@O 
zRN^u+`AO2(X$54`xRSfkm=!iCiY7l{+<$au%=J~xOiK2qUU*li7?NdIFT$+%T)Ys` zf|KhlqgQE!ZLTp)z6gskxH1tujM1YZ%@gtBTs+3&h|Bx#-kKGlkaV_b_tONtyUj z=9wt%*6W!(#ED^mK3TJ7}B`=sg?YWrt`u=<8KXE?R0#PI*#k|2@(TJ$xIVJKL(xxP{FFhD=&cqTSQzX z9l(J}HXwa<{a~!cZ7CsEPvHoIIKJI;{x*q&8_g0o3D^lJNVV&Id!1_S>!t>p;|g)m#O=Y z5D+jvbdygVKq24rrj-%jn)!VoMlwT{id@#PY7q}9frKh*5Q0vLP) z`!J$!3R_B+25W@cq$`=2i5+L@Jw3D=ah#9E$6e6EVJpQR!%*UpMPxghi#OUMf(g$s zJ2joGUw`@gY>aFb=aVe>3VEuWWNp)pb{2IPWM=bsPF6{|7W9}V3CyYa< zDO1wF&JTPjY^c&QQXV4AxgRR@&jfIGexNue08?e&b+$a5)+i_U56_9!zjJ3C6}^j? z=e!~)F~)BSsEw}ZWPW61E~}*egNVyp2Z=5o(S^17>VWw5@Sxa@ms`jDa@XWhx>Arr zN_nL({Od=#=4Hy5PcSS?LmT>9`wauV6)D}!8US$CUJ6PKO9Npfn*o0I+4XGVp6z^$ z828#k*@Au|L-F0`oX@N3{*{gFd`jrX!Wux4IPyMIvvIOaxq6S2koSwq>tBY6S?r%w zX>9d*Q%Y$UM<2)78lDt+YSGHGRrk9o0hb1apbdbqT~_&tJKn1*u*U7`+r(90 z`GWONe<|T=c%wVnZ*~IhH=%*lCE$E~(SC%86@p5TGD6`0dI4OzraIA_%`q4>)Ddnr zJR2gKPU`OOkz+Ch3qF}2Ae&u(%8pFD@DB$YAQmF)A!%Gzi@xTX059Jn3GE~##29}? zd?CdZRBHJ?p@T+LXY|!*2IrSxC`xo=1^P$x4=Q;d&f*rUNRylZQaXP|uj^hbIu8w; zv7{D|VNyZtsmf_tu-t|auGcaYIv=%S+wux02WnO7>hMP($ zCwCH4lPz=|6hTV@oUjv`Qq`hRghvgZW8 z5T!*(p3MC?(ugZp$pNyqTWtgN&JTXRqk>d}n|~iT;2q6mvNc}+Ed|(pnp55Z!&}dy%rIvR zWm9!e+ZmiVa!oUGBc8=l97p)v7UhC8r~4&IJl%&jD8d-tal(S%ks|W~Z!w2vf>_)} z2l?647YUrl{m-=Y^X8SX!;!XssO3w-!;^b*?-!FH5&KV}V{0YDig3tLF99~jxXBdx zwsR9Q5%}b+`$J%H#q_>X&HzTEd)@25g>g7VN%ru7VBm$xh1>rQ#tn)b=ixzd)z-pq z^+SWCt2##0GPmp(=TeYW4$$9HV4I7?!V8ykVb0%djxZ`ucOm7iW~nYYpnQ^Z|M#Yj z{4w-qo=4tK??_Kr@UQ}{ufhgI_K*0^-%R$LO67hTPuo5xb$%i58i0STuYK&2{ zpSgg~AFCTW#L|xU{u1@Io_-H486h-MwG_Z;$o$@FJM=U{Nqlk70-ga+XExcijxO43 z+sqY-y)xT{p_6kfzWv1_@=}yXgjRXdjHp)0EGyEKH^vUk$NsDec?90A9!}h|B^*-( z)+4tJYrv`kymsrFvEoUvX|WFCyux-^>ApzZ1S$ksrS`)l6oC`cJLBVx)u4tMof&dS z);tB5t3ndAKhWuK4>Ga};E(EFaSv%lbh%jy6gbY*De2KjM0E4ytpaow*-VCM4J_)5 z4&v2%!93&gdKkjrN`384>mV@`u%BS19KO~>(bhWwXmD*M+$1bICM0j9EmJd9O&enA zNP+&r*c#21)vdr1vCy>JU!*ss&)SjEix63qBt@Ls3^owZ_8!w`Rr2@Z4B$HKPkuk) z*V|-%B48T!aAQxj3XL4iS<_QUd|9+lW+W>uF4?3U=ns5BQ4(q!w&N2A_>$X^pt#36 zfAr>;JmN;h^Di(FfnJc?R|Z|;YGx6vuZTE_?~Hj%mB!=!1-&erpZa)p6UxjU8Y>A9 z*5Y38lSwztD%cuoR5H_U>oN$n%cwe*N3a`gy@R7s1Uf(7*1ej)j!0IWVIQE$R%VkV83&0b1-QjPPHbM1lSp8X$lZ~(AIR?_NlyVVVT zSC!Feaup%pk$-*cuk6#)v*e$5%K!JTyDlpspj;&6QXD43p9mmKxpdtg|8mfO)#QPSmcVD7Gx4r8Z??uQ16h; z#NrJ1yMX%}fsH8h(+7o9TJR?%b4_4!U54%@Lz7;TU`|uf`ra_yW!pSNCDq(z;n%XR zVOf(MpW%*{D9$sT5~u%s@P*Jf!qIW~(ZF{)&U2wwT6O=+X)b>GF3q;p>$m$TObwlf zg&15LhJ!ByML1}QVtU-p6N=K-U00iIJ?`DP&4j6Th%8`|RGl(2&SElVKBcrtsjs8gZ%znfg?OREteIqb5c5jNO#V5V z&*1QxCXtI~`3(I`F_)Uf9o$X|iy7XG)v@_iQaN!9Y^nLFs&Jv}SIwzm`A@fO)`P$z z28D+i<(rodix97za#AMe6h(t<$It7KZ+cn#C+%2_)_x5C; z?ud;EGrt%q#SCD_NcTE;Y$m1fkibo#risZhOs0~TUtrRhbJ|7c*eF?J*mSV6E5>@D zBV_KKmPerW6tF1l&kwWvsKz#0b&|Ha&u$mcpE)YQO3ipqIX+?R@(a(E+guSNOdq0a z!ZCD>ryywR7a324PPkwkW5XSJL@pw(OeBlKA)2wmqUHa-{8oowZcklTI_wZ~XwE%n z;!e?upk}twVb5P{{3)GR%0^toC{6hr3&%ZaqRa>4sqtz^>an)MWmR6eTA-pE+;j^m zEr|trp6^+V+u@*mUr{z7jN5PB6S!DjfsY?8i?b2yaENZO?c2jbaPRdoZu_M~BZZIf; zI_AAr-zzpB$~Y<6@hD4lUt~;gOocYpQCPYYxLLnf0#ATBYD7oPwBsosZa!Wvl%_r;vcIK<;arUUPD!$W$VQAa3P6(dsqe4NS+l*h4*wX>qvsV_*!J(wY$6r#7P z-)u->_6fz-s1WCsn{A%UwBom1!H>-{Nz`Vq@CW1F$*alVt^roq_XenEaQwb4{W3}rR@4_0!f|K(EV77gNJd@Eha`;RvX8T|;0mtocoJOVa(Hh8_T4EzHMv;kP^jvbbEuwm@Ka-!ta z-MJK8_qsrFH)(%bui}X-2v~Kz?isA@ zx`;)l!K|@%M0%+Rz_9T6lp(vpT!>n=U&o3x(OJ9iu`NP|@{ohG7J?hQ%2zy-(PUnf zradz>|80s@hcNS?v&dW&dbGMZ+|Qc$n5WCEpDD1|jI$&$7|ew_FL-7MKX|}t7qCqK zzYRFubf2{q<;Ih!xG3~vl`I4a#O-ALbuhf65g7lwFrS+HPz>_|AA&_J;qd0Z^nFNn zq*}_dN$x;OnM1gaMu~^v1+h*`cr)hz1j8vR^R~jKUT9}awERq*5GDPH*OFt6)nr_T zR9VO_1j{034Ttx`$wrmzDPqD?UgrPB1g8v2G6UAps@pm{SC8%+QY29)Cj#7@D93dB 
zS^p;H@$Ipd>!c*)D9_>bLp0e8*Y4Bk5<44+spqZ2WKk;S6Kjj zS~bOP{^Ky^o_nfo+uKO^a`8_+@>*5FVO27Z_egqkPQzqRSDHZ}&zh_AVPZ9|WasXv zMU8+$`1UH_CoQcU80{pFJDbW_g0aZ^>q2BI)I{;uF9yg{owW&v!U66qpBXg|ni;rZ zC(`VG>*#7zl9l6(7)~dOzqz$Pbvh31CjwZSm^#gsmvL1%8Mrz{JcI61CjMKd3EASb zI4w<2J9PGWp2oB&PIP9U8e?ozDXv{y@8u<-=-Aw(857W~msf6?Y4}9Xd|S^4Ek#U~ zD~s>~OX)WLfN8+S!Wc(N;YQoW!OMVY7|A^L*$Gk?J~}DpF$zoz|8Dl2|8+0M;`wgq zwy?d=>YH3J)#jEz2#&xvcmeJEx#UHLygZIOWK%Bvy1;# zX)58p-0P4=C#uE6Fjl4J_EhpPLM&=d#aihlP?MJ3>W}$;vbNrHwpnsF+1CGFSNqwR z=Umyv^6cY#VGKCHK8!=(irG*OVIb`Irk>~PjAZHah8=)Vja04aC~s(JRCj$=Nb~*% z=D}C^G3C!7->hUDa>2;Z>$z*lyRcqz)UeWO-M0Wv@$e`H7uDY1f<_Tg;{YafD#>ok z<&Ea$8%>tWfU64&x+yv0kK8xQrj?lBzfYz0h;^G3YL=lNC& z-S&-}zULj#SUj}Or9~iTk#wUQPO*9k0Fg{f=H`%M`}!P&Fb*MG79vHn8@IG_F!;w_ z5I4(_YKfsEvD%04$g{f^Rx^)Gqr&Xh=rjCje#vFyG(xoX9o)5jrSm#Jh1W({mpGGP z`B!#44eIPGl}2sGr;eg=H-`W81}Pgh23usY3;9d@Bc~MnWY;%x-JxVi&TKC{rDg=( zwm=GAQ{wk(eXmGTnhm=%)6Pln2IjmcFL1;I7~L3Q&FkgAl^Dga;~n&%;Q4HK{Dg9hf#aP znC!WKqm`+wMsWqyuxZmTTe{O*Z~9duOnMPeirrLnXqr5RGpn-~Sb^eFZ>DLfl@`!A zhU3bz&!_XK>SwenBsZ=Wc1VvBFVop92F9&cO11uC3EFW7Iq%}5Yy2k(@wMm*go+ngY4u53EHAS=q_50^DF|M1WbKmW|5SH)JX@u1&w~@0yhw7|Pm1CW z7@WA+#kyuX&%>eha2iOoujVjORIu)XBV@z{fAd_RI9KGf@3|!M@dg~^!Na>22cmQh zfz?-Bc3`i?-ny!Rv~;gK2I#HbKQ(w;3pV}iV)EIzPPOSM0$*b(k30XVn6a*>IqoNR zY8brA805L#ZNJYIbqb3rA?V!{NoK|EA~855{lsX3w=TtX!A^KeB7Y30ZoZ-1P}_Wb1$Aa~Xgl)%8P1uw`Fy9aPd_s>x_=P{ML@d*EqK&b zC>7f;Z+iN!wtt}lkL_IACTy-Sk93meAC}eR1z;jG%`NSmKKjQmc`QH^){d89CS5$I z!w3wP_fEd+P~vmldMe4%A+XJ?$bFUBe3eaRj0-z+5EWGN>utU4BEi>8vc^!xV};#T zj@qTi=x41ROrka1gPYi&IJIY8%e4B5bW<3#(-D-To#N3AJuVj|U{7rUMlv1(@qV-0 zt;)}KdGmQb27pN@gAOz=7afKCk7&nOQ}E@FTf)|J2q2T3Og)&^k5cqEU;@QmR^mU33| z(EfK&HHQ;491BQ*E66Oi#qmcth#Pzz0I|BrJjnTpZm3l-(3o~%V74dF8luA8oJN6pFS}l zIFOjv*KO4Wq#kABy-hOp1JmA_JVoybPmUNjUGausd45Oub8iUX;znlZQ-)gic!8Aj zy1krg7VOujf5*;&A0FDKI5CLKzPg`m7aKjVjtQdtXAwD6SylZly*_v@GywD)ovT}z zbu(~ZZMQBREYJ;_X}&p~bxM?qr5iM#t~93>bp)6QZ@yERauuWiM~s#>ZqT$KjG(f% z;umC#d=-x#;j>b=6N@Nw!Pkx5IT=h8p`EXu#P17n&-ISyZi_eyqrr~<)+SVR*alHj zLlX2l{;Dj%5XysyCdD*Pa)Lbg42?g`qV+wGa1A|XNIk?j17cu@dIsxlqLStc7ezoi zBn;i=Os@ey6Sw^YAfQ__;zy{7Q+@@o+q94ddiMhYAZD%IX_6NZs%@ap(aR8qM#>h8 z7rl)Br!Syyq1K09xJb&|U(u}4%ewOv<^z#nce$9a4&b+>(ZbTLNhlF zPurB4-9*_5qH6Fadk!BD(R- zU6!SOK9Oqd8_Ycup~gx77Q_)qKx{E%uCcFsfo>&!O2!hnyv`45(5X=U{ThG|v1VHC z`_8?nNRUpn{$j>g@@aYQIyERxX~evg8RIU`43eXXTo%%O{KP>x4sZjx^#|{aaHQ<~ zdj(MkVm}$W-|vPsat(~_N%X)uAS%qSssk`%=c`>7dnGZve+Mu}L95VeSLvsw zlh7VAcAkqSyBYMlJ}|WYItxGeH5qS^uUI}J=-;-OQ_EpmA>awa0IH3tuhs&JBb)D^ zu4)+f+6_LV?0QX```RrWa68Vhqc=Z%5+a~Oa8ZIo&l(uX5ARH9sZ{LlE19bvXS;$qap%z`exh8_G* zA~Es}$F8<+$miT3FHak;QA){-6MrWQLc`#M?rpR9>@bn?8PRGK67wwVhog^QR~xK` zs_J^-5;!f2p%Df-F>~NLGiyeDuZ$<96dG?tPN!`eL4S4XJmN&G-N` z4I-H3hc3i7sz~tA*~}Hl-N=C(3tb_><-l#kADu`4Uhg@da@rs2fEazRk99Y?z`C2L zdkD5m$1Nd=@LvA{3P6G;ewa_^-_`94idQpZ{1O4e=C%)Kvzz{`ctNkr~Ur=I6@g2r;_Xc>|4+?YWdQ(Rt`x27VtMT%6b`` zAa)h9CL=RQ8}~3a<`HJ@Xqv(A?vcUk6+7%dKZ0yAs*%6w+AR4wbL|->aV5-M*bEvf z<*Q|8JI!j9O@kcK(6*5HFB6CIzIRD`=Lcq7q~Pbbq>UX*ErjurHaNnI5?G$TW3w}$ zl06)&o@c{o1NX(4zfKvqT`{Kr>_yynWt$um_#ID>c$b6xv_E{s-W{alIMLWE)S=L? 
z?(F(fv=~z-;ong2{m8{H*x2>i=xf+dbN!@!IP^k~X$nOI96l1wi^xd3EWxy_gbOym z`W*+*4YmzIe~;^Wes{Sx-4LiJ9#Z@2~^CHE`epI;C8-!L)Xvd(Zq?hOB7Uf1%)?^|1+!=K`%sXImJ zORIg6_r^O{gBYFwkb(oboz`A%s+5L#6)|sI8s0GOF*Tm0Ai{2E4?tx4OyuFly9FUA zE3Ln|)DND84t5)lOoEt~O|A~U4zRwiXV>xXkc$@?Ta6s=tnTW~z_{N-N&vBjEwB&< zmm=5ud8xtGVV&=UFlTie_;q)jE)m_at^>*X{u*Ew40t?94h1R32fvJP<8ZnC+K-^` z@-cH_{2fH9XX31znbdz++8j1JH1#SpI0Ag5Lt-%y8$cv7*SlBCsOh#9#%-bz?7#c;Xzef12LXeI zbl3y<3Lqi%tb)Iy*qI>F-#RFQg<}BIJR9*bYzMOt{iX;J7VMDcEH=|mH zFpW6WieOjiV(6APO=a(AQ}cNG6kBbxv;pt{$>LB~@8?sgvyxj>8%**oFBp1-(6Ioo zAKobG;OY2o_qvGE5Q?s+u*@W@o1R21-XG;7d*8oekLVcG9s^XGKdb4vy1^d6OwRAT z+4>%x+RcF-Blu>Yy@)}RD!Un02}!U&WiTb^1ED3j7;#W3q@GR(`1c6)jKT0XcYo;d zJTOn9wiDAoU}G(&ILVgDI`Y^bAGx1Q+i#DuZ!nuxpmuV7xJVqG<-_stcvG=*kdC2trx?A&+~*jU9*$wc&PYYRnFR{lDVK0Q5|*K8b?f?>#Sq3y`T*q

F~A&#&JfxGcS~W>Ynb%~GULp#a#l zcW%Z0!|-%77vZOL1E%VR<A~tL8 z+{`DsFlP^Vk~E85x6_HcLBgO9#B_{}Kue>O&$aac$K@REvxwtCcq|EGc?k$yS}GY) znN>)sS$=S%*$Am)HI86^i#3Y)j>k;{OG_`f>8I)iQNy5?_el6B7~u^7L=EGfw2z6g z%dqc9%@@Bt$2t8|V8kot#&!R_f;iCO(UAGcP_xW8;d2iMaP~YOeE_+iN)o8^DULk< zU+w$W+41Ds!4iMCwJDZ3#8k{hIYzxT4tGV#mCB*jz^IEGeC%`3>yP}Gl5^ojC zS5dRdCE**x4-~(Rc2Zue+L=%qTGZkK3e!Qtgy4Ba!M;g@5ol+pa8)y9TdU#sP#nUx6Jol|~f z(n?PH>jh*h0HFG2B+9wqtI^j-E~i+t1fu1*HqSuoOSmH^Z(uF0(pstT(((a zp-vb4C4yvIsK+F$55gd76$c&v&Gvl>7(`)0=nX$6y54y^Y8&*<-~P0me~dA^SfSm3 zHMF(6(D#lJhMzbUA7jIP-e%SZYcaFA#%mSE6%5*)x^@PgWN~zc7Oaq~_ zf=a7g=BmDE!M}4v#MnEA_nx%^sBO`{q6J*sj@(SB2P>Q6CI%g*-XxyaVXf>Yg1)dP z)b+mdd=vv~Tb-x2F35S2oQ8kf0%e@PC zcL$;I9I3`~w5~G-TC~kZ5a=7v8>c>rz`-+M{f&S$%0xkj3~@eozOR|7E)&7{`cJHH-g$(WNC~+pj}NuGu0$+~YS3A)_5EA_Xm)R!2_uYf@ zA!>#)sx>r+xfrLZ$W~mh!OMV7hbyI9;8^e_L1RZ%@SZ{VJTgwRGSAeHe%ame13!q* zS^g%TPZjcY(;REGGEWmfw9k9JOX!EWGOS4(wR5l&Nb>sXID#T7rRAATmE?XYc@GC? zNfk;H)I_YY)D*PJdtkh8u^j*xl7w2ih43BXCzCb{>dLd zGl8O29fh%q%fhvPsDwtK0KH;;4ws^Jr(}-orrsctszA6q3bX7}>;3hn}R;Uj76Iq;njLQ~;Q}n8;nX(mYz|^ZfwYP0=qC3Q@ z(Ca0$byG z{b-isf=H{0?BLaUB0l_j*)t5G zH$1b8K4z|y*|#ObRrEmNz=!ugmeKx7cZYty9b#<8ej};RKi`B_pZ)y}f`-p5ps!RY zj=MJEc6{e?)^UN|XbyDXgM*c$<6NMWFK(F7+d|+UH2~#uWUo|)5_iB8yP<82S@n+R zzpZDDBC@Bf4cIB!AVVa7VxVb5?^8?NM4a~ww#a_9ndRC_A-&RD5Crsltq%kE0yXE$ zd;udTL`_Hf`kZy` z&H};O?GKH85iTa35o&^K^woSGCQ1A8-=J=kyqn$|6nC@%&eM8pz@7KOF~OG%-}|dY z`_;AsZxY#E_9W=Ths7yoO|;5_y9i*qpguSq%ardcx*4>RyDJiC(Z_Df82Em^(;s*B zSmLT^hnKi7#dgXQqW!xRZ0vHHZubU9R=pEKf~yjF555U7l#ez$GV@x}-M!cW>!r25 z?qUI9%vzc40%c8-=dmCO12Jd~BCo3Kam`4!ADmkRURQ1i4)+N-BfNyxqxksKNCXj_ zAI(ntAnm*U!!}Cp*(%Hwe#4r=RFs&GMUdIi_;^@i9Ro5xd_7}(8}yrpA@eO^Kl@o& zi8A%QQiu@uy^N4TRMH9_7A&&TWcLvyI{uTPZ;I(7 z4ekY94~5K6*KZvAV08uU-k;ou$sK3#a7C$bzR)3ks2yE!P>JAZ+r+)>&YRZDh?9zL z+xADx?g9|HfM0Z5rAD`@orYlrC*)n(tg1ODTbrF!~_JZur@ z8}M`s?AEy5Mi50!)wR>c%k~N1-Ovqnj%WIU#b1W^4IIu_c5j&|(Hv_V(pHbl4kfth z&M?^jd4VW=B0*O@`nD2KsY35A-^sOjTeXQ?5{2j-I2hg-&#@ECQd2pXE1ym{O(;*-ZjX>0ts*pyOXb11MMz~Id6xL zD}IRS{%(}F=)D*tj-Gt*WP0uX<;`j0yH79Ju*UC_r`bjb$&w^8yiV@@c(ym!2MV^dK(CfRQxio+%dHn? zh=tJwRWdBAqQ>qfO*BIT(B$;Nmm>eX>goXbs)j?iw(z!Q7tkN*%)6WF!)7_waNvJLLD94Y}^yLuGtx+o8pugzn@gU6nQPL;p{y$zx~#)%NU*^y8?O zK4lGL?Pgk5UDpWQfUX6dGQ{aoJBNA5dK|E72l7^>BQN{Cqc~&CLh@3m1z3$!B>P3` za_rDc7Nd@BvAy87ZkcNSr;U2_Kgr_or}?=#+uF=8*m{R`8bySqD6VpKd#Vi2CCUw^ zF<+iA%o=FVmLfHom0fngnL`i0@ZkgVyH9px!vf_rwwJC?zooazEOTrS<*=jDp60=F z8yQFjxT%%;iy_T77YaQKU{kisF%RU6UpuHP;L5d*or-7u%Qnaxj^oewGE^0M?@39- zvstQ)X-2gZx)!$?U>?wv?fugC4bL^cZ}gj}CEPcOEI|kU`svw6ea=(2B+?BB;BAP! 
zy~vVg{pm-L&t`CdFrwpBA{9h$6^`q>y7;YiY246@Dw7r+AJ#E|h#{LPTR$HhXJ$^f zM|892oqbap)*Jr4SfAkKyuoaUfyj(~>NyY&q)i?;K5*Bloob~9><$O+-7*dS>-#6s z&72=nx2BK&2G?b@(#`%`t!Zs3lCT?x_*_qpDW*@ply}}TV0%Zh7txtLojIZb7{1fn` zFlrF$=uUj~e%)h%i9{N;&+sa^5eTQE^S%EfL2l^d^T-+&JkrA$d!jM2Bd|)yv-er@ zKA!L9?k(Q}J57BZYR)$_BW4Bf*3UBO%kqAKHXH6a^J3DD;XJ&?1OQLCTWqx`IK1x; z>G!_2xLx-^Qc0j|{}Ckk(Namo%x;jUP0zu6Zam8hhBS1LCDO>JRX|7#SLsV5m>gZ> zq2J@9v9#koM=Lt$qNna6<_8dRY;m*ejc%RG+9WCo>X?O2zOVD4M@oF(>F$S(pW^Hj zemVAAt+kLy|?4P<98;r)G z8>ERM#}@=W+t#bk^0gT(25 zZ-=vmcK`Br>*+Ajx(YG;t)7w?mn+ebxbo2YULIZN6y}-Ij7$X)cRZ#z+(7dL@YFt0 zJGq`NzDH}v+|g~N?4Bc`KLee!$hLC@)zX~1PDitcK0X}oe-fq)LL%#;qlc1gs?mS^ zhmOvWYBpN6tCOf_|Kl+sX~%2wmBv=XJj7B0U6njRiiDG3#BusOgW_((GdNDd7)%o; zy92uN8I&h;H8zHvfYgEB(!Q@1c7v$IFt37XyPxe2M7e8LK9M@~y$fK@v%~}g+}GyU_b9nenk==fja`bbOBOzcFORVK-`-X96mBWixYlN^J!6`Ndy zE(mi={;ldsH}1Op+8I<8G#a{MQL`5C2OJ;um`6-zz4XdfTHPm?#s80Io1#=% z`0L#p9!8Pm26e)dTxq{DooT~St|e|K-lgl$1xoe;wN=VA^hwiH;(e74;W(KZu%-;z z3LsaCCNPQQvlZ36B0U>I${4RYEyKu1ROalRJk2$PD z)C3i5EWRw(@-!(Q_s)xk)07Z&5K)Rk!;puk_&!;?X$^BTJPsBGqI^YHhK2h`o=#1` zqJ>abe?mW=g8pSbL7*|ZG7!#af-Sp3S!7<-d1F=(mn+oztK^=#Uy1b&3~e$W%wn_K z`b64b6U>uZP1g5x-BVz{j{5j`b$6{FLftR|BLrnJ*#n*m)>gG*vV!_#ub;}6w~R`N zg^f4Vd7Nm{GSov=0HjnXCuVvPqQ2`cj-om#>)eGrX~=%}sM0LAhK|Nne|kqvU&{1D z>&RnqPVb0Iq_Uz#{8Uv*BBF~`&tK!a)ar&fKCN6KEfT3&`@DZ6m)r=$56hUsqT)hD z5k?$$#HC7pN9s%9tuc+#6@F&1g0z$iAlVV%!#EPZTFM7A=~z}EYf+iUJtftp;V}Jl zNs3&WS^22%q-t0A>yeNn@IzD0NAnR_> zT2tey1P2DdTDV^FIeIebV=M#G()==%gH5Z2kAIyg;K6&gXT}_+w@bqdYg%fKmZZbH zTJqxpUh!SR+h2su;hk&UNzmlXnuP>g>+&Nnq&gsG-B%4e1G!A^mWa`&!HPx;Kc4Pb zpz$O*OulXOhmH1Isss*dQqd0sTn)N7PLY!W1xpQMT-A>h`Lg5M_&+t^9IUAqs1@mz zPqgh^masKT$JugZ4V-9}t#^Xam9p#3>pm=4FOfV>QHfarD!=bF=+H(min{*@KxKjH zfq5jtOF`BE2Y+CAO>P>rzI^)~7>;Tx^Y!SBZAppc96>fJrXH^wH+q_k0EfryNlwFw z;2*vK{ConR0=EFxvuoG>vfCNUFAL`v6brQOK#9|mWb&}&L`Djv^NB>%G0K0%H;dZp z5sAAI`-KLyUus&GQ$!_V8Jw?1L_rrsj&(h)>}1+riP;Z%foDp)N7-OH$E%%(Yv6vV zb3Sn&woB)!ZTC2UFFwet8eQWep6DDNnIp^CH-PquzkeOlJez)o3Owwi9)b=ignbpgk+Mq#$E!W_+4LE1_zbqD*O+U%a%<)ON&=g?CT zVHjJR8`vMF+7J%&hDGg_UjE?z5;HPrNnB4He^}5Sp0s&8QY&>#HEKRq7yNjBGd_eB z51*V3zMZ%4Ck%P$1wp^2j&D3(KU7eZT@6Ei4 z(|tu{OplJi>4W&e5YSEpz%u5ST!Z>>FzA|z3!-H6R@Z)Z=QiUyn<|yM1hw9OdwckE zIQ7i_n+WVV$g}g}-HvRw;2Bekr!In{B2oiSf~jq9IiBx-ck1^3xME>`xK=;lPZnMP;n7{p+mxOCH@ha4&|`iBo+A^hRI-=SUur8<=6pIvhPh`n3>@;A(C zdh;y|3LX@`xI1US^bL1lkcEQ;FTM1VEX(xuuYFziM)B7lto0kjLyvHMhebA`PB9R{jXrXPBg%6ZwGW&!}yToWJc!?6z(LRcDDPqwYvjdJ4 zI*S4glqr)5!Q*zHKO$eLrYL2dKo|biv}`(-ol;_$I%=Q%*1beq z_4iL0gd%#=p#eo2(M%n+pLhvK|5L?sER>Ob~-3g=-G-el!Uh+ETQnV--!c5?Z=w@mIQ3X@L zEdWJTaHh(vhC|s^*0V` zO&2x`CJdz?2*7jExbd63sF#dUj*3$9&J$*$eZcs|^Hfr_9FNLEbeVv+_Il$6Q;KvH3`yX`6zsP!p=chF(g~-=?F zVqf$|H}sby*G(_W(GgU%ah6^O>hCnB`l`=r7#}Li2RNBe>!{_3trC?b#@}y(Ym8<5 z3tb{h43oy+ZWOfFE2WUp;KZ;y-C*){N$tXTLATeJY1CiZk8ZHri%aBClhLqa7g0Mu zl6|ve3R>G&4yO<%IEj>rTH?=f^pLe65=S$^vZY4*N^GnC>K*x@G`iwVWbX(?o+kU^ zdsF+@wq}N&sEcOJsMGDKi37oqFYrKA{~Q-VBx`@IuhCn_KT2r^`##=M#b}ERa{mRx zf=^*E8(5BB?KJ}Q?xHriLZ_o|4ukm)B=s#MWBq-ag_XhiRa zP=L^OF+GtVXU|;RmBnsNbx1}tn7PSis&Q$KBMb;^!7+xYW?q-3{@1zqJ2$9R>@m0a z^J3s9dM6jeJs)YKMEkL*nbD}3*A-T%UVl=V3K=h|WIY0Jzdy=Cy^K2Sv)@|GSOzv< z5m{oI5Bp+My{A%t|E0B7%|0rMG2d9~oPFQ#R9B3T_~I>5FDo764@kd^#}ZEy$A`^7 zj!m+$2ZeFv`e%Nx|23=||9!leeH>qpkHm63haGkpmUkIxEaF6pKb)LBXO_IAo`)CS zv+*)?GQPCu@;O)HtLyGPx`k`6y@fBKm@8kh&z?0~*4dwf{yzaTG!8oGAk18tgr#XV3n!oWv(N(r2TZ98m@dIm zA^*@o%c5b=l7THr^;=~EE#{g#!a4>-{^0}t|U<{IsSamhTb2Ds?y;@jq z=(3?JUQXNjaygdDx%iSlg;&Oo!As;rFnF>DW|#DX1@ng1n7Uwzswd=Y_X!w`Ss!1f zbD11Eh46)gF=OTN$Dfn|iZjkSLzZ|FF*|^V@xKsDNnL!&C5YX?u-^d(AdWjiLzggN 
z{CLbRxhnh?n;;Ju*gtR?tz(Y)w&>S-!yk0Z@e{{|y|84{cx+zIFW-;Gi~8l3UoLtJ z9=eN+gE*W$!DWoNIeFhceZt%CObXZi{W=+V;NBpIefcmf-}4lGid_HzKmbWZK~x#B z&)P;&f#}oixBn-ca`LIzli`iBn)CBlw89D7h;aA^Vtcdo$=O>Ip8>L z+Pg9s$Jrh9tEi%e%0wzYJE9Y^>yJ`3MpWv56-ydR!NX#76CMoH%o@P9HeJ!y@gCz8_ zTH-f#P??lPs~nU&%ET;FBLuwGpJiGz#?n5iC5C)VkclEDsdz)j-%Q7@{81b5IsPS+ zZxz+)EjV77=zEi`eYi9uX+)QZ6QO!PnFn+kgI0&bH%E zPpyy8AC|S9Bgjnpzi}F!H6*790b(+m!OWZOlaooWzfntKb$X=j;G=ZrEJa&BDyq%N zi7#q_AjM-z0pPSfRL0&BDA657)>~5kx_#oAEFP{LMPg4^HXIcliDi~})OX)Tkr7w3 z_MSg*-RCVv{8=!$tXiOq&X5u)YL%%7xnd#30+WbYffYpq8)GH6q6&8YSeyoCGP$iG zm#(5$!=Z&P26M%`UIV52{1~w*un=dA<8I`>aSEeCiy}l z{s?9>(Iby&6E3u&Amv8;^sC?&y>(2Jxe8aROT}BVA(cNE9qs=V#2Pi!X3+}S6ch=$M$Mdb@Ef^X~KVariX2#}O=~-aU{a84BT(@xOD3j{#$UPf(TrFgDOQYm z_5gJ218u}V>sP4yqg>J}#!8};j3X5YRPDnrxy4C%;Z7ozt4Joc2f?_UR_27Ozs7|M zqITX2G%UkqqV1W=J++je$<)75M|eYl@Vk- z)G?mgUM$Rd)S~88fz)5|a*Qmcc6HZYP*iET+(H-(1%(BsYWo8(sNzRZC9`~nR7uB< zSqe~b{iD3%GnnFU1BzHSt(jsoGSsTs8QmUtxrK?e(!B8QelmhGfs^MeqeJJEv zU`lqBDo4-g;0$I+bj0&R;5H706bnpZ-wJF6>;IscK+LU3F<&p1JCf$>`NJ7?U~6-2 z#Dem%hFUiki}*)s3qvK^mpn`pC^z(pf{O*F!hV-<>utA&(=dZ#0%lNbwt1XEp_~nB zvqrYZn3u3EUN zO?#(f6KgJ?^B|VB>Cv-$xcch9V}1N3(zW)j9=;i#aF&2oAC}B#AT3tnkSoK z)5er3Q^Gs=n~XnhPCNrPxKG6tY-W7HdFNvgVMh4arW=P-PB|s*Z6GyD)k=UEC`;KU z2O`GdrSvn;JQH3T{bG3W<(FhLZoZs;3=jvdehJ$aB;_c**?y zGtY%#SZ=B|K!HACFHQMX{vR*C1e-Dszzm9gv7FS-&^#}2KM2{6;n#QxztoapzkT{c#r9We4z@IULV=3&1u?Inc?%5qmBw|a`sQ;#-!H2@?tFIZ3`sz_( zgAFzSp9;$nhpFf{S6+QpI0FN6gR#N(o_p_&rO3V*W?j2v*2jj}LzY@P^uEy zCgE3LPZJJO9*LPgYhwn*YiR%TFfelU6<5h->nEQ4Gb{mRgP5(JKU{3YlMWs#u_%5s zc#UM+#`ecZCg3>4V{#QUEzpt6sFv5!a&KV`LpCO}Uupl0S2iA1b47n9Cl2E*NRBFa zXJ6w&Y)79z+VVlPHy+Z@lG#?1!drX(U~MbEVV$#T+?L0vSdsU5jD7X~%cI1zd@#|B zp-#DD@hAtIgyvQf1{Rna+Q0M9ABASs+i)znSm576eEw)K=};D_=mj%Nj6v)i!lfsQ zQBxizQ@I&v&1D`)#hdQ`t^e0lC?caX)Z|09Q&I~l7MOhd!zZbFN1i{@$)svJaxCq~ zQV07#p8MpAshuV{T`_AfE}+}m&>Jn+@G509pN5ar!i+J*+=^a{=#Nrn-_s5D4P<&O zX9ew)&bTN^1XJOdqU30e0zQ$g@Ogbc7Ny#3-)YXER}!%aG4U3J^=B;YC0t>rD5y+2TH}DnpZ`rFRh*T5|MTZ)wjoZYVgY#j zWOE(B^m~56MM9>R@%Q|M%|sQ?@>D#eyp4xWjwLU)#Nwe3r|Y{H$+R&G8^Cd$<7s%1 zV2t`=Dn7sd;a6mn@nFoa?t1m@6P8$PadyYlhf^$o^s%Mgl;E7rbZf~2X0O*|Q6V0h;0yCf5Cr{J0+f0z|ePB6tPAF@u{goF-7o2p@1CQ;GO^1DxB{wSNO2-q5&*h6;lzfqiR#?_Y(xXiDLWhob%45~|Qre}o=+krXe$ z9q}RaG{--gij0zFAtj^!{`$v`(D)mclw}~RiuN-?m{KtTu`9ZAr_R4@I_W1+^nb70 z_?vKv1|nmQta|-RZ3EsW6^3K&=|$7IBW8@zZ2G+6{=>7kU3aS{Hrna{MA@wwb?##UWVm5La=&p=YMCPKk{XzwC}VF z$R&hMJ^j=${>|}Wi!Hea1(tZS9S* z*}sb=phjbJo;Fm^^mNoAARdly*-Q9|MY%!8}3A@NdpQEun>jW^yX z8!Ny13TIGY4+?zwKWLE7prDJ4`9yqm{m7#aVXP!?F@5?o**slErR4H(Z3xkvTxYrGEC?e}8;g z&jE@MCdL7ZU;pMevNym{SXPJ|G^Z5Lf9|{SUA*vr`|XKg@4fa4d*kK#l1pL}=r)pt z=EWCBhYPT`$hB8pBXwsSb!IHC~+kWzssb6qb#QEo) z7ygV*;JL5L{&>;9E%s8eWcFBQUqAMT$A(*O{STJL2!TsOErme_?k_kC8&-cAvk4wS zpWvq9T&jtiWv3JsaX^i;Sw$epevE;I zt)=gHA57n?ue}yd!SYqN-+6o3WaCXSK(T*Vcm4IGFN_*BDqN0zLO3Wn`Rz%8o11q* z9~?4dnXobjVOGb$(V7@wTowaiOD&-Th4%aV^s_oZ@%(d`L9yHC#Xk;=S>h5;7hsQ; zD=*gp3J%JQTxTSfei|Fj#emaw*Ip}wkK9vcl~q2XVm$fi8T*7nrY_Ulxc++Psyxd~ zP0t^mm(OLq&JtLqq+YiFzy?k!F~R`!AGhK2-e? 
zDKbV42#$b=Q4EuDNL~|UmfFC=LrBAntZ2l9eEe?YaWRr|E$%tXV;aziCt@&OfB z7%_lwqkNKzv5jSsi-)$JKdgzJnhY8ddc1x0DYJRVXqL0_I`jU)>q@fGKFfA*lJpk> zxsd72?Vsogi1Bk}>3Eg?XCcX!njxxTD*C_It>m)N3`e>85>JsT9uRKs4%tSAk`?pH z#663sj|^z-^uS%^p?!8s&MRw3RrX|?ydoR@HunpGSO6b84HE-c)U`wPugI7Vq763t z4wY!&^BtZnLL*Y@&T>>B{$yoldCu%vczHP)8)eOr7nr7-9k?rAYIN<^MZS3GiK)Gu zUdo$M*rHm##Z4)ct~GEr4i{S#L-OfIO;7Mu^fQzRv}PZEuQ<8-8m zWo9WU&k{3cP7l-GosO?UriYoBLd!3an7ZLhBTkL&)vK3m9M-cprmuEEyt1fiMJVz` zA|V9;v$S8t7H|b)zs#M- zrEe~7T<^j(@IU?Wk}!i08JL>B4mN%3(G%05qwsXBFUu+Te8_(WH}G8Vm|sV2kBu6q zU^#)m;bGu4+@P7f%fqq?UQet zouXR&lL7(CpIcE>HWj(3uJPxiq5q3kJ5V2B@jX@`m6b<}m7_H#(UxlUCm||B9n1a~ z@xr3#f38#(a~et{2~4b>*|pr-Lm13{HU1_w3 z_4-FUT8XH|^hFx58Hy}E6Wx)cE}ErrZJCFGPGiX`Ae#)-spLy!6Y>bcmLRSq98+Z@ zGg0_wH`^v6c`g3LMmd4}bweaFnQWFC8_C&+e^NuS=jz%Re^Y9vvOk#usIYGo&&%I2 z?N=!z!sd9Aefs3^QC%Hlb|SkmxGI?*y>@~RH{X6qB++1TDD0bVQ<^o!=tZN^7}=PZ z+vy_+Lox?1=w_^wstyn;e z+kO5J*19FtP$-Rt!ArCp0DIBI9sy+b`8F?(${*9PIUQ z=iPT;$(M`p<^Q8{U(d~;2Q7|$8I~Rr7R4Y#Uwo;}`>2N>elR@x#A7m;AOjTm5JWbv z$M4lmH{B$AP`rxeW4I>&PS`kkAY{$6iFjdt)sSC0OFAfB$}% zC9!|lb(dYGt8jqgYAnC=>$86?%R_zR8{Z6DU>1$tf5eDjuZ%~rtkbu!7sEuf^FA_2 zvBctwgU=bE^AxZYNu%*noL|u2aKk@j8L!2#H$$(Uy~Tb-S|w#rL{;s!+ZV9x)OKO9 z#klW?a;r~fq|Djx9z}mU1{*AM--zybp+9)=(lWTNKS%Kj3?goKq8obLH6d0iR#m|2cmc~+8 zd+xaxW-06?A4y$>nH9hI1vYud;NA&8J0Wbj*_QImY_gK`97T*?fBkjocN`%5DE8er z=)eQQ`WtK*S+T4dKAPgJk(>W>OL!g|Q%}M`#S9D(PRDckEacrVP`J%D+sM*m++%|8 zGMKmr#c{_S8=k=oirr;^0((&S0gADCiKmex*TcRWhhYzjk>Pa=n6Ul1dG?YReEii} zzYZ&{xKgG5>bn@S>tBl>D8~tQlsq4aOw1ckU*4dDP!Xeu3ZajgeLIh+%XL&OR*vQa z(J7%&JwL=T+Q5HgS*_|5(fIzsQ%#KY%&Aa1QlU?kqLQLCGW-3>B1S{r^!#CD2sEmu zBF4@t=9wg>L}Vq#MzNeIk5ZCNKw?R%5{7^ZY12^$Z&5 zW2$VFCJO&rKYvh(RlpQ#u2=91EvglR#$G&sm=DfAR8+J?1K>%71en%br_5xIB_09l zqCiJK2!WniqF)sB{WMx5T9dE6lgVU*ymH`_SEW&Gdj2rUXdeT+{3VVqwy;I2-Vjk5 zC3D_tCvsj`VZ-R<*h*BUYN)L!kdY1QE~uUrzJjC|c5zw!$l+DJl%Q69naJydVp1&& zlSRQ8sh}|=>P;H$mZAxH0j+WT8A{cS(l80$RRmw`_U_X=bdw)_=f87>)|kp0 z;mBYax=uOjZwfSL^UTB4W^Ulcfsvl@Nvch}gn)cZ#3rO*?4Vr@H0cxM2XD+Qulq=b zJZ;i`1n1zz?PS~tK8j7@?!M=q@Z^(E;LD0J_^N5DsNxOx5Pbc!+K3Th$al!KHjarhegg!67jd){M81Wuo^% z`J85D6EqI6LsLw8mc(P}n9!X?lvP$Lz?TOlQTX+sJzvT6f_9=o7MP+ql^D(9G!wG{ z#!r}tuYDc}cinwYc>3w5u`$~lxOtt5e$+!|1`I`iUK3M>H^y4DYpl5j9+vvc*KevE zHpOU+zu1UInc0a6u<%YDB}Yn%GD6I)D9wz5yI!fFGT7`LW3MQU@yoGdpiGs@%##i8 zZn*x2aN237$Z`ZH{N$(DgmDu&f632u#~rta-~H});Yn=L$aAcBuby}a{U{#d_QLW7 z)38b3*_|YE5o*0xI zYfWlQRv#zeyaQ7t7~OKio1Rz}_W+lJFis5^w7f#jBL5sHREE35{W7wI8=YnW5lEC$cv2er>KQhDUq&*aJ1{M zMKhWsTY7LanMf18z(vK{{-9aloMFZxg)E5^$rSs)iu2u>q! 
zJ*!}+(-t?WKvh>jS+3<@kgBj-PC z-F(xH;g@*v{@QD=gs*?)>sVq3dr&Nj85E!~4mV;`?ep-J`}0@^X$S@+w%h(wSl@nU zY*dXsA6P^5DaPteylme63%g+%op)sh1vdik4L{Pw(9SEVxgKIwW08rhjVd{`X~6Y z$RgMz`+)tzXLs2JR^|onYjHJRe*fmIUxj5b$Z_;BM~9E&9+%1N6Xqt(2kv)Zc$Li<}sA1#|FGlg7!DI9R%8=%Tu^1>g z1?Tf!ciw@)xJ|;BFtD<&EU5$|>H0(7xj1j9hZnI#+2fBt5&nw};2+1}4re6I4RgX~ zn|?eTfBcWbz<~pW_t|HjhixqJgh7qZV_K9#Oikbe@#~4|xszsH+k)h;4 zgk!&tXrj{TO+Mif0WBn1wQMY99^(VG!>KYB6fEK-Lr|MIs?=^csCLs#Auf z2K5mF4?qe&atfSktfY7pRrAs6wu7EDY*`*D2h|T?Nef7-mOp z`}spmsnA|0QXWN2c6fkxkJ& z`jg7yLA+zM7?(Q91ZyG>N<7CRmRVGx229rCAPku)GCOCSedYDFCy{^g`9l~`W*n3` z#lgHVY0{M|MoJM;$`u?KiEajk5iK_k%LT7QjSpb!@WvDKx{8b;#0p5)K|v_=+gL0Y zzKoWl(8_8zL?@|=Dx7k~Py<6IW&y^qDNwO*Iy_oCPo-hW*c5+J;H8PsItU2|5i@mF z*2qLOllaVBoCx?b=<)FF?|wJDi7ysp$}(mFuwCQ@7=$>O!u3!2RdsjVXf1~umaVqh z5?^6_0$;og!$1bU;DOTgn`C~7weny-5LZ+HPc+V@$8Bh-re!C zoj3T15~f09<2>BVv3IPs)*9i9d+ixE-*hwiV%^k<4XtH%lgJpvdI*JR;>dG3K@p&z zR4Uac%v6bmvS=TwkicBQ3v*N|%6Q1-1Q~#w7k{Q!J}9dRcx@^tT{Ff)xQa@Vr$IKN zxT)aXci$}=ojrj8B#gI(-k2uewHr2bV>{s=XBBYf5C`OX<3V&iOdaMXf2)mHEp$W2 zuWFZw)GDQwOuPnIu2pfmqT9?M+SLCkYAJ>gMa8DxD*CgL1Q}p?RK&VIaQ}Vb@++^D zez^}G7&w5|t5Wo>$^6P_v&}XOn_wW~|FQQKU|SVixC06zh+-px zf+C77f}ny5VkZVB7$E4eJ;&}uF^^&^7{|akb{)G>kr0zsP)boGL_og#zH7~z*x&d6 zgr4V~=RSAf-!rp%V()MEo>)y9H^QW36ZaF&e0}l77i0o+^>2|`81xG2=;Nyfj>%Ep(_8;CJO zXe&V|RN4bc0fj<=SOCIS*yO5HKZvyV|067kQzW@)7XP&t@P z@(ZZ~f(^{(BJ`O+X$l90Zx31~aC>N#GYdYwC`lxYDNrF3+B7gl4uaD5Fxt?a%#OfY zo`}*yX@p2})K49X^32M?3{$-Nnp0W<;sU17G$hMp5THS&={1EhO_op=L#mdLqkgM* z0!58=1XPG`8l|sbLnZwtoH+jFF~MUuTl#g~8jQB!N(I$<@dM3(G`i&u`@?-lPPQ03eHHJRun%L!k+d@Fb)d zrpY1wO*5I?K#K(1ox(#^1*F%>JV~S|2~xq~TEwuo(Qqlg$iT%b3A8>D>LEz8gc9&7 zHfDwzjmAM{0LT8WHh2nT0<9%{Y}rIas5iY;KoKe`P)VYUH)@d-wOV@!EsZiJi@nG! zD;X#x6Z7hQab z8#i{GTW`JfWQBceoM+Ffc=fec-F-N}efMtJeFb2Uxmp1VQ|DBeKg2jaN&jSm+8OA@l-wFyP;8|wei9J0XJ~q<0$`+ z?leqLbUl=|;hzbmI0ocLjT+@Hx%3h@YUC)_4D~z^J1FEg3^e>mJZki4d?;?L6BKK& zZEU{lm0i0T@vlvx7V^IHt>{y zuJ~BadhEP&C-_@9S$15I#7w3@mVe6mr;lup+Vbx{q;yVdB4hi_05;qZPLgc52x*XFA)KD zv!RkX23FA}Hv-3ky#}Y})22>!y^lZM9gHKT8ekG^7ACVExbHscUrq52eMGk-;k0^x zn3C(KZZ5v#M8H_w3+%g(Jp8cS6U*S;X#rlOPB^}|8wwv3+qd5iJ~58RQA*fBp;m=Q z#O#@~vGW2uCD2xl8#TsJT*tt1_kQB9<)e>2a%Y_WcTCnSgStNfenO6r$qMSWKzaNd z`OVkvCit-!j0vrsJ9os=%-vE>#rDfq^l zuj8n#%P={&vb*qt3uUL4{+7}vHHk4NJ=xc2=Q4QrWc@La#6qZl<1++e0~vizt5Es% zKvHPguOyMgiM(P`pP9+;4++8z5JeH3Xk*qR;}b&E^1{Drp8sUEU^1yR+&e^O_lHq4 zj=wib0tr&_3EWJg@T4rM6iX|K@|KaBXd{-`I24$+hqXVuq&)Y`j*LIgL}{TkLLfQn zM=m@=1*05HGR3Q}IsdLeCU8@L5(rgQ^`O%9s)UhEL;zKlex;A~&b-(y}|9{>wlZWv`C1o-}Mvhp6bv5L?tqG}+Fi#~?Rh2rTpix>F z7)!tuh-EK4a6npL&*iIhEd*@#$ecz8Px5h%N=RhFs9SDUQ9>iuJL3CPn z?)hiCDU+tSmiTbFH9pp^j8*?!rOTsVxE=O;46MhGAMd7P@IT-G@N$Jvmjm{5haGwZ z98%Tux*2)>qA2Q8rKVhIj8d5`XR*WaJae$>;y>=j8*aoPWoLIG9BeU2-GWTZ=OKa3 z5{wa0*bb-(;M6PEVuKCcZoBUaM_OBB zSIO$~0x@;UR5?fX%{SkY-4)z&+N|07t~&;Wd+)W6bj5HVh<3f{)Fd-3>QvoH9!YCE zHP?95QoWU4Ho$XxHY@h-ZZflsHJ;Vc0I$z@=hiy$e@fYM;f-%8tX=!xH%f%PXCYN5}FN0Z#XIg4D_;+Br1s}n`GeRSs`skpjl`#BSh<;G`=uG%RGX&O}SM+ zVIHh3KuTUY$b>(Z0lE;e)-;$6n)7#;Cnt^}r6r8NABZVVBq8mU3E2N`YFY%~LO#mz zv9P3fm%>9$NY>LDkBG89VWQYtkYj~47D1QG5Bez!w3VMRkO+CUGKi#+&lJ!OhIs#p z35EtH)FsJ6Wh4Xw&uu7{KP>^tqg2XCWcm9qsPR~GNPlLcXdq!0Ac|-PiA0d*a~r6$ z7|6_e&>(7%Osw;{0)fwl2n-9+cjW0P%vOV;R=gZqX#K$VDWOL?QEJ>O-(+}bLKpN? 
zI^gj>CJ}?zQ5Gv!u2#26;j{YmhN%@^W(6Ap2xgXQVq8)7tT30xsck}>n5j~%FZf)c z9JIk}^HBm(6!2Ocmfx^3rBGitA_HlOd0{d>Wth`^RHiAP&!Z>@2ITjswJKxkHvNcw zQex4cSl}zetN>a<(FSY2SPlx+khNXte`ac`Ka&b(<{6QuXo2Tf3RNq#xcKL&M|nak zKV|Zd_g$s;k5o@ei?lB!p|Nx-jaO+kQB$O#O33v^MJ@&r3(kZkNrH|!2FCpTKm<8C zjXm7}r4nLoFo~Q7<6!w{t37{#C9P-m+`fF|VIVZ=H(r!UBt=L|BILw(19wnN$H(#x zJMM@HiawbxYg>!i-F}8c>ML-h&Bs`E{rm5~;iLC4?qIB{)>ZX2WX1IT_*lu6_na8` z```Z|MWB^h4hVsu2@@x}UOjtZ_r{Mn`e+&V%(ID;Y}C_hZ@%hox$zcpl6~r_r?`FK ztD(V)E1{${#PRjrci)NQogNV<#&qz!7aS^UVEGto;7Hlu+-o>0>XOSYb+xe2rd>Pu zpy+$Gt1XiZz8)m=wsbBa0t%6q#Tv_$X{~r zVu9FYmt6*555Ktm@UgjPkDhL=hHJUk-+0a4dvAXn3p!F9Q+LDhSS#}wBk6#Y$3XcD z-Dg8bxYN!!&CSCxqntdr2L1)uKS+DZDJP;02D??^-|Vit?#!GPtBjJYZ130aR_t_n z6^m!qmW4709N0zd=R4{%@D0Ho9Zedqg&hR{a69g}gLWrSuncOGbLXCYo_ptmx81sp z*FjxghVj1lBfxtUeU;YEMqPgbe-&SS{iQ67<552=u2>((JH6zthTo4>S6KxP!4Gjg zjyo3A=`)npYPmTfq$ed{Cxz@5haKJ&i9MzCC9+AsuPN`mZ@lpqR{dWhj-L0yZksdC zI30MU)O!lq#Tb?4Sb= z#$v3V;D87+db79)aROv?#j!^Val{n+=0iA6YXj5|7cLUdiH{*ehPZ9fZ+iDS-fgxy z9LY;ti#RFyZ*s)bGdRlW25HCR;D2MUz4pQ`8|*@v_#Jit9SW9(Za4VJI3FMRm#bA9 zdDbEWuloFa8}D&<;n=lrzWzphlyt|jRlG0x@m~yw-es00z5nxN0TMgK=cBD&#QWUk zQvNf~JOjIXy13x}cmy)}ML*5c9R( zgG5R2LM_rFl2)BQ86pbcI6#Bi9j#NDzBWQV7nkBOQ8qA%Q0Tjwy(E zmLGO7DFVDMM1DvHRFI5R(w+wP@crAd<}m2STImeNi;v@czH`6UralPo8RkTYnA4HL+iFrXw9 z5I}*G)nL$trJHWP3FY1%_0+m_@kJMxCVu~eiV+f{qv(YtY&kC7bI(1cop;4exhdB*pL;Gz^IP4z2<7XiXRo5}I`3K7Wy5AgypNCE4?ss5Cbyq4EQ$W_p+ zAfvEYpwhRPN}-zO6}xtXjCe~4H8LSD3C+P-TIsaYPb;-;+pe_zcH5QizWbgMR<}tz z270fJsxcTXU3cxZr8emM+i%~#bk4cw27OH6%w4Z2*H7i4Jw&ivJa6Tw5=E2>Y#9Nt z&@W(%O>{y|6Du$qWr)kf3ua+y*sx)xqmMpH?*D81URRoger-Y9pPYCW5NJXWThbe7 zELFyVKBF*btO-GTaO{704gn!xpYS9B81k9?n5m36sK%BvuDt&xG^mU~D~XVca1-L= zNRc88YH^B#4)%XRmDf*o{jZX=O0Zapax5XC{>%EG+9O<9$B6b9B^L8aX;6zJKT~r5 z3CQjPj|aJNEUNzpI?tL%i@=6*C;G{PKjTCt2|)4zSgxfq%Agh(__S~$KeRVV*#C@0 zV#OLLm!i3hKM)v)#Q`CS(EcbbDFPJ|x&DArnwSj&4P>z*EN5{bmqbxNfkKH(;E;la zTo7P%21qGl+ENx9wZ@h73kKw&K7|#R3L*?eo+4WF2DMoA+O<0U{ zjD5c@vrZ`%rJq#V4@66OA)})F_anfS&#VYwpI99IRteq$$TEt5zjA^DV(S zbw>2~4{rd*o3mL6t4Ar(>+tTDIQ-kfKgOlvi z-jK6(tF229-2Xrc$IpluK1F8W-Kp=jeM_w{2JPDQkP>#QdK~M_v@9%rKWSpAd-v}0 z{?Vp&o6^aroLZWT@orW17la@|DKmck74K3vm)f*xQ|i#ZL+OgkuM*u#SNZpY`r%)% zboSY2mD+8;U8(iftx69)^ib&+*iV^Q=u4R5v?~4W#1l({2M<HY=9;TZ+iu&g)V5u_5_Ytd@PS+;q~(A1`R7Up9el9VF`SH- zo_O*})dE9)`Q@k5efQsA+Pd}DrG56@2lf4W!2(}RfIj)}Crh-UO`B~>I9jVT_qVx# zT0JY<*I#{A!Vyl=UZ>#vvk z_a9KgZtl{oS+gF3=IcxT;(u4o{7wB4*4mjYz5{`B9 zx=N+#)2Eku_wFrixYJG@OF#WM&5!Z2(h2=7{cb6B>eLZsXKR`jq>@LhZ++JFJa{gw_r^ibeh<9kP+5`3Sa98v%G z6DF3fy5cIq_vq0BUl*u%X<({3P>#fkP6w1VAoB z_6-*Vt1`@rT74kDkVRFU@R885{&=QAB zu&9h)0LFxjC9T}3eL0SX2(iHkDoYCdxm+?)b}}0#kmLfA&qj=J=bU?vn}Rd?4?d`? 
z>weUcSRK;{TtM3N8s)HZu_|OX&Z*>iulL|Q(C={k(kfU@vM*Kyopj=fD%jha)FR+G zN(QS5ru{fgRt$0bD0gYBf-R`a;5iCog?%3Dem;Bzd~K;D6Wc+l7t@@i7WL#OhEF?({H}~+AUXid92XC-EFkt zhG-+56;Abw5L_lqoPh1X|8mbh`z(ANY>KU}N8|XM_NpVQmK6E%_JcEOr@EQ5XNXfY zu0(3E66*&#NWhD=hjesdl}nj^y%B(mgpnq;j7cz@_J8c0!8Y)pf1Zx}u!I#+tGMN{ zN-Ky8jpVSKW`Uc9`lfR>`YI6z>L#p0NdpQv_M>0mC*rrczq>kh>$p`Ktc(>db+w2f z6wA&0$fzbKHb}qpC8e*4D=)tiD_Fk57WnO4&z{HOn5|7A(*${TPtR>a-;5mXuDjtn zoLf2qJBT(BUk&V=!eV+YR(7y0lmOj8BEO>qewi@?ep+Drs;j!XbzwX4fi@&OSB!Ws z;3d}0nKes%L9DvQYOY?Lx**`H42S@0FqvW)@*)TkcQ~;gdGy+f_3H~$`HB!_uj=~6 z7hk#SQU2lBB7QiwTpw}dVQ$Se8bJ2N7<6=`IP=#T60e9=g0*U;M-^1I z!mnA7a!WbW@|%8^pLP`M!N68&CafxA$}0pb9$C05W+6Hi7rlj$>P^$0AOqpqEzGc| z?CjDYFi#kNWcBrFWJX|l#tAG*`nCc@>WeF~Dppx4DToSH2zD#K$j%fbtR9}BEyFE% zc;*J-30pB4Sdb{|{}fPNA&O`dW{Ry)MY_ZYV+o`Hh#jDjZcq&`#Rz{8k}z3kHNi$p zrX&PbB+APVLL)$;((ieKR;gyR=_k}|0CBN8jX#h85S~B6oAQ*~VEm$dQTJa^0&1}{ z7B8SG=r>x?5i3NpnSRTdFlETEh663@f`L&0FH*m-kVD#oIWHy_hBg)}+SnKeWn2~J z*I9YJh6?0>M+E`u6SnphhcZdqR@74Sv>^$MTjDrLCcSv7*GXM1+amv-X(^- z(0zy``2IPsMgiiT+N;RkF#7m ztae@=PN&TduHvSH;V(Y_(zV;JojU&9G`O^9A>ZNzJ)(O`WoQ7 z(Z-u>jGw<{#De$X?)y|JdtrnmlLj9JlT| z>xyrW=2%tFT>+GN@4ffB#~*(JW4>Q;lus9N&`ihPR_`3^a<_6{w9WVME3g4}DBp-< za#vq6MpV9~{XuXEt}SbcanxxK&qO#n3obXc&BS+#|dFxdUUh)mE1yo(?$Z zKq)tO!aRrftegLNGkkcg=TL<9MtU18oSj0*Nrp1-U^Vec&#>=n~Y$EVoswR_>oS z{}Z-h3|+DFS~}TuPJ4s zGx*wgZ(x}7`&{?_+wbD2*DLWpQXB6!(?#^ZpL|kwLGb8TJ{^1a>g|RM9fBR!o4b=w zI!SgIkkHD+c+RZZ;t%8o?4W4cw3)-imFv{8qv-h^z9ip#^9^^w`4@n#hT9#E!~cQ% zuKr4JG|vM1B4M&ocG8T*&K&y7>v2pExA_*EyP0U4hj8o^CZlll)tcg5y(@M>&|jv# zf6#`xIC<1vv*Tj)QA{`Ghg#ChkRkDV0XxE*nv?Qtj*kM?CL_}ek@hlw~6 z>jC!^j(Fk@iW6b~mRn#)In0}lcimTBg&&P8`?$LG>Plbl*%P}_P!>Hhy9SQh8iC`b zE_dU`kCUC~r=NbB+XTK2s|&PQf|UPHynnC*(@^}6y-Flkd+}W2FGV){K#tU>aT!4s z`3%jBG$bR`s+nWF6=LHz^R*usRK;nT3e27)p^U};UwHoz!%zX4frT<8#bf!=mBQ~& zU_+w1dNe+foHALZB?JMTm|-v@vzR1;i=TCLy-v$8BLTO z@uc8DC4#iic$ig8P{Bmn8wC+Au^GH>hza#hkruoN)#7z|996Qz=#L;WS?dEQHj%iJ z9nk_WCh8{(ffm3nG_jTh6JRi0q!slCDkh3H1a6^XzYpaXh`%VA@8mv{q5PeT0dWbx zLZ;=j5udqp&w&q$shFTR@Icu*OGnQ!L|II+XjnPo-g*P)s=^T#2Z!r5TMy2)&K0Lp zT9qL3W#k#Jk6{2iTpypvTQ@25*o#@1DvrRX3yX~=u!wMB_--Ry}304;LTa=qyH6`{~i%9hu`VAciV#Sl| z+<6yw?6EyuixwOBdW&+x*H`dh6^@*tTXWf(uI_SmWm`4to`L-wtL)^(2`geS5w{<< zet!hVfX}#Z$A9Y~)D0X$H3d?Jy3f698B z{i`6TX&JoKw%oX-IQ?sbveOBDEMt_#)X(q<@&1SJyI0ZfV@8k0(FF5k5@7AdjpZ1$ zJ@?pCwsPAYj=5*Y0e^p-!%E*Mr<{BWd~USHi_k~d#{7hw$NM8*M1R9Q(*SL-5+;gj zV^U$W&9`s|;Cb*n;Ez7?sJv`#wRJ1^_tVbs?Eq`!A%gZm1CPSX#N$ss;Xc7u{W&v)4JwUwn~`rM0z2m z4GD-@4U~i6`y7*%pTAL`redBvRsa40=~YzLPwps7NPV`D@l>qf+$G!v;AX#G_o|#ba_Mu%i1aBOwwjUMg6D;DZ94|QMexQuF|N37^YHtHuu#K&9(n7pu};shlX zE8^{va~RZ1q(TIv$@L2=V70pZz2;HMhbt4jB)^TGq?$;%g_ErK6aD2y5E(*U=%Xq@ zuUuq;!9_LtiYU(qu%y5q%3r|-ZEebk$V|WQ|0*q~W6nZBtAGWDOT+;j)kK<{qMGL~ zB{39)D9}rZ+9ZWbD8I2MB9vOMUW|-?*l-n}zfvEG0AmWV-9NtEseaNFY$(_7`K{h9 z4)QHwf{Jw!C6Y4qKVxH@utw@AM`8Y%TzLOT#rT0DK#|v%TF68KgoXM~nBTz|=i2M8 z!y=zaZUuZiY`o4o-p2x+=>Cp(k6-aOTgE^f^UcGe8tQAgVM}~u{+l>FUVgddArT5h zJ)Kfdbp!AL{@?$8N*00e2;3by?BG_w2h|DRPLT00KjLq@?Y4Nw*ib&glYI*uebc4O zfo_vcH*rnYY3wHB!}gi*i!d4suIjCpp6G`;9P`gjn0sx9jd!N*zJ)DiDcJidlO>e|{^G9CuZ zJd7221nPkM?uSF}k&s!-tx&g~Yl!!~mUxF~xW=0J5WlROi81|{v11*5xbcJi{CNvp zLwxu@>#Vb2TN}3=CQqz|zW@FQcNd&FzXnI(HE|SBixw>$b|m9{uCa_w`TjQu$0bdh zHrX}8u~7W5-3eoJ{RvV7d3;s3qr1Bac)y}^a*i1{*?1G&wt-jWNEhgGE4J zfAOWOfkj(%a7`!248L%K0ux35gd_5X4I5&y+~4r=dk?|!BGti~fl@9mdij#yA`ICb70@0rKpIIE3Y>ZBCb(YsOKjB+>Jbb}iP zUkqH7)V%oyII?Pc9GkV4EW-Qp%P-x#@4W4P#7F)H7)zgb{yA{}GQr@o)G&>F-uuZ8adVs*|X(Xqqi{$uns1Z z=*xu1+s>Q6z`cfdV>;cQ^ur`qALIP>Hdr6!YKd2&ns{d(El#6bV?ty;j(2(!M~UGe zC8^W38m<}M<$A$S-;lwdx=ql(Pl5jq_R(UPiHVhGo_@;RbmKqWx=lGjaRMeNI>BC) 
z8~-_}ckQ*;qYYlh@np5|UeMfiXy4wgh4=sYSQz&O>W_}pCu70nnwaQ1p?7bTVHfm^ zTH-rpDDH)O``_yZ<9(U$oa?RE+-=i#8|;D?;rlT4Z$)IhPcx6S8p=YtX6pSz7$pb{*lPYEjC7p zBT!N>=YRVBgZC&KDS1QE68LfeU)B8^ifiSUCiQ%PNy_*m*r)@$wSpm()H6tbDN-1v zk4efMqho{erFgT1hG#du#$dM#^YYG&zG5mu|21N7bEv2QuPPCx$13xN zWRYe_GXfuV&pz)wHw{j*=`^Zax9;MEEbo7w+0-J-l?IRC+|+@O4#YsL0S1c)VnEaz z6#99V)A4U%pmsBy!~OWZ`$#^n82>pM=4}JQ-Mb*kMO^I0j<;NXzia zC!e^xu?lH8Y?BF%(jR!IUPcD#%VSWm>#n=Iv(Gt8Xt~1Z8k`HtT^%@TO1jzVSS7Lo z1~0#2RSzB5F|5*H6?gCCcavu!(2m`qr+>g_La`W&KwbIHfxz({ch|ZfQ zVW5RAx)=m6@7lC!?T$I-SbY4(t_^=5{Rn>`1MV9jPUE)Pa!c2%cQ5fDV)vPKs^787 z{$+g3z6z^jI5G84Oe*mOh%3F=|N36nSNwc5!eEeoJ_h3hGtU|2$|t^T@FRcc&YfU6 zUf8e$=B4LfaF0Lnq?_?8>KcXNM`-S#_!+CZ>R=GQ@+zz1OxaW9MXnYGl{P%0b3QuN zd+Ftu-4ygSo@u(`iVb9tIRh(GW}@wut-TCZ@2~0hz(@9OM|Q&t=5oF`R%@)mGfhlC=G~>Yu02LKT;fNKmTT81QqI)GT~(-xKGk z^5t?6CUd4>m0rDim_&d8PO|)gcB+G2CR=Q=nLF(8BeCjd+t|>kq}h1!`4EHVTW`Bv zR;96SSFBght%}_}Top78_eU)Zmg&o-OP2#M$qn z4x!pInK1`1Q`oBRRz#oQ1d~rEpK`KWujzU+M#4T__dH%|`(D!*FTC^Jl~-Nv+Hcpv zz4_J~Sn1ba&RnH4K;Ex4m*vg@top+X5GR|C!LA?f(zx~3es0W|v2YaGq0IT@Z@>?H z^ifQ_43v)m3o(l2^MLJ*t?ihUK%3&ll(Kf%X(!ZMJ>P>-MW|+|lCH`N(;=h{{6a9&dOl?M}8My|y)NWu2U_i?CC;Y_Y zPrF1+1wjcmCKI}lw1mc%qnvX60>vc`G0V9~Q$;X=L$RfOuuFH`QvGg#o|oA zh^gWd7?Or2j6qACP=qRyst*XC2zl~m83KGzc*Mq(u#hS|SQA=LmQ{JeYSJYZI4)MN z8Il=gv}?H66#z66h5lE${UHhnV^cLgSCFB&^lDrd0~eAe1F#@pVIr2KfQM>IS&`NN zdMkgBYdDSH!Mn!wT&4fRWGU~mcnRltmv1B75s9OUi( zg*Db}=$6BW$>f`o2<2{2?znj9p$FaDZ@n!=(%r;0F=kvKgK-%?UUQ}Xo_p>gej*;gN6&#+(am=uj%%02xM+7QJmPMO)iLJfN8tydXABlt z)yB?m8T;aC#i3c_wbybuQpjzC)%e4Q4u`|)i{Q|B20nb&#rUcgR`mZ%b`Hc!*ZR^b z%PWivQF#9TD_A7-=G$*$=lmp@>|pu1OMvyvMJ)?3M&()l_3PJ{@js61!7hMit`2NV z`x6%(ef!E`G8kHJxRE$$Zilh4$N((-{=vkLG3mky zrU~Coke%3E?asjWzfCr22}j!9+<54D9PgW4{BqRM-LRnUU@0Uxfp7td#%Z`-v!bEg63=gl~j-bLYvsQTy%NV;p{n+jO(d zke9auv4{0CX6!iGNkJKOa?bK{Vy6cB%kRK*%)I$#o4aFEJ` z7&dIgP`nS`CXhaZvM1IuEPr{*%X&`veo3iVfVd+o71cJJ(q9W!^hm*Hz;IqW2Q=BZ~f zQL-u&LOAU2jraQ@*u}HO7F)OzPd?FYg%9_6C}GS$M;6ZA1iz8%G+oCXkM~k8u(Lpa zf77Q;7iZ&7J@pjoYo2`EmmMsu|KH)99PhuIeGeM|D5i?bvZg*;$o-5KP<79;?zrfv`uMDB>j$Gr{0{PJk5=Pap;|~fz5u!>HEqtE29BWiQPR41052&PICW#DIej_!qltu-YLljOS z__G_zXH1^L{r&IhQu3mHlVda%7nFF_5gToYFq2*6jTGhOSN%$1ba}3FdA&h7(gZ}~ zgiOF57oNYsq`$vrvx!JrHLA)|kutQkfuKK(Nx+llq>czf`{nvG;ejWJ%+WqaEQ9aF zK51nU6fUU@Ya@aAZenzKoLZe@GJ)yaMNkwH!kDUA()SOOWO2a~WDSEj+)POG#`b{} z41{R;ec8+~6CqS~sR#_(j4IsF1X$&g%4{JRe`bx52rNaAKMN^R(+06v$&;K<7A7bP z27t?qDc7eG5|J56F;k?Ld>M1zkT`U!{dFdsNFD3G`}R9bOstB5LQl8FmYd6J0v=&Sef+Tb;fGia zgcpf!-Mh*5XAX#YJQF{7e}q-ucVf4~91KF*VkN}iu*I4yKI-8^ELWJ+fSmte<<7Nz z`^qYsZLl)w5DfU&#p;2Vc&lD9X|4} zd~muuasH3m%BVvc`w{Kjp@@8Bh3{`@~&mjkYsf(HH>@(- zu;qrbqKuC4nyuH=HEOgrCQ+ul7oUIL{p-O8(dTQpojUI1_T6hAOsX`%pb&%h(PP~m zx832!P59O|#7-AFDBBEMftSTfFZS4fW6Lbgp_LW;8@1R_90hWKyBt33&!0EfeTLmK z&%(Lj7b89w$8?8caC*Q22Us)ta>W{zl+S9!9VsQ(r_a^yKd=4A{f0?_));giiS6R> z10vgdwN~t^Enko*NP}@j)t-CriK75!xrP{Y{x)Y8UTC+&fO|KzLu1F6;0bWJ{WJ#J zIBOR5@CPO`_LP3#s9__qnQhHgSvc<$1KaP!!QPRa#MpjYc@gCc*tl_HgP$6{XR^Tab?uO2OaEo z#|t@klyIf_SJ=t&;D86*m~rDU>Dkboi0$w@wC^A*ovdN4t}Sn^Q_KsUJuH!_YETp9 z6(vN;nkqg~2~D~4eCRFyfiI?zr0@fmN?@9yK%zjetL*txW`mNe+$cThFDQr<<)!h8 zGyN%jj?Z&xZIOoo@00f4CV5^`d1*QjgoI*Knl!Cs5u&N6P*Maku!22lZxDq?tfXSr$l|!HqO8;kElVmQ#TL^_7pbK^tS^FC`WVpZcqCF6r7>TWSWc7j zYdY4y0P(MlI_?8BxrrT;}CTqpjyfry8nCM@g0fC5#vQQzk7oTI*{dC#g+Zf~UR-11rE75t}566C7ojzpvFf96*AmjKBJM7?^ zt-CHhaM!~4Xr}uNxG%8C1U~BUQ5ZWZTDO%ka(yh?pxrzo>hsS&$Czw1j_#ZdTkE^# z8#Kp9?yY4tITrwN7X-&_oCx6&Rorp0&wl&3diV&=z^Y`hXMnFB1^O#LvcspKlw~}0 zj)T0(Qzv7w%q$2f$;8L%Ypm{?Hf`owVo?;wkt?E%{FrSOE-m3Z4;>eCCpC8!e~BFv z6EM!@*qI;x`7Y1`S8DMCT?xKv!r$5l_Dav2Hq)WOP?8m 
z_85opDR*ITA=6sefw2knZP0uJarkW$CCa5&en6j&_tPIHOvFy`&&5glY}6-r$ghFL zaT{;8v1`8G`ffGUA)P>Ta-uw>5{ZQ)bM)b%Xx9aJ-&+k61MS+j)1BlX)%#p|lp3Xo zSzjtR6Gt75f|F*B?VGH-4kkKU$YMP<)o<9%KW*wXaX9|NBrJeKKj4D0=IgB|w(`iQ z>#w^3PTrrv&V=>ln5C64xxvmvejZi!6DHVJfo&~ON2`HXI$BaVei!%-6G@-rJ&y0` z8*aFvTc?S>vj#E93iVf7(KTz<6q6@gVpmL4IeOT3#GC#(I2QLS^i%lX zbCV}e!6L8a#Nqc=xX-u%aukll8b5v<7KyEfzPAx#JG}svfVSjnzuCm-z! zHja}I+5PVwfFJ1-be7^_9+}^^YipyTz70GKBoIIvO*-5B`=}E39~xj_S>JZi{b)Icnp*3 zTLWd;s#R;(3fp2g^>nw}ZoA4rQU|kK zIaR_=gDDP9oUx_*WAXLzkAM8b?SNJ6JU;2CpRj{s!2RygM<2n_TAZNhjTatnxwW34 z@m#DXc?ByyF2D2=plZ0AZ@C#06m4Y%7is%qa_6O&UO{_sMc4}NP#n?Jsnbp}IOj^R zWpUI}O`LoC_FM0|e*JF2%i2`fu%moLUv1SjFn~tSf~whg!5WI~zkfgT?`Xb-Zm)gz zai^blI(A9acR&90GtO1L0LOd{#h|$fCW(5=w&>;S)sYqZoXMYqkN-US=uxbY8anh- ztl)2lqsPv3Yp%IE$`soH84XfAz1Hb$Ck*Jlo)-{YKc% z-w=H}p6RO0UPO>(iQlj*VGpb<;>*V}SZ%c3_S<1c$EFY}nDi4!NF zEv~`+h}BaYHAla`Kwgq4ggZI7;)my=(l^PWSlPA5o_nIdtjO^)2Fwd(M-$JCedytb zkzof?=(`(fg@uFL)hy2Fk*%&psGT`xY=JY>dTcZ{}ek%oB->=%j`p72~VJY$n) zau5ePgQfH-kFdkml+Mx?D{q#wjKr#+cG%$Fn+(R}Ij>^;)dXa~0?ueOwo)n)U{=QD z_b*srtkIByqCg<I6jWIt8wnPx(b)axt96ii$Z)F*Y1pUhEd7L5*2vRIxlJ!z84!;Sl@!C7(faS%gvx z5BEd6mGH70ZyCWwokbnYdwWxq*A#5`igW!bgHa|FmQ3ozK+0)-ER|zvqcph+H>gp? zm}@3|`K3H46NwqJ`&z+T;y?hRqGZckTof1Kq67ST-x2OZ=!byu0bXP&2N)+}Beqyk zp~eK9;!J;_h$4b4vkS3Qfbn>Eprc#{5~VscXBfsYn5=|8BrDh?I3);VQ5aIjlVkIo zXAq#Ze?k$Gl*O|6@)X;}i^(Hnwpg)wE~%a5`Rgk;MS9)2hJ+v>qI{+-NMlJR;tgj~ zs;mE$%j8m~)iII@i6#wTG&wYCw^X9vo>x}Ssc<1T*^Chaoo4l#(!}!9AKHAZzJ~im zYfTCe7T$kMFM|)z9Gld_SWb7#nkF?tJA!EFOOg6+mk_l~8SGx#IK3*OxCUcQMaLdFITS zheblaL+2l0f+p-T;i_=H&(Ie}IIdOrtez}ZJ*nZjXJCa8niJM3G(=(%P+gaz53eAIGTv>+ZW*-LnpqVQ2O>l>>T2v zGy1)w|40kLLfixF2XkQqcNOqxCbos@Cp8+#6SK}ZSuhv7R>;GnyI6m9(dT%7h5lkn zBsNV8Q3f2}hKVNLSM(Rc-pym1ILW|wS=J%-YWv$g4SrM>>w4DgnG&-;+0W_Eg_BfV zFvrsicxr>#51BDgzXl$CMj2c{$Q@Oz7ap}%kG>R976xk9JJakIpua^E7>PqLn*k7# zl+Mygd3^2qyE{nHgvl~KlS!<}P|A2!+Jqybe0f46dW>n&EI6%0Ccl5$4xk8vnP4bd z9taejT@7ckB4f@{h{Xn2hFFhX@^^ueL_;zSiGcwgS&$0mh6E&8=}5_7IKu=9iTDI2 zJeIdufZ?K0!{u#f&_Eo@=Jlf;Pzj3m#AaCrluWs_N^)ZvlOkpOltV3r_m5zDReGv| zQ@G-S?Vhxp!yxJNm;9&$L&XFUNeBieFiRS9ut4O&8#5$Zn;R`-qf2B`1jAqS{y|Y@ z71Mbz{iYPy0iY2Mo*F; z$1C2u@g+5Fm_Qj4UVf7I{5ix5QV9u4U#RpxQ%djwt%E$K`7k0zc|u)yv1wG5et~C5 z$^s_@SJKb&2kQf0NO<0I%?SmNL4pwGdO%rC`777I-I!i)6yfbreLUjnTudZzB>^WW zj_QsH3Y@2@#5O?B$3A;6-e%cSe16Y;x z1GaT@fVy+Xju;%`*d??-hVh7Wknlc^*Wm}=&#*eD3s&X#>e&;kGyJLu)rMnO{6J9Y^S8S*Ixl5;T#s)tor_2o>@m9W~p;hJlr9-6xKo3DqJPwT^P1kQBDAjbmj zW!D@(emp+xo{25X-?{ahHNzRMcVUIn3Q)-hxep+wMNjaf^Memywa*lH9JbgVu>S#= ztZ0BUU8llv9FBN;{4sIn)mtVgT7wA^M2KqRfR`VMFTmsud+7~uP`52s=ExCGAnA(< zkXK%QNsb!YzI}Vwv&V5*HKJ#enlV!*Pr~kpfo|aAkGdv}*OlEPoKUfQjtXfj&-mqW zTO8;%$GM(I!T}#&(%yLebxe%hB2EI2#Lm&qS?tcAG0rN>YtWO2Lh?z``Xmynsr)^u0)xeEQWgS^0q$#T(c><^%g z9)9Q{nKa`^_};yG33fQnLcQv$t7KIfce-#V19u6r?z9K%&l+ErUcgS5TW`JJk2xnzV|2nZ?ErYARAjV-6h0!M~K2IXga z&=j#T46ZW5puZ{$yde_h8e>k{gIb_N`vi3O@^iEyC$WiIUKm2HW%@I?5+>I~w(whC zNjR*DAF%*i!un4PNp$APs8P$2^?xa!(IZpTA3&Dy3_hx)KcKwWJWp#_W4N_x zj6j9@4>D4#!Q}0%q~EiIC{@9i<>K$3nL=55d+7a7cx>@piZ29o6yQOtCWvK7h56#Ab&GpVSmcFU!gDF+~>D3rt_jUxUSR9F9^!&uVt2rM9iDLEt^F&2!YrG@eb zA=Cr3jfJXw{}5$Gjm7>K?!LToL_aiE^ZOTCFPNI^iuT4-P7x#sL0->6XzUh2OfH3{ z>P;0AN*P6jw$1t*>k*9MeoX15{J`LUZWl#Y&_hXHP?a)8eu^#EU&)xL-}L5pv6&Nl zlhJ847{1){>h)W{iuz50mutobbs=E{c?@)T#0hCt_p#RlV$#!ot6LRV&=ayDku+Lf zMwW$UOH(1E_JoG>h^U{0dHFp~BzR##ZAf@Ckr%_G4MGvfB3u{(yt4j?jwz`eDFk5* zzATz4yQZ8hSU8(r*l`IJjkH!yR(|EzYY-l?AtAd9zQnOnUw-wKysL6CpuT@84Xva< z#7Q`Mx7TsKas2XRch=v}bo+JLSKdEjxvHywZ$VIN35$b;lHl(L%ZFZte@P5*sKQWv z2CQ0t(2^ynib4WVblW9|njIdS-qy600rpni|GbtUG$fR_!ZoL$eg^*x)QoR-Wd8K~ 
zhZzvH#uV!n1@`YB0ym^(D)c{#dl%~q}58p4NM&tLgLpCIG zrAaPoBrQ!Z{QeT+pP5$v>iWMIqPJX9&jJ_u9_0aQq1hR&$>lHynqvJ28L8EgE@=o) zSss~Y#Z<4q%J&acNdDZ0G-;e%dQm&E3~4(m2O&uL@(Pj96U1crSrDXhDV^dMM?dM) zM;ryGHAy|uxM)wY{WC6-ku8*;T4D*AOGBojO8ull@bzK()AtW~H&rTcfH2^K{tQBs zAb?0`#nmDZ9!xdMOq((J*w6;Hyv9);Ya=!mGD)Mxii;_e7h)3}0GcR*Ew8a&?Lifb zU{od}FMlX|s*5ZPp+S`@mlW$<;ptV`^_p|VQpZ#oHCX6a_}uf(!&#)0T~|y{bnD(- z&hbp`HOZ-FrpJsK?QZRNEBrsa=bAKWgvpAV9M36b;0l@r^ZvlZ!S8N4tW1zqOQ}#2 zp%S^G`{}2jfwQso@o~I|ID9jG+(AL#2{W*QeV5Li@R9vuR8?w)6Mqd{S;N&Bb>*y2 z{KP>|EOkka54!XAJ8@*xAh-8EdqYo8J=@b1(H0AUX*Y^UOYz94Z^zSl92~z5`q+*9 z=4;s|{5w`$a2shIIN9T*1$U8fx5(z3Z6U|vw7~hRwvC+iG8{+#oQM_f%VJef=iPV3 z?gC7_K)+Q_+Nb!DoptmLj^Nn~{t%8ns+()tbUh3vr^wMfJo|O4t+sMuf&y~nj^qbK z6l5N3dj}4^&OQ5F-mLD%n{Se>!6J_tB7{C4#-W{t4;z8W zq)*_~d!(EG<8)ayv@BM1(FyemSarrN^lMOTJDo=`dzwofg`rlu)3|boRfM1+P5xN*6hFE{x~1^ zBxT?=|Ai30BPZef-0|Oz$8lzVbNlbVKeiXIid_*8yMgfcG8yN}9(&AD?r0oS#-p*U zzoaVh8))qydk`ilZt2%goE5h3umg5ma0f*Vd6_u}{yyjwa1|T@@YFL;%h3R8 zi}VIiaX`=A9h0zXYo1ILHNcTzJjP5yu@XIV5o~#l%deG$rT4!*iM{w373$v_)$|#m z<*U-XB>ICaB-nnE&l}RrF!do{z5cu~)s^3D2!eyAN+t0&nysEG(rOCA?(2gL@O@`T8VrH-kfz*B^V%@Luz2DbLlF?T_~5f!+iR!xyCy{D579VA#H z6v+cDpK3Ok@njtpz@mQNm7@)0EEgi;ieSrYtk){vKRj_I{USWQnBWwKbP;TMjbnY1 zQ0`ITH!2mjDE&fhEEapsA?xk3yvFI(6I7*NI&8Egr7mMJB|s#}j4ah`G$F>rQ5Ky5 zAipD!BAd2dkbdTLM|+7#ZG)2>%{l3r=I>d7Q(G4zE8S#?Ft9! zIGz~m4tUJ$5AZ2D01Mt;eDNjMVAWOJJ$K!OqkYwPgGk1u;`86s7aG^}GnSGd()%Ko zOHkww4`vnSIW&W-iovOj33FQvqq3Jl5(lRZtTJ z8^VjoB}Fp*9aOB9#aJViLZa4~inO4HnDCpF)<59d&XL;e@tT2Hvi@f^VG>QA4YZ17 z33$(7`6~4bf)@rYOe}RwDL%QH-~_~E5o~#lOvt5Z>3&5{l?CiLZjT+9wwPs zPev1b$RSZ{Oc`Z}Jxk#92^(7&o#8c3uSUVxs5JRkynmQo zq>>0!F92BYNT9`JMg7*wAXc%-HPb^KAS(OyB%>D+o)$&Tny;uo(V@xYn&HrVA^;-= z508|PFD6uPKy~^li*hS(apMHVa2%y`E(S5!L2*!5aTG17BJ62 z-uQ9jWwK`hwsG?~p3UKhf&KzIaAyc~%)v+G_i+@^Ip?1(1H@ynqu}VHj|uwuz!G!h z>Tt}{G9%L!7Je<2LESyQ&8OJ1Da6Wc^0F?$6?G-?|XchLTpDPgPBz+2YnoNiPj9+kO=;SF= z(N>dicI*$>y~1{yA`?riV=`$=9M#nWAO4#*YZ_E>m&|UvVS)n3gl*EYrR;Hi z)=T{X%6sL09Q(y{Ub`Hyzw3_4!p3ViaW~)E&pio$BqZ#Gof$_QaX9#5r3ina{931+ zC|u~C#}QB0UVRm|UC(z{VS<8wAoPeQ=SE`(#kH{SQ=ENEr-MgyKT@2$`I5OoAAaQe z-G(EcK*uwc`(lR-ZJsb;f;;M{W3anq7It$q#}1o^U=p8YG);^NCZ_ErX$7bX8Y=lJ zc=8)kc$7T~sjmEFNa-aVSruz}ughp;kX1py7?E858vS|sRXi+NYW?7qc0m6^Y(rWr zTkJm60d0jgTN3?o02S{)U+E%7#HX#G098SFMg0UKp!`w=8IPju`roU6-lMv6z;soL z%k{HNXixlS&{2e5g_b~%lfqXlKZ$ZVgj5RU`axwtCh_;@+h3(nzoZh=OjT9S|G!3m z(9A>@mQ+6rVB;YjLpZ7X1EGp3jM6___#wx#0Dint|0yk_y3=L{%S4gm6b2OUd&&* z{=ca5XYKE6C$+&ZeutRK`nePQowwd__hJHqK1aB4Y^zpVxDD2CfyINh;ZS=Tc3*!X z&bfI+6yL*-IPwU*BXSX2+Wtw?B*~@ke=G$8B6y#FnSMd#eMza4 zYsQX@RyjucSZz{u!UY9TV-40Hm^4zW-I7zmk^Ra$y1hK~>UE9N>YL^yQ(lekxK% z5$`KR(%F4Z9gtbE{KQqEKS5Q}uP_muYSy%ZeL*N_arH+=Ns*OBM7-GbPgPM2cv%@N zBmPo2paQ6>{DRj@rShk;w7T^wDsWM;%pu4NL>&Jqkk>#MK^0P>AMpRh_Yaw%s95)#;CG?_&Q*cqN(SgWf`-j3fm_{hxh~2@0%;=f_%ppggL3H@7BMPDrx^mXOHm z{|gLiuECKnbU3x%`s=#~?z>-hONb@t^dDgL^&4-z?!KEi!Og%&;@LP~m1kcrfFoZ{ zPVnVtE>`%{=~assEp&on=T0a#Kd3qP0anvrdF7S(SUnRH7rUqr3Naw^P2o(e`PhmV9gbf(b-@Sm3&&3@SBFmdd z9|=6B=kGE>;jqgE6BOIxh$kG&!=hbx|8oXNfU z?pto;*WY00OMM(C)ye(ij5FlST$)3l4ZC573mv|boqo_<>7_3GKn9n_Wmg%(y5^!G?6C|-E(dF+x*y>(brU(^PQ zAR$NzGDsuc64EdV(kUq22nf>MLrMxrN}~uUDLM4eAp+8!LrM-Y3{2hmec%1=bDx=K z{y1}!|V?FpQ&+mL=^_|KW3G@}Cha^nio+2VY8WKM>*m zP_t%IMgSz~NI3|3_lN$j;ok~yf6r}GeP9qV$BN3iYXt6#xa7_F6!@D=Pb za^vAkrp()rRehHdh_D9In6||Gw?3Z@_4Q=?SK43ieK^?3zy0Dr1I<-0yeWKP(6fCA zru49j_j^a3@mn*C`%vr=!L#yLnWC$pkA{Dy!Jwx+8i{>ZEUL?Q#gDa|>1wPyfHCGF zuT+aK60uLw%MNe9SU}6PeTYn4(GR}n)KFdZM36q9)Vj!><}}2~oZXF?D}O`hOV?nh z5@FhBXP09IQ7A>&EciNH?@9{QV&0*sPgBzn8`Yqn=R1Ah#9}rS9jj4^7RyMz!7tUk 
zSq$2-HW~ah1?&o2+egMB%C`Ju#*Yu5=kelDT_RyvH03I;{&oZnp5ibP>{GRk;}tol zehH+2U@im-zSI$6{fWoE3CdQ{6&l28*uHXq8Cbb6jq_$~Cs@9MKP(j**2gib=^E=- zx6(FgX-x{M$BxwPZ$K}5o?a_g&L%wI2K{A-&5AVWDW0Ny42u)^Cs1zS7o~;sczUPU zo<}%VPL1v#3l~M421-2P+K5J#6DAXr=L|_hQG~ULcaI}qyA0(=Wi)ZqL^OMml17ED zJ!3s*f30#$(>cmVP=;5>LHMM2iS8;R{-sM5DO&Th<=8ucp4tl(4D6?C_7kNQge7kL z$^PzpjG!Kz;gHYdMg6B+8xwWZ;YC#**H7#ps)IP8EV3REEy@}LKc#b;ho}_M_yh8^hluhimv;X_oR||+yo}FRmOM0~@Lv?8p zn(+ifjm||a84Cy{WJR{wVBJZv_t?+x{a6-3tx8Dad1~eIQ>3+-A5!q@Jb?5 z@8NSgW9;|uZ-{1!XKhSxvl&FUOg<3&M5<;2i7)l02p5-(r16^|yt~C6yS+K*#{%XYnDV&@+lr%$K(yd(M}! zH0*#lig{x-fO=u-ExySbIUGMVh11t}lTusdC9Zmzf+BHG;tAmnn<)GY%@x1J^}>MpHg}yS z2QGAUJ8B21UNQ?KPHEj1b<)Xy?)SkyAl+TAF5zN^f;Y;r zFAo8WFn>v#a+n@^wc)m4xhdeFgNw)M%VP#ITCH6=peb?`G!KZUI+cFqC0+NomqY=I zLMECh!XT#4NK??@Hl@4{VqSnEoKXz@>$VlY%9xhCd=MaPX{blL3&Y^ z(^Q9o4)w1GO#Tx<{ zN`sjP%x=EVKa%EnqeQCt3600gex*@7;LycOpOvqBXawTUcn1CT<=MwC3vd4Lw9=Yv z8r=JA{I~T7`TW1vM!CH9CciJV4ATH7%1aNm48nIe-?};cMnu=JzZ6vdKjUOhNDCVM zzN!pR4F=uEsrv3do5uL%Z~a2dn{I=Z=f9tY$KsvQSRAVs`^e8<6L->FqMcaQkqiJ|!t{5|cVPO&_6O8{ovg2YBT8;{R<;c=lZ#3Y}zTUuke#O8$AmfG+9llFEN( z;-dE6)SP~hqVcf_l`T$@lqJ^1&wV9Rg72oz8&>_6!vG(rTluL+;9ofFtY1qCXeq*INQ^6n@^?)-PJ89#bEdfS2tCJ|IA*DAox+YWsg z9QsPAv9SasOC&X^fUfSiP66N!!RnUEnH?tj=PHRiCXWdmBE zOUccbaNagD^WtI(>mPkky*W%gu4sJYHK}-6y;5>Z)X?~q#;9H^LXkzsmn&3%gd(wU zixc^cEW$2i8@FJL2)(>k50LdILd8Q1-(N;rQh4U~Ex{Q+RI{^Uj|9O5(&x++K*oS| zzl!SZjgZN%wff;}-=m&vK}7q*Uuic$r`U@m=57$rcNdnJzcSZ`=agQB;JYu@?fuIriVbQz(69LeV_*x>*eq;VvZVeSM+3=*9*wk4-VbGmlT@WA9gbVjX6*82mLc~6 zzzd7Z5|+RfS+JJ|T`PhgO*rkI=Jh}O{EF+cKEoEkBtD1%>*Tkb_9eAOM-3NGQJ8#r z#cG>Fx?)d~qPgq_wp{yJ(ML?!q7IB+Q$TOBb@u)i7&|Sr6y0KvHGJ?CUjwD8R1`lO zNy1ZSqz1HdZ(BZ%BxdnVRTQIglVQXaHT92cfY4qwcK>1F<|;uI#0O!5WJkLhIuiHV z6c^iO?#Zza5EI4+`p!J}%v8Lea!Pp`oF?~3j(2{Df;2;gl@|T%rD*V#37Dh6-#YpM z>C+&Kg$sm>p8u!URmoMt|>}0c4PM68uS-GEeyZj&nA1zgcl|C^ixeRxy#(6 zpa|0eG{E_@<_W2iLhS!^{z@Bh5{%Q;QCQ)^=PY6e?R10f#2|t@>D1}rLZiIr`uyv! zcvU*-wjN&+y-TJlGVMNmSYxtz4>5Cn7OVM=)WF2R@1vCgddD6CYRA5vQZ>8y{z##u zS{PUA?DhAT4ncV@7#?vGM3gy$e%tbXpfucs%9Fx*#cM$t*vsix3+V^MHJ?k_aB#$4 zO&!|*-f~s{YIL7YIsX7siOL+KGCD24%do?MuMEK>l7w2N8%2=K>#%e?MDmhvGoR`@vOKdGn$4tCxPD^N>#B;s3t1&E~= z*@o<3Dmp7qfdZ83>6|kB`=5BhGPL5HukhbRxH}htl4j3<{dA|nc6h} z`(mc2AuTW7%sah2uNY&^e37kZu2S)%I<4Ck+l67O2WP~s|WPre?B zu?t-Qjl*ZPe^l#htn=k(}hZhT%rZ!O#et0;(x<2xE zyctJog05F~O6wP~g25<0Wu6FZ9q8x3VnyDG-ur|B`b-abme#xmX3Rb;k-AicZKQO! 
zls{Dg3DHA7*Pax%V4gHN7a{2J1A$;Xo~*=Ky(P_IZvzoDrO;QsiOi*(!Gm@Cq*uM# z)7ou#ORVQ2&%eG80nIIw&M>(8e5gj(p}g&a{OwG6g$ESQ)`QMXbL{}O!g3v2QDJ2aMj84gYkcV_+BeX zo7U{5xN24Qqe`{Dpa&A?#AJVe^fjg2WukZigp5ZfWLRD0f;#gf*MFL&tXr8~%=+dt`;c%JLbPAM&VY>_^yX8tvHoS&`Pl*Zsp2Hp2RmI{&4{gsM) ztl2l$2F^`Rj>L1CmJMRYkN14ua6muQaaw84RF>-)aJAV^k(VJr50 zh8tyHB_{Ag2hk5W)jmz-lQ~dgiwiC~gufB-yM4A;n16& zn4MX#Fx9F@>LFa4sBP`rJq;8g7jq%p>;v{$S~ZX`TzY0J!ZI=to{oaqu`y|X!F2>hci*zFJVCL% zxsd}KvQLFJ9~-~JkO9eFsId`3f86|c^~bQgZ{E_B*~zGIrA0;$K-d?g84x(QM7P9B z6wA=qC}qOBQ33A$`KKUUan|pE>^3w-^y-iXU~j8wZOWto(Oc2rez6l8RClM_N49e_ z*u_Nbt)LbNIsalyEmEP)^OE?`z)meabVgJk@|+J?v>3lMt*rPGr~s;MhkB$QqIwU~ zNc-esE`*!P0r$3lH`;0YCc#jV0S(Q6a>6OSSqL^|2vYuO%0FO7c)SvZS|&wfJQzHR zgej0x&N=I>a^lHcptzzO!vzYy;%Om#F*NiAAW~DSV(Vj|_4jwOuXeHxg98-+q%u6h zlMYLC0W{+u^B;O*|3iN{40-%xBAEHmIKlx#8(VP0-sFCNq~FCdWT9bvdqY%+M$7`I z?H?0d7Rp3@wqbCf0AwXPNhh5PPcsu6YQ)!UCleLVCn)y80>wfzrj>W#35wnK+yhcf zh{;h$0@)oZ!P49Q(sqVzEEWx2X%_TbG6OecX&4tG00h>}5X|hc0Yc^qRBMh+I;owlUJr;D1e1LFKIB-x(>AZ z)pX(q80UNf=OHsbX)@9AXuHQmBlW$=dRuCU02qv47=o+D8z=`XR`0fEM)XpCWc$zi zKT{-4R+{ltYvVD(LW@rJLXFu(Ur@f8`M3(wm_?bvVb*Ehl4-nLv_5FgAq!y2`KPJa zHq>q&Y4Iq#MV3wp9^h&JGAvA-aQyLQ)~s1&haGn;M;w0SunpJS+y0!;F*Mkn;P(eN zm~c=BCM{638hd7e@Pv{^MSgOnyjqJE2PGJ?`Xx2ztQfWG0@`C8^TAPIar;E&Jh@gG zEOuBJpAgc)PWqKWy28wAyq=PW>}PEel{+sgIYH@;CR{t!R)y$8EyS}#7(fi!Cy$y_ zO;RgpA*Br@Vr&=$>$cdkv>b{UWnXz+qAQ&3yJohCtRrGrHXubdrBB}R?WZi5UY#S* z1VNvxs>}yS3~7>;5Os1*aX|PFTGHr`>r`IBf`A>5)6mMtiGZ`wS6vkDV2lt=@(?&t zB=M7hy8I3{_ElTb>7cZ|Oojwyn_~R5K>M^4ej})QLJynZ?ZLke^*J*ujf1Ll43vks3sjG;xj&Hx( z2}=Uhxn@3C{&fhT6MfpJJj;0VFVBDFcB|TYPie*hq;KDXLBdCE|NZUHgwgo+PaWzz zp&^UJ*V5w7*#sJ3}CP06AF1XY154srjcZ#j8%K6*NDR zU#b!!{XK$`T(A=YiAZw3pd8y6NXfFOA&rjfu7lRt)$C{9UGW6PjW^z0_S^r}Sft3W zcxq;o=%kVwKVbBAet0y_pe=bRqjxRRIy&LQsQA?s>&k67AY%UqSm+v(@tOo1V&I(S zLGi{~m6L-Vh<|8Wg2 zB(2-01r{P%V9oi^OIV?1DN|GTKja6poBxQw^ePWDA#u(t%Jb}UiEQTbPMtXXuwC-#8yOOF=O1Kq6{M@Sq|-rZCu83Qs8|azUc?6=CeFWk z>*{0z@X$T*`y(q8LoS3)*A`G5zbxYOk5VkQ>&Y%d7pFYU~YYIWAJ?N_S~dCML0$lo7e%IAMEb|Ree0_&F{o%`Smfal-Ls_N+_FjyrB&jyM7f6eyL$AT&Pzh_Mg}8Lwc`gLfvPt&YDi zD%B*2(X0^a7FQ7q6zwzud!7MV+9Z4YnWy!zQrxPy+}J1BjQG9Gw#Z^CaR5$4;eQkit9Kew8rL$<*0&~p_EZ%viPn+c8p*rf zl5a5oQ~{dr0S5F%j+502x&d#E;&~6W0cdz2X?CNH{i@laLHj+uX)C6n)4dG@Nt3m& z!m-(#u2!Y@&|>pBm6zJI3e;Z%3e0j>nC^qlVgg{ANNM6G-|6 z@4sq7AAm{Czec%qs(n}HHbJgO=+JLMnlu+liL3g|R@Zk!yGZbhE?4M2p!Ktdx&ha5 za&}h+-J;Kr0JN3u2fsOies6m*k~S{jZIu&u9PN+afgJl2*OTo#F0~PV!c{N~&{3bL z46>hp!SVEVvfH0p34Yh<#-;B&8f%@W+lE7QZNNU0Wt*<6mlk@O{JY*@a)h@=@UFLA z`+`@4^v%z4T^p79g5|aTfSx> za@YhGopIw2*+^D99ewkhDF40OL!aRyU!4@IgFxna=UzLnx8v9!x&2ve^D|IB0+B;^zHO&u-<5mY&A+%&w?(luDf{HK{gp#v!wq>0B0(0i2RZ98A&0d(vGA0#+zaG*%;p0BC}tDV!^4ti4g2b2E-ZFFlV;?K0mh1(TJ zeTX6siHCA=o6CXtRfKJ-PpSxFq0bVf0_U{7LqG|H)WSW)Y2yaKBjgF?# zNA}1iVf>|B2^q-d_X?3w+^)Aq@UFKxe%Rw{KDj_~(#a>6%klBbV~;(y?2JX^NJHqS zORfD@rT5SZsM~0m>&`#u?dogdPj*!YxpgR|woc$8cY#q2-t{(V8P|UBn7;#<35%xmV*G6nKJSI$!a0$RTBqy>_qp7rj|Iy=*pgbvv`jlyt--et%R} z(be-;Em-Z$-i|_pE-d={gFf4q0z3#q8(ygDU5aK%tES>E?_yd3^Z9F5-e^epT1OA+!stO$5@z#aRl|TdNZGT40`44{6BNkeJ zD}|5?xc-jL7#7nZY#n{seyd!1O}^Zm^M$P21%by8i#6P<0H8_Q50zbh+6V{CR>v{# zdOJn?S*)3VZZ=xy^wtR8^;VXleeqq9H3am1enmdRKtErV1jr3o+v%!VBE4YyhmCeV zgYNd7EwwRTt~=4hzMa>{ptv~(7%~C$;~9uqOc*}w6Is;=OGR`D-t`uOv+)gz6HYjx z%#x2}^BWYpK+){~g5TA{I^sxsOCxyW0^YhDTE;(x{og|T1Vx?Lswk_?12-yXl*%%q zrGsd@+LO8^h#poxHyZm;91~cK)GDhPrMHJc8%k~|PSnjeWDdN^NH9ufd1ZQuN+CAz^#1kw(C*YRNVPUXDdHSFmy z#Jg8?)gJc2B3$!npQhyw3}~4~UFf?%)GNBt3g2UoVf(|}1dmljn+#eqvyZ=V^A8Tn zML|rmZ-HPp|CI-NW&CN6{JNEpE%j#>aUKU$)QBtB%hI3g30xh{^ zEG*sQ4{@SjEGi^?foBheNpvLuduD*(xOeT7&kMD8?N^J=rR|XZ99N|$NcJUkqSfWD 
zi!bD{F{_d-KYKlzA*4Yiv4)ruoz}IlRArthe&@9r5f-`*HB-;H8vCvYv;^vL1Mw5b z(n*yfQD+%Jb`YIzWyy{ioOyHnJ}zN20a>OhuMAF?);jVHiW_dk6BPTE{qc&YW#uE; zZ7kWo>!?~QsP9mf&Ny`Xvy z7=jBj|6T7W`*cif)l{_}z=4VEQ#VUe5dpj19F=l+;&(<66FA}i$9a2m(+Q@JHXHxk zPU0gEOGY*04?>z!%541XH2Sj=<7w!Q8{m^MF=n-|+J&_b!t!b<+09q=bdW)T}N_OP`&K~SaA_mJvNTX0>z1Vg5rwH zFUKpMjww6sv@xo1PDa<&ax={O|CuV|PJTCTC20%tSo(w46_=)SDvrtp)GkyC= z_C*in2&6J(-+k31F3-6~?EfaxKLI`Lbol6A9jgW1|5xP|vb?*UOt9Q!)X4ZVF9hlw zA|WAcJhDy`mGNigQzqkmtla)w`38kPLGgAzL9v&9B-^Zs$wp7dV5cjl1UEp)D7Wu+ z+%Nal+bmO+JJoXTbJNgn-*e|Wyd~S?YF`yb!rgw}Fo@0{vsPk#r|>52m~9E3br!Pf0h{1~1 zr$F5=ig;6H86k8Koo;0jLZV{4<4GBCW&7k}8I^+upHxyNcLhczb0QPSh>}rV`)(ui z^kjh0yjHFXkz32C`JZ`5*>@e@W*NbC5S?zYTrz8RIpGs0luK|!#11=bCr?o1xjcpo zT=E1bBu43API|NsqSFog&HNK=t1F(MP+C__?q_&Flc8L_a-(kd3}&XCgdy4sGuzDY z{Mb+OGBbRtm2%n_zEJ+~hu@cnA9@IiiPsY&kA+uJWg$T>BkFL-#dJRNWQAe!BI7)4 z#?#OdYs@|`{7pC8Xh`9tlbs^?sBIqj`2uHZIZ%<3Za1= z{>eK}Y$<{yK|&iuc%g3$fGk7ss)!UQFj4kxc|>%{MXxL9s_3$P&-o zt0Zlpi$BQ;IEmoAVqTmobkPa2wbKyfylL~m#XL}q1hmMt3@sIbSuVuOq{J|gg)kX$ zFi;;okMse>0sx2TCNE*B=o?v>I_Q540NJ&N8p)CEpIpoZV(0#6Y&!5nfknS}L5K+Y zNNE-w59yN(fd71qRhW1$bjJ2SM9~-8jQOXkxi6xVG{u=W4>0GY4yf6n9&v@0(Zo6> z=z;16Dhefi|0~-+;{3TXo5P70eob3xm+469KcQ8)QC z*;>QG6W^Xvw*J>qgd!$up*t;r}i>LH$q^6&19 zYoG2xe{UXzWz3|B>??1W%l3qn=@~YO0hH%VNvOw(pRahj=_V{t?6*I@L4hYImR%N# z^SERK8J?Vd`cl88Q==Tt8|hsexu0MMy-liJhmuf_6MrKno$fZC3=EBRB3E5uc@pDa zr38Wvb=Sur8_2N94WP!0KiG$2Q3p`FZ6ZRV1Cxg0WN#BvQOIX=jwTGif!f<{vO?pN zBKVl{#Uh7Xztvr z@B{@HC}xyH4n3soxhEDVDp~bPt;nbsVEp6vSJImdO~U?y9)EIx88$BlQ0|n+uiIgr z(;5n>Q_R2X&G8Rz{x&FXe?hjt)+RNwq*&C5qN5JW-~>Fs<1cEmfef3m0hIf!B-Giy zAcEVGJP?!y5*c1)u@qOk3+ZP4N=76mQj=yW8 zr!d*?(FW2bX;GP|C(3^|(X)|>8TS06?M%4zUEL-V zUQ7`GC9`LjkK+l7OYo|v9k<`E9Dd}HQ0YpuO~74UnM_}#gk_1pCqteo#EMpHVy_xalqMC#w+U5DpvfIJ5{**0V8x4gs*ooMEI28nSroZIso_dLdn!9Y_tiWS zrd&}ElOh)aGeP0Q4?nz|`RiYo2OfNoj1jtA(1UMdTwuW9Pz~vlE27ZL_VG9h*l;!8 zuKdwYp1z4~ax;GZ7g@tIOxTd8G^k$q*pg0OT#N$`FZ6SMh&&1SPkmgt;K#<5q`N2S zP!=qcvd^a^kQ=HL4G^S8ePRGHA^tHsAvQq+C?}@~L@uz3CCW_BK4l)0U;x1GW4JIo zozireMx;drgj`TnBcKpx_R*4mnp_a2Lu#Lwk!6~Z54n@^0FMenfd45gx&Sb&k@?T| zH9A#*+RlCCQa;qM@bWcO>2GqhFjeh{<5Rb<^A_GBSn8(pa>18ZU{S_dh|1spBeVS*{{4%$=unec<0zdKK5T5Xd(?oTMgA~TXkcSC=oRr{_*tqs@=8%ARz zxpv855Fp+JxBw}mT8E;~3;%BbF;t^>`$}DQ2y0w-RjT^u5amR(iob9&D zZso>XZYcY{df##Y7Af=zijEZ#Q`rv1tSL>DHkgDLK~(?c^IyFZw^;S;tB;a@W>pd^ z&O*?#jA`RH0dy!$v^jpWS{o5W)w53)jJM2x?zGV5F^rAdHykkpS*AMvP!E~T{8uBQ zPwks#Wv*5h#J<~LOYaBc2Swt~QJF6$CI?%O-*l2&Kps3|yEY=_av^%s_MuO>d7dI5 zwnyE=Sa-_#*MWodc=1!Kq`_^8fF7_Wq!{gTpMLsA?{Ne!$LwLz0~XYYPpP#Q7s(*;3u7e$3j2a3on=)1gQ6yN2Vv&bM$dw% zRGR8;>&e)kd)3ut?|tx!ry0}Bp;(~UYxli${&n-OlhS-MJM(K_ky2`O7&}J*-)bV%n9alng6IY7sP=3uiKXd;G>f`wb&u z8$g_=L}t{z$HBfvm*#(pFMJT!I{wkc@$B;&0k0=aH2=h&dLdx0h^7bD+r@+`Y*3d+-(o_p;;~Fpj!eV?PQs-*LwjNde29YuH z-%@8^I=dYI@#D*-v-t$Y4&{g=4>zwoCh%kb>qk0Dx7O4P)tX_&NIhgfv5uF29N&>0 zVOEI%so6bAwnl-JV-b<2qxf9+$YE3uLfvzOEISHrbT*y7M3;QT;YXA+&iqwbb(Pi1 zrkibER$5^N!~tL25llJ-Emc&BosHZ{i~|B==A}7JmnDrG1lB6#`~fJ+yItj!Q*J7f z9fUk(!HbaS$3-^?@p!``cOpX>cR@r6pInI3KJBvzyC0-XRtRRCDC%J01$Cmu9xoKK z&v>}O5QcrR1v|VzszntEg>>E_2g$B|Y8H}7Nn<_`Ca97c#6orvR2EyfQ=|uspUf~# zf4GxzIbQgNao~g^d6J1ven}d9qiVv`T@xodfTae&s6{ClM2>wT&{98(fRYh=5`ZGe z9#$zNUtp2(=gx_;X5Ru-L)2qxMLoKVpG5jCJ|back-N!>mz)R-aFS69h(3ucx#;GLy$E1!`ulPwQL{26TyQl0bQ^n2l_~i%dzW4IaQA=?$ACL zL2dhwMF&X)^g4_6{Nso)9i~~kzsDZBU+@f-4jO-*`qw9Wj=E} z2n&vA9}|_Q5uJauY4)L;en@5e3LR6!ft4qLez+kkEvrosJ$~A|kmlao9u^ji2gyl&6(vKI0kl75{_x-(ODu>CehtciveRTVm1j{O4~| zR$FbgaqaiBshCK)14PFTwv}?0ApaNT|ItSuEf3)-=||A=iO3nq`}7$z%OZFM;Pjb1 za{Le2|Fo%$UX#<@k=zEG0GS+!$Km)xy>d+N8ThZUPb)H~{s-IV6BH+(cw)I6uXs8d 
zuXy4|vU&5^M5PJYI{72dd|L4d4)pM`L6BK)wX;`3mJHJ7($6if-8l6q2#_v#%OvHlBziv)@2Hdi? zJ@Wrz`#j%x_~D1}c92Kp73Tc4#4%iW;e~PBn~A(*l8h7ogSct;x4-*cx#F_RWxTi8 zVvF)*-00=6eeujTIc+-5QOeCX-CTZ;HDm^~bm~1+I zIiy5lu_-rh{C(+ROk>hIyju6@BaiCbwjJb5&$+sw%D~SjpB{Y_68P=w*Z-!^n1`VS>?jt{tovjPAluIzjoOg=YOP0UTAW}{)41k?!WK;a@IL# zm+P*(t}LK%<3^@Yr}9O(scU#Ju(shNB2MSk?iB~ zk?c!nl^wTxF`l3}qK-dPH_18BNG>%cEizixPnHawsbtdr2%>8RSuW;9 zw6290mGlCWv${s@laIxBqAisa)nxiyEeW{*Kc4-gjyR(H3Qtfx3p?Kf4%okJw2@xO zQtecf?DGdD_HZ=Cq9%EHn8Zy}c^A6B`pFr&BeF}u zV4|Qgb3+wXtXYzMzW39W?vpdwuP;uc(ddc?987I{iOyeDYvObSUg`cpmd3YlKKDTtfHB*Z-ABdWxN;PQ2{M&8U zUCQ-0-BkA9cRzfGVt;$ZQ?@{urRvU9QnYW_E|Cl@z&|nn^+TyyGWjte(<3Oxg^hr2XO&lW?2;9CR%FgrOOJqF=@>;S1%i^ z|D>|yl1qYv0kO<~{Cqn20sMf!{jNL8l8Y}{mRo)~=<`JVZ61Gkk>sws?<)7-cc0{C zX&gV@W%pdt5|J3TOH52pZ2xhl8(CvgGnVO+Cn)fWr^_$DOuj*}^UhqL(DVN^4L!3q zEKlS=Z=;bM2auj8=3ky^zM=N#Uu@`-JMytCROViVk7VNsifK4jy#osryYm%KD5}Zy zxmuy53iv1TzhL>dM0*p-ohbkJVITg3@Bg4&bL}#c=GiT0#ZXEBV;5dMr7fw6v3*|>Y`msDc^Ts#5xxD;kyUM~oSthb? zYGnK`{_|hTm%jAn@;lr(Mtg5~{p-qex7;eM32zf2wX@9s@mIwE8|EM1N^lwW;isSe zQ+&tuF?o&k!qaAyJ+R%Zvg*o+NH0$Kthi#%oO0p$=a(CAzEP68#1c!EXFl^8g$qJ| zjrbqNjV|B+w|^_Aoch_aFmBM;RDE?&n@!Yj@dCwNgS!`ZXem$%p*Y2%l;U2Tpv8*2 zOQE>7I0X0NF2#dukRXBFyx%u>?%aQ$nPhgKXZM`lbAGai!OwAiKjhVy7FV{bZmzhq zkN5)reur*K;?aXaF*+Vp>vw8D7RMi0Ut#{Vk$|v+V?XXdK;(Hjn@c7k?3_&-fmr@5 zk1i}#PKd0b%OOx82;^pY2d~|kpO7# zz&w>|fm()q#JVhpYQ~3{r?N@m&A&44sD$Hn>IVx$xnxS(5&?AxkPHIYj{x~S`dPyB zDM8E1qEe%6F?7K-ub2xa18;vrkbc>p42dZWWJmR-9e-d%v)^`QReQkwERn?N)~D09 z)f!|S`g_9$7>+p*Xnk+(dMUi8^sj8s>(L!A7LK?)J+F|^Pm2#c*@1LiO*UR1s7gw8 zB!g`EG^-ly488u{jLzoW&NV=;^7Qo-K;Hv6(!GQO5sw@E`+0sfYZ&)Mj?KLMBXXEB zhvjss2xML8QjH#P+24pZC^obo7a4t*?O-gr>hT-uU`%W9l492YQxvxcsPoSq z_7Bp5%g52xdC?2KE=8CuAC~azCTf-~C$_2T%uA!1P_YG3`+PGPtOp%CHd`|8{}sZb zP|thp;2pba@;9~CKjfU(ZrspaANGn$J`jT@M|M>Uxf63@Bv-*zSDGd#1Oz(`hhYk_%$v%`}%&$WYM z9LVbwU|AnZ1~jJ3ckq!uLB=HZ?9;UKCL?FwbLkTk$Rr4!O#B&MFV}P9x(@HAVbU|C z+NU|L)<R`|r@UW`ig?$UTm$n^CKV1{PLaCWjEP zE=0iniH*iU>m-JC=glIov){3itA#(!%cpZGL|#NTF!1>*-wF`;!G{v);Pwo)jU)hF zys;ju&Wq1hBd`B#`we{c?>r5357i%SG*Dssf~-Yy*7{>O$@+}jKXWb00RpcRq2tbh zzjGo9SSI^@_M286^=0p#35u^!-3emj%?AEV;fbvo$ifQ=EM1>+8i>I#?D6dFv?!KOaZxoW!zJ+-8;jpr;)6iK{M@_yO#1xTB9GwldFN1!LB zo_oI0`o(v#6!kXb=P7!kcGoz5d)Zg{G3s_Q>oB zqyz~qp+ubM8(Y%9r4EoiVJBLxA{R#N#Db!KqAk=t-<~2whRpC^?>tS_L>+x5{3o2j z9N*| zicY@kt`XUYV3M!I5&u;B;QeOSAI5iNpY4L>nC_xWpX$Zy`SXgV+S~hXaHP*}E02PF z6|k&#Z{_kL)%5eqok4|Ymsd9)KjRS-%g*h$wQbN*XnlM@RgTxXk2U6*p0QW1C!|Om zaNdOoejMW!+t8}%E1I^tLus9f=1xCt^80xI*ad1`a~&o~nV`wmcj_hRl{#yw{Lshz zf}5wy($FV0_2E0-Y-9dD-?_gOB?b1uEKZ9!0u1Y_m3~})!iX+E2USY#EY}(baao0? 
zY1ljyrSBZz>U_|Z(gQ!-)Q+KIm|gvWL&wsQ>qs*%&l|mVwQVO(Y79^8pXOP|(}WNj z@VE4nEs(qY94r<1$Ip#cSLxczeEf9Sb+ZTM>DMsI}V%Jw6&`gP7fi!@Nez z?zq>{U%B2I+N6^JWy_6&^`+@TV~pC*8eUVHUq->@_#QVtTJ2{W*xda&e*0s3cS5c* zO>zS9dmmveZ4ImFyhjfY2vsEKu=5bvCa<8?j_Ddz*g!tF*Bdp?-IbzpPlwt#6rA*ft=KR@?%F?EJ_FolA5Go6?FVxaJFV(Z zsq2H2`r(1c*&AH!=E0U0JUX2RN~>uEmWdX13*8z(SbO-QC=IKls+MV;Gloc*&?4k`Jt1 zWHW@b3bFZ%LT^oMZ;Q84x35;67FQXFzn%hoc0WRXm_3yuU()5xE&71}YIDKgw|9bZ zPZkt~@-L(8DxyB#I2C`ad0V7H|0(gXKc7cO>58sv*;9SqaEKD^C1>7SoeS6B&PdgL zT~}OT1a+=Q`%$d7n_FlXlsDl?UoG{o{%o|gKrlDMS1I7e@KMdX9oI@lTbFU~F?dl48aZhz^2{ zXa&PpDD^{QCW=oNNr0eJ!+DGM+plCfP^D#Wm3Q;Wp>P@j(VzlUPSbquysGL*1aS9F~H@U=N$Km9;K_*`bfdw zNF^x0bp4Y?eM7QZ^Nff^j5U`hPYC%ML>A3!xBAb_D6~I3PL~tD<#Wv2ycN*BsJIc} z@+7!c-ONEQr8lF5@53Z-)q4MnW-Qn_z$gh zbJ6&s1J@<9B%O=9x&h~`!&u2ouFrs3#tMQ=<^1mZ6TDs<0R-e9ar})-s+!MMAZLDO z7^iyvp*xH|Tw$`jVeD_5dYP8T#Wr|q6E+pG%|y0#5Z!15(KQhlyD!bc8+$N;Trpdz z9|G6A^*N1WyZ~2BqXCiW)={kK9QIY<@BPwNTfU{GNH!2BvW?GRnqOy&6sML?e9#MSlqOaqmKqv4tK?|EH*ia zW`@gIM%U^3p_D9BXMW(M}8L%CH8`B%@#QTMV?s!HZ|VH zl)s_#dS;CuhALSsrB6CzU`#Og+PoMEPprL_KIkVbKBwx|+fUw&OT-P8tveG^qr774 zI)S$_`8zesfRSdcyRH88?ecg?y|aP+X(KP1ZYM(<4VsvShKi``E8LGSQe`2hUC|FI zlhI^s@f1!Vww*Hh7jo8*zluv6xb$z_Hutd)FDc+sa6>2bk12g_4SdOJhmP(@(=2P;21Ym0D zIRO5hs$pv~@$~8a`fQf*JItD3eCPQQ?Mp++77JCx6@K{A2x5LI|2#IPa<;xJ@Ubu= z?H_DO8wg}E%#A+ke0BJZz!u~+L;aW4YO6# zWX+JuC&FNKq=TstnViP0bL;7z8zni%GrUx8#EW?yx+k_%9$pRc!}8i$3KWTx0-?s% znzW&az2gbpcrSHUXSL;72hlgC_rDs&yZh3A+We32P5r}B<%p{;pTVp;EqiTWeWY01 zn%{Y+i{GoE{Q(z$vy!T&BRlKq0Sb(Ca-WdxIqk9g{e1leBj+bKfxPv#y>R{?oTF-G zur*JwR_YiFukCrQcxk9W+`V^&$06%jmVMR7G!v(Las0=-+2`u!OaFaITC4NHoSg4h zv1DV^_BD)x6S-1YLqnN&djm7pjk!QLN35FI%nMB8$en z^%o)r$hFUwCuH~zP5$X6n6t+{MI1K&*RQEu2C z>VW;c=*dMkxv|bRw}f+F>lQPC9Mf%De?7>PhxSs{s2PC&M$X+w2I{_g+B{|t)-%A~ z=c=qS4TL7=r^N5v$Qlz}nFP^fUhX(<~s8NuhCyzy84Hw%XC zJRyrQ&_`RI901^fkemaK9gQ|iLOM!(#$o<5;yWR`ytO;^Ck4IeVM)UQ^!duSFnlemo7 z(0OLte_hi3&wH1KW)|EWqGflv;}6&9Z62N#tlvFtkWVKz33lshv#AT8-)+tfXFnd{ zbe4E)Q&$tYpZs8;s5>r(2Z710Wo$KdSz|r*cN^9*wH23Y-l<}rcZ4*&AK?Txkd!aG zW|l9pJ3Dv@6fl;v9@(oep}RIT&(|rucM&p5yO?{Ksb=2C-MsL>lf`{mh0g9r%M{-mf;f8%g(n1u3E8|$Mfr*2o;4Z z!~OH3F3mitzG)*6g;^x_zaRC?l&Q|4OFGBcp^sa*;7B^Fj(Q#00C_jyD~n^q-d%Uf z@s)s)TMHeU5k=JB0lkhj-NSDa^I6?CasD{^jdC@TwB`N~Tly$TZ}Pb*_u43kan;&g zaR`iz(8-oeeSGJRy}yVdsE)h;!_ReW zX?oy*YTx;d-Z{6e;IiF+myuMefc#$#Y+LIG(H;7LdW*fkS;w}moU~H?8uAom9AVd# z$1G(6<={GP3`!7m%N#A{lF6Vq9Wngd81r&i@i)A=Rg*LQX5rQMth{h~t1-!GcZ2qfI0 z-~=<*=@PuhD4$~JDge{Xv`qG_bwcc6oseGCp6hO@-?mYUz_zeknKK}h2 zKYG3;1d-fuEkD85BSj2PN7;oFbXR@XHpXUCW+a9$B!&=~=W~`W6?=ezP^*skJFgb0 zt1tJ2H!H}?^|vJ_Bg48)rD&mfC-K+dxTf0V$32_8B4R{Yo`=+=Gu3hvm%*WLw( zWD(3QS{6dC^5xX|f_Pfzh#?7~1k(iey&%&MEf>ucX}>3AD~;VoO1vMozWo)E)opO_ z8P(3yYx-VODtph_qS6}!FY%8BYWg1~Lwd02`HUPp$-u3bPX7B=x|$xuA2*!&A99hs zG=Kx{R$nT=&xi&DaOv~~Op>Kw8sc@E=d+gyA!uPvtrgu=A`Qe1qd=<6kE9tnMd6ZF z?&N~g(*W>Cf>9+3)3EUPF$kH7vA-ec>1MY))9blU3S!yg;9mw!N8EFd2vTs-_k;{- z-92OESNW{#8IkkZUCe8DSfv7P!DlPSg10v5xP)S6&&v2#;hr>&|R7r3(Ja5wh*WEI^YBr8VdhIaal=U%J*ODc`tAD0yl16dPl3=Fkp z95$#P9&xk;us8;L4;i?wX@Y$ZqtRb~jrCHsjS&)dIOuP+{$9v(o_=3HoeUuZx?lJv z{H#N2f@845x`jofjmB2AF~{^ge>0bwC@TsD*EA{_P9{1snXr&z&HtlXreON8jiFya8!)zRR)SED#TE9?i026>nYruEi)WGO#kc`*O#V$T@e7 z>+Cv&vsd$j*33s>n|5t4>VWK>%Iz!T6PYliIFS_r5e#>}V$43(&CofQ`n@iiT`ryA zLbvz_Lvk>Rf*Li$$F*5}(E*78Bkj#ye2TIR(|;I0^%nk%&Bz^$p1&;9gqbLb{Wt5S z(RWfe*(M$qoSH_Jxl@A=hzKt_wirVH)Ga}O$2ARcGtF=&@6*P`R8Sx6;x(PuIMKDoyb! 
z&GZ|m|B!88!@QnRDzE5j6;d;R&-@TsOnM`6GdG!XHQC3VCu&gJL554p@7*I>z!P?r zr3_J&F=O>4k}VEEBZmP3FP??y_QfkVZ9_{p#h)E$SMzRDN=J)i$Yj3{e3#HS{W#HG zA6u*05hW&JULP3HA#11RzZz2O2uIn@in>Zx7Yq2Z4A?N-nN3DKhXT^fo{!RpmkoRl zHTZk9lcYwIQ~qw<7<|1?2i-^08tM0=j*oe5{B(HuYtuUvq7>sdIsnS#9omSoG2@A$ zm1g5_UTYA)m2GwSJuZ580o#Mp5%6ic6e2o@Xt`^>eFez_CI;2nul8iLWTC*JRGTg7 zm?1~}wRjL502woTZdfyJ;Fw^o>D;64Z$Zy9@KstS0BQ<&5$teI+Shb`a&ow*lXVEd zoA^!NU!N~lcd=>+9K2t0cX@(lUU<41lFsmJufG7BA@;WQ%wd1cbiys@+elwaYSEr(u^&4N&L=SXN1rsG>H11dD7uK&P77z6V zSyf`W5YH|-*(tH6H8T@k0V_98sSonpuawilet*P?G;2l%&yBox%YSzzE&Ubp^xZIG z^!#Aji!5>=PYf1?A;^~Iwu(`Kz~28`o$P+cb1tufX5Phv0z!Hly=hjsHnFwRrhf#y zr`UNap2mu0A%8e=2q{Y~QYOUeZ!66-6l|m&aMpAQ-u6LuHR;GX>gwb5F;7bkmta*f z|VZ%M&pdv zwZi4B#Qh_D`*6+Ye2TRj)Ev+lOq7NqH|vmgc$_!4psT}-BC%OrCrU@rl)EJm`vEyL z?Z+^Vdd>A`2M}X%@cWX=I6k#})E^8ct`L_ll$0zc#X@HYG#j0=4Rj6h&F`e^$$kMh~>{cfmVH9b}nX?+q)~^OO?qex_|8 zB^{)B4dWh%m1mp7D-x0KW{_6SEXe-k{_4DoBS%wj9~Dp-Rs$xw_QwaRbTbo9G#?kTl3m=gP5CsDw8rPh zbM9K#M1AR4J(_R7zT~f&`gi=#2p5zvKUv#S1oX=(0?`#l0r-8a`qYP@s?DS6X%*WoQvBk56K?eis&kZA8yBtQ3iGZB4H2U z69~wRe1PC~$8~WP)&+BJ8Se02M$A8^i-8Yg-PjQW6bL0#_Wqb0wQWvRS;uYy2Gdw6 zRA=zvmwU!X(+;;FfN4&gzUM`GcqqQFQR`8KgtsGFUcns$IY*PXC{#@E8Jgae;=CF1 zOl9-@E>6gwwWnc>kGr<84MlDn&}%u&2K0IFvva&28M8K->IjkTC^||3J$u~mHG{(e zz46-`V`Q!rkRugr@RdNocjFFoIBd<`%V&pXJ+f5!mMdnb7h&V74@n2#v3AF#{zC-9 zptoM|uHijoZC5YOd(v+-)f?-fp@q;zNS2{vB>BDqEXb3WW27OuRV-w4M%!AM>?Pn9 zNqPRy>z{7kZ;1OS$|FGY`Roullp8+l%5!ITKUex}m?ybpSGhmd+gDLpUE@bYGl6y4 zKL`Z*`uZTSWP`@PRPn^JV?9H-&3c)p?Vxj`Bi>K={E>hANcfLynoSMA9a=*swAU3z z*%M6fNA*XQV(n`(jlh?&&MCP3E%M|ccYDAdUs-3d-s zg;g~U&oX&inN04fhYdUMOkc$P)@$|@`n;;!y{v4-r7Vx%e|sKTF~C^z;jNcblgWBk zdjVy)zI1Mw#9+H!ybN+LF6mCjBuiT3jTC$}oslqEI)yF-%Hw;}e0mJ}#qnCN>rBPTk$5n8>EQ8_I#H1R5FZIQUv`p#GGqFyjYfazk6jNjgdzYbF63;tF3KQXbcX^&50 z`m1~J$Lk;Z>lL%HFGAg=M}AaZiN<)DFD~r;5gHJ{;>tgVigCs)SWKWeziA?PwOSiO z#^pxzWD@c8=2O9@wxQI`$>hV>0200k_rIU#pX?TUOylu)i>vi(r5lO|T75*ci*IaV z+%$KdPC;>`ZzSCJJdEc$iv8g&@#%6qEj`)v3qAD_K*Xh|d9zhLlB$@`Qp@3sV!G3R zP79|WO4Wv7|0fjj1l3!ktsloV<*3cxR$c-=o!E@>gD%y4m|QwN7gA}s&p;Q9K9)gZ zW)5xlzmP5E8tEOX$^P+Ixarx>ttI|9V`jW^chg-vha-M2a1(kl+A20}SBs8ci9kT! 
zP4@3PgDy&NeO-VA=Fl1v3S5xmHd;ES-}NA(dV+FSaAi**UcfU#Lny%FUX!7{j(CPK znt=xEeH)>O^-GP8=Y#MDZW`c`AE|M)Jm!L9@(1;n@UG~wk+ z(!J*fzNO*e;AuscDxGuGPU4ZNj`2HCE|9F2`*}J(Fw8xtzF{w`vJ(C#K8?G!t>MCm zk!CA375?0}XC$pgPC|LSNVxCS^_VM;*h%Ka-3rkDkq*#A?BqW)S+$Xn`7s22;S}v z#UGqqzWb(?BF!B2N0$1jl_aQtp+mNcn|cNf{r95Q(pAxF&QkM+VSRO5LNlMx6qnkV z0gRW(B&lk(i>cB9x$v`#(S&VMOC}FwUw!lk%{I!}-?BpbN#YirH80435j$kS+c{nz zzqZZLv)6tpM{Ep}f0$nLTkszm4;Xfcq&?u_q+_QW6RD%z0wz=ykZ?abn4(A!B^S{< zh;Qgf<2Mh<{N(w{&m-9D-+M7%R=ik|LIq$-?C9TNh9i6EH%rsf%YU>zw(oRf<_$hV ztiJF>6A4swV-t23=yPg_eR4h>!o>KLb~{pTP+?#CQjmv{Sw2;Zi%LEL_w{E6Ihhw7 zLH88yMU&DbVV1dUn0@5+JdSXYAkvtN)-=&y;+N>ag)(2`cccc8mcMx3Z{egfA8cBk z&T`ZI$#n+w322ANl}=Da9|fgZwk8{XTBZ1s(s41OnHTOAS*-U#~NAUcE=vt{u>N!>2&@B5V2qJ9s81lvj;CkUcc1dyb}?EhW@W-%obu zYqcF~tTpOm%~Z!oo3{}B#@XNGOh@*Nd1#9-J+^X^iK5r;z1>i~_2DXj66eBNV)PZo zzSg%>j5sFoz+qTkH`2g9z%{Dst}?A-pEvbJ?Sq*`We3&hV@BcKzptrKSn zS3<@{vuaL5kzF38NUZwn=57<`$GrlVOTD87KkV}p4xC+{5Sf1 z$2fkjh3qZwSO;4s4v-I-oVLlkB2!A5*(PV$Tr69dS$3SXuVad7Z_nLU0C&z!5Q_J(XqrK}wJe_ZepvI@!I6b^t^K8X6 zRjZTb^Ip>myd5)3F?&9-v8K;i_n#eig{@)jV%;Fiuu(MrrZF-51Zm-}&HvW@&P|$@ zeQ8v;4oma-C;(0a$Ee~zH!ghnG*38zY!|9xCI#KxwYN*19j6Iz3Nw9pAVS7vGTRq@ zm%>A-(AX3bIaf<$ks)8}uKwc9#`7%{s{`L&^6VOfN6z%E+a%gwWcE-_g3gQeS;f@$ z;uE}k-~NwybF0CVDvzc#^dj``;&onuH$i^EChB|M4rVAD&)!MnK7C8ZCBsWxRgL`T zxh`&-Tz9?u-u*5Er%Q6j*KLbkO%5$R@4QjBi=g+Dxq1_wn>kQY_`79X3W276=d?%H(nBAEs;vRs z^{7z-|E`_fa~^-z#muhf#$6<2H1pDfXYOKR$Pcrd z3}$<-jsgHg|=VY6XVgw|e+Y<2HvAtnP5erH&>Glv;u!|L$g*!afJdf1Tvi3S|As zmm?w`zq2&+4grgB&O3FI9dpg5YrFhGDo7uatU;Rnd>hJ^bE+ zu={7+qS<+t`M33&Kb?xFnau| z3Z9;hQ`WS=&Y+_E{GY8DL&tsiQm-hA5G=TJ#*`c38;wM-op5VWdXI6sVw@RV1T70~ zU+-p2o-rNIm)-mvbq`F!lU=UI`+aoXEb!|HzGE8IsQJ`(3`P;O+V=eU- zqH%`)tV|S)cg1YyH

e_wpwBe+06$`uzgq^%WLsiBQU`eCUCP3E}0zvnV-kDSNg z+3PAliJX|!om=3&_CGvNL;GRaZq*k9h{DsQtL;9l5OZfqV7jbG}F ztX|lS-&~Sn=lAa7v3K>jKFmL!ac@-IJ&1v$u9{ep4Oab8?!2@9-WT~oPo$>6k<)LX zcw!v2gU1VbVHnupFGm~o@>c-FCBQp%jv0!6BKN%GcVY6o(ZvCwc7LEO#}>oeLA5u4 zgt7HET>Qv{hz(By10P6ygi<#YlQD3lF)j(oMegQKJ}T330v#RddN3h$=ibVOpno?I zvW(KeJ$LH`R*Lk-3(2vciR`k=ZnNWJX#`t0n)^BKHPTT&$gT!dxxtfrO7ct2ixoA7 zjGRy-Vw(6<*-60WEpI{OR4EU1v#Cm@oNyg7bB-c}*Xs{EoA(;35%`wbQ{^6P?M zCJ2*@WWhk-jbU1mVaD8wC{Ilc%H;4U;HDG{G|}PjA#<<@{xEh5)GAc|*(3QPUB8^0n zHI^n|t(T3L?DRp?Bn-aQsOQ$tVU?d6{KGx}F2Ue5cGDEkYCNtdHP}8rP$Y^pAs!Qi(b51oD+?Hno*)#!~4w ze&zZBR(qsmHnh84%9ws5x-&;q__?7V>Ou~JE6lkiEc2oF#m~jv{22uzUDMcjRuOlO zwH5Z5%K~gCl%oATU&r{LiN_xY0LDnG7WHe(HO`|*SjTB5*6NG#9ow&|wG>^I7;4;~ zthK)MdZCUc-g7OKvk~H*R`z#aQW&?NG>k2g8%f)r6er6HenUF-gsl-yHC9Q-DIJ>= zJHZf9?7>`O3^~_?I&pmYuujKtVMMVH{SSN{m6r)b)%g3bp1H6GeFNuHEkR%5wHfY^ zB?T;c@uuZ3Mn4?ePn)dnG#MpEG*BF?pW(JHt6!f@O60$;;l|3n4Km@RwPKioJX7?>TivRfx!B;IY*=3sl4G?BiGFLP42#u$q~asv=xfE_z? zyVcPo`t5FEg;N~=M1US*UfpI$c{$anXN$IeFYtkmu$TuPq_RY2~rOnn?Z7-3T|p3__V&y zH@Nvzw`@!|o!bFp+1O>P=kenHFaJxWvwMoXF<0aJ&CvQ*$QUS1^7(evIgDVnSUnrp z&1yYBzdxcgVlAO3^doz$+mIFQ(QIivIqrOm#m=v_UC#XdJf8=uspE7SF(+gE7(7Mi zhij@<+=(vvBPdf!0CYb;2Cm7Qdc0KsGo{sPNTU^r@LieEYl`$Bscn--1hL`*PX@UT6uNl zGh}y|ReFgXlYpK{$j0wiW866jlS$TIOy*zU>$}LX7IekGkt{Fr$U8eWl+hbDEZ*w7 z{50By0e-72F;@Wc(sJZKTMN9nlA-#U#tA|VHIGzf=yzM99Bq8JDy9em1%18Ebb_A;#8T6#)a3$=^kR^m_uU<{xdp4Iejh6hbRj{ACJ2WZ?UCZ`rrzb%o5 zvk`(_@$@Y-bL3rTIC8=#{1U9YM1Q={F14v~2B14>EQ3E{AWs{VR_6!P&NyF#(z?li zNYS|0cHUC&VMn~Uomv~9Xr8P#{7rtpM!6sIn_xU;w6#u$XLu%U3+Saa}OQSt<#5@k&N`6!#TZ*Pm@cv6^U>}~w zUnRly5G)=1y}GH~&@4t?^t`&nM5)J<-%lBHJVUO8O)X*P*yv;S z1|cp9$}0qre3=wV-ETam$LlUZm_jgEFnOW)ZUVn`_qF=uv^V& zb$>^+`=oyNV?PNnsdePIHa?}bf2Ksu&7}PsM)e!_aX}l(ShuOC&W(I|9RHE{U0BlL z{zA<7q}H9$1^^msKr2_GN87744iv0{|6L^xEN7BULj8`e$fG82nMJ{9IUI<R#{a(EiFPo>r`?hoz z2*qd?5Z}X4E3tO(R}#ZOgX)hs49;yWsuVfR>b0ent8{VF*{|e5Rl+zr zA+Ojf3svreEx}XFga==r?BDxyeR&Yh>Rp7@W{?ZcCRD&*5D;!xzqkqLOcXaEzEJ>* zYCnWkzb_f~-cx$hHc9-;oaqT4^=qn+M#DTcc7KA_NIz>YR<3G=pwAa>{bXqtd;-N( zl(!hZzeaGVK3_12g(<$VhDBfm$G^-`Eh-+Q;Aa@NXebqy0V5PBIf;q=AKrboC(}Sz z7(~Oc7J%@xWZ0!LyugIGCIhU06{GN=zd{MjV}6;Vn;DgNj)h_+PxLgKoR|@1-DS&5 z^;^qFx(J9bb@aj_F%}fW#lhh2|M-G^)6e&l<83=furtFMD>EIGzYu{fVo$)SmZAW; z@kebZR1r56$hHymL0rM&8xrS}fikD2y(4K{npV^Mfz%^4wPc?^`{z=FSsq`O*Twns z-W$ljWw10<8SSA(_l^}6N2^2Ut&4}aCN(jkN^s!)EdC6QQd0d-__e7P#DRLMWyF8pXQIn&|_o;+Cp&-+PgkR^9A*Dhp zqJo6xt>f%DEibw=uoNTTHGsnGGMdC&=fMF|AGergOe0C6_#nlc{?w|tYH{q55D`p>|KEtoXDE~Z@lp+>eM5X}ae^SM*7mr1a z{|y3tjdK3d#F9J+S9~A?>Tms7g=1(UodbxF};ZWmjC37Rwz}5>{VS7}E_1;{% zk_`5#-1vIkTc>Z4p806Uk{H-3{mqx#_~)}v=Z51FaWr4R zPa($fTuq-MbSN zXotLkm3bcpj*Yv$7b+*h-=|!Se z%vF4ojlO4dIL8N)h|z*w8;_8R*}Qbdm3q9cTsJ^5yE*%_`Urq~YK4wlshItl(?Oti znewPATADPZ6`x(ohdQb47bpVduMMUw_brp%X?&fjVn0Lx= z(rf>|n&JAPwgA|w{;SWkXXvrPT1r?Oth*aq+qYAUo#>EZ)y{>th{BJKx6Ox|*rMM~ z#Fuak&m=uhB1P?axs2;jnC%$t3V`PBDjr^bg`w%-*Edg-6|)<;0Z4`|w_;$96dW!A+zJ!?%tB`qS6QO2raq!pur zgk6%W4Pf>p=h1tH{j7cnFfh)zUT|>wsT)&?Gjk3J(x%Jr4GCwXBb;fn0NYUSks1Ny z!Yl~|&{-s;ak#|v3~wKqdw?Oq8pP(JxhfRY@Am;&$% zbWf>7h?*ToJ_J({vjUuyP=|r13z4rF;dp^bM#)%k+CxiYTeUjyiHv*z@HXuCw)Nc` zhnU9@&bk|H00~@pD8{zSc2tF(D{mK5iE6E&Wg$0a7P|cc00s~)JX6H*bzoCgOuyE2 z;QZ2UlU3Ri^vnm{#N)rjKqa`Gb))&Qdw-u=Oq8S%KI2P#Vqo)rT#l@*xYRO4uC1y{ zE~rMNa})gqx*{gYAzVs}8BADzoo0MMGaXdafTkIUczz5$zs+_!h`S^a!@HzEWNmI} zIPx->%yzxP$34nGTVu?<1k~MJyzL4UqQ5zZiSMD;w$HREOe2*Q-wu%Av00%kJv|(s zpT(zwS{f@CB2o77G;M^f9zMY6=UDjq9nn|b9?jLip2HV$+wi|>q}5k9R;zLHcA4_;xhI8`1k zsg?R|oGoNRf$#{Yh$CI4>e-KVvlr`+R+sqa5K*hRsm`JCIs(^(_Uc7Pr-?P6nn!i9 zwqGsN7sPX4@d2{NU)^5iNqSZ+9Tp^>EL8I+tTJh{f#O#4n%g#>@9+kD?c1>E4~}m` 
z9yHufuonwV*xuyszMBIz+t<2_r|jqT(w_3Jx!jXnDXi-`uI@24(>U49R1Y{3tj~30 zhH=NP3b2dXZl86+OqF$wU~5;-Vvw;BO_Ii3`{jm`wS~W1O;PUFYrWyPZ^v1j$N#h+ zFs)VCaUWGZKh=VE-f=i-p6L8D3C}LD2?Dt+=pJ5CkOiXQ5H(D@2oaA?CY^&Yuf?(u zZ-i;J>gqbrXEt76p#_CseHQQ^d6z%s?4>4>-@0c>{l+5&>I?!Et8~ZAS?+?Ko(UBU z;%(_{D1#y-if3>8Lt7LCX)5yiF)5)bD~^O$!%0EH8(0CNMbTEvADaJ! z%y4rc;gm%3ir!P+B8E6d{pH)X(y}>m(!`6urM$j9x`isbhyT(@m8M%jyGl~R1&}WF zMnouo54#u^j%4%}xbg4b7kuY%fBEH;9D!s^nz@>43{-!m{?B_^H4aEpU`9gEyI1sZ z1EVh2Q0@p6msLmZR!8`lDMsWUmN4bO`%I0Y_}4k;9!vCIkV`>>@|oL~_Xcc-cpO;h zJO`k6lp>@dEMM`ef)mmw^9wxKln?B)jJxx@6t{M+ex?_HH-8Cd1p-zsz6+3sdK>q-XMaTueQuYpQ=`DGz?jHlA)ai#n`_N|epG(0-1e zAhEYhnO2ORl_@;AA(W}C?`FawYEcY${2C-Sh#sU3B~qTbhYq0u~J#!ctlCT*QY(a`q9rSB23A5&|o4+#zrG%6wvu6jf#}ALQ zvt1-bAl*aR7wgiW{(P8|ib`}S>!}y3O_z`4PzOpBrq}Vl^!+Mu#mJ3SG`Nv~%0nzn zt4(QHG$*(E)U^qmTXM?d{|dQ+@f<|qip$bJpwqkwD@4GI;Ww2BxcmGrFu5>19OIw0 zsM+Z0X44peqJ3<^v{VqSy#a#YI$)~vX5uSPbZvhGlMao1ERimi^!0GxnMj%03ONm_ zV`0{@5YMf4Y&T3G3qB!r9y$GQs^A%hy&6M{_%2Q z_uvQk8d!;a77%-9I-vZ_Z#h;#f`2MpHc4PP_sUAIJ@Ef(0URNQO61!1%~jkE>IyS{ z>h?0(;g(gj^~x;4P|DbS-|TjsK)LIOH!uOM@QX$2P9#p)HRMrV3!?fjB`?NgbzBy!@-9$f>cip6+AkNl(k#QFP$lSGvl0S*J2pZiu@Mr3G* z((m>7oz%BeO2}XiR@tUpk5B*nSJ@ZQTA~JWu`hgYKRc+N5ts7aVrJBmFo%Ym-TZaT zscIvgO_LqQ(^{oweu;(fpCWE;99%gq(_3Npk^;$};bmt9t{O-E^w_Nxi%Zic9@F79 zbaRpQHso=L;zal-VY|1T020+|dyEEK)Tn>rOIV;2sTH;i_Ot4%56Sq*Fc z^7k6s41v1GNF{2Z77w|NFCW;WB`fa&*YNAsI% zvyE!)?EsZWstnI|7A+;{ZI-vG?#@LU+tHcnOmY)aa`@^y4;Bb}*<}ve$o^kvN z<+UG|#{y*{Cv)Ym_sAlmqDxCLr4L=+kkF7e7H*eU_LYOmR2~Sw{qh|iyr=hD@aQwQ z;-tr)es((*F|n9D#4Y1pd%=AA&-`_HIgP-koaBalfL5#y#ov-D^Onv7Q_zklOx!_l zdcBu(X{rSyrq*7+776nMYbM@f2I5K--?+4NZgQFD^-3CkN0XBAyA7(^kmsGBILj9^ zCTLV=Z|%t-!TXNGMbE^cT_deykTzrxT~F;f)Qpj+#@zdcIDLl!`}B1O0!3H)AKQ-T zyu;SJ@9oDE?Q0RqY;C{T#lopa+y&!(*3~n_Crn;E@=;jyUkQ`s7(RhVjs7cglH{XD6itG@AMsICOgUBzWEfUcg+MJ9z0!(Zc?G73K%9t6MPV z>v<>~Eu-$c-nz~u z`1i$2D``bIOG=?t1pZf;kYyKAVT!#9yrWUWePaHndyCz3arkMC0dG1m8n*dp0Uk}1qa`HAPVn(Pj1Y!|E z9BB5k)VA{Z32GisM2&_ADhU<@wenaIWW_5L$B~F2Pt7AY z(r0?M{F2O6{Lv|FWj z2tol)+bzd8g>PrgkW3#$34_SlvM42eBdmh~t2Jv@%cMc!wN7Ybg8x{Ka*&?}t1493 z265|?=I7TGYk`>H!uFzz4J*-98ZIV~m!$bVMv6*9!2Kf>2Jw_a3d3;BQ*w+m8M1hB zA_S6!tfr`#mYXkwr7JmoGK~&|Lv{#AnzHq>`>X^)et#4zEckmFg{(Xb+I%MFhnZ$F zBytBZ2I!P{NJffPxuii%eP5Xn5i+4Fl(LV6z7ZrqKo0>ylbBC3@Jt#I*@NiR2F1%T zMy2QDALy-9c9N~ z=(1_PC2N|WD{56`!K64*4rbDkcCpqlWP>9ZVd2oRV~K^VP+?XmKhOl)qcBe#(zA7F ze6n-`umn62Bd!%H3`{y>6HcsvJ6~t=0##!1DKf|#I;NEvN;rt9eDDu%W{F$q9uhRVz|%y=dtW{M38e2}%r4T`o{ zgscW5#KtEO54egaPftSY-&03grma1Uun#m;lu$IuOaYT{2xc2LQ(FNBQv0xanTRsA zhEjowJc*-D`nxBgrDM9)KXWy1zdw;Mz)NQKVGV`Ug$uAjfsew^KL6BJ-oCOd?DOQh zIxA}qAHu8gar{rwg}iCgX8Gtn>+G|!5MR+f{2**lV1uGU1sKV0-wrn`E|blPAukMZ zZ@e}NV{UmdSPAuAVi0m@#e@PFr5lH$Lexw5GoYF z2o;L4eeRsuu1C+FZvNc)FxXtpJ@DWIZhxHad>+Os|1)3!4#;e9HF3+}5ZGOyk!@)m z&Og{W;m9AVu-t(M9t0jQI&M&`Tf4y>ecVwnlC3u=1dtE#Y$+79}w zp@z%Vz=t<)u8hWs#MM~LsRjMm2f|n<4Rikqqu-wbXWsmIqWif$Znd0s)>&BG`NO?| zn;wrm`Y`ILvTNSFxw{D)6zofDpif(F!eG8jSG!If*Z8nQ#n?AgW#AO)J5v8OVUyy< zn{I|}i=zc@12&>&&Y0=OjU6isKEI$1d5dH{=&j$lUQUa0!R4RO8x2)e?kwm4U-jqj zQZJ`8J;hb8wgaja9f6ZgtsC+)G}qQIPM z5lKft>(-K@$q7;XN_<-Sn-yjyJ_3;_Ti%OZA^Z~_np)YIFhZO8boxs*K@>}K`1Dj1 zP9h0}1wmKeS4u0E1tDZa{z`^)qYFg94EQEa1jv&CHz;6e^wCGvNcQc$sV^QI6!<1E zXA~*@|N8wAX&6=WlbjF?0Rj5^-ybPnb__Fag+6iUP*L_O^fjdE+Gc#R2Wpf17Lj(+ zerd4LH(3(;KGSkEYORY@wilfZEE>y5V_i;YScL5&(s=~&LSYcga%6T)(U8(ZhKLYz zv#`JuvYM}A8f?A{Os7jbpuuX6K9e*AwacpPJXuW}6uf=a?OMG-Aw>pa_WQ$Vq%v4q z%8Oy9(?Ko)NpJ#60t3j9%rGMM`(p;f#au$QNOnTs6BHiuAh=;`LP%x~BF5jki9stP zgHC@nnv+)&ON8$WVW>%onM6G6<;D0DJkI-_`{fthpy+;sn>H026llBKVS@r{8HG~% 
zm&kY&mM!)%%*o8Xj1lVloeH)Bq{|dVl*s#V;g)*>Ja5YZqpN|6-n=lSt*X^I~ zo2g&R0SFpW-v7Y;#8%Ow?-#kxr+n^y!rYQ`kWQUC;AZCGuov~UdlF;zr=Lx6b@$R6 z6pfMp;fE8kFnX)lpWs+N4ECJ30woR=%MxFERaZCpOKwnXl>XPOSyQ>OIePRPsItsMloujv{#Y>4?rC9FUA7U$2TOb+X0 z6`_gc2;7!97)G+OK|v$gTh_Z9Z{SUhGaS?(xxuivv3(`j#lS6$k0*T$o{QYI_)WEH z*}@G1p8=?&YE`j8AvP#)Zm*mZ9@&JsK(a{ptJ4(@01~K0rje!g(S=9wN|{{QG0BNh;xlNBsRK zQil8xAexsU{*E=#XGk`BaX?{!PUTYm8ZYVlLs0_(0HvN)z(tzUHR?W&=!}^&aq znY+&XD65Sf+x_<6-yPcM5S+@Yju$yTII-|-Ge}y2e+DPuKE=zacJ10YZPiYU@t8Em zu_XSnfDR|g#=HCOzhAEB(O`MkYp%x0J)U|_oDmEq(jU4chr9&d2mcO#f~F)&3JVY) zsQ8qwgj1wM44vl1%vCs@_sWYexfk%!%+rrh%7)X9W##rsRV)brvuI8clm zR)j%J8Z!!Y;YX*~{}M?en;?PZ^28la){%cTT&Juei&6os#PUZ73*}(+X^MO0%0jpGms++h6eB)uppbGE zl-Eidc_b-9B$?Ysq6np=(r^YyBI!`!b2A7DM+CK?!E6`Y)~`+31u$rkwH-EX?+&SPDIe$e3(p}5jrbxQX=7#GE+!_@F)qi z&&+tm`5Q>S91;&dIREQO*gz)S1}rqcftv`tqQ4vq43%&zl_&9zZ*m-r7_Ntr?T7I3JrXD4x06i( zs!N)PkNo?v3A+rp8)(e<7N{>e2n*;`iNKo`qDBh8|8h9tNaK=~pyFvc)ERW?dX?j~ z{TkJAgQEAH7^k7)2_Nsz<0Ja}Mc>N@YR68O;LwSwc+yQ4ZVvDRyb8wnwph42A1C&n zedZZAXy9OJG}x|i&_69VELbNSajNoWoFe3DOD?ise#I5;{BzI4hRZg1Pt!>D5Lrmz zg8J2viMIl_K;Cbm@_;G}R$>#NO4X|Ff&2TSTnD;0-+B|O2>RgTc%3^2i}$zmz-@?~ zaT~%ro)F{#4jz&qE&i@we(?p@4~s2pQHMtzbF}Nx{bn4h*;O!D@5K4>#~-ngayJZl zPQe;!DX4Zj5~>gGkwX}~i9r=zJgmdpCdVFo9OgRZ+;=l(Ky}5FSZMi33{AJk4G`$e zmRl>_NO}yXIJbk5N~)o{>BjEDZ~2N9?yQ!raPwlF)YlL!Of^Wk@yZg=E-7%1teU7(_shWan2%v{sjBL)t;h;&MeC#Gp{7??O(BL)~IVwQT&Ye3s zE-tb!_j~*?2Rj)eGs}xpNwVlcTC5`;c3JW3ued?cA2%pQ;s!+x2OAiw;z>KO$gaJV zPgE+>{VUz)sh0sqlE!1Igkm({rb+3$$P)-~_WiN7-)I8-Kk=t5beVTh5KwdD8&zRT zdMa8#R(WDHpiSZpipO+=;x=wjVDXH7IOBvh9{(5LAHuo_|1>ELlYb`e$I>`nYOXvp zfGT;CD+ybNOK~(@_yh$o7doMVluIG3$*%c)(uL`eM&+4ONW%$;4@O#rbPDs56u~d4 z-=;j%a54V!`$Iw4F?emrpqbjC8?EkS%CF(bF*FS*N)vOd1wsB6t~ezTgu8 zDD9C!P<{!@f6bq`8Ip)_U8g(7-EVM%0+;h#Z>WuHjspb*q-d0jo8@D0XyHFFioF~g z61!tlyyL|k+?i*c4RwRKL4h&zwbw@CqlYkgw~m#-;;RCw`5Mi>t1> zO4dLsLq*|R@4W5G;~)hcW#lc%OELD+=riLyP|O<}#0CX7U5>*o#jY^$UAOLDTHkmVa`S*|I9wZ}DlN_> zKEV8_7YuW6!QAIM&}oGYip7hUxZZd4mO~*8pr+~P<;$@~QVX{{FU4(=!`!pDrST-> z+W`j&&O@1aL*aAWIOz%5f5W=XZoBRdm6P|oz3cA{Y5n^T{9$v;eeuOtSi{>0wN%$* zW4ncW7Dmjuf%C`jf4FmS7^Lk*ZCuTodq~?;tt1V5QMC()}ja47X3MU-5P)=ag&+9xmY+v~1ar?%y!5&AHu9HFm^CQT_&n_Dep~ z?+mYN6!Y`5@f4b2 zy25e8%Bav%7aCNAPmaZU7}ITQ3oNY4n5@qDnx9QI zC`%<2e?gh9XsDSwR}6hB7V$!HLv}zGx>LWMiqlp%$WA#=R#7DYPo;CupTcj(PU{w= zSHTBXODt%dhu!mCci9cQuI$D+_Y2eje1ezR_X`(dS9b^5F>la7U!KC!WK?oVIZO@} zjIjyWpt!#;HYhL%Q1|iGS7U<$i}Y-N^0Jp3MU$i$$SY?6P~rxM-9#?{-aK0Y)% zxMPnw4mz957TMhdnmOTP%q%hb^XAQUFpel!7~A1w+=aNVNmnvP{QZR%@W4FzFlf*q zoQ%Nf3^b7}25`{gU`?9%QE%R|2_N01-EQbJywZ8%i6_JlF7z+^ef%B%zR{CcO?+!e zA;QB`D@xSzfQx=jK`8^B_cRiovHSrd&}D!mLeKPA0YKIc@BrpapWp^XHK=F0#GP}_xza-^_PTXzaeDr3 ze5ej`zv1Jb@5K}G;c(fdon1YA(5}Xf#fKhv2sbERms5Z3a9W=q@9&Ly&)xm+f5`&% zey~Y!5A+Fhp0*0}j5l#Al6EhoUeJZceK1yRQ2d0s8y5x^&R^hm*s+GY`%bk%F$EvQ z+}!>0%P*nxcsJax>EO;fv!$G#Wo>)`RSWzeUHro$2jzzIA7k)g9bre)6-A74LJSRk@gwhdh<>92o@qrfo7G;)!h9L-0u!PsG-zj$4(tYr}WM{?IMc`b?enbcPNch zglpaGxpUoybsNFIy3ne)O+}0aFJZCeaV&VOhfRkgMD@ikJMWAVm68pJ?-zWJns%7Af4yvQt6*eB)m4 z=BbAol@4`YSBN?>%ECnkE|x6E0v2zX?24PpRk4x(+wZ@*0a&PlVRTWcMO6_OU)%{z zhFbFPANEsu|HT&(5>iJ1I{dy7ly{~k;2DrL-=|xufyFo$L?HWrJN~GEMh$+D!zi1t zFxgm|roVp@2(!EZ90Ca=2J_Q>9A129W??KQDxR=G!3DD0vAA{m>9|2rnl?ieLY@Yk z&!?pHRHV>m0a#?u#w5u<^2?7n8nndmM+lWbO?ZOy1o%<`Aiz?K1)DD+H; zYZ*PY0PC|tpYeNQiYJqdcv&{t81tjEJoAx*ptOVe z5m+*{d|7xEG)c=ykftZZB-!%J*Zlu4zCRMqF}StgUsfcKW3`-QhF}N?Y>dV455C`T zyzwTn;Y1Avx8ABMjaXj(j(^Ad{{8nqz-`D^Gb` zgbfP3hj`Mz^?B#JvE#;KA^Tq0gl~vV?;CHp!R=MIp4*6p`6r>GXe8i&|NT$bsL?@U zgW|{|kCYpKyg@N}%4Aokc5Tc(u8~6*ix>YOHYS#1KEm~lhcG{)N+@pr!_YYFA$;xb 
z`S)FJ@uDSer(Je*xAnYD#&6zec=_d*F^2sLTNjO8hmIXw!-E^4TwC1NQ@_E&>mzO! z4o_CX+Eufrr^?v79tQ$O4j<|IKG+vss5EX?9tibB_h2&uHxjVHz|DbCm|MX(xZFOy z0k>GG%IKqyKE(Js(0vP4lmFOff2_xJbjKcdjI3o)9pfS#=714tw*xly_rTapwMp;2 z_W?G9ZpWqp>_T9J;!N0csE7j@*W-5MXPUDZr!eT*u#LNrNo}q642wVl-+CYE*nD2mi>(RAZ}3L7U_;S)O_WYm*d-_W$g0^ zrG1lgku-&XAwL9|N|*G1$-^7U=Km2(LBq3vh~#K0pT;k~?J^j7k%IS+@ejXcA(=cf z1d{Yv8Ww^&Q%r)Q7?PNj-=LXJ_dk4pSjmklZ%*(=5pPm7#U|dZv_X+Nz9%r)+x$ba zOxV0J%*tXsQZg|dkWv0PZdq!C(-6M<@vF3gGc7=dKt4{0>r0(cie^Rj*&bazJpD|d zpiP-Td@G%gsvx9=jF9GOdoLslQSy%sp2Y#=R`_#+V#0)putCunMzSw+4PbOH%hYp< zWJH3%pD{F+Hl!~|HZx4>3d)}fN*w<2_nW3OYH{TIqF9RLGGAF1=Wj@CZH!aFO~s3x z8x;I7*>2lyUGq~sH|dW~uuM*XnqxiaWyO(HJ)yuItv#?X}llXd3Nc><7Xh zaqdbpEKX;!h@x(o4#u+iszHVEQZq818x%MN_|s26VE{TDhJUVg^&4POG2>89Whd_` zvLHwLX@{44hfjdlId1y5Gvvx1amtl1D@LU_F!95g zE^tIXKjYZ{Lj9AEHMA6-a+Y$R!jRG3I3dU~@V3hZ7f`)~FQ*b!Ia#|b80j#b4=o5| z;y#4#$bl#W4LeoE>BfC!07Wt%%oZnkn#|2Kb^e7PNoYBM#Fwk|KZ1h3eP=5ki7E{t&wj!e zKmRBTqsBu3BuP*t>V@K@i2b`1^fU?^!Sk?EPNd?$GzH_JPkhfI!$Ybd$rUQ#qx_5} zeDVuX=4)K~rxAzjrNGpFe}KUAg9sE88+k#AloTZDZ64!Sy3bx0McGr1QV6#MMEPab~~Bp-7oFXMlMKcHAN zb>Ppsj?ttXNc82;fB!~4o{)!&=|-^74-yJ$N&JZi00GwbAFDganU<3k${fk`;e>5| zN;dB6pNZxI6C|eXkNi-~lt1zK`=g-@ae;u3t7m-8w^Zr>hDtwlD@0QM3E8|}*d~G~SP?mlKLN;{ zl^vwaWNwYW0eTU;A500?|7?& zALw^-gW|Bmal>G(7*8CI4|&Q(bq2g;a}d-d&B4O|OW5?~VUltc%0tKWVYt!pAXHM8 z#)tARI9+)GWLzoJufw3Ss2L1FZpMv?*I$1FMl-*MdZqQ~16#x>CiR$C#^M1tP@chU zk;RJ_VdJC<3}+q**^hJILCwG$P>HY!lg2-xE{SuDKKI`z>YP~1S3=h@jahEQtxzuH zwnbg+wBydO!|}GLs9N;HVn<_|v}sYl{yy%-mtVrE%O_+bWY^txk&kw&aQgGlRc^ue z^RbvQ%WVcNHdR>|1f}YS?zm;a)0``CvXrMBSq3ijTmrisC!TbYTZ}kWM@^eHO%9b* zt5OZRq}$-MVq4gVSmWNn2Fn9DbVJ?k_4eKir&({7O&#i_Yl&MX8*rP0{k9wC)J;w} z9_pKBIDG|cIe+0w>gv=P{Pnbv{%V?539bZSxEXtn%o&db8Eu*~mpA7VbC$TJl zmfur}^au(^l^>$w3G;ud2jNNBFppOd|BU6;_Q>N;2IOJ|^^KOke^rJki4YD^1gw&o zVRQsYAAzHQDq;XLI>L|uI_XIH=TWh!=j92|GZ%FA1Y3huhe|>~!lAyHfC#ft4<&v7 z2%`Wh6asI#L2)5eJe5vVJSj;J6MBQj!N)f_;(=(ae`yFK9|RRpEkb5CV}R;YG&qqk zpyu(nh8CbA`~w97go$F{&%jnBP;8(E_IO`p;Ek^o#^(Xp#1aC0NQ01yrzCii5z`ng z;sOiF1CJu#A98~NY6Yj_5FxKn9Nzc{7+9`{4f%CqZ($A&4E%(fl>BC7zu2o@eNj7k z{PD+uYAI3w-S3Gf+z8lG;$g{autC8a6jLy++y-?~zv2+Z_So>|qWk_>?EeJsFK)JT zPC>QdycO7~9~2h$(@ZGmNd@H@rdg1cx&_PufN8cNEzI$Y$#(GZ(Q*3|W`!rX)nq?WB(%0|JnPd^LQmHnZ; zMAcn(=+MEf#N6ji95Q$i^FFqDO&lh;0d0CP=4C%)t&9g=IR0sfI{mFoj3=bNIeWNZ} z9+BZFM+!mi%nP}hOhiiLp8|qJcyH#VlXYHQWFh|E_ecKg=DY7^$_B+OY*g?7&8~~Y>HVi}3Gnt6^1`hm5Oz30)0hdk31_et^7jk?oveTSU^^)&_ zgh>`Nq2x2HQ)sm0r*R~0>26Y}ypE@tTDW}1B9!qr=7>Xdc08@4l)n0w#P05I_yFT+x4wNJ#HIjMT+vI&1{=6Arr_ukM6yoZQH9_wMqZx-|je=!+434dOWo!C-xy6$k) z^dGhSbFTWDC1nY5KuS(S_^6`1_ z!3XIA3jnQbB+D|-Uoam7&jhy=3jo#d!aerb4A_AYjystNR|*5<2AmjJgwqR8 zxQUY{i2=y6<;%&!!GTcwL>m>>(DUanz(T}ie4Ks{yv=eou@MF@_G5nFvOa!9dA|7S zYxf0oLyjK(rhIH3edJNtlsi&R<25|^V6fGWG;iK~H|gV#aH8TzU~Yo(%3bASes63_ znK3C!`O!q2D!`yU6$Ad>_4jfYV}XErj^%d@rZk{F6@w!eK7PVN*f!g4<1TLB4!4vp zgNms&SX3E@1%b_C(D9&y55!^v!y*ZdHhqOFqTIY`r<_k-RC$7F?arasOwmVQ;Ivt)!0LD8gqwg;Ngi=cHJ0(Tv(Kv^x-5o zckUb*&s>R<4cp>+X+3uY^o3W)ZYn<(6l6bD_F`F*Kp9u<|W*KGm)2G5K5u{r~1uDv&`O3yq z@}9kaNhNjp5*xVs{t3^&Zh^`)I&jf!UkY*!Jy9SM0F4)vKR^gBX=Y`pgWBu+Scqmfz|`xe*9hu|Yu6_d5Ii2f$b!<(tP8(WDy*mdQh!-!EL??(6#i zjAVZ9AHIjO`yY&t_$%H181E~TFE6xKtyv>BC|<*Y!M3;+ zz>OttP|Te(2kMmuiFyT=gJVKzOg5-e@nFntYC@IK%dZZFVaF|QyGm7D8?^aJO`5oy zV25EL76HUawro%|7Cv*ZK*|$?WATx_8FQMlrJ-ID?aVT4fodSi{EvP26?|2lhnWIS z3euS7{ZPZi+n*f2xS&OxMwp*LAdKVRiOq&CS9W#hVzGg@+qn2e^$DZj7!BUU!S4L=eQ5XV}qg!KBDi#sZ3QjwP4-?@V^PFs1`x6T$qP(ZpM?BRCU#*OBZ+a zF~>-Kynv0B{1C_z_P#Ax)tcf}0pDTkkvXaK9lvhdL{VHrW}QPA|Oh z5)5YEffJ1Hfj!EM^s=ZQ-kd!V`k*;i_zLq}&dayM_;n7{Z5(>oA@1EV@50dd7&$Oh 
z4#q3Df!z(N?cz-tZYEX5CS$vH_&sBTghk_m1C5=&hy@^?ZY>YFxe~=PZGmbGwj)nz zQYF=nJMAoO|1#`>@bJuUFba-r+<=9S>f@TA9KA)o6?=gYr$@)%f8z3gTLO)T{w@?( zp9zxCWL!sR9mwyW) zBBE;|2!()5=ax7Y;O!rH?oSSxybQidumpGiv}7gHKX8hJuGR7 z3%JaWnOV}Bo@u&5WW1A52H!zg8s5;l5VxtymMe{egWcRYFz_4KR6KurAaCWh3NlI5 zPoD;wo52tv{5k$S{&+vRvB^2bwb$_mMQi!Czv8mXp(19M+{WVp z1&-P5GiPBTd3 zzMDMxGbuUgaZ{i3BF;Z@pwkfF$UGdo?>_rNKHPf5=Kk|9zJOa4uV9>90eF@TG5~jP zd>_}tTGVK$%G|CJ)hjl0H(_(0-yiF6Q?o0yb4pLz`+qWQX+7lT&6_P{;YQ8j_?Bm1;^qZ!P}JD5hS-syx=6Of z(j`kUAA81)8ufQv`oxAS78}pvMF()W1a|qhpdJP*!f8qc# zCM}qM00($=_u3otoNdG|2njYhzKLv5unzvf+QA5HVD#@d0I(ZfDasDAM!4KL~`X z_Rb@w_(?bk3cSVDPoK!PObHNNx#Yk9jCTMhsQ>*38a^`vLooc~Z`sPv7@g6SNFZXd zyx+ej%QMiIKps;>*(X!l2Cu$KW8`=!hXmqui_q4itOrxhFPFE_A22I2~!^!Ub}CFpnH1*Y
1BauI+$f5lZp>bj>cz;)|zYe342ffj?sZWz2S#pc(s#~mm9 zX$Wy3P7DkgGFS$T)0&^=?!5aB@KM>>Oj|HGFa2>TUK+jJ4Cs(#lbiI>w?0p;a2L@wU) zgZP$PZV@#HPd)WC3?vSB>tIZZFKPJ=qkgx+<_J}Sv_8MJv_Hkv8x+sT=?va|~q_z1Y>QSJ%y$F#~owUV`DuNuoPi>J$THDO@6=gkxS*WB@E-O|2F6VyuwsJE(ByB39%-``hXeT7pT&&lb4O*pBsWfR&PlOUeV z*t>oMELL11w})6PPL_W9Wtm*xoH=tQ7G`kj(i7q-SN4aKv2n$Pf<0@32PP4#Ve?}I zPAu?`YSH*xVEu4XMI%+UFuAy}%?0i>oZ6J(8}AKHY~I5p=W%ROt;9y)CQd}qCbFni zwiGtocv|y9oDyw@jSk)1cpnQM&p-cyY-Z4~X01Kz;1nnHP-1hj@!>e_3iTjd3`yPJ zd;hS6#U>U=*DUzx6u$dvB8%w93^ELGLW(Kkit|TS{3#e`CE@9)ho=MpY=k`Br=A=D zwH`0K%2g{vy-1(ry#@eJgP?|HgQB4Oe0p@gK82h-m_r9F#O0lrm#*31E$m}v2>F1#N6{rp;;&%IGZ0YzRz|Bw;VTcw9FNf< zB_f6p0v3*w(hR82S173kLl7QDfbEa}R^0qNiSv;`RwyK~isGhR7ECeXlO$}TMVSJg zV~&EDr$mB4J`<_xVw@j~c-5dCG18F^UCj1DO^_!ZN|rH$jr=u{BtS3l4}g}9_jwCB z4A8c1JJB;=wOW-3uiZ^x6V57&Cipz^KmxyLlXPXmzKOT4FOsEcZEH-V^e~zJXZ%4k zmbLi%C-l2aSxH}!3zChs_YFV8pJkzwc46~s;pC}7%@-b&5#=;OKF9d$?@tq#;6sx} zsHb5(mwaR@8pq>OQ2SIR7`Q$dV_Ow$s&h<{l7q(@9Qb(gx#!(jZ0>J^8yGEGoaWBP z;x)$;er#|<_7mu_=YsW4*u-z%{B+j_HX^8Jnjfn?Fu?g1(~dssX!pn?4`VD|ip{BN z_^`YJz zqmDYlwZf^yee3TlH$7PURGsxQ3`6pSr8wU&SszpZRlPbW} zH~-HVx1U4ZkHcK*m*qcWv0-}|=g&IxELjAhVe4_@#-g6qVAJAg`H1IXf=@pE7#jpH zx_NWwVS%r#s9)eMnqP3@l14lyPWVtZZFrmH^wUlgl@B@0?Ow%((r2*kwjT4fmN-ez zxgTv*eEQKRZp;Voim_}SMBsdy{e(ANP6r%~baI|!{cj!WABoeEoIkLSbB?rP#R~T>=In1^F=P>Lwa^&o>9C*Cwd*xvD3x7ae$)12 zkl$+I`^Wf$q<=MJemL3&l+x3)O##U;+DZL~wm@F^O%qw1HKvb>mjY_KkoD3Sr19tP zpFD(;1`)TnLW1e@LW%@@AxYGWBBc(3GDBU!^MEOT>hXT`G1{P*<9cI5jHi3$`zWnJ zA)r)5J_KdlNZy&D5JmV?id2S_zc0N3h`ZMt+_D%LV{xD*J@R!^33o@#NxiL zEh-(GmBIShijyuZF!}2rilp?CXJn87fPgh5QUkP=Kj0J!Vra4d132k7L|y#-LU+N1 zZRGuP-L>8D?mov8iuJEdBO__ggiax6O-cF3`Uf3($s}d*y?}gt76>3B1oGz}l!Mvm z7uEz!Vm+JBe7Iu#HBA8vo*qA>OCVeSN||-Tu`yxkaZ?$J@wa?})0Nc!wT=m3OpF1s zatJ zpy}acj02^iu5mWj9|nrO0@}2s-As-rT>qhpq)}KHpECJN{Qhz<0No0Q91p`-JGBoIyV`Z?VvVASeCO_hHJP#) z?+8z~YV}Gt`LoGx91fr^!J0zFZ7X6NYy_2)4YBzC4c0s!ciZC-#Hm=9=#GsVsYz7$ zGOSDRX6cm4Q{;9eZycP1!+TtZ;kpdfR#&OAJ?6#j+~GLL$(rLlg@+-=e((Y2MdPqm z^gZShIAn-J0gZ7e@oa2B?1yy>*&M)z1;=jM_23QA3$Q`MdZTaaGPTT*7c|&YXc53u>HGryB6Ju z{V0d|<7&BG$vN3Km@}+~`lvFvNy>GeGh3bQj>iqxYE|&P0QeO@uW(Oe^Mv(HHSeuk zpD*Q-ULSsZY%SlZ7!;>*_J0*P`CieNZVDwp0vh0%jR*<+KluJAn17Hwt3$lxsX^w# z;TK5utnf@(H81o2VUoBEfq*Z+W#4kbl(5pZ~mGR6Ox|7GInV_Gu7Fl04k9pMJ!K!L|5MoiDrBH{a63 zHOIo}!o}aa{{8#Ak3XJ-Q{#1A#||Cbsrbm^f+%-;MYk{pnp%6-#)8Nha&>O_&{y40 zSWxE&Uf&1qgHGXtar&*S8$TW@p0KFQMQz?VxElOugJLbNv~wYU7zVdLaGh+Yooe7z z;2~~z42bg<&W919&&7CT?OL_5Slt=78k%Aec@utgb8y}4DmgvRzS9tcRzq9^T=DBK zxb`+(c7ACzi3_$gLP_;j3l_}BhRZYfzXaX67b+Dy`8G>=rIoy zbpQZB07*naR7Si$LKezd>pf5>A5Z!SAE;}^aO2_VPgK{$k8+lYWx3+=F8Clk0$1yJ zg90j^uvo#?IIVeech_Ba1wQtMpnUJX^DcJA?*tswB;myUP}Co91B@H@!&ue%Ntg;V6Cl{Y8`V&jT+ zQXjka7sIetGb}`;zyF_q`B|=E4jZl;6la{?5?3v6az89y3Y83PQ4gzdvVCv4xw7)l zl`@$)4kr@Nhfyb(TAg{OOprJ^qcO^X*p!-pg@^JL zD#(qAL(x|jVn81^cC6eII_SUya9#5bw>z#^a`A^JAl|`bY9me*@ESh*)YoumH6fzqKk@C_2d9b;n(K8x(v8#rHu7A}KrGrteH*a6klV z!7D88z3a+4f#^J-;AClOUcxXBMezj@LCus{RzL+qzV*MSo>Uc2l`B`m>5#s1-B{2? 
zaDtogMw@8}ldicSwozq9I{qcFf)n}sF&(K1eH9#OhG%*7>3n)xMUoWhlAe-5MnNMa zaGrFM0#qalJG&xImzOWm_%OzKzDJ6Ur-URKR6TxS|Mz&-H%QfzVj$p4#d=!4L9%ib z;G)iKvj?(;1tBSDifYqr5J>7P@bEk-f2)>fh>=}6aow)HEK&zb`Q?`k&&>EI%^1W* zSvAbMLP%%sz&2b46(ZsUt}zfYEkI_Z!9PF8BeW;Wpi$NKN6h*SGYwFHjHYD7)=Bd+ zAdF<4WQX}xnpeF48-L!qTZ_XBypFFD6W5j>xKRZoi`W#efRDkl<;!dJ33;t-e#EX_ zyIO7maF(}C#ci-@P*EVbvAzx)5o>U=kcJt{fks7~lBvn=oJFp@_9O$imH$a;Srfn4^@h zP!5MZ){7h*_^D2!EN=1292dN(+Hx&6ZPo*C<9d`0Ii(<9MaZ@dZn%~KZ>wDz)LsjH z(WP*UgncABejw?s6PBOGrFGK<5TQ>Xi0~S0KG06c25cJeR!PO}w0^mWybf|~K;CB5 z^)^^osff*qQaG{5I$VRYv3>Yv;^C3Xn7>ig6&C_1J2zISN{Vg8a`R(zHRipgIQXdmoQ{O=+Y{Tm5GeP`WJY4 zB>V#%<0q(~8!~(X5lg5CX2B&?5wrIn==k5U0LddlvI}#Jp;}0euT~(ojgEhKQ{!Yz z8QjFFETYkP~{_SH}E%K31bsC?pcBy4)LwRT!iBn=McQfw;dMX%VF%r zhq#Q(9M?E!(Q&3M)ID=d*(l==$1HA=uyiULa4_bqTem^x8?@0tRf}93+6Lnf$8#E? z=6J5{4$?v^Ue6)--Ls*6;KAQwaD0uxrZOA zph|KB=3J}U2$+X~HtJ1OaHCCDVt&K}h~>+3o>C#){-pXw)&Vbcl*gQjWftCSON7>g z<~oey8z3_eMwZ1~f@=euv+$4whYB5k4GlL&P&b?-v7T7Z+dvM^O<0R-C?C?w;Xnh| z4}wFxZ-9I}*h^j;H$tY;c~R{+q`^P@{-W|kJyp~Q z;u+gJj2SE=${*S&mCed8V7}N1Zm5%_;!<=Ad;ZA=g{Gu(g|N^Mv6OSBaS1^eSsRWC zZN9>Vrxa8%ttgBcwgfbrPq>g%ls_R=%3+@OF9*abo* z2?C^$eA1D3Oi3j%?wTvW;h|}ILRu+=^Aw)2v@PM2`cg)b8?%+80EU=xjF8G>>eOja z@pL^F{$Ugdi?|Oy^dL^1VS@r4+oUBAzJ#vD>9r}JPr-|8gX?-#SL~#=k_8T)0^_N@ zT6MWW(GjP>n!5!H=euDz0Y<%ayvEOyKs+6`1~1^(p$1?87S{Rk+6JdGuI$3A^yS@n zY!cB11@-E2NBZh(u5tT77EaoqeRd#p3Qoo8ecWnjeV&}&gmMnE0J9U!9cN#O|HTofghoFt12N72g@4V|@ z+MH}UIJQ*|_}mmaAE)a%vEZQj#_OYCENW->eO-b=gxyMtXJI|xc0acP8m{-!P#e>gTX}AH$3_Dvu-?o z2OBX6o{ciMzzN&!Fp$0W`basI&&>uJdZMo9+O_Mrk3Reen-8!-G3^^Hx;Jp`FTNPY zsBnWqCSXugg~9&|T#M}U?|Y%waj{GQsPmdSEO~0PZM!zIQNwnu1)bQJpk8@nrZN^v zwyRtT3;Hj*p}4-e9DI*B>L_<%+cs|h2LHf&Kj&s)0sgrm&!OMpT49aq*lf7aweJXh zjX3eq>(-vQ(etsK%%DuI&pXFe#AX#h7*m{ z!+dGy&Tdzn@ZjWP{0HOZlqvggo3iM?sy~PPq2?ME`#76_ka)Z(et8hXp~}61v|C z9E0njRL{k$wg0^FM)xhQp>l$E<&{^u=C~cSJtjJwXpDdnvf-G3@f2obsKe@VMHigj zI6yYT1`Qe{n=YKJ@wCJR7oLwBCi|hER=^1doRpN_mH)`T8JHV?|HzYp&Tte~ViM$o z!?sKd68aI&XvGvkNgfKp1d}hohJb-52?)w^H5P72fs;e~^wJHAmvIuNGHg)r21R|z zH~L0ReWjUD2CSDw0v4lB=c!m8d@dSO8s=%Ph6!yU^)1F6bJ-lm=!8xI_+O7diX9P+ zp3scR3MNlYOtS))G%VTH=YfYv6m2RYFc}%x5#qpepk%q|~uzeuJeO(C; zNz@#cnbZwGmR4(nwGieMfNX&S12k}VaeKbk+KPIZ$RlE>?1X2U5cm^b>m*7Lsaa1b zkTHO0AVk3^ghN2NGl#sa9`%D?=PO!%Loh$miYZ3R`{+md`vnC7%IA^7CsaHv`7%fs z>p%R0LRka`y!7{x@bC~3BPnIVPB|nA64W@KblM7A@Bi|%iGl*l(|~8MdQAygB<)?u z@o0$S{~s$?it+4UaPxu-;{Uq)PTXv*C5s@_Fy`KS&%I)dk7^F6ZlUR^r{F`J8ys{> zQ=S?`U-iqH67Y!$Yz%51!RYM$Z#0=^*yb}Wxa1uYcp@{vGC&vVD@hO_g-lWvK2ktP zd#w2$AbOZzb8#rm52Ohdcr&8>B!qzsUIWf6X6U$eSh6@=8#73(KVb1WaN1 zq$|eX`jtW|T&#_V`SB+oyT>1S)Xknh*Y)akn`?=MHF;M=qCifl2r3@Pr&M$iG{Om( zZM5ex>Q2 z3(#`da3xm(1LWV~uQW*4oTd~$k?(;cP7G-t;wV4oHso#Nhf-E)48wfprD2PeSpHZj z`4;Ezoj74LfaO;)lJ}qWI-?$al`Va$G*PBR5+r_)18O|1M>TXjL?tSoSXwaE_kyo~ zPe8=g&LK>UuK6iv4Qo!CA0$ZVq_9$&zy14!Xx&p zKp2qxw&lA@djc_)Cb29ezD1Z3oF=AOHfIdT0!s*Wvu4f0!M}H*V)`4mGj5BXfg1!z z9C;+Z`?o-=1B_%p{Gj`G#!R;h);hSha2D39{(t}e@CCF!W@WJ&O3NR@Az;v!0AJeI z-kq8sX!|(@X|vVM#gI=Ca*>AR8yBg6(vY?#PRKzJVUJ)u3_pcajO_PEhLZn{CzKUY1bTM*<~45m#v&rukYET37n}NToEI z#EsRSq9NY$*aBY?O%;CgRlo;W!td49NRg&)O)`Wuq10Pr;3n%ITR(fqXavfHa9)qWWy!X*g-$?Yis5 zFyl@&cEn5lA>5$Q4H}F%VPZ;*TYL0yAL7(Jchoy$5tow`F4oEoibQ{H|ic`<~$U^=rxSG$a=O>)%+&{skYzHSv*rkJ|+c z_`R?p!Oaa;OP4NJ;Whzp1Z;~~PHtD+i4#h*=FE~E_uFs74T@%`Lig)z(E~Vk>{!w1 z*RFkgs0wHmehM_O)b;AFIQh0p7M_OKc3>yv2#AxI0yVsP({pX$^BGud+R}1gL;yIh!es*TRemQQNoQll{QP%<4hYlSo z>WO%bk|*=1>ZuCqmj;cfyOE3b&tPNbYnUM@b z3@)}`0KKAYTk>LCu10%NfT~ri;reGK*%0BakiIa4Nt+nQ9CeJlyvyZo-+lM>DxUNP zMRn*eKd9kBP$#t)s~0CF)k8-v2#~+tUnQGPE6Q{!-leSs1^hb9QZg;F+y-Lde 
z&+D(p4VLCuB&#ZY{x_8EfxZty#nBi!vDqFA`W>+WLA4faP^u&wHENW53?Fl}&2dPh zL*1iLS21_~JQ(HbEPa&km?7AtYS5s8oKV=3+vGmMh8Y+2d0O#682zMLlZPLD6q`P8 zAg!Ehb5R>H3dvKVR3Y&-HVCL5h&LRLJp4#8aQZ9sIf}tdT=6V})0|ZC!0VlL>()WM zj9hQ!Nm-tFNMB~_-!(O{(KlIyUop-`5oH53RHCU5Oot{-g^%lCW9S~9#zNh`J8jk z5(B3dp$<#FoaE)j7H6ZQBH7;%gqH*uf%6^FLl=ip$RfDN5TXj8q&%n4!4XFr{S|*w zNMR&QxfR_rP%rRA-jh&Q_VP6`*qKy?W%z6Uh8yM*h7vNo3=+>v^ifL{{y}D>N`@9717l?(ES%)=L1%dZ zL6aj80eoiS398`)9}^;3hzJ--z|%F)a7Z2Dz$KN3e5E}Vn1<|r#$qz<5zyE&-}%0| zl}~_A*d~08wfH{WvWvF|a>M%vg5}{@JkVu9SP}vC6K`V>fP5fHmmnVEr{&MCx**l56hz8gS8K&jX~nK2W^P!%G1=HVKgC2M>5 zd9T%j&omgr2~-19Umwf}bM%0v;E3|JXRq6Ox{p5k7&jg_x&8LrPi_{n++R-l3Wq;b zZ9_{OeBcIso!WJL+lrSk@e(BbGi;eH?V8At1O>Zj+!ECfebmSWCPE>pbQO)MR&|AS zWkd|OSlqvNft-GSm{dZO=u$TLN%=D{mQPr70l+5|jca0%0_ElFB~0@RaF9^K`#%H9 z2wf^y1^{^yw?yw}z3@FKwD<^ud32^3iEOa2UnX!w`z!v;r}%mU0{ElkQc*@x@V<6#;V1^xm>8urZi(K{dZ7#f z3jL3|?kS7!1_8#{2H%j3J>uE1lr&6u51jQ1Y3J8JX{UTj3VS%KOGfp*O$2kXQUjR~ z@G#(Si;@c#evj|93$USF7Hamcg|S^6+M_cZ^EEZ5XW|Cr{j6662LPxzF2zWXMmy*ke@t{FSlPez%aa;cq;n+BI7*PDoraQm@vW;v~s1zpCuLCRszW)m>X)!6&9-fNPwWy)`I7vz{fz#g{Qtih_pwuO}Og7X2 zq685Qol(Yw*>k=VZ6LE|&332YaPF`htK|LCWs`JkOakbLQUre&74;evA44+5PUE zIepH|E$^K>Gb)CIC_QpO_^$=XI4|3RDTLGz%`sfyAz3c=T(g+deY*9R5-520-@Y50?P5(_d}05lH@^vA-@gXy6yKBng)=CA zc43;m{(5+c{6?+gM;WeP$mM@7`^}|TD|jxZ)M04@Oz-2%*sC$Mh@S3*_5Jt)nA78a zjCBM*g(aTu!VC(oqx;skVh;+uTs|K3Z+zoCO^M^z_iw}i#VYs$-qYon)$plLellHo z)s=YhzIuB1yWWGZ(6>r&e#>8LEq=BR_ak^QUI_DRea86_ycp-2dzWE@V@_8)21^9( zfn}Dsr^czLou)k-xG6Nhz~}NyiXRMKbtU$ocEue+_3UZM;)b&k-6d0nV1DY<$LV0NBSph zaMVp)R^iUO?!e52SLI|9P{Px8erhciwrY_D^{Dp+_*2U`frW*&H(fY&v^N*JCNBlTJDjGjM*MRzob0 zc;k`S;5k1O!(k2p{1^jT?|t8U5x)6pBk2D;W+_~WjnCilj(6!)de$<_Xb@wC75QL- zUVsYA6@kYU7Qt$@lgz1BoKu0MM=HbmVVYk+Qvsi4c~MbA%v`qT8j_q|Vkt~YyK*!yx?76TrhID_Y{ zSSI6=OD@(R;@|zfnvEl5yNB%dl^^DBH`ewJa>G|jaK!kjKj&NoSe?V z{v|ep;&{!*s`jbDB&DlUZ^!u$w7@A+JXG4~tF(A3V;1-O6$ql`Sb*wb@|To7(cnO( zRmjPXk!N0TVy?f?-u{_ptnObEsUqz_Ra>DbL99$6Yj1IZe<=Q0jIl7zzz>|Kimi~* zYI&mii?^{~OKhp0`@9c_@f`4ulpGHFQ8vsqk3h0M6v7c zyJ@!P?AhyMxd1$ixxr5RyS;8si-vQGm5KL#!ZHI!0)TbYWr^IE9Pl z3Rg%>;UG$n91yBM1yM7XrS6GATWDci4+t!GM5n!&2JLXnGD_8|YjsNMtv zf3E+9_#O3G>_KrI1}KihEbKioYt}YnFD(^SXU$GCeevkb^50|Z`Izyy{$HG8 zYOFRS)Etaf^{I@5cP%nI&h4vQNBhA5%#_-{x?s9k&i%W#)}uXdwelSR_kjE+_PXF6vV8x+WusURE`PZE ztQFEOyX=x){hC)}RSSHa#=cqFpSr3;P5TEk&f$3=6|mAOWW-ul%i!Rvw07wewZHS~ z%o^w?)k=kmbGeS9^vD4b^agB+koBxgi3~7QixsN&-Fq}kBm-`)SHV6djJ39V6dlwp zs6pd{jSmTO3N6?6&)cGmCqov2e=!AazOw`+c_mjIZN%eJzE#a&ry$> zOT@g|Z?QIm;)HY!K6Ksf6}w{(iaJ0+PPfyve^e28DtCv$d0bf`n^qM^#J>(u6d~6Z z5RKwc7*HChL4kpV2B7Jjg)?&$#!TpzyZoixhA4_TtXvn0+TiUT6xbviFU7szNVN+G zI|jx81A~iSJ*A8zik5HzIb8p2APN!HEoU|(u#m61(pyS}s|N)896)&YyWgGnpjZt{ zJaLoke*OP|7y56;TKCsr${aVSe*fR&h5jD9Ytx`FVk4lRVw1uR*54oJlXcH3{S!2@m{#23!oBzx}MxmePN zGbrAs_4@zeL;na}-%)H2I^dvm)SHe<%VHTI56so@>C>3`ap@(O;wAB_SbplS)6N+9 zI1+rf;s-M$@&)XZ!MzH!fe`Bd1O^S>`j)ri<^IL+X=!}X0sc(pZF!EA)DrDrYqEXn`y{2ywqM{Yb#|NLdU`^k2k zjj!wvJn#)^v&}b4XJT2ZFX0P+raiDM)<6BjKlYlGFQR{O;V;s=-}5f@nH6WP1e;&c z5=GoZ@q6F@pL8ddj^a9yE8z>+D&`;SBZ8(6ODo8S6&`aNcBJTdQy#63w^4-PEdap&#g*>2lyvE0yK zYw4h$;Gy6QEGKjY`p^Tb7+A7VSJzMK7^6SC7 zFeBlh0}s-Mjk6H1Q%^e;FX}h2Yc%3?#g$iJ6RXdqi!Qnd8&vASP}Y_3v2fYFWJ`0Rno}j=HH3oRMxp|M=`b|1$>1 zeynw;m%~y}taEu*rXtAk-mwmrq2f%PXS;#kXbDXjU; z`mFivXXB;c8`5^$Zl{N)haY(un`ZqIOF8{_y6lq6(gTk?kQT@C795=6+TWaQvo$vN z;sD<=nC0_5%pCbA>`B6}L7s=%Dkq=(MXdeF^(bq4%Uk{`{TlIM{onDfccy(`y|2zg zF7b5jb=Ttev8(3OxeL{wLw2~ zUOsQ&_s_;P&JbOK)V$NJmo;Cu}xZbCoY}fC5V?&7M6w z9f2>lcgJ!>7ht32&tM~APQ!aH*497x(1X%?>#c_+nd~cle(laL2Diti$0vO1)A({6 z0~FX`_;@Vw#62iDgW_#i;%Or+@$}J;eI$MBo8Qz|^RLJH{YPR3!ixMVJ_m#wULXI7 
z9B(y8hE^UqJ~!{37sJq(+B@2yzz$M>Ze(BJ?CmzLrZI$y(v z)l1`r@`pe2VeKREG&X8_#h!a$xt-n8@_aEK;}3Xg?CHRLG`Q@~X_!I5eKMF{`O3Yt z#8bEZAH>GdpFrKd``zzK)s8RPDSh!Qmm6GqZNL5YQ>=L4J|aKFrq-Ng0F7xq$ewoU$?1;U?nnn8c8IzL zH!=VE*T0Isg{7)=(nAis@%+0B^*s@Ge)xkQY9L{E)Zvww&BA0~1YzE0KEv2gHr`}o zZ3f0`!|(85bQ@m8u#eBdK*yY$ZdQBTd++ab>3Hc&UW)xnj>6Q_ji@X=f;}xbljzRd z??7AJo^HNn4mRz+UduIc6RR~=Ujxgn9Ec^OUX8)J3v{#Ko+F%@#ATcGb%;E}#$Xt@ z`fIeqrI%g;?`EXGd+*<2PndnM1lv!w;rG=T5#M&}ZPHP&zwI{L%05M7{d+%e@^yh- zR&`dH(H)UMccOs-K@BJt*&KiR%5d_sPHv{c14Os~`}Lo^g-=pA1f+qEu>6!SVt@k6 zYAug}v*SPUaV_;V+5KnOeyiQhkG7vV8h2Qr%hvyM7@9gUUPP37Du7-Z@WJ&66b~Ii{V#XC4!4vH$G_`P6)|wCDfcui7WwJ@+mQR3D-xcX&_r`iMCOScuNS^#g6%72V3@bmf5~vjo7M*{t zW{UAIdZ-ksh)-aj(QuvpqwTlC#JKe!~@tDfFUp9woKkM81M!Lgc&p6h0fRC zN$fNJOlnKXbt-RUI|g$G1_U|0sC?mne?-%Qvwmhw-Tv3wH?GOz-|c_?`@=hdOlG@9 zKWrPy33A7gIVX)^ZU2d%f5;3OTnq+vn=*H6n8)2w&O@|6aK!@4&#(stpL=iM3<@ma zxo2MDNy6QO>zCdd|AKZvISlvEi^i~opo0}j9d1(vXV``g}b`!Z(z%uF-$(t1RN4pzCwyZ(cp|8f2W zjWke`>j%cbfFPF_mGeLpv;pKj-Y~&qAcpEszCj-X!9Yjk+>7Nw&+N1Qw8PZs&G>=~ z@h7jVBdin<26m2K8KHTo_H!7fU_a`Xs>Y!KYNbzN-(=<7Sm$5l_X<3TRS_P?^3;!G zX>q=xeHPw{aA{=r*XjTLVfByAGg1AG&+fn7{#V+XDxt)iOctU6GZ~J??EDkiw~j*& znyB|blj*4xLVm&i#OEJ0Zl|pBL?l2+v&0p9d>&suB-jDs5{r#9f|Cn4c)x^uIYo^?wOjYp-?0dZ=0lsei zZGfWH>%49oWn(CI8m;Ec0v){r4BFSJl+^}?0FGs|@E4$pWz+{gaCEu=U(D~g!;a}l z>>a?RQCjX$3Sdg3JFU7fc!&koYmGt?4`5b!u%(V5^TjK;d z5kFqB#1mf1@@4$p7@*jJ0~B~k&iw;U#*62#{`*(c{SVxqcHeckbOhRHL%cj^nuj4n z?m_XP<35z`zwchWC|?JgF@6;5^*=iu`=Jlvh3y5X%S-p?^TE z=BACXq|kfc`(DhJ*a9J)iDiwx4S&u|H{U!5GaWY9GCwbU$xASU;`Vgb+5d(?73@KQ zm*EFt4+=hX(&tNmb8-6XxBQJRD*Vdv^I!Oa_SoS4;oPtOd;0R1zO2EiZMNE4dvI(4 zTU_Gl^fPP^3T|$F7zT4V$TM-W^W%4D57(>Y%`8ylj}Pk)9@vj5>9{~=w4fu|$ic!c`* zGRr+n{cLqCUuHM+8R;g};g}D8P@9g3|frK5uOLx78ObX4D18o{B#QTpg8+% zn?dmj%%I?MaZPevo(E!!Vu^|EZ<&fZ^bnOa8RVj#XMOglGj#t0W4QiLrhYQI-+udb z>Q*y27{JN&w{#cu98?EA4+1SJlnPjYAVqK#zQYz-2Sh50*DJp&<@Tp6z$w`xdpEh_ zoO_XM5yssB7`uLT2wja+D9^`|aIeBlwz*j9?@$cHzoE{c=-Ch61h6$WiZBXKBcSo( zIl$7QLJ2b6mzGsN@U_kjmz>@6z(o|3nz+(Fvc&#k$_~V=@0scC*n{G=`_<)kt5_>l z@gpE-_uSb~wx)4{=U@n+voCP&x{$Z_q8TzV4fQ{x3?GAXmj$C^CF2FpgQ6A{N&z(X z9wKO&1awi)1AB||5^_Ih0`CHYL2U>T)cI!00}Ou7Oxfs=u3Q}_1)VJ_lmZyo12`!| z<#kriE2Zrxu|!@bb;kig5l7)gtOsZFyi%e?VCxps{q`Ra4thp|kW*js+&EpE=Peec zmM~gJ85Ey6;WO!) 
z>#t6`;iGDPtk2Hnnl1MdNC7N*rUV294tgH+ILZbn0>MBhI|1cU7bdX={sI=D)Tv@Z zJtLBw-f7z_RuFKXaxVFxq<{sGeV4M^j9&l^Ynxc<-Z%Z$J$)BZV@pAU+tFB33_~Pf%&#+1M`m@)^ z*ZO&Y;u~MXV8=}9Yymge~!Ui2=b z0g6vzfP!DwbI$~R#m@l>F6G1Z`>(+M4}5uk!2bJd5P%zA-;OWlzxvg4)4yZcqQx;< zzTGz4r;mK>qZmJ!hmD25n*JHf_psg@Zm@y$@kKi8dd^qQO&4Is2KUL}X2#ruVs|X@ z!~qm8r*rN(=gN1^2srf6LsYjbe*0U@K=`-xTkLu96!y|sZMD^~gwR*9gv>H{A$%S7 zX80@2YP_&~Ag-3&&v~X7$xqOFzR8nGb*XBbsgU+~=;1 zjoy#e3<^$jJOi^DevB8@d?Eh4=WUh_#S8mgF{pIg?YE;|=Ok`8J_~KV7dGa8JJu8B z+R?wh=-2u(hP{3@wDGB@o~|X5Zo$j|4m4eI(Z%U8ECaR07MrKHV-JokF+{+U)ty$1BGfTAGVAo*z%6 z?O*cZ7i%`o@33L{-@fNP>B=jwQb_*lO>a)GM;({NU>G+b=YZIMeB(c`vHAVDZf%D> zVg5?XZk_*w^RaB$m$0VbRcUJtP#lGRvn?#eD?$tBpogrsT|o|@L=!#`3s?ZOgS~7o z7yzSgCuXm37V|nuI+x_WeaQJIaEPS_D6quS^0Q{8PaOYo3{Y&UI8biDLbWwSZ3MhV zSrPgDrAgr{K+#q)3v0jYYS3K30u0y>{!>+9b-?qkoltgAtbxCP1d!0sY-_jSO<35( z)IAJP2T+m{-XEZ|9TLiCImNWQP{jld9R`dIpsJ!kEMNhO-HNF`ne|8k#vh!EBysrj z;F*veg%8967NCf#)$@tRcQSv00gAa;YL6QhYl$aZhlA1#k4A|yj(zZS$04eBtr;r~ zPzO+5`(i141qk)8A*|Ff?30;kq2pgwn0*}mLfAOe6wLMjN(A8pv490Y{VS|0py*~z zdG}cP3uOQ`nSCE(by3eL7vW-~00Y#BDlh<5q#%OgigU>G3GIsshi-t%Ia!=E!1=v% z6e9&Fb&VK(N$sjCA!B-oP%jsXtbxCP1t|8*dsv88YDb@n1JnUjVWl?R|B#osRRM=Q zr(A@kr#S?wgVA~MW`Z(jii#`FAosw;~5 z0CfN*3E`m`=bfK|=5{?{T-0;QML1eExKF(Wdr%y3;2ZRj);sWAKw_Mug zL#~g^d8I)pU;zrMVgg%JALsBq5C>4%4fMT0EMNgf*!QQCV55Llb|TbB`y>>f>bV)U zM!480zyNg=N_ZX^(>gmMM%YC?H{RyzCi!}BPS2%$SU@a8d==-A=fn0p)V8z5>Qpz2 zq99wq0)*=?s{lN7Q$0byYz-)#6eR|kF$nHS?mj?4PNs1GtG5#)`B6%P<^mR=cv3I`YV#JXYeZgg7@)wn+Tiwv zs-(*AIL}EjxV`9Bi3s(jB9)rlJ~^2}SX6cmo)hV?=VBZ@WDaFR-1F1>-+#2ewC5(- z+<<@u2P8i@*-J3~^@(OUngx8<;y#6DtLC$i-lCbob1<-bYq!lu&vTAyEy zZ-^Jo%VVa)QeAS#H*H>F`4j*IpNF z70#eI^UQx$|6gs@RkXwrPuxFZxu4%-5ak}&;OvH(i!YwG+2)1XpI{s8Q!rz(nd#PB zZ%yyPz|zfla9j#sU$rcK^-;|*$u zl`ydJvRz)Lsps_X-_AM<%Y1jD%WA@t}A|XId(&tpSIt2`}FpA zye+M_?t1FCzr_+)M}P1b^`jNw@6J2DNd5JOo36va!4=r>`qA{Vm%R)#V~)h&!G`JD zYp+jVMmuoHsAr+?Zw@=G4`;nxg+B5d3`niJ?mF5W{H1^PXISs@R&37wKFq|p5%~2m zsPQ87g_X6J$*mY5`r!}HPxoSgVK>e8IUId%4J^@gX8H#9tl)awFWCGA8hGPgC4a!Q z)JreOpDzxaI(S*D4~=TTF{nPhux?+{&XmSg-|Zr9{eo=XM? z`WSJ}-{!8lqJNe$_9oaf|p*mJkpEmzJc<=loFZvU#udu_#3tq~WsRycA**f|iN z(*HeHmBx6vl;@;6?72(y^T^S{`|R@<7@)XIGwBY+a(^r0UK}}Gm*YGq-eJ$3x0|;$ z8sd)iuX>{%lN#1Eq%q328i%+ivhS9y*VdmLRqK$(soI~1_Me%5(0X5sI^0I|ro~0> zF6^Adw=p!%iR$kh{d3(lMeWAjJw}cQJKuuZr@ZNl*Gkt~^Jp`|&e69|NCzf6f#Vg9T&wQ>Sa!lR+kWW!# z3%gC`OxA}C<}ug@3%cR}M?ZQ|Ygc2{ZThQ5kCsb!?8}?V?RuQs^Qe_LCifxo7$su5 z+~f%LSE=eUd2(P6id(S<#r_9ipFQqDVFMJj={0iRe%`g;^>hOJS=2RnP8$w;-nZ+R z$Pr=HTye9p1oNxqFddHboLGlFcWaSmaJ(iCapS21zrPm#`fnceEksO&-D8ix(S|v! 
z^}{pp1jW0`nP;Ab8x;Nog|KYt)R%GfCLv{3oH3-^La@m-F?7+U!@)JlBh>;+7L9}n zuD~c}Eqc{U?T<3BCs7gVIMuTsDlA$e#}j@sXa-Wh+O5HOqma}ZSDra;(AFI@0H$Tj zvBw;9OgZJ0Q}EZa```uzHp#{firxWxj;(?v#Ey&8i7iO)#lbi?TV@fuv4ClZMX~2x zxMUKNxt4!wLMOkMsj9`*T77F7BX!`<3G|ZBI%h2&hy!N^Y6WIp{GNZ*riBq%1S#TDgs=!+*jo zHUBnNV$*z>DlM{T45YyukNTRYrTt?paaF8qljbHl&hs(j%bl2JtHLuP1-)?m)Tm4ZF z#?63RabA*<5#5ZDz%svq{I}fC**B}*e2ji_49%|vhB5nxK4Rc!;tIBy|B1?;2W*Uc++YMJo!d?fwAgNV7UesONS{Wko&6E`6610fnjgdYiS$4|^W@8ZicH{-tH z#v97D_=~-@*f(SCx^-peo$rnL+0DKXXPZC72dEEXe~^Fqr+>C5EjGswjN5Fx4RC90 zPmS&IqxyDuI%5qs1m6KqWNghZ*Z5Ms!%lcwW4mpz;WvKV(Og@IsH}~^H5#-&24e^t z!SuPX&n}r2R{T7{?!xn*z8QsWDJTDAb@M?Pi{eH<=GG-%yA5A4K+j>S-NO$%6t8%? ztQ>(G6i4C)#rF77qS>dl?8EJ5AD%8>Y3*7n|Fnc$)Yogdo@}Cx&lB>^y2dC&;51#(WTF;BBLIQ@=LnK_8Y(P8}`0dK4kQ^w;xxY@PsFn&2fV+)>dGf z|9<{!o@iFTCWa1~EaQS|{v(zdR>-OidlmvXyNR>J{2M&SVC$lkQ@(VH{aAJGx&P{^ z!p%sIWwVv5${HMR4}8D_$|LrA1opDor|kN$hq{@t;{p0eKu`Q54;EX_vzxdYPiS3q z(ZyxkZMH2Bde8&wR0!vGt21xLbQyO&o_@Ox=li$dbvdV*e}2^Q<6%>r4>rL~uFcDR z?sFe|qHe$a_A8Hl?Bi_n*Dn9&E<2dv=fnFW z7O>0;+6iB6#K0B~PCH)-6aML|%8Ud+=SucF2RRoe?8O0>e?|?R`2LY%pxw@oE4wD* zbplNHxpv*!@<)I0hvl2!{04ZN+T8B9-xIMOH=~oj)Uibrb^Hy8IL!U1NHc!!Hi;Qv z#X=6~aV;Ch0I71O`^Y*8yu|kpuG#nE2F2ODL2>LG${~jyhU*>{a7#)hv+d-*;%XGr zmKqwue)KzlYdN2Fo{t${wme21 z??*k(8O_%5$5_b1zxeT&7%Sw~JP|S4awmqV77X2uKkEn|;}^rdSTEyi7TrNUPQkGy z2@T|*f9!GT#h2m+#O39(D=#lsTybT&?DEU4fBEHj5(4Mdn{K%UH`-U02i)(z<+on- zs`98uKibV!AN*`~JwAu=W&x^bPZYVu3i8HX4Kl|?y_Eln$uMX(O&t0Ya0{sudB;PC zy*U4+$n&@+y2RyUt{zKSN2b8r@b?dzlf9}qXUjx67}6}Z&^-H*3zt+TntJ@2y4#3) z)S>Dk{}X{X!8PA78%AX9i=`L+M{~S zztICP=~l1|LC+K_BIlxd$*i#Bd1BXAj*TbVfnbtnDKCYij74h+F~GL<#4@?th`P(C z0h0g0pC_w=*gzioBi6+!oElnqh6uU1i{C#yh~#sUvp%+VEzSOkfu(N#88;>_et&aM z6Mx2VTOo^FRGlt~HhSX#06+jqL_t)himg2Fp&oUpn!bN<4~lo-35qk%I0Ji7@CL;j zs&O`he> zRqa*@84%^Wl$>g6&Y$#%uaR~B-5%=Mgixz`tOq@NEVOm>(MOlBeEHO}H(uxQvX{NI z-k_*nOwDz(4()l2E;D2D;hk1<>?~TS3HQI)`wp8(xfOhL~MY73J8jd z4Z9*J_Oj~gs=M~yd+!}}SL_YDBA}>PK%`5TF1@$^z9*TK^PJ}eT-?v^`#0D7o+LAq znY2@$Ofn-F;KPDFpE{JSFD5oAND?-@AlMo(&iTO0R*N#FNrmkaEqPKm7M8&xbCgp_ zlN+M(%Y-tf8;1=_BBI1|0&s?Pa;l# zu2?FOz$Ah#>FaKR4h)=czanfIRxN11Z7Vsa80_t0Whi%n;}W3SE(;+(!Ae223-VnP zlPbpjFa0D!)A`2&Dqp2(ujJ1ExDF^Nr6J$f&?it+x&K&zoHJ68<^2~|Sh-ke;gy_{ zKoauJ?SGXYw~A@}`*KXGRIQ13Elp6-tcjf9Yh~)4Vb` zjN_$Th_UGyLa2aw5y2{5Fb?HfASjmB0Dv&F9POQvmbbk^dcrLob4)$)=%c2$n0TrJ zC)qebA&TcfLpp}!{UO~5o2OwYjPEm}L`6oR^8IDrrZxE?bI4a}#s}jB#kA>A zP;@`u^gN*lOmbBRly^Dl;%Buj<+y0g954H95J~Rd2|XlG+b!gq`6%~wLAmh#W2H=* zJlXWd>8Dq)tCQP2Ss$TCCy-5<(%98svAn4a#Y7#Px;d#=FVk|j-AviCYu=Y7Dv(CArO(f9QCgLMV?D2yFW%*kQZJZY^;Fr>EI$bDDay9760s z0%v(EJzEFP|7BP}d-KgV&9l!wW7@WBYc9Fu5*G_DbASTFRCXhFUnpQc#3ASW2hgzl07PIKbQo6HjCC+`8(D ztIR0C4Ox0zGy|BR^e}}!g?Dso<1n>{maul=bwMBIRZ|y^-1WTm7#UY+Q!l(@=3>r zU}-VF1$iPj5EJ9>e;H&u(!4S_45ay4Vi9YVPcoHbi`c5U76?ipjvoM0Nx4YUA$$~O zB@0_RB16h$2$A*&URsmF=N~yZterfPXOiy%&SEC{|L^|(01G$&!h9ku$Y;{_KjqnG zNJVn)KTAilB!q$qSbHgMf5DiFC@W9cfP?KBOUYWh(F4ZPh1jJ47OKqrvy}k``1sq* za)A^k7?SsIR$AyA?Lk|}a$(vIA>=^5j=80Alpu)ZTd3nEY%DKd9oJ5a!c~~^V(9{J zmuEy=Zu?okRK6|cYAP9AO8Wi5A0|R>OWJFwh3!W`AzyJCOU)Ab>`&5#XSKe6*^iXt zYUX%FoJb(ZAV+FJlJ)xi!2<3nBkbj3n*mIreDWt0bkY}=o={($DOWcSX96MH{}$?` z2us@=0(mMHwe5bnygQYI{ z8jFnPX>OtHM3)~kU*H5qt3AxgI6<-9Hn3IBkk@{TDY%M!FqQ`mGR(_-kVHx@w8Se| zXe^;mDlXdxmdx@eO_XO=7EF}07eGu1EW#jYpRtU5-zkBBEiMs;!U`oR(+nw33-fd? 
zk_=V=_)*N@qJF>sP8Oyz>rAhAL#^XDSVRfZ2x&=gs{b=351GB zmSrG7Iy9muh&TifjrivGzonOJMC&Rp5!*zRKD>n13y8`i>AP2W0TA(&q$fzrm6T&S z28#4;$jWzpL0bN&brs2>D!_*hVy5IE4?q;q{ugdi8I+6bfjSf{ln8MV`4$|67s(UM zR>>pS^`E?}LP?*pw4qwITL47?DU&7eC0&B}q)k#(kOD4N-j(xcHy-Tt_#?ox_6e|d zWnB#lBH@XP-?hsEycu0N7fGINWRQpm1`M3O1a%CPHd-prPqQ(9$Ht=`g^8yZ#l+Lb zP*A`J#kMf<1gsaSlu|icL&4$Vs%TcMTwx|pnQT7)cv%c<-D zd^JvIEXNM&3Kc4ty7lY8439HO<0i`Z@#D?MpL_zRV8g|9$U^wppa~Ih!=iUt?5?h8 zs#UFO6&D!e8k=rvn(wre*=^TdZ68Q`3cG{1l5bO{{wcN2bN8%pv_mvIdw=x z#+fWF@|4BO9)c4T^g(d~oL`>MvpW_ttdnfFMidQDEIdCQiwrNABD5mjwVxy|Pl1}8 zgdLl`U+8W6^yvfBLKW~$ww>wFv4i+YnTJK2F~5v6-+w>YES$dxN{KS27Ea)tamE>@ z{oZ?+B!Qkhg2<1(SpZiD?E+C&UP*C*5G3XJpTa`Xm zc5m4li}X-tkr9ZK`=5mwya1b(Yc4H-j#heQE$@$rn@@^Jve7ydlP4eGWUi7u)rr%iswWm6K9M@&xmr z&$OjWmYK_8w)N9~pQ1jc@XX%TG~cng>teuF8?S;+r33wiwWbm>wkZuuOnTDhue z)3&WS>c}HarOJ4}X@H;=E0&v)m@ia1l*4_&?-;%lH!4@!l*g&JGD!1O1eFVXK5#*Z zH-FLo_l;2@1{-uHnq!VVMijf}!^G3!sD}@r*KZ2x!DKRcDxdJ|T|L-YMZbTv6btzZ zBb~{6o}`a6j?R-Kk=%ctR=>+2W^MD2GBP1z6EK1CJu{wL2|7CCi@ zc@{QM#QBTu|A@CVL@u_u5wlXzUTWJM150Q#5v3&U%QddFrFK_D{-*QQK>~2S0W4Ui zfgqx3A}G0ieb11u3l3%_DNx7HLbczrG_iDjWmweFx2|-9zyQ(>LpKiH(%mK9A)$l} z4Bg!&B`K03AxL*4QqnC*gTTNrbH{V;|2gM=xF6a5Tl4U)z1MozT43cPN*y@a+w}DY zdH{g`@-3&9&eljHLAj%B0Nv$pGh>HXk;thm{UE8SL|l z_OsFAr8;w6b0fED_o8yH9s?=%XA~cC1+WO}cg+s)Gj$gTN>?uU-R^;lLLCZ|0w%Vf zJ{wp60Y~qg%R)sV#`T6(lDe0xi#l<^2S~l5!LQ2->=6la8!-?SbT+mrOtb0saKaz9 zbKA-Jh*eE$*QX>t631R(65{+$1w*fnSj;VypM6s)4@@5C^Ij+&W0rGM?~_H6VKVC? zl?h|y{wP*vSHK+YervK3s&00Fwjq@K6Uo(nf~3`J5^D_UQYymgbEBF_3JO+k+Sw*p zXxo)4`XX=n-!?JaRzX{Cyk*JyzziW%nI$nLygVi?ztD-}`mJf56==vXUBA1^J zjAGj*KITuCLS9@&99@ss-p?76$Dd?O5lWnF`FUePyFU@K;HUB~B21y2>*R-7jH@Ad z`y#U#`JwO76E9$2=_C;A%}Z7d4SAOOhbE4Ok9(id+P}Q zzRUVOaVELCDq4ZG#6Y6X<9CU{2Ai5CBwO80`hH6RsGeQ$$)vi@tb$X^#vrZTHH))S z5MFD0W+3IZ}W{y{Z?)&tlqCr0#cfi zPx0$HZ_}qNFBS(gsMR^NU8BDuA)DLBeZqbHtEHTDhl5k@M<(39t9)S>Ch~>%1vyq{ zXc@1m3xdVZ?53k!Z~|lnY6U#;D0spZZaEI(Mv8@H2%)+FiX>_g(WTe^b=-q*+~2OcU6&h)wJcf$zPc`kyGu^+TTEo{pbzaE=W5F!3>Mk6!UG#TmFakK zIeDk+dbM^{Gpcg^H^38JPiLJz?-SHUAJ2QVCua$$K%*Macg;7ia(FE#@mqFxiDX38 zw@LRw!0Xn~F&WJ1JS2-8L3*&+(4&>*U2DHeo`~*5YD3IF(+r28~@!i(Qd9os{{M$Mb zMubGvBo{&2tt1!7nQh@LmW@@Y_*Qwac(&9G^T1+NU=t< z3;eNG;0|+%W-$52$ka4e?Q5d25$XL*<*)~XD?%QMB2Psi*PZ<{xiJvyuL{qhZLeYX zkBMAzm!6>}?$x2zy-{{5IVc8I%QIQ&2*-i*0Sg%4Fk9@fxrk`NVy}eKxCzdZWl$aE z(}amWN&%2_m)4kUCOHZEJrwcDuy&L!?)at?Bg5Z7+F&3qHvg+zCn=zRJVaHlWIZYw z%47M)%^|9T>fv0*98Fzi7g_bz-n7Hbz~~_+vtkn6{RC-1PP#Y^>gzbn_f($6it>K;gOL1bfN&g+zgZDq8EC=&2<-hPRX5GQFUu_IXQVX);BL)l%hMQNYa0f z6M^G67U7v@`&@kV5vFJNyVTADFu2$0oMT{Lea@^)&)r>&gX!m|v%4N)8u|6}6qPQ` zgH|iu?OGj1&u|9yBmCTLm8&M#KX#>EX1Fh%GJz!?xQagz-67*jBQs2mtV(SG}#)Z5H?~_&I8| z*DOry0Da?mDTkibzV!%m#9JD$qUfJ{*kyP*|1katweFgTZpHg)(KNhi3`g8=?(*aK z@_tM>n(``4>dFsqpW|?lG)u}l$`SU(_N73PrkNfZ8As`tK`pKxT2c$*W37s}`I#xi z`F@{8Y9nf(=(J;P_pw3p)fJXc7{-yI<$vOtXd}*b%yrnzi0K4C=h?0J30AvTdfI6X zI@H%{7PqU5VG8eXQcLzl=!Yl`uGYxqO0sTa5&>hG`pF$z>9~#720%44&}ANcra56^ z(1rXjO6*PFp@1qA#|*wg7+4zr*^QU_u$!KA=#rS>EW-YGPdtx*2U}#=llT~xZc}$- zS6_;UU36dDypOgscT|rVZZS{MGn?16=~t8dFybqo!*I7SFzOC`qWt zL1J3cYAZ^NR%`5$sALxLL#MhQUlkyAwt2E*k?)z9?L-^*WK<>~{oBFV%q>I@85W4C zn?8%pUg4E?xg{%Pze>~<3ZRRNPj5YW|2jC3?d_g{mgI<}4JQM=1vXEli8M|jWweU) zA>EVhEakNt_nOaTenLvisRjlH1uZfikv7G0|E)NA=V%WltnU7YNis$p>-}M}GTb*T zd^~mE#A0{H*A(=&0;kr@lu}f1IhX;OL=0FE;|v9PL7e#zHFur`O)8`C4gS7J?|N z%3FVpKNy#^la`je5PM(F9k~%j^qv#g?%aZ_g)BmIuRnkH{m!1T2$y%7k$3S5Ky*0e zSt50ls<35jqkg2pTbWEJgqsYZ7v3#lF^$CFPKFpJic#p9_q+cv4;Y@Ufl24G-+btc zTBNWFK*2jcj}5Owx*3`ZZn5uomMwls;Ye6mg+LvyFRO~J)7mS=(67F|`@;sBo@kq0 zY}*JNnamTR9`C5Qr)*dATku3J<|#~3rhS)L)2T#DnR7Qwj+7Eq(|o^0YJ;%TUu0H! 
zX3|usbTnW_^5#Ne=SvkKSbU9OUO0(Ll%$!F|KsgmMv9${8|nijtk9E_z-iT>=x3er zCQa~7+6Cbvt63E+p7&%!(nzD_kha7L`;lADhqG`F1X4gGaV7d(EM0%4-*^3k;nL@0(2}2zIC~t;M-L~a)a#uz=!;1{K!#^qzg^-*_FU0hXdLwJTe zFs&mpZJr0nSmCpOMMrzF|E!Q97^O<8cDrS*uW}`MYI0cTt(t?R!i2*9Ar2=e2KLkN zL|r6TZl+VTjlw9>z4D$XRa+MJlrWC1DX5d}A@>gDHTwSj$;y|@`XZkAjzHhTI3`LO!y@e};7lE+v(xbW6qi8N{-@3D7|A)OQY9;5OPyHF;nX?xAU51^O*@YX&EicUss zfX?l{_+D1jQ1u)6quKJO4 z+=8;+T5lu!byaQlya}$rKuHGq3|NBoGFcOwhq6JbH>|hnih~r1nVb;I5gD8CtCcSP z{fg&lvbi>1SApsG^_^#;1_mcfIMTrSlJI_GEtQNbRMVb>>>LoY)t^7!HO`4RZT@bF6H}hkD@!A z#uP;b;sXF6xv-QQsf&oCOnn16;){EX2qG#Y7G{}(nWMp8*6XtIMak!{UZ7SS|HfS1 z#gHDsJ$_E+WC~kNXC0g2pOYsc9qAr86PC7EkNL2lH$FbW!L?Yvr9>+W2Le+bUX1xpJauS!Of5!7k}|IvNcjnS=W~GfqO^TnK^X)6jCuUEdR!QDqly? zVbvfSMDg4CYESt_SCIc*Z>Vo)6dJ>4S$PuOH0WMYfnj?s_Wx5+mO_%eH2cvwvU z@73d+fe$y9=gN_sME2==b`_6>qNDogAU$3QzsTN8d9a*ut zrvT7c^!MYnt;f>H%cbbhFwJ3O)KeIq45|eRPgPUxv-74y@9-V6+<=|>A5TP}UJG=I zRksVe#KUt}AjGq0m+%_-kRR*%zk z50!^4qrk!Ehf}GH3if@O5{E8op2Xl%@ZI`dnd}O{udkD4cBpWgc56o0YBm@wP<;%sJQ_t1E}!%16mMSLy~3d2%Rst)=;5!w$1i^!JBT8wskP6 zpRRV6(gCtmhz0;7V(Vi>l?JD6Qzyl7D}AQy!nQU1dpACl2FT`+Up&qMWGr|@g zi>t<+kB{=*>P74zlo5d@3x!y#dwZHnXPbQ8=vDgN#12+DPv5Mo5PqO_fkph^OZ0H7 zgFL^}TVeJc`vG!ucTY@P$&G+r%}GS5ZFAl0r}wr-pR~Sz%l(KoruF!zzGnkJ>WAG& z!3rg4v*x_Vr2i(7XmQ9*9PkB5MoZ>N_-LYHq7<2P#RohOVYBmJ%ySyRfn6@9eY+tC zr0YnBpzo8BQ3|X7EWb25NK>ZIK3o-9vaoyj`-pm>=h*Q5S6<(8npK-8E%yr}eq-yo zb~?XvnJ4^7-S1Cq;C77xm(&NZ#qNZa79-8VaZ0D|$IBowOWCOVIc0h@RwW7z#Db66 zQIj%V+B}nbj`jUKFf&4&IlNqUlckREY>l-^)z|;r_7o3QiE#*N)>NOSL6yc=a1?Wsp>yr2Yd6{A|kWdYoTJPV2b1G zSOKf9E0}+e#m7TG-UjSy_Y-}VU_Vu-Z!vjfDWm|jZsEH%2;XE%l7R7R9C|=?8|;=0 zdPb=R5A$Xru;w=lZ3=o?SW;45C{yxHRG1Tu`t zqFgjMAK@IS?kHiimkB70JRElle45=(jzV0O1SWWVh|3sywaAErKI&p?U zq%(6h?iYY3qU-@imMNS}@sciGZA3w}x;@mIQbIZV75RsLi)7#Uc$sc}QBLz;h5l!O z2P*d^&mB3R_*+WacWqfwFTJL-a_=7NM z(bg}cCSp$rL_E$j&5MJUzm2!S0+LXNeo>QpSED`liO!Iuvb=PLN|v|EJUrQqsdFm6 zn#-l2mT=g%V=>tVRf9{?n-W~&%fDhKuE=#K*uKAF6=hz`fYCvX3K4-5{-dtWN*`D` zjH&th3>n5p#%!-X;HUqxNB7!CV>v^y>eW>W1gJdZHdC%irEhUp4!Us`KfQqtofFwS z$F2~FE3`ee!Wt{$30QW*##OqD3F3aWn>ldfKicN=?l#S-(1^0zPdqeU#kLb3F^1!_ zt@pMp{)Lt=eR!lKl>buLC$g~Q1^ilNMcAKG<72d7GC}v>eSeDm2!9yib$C1rYC|Ge z$$l5Vn1)n4SspXWWy#E;{CB8#Dv+rzy7~)XPZ{m)m?A#9>@yArUrKf{x}7D zQnUq?!xh$@%%A5I{4A`3c{}&XZ6VvhHXRSCY&{HseEd!fSYm_97g!6**to3O0~HxYqnNmYjD$*K8E~~A2;WCqM@+KBpwG|7@?x1wR{FSXM%iyt1R_0hW z0`FJS{zeVuWvB^5YYXP}U{h0JX1?tRHlNbq#S_zmAZ{JL$iSK*18{FSJ{i4FzCtGL z?b{JGpiIDcgoN-B_xsJK+aayv4$r(gG6UvfNnOv3n~qDRI8df(ihz--*xBBw=K0UU1+4o==%XzFt@*dfJBip~xTt zW{;|x>Vh}lRwnbs=6?>9UE66(;zqsFOk4S)OeAnPFl0j)cTj^Z&HSeGRPy9wd4-{)_0kLXv!uO*`M+v*(&HH=2qKZ~yQE zq=-AUCnbYeTZ({G+r)VThb`p&ho9;+#t97m+g%kMbZ7Ap#K4^SfWZ?fP5fRlrn(Xkpo2-xnTuSfFJxSVRpqH7vD&pJk6i6ONZ4zqKh;2$C9x6L|Z`h*6p zIyJHxRHqUr9=x#Cs7i~x=CbG8{cw_83D*_}WK8k~K5o8JDs;_vW*0CPC>3nvmoEu(N)3Do!7oa{jORgp4|BWe!PaaVxuK*S)B|IuX=N7lu^E&^()SEn=*fes`JyG~o_=ek zd4+JQ=s-1AQnDrqJG!^WYdE+oLm;v1NlH@E6=o&Z)`_`=8(0+wGK*}Jz(zc&Xpp@2(r}cE9_~X&}JSccGO8!l< z3xQbGZYDvN(4_)Q1`bcd>b5ASGKHdH^$(IFi=$5?nfTh4mx&wB%&p=C$qJ}@;V+Bn z5&f#4E_a7*&fWtGp3ZG_nbI2aKt^U{HGy$28)1{jPDcPkOz#Z>xPn~d7xd5IFgglR zMUuJLT@rL!hKsmSE<9W}%kl~rHORlP5AnjjJawbxx8BLuDb{wVF)=mXx=Cp5V~2n=v6pcEpJ&q=RUkNZR+a z!Y#Cz2lF1640}mH%_j1M`W54g>Z$|X%(Rg2+PIVwmxQH-B1Z0Jy#3=kiqLRQvBGrc z6H2#!m!EYdJ?O!SVmfZe)Kj{oIMT#yiX9*+D^Ky)(HZFma3GiN7b>oqcgGOrlL@tcq+pwzKI8oR zapHMYrO*BIbzlBG@f0$H??4Z}noY?}F_J%>beTt=VM*997E zw*5IWDJ?Wg%Ocs_UA^XLi!ILYPTdYB9|;cFt4n%u;*mEZm-T<&iq)q=JQ_Mg!2%7@ zD6&yx-7nL~EQ}qxM%sYs5kR*)B1Om;M-Y*|N z_-{B;*(gg=jBN%TN(uS*fqikwuT%QMc{HgsW9%-hsi4uhd%US(epRhI0^gT(IrG3k_O$jbPM*zNmvBQAi8qVK3R5F 
ziXAv}Yki#7Z&H8{zGg2F<9|{(tRa%RN?oCG3@Rz@2YnVGl6iPezWbIrcl-5S0RgTwRzEvo9QQ^XepO~hbc*mW^N5hBA-m3TWO-wqmk zMeFiD2`@&N>?_ql3uktNbH3;IulKWCurk#}Z<9oa9;vI@0c1kKsv2aiO~HHQ?~Ds? zE)jHb>Yd2sU>@&=niLj>V!RTUD#pW@b}aB<94{K^r1drwL}#sq zf2rRHM$TVlPZ6&^R)fijRk_Qe@kn@D0r`y=9v0u1A_E_ljQpn2n|p$NFJ>~kN)um> z>)4kbAk@bIv#UyY>TfAm}VW4Sl>= z$cgtX4QhK9S0(jZ$A7k@VKVqmiXm9;TKhw3mn)X3#R(tUVOLKnzBYlbZj0*Iq|%@H z;TSiMgFRSN7?<5!N7UbIYC^xGI-)+_*Z|Q^MwHg<-O*3o+G9mU`BOKvi)YsirG8#YZYQ9q(SHat!v4(h z#hzT8_`o2kMnw{((4s}~-z<>WB3av~KlF7u*-I}fa=2eJu|;fzQNDbLJnlLValFf} zYm6(4&nDsUi5nmC;9=2d1iIoK`+is{)Q*;o0k{>#DoA4}E(28sG4b>w6NF5ETtYSX z2f{J%Qq2iSy%&aH=G?{CJytz4m#|YJ2j)UmJ9rM4F=l0!vn|d+X30gFpsn4cZ;?)U zTnA6dT9fo^;LyS_Pu^&VJ1tdNu-DsBEU1~wGZBs^N2c~eQWc)jIalH&yx%%71V=O9B8Ra=OSjV&I`T`-&`ldzXlEW2D5d&`8a$?sLk&!A`Th)Hd@4mKVh2 zfjRmm!$nPO+{k7wqm^ZFWKz0X0a6EqQ7Pl|jD~opDd$4QiJSP8O)}%s)jou$?uLFz zd}{oHnz=mk4|_ooex^fA(|H>4Uk7=sWRs-qG)*GQMA(de2v`TN&d@DfQPrBzuf~-- z596PR;lz(Rk^~Ue;2Md~n4q*K8tIh&+ZLb#WH8XGFPFa0()zy|DT$1FLOTRm?#Xe_ zO|D{oS!gj*2`HY;$ZyeapwF3^l+ID5p>-r|By1O^w3`A>-N(-G`KL7|0DC^TlYy^S ze6nT7gpG>4B?7~D>Lt#H?>4=}hNegXAw^}fW{dwD=ace=?MMf*L<>uFc&WIe65e?V zx~axLf8WIg)Xf&%3s%DjkhPH3G*91&G)?rA53S0J2IyzrU&4vyjH6g&Y_m91WR z9|d6CaFMwUQ)cp;3fUZo(%oX^Z5NSJZw6i!)AZcuBo_cZ2c^WOs4xG$f`uYyUv@dr zW_r}YMPZ{VH5ID{3t-TT#;xkzFRy_+cgc{so$>-QW6V;1^%X1b>V>8=_x=@MHcR)! zyu1r<+Rh71+~OaR!_YDeSJik@a$DrdoZ`cjX%VBB+UF@HZ!bmWB8OYeI*irz3Uvfh z`Vz>_rNFZcHMuo4|PxS4-2*E!_Z*HB6HOfZu9?| zZz1Wj!?M=-eX8f7D_oz*KqrzEkgd4MGz{Kop!fS|#UU|AE#m6#+4p^;T~{GDBs{LF zZF8t>K~fB(_rnFUC%)T+n=LIG|A|-{=xFWzCJkisIyCApqYOUTJgvy5C1L*!?*f;_ zt`6Z&EOOh9*h$NFe`FW|7wCQE=_A%z@N8n(5tDcYGD+MQZ(M&Ona;rXNR$8TaIqTp z!ODP`WLkUOe74Mc$(`w2a*5MKjD4*Bl8@fn(tpbX=_HO1?F67Yxf;jsNHoOcVM{&i%|H|sF@|^%+;_=jy88-L zEyZU1vGiXhNXYU_D^$ZYimv=|KXdNDdF&kSdW7^jgz^!Pn*NSf^0R*oesr%1I z<6iW5w5<@vC;#y(bIAquNdpUO}Eq0Qy+ zYyx}uY{#9Uu@&n<&+q4t;}Z>n9=GmpJ=+#ol2_QGn6PUl-RKjdp&b_;rlyT0OUMUq zLSKqom#PnJRjtlJ5RXmB_2E=Z_>QfZT%TAFvQp5_pspJbb)ulw8Z!CQ|8EHq? z99~z)z|fGiObU^{ki9QiR(U^aJe2+oHP*2=kxX+wIO1^b;DWT72mqGY?odKtiGqIN zV>wE_BBcuY)gQ?^MiLEHaas9qV4!l-9qe*Cv>}|$nUV)G{2mP6?uXu6*^^IMp@Zh} z_B*=4P~6-Bw@}ikQ-Z1973;hoCmYz3L?fE_Lk7_+U*fZDLTZ>a*{-0^^y5LNhB)H` zYNH9J?8f5Mrk-MCn&m?k|AAWGBGrG$9Cgp_@ef3Vr$_g$?@au|ch;__qnZJ1MJ(mM z871=M|BW#gg=1#AXRh9AX(cTBv19V`ul2vdGWDUTNM)4GEqmC=!1I7X?+30Ll-lcX zmbXhM!^vhzX?($^PJJa^fs-WEOg5R@mw(Jb6SR|=?Lz(E(B5wHWt=gYryiul$cAUL zk&I;p7)!NRwd>>cK*B6$Vx_Q=<9{gz6IEZ^|46+{+eLmiaqWfsyZ{R;FA_bGBid0S za4IvcM4cw?(@o#vTx(A_(@Ys+xLEXxJmXfCW-P@&`jA#Z5cm(ovo+7MSUz_EM}2{~ zrpVbcGzIl%-G8)%eC}1;wT|3*AF;qXzX^2rVyby)1MF*cke@3|-yH!f;TRVf!oii! 
z-z@>)I(nonj&uH!{}`qdIV>=fdTXM3Z^5AAs&+xoS1dEBD95PPtR0QkcFK_R&d$UN zx-#@)LaCv92vT59X6D)2K4SZgkpY&h2@_|a-&^~Xx>h+6+4lVFs9 z<9!1R@#n6G*6BMOA`bs`f>5}l*VdVBayMK)>>*<-YK;nK@~E6S+F~|(VHb_qKp?&& z1SsG{V8n%bir=xZby#+#nU>71@CwGX;O@&3L<_+4+t(PS$=^V|I~>L zI2z)=EMFM-P&0MkwrzB`T8i7_WQXm$bY-yf7r+xz4LiBhK>^5m1@VJF*ZbZO* zU6%*uCSCNrPr7Ia;m&-85^xtY7Zmbl*PqqAGl}Fpe#EL$)|hU4NSYhcE24q&wZ9&g zVuKkQ;>o6!&(KYUJeP)ozT9n|jlDn;I~v{ldmHYvY|)^dxBX-~;v6u=(<*4X<(7PbAR>Nh`e1|Lv$H*;K_{zKOMZBPrL1(ec^@B!hxzc{j~Z z{P)1~amjio2CrqrdK(dtd-~w=Tc_|B$ncXQhSDyZ=85XFkjfu5FYa19mDjvMS#vh$ z-?y1dWeV+)0U0lxc4{a90FSMR{yEje^xdt5tykQO5F+J~tZn_R8=>iN+dZHF?O&wd zC(bIFQphns|M%uG|KoRKU!}%5tBoV3Oa2FYZlHr$5a`S z)(oMaKXT1}6BQBe%rIV+@7m|be==_%6lL}xikaiFmn&*{s}Ull;T{aT9QmOk?kIE9 zsL(h;g^RlcpJ*67$9=n>_Mcn;QQ9^}*zD+9VOKk&9~*(mqMXPl5}|;$puYu-)^VU+b`6c>rEHClT@;Z@t5H{|z!y;RVqR#YvMa9yB3C zetS)c-r5Rxj$HLm9^dE!7_^?hO($hU+Otaw^-9s8o7SSq840Hc9L3+NXEuV)aIQtL z6c|l40me5XmzT=)6FA8=`s}-a2|b{iT3m7Zl$THW3ZX>qlenF-^jTeT48h1zanG*( zbVDU3cykTBtLb=X>Gx{tum22{fbm~pjmSB7t;bv1NwLF1o;!Z+%A3a>yveM1ehfcC zjd0p1pOc@U$Xjz~4&dIw&J6&pgAX?vTnnKH);VFs?XI=KR0RNZ%s${{*u>C)Q$Z$9 zE0?*&DKeKJn&mq(2=#g={Tspo4$3s3t%1?lRRtPCVFnQf`rAj!X~*wp92dt91h#+5 zV}?+k5#;+kTzn+*`sKnsnJV(qApGh^y=lw5tMpf~V5AArXkGY-I8<_>4g2m~59G^d z)h#)k+?~0W6b&HDQ-Dy}ZiHOZUY-tiZ}}!xsr{yC!iTG8@v^E(arSZAcrMiFFP(&n zxqbK+<9hL=ybpHCY+Jbbilf*K9~5Z+ws!+DLss1Te?Y_p0v*b!FPk&fhLR2sU||&6 z>mMmuDZEXu6Z!i0XW=k;{`dDt4B}P`*gPl&5~#Q7VDq$`3J(&2`n zkb=PPW@KI-+Ec+diy+J;C0plt#5y`BokReDu|>QQBJh_LE_u=Y*FT(OGB19-m5!1Z zna*T%3$fVtNbM4qtG_e9q)>{cXx_`j%&Kh~`1aYKx0xIY>${{(?wHrA$ql%Y@Q7$h zj5J8~P-HMVK^Qz5N=u)eoE0Fv5Vc?r8QZyJSE5oO#4OU5X*F?=7&++CDnlII?zWY; zeFwh#5r6cIY#0i_T>$YI?>u4Y-8suL^?)aNisehGMp$QGAp_X5FPDx>zd^o1&$-I8 zG=q6(lLsNn!@1Q=h=mf@EIDVQ1BPhp%~E zRq&hzX2t4sP_d5^aM6FpqtD3dE-*vA*b8B(4B( z;|sQa`Y&15c=7W)pkYMi2ADaV^(TMK<}n}GKBqX4%e|tMfm49OHu?&ql$I9jwhQ{B z#Jt=mGf=dx(^`c6zW9e0hpY2_*Ek;3rxT`a7Shs@yq|pn5*HPx^!raEd!B9L<7u>c z#>mN`IQ@8coEx*nwS`PFPL`oO;(tyUK*a60w3X!eDuvdr9SD=#yc=?P&>f-SQDZN9 z0M#7gIFh|?bx_agD?41U{iWPJ0@Uj!z6_2g3TM-#va1TAy&h98&aG0C6CjVzIq>4& zz=Pxbu)KQfCFx}ixyn)!*Y zSiEHV_omefFU08aERjr;mV0~Xp^TL%k)&78J<0Di4j}Ni)lOh;{m#*IPC>IaPoCuB zTEmEN)P&5NB>|7)29r_YWgI^u_u+G1$86pRqIhi^wuBy;+wxC}WLv|0j^xES*RFTKB$AuzWOhC`zJNvC zzq|ZmmipSiuMq09S>*jta{o`o}^_-|PO<)LQbmD@d1?f%U5ph`Ne zsKRFms`YR0Nb}&q&4gEDTs_ga3%Qw$u%g4wUOue+pb}?lF5LK9fjh!f4CrU{{4bBh z@p46U=Ivf#pbwkAe9|NBy>F1*b%*eKnT3#iW{WQ8(!3Oe5+1pnO9JF)PE`pxPk&r2 zG{I|zFEqSVvgoVCM7!$~WkWP-gmh5#@~23B4Fmr9BQZ?@!(mmdeA}}AS`wT4>Dtjp zfEr$O_$^*0_RFHKz6$LYyUY^)=_;M-VV4e)eFf1WM9Rb=v!c{p<$|`)BC>7>KnJ%N zE90GH4M18&RQB;HQe*HzgBYjxQSPQC-{c%VL&0?N54=oRR|CH9R}E!Y5aX1rgr26b zI-)x^C+i^r5TE}m;=`tlJy}yydfM|SSDI22*k+ctU-CO*9ESUdhvMdsp_R*j)3Fb@ zslnt6F+WNC73@W?=+B5KZ7pQr-Nydl?2RrC1dtfHQf(Vp?R#x7mlzb7n|y z{WpEtMW+*^iQglkizgk5Mosvum}i%{V{dZFT75C{VEQqB~)Y{`}<#`~{MlINK>@P#Y+ zb0O*N6@WUhVtiqFo(tuft{@RD@n}v$6$eMDq+$JVcOnT$maSlMj5)af3rDt&8uGMN z#;8Opq&UBvz)Y;uV;ujcW^jgKMWXBCgQrw4TBJ*JTZ$!x@$>$50Gh9WY&&U$4oiB# zosvv5BRlOfbh=cwSjSXpH_;+zZAemJFr6abqG zneY(4ELVo>zczbaBE3t>;jN{`@ox=-e`~Nf7hmEqYWYI<|22Io?`Ke(MGkUv_?U^* zDPPIoqMN;LMm7{Bu{FP#s1G?@^;_b&ZM`(t8ANE-fR+0W@xVtU2vLw{&xG#O)(Hjbo|t|nM?{eN4i`mO+UO9 z)Y6fayS$Dqc1Qzb(-6%s%1p7$=?ZDu5)8RSOq9rOmGMqlA^|bU@owFpBJV9oGpNpj zzY~wGsQm(tcQUvcFu}7WrlGN07<2~Z*Z!$Gd2=BKqLXMZ4kAphwSr8zFCPy!Hp0rTtob=Zd=dUl`SyxCE57AwuEJxof8vo;v&{7(?^Oj5BbIs1amn zz6QmOGT+Y+>`X>>;Se+#n!EMKXCcn`1O47SEq{S6bvb#$fh1byh3Hj{zD=@LR9dZ8 z(IjO>-0$@^EPxcwJ>Mk(Kr(OKaAAwt;BgM1-!B(-(2F))+#f>RgSNYOn@`W^v4b^w z;Aqxr)%akZlV|hBch*gf;h+jQidyVKt#O_@za!QOgn_)8t3d1vM98?`eaaZr*Pk;e 
z1`%7LiLkuc-|E@1y7?DkmOwE#sLy^M)=rp^4e}N^qwtmT+dzQqV{6RnJ{3wXQ|u01 zte&h?vi}l&H*D~hMvG=GBxmKLCfEHp6AvV-IXr|#)?ZZ1k7o0u1VA1XoJzU_RVLMb zxe3m=_haBVMEkJ+1CWNv*=-eth!IQeA##7ZldP#)A@5eR->~DB4)6F7Ll-it(R=?U zVSf3r@S0-2-CJRv+OhNf0K4A)c$U|XJ(GX*`SF3A9)eeLrM*K(hR({nuL!0*JI^td zjI0EFUsdhWO87({AOt9!@E>#E(O4p0+62H|XtGjwZ!XvhZ1 zT0eK^jbus<;rbmBHU0Hv*v>_!zBVcQ&WV!;!XamX4}Fh&2q%NI)Ix%)DsWsj&ud*| z$)@`z0=4uLNMB}WlzAod=JEtrMuc^j5^cATu(yB7pD|Bjf$_wGThs z8RvQ(<6=C|G_Ia?&Q*PnTG=WM|4lkyGM8;To*ZJ0>O@?8AeD}T+4^~}MCqoZs)?@x zoi0=nJs~}st>|||c$HQCHxgXjNvWb2v%U0BH&24kkjz>BKqH))uH+B50-dMLc;qP) zESpwE4cb5oYAzGIu!ltL@~B)iSJtyrR^hMcL$l)xTpH^<$MT|DLM5FoPYL`oM=1{6 zlcRHdlT?xt#-%iOidWDnNNbW-<;z-2yzwdW3&x0f(2spBY3HM*NC(hHpq$$$ACcP@ zwJj8vaOY?fcR^Wl%bQOp|6nzgBtSM_r)4ldj$K9Vb?Q75r?H)O^*>-FX_>i?e4Y?j zwY$YaeI9c?9TSeqp5`8dVJCwe!^i5d-6$yNCUz>H2SaoTJCyvU)XNp9^nOCHyi3iBDsTGUP zDg?_<;PoO^%=}Q?i&~feKj@dop`Qv#r;!})Egs$}(=DMNApc?(z(~_WB>^LBW{5)2 zfX^?@O#W-nSltjk4#TsD8-xnjeRRuNg-c}tw5-kW`aVyvUZewfqZ0m7=1<0&ef|igz4B1KV^U({Liji*Ne^JX zb6X~orj$5sUG*zypRn*)WZT@v`^M7C8l`R&Vlk)}BRI&)Poq1uC}7M|kpPW+TapIg zb<1gJ>)N*!14;LMM7(>b>jblNx$GAkhFC0t>X@Hfr}sOYKGBb-CQOMfLmoG~F&UAg{@Pcx1rp*l#Qr z9boL5`R+yM1V|6%Ppem-4d2zn}{+_@WN@i@k^icF8SjnC_kXEtej| zeoPj8E82xEigZds{|^A!KqkM^i!aJT=~i29C8u*cw(n?$!y(;8f4j)cgrb29#HXKr zx|~p;-Sbh{<$mvd_uy2(3|VMD;)rghM~|MSZk;W}+|ScECGpF+ai(G8MyA`LhncoG z*}&c7AARzXd4s1ba6*K>0gggFdcisNZ}4Am1I+Mn0}coMtTWCKXVx_^*1thv(D(fh z;5_*^F;UdMV+T12(D%J}VdCjoERv2jTW{S&P7pM2-VDl)uqhkm;b};55-ai zoOTf*nEziw!9sr^qehPw1ygtI%-;u##8qI&{#Tqlpjon4UwailR9BllF+N<_uZp_& zA20xm^w+{^{c@b9xY>|?|y10AL}Y0|{(h0_%)F&{tg_l2AspwA5YFWF-A%`ohAHog+d zvgdHR0gP=Jb#uKgd1OveoWbAl?Oatt>yX=`Z9NDE|h2ISBCP=5IKm zuo;|fzxMhY=8;Dq7PC6LP;qhL-%Nuo8<a#vCpA`NL&N276^HsndEt|7>Z` z$36eZHP42$ILXGC(@D0Nc*2PgEO&r*4W2hEWNc}IZRF^YrYW8&ojP@f@~bh~=LE`T zux*i=IQgM;v<@d;+Fk!qo{@CMi z+KeYC{t6RMIEm!umDV#q{(bV}yTfvDd`fD6ZUst^pTjGW<@hDAoHDs$J&>gt+^8b^ zd-~tciK6mtEv>Bh8sL)po$|Sjk$$VZ_AwSwVtT{KUwiwDRU%J*m|c3`YiUaz$`GEw ziuS*9&{%nB?D!!2$Be(q&Bp&52VM7?jUH{~Y#!Tfx!Vso3=dXR4maCyL6KPbWtf zjO? 
zDAT{P?D-l6_(kFx@&M7N_IYgWe4U0fS@pH=~5Ej+;;v3(R5%Gbw?cB@dNWGUNZ)&P}5Ye9-YtnYygu~JaLy2wxYW~xsziph^@Y#~5+qpUTAl60=z8_LB#eO{KG z^tCpvX&Pa%oX&(7VL^H-c6iU6JqJ6L*2v=4&MjKtbi(Oo>#dslowj$t2gN&B#O9*< zNl;K6iyffTup9k}C!fJi?$5CMxRPnnyoH!?Vm}NU_LKDK&%=I}(*)c(-?3u{oT7ko z1g3mOjvgfoy?4Qs5f{*$sRLy3>K@a z!0Bv#m_n*!3AlaM7jB$~s1!-D5xIOU#Z<}H{ji|SnuIM6hQe+K$Nc;u0X%w*JQbND@IiyiVi@4AcG ze6!l7&&#jiq)cBZ7|NPvJ2o>NJ9H2;SN;0+Hx(+BgTe>nf_5I-?O@Zs{XXIx_tsl( zmJ<}5&u5)^I?R6cD0+e-Ov^Q50_+x?iunMF8{P_H;t2|o@>mRKo->q=dTGY!9h`8W z&k8zHZ;cZ*n^doc(*|Q@+}TG>phP+bi+Tqi)D@*lnJH7Jn0xNJ8;YwhFsDnI*6^dy zs%0xPd)938#v8B08TM~xi@NooJh;HL#GIpo<7p_&-gxUR;O3g0TI^(6;*@vQ%9TZ7 z)4xCF0qV5>0SCa5c`w-VN4*|@3O*O!6SGVQ9?(_#qC9+^a3Ps8M*KY7RL8w^`4yL$ zU9ju^J)EkbDK(l@+YKgmTEY(>6@064l4Izwq2>!H*%mHZV)p5{50roB$l~%ym<8Y| zilI>Ul`mfoCtmh7JMPd7PPUgq0r3TP_WvM0Ea(s6j5E(R_3CaR{#34|f&$8dDp;gG z>+Cb(XxE;gi2GkUnE?MNSR-cKF248@^X=eo1$@UH^g+=^o(tDse;rP{e2fzm3(V|vG2e;@CzKI`JhFU61Qh3rJ^zF-x!6*1*#zw~ zYhX6*OY0XF7BrM0b|IMQp<4yXE|WYm#vUf5X+zhqXHKT z2XgYM>c&;gW^l}X%#laQobydVf~OvqEnSAwGj#k8vt_nJW0Fids;_j0^Ayjd$n0%p$36olTVL6GY zkWbpg1o#f(`*S0_qt5>ePEZ_qINFa9MQ0Rze-&`{%nC3mAfoe0pAgbGSFHUhPv+uM z78^stZ=eY3lRhB@ND5TWxRc7S`w6ugcJ1>F2c(@_)bxUiM`70$d(9s%b7G*pkM-;2njJLPAlHWZsDqsrVzq17V7W%oc_0* zQ=ue}m0sgQL59|?nWG_pLi9J6@lg8-X~+mAb^aDqi8 z;?f!3!YnI4$V#bWxP+rHlI?)lb@A$ggDHh4eXMJt$BUFaXKAn<-l6#{g+UaAPE*5Xe5>NKgr|v$LWNX%P7ZCgFGsQ zDYbi=*yh+ufFwLYVySUj`Gh#jF<@kDGLzSe!f3gLk{r>(z;d;`;&ja25(1mO|J)$i za%t%-O!6rS*rqK`bJ-f8Fxr@DrtpaSQ%kLz{ImwyGSaVptO~IzD=)TF&6XjasY9?( zJ$1%(C?~p$l7c3l0!1pVkmtI{58R|LCA)6#-zih3n15kc{abImA#;Yi@mV>Rrtg6= zWy*+RqI&gBu?V}BIqdMmupqpZz_qRq+e^J8&R;t1HEF9S0u?_yE5&#QgJ z(JvR==wG02ow```Yl~A>P308D^yxE9X)MOH#X{iq*WX~c+kZ5iM)L&6jo7Kb>ux(^ z!MnWdB!1_ucd!`#3Ct0VH}hd4h~{_bh?ORjHmhCR?6%u(<^U{G@37r=ti3GojRLQy z9)BFZBgUeBi{!^8cNg!_d`CF}@%n4^g;3Fn{U9G_e9y{ z%tib9II%$=5j4~EPbg%$gZz*~4v})~3+{Y=0j7mmNBY^A5Bv3OM>+IGbtp%+-DVqe zJd{*BV)37+3}}|+ML6mnJMI^|@PIm(l3zgB%MGPO1?-gOu4y`>-EFtsWf7GNm24Xq zdj{iV1r;gu0k9Z5tvA9wPev%bJb^%dui)gtXZ=1m^XATx zSLqI@&slIhP9GP1mQVS0iY%&f3~Se}W!mkz7aTP2VqU-k`}m3DWl^1e8ji5t0^BW5Q7e`&6(`eAKJ_$C)r^MHVYXR; zaiFvBjiCstyG1?tY}ijsN>zgsZbKT#xya3#RX^XRTGFA9XR0I^Yz>VYnBlbfUuPbto?S!(`AD zC@JV0pbX|c-)fXk=jJ;>0doLOTkL^*fq`xP>gz$~<$t{lr|09$+&OchfLZ~1_``s? 
zu9~%KikaN*@RQOMrmxs{^U$xi;T=p9MwQW4D$fo*_z)cq;meC35+wB5N`(+l6MXmm z_nfS{jj-gA$Iupgef$hkf?z<%|M0Y^UptL27UFlY0$6{%xWEN z_QO~*h#W6WKmuhO7})xf->kN90ShTzc(L3pXI`aT_#E>E1`Hek6HjoG4f9l2LP60L z=Fw)(oQ-+*HTwK3QIzf5sk7O2_g$s^v3%ZpG^<6kUOZ(+Ux&&vm+4@>8c#gHM-k0u(ZPH*n5?UYba|Wr z3ttbu83l=KZ%IC`yekodN*;kM#{!WFAWLoEN2qyRc@XKGW8rL>gyCASfi~vCG=sgj z((iAJvDAfW3yH4o+cBo!puayLD&`%Vcslpo^URS)(wECdNqX5CQxfCP!pp4&t%LYopCWn`Gh!djHgh*Bjv`3u$0=#$q^8P;&j4SGQ^T^ z^!p>8yn+oE%PVk{RLFY57!;>HflpX$+G8GBkWgy9;Upg}o<8Omr#*pBWI1|-O<1%C zT2aM*e+1%?%8r?5O)r(jw$91cLQtPu__I$uFrF?W$EtV{cO>|%|?)C?=nNXPl(qZO z@#tfZW1&q=JlzH#6i`t3A0%lV)12=XVaom|#P$oWp4cZSJ%rHM0P`u9(O~&k+0<1jz`Acq_(( zG@-F2;BM26d_NJ#VCRQ@|An@O>6P-ZWq2(D7q;Ld&*nAQ*?ovN@zfnk3NA$XDhM-M z4?pjuJ~8_s$EGxI%h~!1qj2$+S6Yioa>&e`H%At_>C3==f1uv0@g0KumkNqX6{}z& zzM9z@rgC<`?rqw}4?Jw@(@#G&!(nn}2^QvBVQ2U*yX=fbw~A&KPB45m=u7O#{$5U3 zaM7*B&Mi!b_8qYRZ^R7E5coWxy>$AmIUGB^d-r}0&W2~nuHeHCIm~R?umKbpe14TO z%V^qW=nymbyTNcaJPC`)aQceH(^@ran`SuSuqzhPYr`~7+|pGrxif9*bnKWO08>6Q zJ?TTz z^AyIQufKsAt(llgoaJyXEpI67goeL~jjL>I>eZ_!&(r!18mI{Ae`ffsqB$cjde6k7 zdg;<-%{K7Guy5y1;+T5yx8KRw(sWOwMh#&CXirnOeqGqAA1l5K#>0dar^(iMUU!9) z;7XONnt@O@aFKovPArr|-!{jhc~_XfsRA5*3=AFmqgjk`-hR6spsZ;v3%Web!0%U@ z7@{JDCbYu65oQa3cJ2VglOc4vJRCpV_^Q|kb>4n^C~h#;cIt-b|IAr43{Qm&`EeN9 zIUT2KR*1Q(`t|FXU7(a|hH@3)3nhd#VhU{1s!bv3Em6bHLv$4O@#e8JL9huNk#B=jhr7ce z{8mjGd3tshchQf7b9j#P*k8~NoM52xszwbcC$`xdPRZMuTAS9!y=vd_TpWM@{l4PN zol1;$SQy_4ZPk#DhJzA_@mqr3`)|JmrQC!`@+>|CijBrNox;c1M<0CzrOaqqd~e&X zjhx2eJ-CBm7zSn(;N!R&p+>vlUP5PEw^k4e-j;Hnr>_HV-mh) zz!mkRGLYu1X3d@<3W}wRmtj6HmeVtQ2GVK$QYb2TA5d9U6&p9`eK7dtMb0M-}vi3moT2+k~n2Qk6G*gl(>T!XRXczH6Rv2+C(PvjGpnpuV- z^TOUALV%0*1M^Y!Y&NP#3F9A8xnh0d*1i^l0g4^?yO~zKBcR)=UATlr`G_aw zJOBQ$&Sd7HO-~x=MLp~`TiJJB0!m@-=d-DBUCO6=8*r!#DcV5c{L8vrWu;U3E$+;j ztb3Vft#*qx5#Yg523W2f<=Sn`%+=5M`-4){rE#lukff;tyr7L{d66i3i6dQ;)!*ys zq7E3VODNFN_)rTOM?26O9@iMJKbb$9ZT6YsO@UmE115~zIv;2)65%}6`iFDPL6wTK ztr$0)0pmuF{?7r5lVpJ6MLeIcE2{~agk7(Sebyg;rlJ4=qO>!Zs*oEk=XDnN6vQAt zhp|dS72T#N%Scgi21@RtjpkC;BAhR3Vhu7y42+?nqJ2LhTijNJ`Ju|D&OY1SjEfII zaGg|c9=DQglD!AMC|L+hfw8UqaCsmr_^YqkD{&NZ{k`A0g^DqlZQz}swva?+ICY_T zTJ5VEwWG06y&XOLk(0^VuJ$m;-;kDz1PX2HFC@k-YZSda`3TrmSedG$skU5+Vf8u^ z_?MqQan!|IhFR!Rr)M9Wa?pR<3nWv{O#-u4=Y9c2v%qD7dxi31AW4i56+4* zt<%1@hg`&s19={;ofiA7v}thP2;^8)G;s1oE=fYJh$vY6;aC0aSTV99?G(_eCeoQ; z#90MV;xG7$h(fk6A1w&K>S$^yN-G7~LUxU%qEcu?#P%=!I5UU%)Aw?EH|%)PfvjJ% zh?TY^ItM)3r+;UUz!Fbb1HK==&i&gl$IB8=E$!b#xB&+$`z(-RN$SA@fTn%*i$q9R z`va_RcaX|7a*FcS4kV=JLAtmhQIF29|9r?4f6P8Bk)3zHiDQ~d`xQGJ7d@gEIxH83 zD8N2lV7T#qIi!j){BCRDgM@*`!Q2dbdl2Pzz>(`u`@}n`c2w1ftZ8SVeTp2cJ?mdz ze@w2n&-SM*Q+9WuoduAzXj# z{KGb_vT-Y4qT0td(tq4Otz^%rk0}=kwzE$>H-DubuGg(RI9mV1Ny`})W5&EKOV@CN z*+BybR#J5@P+0 zPh?1ged%|+Xh4Q9(>V)8tD{HZbywAMm2VtLLdsRrLI?IIU`ej&al&(uNsK|(`-PZo z*vUniF@v+oJrg`%@J~csy9z~6fuXbdo3nENa~W0wTC77zrEti8Q6H1DXCDj`i`)>L zU92mXhD|~#>FPj_NMbYUFD04n?<_l9->0A1|p>5s1{l`Q91Pn%X z?|}`_yLI*IQae+9{lQ-26DDCvpKrLT5N3n)z@UR)(Nw^WP!M10O+=rb4qxH>AZz8m zFiSjI!yQ_!I0Y+}0aT-s^q(4lFWrI!)Dxv&JiFYMl)=ZD`*M~8W;gxyrpYUs%feA$Mlkdwl`|PSLtRt{u-X1XFjbkA+K3kw z1er{qmNJJHDt@9EoawSMa<;kum}I5cM6z}-lDR50i*0^?P)1G&)*E$U{o&Vl`TZd? 
zD7M8CPt#4G4m@Cg8K9`$(fHFOlb?rD0%vT0VGsfiwU$zB;3FqVETcZ9N$U2m+Zh5O z{uC^@K2b%NxI?90DOVtz%>!A<%yYJN{o_>a@#|v=}3RqQ} zB@#swrI5sabb@e}3}^#2k4It82jkKre}J+QjFfqjDWzPCxE@89cp01Kt;g1Sunw>) z?V(XqZAFv#WW{XRvS@)gYY){_sHAyMFeW0AO0C=VKjm@7xu{u@q!zaTP|wkT1650u zERdEg>RF%28!q#U!f20pYKnZN94*(@REoYzs%!;q6`qEGkgIU*eN(rVO^=n-RO}_{ zb*4kN&ImC%<>%4$J7Bb;yXY+{dJL=uJS+{?s@r;TRDx$s#1 z3P!z&_IdUp&^1eIS)6J`o0wiM%lXeuvfq3EJ#0R`1HRJVQ8tFoDqza&7szOyn_@>0 z$ozq3pys4AHLobfB}CJLLXN+nRZ{wC2CMjkN732$tTJapv&b7xt?{UFG5?vXhw5+f zRxOARCQ_6L)YNTd&G91fgAcKc)CK9TyY9g3i*IEA2JQjDFPpi^^3m92`0aP!!AymI z_@e2sw9=}pre65!yiU;8tbAsu?P*MSSyXoOLTVsHedeZcxtNt!FHmhCCFxLEE*oW* zF&Ux>jivU#hW)@*+qWwOhmkmgBc@<1{*ce;X0v4U6xttL2vA4AKj>SD4t-Jnf;1~a zDQM-CvihY*jYa!9v7M-;^uMOowHfi|i5dxIQ>=u1Bt1tf z)2%(U7>`jfGY2ix_WPsAH!Tg9C2uO z{uK@icEN_^7@&|D6r*KE%JdkBngL5K^~Sy zGbMUr21)(=QJ;S-Fj=?dz|WNXP=zJTLO1J|0qR! zW~P$1z?xXjO;asn(U|t%AI^WLa)^zz;blVxXRuZJEbb zopoi(S!{80s|67K*@zo;v&sj&wTH5(9l}f&qlOebEv+t4)}J#KMxi?Q8axv)3y$BP zv;N%QtP7qm?aEv97-YGrHv5bk+aI1b^tY5%)X=%)V)+y=^*3M0p{ipvtc6->Es7mM zAV)4#$3V?hU+L3E)qZInWGzLX*f3r}D+U54Lj*Z^Y!@%HPl>v>sf=neE{lJ&{%S#Z zvl5jM!*YlbZl{^DCR6sHcu_XV=4wJrB^o6XLXboDq3V1!wHXuM>y%wBn|*hr0NFzB zb+{1mS|sl|D_2~D2HmP8yj0;HfKhmX2r}v}X}~+LVw*>^UxES-MwxQ1HiSJljOwZF z*XE|q{^5uJHr;#gy_hb&0+x7cY?AFN0KgeJ8qNzOxuYQr#;BflbhP@%_#2Kg8guA^ z4KwL+A(1KAR|JpW-F~PKd4KWi!Wf#_1%p$+`uvlPiF;KtNG%YX`+JRwc9eN!emZ9o}qTIaWN zu75P_r0-~7svecld8S7H6BePtF^`9`MsZ@kOZ}~?Qh7Q>`%J->^r!sgj1tQ+`tV)s z(~j!Ek$@rm8B>s`8*Wv>r{Y0@zN>v|kUEMZ*&!m`9(_Y0lk5i(Sz@xUO52@(a9C46b`bdet`INOl zZ7oymR0^c8QHDwTruIJ(ClWD*_Y=Z<5zR)ACBK8{^FSFi}nda|0bIZ!vMv*X$Q=p*lDL7 zWs_f861V=3><6_PH+3anK1%=7n5xq%Q;wwhdCr26rXl{S){pX4DaFyST6^jQf`M>Q zjHoHcQ3VUg;`NVcNfo0ZvFF5CS(@f-Y_$9d_5fh#Ulw0S^Tpz`&pd-Y7e2+(MSaqG z*nD_Lv`hc~b3wfrar@dxR5|VT-4T@*72-boKNaVCn~h+(t$ppk5K_jVp1x&$0ow$v z|5xX~o^--m*++rynQvBqdH(yJ_NV6ehqx7dGyBuh{+-pI6_fh0E;(rkLlmldq9ElG z|3W71JI22#$R+<^YvSY3YtlY-^QYSEk~ z6gudCqOY^(Khu!)O|5-<{&0^7@T$T5|LFI}TUg?0n{Bs~*=q;xf1qrVO{?OwI0E~O z1LG27GZpRseSd#&kCpM<0|zgvr^ixdJ#fD0@{4c`_IY0){|$JsFN=f-FS2`Ku+L_P z7N^_OMpnyY;{frS8(uQazE(=f5Jr$3z>*cx^=P{erkGqwx?X#VAMyNhUX~ z?VqLHzPN(em+-?7<6rd&r)KM50h^va_WT1<@KM2V3^ZYsGW2GAAp>gL{>mXovcO!^ zzWGEa1GmD8ASu1}>Z@tY+hfGcfVl>wA-GOj;=V|qfA$4tmVJn2J+WLRu95R%DarZg zpC2qm>-YKnK^tF=`%)^vnXFtE&woR}M)muHCYcZ_&Ww|Nn}8!vckGYFKV60rJnEjX zkH@Uh@n4M-KAvJ-VW(Fg3_>FpM&T{-7a<&;fBw1j$;Y3ddXv)v3oMX0^DYNkd@w$X z_5a(yKg2%WhX2<7bPTAf{u+vKR4}`bf|Oh9udxAM6e@dD)J=)F4T=CO*Yqf(l04F$ zpXV$HY3l4}@#vDzYHNG`Q@vC{jHoHcWo^@FpHfT|dx=Xtop2KJQE45_px|mkl5(Sv zhm7rhe~4KU1(^E!s{uVyRP{4!z5aw56j4ltswh&6Uu?1}ajYR1%{bkGT$xbL;sw)@ z?!w_!i70aezcyvfV$g()xMfH=1X&CI7BeXB!RCA`Vr^E=pjenQC~B5k2kVGfh-x{g zYBQj13j#Vh_K^Z#P}atNlqMWmie!PvAx#-hqN2m%m-(ln{zW!GZZ7MKbujzR6Xc9W z6zM$B2gFOdkI1wYBy&k%@$M$rC}VX5p@BiIw}P$r!z1}e+sz`ze6%H2-+c@(}MzUNqs#cGg)iLlCuzYbl92=`%iY1=*IxyrePGb{RWip;3PD!@D;JvhcwMtPHhDxr zv2RVuM)x@_BGXo|GM}ARoG1c_#?bbdmikkL*@GMu*u$PG4@YB*B1X1*7*Tdn=it-Np^6xba&AmlmXB0002M$Nkl|qNeC7kWkBk1SUJ>JupPF{v?5j|Fw>( zvp){&{*S!x{`Aj({v*AESuFg*k2<;R(oFqk#^A&{X&BbppBYQE==~vD$u=TNyrMf( zXJ0wReXWnyhLzLQ&|TNEX+JYdHud(+RNK6reZLqs?NfvKfO|8pd3?k%Q~)yAqP(zL z{90H{KlI8q4Xxk8qekueTdqHWQNU29NW0X+5)tj{Z%RM|tGp!f^CQR#7|JHr%v)F@ zBHNdHX{(^rpHA@PKq>=&Ea0zw;K*8%i)UM*g{VYrfZfke6E%L6gAQk$2K(OK#2z)6 zv1zn#*KWE5`yF2YLFXP6vc%J##$g~1JntK#`$J~W|QQm$3{WN6A;IzuhE2l*l zTP)3jWt)2T>4h(cxwi^ZuABA0s{U3PX)_Ou%C$Q-SV~k0F?G=YQNd1se`FuMJ=)nO zu3?`DuNUs;GOMbEM+KeF=?~1akKcalZ7hE{DJ{A9V%P&|A?(#-GZ&*n#lBq<3bp+H&^D%pLS@%0OMeyXEl?0q zT12$kCy#QFCTFK1oI)NYMqPrf4N!~kF0Q!pige92S7WcSFH%pm<%$^OJ^qB_P`i9B@5l#>fnD-DsQ=sV 
zj!Ac8uQR5;*bi*|_1DF6iZh6I>(=dlf83A0_U2n}ir?(S95h)J`;~F2Pf6LYNthw> zCiaCIIr4t&i3YFXPjBoWwbt6}q?xftlvP88c+71B=->knP9q;2iDernq(cuoEN!~^ zrm5R>-5}FJr6W4RqBdSLmDpZQv0p+KagddmdSI!&B(rO|Q-Ba0<-S}06LC3A;r9o5 zm?{@VQ1Rd6_lNe0wgyzU9a~s}ebyhv@ALb^?vb(w#Yr5Xz!FdEtb-ZbHb9ZwrlGgn&5k zzZfo$u|b30m!eKnWqH1um5W-CHXE#&*s<twxqHF67ebkmJrpEK_l{ z@F`UMP|kQ%4wE27U>*$ViVa~jEgNfbweab=OjV_K(nd)|hnaC{0!&75hkz3%Wsvhf z3n^^ulFSXbwg-L1|*#sL90&1B@CXGxkX&_qd3#ZBnIJNa}%JJU7fVb)Y zBhEf6U2@sw*n?um^w;B#O^etJ3N;vx#q(B=z!s4r6%b}0wU$#6^wPTupFx%f;|k`N z7?JL<111#^0^$JU1ZljzeLeGdeXn_Hou$WwYG)ba43rEigXmWE*@+^UIp8DCJ`{4d zC@QP=H|`cS!XJH9vBJ-?)L#yYr;tIFY_M(|#>cY4P*jjvB4`nsGVsXc5AJd^)z&Z- zmMbTYj!?jhLdsXmh!nwq5KzlJGO0xXp|MTEa`J65Y-4QH`~jAD+FriYpJ`^@2f^f> zH`If2!D#{Q1!RkTR<6_YU$yB`t+~x;MHJ&;*W41c>`IZPF7rS7DI7So_B*wnM=ta8 zgyK{yUHK_Xr=<&tuQo?W~*VUvrNB%mg%476P^wI_n-fk{aU zw~?@GMMu11MGc9vR<%y_x)e+>OE90bk?yfz!&h)I6ZHY{rf-sAr0GT6LEGt_7QpZh3C`lx7?bZeDcZEr*Gf1`f97m zM|J%N%$a)ioFR3^*W*lHw<-MosD02<#0ks;|JwF{5Tw%~&wZz|PeIB`EVWA zDUmKP4~8ri12Pw^!QclcD-i2!E=U!;nlO*&2N0grh?yeV;g$=#Q!*3Fs@sIrhr%b2 z^jrE@JQTq!tsWNtUD^RLq6h8d!9K75%E(hyF3aXj7L)o{=Z6^12bFD_5>J7AQg& zxG=qhjpY|#eDSo~t~;mIS6>s-AR-X2V{qk+GtWwQ+2II4si7`<&s9$@%b<>QP$r!>2ekSu0p!5xt+xCCE>e|yqB`KOp99YzQcuVwx zxgeyV*{5t4Gjs0?Ly7lIp(=o>$ho+sBX6W534V-WOwB*uD;OWA%nKir-JH81_R|Gtswz=ICJ<3pD;@4)%t&+s%0o})DgMbIFt-{FTHF3ZY~ z|K?jLos9j3mP)(+{AV(NViBXv=bwHq11zUuUm`iDft+KG+0!YfpPm*NjM*ZhShe%W zWVh-gasPPqvBzZ@tbO*|H~o11_0x3t0H(ALm{rsPFA64KEh7^6Ak^Nz@Q^JF!l0?J zTv>8SGKC}E(Ui99od^alXDZLXrG6kv&^oejj;gPGreHF8w-Rg!gVzS7KqJUx^2KUl z%kC&+byRD5RD#0}5YGZrwjwlTkV`?K04jpUCV6oXgSMm6Nhh6{M!ocWS{pOAe|z{5 z(tqeDlVn8=RH7gqtmEGwMST%8NW@jOS3!%)VSL01jE73=MzKCw1}xHOc3J^luDQzN ziU1VPR2Mmjqa`Lgb1ZP;ZCQ^i7nPUCFhb)VxURw`kklsNOqt=uZ!6?nvdQUij2hi@ z&%J4-AO0}?3>OKRK|xifgCPF2u>jf0;>g0_CEh{k-#6lP4~8}fiI?&i*G^|zvlxe+Qt3iiF*8;|A6SSo9H%c%x`+Vu%pDit?( z%xB}UxnGr3&|C30@l;~5APjo>VVO1N1`^SXs%3x_9i5z`)XgMH0?^babUTYPbFR48 z--9d~p%Z-hT+0%UvU28Zy)GF;m21JgP@lb2>fkZ}D*GrSX4bEbO?+U1$zmRS7dt{T z3C)egu!RrIq7R44KJkh(8OyaUG?G)3z%rT2MMQ0sZs@vQS9Sf(Rdc;&NOP-;W2?4= zkOVE{3{=+TtWwTMDhf&+>wqxekRR$RJY`cFj)&JvFTYf0P#klt&Y%#xATkbF#zccT zRX_sy$(lt>h7?UIZFJM+e1?#m2TM?zD{d^5vA!i-`A37}O&p?iY6=E_jZtiwrP{Dz z&F4}EB(@M^f`pxDigOUUK0^}BufmCn}Y<2HgTgH9=S3QKr%kU z#_I}*$Z-HJT!!1|$P7?uJ=d<8RBfqJZH`#|g$T}yDI;_JoE7NOw;)s9|FQ=ZWwLr{ zsrH>Xi(NLQep?~ul4;#2VvohHnmT9*|4p(0w4r1i9d?uhnPyCW{Vhr@y5_R5@(>UX zir%2yK+7qHKYiX6B`oD82PK||S;j@P5i5V6-yiNTU0@QWIGBCrOeGA32S{Z{WDqZM znoo8AbF4CCeauY2IRwZ(&789$*u@+0a1V;pPCGS?9{qCq{qKLDHr{w+S&GK=a7}UU zQ^T)zZ@=>nSt4!){BB+6N9(3V7afXaLb}T`H=<7tv0BuUQ-}382IS$TrD03El8(P@ zqXnjb(Z~wkGIJyg#*8pikWnE;c6g3v9hH5GH1!Fe_jEO6{S7oZEzAhznXH|ljAuVn z%Q-4o99m?_jK9i1EtnTd%O#R5Sq$91sR0w(O}a36O|^)bd>dI|B$GE@unB(G6LkoT zL%txs{>B?=J8Y88JyG^M082b!4~kelS7OR67w2%0vLSO`XW#*X_KdB%{%Sa*9%Fm% z`RCJFXPlKD#JSUd&i?q=VyCqARzLMYwWptcI{oFSqth#|j)v|osrQV%QJ3w~o_p*m zO4)RufBI?KW}9u(N7!h-*9^U7ADAVVTmtUkMO<)+_8C1U4UJtZh+q}eE5=5W0fJUq zv!1FA+^jG7O|S3?f_gLFRlo}%`F@6fM$cqADZ1+1Iw^>L{$qaz*yUADT*VI3=CYmt z)FFM2I+>h-ekoF&zq6NAE<@j&0Gk=7HgDfkk zjZsME9CG%9ks~ocao~ZXYaj+FhL1Q4vtDO)oyM*TC`6lh=ZIzH@4owPEb}y8W=716 zS+jfXwI^mKZ7J!?v^Vh0R^u`KgG;m=jb;fZT1#h?FT-e`B;tAM2)R8Yd4oRF$dnkB_K&vfiDI9Au zxbulYtzw@5D(b0lSo|9%gtMqEkf|{1F(5{xzr-JL1eSQh#bQNlKK66pBwM*E`65`V z3I=bycFsYDI(XzLYgN6#Z#NrZ4F|PP=@Ki$nRsbBrvwcOiVrxHheYB7Ka|+Rco8mE zm9y|W1fW#Qu|-9OgyOu$XE|K~oG%7Z*gOcFkt{VJLT`v2apjs;WEPlmf9ue`a+n_~ z!(&la`X+F!Lm@f4pjz+&(Z)WhO!QDW1HVx8Q1}8pfaH>514pd(DiDO0V-a*)-HK>| zjICfypBXH<@D#=@stBdQ5+7O`$wVwKnCPMi70FnC z;mHSeImS3rRShv(YwBaN7!Xz)X3npbO+<)4r7W37>Ipv`j4Zi@)odwI?HZ%hz>R0D z*%Tu^I&wh1g;Jqm2R=h|;knaEL$Yn{2WXmU#Lg 
z?TEpN9d_7Jrh&KpIFRf%+3(!Ocjg~c$EmqTVSDpdfE{V$UY9XN~f43)k~L_GJD zfwD~Xk7maz(FOxia$5kbdpb_Jt7Fkr&cuo#>!jjX#p-$`6{VE=Z}z@1?>butb^ma1;D{uwxOpK3#2lo zEKn(C-62|y_?x_Oc*~j&%fVQ{0;#;wa=WyULwkAsYbN6$mZ>EMAe1((WmQ6b;IX6M zA0p-UO-ggqXnqiDI0K5PQ)(qBN&E_9rU!n|zSr*$Q5EzxJTS7x%J?h?bBk}P!0KH% z3uGn=ra(rsTqqTp1*Y7h+5Q6~A4sQQfa2v>UQU0+7qA;{v=P=E=RKl%_n(}`jr}q` z`R^yw)z@5`oI$SB)mS6fltF~;Ui$&IJFpBOn4rF#)qsV|as|1WrhUVj|}6x(kr%ZcrO-~s6; zTVj)JNipi=W>O<2ON_uZ*{~#o0mOX$Z;>Mzf986GDf-3OFVZO|pOUV=`l{3ovxv6c zcKfv7zP~{K?}{($@e&!!-2D1izfKeIwR+d?T`?JGy0p^DE2R@oINr5B^3X%+@FR{$ zpMCOaT4JdsWroGTf%D07PqaQAbWMjBx8uM5CXM~-%haP=cX{zm``m<>OYwaB?KkO* zFUHCj>;3xnOWiP7$UOzP*M(mHSWOHFOq?JxtZvt?-GG~vzW(a#^d(*>Pd|OP)C*tx zcY}VuC}v$bH;+A=eoVrC1mBGRHhuERf6@dj3pr!InQ*T6)(xUDc%;c9#gaygM}iV! z@Y$`YK<^Y10#k_had$18_bte)G>i9|kB!=nRU; z1e)k62MkagmhQUi9vM*PUKHP8pk=%5w@2xe@HDt&F!eu_1msXZdvJrp&V?XSQ*-s1<`>Mo@?KvIliGIcc?bOpSf=GnLPXwH%M4 z(-j+K5@t}0mKhW;VVS&jIfEiE@no##mgOcHbK@<6i&TZpzSSpSGYAg3IMuRcfFk0H z#TGp^f=$B);U9!xy>_7H_LA_04M~(xj;czVn!ZqX8|UkQkdYSxT*})xq10?bIwQHY zGu3-gV3TaDJ(x)8g z`kOw>K?|VB#InGKu-r78DDh0CUl?flB-rQ8PLt`$xfL_j{Hf4Vu*X+pr2SYg;O9Q@nnRs z2A^8^$d3el{F6AXZ7-@tb_%>wfBMM>O4F~PHBnV%>1WoT zqPK2bI>2X@Zk*O$eJXbT zCM-m!(&~s27@)WW0~CF+2L+aRTF3?{#8kZglA9@eZT5*K%5t(uGK4W2zGgM9BA+qH z%Js*%m@_f(fUN^Yv{Hpj4}qAm#FW?2OTFqdMU|-nhrX6-3S+s~u&6vWPS{!w zst)m&{vPZYXS4oIJ{M`VRxr+FT1yTR@4uU4N<6>DI=m*E5QYNLE4)C)?RqK9Eg^h$w%tA9c|_9V?Y@b_-EqiX~Tl_5H76 z5LYdZ;&!t=oSoX4JUCj*R-;~fVMhg3e~}Zs9kRTukuZD7xR4XalDZMP4cpd0~?!fv00jD?s;%NHl8!lpX~nUy3CF2U-i7(hDlMagn}`M zIa7!OP*Q*Eqo5kMu+|-}K@<*UgOof&h_Gs?^klALn+W(3&%Yqxpa51v9{d!QelVVl z5Z3ryWsJth90~e5`u!oC#;)AC{!OGP!Xb1Fss>yA5n`o;9Qq$(C3cy|OprzBhM%0? zeDlq;&34=1CFk^bA&VuRw&D^`CXo3IBj{F|a;ftA`Wtu&GO?L7%Yc?H{Y5bhWsX* z3`_g$^9y|Z)GxjE%4_NG|M)vTa=j;gjDZ8TNKd?kUuK!5(#|{Xl9om;6~ggdJZ~L= zmw+F9g#Ba&4M{uT*^4isZ^Cj!AAR_dycl0}=+LzN4%??yS6v-1{kfk4{-a@E!%|h3 zUwUb}`<}ZnK=C=W>89Wd3|=5@y6L8A^UXKM5E-U%KLl2~@{&i(~ z@~J1WcLz6lo}6avGh1ESqn+{8>thTCLcP8Rx_Uo^`o(?|fz%&UjvIosSC>?hA zZ_*k6duF;9`?h|G!J6sul6(0TR?y|qhb|&7`Dvf&ee9ca>#et@8?L_|`RC)zpnacy6xuMWqGa%<0qy8^URsnUT1CiwndtI z?g5Y?|D<%@`4^;Xuf9g~Zo0{)Y1{3$!%|+ir!&twGmXXGBiz86FX#JU7R5|xtEHA) zD*f(vM@awXpv%=)U!88b#isL{W5&Cclkf?ySR71w84)zNGq+hQo88ki?GkpT{1{9EA~BG zXZ`ikUVH9^0jn8MPgFj>|M0F9t<_$%1gsGVzyFFZR~VES4!MB2f7pddaK=-VE5`7a z7rH<|SNC6G3%mg|eyu-C(T^r$cC`orYULry>{nDPST1^~0to5vBrxU9X!yV{kBZ8K zqY*661Qg9a_vJYWdr-WHnH1}-vo4l+;tUEf*h4%;MBH>RXQB_mS|&%X>?QabSQO1Z zHMJyT2caII5J9g#v;aXzMIcC-#(*M7M!D%V>7qyJ@>)8}nRppJ!)PY#**GwUHt`^p zH43vyWCNk7U_6RDS{XqznE3%dcXAW|t<0d%O|o~-OFT(%L;dZ%P|I1;6u7B^5Xg#N z;0awe7dEo$R3)-~i*-g}$juDceK38F$W7!OA$tivQ)MTZw2;VCrnSAOlI<=x09=4& z!*O%4=>zSFf%f*z9StLmk`L`E_RYE!)B-@OlJp5;p+9gqpPU~gFj*IEa+;v%rR8X! 
zu+={^3ZJMGwTy#sND~xpA(EG-RcSuqND*dXQ3im*NO>l!e?|*V7=Qo|6uJft9MJG0 z8#0eY3|-pau|q`{nL_DjPce|~Q-<<%gzBGeu2;0w4#voqg9@ z*{7J*pNguMQW5gAM(~k2lgD3ZC{s)V%>(&b>rXi*uaXuOQPscUM=lmQF=W!|?6Zca z%P!{tMPEGR{Y{p5n$q>R>X%gP_RY5CbQpg*Ec}WJw<<|eM?$1ql5>Jp$`Jx@5LYfB zto?;n*(Zxh> z>Q*8SfExGacX#_)WF=MRZLT-gYl zWWSG1vUkD;**op9Q<@21(Ao7&^+Ri$_II-m&^N7}I+i14bmX+G*9mY)d*u3%26mBf zI17gOs@}#ivk|MH)!*bnpkgV~Ljv~vB_5S$O^`?>znrQ(5YeXoWMhi%39D)VMQT$R zgvbL`f(AF)9_mlN9*_w9B;e%^F6^0+w02322CEq%ynrRJAqSrJwQ9hlZhGa;K9H5N zi~;jlL6)0|%(`G6p=$xE)nW}kA+tqRFjY>NVv!6^8K?o2lOq>?d2v&dN-j3cWu zjsAQE2mZhK{gGYQOgo=eD+eT=A68$N$??~^SdB{6gH$qdB}WOQO&JHk|_A!{QXh-5ACCV*&ppK!3%&^$IgC#xP*3k zR!dFfqaX@WhbUmW8`6uNhdz8JHNu1G@`_(9riv(3{ne>*1i!^`Tku^bNv378H$^f0{mzbB2y7yf@b^2oH- zT5F}du&mCxcme(<_TN|y`@MYj*=Omw=bys>;Ka1W=9{HoU=W1+Tby;)+3E2o9!Dr9 zrCxZ+&kv&B8S}23XEV(*b2<(mM=g#`?OBC4-+CwQ`-^?B;r%;!IXzG6*QZY!Gv*z9 zLH~`+GMIo_16?tLVSxn}NW0*pk@Ys%K>T~=nWxj~*mvNmr=LQcr<0FK`t|E810G-E z>wCV0Uu(@ZFsS+a)E)cLNK`cBxfw0~DP48NRp}B8bi6g@t<(dvJZ70?R^AoT2OoTx zzMc534D4{QWA|NmmF1sCj~<1OSaw2Jnw;jGYpyf^f848s6&gSO8~jW}55Pdrq8OYw z2>UxMfu*t7C*Huo24^wcddqE?2{9hCJ!Zu0i9YF*k3NQ-@lyZYcG(q!J=kXlX%c2l zypC8OcIct$t+(Ei86w?#^uz${x3b3sXR$1~)Z%I1efE=hvd5i*{dz9D>@u0rvGvwJ z#WG2IrE9OdKAm*piRf%RpQ)IxcEN0gZs;2;rjw38F@5~;M>0t8!gDW59lCe#f$Qg_ zG-1*NnI$x2$l$c!KKrEQS6m5Y?PKDkAD|n0M?P>rmUuc?VmxrZdDDn<&cPB-GwZd2 zt-$jGPBedi9R6GEL2=LBX`TUdr62rYAH)+$&HbY&e zPb1D6k%pf$JoQFDT?(7Q_wL<0-Fe5I=zkOC`ooz*efsoGn_|Yvfd?P#`qYLO^Yy_j zm#cttWPy%;e~`>1HIM?^t6V|ZepWx_%qH6uAgOwUmfI`aXIQ4{_Xpb32JS^o8~bKj z={Q!VM0e`$POS6SQdoA#9bgT=?|z?MyW})gwQ%TIHv)Y2Y!o@q5?w(111Ek!Uk>aL9=1) zU}bWPCRgV`uvLK#1A)Imsfu8;zH2cxA6$a~UTSMJPy@ZfR%X4BS==Vd`Fh{l{_Y^8 z+jRNLT2*jst$^|6+=~vFKZbyc0JbWeMvUpQJgCpVRr5U}FxWM?6H|dZ_*-FB+EA8f zoh(WMpzRSD0%&Yg%sY}XyyZa=QA})VyZQ-RJHX+N~+jo5#F#DD} zJ*XjWlZ`PzfiJgqz~ID=JMVx2ikaPqB1<8r>|H~L(#p}v23T$^v23vA#)I5Z0`R~D zADp0=c1L5eI#}Mq>x`;8(*2YWWMHfsDoS(=n|;fj(~OvUCulWU)To2O?6xQgLY;xx zMkP+1CGUKSs9g#r^Lc6-BI`D3vu(MnFb4Ald5y9=P$rO1{H?i&o|`AeJrunZfyLuAo2&pt^SrrX5&#@C}l8UzLO84*L0yz z$$%Zoc0U)A8*w1ms=!bPys<*y30_6^OK*L>0YH8$0L1%n(n-+YUFp*<^Jnyrm5XxCYHZF#{Z zOSrgDQE#4-#JXsE7^C9M72|E2yNCm=Y(}y?sQ*5{KPp46+krwhl;xSiq9g$Fe1$;x zvp|E_SUb%-k}_v@LDs;E>;(tB|CK&#l1Z zGW=J+{F|e=(b&Yg%K?HPg|5`3qj+ zcTHp7ek<+0_b<|W@4PF^D9thF9BG%GcS|d+vSNB0`zBm);f3juM;}4In+~%Dbkpaa zGxSK;V|LVuC!du1^y-~9*x<+Vr9GEC`pu`7zGe(|C=bwMR#B5>A;`qx^N2R$i81eEeqtkZV z?I7pEbg65ab3p&}t6%;qEws?WSR&?ybP5JkIf%o7mA!xQi?k_b=5mnc{PQowtd@&q z39hx)SR?I%*#Z6g&z_!o>gjY2W*TsxouP{kO~)U5LYj5oK6wbx#k?zr=IEV(pu+H%V+v4qX~7$E79X2EQPU;pY?mI!hxiCPn(S{qRXPjj$q-gH zzk#>Yy67R1sq_1v^0daH1SfNtO%@Hdyge#lYY?Tj$<$dLEO%yS0EX4FvNOmSf({f2 z&KQu}H8b#b3Yus;CS3Pk8ig4YC!U;M9Q|Timoq4St9wvTj7g&&?6y=!`KEZxepKRS zq!@BFnLQ6sINAUsTN!x^DkCIo{D6>J6#;`Rr`8Com4Xp%Xo#vU3R&6hDELAw0RT?W zWsIh9)%FiZqj0(!2Pkw83M}ztCq!k(3ME&2;cAPV>9T(v~7>%DUuQR7LR3Wo#Ik6EIh1H=LeDcXB)1?@p{u=9@ z4qbe4+{m}U1McjFTNDLt-T;bIP5OZ6Uw$;~KlI4M>B@ip6VnC1PRlN{Od5tMhO^=( zQh6Cn6ywHHh;BaAP@|dqXl5u9m9|`HQc=#GQ7EODzC>Jp^P68u8}p(6FGu|)^})^g z>#xS8lTSHK9(rcz)idq9)6QwJ#TG>q`Y9iqv*AS3KI;jb{#sC8^Fdexzk?2VB)-Cz z77suCaQYWw-3?z8EIM>i8IWrjr!oA6&PIkdD54b~hz=^iBbiulBSl=ZUKd_)LHhSo z|CX;wHp7Vjs;jP+df>~i+it-$(VK3ov++l2As?c87yI1) z;*Z?`%%hC-tPQ>el@XHRVAm_>b09tT*kkFo+i#P;uoxb=*WY0MG-%LZiLi2IUT@M`4xF@Q80OX|1oT7X zFz+(aNX{Gas!^-})R0V0XAj2=3S7Hn1_cHveh>yI$`!V$kmXGE_5f;%_HzyMbwUI( zY@(Lag29?wi4@X!zA)ivKGZBim4n~|(Ln`xbYlQ5s?O%*R*VbM2TGbSN}hxiWF0IK z76^EsF#us_b43woQc-3snVLQbYv6Z~0Fz#K1O&=1q0T0R=Armbitu_n1SN$CC_Cng z9q_sm!^9uiF&2s~$Ey7eUDUxL6sn2@qJs`(`{f!z3KM^z4KIOns`X!v;8&)PIM8Hu_5^8zrCHSmL{fnGbiqr;G 
z@R{iCQ45%;<%JqC(MAJb1WLWDc$1$A-rJkS;IL`4oSYG6xk|Br8=*xYBP6q)g@Ruv zihx0uQ)PtJM8SwQ=&AZ?hD3pU?bB@e?Fj8Sw3E-=l zvo4dbVTh$Y8$}fnZm2}DL1mH2P981kQCI_mix!+c_d5xa5svDLQbOTuKy~bE{fb`{ zHQHaqiUotr56T;^(&YuCTAl@%L3b4fVJUp2>lU20MD{T=6sj8)fG?CzJ@u6I^2@KJ z-*bQhU$IUnqcYyVS@+R@K29U=ADQmKZ``q;e}ScEmPzYj z36F*Gf^h~cMbnysJ|mWjDF!U1>j!=Z2{27d|5u5&t>b!?M~XN|9zX7IJ_o=v^}j!Y zc5|kIDtd(=MY8~mlO34y?~i6|i<06)QI~NUAsG&6jM5%tDFTEvHSRfvZpn+Rk>0=_ z6x(jMooteQ0A^5ZDFYO0mk%_enMDZbZL;Vv$Z~QOPgLduyf{Dej5B2^Aen2LubbVl z-@#w-a(}f|R!!Y{bT@ZOE=niM%%L;RI3xZ2@Bfg!6t>@Xo3uM-QE>UBKOXr<3>0*e zWh_@(amBR%{`;p7Kl(6jiI?98AACr96N3i)eg669pQUA&S{mow@6$Z<&L>Mlor?hq z*1bFAx5ctcl$m*^nPiRr@#DwKc{*y;3#@gTZ@&4{3d^s6cIc8`#jJzv@Jz*Lwf_D4 z%ThvI*Z-X{W71xG@14fH`wk|TOisVRY>JKX#rzzz&6dXFOZO|UxB|afE|D1+TsCR< z-FL@t#X*?$aAi6J10lV!-^4m=|0o^$n?q%f3x4(g?z?ZN_donF%{t30X|6d3$V@5u zVn6o@28)*thU&FfUzM2{_uO|cW7MD-op}9%x9l%4q5JqGud9j zM$Ox!?O89@hqE;JwKcCD-;SSv7v5*2YcLq_$)}&B^)U#r>#n;Y-qXR~vt_v@&brtE z`zx@|Opiet&P3r-bSE5lJT^6-Gwr+YzG>~X)=YQZb1!uN0l)Fb;~mqS=^T9JKj)lt ziOrYrh4=7tN5}^?i{ZTmbuGByg0lC>WtU!znGciGHd}8k?a4hf-g@)3biqZJq-(H* z)2y@2jzOv2(&iYXn2dpsXRvAdKKtzl-?1MC_RkoB!IB|EhR95{cQBjff6Z_sOqRj^mO4r|TBbFvU2KD?w+68^-Aq-et@vnbM ze>xR2DCo25EN7P4AmJd)e&CXUTmq1@QikDuN_PxGoQ=VWvxc9A{?{c9MJ#v0Oqo@% zCkszp4i5d{kAFyGFoUHpK3F>L*yC`&SlWHpdcYq00#iQ!v{qJj0)gyOIp-;2sS(nQ zw)$8OG(fq&Wo;%u_4cWvTy^B0>^far!+yXAR}`BOrbx~(P@K;JM=2&AgZ>IEvP4S> zea&NTI8qoC-eJDu?+-5VbfV0lcqy&3-n#f$KQHlQ^`QeywMzx1&Y>LpLoHyUmKSQo zh{}8@c+8-{;uNT4)!!m7U}M)j906*lX$VwBbPLztPN1fjE>x7L#o`Ak?QjKKWDTe7 zo4cq93)^IDSaU?W4-aoY#B?q$@w5nDWW{9cmXoG%=<-f>V*O1oiA9e= z+%V{AO!2$v#+&fPz>8R4Z=6hh;{}=1(B>X6Pg-%M71LT6vFC+fzLE)gqE5Gfz(~xS z{Y(6tobJX(K1Uz(*EANJA+5OL$`~y^Ak91Pysp)J6ZW=;$WdTB-)Q}Y>uZhZUT2F}r2OD1JG@djJ2yrJZvTtTv`Mr(kFH}Xm7pEirn#j*r5#WiGsXUIOo z1OJBjq5)f9A>Li_LgiY#=$S<}kJ^9V{nJYrndHFH?{VW@b=6g}6f#fHo6is0VaiRR z1su(%=t04?^1Gk_#+k4Q*VuH!4L4!L^dx*K(j6Oxt%5H*j#B&Nh2iG)mT1$Uc{~;x z`P!!}Eg26?rG~5%FCx7DuumNM=Re7f`)drQaHF%$F_oC>d0uqUg?NxSN2YzQzyA7I z2lKGh9UF;>T2YAsUT8BArNp+JdQMlV~ccP9vW}Qnwb0&Vf4$R(@H;F zIqkB`&S`Nz^kf6=+TYF(vKVakKl$X7^e&cW>4%vCvthbt4-7VSoJ@)wAr_9t&jI}M zx9fl4@L(9uMWD%G%L}IIN#Rf)(sTO@iSY*}I}o`|li@PEh*vmcbgvXK9ADjBdili| zpy-Dsp0EeSLO;m1O`3dDXn)}#UrGEu+^$b3N7PMBEJxUA;9&n}k>`MBEO|1Vz58RC zjB?HZ%+YlI8Zc>MBWzru{Xy1x)qGa6JZiGOY1&%zc2bdR>f7J?A*C%A5f`-<#vE z{g3*nhski33(oZypCT_|70j^`;iy44gsENq`SRzTcgEn0(|6KbbI+Xy4;q;LP#vbu zt7hs%WArK7jIasB_Al*fROTjSIWgY;Kw8^HnF6bn_MI3D}y@DMwV01V9Q-&r-(wdbD8nsXmQK$dso=r zmT9s)uq)_XwkHibr(@x<4g>I6~85I0+GBxLa!*F&Szi%dSpyEsX zR{2!c)qm)rhw!n?TWQZd_e{&-V;_F4ZGG6?R%LQDT=c%+;<;u*Mn%~IU1>5t=DGa| zqLLD=Ag&y0L(%bA+J=1XQ&uJ!4@`Cy73(s?4tC#48+I#JHO|Ihk?~YPn$M_~($Ajo562-+b#0e7U}@ykI_He+*FkWGmBX zAyYFgN7G0`fF0E5PtGhj>gc1>Be*y5#l76S5Dfk8L;d8NZLe!l1$q7_8VyUiNc##E5gwORv5DdKx%r{wZifwSBK;0#PV9yA?j7** zcgf%W`q#89&cUu-S+g!!GU&l{9+nB>M|un4J=88c?~+!-?1Niwxhee(OH)xdfAa=dRk6#%>KVDU4enA)z(-Yvu^g1 zXW^@^yfXDieNH_c0~B1IOxhb+bS@h|7y}d|F~ftiQ4Ylnip{Ys88>Agju{jjpkU&= z!e9OBH(1t7KRSLJ&s?XRbTVdl+?Ci5euJgY*4tpcs{S;^MAP;=tc`i$siPKSk;#Zj z>+?s@22d|q^|j78EHB!cv4|XmnR3tsZf9QzelQK1$78-`v}VcFw&o^fIn}t|!mbnzaOubvh+@_?v>NFUlXYjh zG(fP=QKP%D9^8r;p!gXsWD746XHaB&Zq|X|wJNSD?Va(Ofw^7%wVtX;k1Fv6ZY-DJ zf^`c8?zq$wYdfxt0TXV(q;=#G58W*aOa0Yk^sTL#3K_WxGl}OY#QNK%{`bH8eY*3m zJC(RYasU8807*naRPmtqb=n`Bv2ki*hjBA`?~4#y0NOR?6f?m=-}-h-l4W9seG4a5S~ zj{Pm#jc{ikIt@V6e&^>O1aN(cJu}+u7bhiWSNr2nyq z)T}@4YjYR=_Vy_~H7!9t*HFVS(ayd;f2jRbX4p{ z;RqcaU4NUufAzi$8M|>#W&5{_h2uK7{yi`fGbm2e85I1IRX54*D)I7;lkJNxS6C_; zQ4uBwHv#_a(@*6k+<`Wv&@OX+~nHds=>f zxF%CQ#ayjq&nx>B)iXo8chsF>M>){mUK?^mb$`ELE`&ueEflASR 
zG?e93TjEc)Op#3Cc$66wc&E^H`Y!1}EW@KSC|rF71QivY1=XoQJpZ`#(Es2i_l@{i zk%M;RB$=f#32$(F zVg&%cjFtfk%uv{Sk3G`c7^oOHcp%;j{7GI$TfLt8_fxVzz(Wr{lm-nNDEotLyveZi zI`$~I2m>%T-E_UY;OA_TK6rW0Sq!}~XfSB-;Pev=8ZEZi60#ApJVUfOM<>GPhcF0w z;rSP&C!Tr=W1sM^XHVIOpeO1)3%;7?rrsOko;zsJAPh=Oz+l3wX?qM#L2;UAp1Cm~ zaH$LyS@5|k!wJWqkY0W5RVTB>N`7dRl;QH%tkoMF2xck+#@s3t6S({BuU6bI&_pX5?`;%5M)l9Q$MRgH~MA zU}tL2Kb=8=C7!r}IOMoVw(dcp%aK`p+;t!fKKxKD@pLa{P+)cqmx$_z{onWjCi~G2 zn5nZQHuD|zS6_2wnsbi+7@)wj@8AVopV^A}K^WNM{e;V3 z9s29Tu*B0a%#`VdrO+@yan{+m;!RG&Hr_ZLal{d_w+#C=XH;{43T__G4{P_^e?JTk zV!s)4ya1s#(Q4;#zREJl<@zJKvw;lfUp-=@+!v_Z9ceMtF{Q9NPxJn3&o$Y;o#Sq= z2tt|O`-;{AIa@(hkkbK7>H4dJddIa_^0`#$ECto>HVVNZr)B%2@cZgy?AO8%-q%@o zJ=r8%t{va&_lKHuTW;N>DZtd!KMYU=EdX1T0}*d1)}EA7-XLM1;^G&ib{qN_PLBX~4BDV+u5$)T^p<19elYl)imS@>e z4m{C*cB;xu8vWwMVlBZ_pig+?kHTSN3D^!**p%KRX1Yvh!j3V@L59 zZ6=JI7kehT_!AJC;w%Q&U3VRBPAA~2$Z=TrZ+)y|xGy$inzQyQG95x1xbca=vZZ%U21&GrArA@hBCf7Ecgr$erC%CrbEO^oL#kjF)uRZpZHQst- zB>KD2_iS&CsJ)T6vXK zWWz8GbdZNPrIaCLrT)>r@fHaR3$LJxeXS(R){ePXI%0A<@{dQRJMOqEjl&XI2Vg3$ z3{dp$oxa8ktFibhgXdX~?mgtIl-BmAg?!#KgI;~u_1>p0*vpt)YL~f7Mg*es`rA*B zz$i*4V|gb$IAa4hu7kQV1}JvheOC-n@IklZ)Qp2d6 zG_CblKd6bdMMD(F1k;%p{vauGDmrER;3K~&mfUI*k(p1`ngt>~8FxfX*?z90F;@1e z$+TMT+FO9ak^PVD52Da%4oP{7L~X|Eue^E;*NRa4putrrKPae^Rg1XN|2ne|IcbjO z^^ca4Y@Yh=yYI`ow>!!JMfvh8`XbEgf_v-s9dC64w+TzmpFtfxe;A`!s1E!gJps#D z@`o%3{<*|LH~ErnvdmgM;kXmz1;WCMER3%pN8pF7E&tF_|BLE7vRm+km~s|GU=-g370Pbccw6lnriBBDrhNuNVgVuVe^h{WmwRQ~_NQj194jrf99!*Yow*Ly z|BALCVW8xYgRR!5dPpf_HqfPtN=od)9?R{{U)B$m*bNfN3F^b!&DO<_f zTC|%L&zl@?;wAmoSf&SCD#$(oTW*0F6qW0gU;;9poHE$Z?))nrQSux7wtM8^N7K=N zJ34**!G92I)@L%_b1a+wgxL*qVy5_H_E(w!-mh^|qSeW1)TmJyP&gOM3f+Ny7#7E} zP^-xY4%{bT4a`FLGxYG8;^9XgO20hl5Da+q#Bx4o%IXbIKm80|y7x%u;-&E5K?^`( z7kmtQGxUv+85FDFW3XNEZfL1I^PN?E?e*8vKQM@X3)bwPIAJ`N+*wdQ8k?zK-}L#| zFVb+l1m+9=>8J0J?!%Hwd{O@n1}OI2a}U{rVt%~PKJrh0maq7wS~ze??Q`&=`oV`E zf**sh{N_$+6MRVWEk4S4=Y#iUd7>vCdmLXrkH##hH!$mC9DMC6dn?R0-+a*bCz(;f z84%hIj27cl;?3XIZ@l_Odh*{-$_sH0vT(*KpS8FIQ{R3wV>OZGWgu#13|Pn#Pgq`a z5@vbKgV~IiU3wV?H87hct9$_a=A3-eiRo47Pv-TnpMHE{oq7hZT4GZ^|~!}6uE zXUF_nP4o%Q>SPOZ>8*L^okxBbbBV5hVAd?tj=SuF<<@t{b6sA74$Qj5pVr2@7-xfA ze92`Xew%)Z85DTf^o36X| z8Y~Bf0SYYfw7>#`=|XN_(@g|mG|oW>Vu_~*@5jfp+=Jp!Y!JVhEaf%goU_q4hs*n_ zVc0L^cfUKr^?dOD`*IFl2|qYMvG0ESrmgUiXv_5v?IGW_z`9=FqkX7rTYoV_2QyIm zq){vn+7yL4wg3IyuKo~l`<*@inH9^;y6G%sy4-Ak>;D>V>R^IlQztp&Lu{<8F^E9h z138#n3W#F=yZ-)AoyEZBIt-)2Z>H&SliM5@ znqk;LiBr9LbnhusxIX{lb6NiB{0lCSbq0I%m?3Su%}>)-7=7g$h#!9NL3$CR+3$b+ zL7IKm*|E{nqL?oCZF=aThcSKgEnK*|ra=P-rPWtoT}IQj{z+aIa}7wY8@TGKt73%u zAZ#o&A-#ktLHuHXOGzz)QS(`6oz*;->2a^V@_Ksa*{AX2cY3UKI2UesE4JwtT1PI6 zbUX$q#$i8)4LAI8+GEe3V zlmAX1A}%vxAYq9mmQ0HbT|_oa@;fjTjKh)=&%f|IVho?~1rkTRmtSsK`GRx(4K|RQ zJMC}=1vh2lV9TC+{~Rx+N6Uqt4@awE28wiVRQua+F{p$u4)|cihXZcb#DNXo;Fnx# zNem$1%K^IrQZY`N(W6IUKZD29JK*OG18&^KQTwGa7{#fY939tWozil}ehHthyYc#T z+%cFzF#~2$tg^B!eM72U#wTE^n`)B?-@RwgqPdaQ)po?zjVc zdyJE{Lbv?MPvi>`zTAKAx#w_Gog{00F1ze9(zo7!{{u|dy;mNr=D-GAoCWpjtFK_~ z#fR_(%LfwE#n=}RQ_e7wnC7r>%~5XH#dY|`VnAt@S!R{>78%P|UVatJOT3->VtJ9p z7F#UMg=wmivaHsh)q?l<&uiM(e8_&}5j-frgcmF0Fl}@e)M+L80_Bp6FGfr;gJRW{ zFohE{C@`g#G2s_BAEFMAKKXd!S4upm*fw)veb65+zdY7Lomcp1kWV73cJKAFlpF$cc8eKR5UQIPh~eE4t=@*rrh`AkE*`;Uu>g+_b1AOYjFJQJ;G zx$&XgazFpB?Jv5`0HS0IhZDu*@s9|4+^EkK?A!H9l{TbZwzgr_^}GOpn)czlK7p%= z#<7LJqFenf&qOO)?)IsTp~?M;>{15;0mouaQ*A$ApG3^!LSsn$;mdmuHv-%N0~Eaf zcrso`)Iig|S4r6Hyv|r7T5e>oe{1`51|qNbPe1dlY+SkU!nno_T}-a8Pe1bvz9hUM z{pX|qq**XEf74;Zlq`0?Mu?!$8b{}>;D@R??Ad`ZaltmmC)URet|M(3-qzQp}- zG+s75j_b89`_K#5^Z{5;d9kIINJ}h+dy#xih?kd$#fKk!n6AbTPcBo%r6g9vGuTs4 
z6PdC~AON=;g;t=@^tHdR4Uh{IJf5nGh%bK2KVh^VRy2&S{W_|D7Ko>3>?z?N7TX zXNxWHL-t8*@wD46WtUxc#~Bo}ROU|3H3>Khp7E5}n#Z%HJ%7~kU$BV$eXEjJo{m~8 z_KYtsNF37T*Rs@~csN-h)_^Fuer+S8hc=@aFr&u;Iv5P(Q2s8{WW3Gba{}@7f5-+o zn~R~_{{50%LlT>e+o7Jt}P+f^{`gi1}#-AkSSFEr}2k)unA{W#-%1^&l)Q0sMHQs zNQEnN)lWLwEj3pC$PU!M(b-DVwj@D?SM70iy97yF{nT!}NTU6v>s6Vp0FT4*T!!8w?zW+o$} zQ`GT;oLl0#?s3@3T^@e=)1Q{*ms=M8<|maF2J}xVS6_W?IsNq0$};a=rhNHJUnw(i zk|G$AYpNj-=7I=hNDL@-{T~$0MnEHvfD{C$<&^Cj1A>gL`L2FiCs;(${~aKKbTNs)$3m+vi~zMrrVhDJhws95|SZ8 zP(+4=Uhp)-fQ{#mI{phKB7gpm4q^5Up*}PP-UVqb0M_~`UhR?k6OQ^RL}|SKq|N?U z8kNwEJ0pmqfTR!-q56T%7xh8IhYUt=;t>i-L z!p%JKzvbbFXl5AZ%ZT0}7*7->#~ zMkyNHjz*%zv}h=;R4lHk{0JBuXnvM7?Tq;GPFPxgSea-qaAv* zArV6r>ayWNdkPbEexqeD)o86@nGgMF;JoYi#?NBaY<8GX-t}^5L0myG6u3cqU z+P+-Sg1D)-69X=C%*kDuEHg~0{kuHMp|RqszxBarI&ATDS~>UJb1*?Mt{i#v_sUXB zVqA}gt`f3r_np_NQuX_ec;^pZ0Gfv`ClF!PPUmGBW(93)frC92Q5HF@>c`_?41mB6 zbiNkm?Jwd?m}__0#7!+<^-rn(W;CQle>IT!Ae3mvi=KAv$mLeQ>p?2aSwg}AISKxe zd3=(BY^d=kOYl5XhTM0n48=8$YYRd^U$c!wuM6(bIoMN^Z2Z%rKXYn+^UZMv1%Alx zvg>YTH=IE+%Pg~LKSX7`{j(nCU5clKdH?bI-q7{WTs2>Frhr0iAZl=gBXnUbOK=3z z=>9}FT&X&7r=~*7zDXHzRd#r)NRl&#RC{h=44`XMAlCMiu>MH`VM zj2LqJH?7okDdUW>BdVZ6YG?6sCv|=yjIr9i+9B=~v!$-Lul-$Y)l~&GFc%UvY_P`E zwuf8lte%osxH}>`#n2?e1L;yjilA!*LaP*6Bp}27pY=Q3>(8>Q=%!9vFB(F;4>+|@ zS)?$>P;c&kLFw=S)C^G!uI0LGuPY~eX>ldjZIx>keXDi-w;(sG)#!V+MoELLS zl=ufg<#ntvMKTC?h}Zkw{{9d-F03|0Xl^q>2ugkZUG=jL=^6!T?*A^wC3q_ED7aLm zewb%a$O?+#<%^i0*xn{6=rv`j`rHn3N}d_(;31wc3w`bH+WY98&r_#yy`DOr1`5p( zHW0_qZU1IcK}pIr*IXkn{hxjI*|O^{yOiyB*rBKX`S}}+qMSxRgV|u>BoTBXQyPa> z%0Y*&Z_Px!~lq$u>dp`#=1?y!qxtSwW!_6f=n+ zZ$H=TX=o9dAsFDC_mZ^-nNXQN?>m>LF5mV0eXn0XsWWHI=iFqxE72pL`Yce5mA;lG z=Ln>F4^e!hwkarsvsD@N1!Y99FhMnO9;OVW^!j@$2u4mEfC{_|hh+com}AO4cioK@ z6n03qsGYXoAC%<&cfUspoQUexUu|g6QP9l57EcNeDoU~X5oQId^*31o1gvEfOa=-X zQ5u_!rY0()?HuX=6V6drdr)PHoYMdho*_~SYF>|io?L#_)i@-3wbjdBd+k-0#xf}9 zA-6~kucMT&WJM-=0i3!NHb*_G@JuVD5h2a``QPAElb&7^V!UH?pkvf1~~`ow{0iff23Y7wW}7 z9uOrI(g0mrr*7d|GMHZnCVl86)04{dy8lB(WJHvfrU2{FAJoVU`T}AFD7BZa5leeB zqxDDy1!B;q9{nT=j>rfrqHh1oHX3o_(QE>QY^OyU*T3tSQtGm-{xGLKh}Gpn2Kt0j zQC_`5mBQ(ez61THRScAT^gokl7}uvz^O@k>x9?0psZTu(XHcAT4z^+&7h61W6_i~6 zBFxz_oTYxI$pn--G^l559B#YKZeg6-6+Jer8-I zVcFW+XGxneLyy0d*B2@`vE1W+5++ggn@~cs4PG)B(Fp2t!dSpxY8!z~F{-kpRw%Y( zgflCh>dO7k{O9dH#pe%~mD8$Ua)@HS5c#exN`pAnQDCe%iJ2x`*3_R@ZsH{8Vo=@^ z6BImyVi#=hv>Ug0nsrvt@IKF=F>@-xd+Uu?_Cmeq)ws^WmAR(0&gSba$dzXFcA{gh|1QV#w!UU2GI($@VgP9N^i7E zB(XwTcmp&2iKWMeg!M|Z{s70=7Ahk1uJwlmWNO_gju*Dugd*gcVWlf6xnC)W+a&LP-_c;#lGt6g%&NSSxudCS5JFI{%7jOe(Lv_C}fT$}8CV2-_YFAA!H&a^Ua<7hF&-{N=C8 z=rNtWK{Z87&EDl3-*zWxp99xHlBjG#~Wz6ULXvG<{e9uj@r>Sx?6<8T(iVr4AsuIJ>q z7d*9gLy@MxfgLDbckTGkSh?m|EKJ;LD>glJ->~qW^=NpXT&J^C{;Ek4n zZ=XPYABT@7PJFY>HrwoF!37t@*Zlgj{B7W+ZC`m6TUCuHV`m&&M!_c(#gzcByfmRq zc>R?!9Bn-s?J?81nbGe2suNpxp{@V(!V75giDlI2QDy9yndCrdHN%ZT3SRcH7hil) z`poE2qjApR%&6NNQjb@$75Ku7EL>)XZC;6Ip80pV`<{D6=OW`55#J6U?!SUewoy;O znGuga@nqq|5kEMcYtFgL!V4~dwxv(<^9L9#ygXq-xfd^+UwZkKvJhw&fKM*J;_`CF zX{Q&iep-M14Y0-29%UJ9Umw>d)V}=6g!0&C1fc&0iK=V1D(9$$lGt zJK{UvE`Pk_4>CdVMQri5^)^^RF;;y}vNzs%9TOoBm&YD|LarIMBYiL%`o3SwQcFbC z&mf7F{c0cj`nPdx;Q3Q!4IPVCL1+E0&k^nur8ZqARADA(!2bpx(twJW93t1%$D*1m z4{7w0H=EM+U-?`53KwofTH8MToS--c6BKu0g5tv;`cOFlXHf87Rp0RXeGA+aBSIo$ z1G zLkO+m>=xW0KnF1<33PCHJX9-h#Na#d;Dc}m#g%1sZ1J?$-k&J%c@Ms{4SA6fmR$0_ zLKx{A6)0C5(8wq_US((;cDQt5%Jfb*$d&c$ji=Vx#ia{oh7 zlvH@-t8FB(iXgFs3aUBB^{1p`fDMsF#`}cX=HBprEz%`Y?!mZ&bdg5rexy==7zt-N z(nRE;yFqnSv^KchE;PKYZnjP&qFi{>QtB4(vrf{|w>q?>#`T%l!cqxY?Jv2;reDbF zm)BNAWfFg5#!zr00$Oz`oz#mcX=uV2wAHgDw!aT5L#h_u%q4)1R{;}}N>}g0-gb(s z6ktJtV;W(Me0&?!khv6VELJOnL16rN50=QN9>jk0bQ{%QiPyxPa_TAN{BzI81jSfP 
zP#j&B@KPfiOLWxO3k>+oo1Nn^RdqP@@V8L zMS7@+OjTcVhKhutB9Q{*(Dm<7$ZtiTX=DFO-L#`5*+PsIy>DjH z)VjeE86iv*(f?RI1|NTOb(K4DHR8H9n4sE_klHXbCD$VqP!ZeqkPE=D2VCmau5W2| z4oO!CNHCD*Qf`00{xd#AG+bGq<%Nk6L${+uSbZsYXP zMXBw2^vgI%nc#PSi;Bhuv@;BHysFgLX{8?3VX2}{H^3EbEEq(&&MsGBgS>+8My{Xa zsL;Fl{h^vVf?ckGbFsAyC9j0IT?_%2^$E2KB`v!gk)eV@aa%+Qa$PiuGG6nHOSujc z6vrQbe7WylPEZ^o+sI5e%=fv^e6>x5KMNE|Ep4uYp&J_%mV~G|U_(X>*Rg_h?hWtP z!j)KEK2rq`fiyz*BdOo{?+@ddq6(LrEpyuxrZyyN!zjQ~!(~ZMB3xc*Rd@}NcJJh- zS1?WnN$o`ngxX{jT2h60|Dr2Dt*C7kt>YX_TNxSCqLfamLLzBs!Z1N$XHX17U;NUS z_bJ*#-xKPd{RYa?Sr-Q|`Iz;-xq;<*H)1zs|b8#gQT?_4&)mQb9iV7TR$Tgr(iodn-L zRMuK+t+LMAYk}`3`B?T9ykIBa0`o6`?R2)mN~%p{f`v7D8!zVXx#ynp;~$?WXVtug z8{u%&fA0C_E?>vw!a?}pmJ>95A%EIwrXTV8-VbU(c%8WCPDeJ7W zZu#g(KU(IWe?IBH+Fq`OX>HeQuD-e){rw*RGO4V*^2+7oyMMf#_>-T&7q>$0L~J28 zeVJ#Txy$}v`?~yI8$N zIB{aR^pZ=<&(HdKnFkY7AKl@jW#f%E5*aVP_yXGX=<>htF`wHOUwpB0LNs|Z42ArVS6B86mFC)sHd;VF};VU8V!4R2=*uT`b1e^;ROZ8esshUJc9x&C@`s|XHaZ~Ni0rKOfHxI z<%)9AuYZM!kO#_}Z{U0ugkiWo_`%cKYp;VX$2P-6+rpsqJExE!s{KMCsT|qJ2uUva z@FOroXp*QLY4X{o&(TwV>vbuE<74EFrD;M(G~BJO~3> zx=oqY&g-etYOHqLeV0y9+>MEf4`Yj`0}edc(Wa>7mB%6zBmPHTKDcYijKaN_xEi{y zFA~zSoe2sT*s;+KLbYobg9QG^#p@0Wt5U{QYU(A^<2}sWNw!MS=95Uvb|nr{WJs4< z$`&1T(81+OOi--0>S|>#oI$ZPCnyk4}XTZ$S9Zdy8nR=W|F%0E_H~pjA|xxmDr_e%UPmYj(*d_>=sm8S~<0p zCH7Eve9=<#eFpZ&&sVew&L(wvgv1FQY&*kr&k|fvfvY9ei|Ic8N=YVR1LNXlOwW#6{6U3OKY7g2ds6+?-ej3VX!@&51 z{wpJQr~WRF?ZNtW&`GG`z1;#)1(n*eh-93yIub9vPvD(#LaY-boBAunV`^pS zyGGn1PnELPUwfBe6N$HDfI7ZVDK)mA#Z$1c(lV!V5!-+-_7IT<0ouSi)&Af5hU7J< z#Vk90#d$7jQ}vVD%-C`>Z1IGz)^@=e6ua)SoBW{AaU%K1MGd4XJI4bg0&oh}g~4P4 zthoRQR;eXxHfz2u5ax1Kj<=st4;LEW^N z&LBlO-_MDr)Y>D0Qs6YaD?&L2d6Z5iDj#U9FN#zX6qjgd5bR)N_tHN7B+h=+a<+De z`U{^n2P>9+F({J+r0MrgmtO7EIP8hacHksHs8B6y>`+bpC2gkY%NsRN*8iCkU`L)) z04())^ng%BRCLV^kODAp|EHc{iA+=H3I%Z#Ji|6*Nd6Ch3ZjXWwsZe?Q9e!L>(xjwK#)< zTRh!&?|tQv!!SXy$!7Ah$Ivw~`Ps^x7s6yiSHF}K-UUbX02q+h5P=f*Px%_95YVAL zRBn|nIV)y8m4f=&yE4QLNOFPJ<{pC1SpGY|Ke7(ZXNan*kb!zl<@X2aS!2I1R!LTg zNQhxD?>M2S$!BhZq7X}_!>7U|CYnUFLU{y$6%@Y3(=fcC#0rY-w+4c*scA(D0f>ws zwNzo!9^`D8z0H{_Z3k_Nh|}QLD8GLH?QedI50cI+k3I1?CIrTnC6-tMlMQd;0N8uV z>uQ}DuhMR_5wzSNi=eEg9|%Z0!CWx4dyOUp}G z4Zsx=GhpSwY_rW;7F}d~`RacAl{xW}`)W)o9CPe(STXQ?d28a^Wr@WXFJosK3%ei0 z7yB>cW&EhJ>gubPPr^@&Ew(5m54M`N=H&91%P%j7VC%ZKFv+pVLJO7UmS4X7`SL$w z!e@AS>4g_zClBc!QC5V_M;(1se5HTut^X`P{>hKaop;<0l3``ZrIwV5g?sP2uRM>{ zDhp!Oz>YX0WsA+XD3@LKCw%e$9hrFA0;`0-_{Du>w0`=@C(7=-?=BMs>^EaDQS^%o zFDUcQH!n)lm2=KHyPSRYIpx`BpDFu){p)4pO*Sbrjvk{OEyY|_^Sg_GCkIE5#wwTb zSmiPUd~o;O_h4Yh>Km-2;8s`T#?2z~T36}IF$5-UIc1J#qI-Nxc3C^l;FXgm4(P-MF z#}1z%Pf9|3WJCq4qAMk;maJRz&f_UoP#lOA6jxz_0$V)oz4uBo?BEo6TGN)vOP;zUIy-+!Cio!IOba7t5QLbTah`0`lsKS&D%XFN?YUNeF4ex- z^-*en7bB_DB`KD5OePmp?s$f*+{}5<9>*K|8(413FC*~mfCIv2z(W!a2}7U`T9*5N zb#Ij_#1{40Rq@W_sbb=iu7n}2I~6KVTQYO5GFnMw8-!G`jx%1nK3HOiYfy`bwKh!Y z1qm*qqbI-#USTd!A=yW4E{Y!1O&+IoKG22G$hdIRtngD$!5I|iVT&jHFgWUaI3)W$ zONu#QSOj9d-x*hj**U_fCga4OsxFeS{`DT_Hn_A(U)`Hvy*e786kM81 zcZ#O}rR#_Ev}~IDzmQkEYR)woBJ#wm@&h`P^eRRW%WfE;F>-swArv?{XxDoAK@`)|JamT?BfuK2o8 zmt%KCL6&+x35slF8YbsxfKhNBPZiOYf*6^0DoA9TvjFfw55I=iGc=~QO{>vMl6pbR zP+8>j{#V~KHH69Nq2R0za6J$B0~`VHx&)wB0XFHVkyJ~zh4+^#lsSfJ8GlI^RS^A^ zvz6FN8^l$J$V8Mm_?0HNEqDQ69ZollTQ!U>v(G*UNeAj*?Qv8qTbVp|oTY8j9jvpO#!snPkSzqH@(7IMVNr z3|AW*;%N)*HUPEL-hBU&_9k0>5D+0%V$BJni5%ytxL+uu|5X&=?f4^vfsah1K_#lR zzWcfM^p@9M%0sd_L2)l8C=NaJP+38Nw^2>~tB*`ZD@tsxqZO^GrJ8S<+ES_xoc(Tp zfAq_AsHdu{EhYW59v4S`|4^^@p}2;E!xzyvt0$O63*{B-0wAQZnbE z_UejvoTqMQqKIr{oLExlXn-gvwextYRBb79YX5%}TRiQwLT4|*f%YK+d8G~(`_yzvc_<&{KefP&?#J#B3i1OLbeXgv%);d^e zF+E=7|GOM}?6G(`eUD6}eC9KsE}LLNb(UGU-PK@AvdJ#TWW-^I9VYh8h{+V{+i}Mo 
z%UFCw_ODxSEyo>qEWW~jQ?{x)@uU;wQ0HN||9|_Nhm|X?xDu;mUM;)rvU}NRqm5+s z!X0j#z(efWbik#fO>7nZZnIvd+pJ&t~`3=W~+PF6B-GUR5g9695RQ?aVz*|Ok5 z^OvvS5c9RL+UE7wUn^hu!WXdm< zu*K7*mtu=2*t5^)zliH=YfMDUh@AL^a-p53aE9Y=`0+3QoC>)0F+=*kg_@cX9>A2XO|) zLD=FclbL>R2f3=r%FucKQXpg3p^*MkKeWow99B^9GuAYi1Ycx2JQc1vFL#>D!NQG# z|ENSZzua;6hYi6;9x!)%ZL%gv1(xTV$cs8fwNSQ=pW9~#O2`uwtChX?`XpvI`K4_u zT?ji-TVs}}(v&uefElR23+w2KVq}ys$>WXo{s$k#es7PJS6-e_W|(0HS&FvEVvAx1 zeJorC8ya>Lrm9~VR1NbC;Xo}W8whArpp+gVfCM($4p)?|{||65M@A*WPBpdisPe%$ zbJ7|uyZ=ES7Z5Olglwc?mWZ5;f%z5rlJwOHua=QmJ~!7qbK+~#StwvosY4*B{d%uk ze`lv+gsJ*p$*dA6$dGA8+7DYg=J>;%a`x+g273$s8?*DW+!b%%WpER}fQ$0z5D6-L zEFK!hFSWP%w3JPV8jKe^k_UCN{4|f`n@6}JN_H!c)Gg2l{ zZASGG#+#@7$A#@0S>_f`=iw`narn~XNK8;Hg>k+1KbHo=%9Yd(1$RD@=113j{{c3XiKAAGnTlpAeq2Y-ZGNZM?3Z1IF2pjMtUU4flk!C!m)bAB#Nx@OPJN+Z@c!fWQa|c~|GEB( zb^Vrl(L85{yC9@N(9t+;`Z+AViZ3MjRpmr1m*+j4pEv5^k`zxn0pp+9JR`o+TXM;z z@WY?aBsGw;wopfZb6HneqE1}l+@6};C`P6#?BrUFJ>wtg#X!iKs{c*6seSRq|Huo8 z3-QC9TX3wo=9*=tCZKN{S08?^!1HkOVa5*UU-YLH$laxvb9A$G_{Yy|x^G z!U^Tx`|mA>9)byqO@~-Pq1Gl3_35Y4RLlgv#Q$Wz!qqU#lMa6a{g3u5ehE-l%5jSsO1}y1^?##K zMTgMHWR;c&4)WS2p?;cf{%Y83U#|6cu??ZPs;$4$aRtSWJMD-Ut0T%j`|N{5vbRP1 zLz_1bv`hQDI8TKgg{jux7=o|zxTTsZ0b>SNe-XBI`q|IV!q?`HmG#zLr+n&DpDs%* zxrFr5`|rCKTPdAVuDSM_viPD)loL<<3BGQhSzhYihb^1-+i!n4S7D)ruy_9g3zu7O zy#;&izac9pHpePcnHYznr*K{uw`98Xl1t>P`<-{*rR<2UjTr8QoD+WhqjKG~*W=89 zxyn~D*|Fh<8^FNnFu5_YoPPRG%TF;0z~A4;AA3xB--_=m|N7^@u!7z~_{#sea@NnzD(9YiuK0YTk8D&9Iph#In}X*GTyn|9WfUegw%TGV zIkcQFh57vbPrT&+3SPp$_#d8+P|8UspNws-I6;9eo>1;<|BEp}am+D4EHAzEVp(g= zb+B^bNZ2zywtafNT>00(7QVRWc1nw5<;0JE{9~NgF$?5PC(kcjb#NY5tvriK7_JQY zH2l6K{BE65d?N*G|I7dUmvY$Qhhde$tC&cduk4L4>o?tWQ*7-tqCAT;7Y@d5CO6!8 zBOZw+mxB&C2on(>!TAW!Ve6{>Wm~Jc=b5+ckFB|6@&GUSU&B^dr=5OUIq$r4%Yq9p zQa--h?xO#vShaB3pD!z1F>vH}zl&#xG5A>R+;S$q_Fs9WmGCwHeC0QoQ2N-%cEakE zy|8LvWclo8K7%&7t*r9?mCNV9@Oiwv*X@w35|Qxz+r{Vu-~86Mr2j3v(88F=`a)S_ z?KNS~>#`DqlLK@(U*ey9@<}*X1FLzkjncOc|CVeO_Ufw>$|pbdslrJG_T`^oo2%br zCCdyWMwV^2$7Iyrd&=q&w%21=$#eGEXP007>Q}OAWRs0QQuh1W*8qRK?1XEQlTlo0 zb1Du*e-E}Y8!=)yCNJK=7GOUrfB5|$u=3?UXq)x1t<;`a;jol!e|5$gr~Ul{xMGJZZFu$wS8!~#wXUEz6cagDppWo8 zkgd1brhIIto#hM+zHj2h-1E<4B?q40W}RjBGBYNw+WyzQis1-?{QXp1-2w!n4K1{9 z{B;VXlI^+w*0=pC+*#TVY>ESkaz|gc9Hh*E+Q!PRIx|dYln}8DYA(Q2C#mJuf_F3i z7MZ}}1O-=yeF$4T9e5zNcxv=hZ`G+*Xt{;9*0e03OyfW0qa`4Uiab@xwW#p4#ZyaI z)oG55`DP^IOsR>E1Y@whs!)(i)|4^Lid(4?Q32(dqON}NnNdd4_V$oN*&ebBtR_HJ z_mEBw$(9uqYhVS%-m(=)JxHut;xZ$Z3Tdq}%y}Ktrdo1X@Uh;>Zh81)85gVj>J7{+ zUv>4BnB~1f_85E}`|wS~j3)Pm@SF=lOu4$qRcFDnZ~VuTN>H7C<*OED_a(V_A(sl{d4q}8&^A;hg1#JZ&Vx@U;`J1bKLXGFGNzr)37&{&Oxm=WRt~^6yfq%>MAmdE{Bt;yXsv~4#~z6@LhMo7Ec)S~W?uUrf82A+jGJ!08T)Q8Dqs1rW7Jxw z9Bi!P6{L#b>N7J=z|1?Q>YqaWRhY^IuRpUA;I_K_NJX$qHzljb<%CFad+X_yl za_aMJzyA9B#I33P+LX1d8ntS!vtr555`%lpYR&fL_>b$bVuVnm<|bBUw6a~Vr;5#9 zFkwh)uZdctGTP00Ea)l{LBXkFLF4U_b&{3A{h`*`85FXD;@)x?wnE|F^Vod8?>6HV z5;?WK2(JB%NN*4AM_sJ2lL~*ejN1CjS^3$JS!y@Os-F;#2Lg#ype ziBtIf;eEpEttpwk6z`7(4kRs`(x1+1XJ1ch7}E+YA%>QSVZHO*In zGZRA_R0v~grJe2X50^x01TP}x?>D~8e+BPE_<`Mu?^{V;N|Wd%ti-$XwttoTaG>=3 z^UsUzf4+Fwj501pvO|N6c!wm8j{HB|2Qg+*Q_p&g4OUvQfP{#=h z>fsB0ZVR+4CMb5W35r{8z6CGx50`Tt_`I?<-XHLN4C};pc;No~%HMCep-cyT`(gzL zw-37Kp1TWIQ*b-C`62JiU*Yyo>&iXmZM@9><%Pd0KRNM7WdiE7!Fn6u<@VvSebv!8 zN8u*C#Ge%hOz-oRFPBXr%L5N;-WF}ct$pajQ%=GP3cM5InHlP?9FuFZ)aRIEF+uU- z%dmSbY=?W4Oi(u2RGOhU{se1`H7ycj>^5N-iu<0R_!cbwbsJ!~=bD7N!i zbo?SX*W>FrTVfUQtwv2VmA_tbIVN5YE3aV7qNTB|(6^5`qKpS0sa}8mO-vr0Sk67? 
z9DGkaseJZRpD!QXX@~MpOynGO38+=i{LxPoTpnP23^xnBYxXM0?N2@19ce`_wk-11mSa*S+o#I2Yv$104&vHFdC%P+Sqwh=o4 zzd^@}xhJ3a`qhc$`s@FOL#pQ!`*`k&KA~v;z3h*d zmG6GglrTgoku5vG`)xLTnYOC-uFENh+R$#0j$b=U=c~fHOaMF35MVW!CR6y+qHT z!1cd1CMfv3`plo5SuVWb!tylE&zNKO*~_AfE?O3Y%<<#LmqivCFX=2ei$bnx7$(oT zu-9#*KQ+M5g4Z!%isL^Rgnv+oh*EGxdalCBuiiD~E-{MQ>a?Z4i3y}z4}ty&nsHTj z)u>}y9eMp0midVFQ1vrMOvOdzpKxz z1OP}X+!hfBc*nOy3W)RwmAy8t%Q6zst1J;G49Y9Lnpql@;euA4te;?#F}Mu}9C#p> z`CV03#Ua^TBK96Ed&;u%Emc&crz`Q9JSbQtEl~~q`W07Ih-(4#2U)?3BBkVW_)AzG zcJ|rlmMdZB6OTQCnN03;hwW&vdSW7GHF=N?_wnPh#O0PW-y@26Gbz_rj z*OY3gpD*s3A00`*W;K?oJ5EwSq){lqWQRTgQu}i+yi0M=&aZxb5pH5DVX5WUgn{*w z{saNBFPX@$HcI=>jW^;C z1Ep3|Qfy+0)qbftH%urQc_#{g5H=X{^+$<9F8oTXRS>BvQL~b#j@NN$7nd~NeB;e! z_dRwi8)BdtjXe>GI~RTC#8Xa|L#0F-g(ul8%47xVIt)%-5f3vz!pu7lW)dHRR@Bj# z=6}nIIAYa)NK%rT3XX%E>15gya{Xfoz9|5FK4(M26X$i9i2Ay8s7 z;lam(2EH&l1wQ)IWq&I3%{!k=P<(WId>Nx6g($`!lte_xQT zp%U~Wg~k&o11QoYT7|=Mr=>myQK(rdqEb?SbkZUFzcC4`@oR#Z8CTq+pD1HVO39*7 zdC!BQ+{?VLCI-!>xWTcFDGN&2IMg3D0MV&@Vl|^Ue&`q>`kg)zYBt3QCss=tGMIW0 z8vt;54oBLsvrPZvdJ_F^ymT~WQu-7(0#J~$UjObsg%Jls!XMQLvM>-O{hw(}3ZamE zDdNp)$twUv3@At$l9GsgnX%>7nImW z8CsHrwlWxuS&@ly_P^GNSjS5x6`ZaOseDiUjT2}H7TJwTVRIY$^goN0pTETx7u@E7 zFMKxs@Q2IS4>&;dGm`5}^dg!D{c2C6Rto%KeIZ^1U4;*n)>&_D9CrGJFx~AYK>9N3 zGSL3gc}OrsC9VD{RAZXU01mN1>farIq)sHT$6KwW#tA0^q&=oxKV@bcL_cBg@c!dc zl;89>V-2+Q)Svo2h#iQ;eGn8AKtzaxREzi@fOJ_J0ua$*`YG7>gkUQAN&bKO{lUsJ zi#GvhhOeK5U!9FO4_FHZr_9dHqFnBbEg*01+~|ek)MX~KkGN`F(y%jNK_@7&nf^>{xGPj z7iPfme@bDB`n49QBa~*}hDc2nw^lL@6?zCO}kG>?`DeI9sk^ne^ zSJMbE924dnUIG9DOuKKo|0OzJbB|$*ryY0t7+%(oC|}|P#SWODkN`rVg>>VotjP7X ze|6fc#9Po_;WXmLeFghc)B;-9^8VtV;CSvQKS258*;yv-AcqhD^e(-?@v7OXW zIC%R$dFlMQ&wsY8im%qyDFx3Dzm;#e&sUhE%U9)I#loPBTvUM$`r6Cj7+ z3-&cuU!(c`vF*0o;T(#G<(<$;C!JDO#JiT^^aX=dMr))1==tp>@ZT|bIsF1ADAv{$ z6!?<=nP>hj`uQ?`6jm7Q^s!yaXFmHGJWgsqB>!D^-cb%Y=n#3aKRdRBI`q&(#BVAa zdTji2jeq&e75F&gFsved73VK3TTVFs1i8QSw(RuP;A7;aF^c^mG}94==~yaPkqDD3kt<*5#xOw^kZC{YadRvK>}Gd;wbvT_F=j zJdMCrN^L2P>7xbgYh!nqy?~#soQl;Qzy0lR<@y+j_lG>}UAhOJHF>VZYq(b4!Wk&s;_Dnt7R@s2 z?AQwJTe`&)&Y<7~<<{Hk1jVC|@u2k!4T`?SAnB*3Uw9Zndf)p@L8VCgu~asVzw)4?M3OZkHxDkdKwFU&2Qqay zQ{EeKM|GIcWxTGS_#yhuJ$T;O@IxDxgAU{bq_8R>Dyvpe584G#!MjXC1PDMNl~n-b z0VDikwM{|s^2Rv`rslV7a$}iuN{O7|H^BwDyw|}TRd?E1rNz?WYdBS$ogA2={Tb) zz6mvnMzX0)(S0G2yo7^ne)+2lv5f6}9AxvKG71Cdhd;a#CL)%V18F8=3E}X9pL)mhR&C5r3*s&~(8RW)x)6a+U$+($w=^yvm8wenH3(K^IPmj7t_eusy z9RW=s9ty>!U?cdZXSnT|8a`ISCwX)I*0=Gs+D&Cm%+zz{I(7Nh1hDWRCE6x8`XT+V zY`pRCM#a6lr^kTHANktzO|03XeIgkS^o=*(fG-tJEdRXaRt%^M$X3-SDd-yDdh6oS`@06-n|`k-j~A28JO zU-&xYD123MCsu-d^{ZblTWq;C4p|x_`p-T096VJ0q`dap>vHqsEa|Lx5j9D!KS+TL z`9c|f;|+CAOmMA*hc~t$iYa7+a{o^xk~JLiypg{(2`hY{i%WyWAQ$890ccO}6Veu; zjPxPDHfGg1Ay;W!FkkcVQ9;!yo;P0pxP2QBYN(m~h^m!)7%#*4Wz3)dbXl2)D=2o^ z71z!VNp=-R{cpWJ5i3;i0E}jx3|V@8I1NoV2uykt*Cvr_KK$GZm2`D_~w`%24faOyyMyW3<9M(N9`xFdb4zcVCehT3uPIZh&S8 z{U%$+U-0M(3QSPoLFve&zORR53r&x{ZiNR(OsCMnJT#8fsJhN%lwQyVsw81jqyh zws_hNO&A>B+LQP~0R?9r}ZvwVlA}zO87=f?F1Y1cU zn*|2z!zjlyo;QgSAXeWzidA`Mp7}F5OmSV@Prq@%H{=H&McsSv-T0yZIhmjsJ!Vwd zYmdFkd<)Jm-9ptuC^FKIag!N?AAHN;P(|5-OY~3&yAkMy&`+>-SyHl~w0uUAl^eKs z&J4eDYk+tutdtt7Aqj##tMT%Nhj()O2Yywj&mS17polLV-o#4+9+jAx@5C3iN60n`8+~XaOoH&m0=N?k3oO3*#zcIf`wEI| zlNrk>ybyuWW|K2$`wy)jbVT;H9pU&ByroDq;Moz+Q?KA~S019u7lkvUU+}j;4P>SW z)IhVM<~7*-3fgBRR#|fk&taYCpUR(dZbyJJ!0H&F$bgz{Ll2@70gjBpQaR*zG9*EW z--9BI5IV=6@p{F$|A7o2Cql`lay62|Yj6^z{>-hqNz$!q>UE{`217NSIi$#UMr!G|7Pp2UZrZ{tJH>2W{Wb*G)mcKC{ZuDRw^ zGSG2?;$&>`#O;;lnRkAypxC*5bVuFd=?1(*IPic2@#6Uz?B73|Y~3>_zw$>j^F0Aq zfm!A8J7kHn#8OL@J8!=OzuS*0_hJjGaCn#nw2Vp{BZESxS0rqdc^_J!7C!Uh~#Yy-wpC29S?{7E3 
z9im(pUwkn>a6G2GjDxb*#015WuxTWIuX20npI|$zi(v;pgyj}rpa1+9Fdnguas8oN z-+tHaa#qem4?P55%~cM;A-k)twuUxKsELg1{&6_Yli>Ym`4yHgC;sF_yttkLhD?Y1 z>l@gH>AdozAN?4vnuL#LK3?|N^W(BggWF;~{^V1#Qege{*OvpN1{u zQQtqkj(0bla5)HDeDPh_qIjow);Z^v>#w~I6Kh|UA$XZ(mMO;_dmNq-XF=P@=r8?; z`X0fv=-OC8u@}zQopZK1amK_c$_iW+BJQw` zWl+jskP&4n`bh=4P?PjaHkDQAU_3zL0H*c&72t;rPP=c6jxz}@&g#i_QpX(OLKw(Aw;uKg*t}N z;mDn0002M$NklfWC0HmZ_x&$Qmf8T5Nnmtdv2zt)>zU%tG4ev9vXV$EVJ$uj0N}H(QQEeL2 zP~oD`=4d`cUZd^}fx%l*?&KjRR2#FWLtSfW@lGar&V%FxbnjM5mg0krwueWPMQ_VSDIg-d9VW?**~$s@S|5C ze|~c*7Tk&z%VS}?KpY*;U%1dLhvTU!Q>KU{K@l(lu5NO*_{_Kg4uR-&iKDh=?OL-0 zZ6(+D2oA96A<0ks)09HwqRtJ5?u1+;DG42RjW<*+&N{T&xDWl>{+v= zeO##W<;^D!i!Q#noV>zZG+NGN=+B6i#xrT!3d>5^8dF!1b240w+~bM}ygL5(EamDc4gF9<>&w_ z7I>~v8PaDk8?oV_w$4eYT~c+j(N$Mj0t+C?3|b6{kyY4WQ7yAXsp4>URmJ4OhKWQ9 z5|@5}>gU!in}sn#!aN;)RVrV>RI6Fj5=DTA*AG7|f{Cr=AbcE$S*^N|Jjsl4S}OD4 z6#85YHbJXbuO^JPjljEL{(Q4x!v=9;T?WsXj_nIVJ(dlp6veMl;at3E5pGk7t;xJF zB{pyFEI6&&jLqvQY#y`2Cg_o5=y zLHT_laVYk(wP~Hqb8#2Z5?+Et?LB{4h|kboHl1;eX&3@{fz9#rUEDi=0hZPBgJI%njQF6)W?q6_)pBJocH`hs zxyV1SpBQ*){nEH_5!V)LXd(UI|4{v5YqVa!7Kv*{I5O@QO>=P<(e`=?61DgI)ixMx07)|zd&SA@DRHc>%&agy`}Wq6<5@OZ;Dc4o{v&++=FBV zv72h^wQAIW^cVbU;Mxl{`gwEbTBnm(r|1lm`=$l+7Q*)NF43UL1!4rv2h@f{B5fj{ z!aBEg!)8d>kHx%AWeOH9Y_7Ye4J7M$*tEUm;r%xd7*3y}Bf|Qk;=TxL-<&yfM7nV6 zw(X{H!9wP8>|aV0FD~oo5fIex1u!t@iMS$kUnkNG}(7>EpF2IYEuOJ3+_|4!I#I38Q+?9*oSa^ zV*6RR~SLhNgQ zLOZc0gL#lPT{{{z%br=~?={-bJ@YBK*w3?pt{wnoOZHY0mk+6SLA zXD*08>EQgcf@#vUF@fxbXEbooU~>TbL-G&kaI+BR8^?k~w`JJZEW$o@8xH?8V?shj z4(uUHU>{iprc&uxRkTQfjrG%ED)&)56Z{Urpg*xUN{4;eh1g5e+}I@`;?XQ@blJ8> zE%sXP4%!OsK}lXzmJrGMuO&28Oi>u&(DJTYX7SPtf25^YR5k|88-EMo^+^!=KM@YC zM$33;LIf@+=ANm%w7g%Nme9~b`oA`3J{T15z6~eY>(;^~$y25kd{79nBSH$afW@~$ zE-7Zq@|SJZEWdsoOABd>#{Nb)v?>Q1M%=wWAd<)*)@Dm;dB1)wt6^;2wlqi1&v$Vb z(NbQ*e{lY}T3jSa{mcsoU81-96K_Fb?QXf@^6nFe~W$ zK{RWme!Lyn{BlInVV$K3X3|Yw?)QrLub?TQd+xgr>q%%H32>jnug3fD4iK%3E9+k= z49mUn)9?l)o@s`M#OQ9&%)AB+3HFSA-Lrd-x$X8_!~r-Nhy!2>s7jT~MIe$8d`H8P z1GK%qAcF!vC=S5FMV;Dp%q!3~Cmm#x2l<3L8w7him3ZhSkB%f&Kf@PH4G6!y`QeBk5TDEvykMVA`rv z$IjCK{GdY(8#xlr(l2z+cf{~v*t?E5yLRm|wZY(`zmMX@i_7?a2nI)5IJ~EiqlbEQ zm;J;U^aaEnX*bVlM07#A3oyo~F}Gpj>8me=9(f<$|F+k*6%$Xkkr3JhZDk)cg7<=% zwjX=9ZI}=9=FTzSFIo&AkXfK{aXtLP+{NKbI;Kj^moG**-fq}kM2h3bOOU9&=Pxfd z-Chx##st4+Gt`VQ;%*bOyiy*NMD0C)d7Z6(ucQU$ZyXLS?_ga-i+JyGTv zn?5SWSFc(P&70Q|t}=}pHx_pXw!BwI%7d2B_VcNquu*$#EV@_JA%{X5<>yLiB!s&i zm*pY^@UHmur1zQ_hF(BSTxY$}MGKMeU#MS9JmCOU69?KmK%5p03O49$^(VB|YdiCJ zMKcAGeJ5Z8agEppOpgOuDI6?lM=uQy4Gxuy*y5~zKVJIr_{h8uVq01q$ms;i)n%bBY{rNt~vXRyuqPdCoL-_k<;i3gn4||smiwKFl`SRz5DV8B3@xKr@_$Gfj8HC;C z@}VPqJRCBeb_Z!^Zz&iH{NR%i&c*X2hy?}E=8Ban!N%WB zaGF#W%|gT61Pcc`4Cs`JoctwV&hXI5InW$-9XDYd#*P~Y(?3|`klp|U2rk6)v6Ve};M_W4oNqC`O1UVq1D`Yy5LDcW%er6pA6P#aM zILYm%ifgNuEk(Lx=B$}wzKZrW>(sqM9D>mfr#z_GVer2e()QP|kT1m;b3;e9)P@Zk z3fFus4o{;;j~1uWw1Y{9NzI$JkOMCl`cKDCgjC8hvmXm9$Adi33!v3fvScyS7*4OQ zzaI7#Wid2n*odL#v(G1q1STDh?FLbV#CF!Gb$|`gAi5f0RVv`1Adx6SaJh#;AhLLT6}Qabrv7 z7&j;rCQN{>&6$wS*dPQoKJU{fPea-00(H?94NMWibGb-14)E|L{3oCH*yYkUT z9|{qW(li&9D+{#$nfepk9#&<0680_ZumY#lf9(tb|Ov2Z@GODL)`zy z`fUfa@nIMnXv3s(GAKR-!C^escWO{HgGS?Hk3I^5_7O1MC!2R5vBou^UHf(-sWo%P zEOGcv6Aw)1LAFEZj`+scmi@yfFsKI(dJl|^S@Ml1LHF>X!#I?#0>S=IA#n4cT(MFG zu?t)eS`Son{}I}rJ-T<7Ph*Ah6~rV%uH2UC!G_X**<3I_PM$a=jumTx0sjGPHh=Zy zS3*SRnoP+)693EL`}5F44+}wFOgw>LpDlY<^DcxV%0s{*ddSJg{}6~TwD&t1{U;%Y z>mp4AlqyvcMCe^uM<>Fz@WEx5}be2h+y!ebKPMio~m#M(B2KA=Gx}F6#v=4v~ zkRD8sisHPKOe#l&m$??rza`j9^at^08JI3~{K&5?pHun_>EK+oEQlM!@H_&u0D?6W zKq&eZd(!=A6Ez|EMLUiCZ?^0?N1QYU%}ZB6ybjTSL~@yV*Im!kjDKl1&*?TSwz2c 
zEt<8&v#Ks-j~+c{=Fgiad*jt>ev>^&p4=CL>9bqj4LUKUgV*ZSstcnwovfrXtA=6E09?=L{3$PGq388$q7^6az``-`DN z2E(k*6bQlPG`GUHK<6%9WcYagojYeP*5NH!`!k|lSD+nvg>LckD=&+Aq4MR*!@OWO zQypf;^%^&JtT?6Sxt78YB&}igb`y6NKH(q}oef>k) zMTg*(LA$Zm26Pl-2YwRh4sXFkG|$R4v4436PO)=ikHdY@!tWQtC*>2em!W{&fc^tS zFqVC$Fe2^Ov(Kdew8A>rp<{bfyjbxh&l0`D%R2+-V*2X%1N*p&Sf}6V|2EdmZ1S40 zzV{(0NV7r|0=VUnxd0Fn}jk(oQBhqOFjs7@=2}1K8()U=R)X(=1ysvs%zJ-Fu}E3 zXhPH=C9RIMpc*x*V~%AI+8%v_%mtHxelhCSp$0`K_%W+s#*G_mh7BDq^qD$!Ys>pw zqId~0@3d?8PND0OK3A|n0kL#K^YQc*GUcl;WuJRLG$`oYo9Pscq(Pz3N$S-kzyF%X zrrS*yQ?vn5uEr!MKHD1r|6`zK7-|s>{XC6FEl3$kauTv_f;h>>!Ij+AJMm$lqhP(X zh=!W(hc=JDyb?$0`0+n+sQk(-fOO1m5Ur^#P`2zvIN&t}0lShM;>fv_mm#kBmaSW% z2G|cA{ss8ZUSMv%;YQO9984c9YFi)&Mt-a^;jo^vyfBN(pBKXjUC6}EtsOajWCuWsk%!h|Sgg8`LD8(a zNa~LS!+KEX8r45eQ+S( zAsd;j*|LDKR1O4y!Z?7!NjU0bUVcb;+yVl_jgUqm@nHfE-5k@*V8}GO`YO{0>Q>T1 zJnUoWOS7g;%~5dMDGl-gHYwRbxS582lC!-Tl65D+OsWc|RH@QsgrUR(`j_CmzXvtX zqhL@_;_`t9?iWW)eD&-Hn};7@3@f5DC;ceTnl(!}_1m^>#b%A!;;0C6m54 zN`N@i9TEVXe}DXO7>n;X;C#<;<`*qqT;@_$j6)_c!5xZ*Z`jwoRY~JuvB)AV3W1DV&wTOl;&nq6~MKK2#MkFU8+eMM@oD>bm!e+{^V${&fa#3kI?-D;S5lqr-4=V_n z!p(CfY(yb6jLVi=$>R?@k>nYMIZ2x5VRhs0rCE$JWU;Bl7+o&U;fV4yCNK^#Ce_}= zl#Kv2rlFSf!zeF|G2rDrPRWS?ZN7W{xQ(el%w{tFh(^OGCyGURQA`RDL|-pPy=+_B zOGvT)7?PGP7WW%riDCl6TV7~`m;Fs`jpr%?|TAe9X&X($Y*z4VvU3 zOSIG2tP=Tmc+1R0I81BSMR#h?X_q_Qq|sE_OvOls-dw3U0xsw))5&Psx4e*7Kgwvg z?2`)5B?j6p`Pl0Qd)*xQc;eosD+P*T4b-db=+zl094IVO?%xq_Ne-KZPJPV})l4MW zZJn?eygeeA(%B#;w7AOtTgYEOT<)3JxyP5q{)-#>tL=#ev*_WJqB%460U6*V6KI z{df~!M1OMKw+d8)Blcrbzq_G+3v1;1$@P@eZ08dEdPK}+gYsLt9lznat#=rCLP2Zd z+fz5ZA;DBAYA_mhDtkeqR1p&?S@%nm0(JwA_?#?cf6k&9I;y0!b7`Tr1w?N(g)r4{ zp_d}7sAK(YRf+-Cf6MML;{C+EF?p|#JrOv91CD_!lECj^@<|)b!!{o`27G_v*WF zW-QtMvy@zEQzu77-Hj}Za87|dFQ10`g;jvmq@U(L{?17K#K1e7^KA}ye=F$ks^6md zByri;jD<0O#06Zw>xq69P4Q*BQq}9fv8kR^GR#82uP7+?Z8;%p?Kc=`rs{q9`>?e` zj>UlY3JXEl-8qc3cGnLBQ&K5@27IC|oIg5Uj5^>Hq7=zd{ZEm;M?d`v0|U@3?QxI< z)+2*Xj%G`8R*CloZVEmcW6J^hYD`=E#*yi^xo*Yz)|NV-z(s5IE@%EqZ4$1*^5kV_ zGBU=m2*`ymt*VfKl?#{m)jIFT!@jZ^Epw2v}`PxZ&@?74Y+s0!A-zB z{JzI-@Der;bE)zxpIRFq3YZoP^#hfS-kaU|R|Jty4j>(?4K(#uBX5LBc=xE{xl%kk zB`Vr|hwLwLnM&gS>Wr|t`CiNDND^;QcQSr?F$k}MQIu!}IqChW6(&=h!tM)Xv=t*b zRwPG82&6N}lF;j{dUHf2)=2s577m9@*BP61m`x3^&ujunHyz)RCZp8n4( zfbonC>f`I({nn3kpDK6W$d<;An6q=v;7=vGP=)x?&twQLnXm)jvrX_jBu5_9mBx1U zn04SGMNIvsFV`eXgQ9bEl;I~8i?fmxxQUAtix8xP865@{sr%YQR9YRaMka zWVF*rx1kSBPGv_c&H1^6W1Hg|Lz?WTIM+q9u8-dfjq(6D4FI^p>W(@I?>pEGnIpC5khS&5`3;APttEtS?!OO|>)Os=@bqE*n^5mWjaghsKD zE;Xn?ar(X%A3@J$F`)8xU%2 z3!m}>{!mB+k_`O&ye-P>!*Ti(`o|hnJgYYSbFs#v{o~)rx3GnoF8K3dO5v~MrRGmH zW#~g9S!xYtbwMcfH+6tCB{PEYyqsRS9mCH-yGxy#`RfIF&#|t|YP2#jshDUy-xj~I zFn&g*`?ZSy+mjo7{Fi`aY(8DNI^hL?FiIvzp`J5g11wEUI@&8?^Se`}efhf{if zHKV$&Bw*4&ud;p=+>pVzia}+TgqOy89R1Nc$z-V#hk%*8(!kdnB;QmiTw7y;tezxX z^dLuY(GWU=?1_#(s{{2A;(O0*D63$UC*q~{=6AXF!%$yzE+8_}K$1>C0xJ5F7>t4h z2+jBboec7Y)eM?N%HX@ori1x<#Gucm2F=)f2AufDV>BJCt9KL9yP*$eV%R6iVZaVHkctt!o`PEA1rNQmgfsLQ2kVQ|FW5w-bV!+_(66o_E zq|8f=KqQWTRJ7=?{f}kKFJ#zr4R?c?+4pa0E!6p6CgWIw>lUqz?q{>gavlO16ei6s z>`s@qQPxMp@}DQdnFQFlk;}tVWQskgL<;r!afsWqg5?0U`}2peEdfN+XPtG2Fo10|@@0Y7g6No^LN%{6-w*W|Xeu z?#Q&{#n!l^i1#4~<+I=b34O>b8W;iNA8U0`@UHe0&n%A2rB;>=4m6Gs-uQ;B(%NN*Mf*4xY7TW-6Zyk(P84kLJS>a zy5Yz?hhSJEdr0f*=h;qhTE_KcwO`@VzfVAD`6fR9Bh0_qS zXF85aQh|)A$ZY)WZiG8iC&gqcG$y6|5t^1WFK7ARpN|)GxibyokL{w9C@qM-i_pvI z#(&S_ED5w1uw4Bd1~=EDY<=%l2G6qg0mR%%!^l`FvP^!Qte!21hd{t@Frp_ExLZ5= zg_pS4z!ko^J4x909#l*c-5o8am|lg2kBwjGr+gH0{_!Q*n9y7vBqolD1Gf6I5; zZMW7+D40FAVLKYzqx~s{RmtiEu@sX1o+OmYsj&(xG2+xgeWsDgJR9+0DRa9DRdCq< zn!#H#Z%v*BU5Boy*=MpdFbfc?Zy_Ok482&>V>Q?5w}kJsMy`jSSs>k^4@fx`cqv`Q 
zUlkplw1my$!!Bfob?0!4(D<{vO5-LXgamgMhs`fJ-Jd(bn&V1p?mgN_Qm@a+Jbi{@ zY4-sJp#N2w7r{3TK^6cnT?8FPfAZp16C1rTFPA)aGd+bJ!)nA}E!xR0FxTgg&npcM zbha4FVXZ3aNYe$I%$9+kFd8sX2xsiG^8bzya`WoCq<|HNLlePe##c}(eR*h|+-E+3 zXGf;NQ`7Y*a*>w`-jLOLI4I;n{8M8D62a+f&OKp^a`FBZ_9!b1B!W zacc8CMUZj+Sm{xFiNV+}3Mx8ZYsYbv3g+T_lEJvPvcnEc2Hzplj>vsV4>CW+hzjHR zj4(?smuQrNUd%Os$c5#`UVP4Di6f7a3691NYaPxLQdUwga!mg*@+7?$su|-2u=DuqaW#N#$kAE`}q~JG=$I zT3purul|a<_D1y5LQa)X>MJNgcAp8J^KD}Nyc*VDa2wv(jl-3UbPf)&b z+XNGO?W=h5#_?sr{MEF@yL^Mr+%nQn`VumeA3NF_*?;Z<210Iai80r=jHn3u(=e ziS)OhbV$M1tv}8tk%b``f4WMbfbk55bVK9Ary12gDVFyo8GaXGf44cMo8QmTZ?-9K zMJov1y|KxAfWm(cFM04zUW27hVfv5CW&YV|5`e;4Rf}_2fZfZqKO#R?*=0|#2j^yW z%jLnk#qJ@V#dat(Q4C!|zpVlPkTEYpSz3C0nmQp$x!hdMO@wPu71-a!0KOO93r=8U z%drY#)RpAC)u`j`n}3IA?CE=(^=YC|5<4qy?_D^@Z8k5ZIvFylSia1~oW)D$!QTLB zoEE(67`8q&5?*h%Wg*?6^oSd$rC+E!FfO(zAg4BkZA}e7R&VK4To5`ZI%3!TGe%LN zOc*A*9wwy(un%ao_wrK`dTW6XWg{Lg2Ti_mcHZOP8t+ANFeu%p+vtR#VJ-dqb__oM z{0=hHrG{jx{6?WdXB|ogG%SkbmV#lm4JA=<>qH3o$mZ1P2G}`_->(`_QBKJv73wuqaYqD2uNw zu$HTCdtpx|godCrP# zycP#pap`(5K~i=xy+0h&60ikU@$wU~DdwY82VL|ibtBQ=YW5DtVvNKaYCY76B9v8$ z3z2AdCL9Ori87&NMZjl+3Lb-kA>3`G$9880H*Tj>bhg++O`ZNXM56wB?DZPIoEP%L z8=!ANXx3F15$0+XY#!%@X;Ok8dy>&e2wC{fy{$hU$>;H+ai}f3vR)H~l8Z$?1!WX| zm1!~+w)y)*dPNzsU*y8{;orej|3*N{F4JX;61UJOAUKd0h%O1F|e#sgmA zao6=WWHP|}Uelv@ldY1y`?kywQ86c7a3m5AM`%Z>h|;Kxi`WP|(@wEdR{Z9G7}oxP zM_-jh+^0)uKaTZf%MeG5?<=(oI+zS@_*LwkN4(^pR;+yX=s^uy5}}ld)dA)F*eiAk z-AUMnHuVqI{mC58w4XNqr;$L!mvCs7CmpUBvzIbkG!7+_^uCV=Y7S90+Xxx~D!r0h z-6K9y%o{&D(lR6IIAxxQ>`Nm7gv?&<<5ydj7P}eOQ96)JsB6s-J;@hqwTLZa?A%HRfApm{AgykA5+A2gXYTTacnKY;<6V(MOJ$eqU|nmt}H| zB`ca?^Lm*VOg@0FqBVmw{{$+~ic7Z@Rl2swx$4arLe66T66QWsl*fIAyvWxl%|cym zMlx`JXP)03J`h21jT@HekA^g7g7(l~4BI;AJfJ4;JNk&YzJ!4Wn|#Hg5?TyI0?-O- zyqF!a4*Y;!zUD97%kGA?uV!$#!a2{WT99^rNs(0k*4?)_9&&P@ulq7neu3ir;3EMG z33++77`1qP+K?>3l6)Y+oI7xN4vQNa>r#6x)wRo~zhb(5Xm(mt=~|d8uuJ9_LHUyh zW8cvPT#!_D=7-vAVlh90d@&sn^MJF!ZPuC&+7Ac8w!l9!)mONDM6@?SAfigcE`)?l zzHP0vh!u7dK@5VdG`lQbTLHm!Ps9kWzraMaq%pF}Js@o%0H6+9L#c}07xJ7!(apk7 zeztYrxZ;cXOTpC2RXip3)L<7qvvu_X9cf3Ca}WvK|7H(rpz%3dMze2u1XY^=kh>+X zyP;Jp;#NDtjm@v0Kj2)njosl99=@LVDXGq6ApVf{OFfdB7)fT1jRv#dMT(J_!$k$= z{Ba(4hDL1(CST>Fm}Cozp&yCzOmL1s(1JT_L&9K*ksBedLoz$5_}|oX_|g%bF4Gv~ zE^G#3@KW|ooa4T*v3M7lG}ys=a=i}|r*QV3<@juwAbv-_n*2@3PX}_dM)N^YD0`mX zeZXNV7(-^vq%VQlxS_@Gd0LSs5>1zAiwcaNE*p)R?$wD+Pe=XtGUfDVB>*UZG!|v9MF!-X30{HT}rdZ>Xbs$%=dE2kk=H7q-*jLAj+Fy$wOBHP1p& z9$6@u&f-@UNCllJ_aO{H_Iarz-TD!5JMA3OuC|O%33Jbx6V3t_4W{QZ5~y$E^DNej zY({ndCG4Vbg=5n2I}dA;;7G^x^1!OwUyUt&?*nfCtKpBRpZK?~YZ0C#BIvvX*h(_! 
z&3M!(CaY;F`id$5j4F1-hTxCJ`Oxv+p3ePdtlpS*&Aurhm`Yd-&=xAeyHzZ!g;|=s z-4U7gDY2!NZ*OnU#@QDSpsS@#cO4rcL`Y^U@6T%Gi5a7^#;vcZPbIgZqR-)(6ETR; zx2=DLb3%-{p1e)m0sHoNz^-YxlV794w9U;Ej@A{!?iB3x^=Sg8SrZ~6 z<|37P5ppwj;uJlw5|YxZxh*o4gR>3;byX~Q@CaLw-{8kcyT1L!Z&I={`d6vyD7|Ey z`GrgjPB?&7%IT+35f-^X?Cd9%kgF%9B7ZEU#kU+E)~7pO+lSxgky)weC)Q@DU%h&> zG7b#?44+Hd<4J}>Lr4yE(5VIN7Nyj1Tp}aUs>AqD)Y(Hl4bxd76!YOf)oPrswwgpB z8(0U??%WTI?g-ueRHP)1Q!ssb#Bxsp5RjjUb>F1-T-;NJCtL?9osIE)98-EZJ@dKR z&tehM{1$~2=$A|58g672d|+=JyHBi4zkx{seCw{4ID{`BKe_~Cg_zR5m7Sf={HSNq zE~1}YJAMD}@+W`dk8UNO*D{0iM;fknxGDE-x&X&^=9mGovu-E&gy_uC!h;>I z%OI#AlSeuqmRdP({|vj-Z$pNuqnhjr+4^Z?Ij}h0esj>N5`n_ASq8Hi!ga!=Re0Wb z@m__#l&V#wlko5N%uE3?*-5+n7~>cs%_XgMBDkilMYifj!&%Mb9-l6!B!92cQl4pw zWx@q4Rzfn7F^Kzu3x17TuAx{FpoB%}))sKfBN5(Oaw^T{2^YGnGE$IoNW!ku4NnS& zkPo*C}n zM`jTGeYa0tmF&hkfJTJEjt1)vf*D|dx4ry+Dk+}KqfD;#Z+V=!_DKyUa=Nf9Q53AJ zj8OLt{)It^=A#2wh=Fl2(5*eDB?Do${ZGsn8(4&yQR^A*%Hb>RS49VA&ghYS zHsy`u-!KR?Yw1+C1%tE(zFz$8IBfj`!*jY0j=|YFzlU3jPewCU2}DxbtTrr6yVC>q zF*^6D!hlYT+Ku04oM7Q{7X1}&o1jJEraj$K`lL_*a%_nwM6NyVIn@5)J*$E2zb;nP z#@d=>4HOhiOry9W$t8zzMd5;%2b4n&Dvt z0E{+Q8=4VM6E~9ML&AAkAihNa`Dt1RqEB4*PDvuPAhcoN6aDhRZ>(Xdh3xkB^|Nn; z&T#9PCg~{H6n(>jp+;p#jN~Nv*WFSY#SA*;HdMpC*>Vn@`{P-3Wb9*aO*i!_LHMD2 zzs>2hk#=i6QWby`C=y3rkXb<$C(=DV}b%n-o{yq zRUwv3dEU(a?l5&S*ydDPDb5T-c|c>k9;{Ahy+`1LP^#|UfAh5l|M}f#x_iXi4PV;=t8?{D0QNLQ zJ;PwX<9nQdGKonvLIHr&fBj~V&~D41yNo z<3&N6FJQXp@XX3?G39N^ES5bE4o(E@$LF%do=%)bWu*epx8+8M-nZ$N_+D;@gC#TA zUMSVl^4(&Jk5nJMLH+p&@oVk3J)FzNm*(>q<$(R_Y9DFV`V21SD6n*|x6aNl-As)V z1%5~J9OF>VuInN@?yu~+3K43D0rB`1bGMynPc&wc7=#g|@TVU)oiYZf(1o2My`}O* z8Yk8kL*nwAFIs1V@1a|=7F`Z`W%^awT!{aA*ZE0-=luf&{DP&@z1+oLk%G*$-+i%m zcNfkE){e9MQO$RzAkU63I2vCmz#6?}Xtz^Hn|k86+p!r1ND@PP&Xx$8zPr0meI_U- zKkDAWeC=GlBdntgmi9*_jWAJmpwRv8g@L4g2+6sv;`G3lv34v*o2}a!>hF30OxcW`?yB8#LXOGgShLu#=Ri`h%1yK1E8kCj9ijX`c?0C`_W%@AHBi$r>8`+tL zvJ*OqY3f?-F0GcxKTE16Qd9k&3YC&#H+k&)t{h)c-9{Fy@;BVPb-P+~`M6ejtLkl^ z6Y3unw@WU>Y|lVhiF1m5P^~2s%woIoZlK4!>S;&=*<|D|^8 z1e%)S$kae~8GyXdk=P>eM3R_^ZN8~f zH!+sGk%%GNHt^hv_tmV^SjFAG^a#~Dy+f4qI4pX)-nInuTd|>&4xDqsdB*<4mL4vbn`e0|8 zPVplwhmg_z^KLOUQWXy4m%88-G|i!aW4YAiLbbsObT-S_<01q*$x=H|GHu zuSExxdQc&^BDqi;sKjhkgmnCyStda&;Ir95G?B7Dqys-p*1&^gx=v0Bmhe=tr)Z3Q zhso!mw2?JO)KEVSk<_FRG!b;+f09n7!MqcmYuUXYva=gl*jb|<^gUf^#~HYo{pya` zD{ic0kw2nVL|O$t6g(MEWsm0CJX(<^X6AuoE^ZAtcPX^j;tl)cog{e&ejW4>Khog~ zQ?h)YZL3d>h7cX>fuq2Vo9$5I#*z>JR4HqH>E~ma5snX>+huQ5EJN-+!35+@f&Dt{ z(0K*(P3|{;h)2KnkcFTpWI54#rVxc^pGwy7LiL5<+R&;*v~VM{(}X|U5YQkvd>`Uh z`V9iI`~R@Maw!g`#1dugo|EC;M&~#;v%?rqEluo zJQ9mNEJ7W@Ewr@`N1r39AWR{*-XyHRu7hRwL3DqTiYtkERhnHp8|BB5dj6}GBX)?F zA>Of0>C0?o?GMzxKLfS{1)|O5IAIF>*SP=B)DHf5v$F!DZzmM0`;B;nh4T4|&3b@L zi=%$lPyuU2ER!OB%43I0=ix@h9P5vkwf^LP4P;{lF*E%T+W=DKGhhRz~Ky0K>Z;h?#fx@Sk;6K4#l^Fz5ulO&h9M^D`q6%JPm} zrOg10-L!UtiYc7D>RBC1FK`BT)duH%<1zrBmA3roo{sqt`)(&X&KtEmY+3xr#@|!_ z=*1eJ+UnpHGY;dqgpmwxaU%V(#vd7i4BLlyq5B?fy`;MK6daAwtooI>8lpJYM;GoK z$h1v%lm+2akR%rKG3=2ck3lCVv?RYpiiOHsjLZ^|U)Vvb zNBK65L8y`t51@3l&yFNX3Y=Rnm^~wR?UHx$I$1nlLKF=@6Se9^nuYV4;ogtr5|)!h z(Ue%ih`y(!<(FoW2zoGV(=vW%wyR=(j6S`<7h!k2hX^1 zD0~WW6S=%jdQGARg2HsP)nJ@e;U`*R>J=*v4NYkb8}9wf(R3!c{x_axVFl7&5a?s% zY+jRuB(ikuRMs*@L%=u|Jh3y!x9guW?BtJPN8TGRDMCtgC0``oiutJ_&CSh@O#9S_ zr*R8UDN=+0WK^{4OE;7uFos0dhq;-fh4YVgK{gRr=f?OVC_;akz;VXciyJm z2{!l5Kq6%TZ$U^%Dgab};?Kq9(*7b~zhS|p()F!Gg4tplt%>uXr)lR6_)JFz%Bme@ zMTj!+7uss@q-zgr>i%Tm+EC@Sip*rk`)P`={?i0dSEt?QoExQ3`m>5ao4u6Y2&Wv` zGEub__A-ewXC6y-Y`{ykU0M{vac_HRe2X9PkBiY?dmZ=j6oZ^n62)*d)b?A9tZH@A zVjZmIsfQ*NEXM6sk}8Q8jLNEM^cQJ=KepLf0C32xhNNU5Iwdt0)fth%*eYHB)9p*2 
z*t^ovt&dgyU?{Bg-Uyo!usmNzVmM%?wlzq!J*cPcfIO#VaZkXgwE3~EG@iK_qjQEv zAo62rW|E$74Jf;+us9%#bCj4s6lK8vn#HFmO_`+@mx>8%;j>18m0hnk$h54mL zA1`uz?}v5h6K{-SzFf|Px%6)DSmzk7kopXlfPvI@aL23Cq!BmDr`dtH z(QDeP{ec2#Cs24<-t<-1l1R{RDm#5GNCajQVE-bA{(rduu#Um*Zprs)j{r3abd^dO zQO5rhc2AS#%@@F&N~e$oF@iRgHR=|6cCpiP zL+smqD#FYW*ZD0Km&%O8i)U0XLjrb8CzljGDPO2$NP;of_NCq&Q0zqG7*9+Z&>W^< zS*xXIe3iySoh~J8FuX%8q`~D?oiCq-XCSq_Ez;XM)TZv~=mY3h`D0mXK{;Aqoa7q0 z6^3xtjj+i@fDw6WTxL8b@GVZtR{#3TwaY;U{*4wf?J5L)ZTv`pP|LyRLknT;7P6OI)7a-gY zJJ(5;98E2F3Dhfgzf5+Sx;V2@^Nx zKpENn$jtl<+&g?zy>tF_rdX+a^5RlC zlWJ=@B0|Jy@eZs`_L(QN!GvnNgyMPie`<2bh}`c@k(8H1qB>d_X}`^6$8|v3jOdl| z)1BM?ga+I(7a@?%rbIf$Bgv-aqw4SqPNN`0%I#;01#o2GHjPn#qD{Y-68BZ`^Rq5$ zCF2ntBtlJzyxxMnKG2c1q2~XY1e$H#{T5F~o!Q?JR8HGfZp%C>vP25YPIos#JA9_F zO6=DBM(;Lbi?c5OVF2qxMp|W5{lPC~7vDuW=kLI)-~Veu!dV_INzebWCP4k*VpRT< z!m)jvD0TBNTom_rRTan}e^|sfL|ZJd*4wJ&^kPZc?buWR8~fEU`T7!De!ICQ{K9sF z=#jxnIQU}cLSfd#6!_kB7D;a_XyCukVe^rL1J8%J34GVl0Bg~-@7W#`)q5KuPRHq` z8Xzfqtga*lUwlMH$F-FlYV6AyrZCbC@luLa#{R47>ZihrOEXOg>=$y^oX}iBP zW$r0@AyOIf3{XI>3KCnwVEpF$>Lw~*2C9ZtHjIzHBomQ)sXGwUn8(QY7tequVe93NB>z=+4#woML>;vOy3V{a zh&x!=2~ZNQr>C+Lf*M^Kd(kb`xWSjwQj(w0Rhynkh2%?SH{)IVR~IS)2?JxI{YJJ3 zNz(E0;~2+6c%n~T+~~3#@0}9%EUi0uY;@kC9*7XAF^J9VSW;MBStW;k zN{X6Rg+D9!Cw$43PL6afxDkkv1~!+S*=#A(ZRcS0p-a_UK}otc(8|fjE^F-o{b}yJ zB})Z# zTl;jlLaRhP^Dxh;`qh{~Qz&J2fI#pgb>Id3IA##QOPGh*Mc~Y({R8^HyaK<-m6b-p z0PTF`pk=iO2eKH%*1>5lO*^eqByjkcx1plcPIzb zD@GJ1C15n{^bO^KPu}}ihnQqbD^JwUxKDyNi1eZm2dscO2Ib&?30KR1O>|qOE&@bm z`^p@>flYfXeF~I5&y|^w2eVTpi>7Rojy<`4N_+X@uy+9DV>~plI!x*1>a9h=Or6`O z*SkNSZ^s|cx`ld|g{nKg!&Od-YeiHMn<+CeQo3Gqd%5Ym5W<$hi&aUNdM?bzP!U*9 zR_7oEf&rb$0!f5tD9h5_XbuApJ&W7&1f(`vrzqeeMEbzd9F#_1X6GaUzU*&o{V_+)_5#-j-G+krE5>Wlzju!YJdf| zo*hgpx$o1yGE&fjmUMI=3*MHmC&=cBMBfK@{!W7IM4nnBDZCx)*o75Y>a+@^X#AS9 zG$cv&@=XxmAP`r@epuzhe}_y9r-(%aPToS*xMFD`A>Ww_xv`c=6CFnyb~ACdh6#=H zyE`?pEuW4L>`i3{zmgz?W0bL3jqe@aFTBxeEVAiurv0?MGyx1ogM@4|TR%?^Y8kz( z+_1W=Sf4dxTuN)Pkeal=zC${H@y*CTeRxVCTOhiJ@M+dDL`lezWJ=A#9`2dMv|ZVr z1?pk1S9Q)W?KdWB%klTYy5%DhIMjW~j^Mb{G<6Pk35wJmW zMrlqfZ?5>N3$0W!mn1VX2Q~FS-vJuJ0muugJW-;XoG2u~(F^G3qrC%^0fo75PWf>&dX}u%MZ#PwOg4w&w`+3l(b8A%ji2FJFV+z?;g*~hk0zw7?T2;UdyegX3{?F7iml;ZgHkt zqC8O_{B(2-;$WPdl{Ms~nU@R=Cc5a(qB7*iuLVco*`Jy+7dsMx3$D27$oZOY6d_#9 zr5}CvzF)HP!N^I0pgvvWloIZzZc(W~HQcg)y{B-|)=WPde7y7TDAg#T<2y4S z$7-WY;dpVG3JOuqByqc1eF{n8vW=d-9(cGk{zm^9a8JcQd$qtRi`U89(8!0UpsPy` zvwa6)28u8v-vm2^V^gDHp;MSSm}{?i4g??mQr$|RI4cNp+s`tM+9+61WV7BHoVG62^#X(2yvxz9cdr=qrH$oR4d*v!gkF#PaDIfA7(DF#J zf~>)SxmXqxh`H3^$EucE6~%YONE*V!TWaY%VXcq9kDf2u7{70IE>kx8Un8G{3Wc5? zUaGt0)vCo?T`N9L2=P(xZf$^d&bRxh<>o)s;xGRm?4@#c|Is|Rgnv7Ga@=YJc3V4j zG*G)curpON#Tm%5PXtI-Kiod!ZNThaE|jsFT&Q(<65^Jki27iO|FYgClLys(m!Lks zX?N{K88p5xzy-*&=CT)kDw$F^ndj;wrM`L--k0b6%%1SoYD9G?p9ViIs0L}|#m=7b z-W^$}`LU60!%xD)teP_Jo&Dhg>EMfD18n92xQG8^-5#Q2JahcPzB?nb;eN6Tb{RC^ zG?0>(60@+9l!TD`BiLDf4pv$dY#yT$s3j$pys?DR4wi=Rm7PI?43A0Rkte@+VM+g@ zHYlyX+30g*!7|d>A$!*}0R7LQlFm<=nq3A>>yY6q?2z%WZ^@MKFgb-meiDW&-tf|p zN~q65qh}iq1CRp`rRPv0FI2g!7arznAm2p-Pvf`+uHP0Wlc)JIU9YY7KSE=col&y= zNIa@zw?ZGFajLU?89euvc#`T{3>$fp&*FjwYZ&EFSTW2j1o%Pbuo;@ny4kvLK4mso z{GT}pLY2f;TK)I32^qQgS^Mq3=|;Z=U@8rC+2lN5E&Q*&-D()>A2FF_yJ+BAmO9`oalJ?o`m>@ zrpWiYQ>6>`SqYBB%tmiB4V0jWX6cW*`RjumXlie`P!Z{!(t0HMx@vYl-q44%c;Amx z;{GB?;@}voaM>KVG^^fz97&2E%M<|_8t2lUYCR`!-%KkwM)}&nhktN7%e|+<^@zA! 
zh(bM=oBinYZeNgVElHm5XOpGBM)~LsGg?SGb>#VhYIDCN%mJk2JYJlW+O?Tfg zRcX2tPt1ue;&Vjr9OYNMImX$IyO@|G0GR5&$lbJGs+8yYJiuJ+SOy9$x76wmet|2$ zm;RVQS-g9p77kaEgMb^NRs9)mW%?h5MUk@Gufz*}yatCQl_7+tMEGv{(068VOzHnH zg@`SAnBjP3jz#^BZQy3nrRT>g3KPG8%S$n^dG?m@S;p0nZjS#D1adv$O08)eZ@*Dd z2jjjSDNuYX04afJQiipDX0Q(kwB9PxQJklY2S5@O_){mq1q1$S@@*d#-*E+b$IX5Y zgg~@dDCR=?ZKBYw@)1O<@#6_ zC3J8EXp3wTi$~a0!Mmw&`y^aZ@4>yYQAi*Pq&NH(roY5x%O)ZLGs)AXVmOXBA^4VZ zd9>xT*?1nW`EjP&<}Srr!o#p1Dnd#}vQ|1)lhZmdw1-Hd8+3nlpWoZ&MIP?65hNJO z?ppTKV9E?$O~6p^A31gllBKBw#?IE$-nw7S?t|P->pytKMqF86rm*~-p8;+My7m!k ze`GT5o;?}m{Hi$-h|?KZ+)CbgRCwk5>3!Sc3=>}z1_oa!l&?MdS}>l) zHe)&o)jmDBZ^pE=0HG-3_V!ICI|6;8>Yl>dCZ=ie^mFbU)>|rc z+uPwR9qBz&m$PVx2l76~%~93vb(`Au#lyCdKKK`k9xvAej~~PjNVF^E&bM`P43bZf zW;;B~`Z>LlzPuxeYnzE_uv_8@WnbESLtUX;%r5Msg@R?2L(K3~^MA&C*Yaq@AbS_l zj#ALK+;O21&I_OWip+Tvq};Vvn@1i>%5I@pOC0)@>Q?4``0;!N%4&O_P_XH=s_#_i z-ALQi4eV_*4^SP`ca54L+DVQycj#9cOLu2JB6FeGj9I$Y0v)+fXMVlP7)!!^rwwIa z+gu>*{{3i<<;bycdULX1Sz|pM@g!`QKk_)$ISVb6t>dhBJpYs*Le+!!EAS-1Lh|}4 zDNl#E9R(11deFisD7OJJ4VznhC0H!EKT;E?tTbsCIGp|I(-Yj_ydvO>;7EKYN|8L7 z*N!A<**!7?EyZM1|K_w}UZnsnw2s~sx{d8tIC+L3%ggRn^aK4-NT_*1kjIpnICpaGL7$lalX?f;qDd4Syc#ERjTLo&CpHOMFNWR58z8yQ7$4ED z18HX{h&F)qZ^(J)li8_bvSAPJCj=pVEHeH+D%fYyV5na2!(qdSRzd=qN z^6i1)*;8O)s(vClEp0avL?(nPXKKpRFN*d|A(F#DKk6Vp@C+ zLxA_fmbqbaFJjRG@5+_2$K|aoQ*zmcB>Oh*EL zVG2DotLA!n6u)m2-X%w(ozq`+>22zIHo$;6iMPi5@fV}(pzByyH{*ivJUM`{@7 z{7+&sUp;?Z8n?G>qW;ey2#CB^1pQxCCfRFk`%d3oUhIw%7en3b zlYRKP=?XzvGK8%;cE2H*;D$@IUy^YS5UqE>LIsxty2>zAPhS@o%glc303=N}@mK za{Wy+mD6U7K*yuT*P119ckjjhZ*BKS+$6r97ZMDfSW zl!x}NEMCyXCckpB(}9?aZd7kr<~Sinki_qY!i&gnqlBYEz5gVVHWpsd){xCd11 zUb?tfwAaslj7Z(G<}#zSC>ge^TThAITHniI75{Z&a0AZyggrE9emLJ3AsAJse^=X2 zs5VgBi9?6h{WybVpM8;K9p%g8@04HtVdUi}AA2^%5D(^62T)Mn&$(MfIIovOT^2D% zJ4jQGH|$k1*glfUg6TYL7u}-WH!0Lnl_6mL=t|0z%Tf8}p zWqZ9;bT3gO&}>)=IH3HXuB+rUb2N2`qmUCzz!)bm(&%=-_CuG6RTv2~07SR}u!UPZ zpX2%`YCXgTAs<1&iX(*nKC*9eUE{c_!3vaVu%9(PMWzm)+bzXW$g<~Bk2F|o=<%It zKsYlFc{=mFdaj?6HvU`8qE(7J&hJH>1I@U8LY*nD-=84N$3E?Y()166$crrUYzyh+ z>|JcpZI2`*@f@8#AV4}v?nZ}g;rN*Ch+;KLUBM9_&FtncKBfsrwT_OS?XyMw@Eo17 z0;hP3m~DRx&9}UjiUh!AKV?zwdKqt{i!Pz-yZs*i5n9oP-A>N_P(}P?krEYnuPsC{;S_~Q`p&l04&zc}k^c-@h}iHRU1nAcMCNKQ{_`3gJ6ctmeXnnl?21DQBx%C9;e{|Zd@<*CjjF&E*H<&AjDH? 
zT^aXr2u_$s_~N}&_$RL?=VqCDru2lH>!aC$TkC2uKg;SfvPJ(cs9G5K*GK5UR&$Xy zSm9riZ72W*mBDs>wY5i3;q;Z?eig~Kx-FK{eo~=zRW@ZRQBXEDKKje)Q~&3G6#eG$ zDBZd1X?RaBjkR5Da)A#>PT~JLj#2#Uv@(i=Y3tdo*!)qPtJLZZo?pxtsBe^`-Jw0w zZfW9xa~tpb_Cg%=`2^LT>iIUP0A=?&?cKu8)b?R|QL~a8xk}7{Ri-FLIWmQmkitNu zSQC8jn}npF22s?kDyVfvmK^V5tEScmOXRpQvMe+|Iq2Y4x zg^}H@f0y<$LrX-*%}M=tKdg7W#*EIWOwm%RUV4*j^1_^}_ikkP@B3qU>kZ?T7>|(G z%7w47$H~XQRVhY|UzU$C`&Xs%cv-&aPp6I1;ESnaCLU*SR4{Vg$7(>e^=ep~9uX^U{n~OL!0UUVg=h(|Yxu*xX33Q6xH@jcYw>QMMF>+fvQwQKB6EsjGEN+G zRQ86?F$rYW7Re{8_|Wb>NVSD;M=&+`YA;k=p+9O?LBRLopjW|h_VgNHr)AMy=XHX8 z{gaI%z8fJQ6`6ZNBTJpU+3=vu7yIKDxmAzoOQCcsr86Xv|3Tdj-z8PtFvs!#0G~i$ zzgn9X&0C6DP--90M;nOiGB)(R)$N}9FgE+4`3oNuFm*&Hr*xA2c(0zKNiYHXu{WWX zPEE}#(AO@|@F-9qFPv7N*iA6<|{uW*K9qmz{q!7X_gi)9>Ye z(7;J=E{9X<$KdCLrd0DlgX0^l?|u9B6%B=w#Y&j_Fs7wTl@wfS*KWYxWf1l@%dvOM zY3{n~4rsl$fyt!|anszs$tR(0vuM#G^XQ|G;u#$`*|TIfkDxA|HU7ZfXE-!sX$qEp z#Q1G_8=5KAp|SG|_N3oJ8+r4F4W>rz+NNIJI`9#e4ZTS%j^Akl4NN%kEk?7M^jpC> z`P>W78=3_zRk|de|I_fHmJ`k^Pm8nd9~S)}dqZkaRjhOg_Q+^++?DW#A|aZeh~r-8 zr(G+&6+>YKNMZdakbpg|b`D|m)fQ-=KZEy{yH#F({T7>tQ==04m~2^M*KdWU#D^Bb zgF`f>{wIT#Ng^3)6(GbFj!8=O(+9=7Z@*(!uU%uFc0MT3>+`7JjZIiTN0OJWU6D>m ze6OSXu(e(S#!=QkJ9^mG%fldM3+t6YRK`l5KWg@$zLtoPC&oN%HdAqipyAP8>S0pU zH{57p2@{{cO|hjMEN{I_QMjbI`rQ}6yeVM`v72;g#w~xZ> zrxupb?`=$mL!B(5keas{)&y?C*@tWH0}nhXXEklIg~cx(x_#~_x_v)ZABE?_C)vR z)_=ld;I*x2QV@CsKL7VG{QkPm02h!b9!+MAwyj=^H7eoR`p?oy$|FtZwCzt=!{4l5 z9V18W*9J=U@)Op8xb0yA1QMe1h~qwC{_E&yVF6AfB>%lHRlhu z1-`A0pH6bz<6p7H&UdH&2qaUcHN#^0|$L?X{3Y)c$YLg*E>HcW|wb@j(R z7lGEo5^Dc_%#7=sr0KDtqF|~;13VaTEhXz;cw^h*mC&dyXrEOKdS>uSC1zaTc>Q&; z_Hk9?t4(jv!g(nQxd{{g`S>d~#jc^IN1^6821WcZ#=L=1ImB$y1o$K{5p4K4v{nth zVX*j!BxclOu=+R2>sQ1{+Q=kaGRST%v2bKi%$iLTPglTUH?h5d zkfw(f$xU1$F3vf~FZ2cT_RwwgLuf=TIx!$T)Gl4N6y|JRz#($0 zn9XScqH{?&;@i4oJ5=z$Fvn3R?d4F0=8?X-!{*IYTF1b{EW)KEaTc!t0?i~H&h&XkmFNwT)^Me_3yE*#j5kscOVjSY= z)TdUBn&NPf=7f0Qq8X>6MT&rc-O)6`VHH{>FbgnDm>0CUT)%#OQ@S*KXW)PcNj@_k z%$g&x&3sYWvZjCk0dn}|pvm(OhusnZQklgt@igGQ{$?pnJi$IS{T4KD3I+uT)T2Ly zi6;=KX`Z1i)LQStAzq&s?OL~h*(JSbubMs*cxdg_^GP^Z+$uJv>0IkFj05dIuiLc2 zEdF6J{AKJjMT!&_VmPEA<$2P?)N3!lA`WZP;3$?KcAPK4xYC~oBs0aa()tY>F*VP? zq32+jhpGhgN@)!wH^rGK8BKJWSOd(dD{9p?6)&l1=z#J)`k;VmC^~b>mJ<%GQh`y! 
zlO86be)6b@C~?fW*I#=TCW{IRb6^ylyVEgcQ`i8%zzU%;Jfu6Ogv484+`rf8z!D^ zxJewOL7Pa-Q*bV1%a#qM%&x*bEM~Ug*?uwUbG$pJP4ybp%`;CuEe^Km7m1ZGQ)(_jbi(qw$pj>UF0MPzkcGtDiw%Ik{W#` z1XkfKbdt;v7qQC3(o*{2VKOi7nB-zT_W8RKgk@{+-=&4n3>4l=@}gQo!!S#4U5JS6 za^ic>3N+6{>qV2-?+^nr6-=xQGGjjeNC-M)P?Rluk@P3(kisYFPkj9$s08?Ns@qp28BVgnoSc|;b#b1rNY>zkiz`$7Ghl55MfOYOFvj9%2 z>6m%l`n6c+YhZ8HK^#ldm&C}CBh54jljOu2fBkhg!6fAk*b8kGzf_0de}eU1*|5IJ zl_Qtz_h`m*_3G6SfY~Ren63sRhs>Du*guUPIZ7B5msP%8Ocz}N(@VS-E?fW;PkqH) zAsG~0g9~9F3JG*EJw!hW{F;<6S0298-o|+5G_%N{c;gKzlQT~)5XUcoP)P|9L|DIm zgITe11=iXVV2)fSW+Q8Z5KsSJpTo2%okdgNfqTB5Fz3X*&t5q5eC+W@&2KPGN@v#4 z9KpA}ES#DiFbmMW!+#tyRV!CBcVI7H2?Tg|>2wT3Xx^B4Foi@%=M<30oGFv3h51yp zcu~{s?t5i#+xw~BvR|Xa$}$knxQr%Q@oy343k4Xs-)z{hp_rXw8|)QV5fhQbea63@ zeafs@x}5wi^eeOMk7@dmj<0v_+y!BYt(bGC;M?PEdA>g{`NfRFv*3Q0CadU#yEM#j zrNP`^yXH4A92SbHu{zk#bca9-{deuezVtPit@`f!`PhS>gx|IbrV5_pABT^cnR92E zLkA9t&y&aSK9$3BqJz`;_t`BrVUOa22wwYY`zUi)sn_hZ%mzE9!`tXP;1ZY=_dSzu z2y0FL4T^>L+Q1?XZ(O?GGUTPi6Wk}P=k6P3LF0d8>rc#$4jRl0`ylm5wUVT3zBC8* z@f#pcvhm)m{S6F?r=K?TRYp@f@mI?AyUw9gh)Y?f0fz>ftO>~F{y#>H&Xx6R4iO+J z<@yP)>ptM!oPP?m^9QuJ zMEH&0iU-M{xB)&Wq!WtNZ-Az080P7%hEhNC&f)yw%2-_IKK|j@vzX#=AtK@XVUsMZ z#KZZgptjcNH+lUE9Uq3}5|WV+z8?wrQkegOLW;#cFTZ6r?mc(SOjf_0h!`5h7Xl^e zzcz^w{I>kJ&L7(Buv&R!3EwO2ql8*QU$`(+lQr}vgiZ423d&+{meLhdzr&}{n!u1( zh}Tb+Bu(Jd)bY0^qHlM2ugM`cA6~FTr~sii8YUDDn-t|o_`)bDoy5c({>uBW z;7AF3DS7rT%p^E3e*(qfjp`S`unQ5!FhS5*h}iMcnk3YE+&F~AVt~SW!dA!Dk35GX zEE^Xjz-Ff6RM8hY(Zz?ZCkubhqA@o~wn4Vt21%JtLF+?&{&uY@oX_y5YE zi0!|JCyXlFJ;WRrD7*uXoer8E&Ln20)c;gesqki&ROHIz&R;0~bZwEQX{hPZ*xQ`A zPeoKuTtVJ)GTOxtTOBk{HcogJai9XJKM8qp2^O#a5{3AX@g(eS&6z#N)TmhlL|AB0 zTzs(`l7HTR+d_Q+ezPR*Cxo5RB!*;Eq{4?1>fZ3Hu-Ob5I8f|{9RZP+Qdy@?G2e*O z!_1J>E>^s_xd~2)VZsK|Wq%m-bO|5GIQtg`OD)uXhY3xQ-n=E|Zc{{xCr$41`rHSBX^GDs0YaSNGKE z6Y`KQfDPdb^XG+A%WFX>Z!Cmj5`1Y7`Qx!;#SSlhAe=sVN;cZ*(x){U;XJuu;ezHK zXeW?J%m<49F_#@*0|NRIXqb#J%a$(_=Zusly&A*-+DsldcAS|43ALQLa+_u?T9}UT zg`k*ViU4+sZ^s_PKHbUiRY7~n%W>fD`S=rN2ben-;{eEm>zXxdaiHBKhtITW)0!OF zb3l`zskyH0wIGyOp9}lpzvJs^Q{ngGBaxImb>* z^9?uwhgJykS3*N!(2&7kjQolN=?OW|z$6$PG2Uu^fwRQnaCk|gzU@B;LV7M^tIwa4 z$j-aKyxV_(y!X#P_naJdDNRVtmkJdt!G}Z|VFU~VbBJ0S^xx2=Su@$Z^1x0a37x?o zKYAR}lE!3$#OS49&d}-Td+!YpErwGd>eJa8wOnSyX9}4LWP&sSVF;$i#JM)xI%3pF z^EvEL{{czIHf^r~5v@JOEgP6+$IREzcA#IDOdu-2yp?$f_MB;-e8Q)Yu6*ZB^feV2 z9<4C1|1?XME;EPzIEZcsLQo(RhyE<8R;wzKgj+!r7=^aXoHa{)uUrTJ5G2&_qV@*A z4b*(t1C5ds$N!Y^Pn|ZkFh`0&D)li)Me=>3eb#*-_P|t^fk`e%f$kF`$7#HyWC&5p zm(JhoU0Dw{T(1?0RyY1qoBi>>6lI2b&e1G{`(&=8Q{BM64qV%PpSao_N_PF0buhNmKROv-Q6at7KA^{Q-NJ32@q(A~8^}DWf=A1dZ_ul`6 z^1Z)zlK+&`=gjQhyE8j87s_hGui+!0WtLq=e9H3^W@9j(ZMXDN%b+ehQq} z@F!Oz-f-RZa_IP$TW=+2d|00xfiKK2`|WRK0*23iGiS|~RU(`~;sE~s`|O|ATYnv_ z*6k|o*wD?SK5vbA6aKjtKK-X07ET}V5b>LDx>@+%K|j14tCxpjMGg<W?lj-hz?n%Q(jKFi;RC&&2zv%)SxC)gM5M1%ekAxWbuE2Wz{_@|; zL>-@jX5d+x_VGLpo+Eb{KK58=oprIY6z6l$sUbwu*>Cy&!65ggh|D|$F-Y6wRGb^+ zDqEFVN>z}^i6FiIsdU067YQ;f-ZKSMqp23P)KPuR^z!@DaF5HnnN;Ps0$T8ygpA=a z0TUEMu#)`Jgl*h#e$E+K1+;@qAQc@oNx2xIlCQxdp0XG!_ZkIdch4{evGs?!EWE_$UB} zWMhKjyL*1u$VDEd+Vho&tX+@|YC621eDU`WuSCV+QNQJx()f7&Wl(N^ctY^n8;mb& zs3eBd6mh;Fq+E*5LpTGU$6Kx)!UYSQ_+Kc$@cW18LeyAKBC|5fT1u^@|7ovKOBh!} zpesbRjJ{V{3r~0JG$-Tps{R zy#rm7tm6NuU**wD#%`&}a6|^Q-~ks{10r6!9lw7lGh+s8Gbz;MTCs4b?KDF80yAkC z2x?z{A<2>5u>#B#Iz_S1P?935w}auyI&b>vOKo50^^6)&D+*9x4TuP;>nCGyl?q3! 
zJRy!k9Pi|kAqln)#d*RP$%jhYZih1{@Gd27l9iZ#(lRvZCcLJu1<-;wo(4n&Va$%V zU<(mNSS(u;7;QvHobq^FZ+#I9tlTZ5d_f>G7Z7KGusUr!Q0yq+nGIffz7G8QtCNM# zNk9JSPZPgx-UMGl;|vE3!ea%6guqkNFPMnV9K5~pm6G;KP2%0xXe#;*$h0od%1kxj z-2(%z|4H1A=}WUkbhDPM7M#R6Xdc`$Q#EJa^!^;HwjzQC7xi(=4OUsl*{xZ>bx zOcHQ_moGXz)Ng~0H^f9lPg(KsEWSRUH1Q*>=vXFgwdL0GQa=e_zdekv{9YR|Tof;l z7ukI@&%o>6JI!Qz3q0~#JKs?d>CBcPX?i1d1Y9_zR)-*`5oB25(ZFC z!^`gsynH;31GV2zy|LV$U;S~*B@Q%{Fh^EOyok2?Cwwq*;&{BgE+LaXt735VF%0Iv z`_8+Vvznd0xx+VcnDi3x4|^Hf7%wzW4|zJh@#ZKQbmyeZ1{2)g5|K% z2Ycwr0Kgr0-Q_DNcG+bo90-3r?0HQ3Dz`%Gzx;AIwDjv~S@6ggu<*cSeErJy9|fDK zdksv)aPr66c_w_ol|>wg8ui9#S*g?=TS%~O+hW#vVN3+-!ymml8$O#4hm#J4{m-RQ zuY(>ddAfG#CM%vc#H0uZeHPAjWIanj1Ni+y8UrZGWMJ9y0%!yoP^pW&3U}rHWA|=S zFhGYB$43f9lRgu~uAVbTcL>diCy! z2?}iSv~r%HXwYv~l*h=d-`k^Z8}MRM-7YPA?EI=hV(5>{H4BDZ3~c9^C#+u1pjwy6 zMyY9xML&51VXl7-S`nzYSm##itOR^9D4oY%f8$3g*e8;t?1%#;B<2J*Toro?Kq zk|B2eg#tJ1mVV>((NCg`D*0*=e4)fZw8}99U0$Pp$dW-n6Qs7;l~^n_JTcaS!EXgv zSTd!epeI~^P9%0GOyKU4#^Sec|G@`g6~%sXA8+aZ?vDm#1Tlz7hS$_%$po88?|w5L z0vSpf`}5;o`#OGu#^9ch0c=bv^S;c3pWnikd@tb{Xg<`9U!G&Xef$pUa|vY(!SBKZ zOeQBB3cMax_$`eO72d@_^JSO)4wL*3VKro5Igpqu5}tqVxik`bxXle$TW+wyMl$KG z))a4Wdmz5IaXFmT*eYQpEImtT5CKGfh> z@%{0vv(ZKy$VUfHRlNLfmQ$L=HLG0ILqM&n^&e)=AskG+mYvZ?ndU^^I=0jh48Cyh7#RS1}X&bC6 z;g()AfqVYnFQ6W8%HR{98~D38602P&Po4<*{nAQz2d@k5%an26z)QtWd*Fsbyl%{ES5=U)I5ApgKtP?(^2>m6C$c_dbvbiy+$ zSFTRRL_}XayK;-El~!0mp7&TEobe!oV#9}zz;8V2!uH2ANbE`6$`t2^;rVPho|Uj& z)`Hz+W$^mzZ-@^{`d~mFf*P+sc-h`RfS-UBzt2AVjQsw0?%YM%b*rtn1hWRdq`&x| z|HxoJ&bSb{i(*CXnyasgk8xJ?{-V^{f*#l2KloA62(;02@Hu_i7b{A(+GZPBvC6*4 zi7lLiAgeO__3HyOvD5;+fgeinxpEj*p}vomvYa^Ri?-ovo;BB6GdxF@_YXc({pa5= zg70Nq4-;h~Xa%fX*>Iyx(xTAYDNj7)YKoqGF=vq0g8EA+E#Q}NK_aI-?7f_FYk&VK zatgb0m9pt4q>cj!=+@>{9*^q;M37O`*KH6a@CH{YUtUg}@_WvMirT59@B-3f_4k95Qp z*^$RIDYem%K7^>>#WKTSzGptdoRA0&E+^tN?{U5TA=B9^x+pT%mt~X7lp)%NqM-$6 zk!uzLu3D?J@m{IhKGy}*^+bHkXI&ML6|@doVqNj-SY1D7LOs&3_ zuj5!oaIv0oph6Hg_J6?9DI=H23lPPHIyc$eDxd@XMvPRl(a)+qR7m6~htU~-plzyK z?z2jbxQPbWzdrw)EK3`7h$Ub3`v>&2+!R>*^1Q1SnLWg4udrAEB#K|W36oNK+*p5L z#jV!QijS)n6s^2B=TFr|OHftH0a)|{Sg))1r6`}w9hDn!R<{aD3mywig#c<$uG^`Y=g>-h)+nngxIf24?CX=UOfGxrDTpefW;)fVdvseVBQ z4b0Z`XNbsB{}V#VLY(utm@h(_V#kqi|&WAzuVd<1pPQeO_x6^-D6d;e2n#MG1*d`q3R;*5zi z+tl-l6BO-}5b9|lwqDB%apr=3y#7^^GpHs3Rxv7_HqsL9o2=R=Q>Uc6?!G%+gs(N{ zgZ(@1utPfaw9{q6Mt(?zG1!)^ZLL2MnGUvTms0y^q`6Tn5YvDYxZ0U(6=V<+7?r@| zCjIVZ5ov=C%3?K8ZSboRt5LbfWs83W@YpF_F^%q+-r`Ld~nV`T~QTy+=zih3eD=1WkYjJ<*{eZ&Eg{rwVrml(&+5g)AKyUpLS$4^a z|HX$v3wiTC%GAWrq+c;=OG_SLo-@SN1rKA@Uq5`nu>S#r6ApqFVK_K14+4^tGWeG# zJVc~KfNDB#`YB6;x)8j6{W%61_!R`wVj`G2EYLZ~v;E;Sx{Uy9T|s4=ltUp(B~{Dd zY`N(#WFk-BmuUx0B$6T5QfQSyFo;x)wXx!qC*~Exm}gpCc;T;QVhIOf13g!cNy0@U9;6{(y1U#66Lsy8T| z0hU$cd_A`g@Cvz_MIPu^&I~tnZ2#hZ#4&j-9^W~3_ zL`)Vy85II7#A}g~bD%9>_Wc9(X45j{L@aHtf2=$hs}@f<{y3a9v}dkADrfH> zYJ{lNDBHj!8XhPF)6)KgX3vTUl_PEx3?hK^QWN1Bs&pt+1MOn4{^w+xX{vu)|5J11Dh9LrEI! 
z{-XprQG8i&G$Yqv)>)QU;R+towrS zTxO1a%-?Z{D?xCSAz~`MOcgf#{?ZNx?WH8+^+KrGB4~|7q_&y6Krl+*-2X@bSwfQO zRGMp`*tYK<(G+$tmfP09tddkhD$4Z`IVy}XgVnG=4Wbq)RfvTVk&r2Eylgt|e}zxP zw9!vo@qhNUnUMXj{STD6kC+Y3{?Bz1!9_YnT5Suo3Q@I+l{~Xl=Ke?CY#;$}@%a}3 z0Kba&kL(AEqs(gO6WFlfui@Do6BL-J*lv4HP@GQU98fRveu4K7lOQSTCt^&$6pR8m zF)jL#I4$c63Nyl*m}Lgz(Guc{WR&!>BwG3aQekP!PfC@-)Wv80lqLaX$sww0B(@b| zP{Y;Vu}o$VFCglmw4a=gL$a|q>!urjEggat6szJ(ZYR*(Tw@N?@D|03#J^O})p6!* zIuiy*3tW+wmDKC6IOU3QfiuVUkjFX{1mg-K4{({y(NfmdB&GS!?)8~mgv5N5N;m^$ zN{5p|>>&hb5L!gmU)5NZEVsBIOrgeE(mI@)s9oxWtY{Vmtu~O7A^+}VXDPqLPM~x# z4pdpAVwFYvz%cBvE%wU*06+jqL_t)Ie99>&r?=jFE3MEU6BIw=1V#D!uSL99I^SY5 zf%!k`ujkbstpaf(fs9(v)MNF973=MvxxM}AOp|XsR<<<4nK%Cnr#CQ5cCXL}8`8Dt zhuu`!T-o#5M*9W&=}NV-GqgK$l9A zxN|+u!RMZwu z=4fwz1o0I^*a|%h zN^94H0_I>4jy+Ovh{QHGFpChOIbTauX68ddNWrfxF*g0ARk$YFg9+tJmsnTVU(*?j zl*B*jvdkb@`>U1`D>mCIgov`QgevpX3Cd+0A*~^dg%sBRBEOQCSm;cgfaNA^uFzEK zpM7DpZBo-umh$^oh*h)J$7<#76{JcKzh`0gVS>>YX|1`;!YZ2*_rFn#8Wikau`9MC zdKatO24nlZ0}k9@S6%{N^fAr-z_nZ>61 z{{!0^U3kF-vI=oMtads2*kjY0YphW`YeoQ~^;hY3|0rszTB9$tPdDhVvWO^x7`%Ey zqtXO{vemzpy$Z1sQ--Gc3&IFdh_nJOQLcaPEAg5TA?>3t^ZwD%^`~ZQ+^W66nWG2i z`u+hzwktzB|J(Iv?O(KHzbJbpCXEJM#i9V#;AY@D42@K15&7e(D3HkgFXUiWs4-Vd z24zYNgXlv+dQ-j`RqvpI*2GDwg&|mFeJQrDI56WZ^mAN2oso)$Fp-;U8$om%zpN=yxImgl&tJOMVyF{-xg((ZkD}h^U z&ze00`1xdOU#{Yxx5Km`Hle_j!M!60wTjRV-#<)o;igQp_DdpIHMpm@9erOwC7=#Xyq++1<7q`_lRx_k$ zAYJ|X6Ip~C9w~#%6a@5uaVC9$)xO-;vD5tQ+c;;-YkO zcU+!d!@tVCf;?G2xjmbO&0FoY&Qu=tvyjabdl;FWOxwSIWG)frtT|Jcs9=$cDF5Uu zF!R?rwEo#vQVM}md6w{e{W7b;B(-{l7zXcqQV>u} z{$V;*Ow=4PQKr;~0KbtI6O4`!x<_P`sFX9oeE52s*I@`koy#T;9S2_J1TdZJdV$u9 zIGZh3Mpc>!5;}BJA$GHXvg+1#nv9~KmP-jJzp;q~HVQE88Ken#(p!Vs3z!HI@r+bI z4+i=BKe)vcR#3qE2< zi%O*fXRC4#{#WfBES8;GJ}dEBzH zrNBs5>Pb?QV+61sddln}Wf=~sWt}g8NQyubIzzs|&nZELSM%`)xbz>kKkSmIZkq`K zl20!TMxrH&g%wh4Qi^`&vZ+(Cf`SJruP|U>`WX({Ty5<&^L0b*r7u`cg+UtqS#KvI z^Hu?>PpBm_{baOjw8+nNq5mnpUB%gM>Xbk%|JXN?*u^6iS57xg=rfrTTfzIL+X2?{ zvP3f0T7&c)73~nMKb;MP5Hb(6C4pYE8h;YdY4m5LEXKaI(l;Adx8M8EV|hi#6geZq?lDo1r=ZwLFtMsufPH9*y5>2&vf3e&rhqYgsuC; z%zVGf)&!P{eXVs<>xEwIvnppheLD{|#bcmb`#+^<|BL!5QRgRE=(sR0ejC zFI!W=Wk#@)l8X3WLQV+uOSBc~oFoIfK|lG$4l`d$kP8jM&WPDkM{z?%G_X?ta*Ya8 zjKEZW|5c*a%Gx2T6J6FfS%4y`Eb7iI>L9iR5o!u!1ToHRSCGVD)=Szvr(8SlyiCxfVrEab}p<#)%vn|_B0iU)BXS6^(;v;W}1X@7hX+N7WQioZ#b ze-9x}{b<^?Jy_B~Q4qLV8o{8pSKB{V27K-GlheK|lj>x}k|#0;m6TfrNKqec5Hon~ zL`?#eO4$9^dMRYXZmTKm01Vy-Hu4sM1-sXB+bzDb*&yiUoEb64SDMO2__B8_|vmKuo3P{|a%& zk+A3&T?By*)@X(l$d>+ZV8u(!2toBTgFmTn)K3ZoXApu6AOhvg=j?*jlGwxk!QiBd zlNe4b{f`j-u}{-4xjo4%aHezDZ)ZdLaIhCs?izFjL#U|5lU?8(BCCkJW z9;2l!=z|F9Awpw>v?Ie^rQrY&Uyz_v8Z!!qWbd~R1}(d!A0BrMCMdYY6YE9(%oAD3 zj$kfq+y3g{9MfiaIM3;e~tXt%r;jGeH-?R@|F>ZPTF|%a@4uhRG0xoUB&rx(x3l)cX}UNe9y)n>f3Mk&D4Lu0GW)_ z7PtFRRx6%S@2Gw{s`yCJS%;D}>qjMKgO4B97izN&jJf}b?76h zilx|Cf8~Z>R6oxl`{%!&Nh4o-JuQfH$Tr3{OcV6E=%py%Q zKW(Y>Kf+a~wg>XAlgfaWewFzNlKw|F$dY!|`o&(U)WRHmL5ke}jGZa|*f)t~srvpw zg(LuuDMGdVt)tA52?`#PJ?ypD=z-IGb_2aNtgvQ&l4thD zM9Bgw0`kv+ftWIgK-!RS(BDt6ckbVD&cP-)JXa6N7MdIxYlOArTZjrkwVio=3;{|=BV%kKt6`7pLbpk9TP4fhm%O# zO2}QcXH-^&o|LUYIE;-cL%2z^C@%_j-y|ReV-=7WJd^F1lRm*g?{{Mb1t%!@(zye+ zcsd=2k#@xj3Nh1kQ&0t<{oiiF;-gS6)l}C1*S}Ip z+zbv_V7QNO{19Dw}Z>^u~}5Pab*o>5)?JEt6N z^@B~sx9G@XGJ9(hY#`Frd&VDjL<7MLu1D!4Zn^#ft_W8CmN?j0oC8mt2GvhL|h$F4qtE&Ho_XWmim4;E?PCa7Z?{JnGXMhh)27ima$rp%Kz? 
zdw9)_g~m{c>(NdpP@^fTCD)9L#5o^9E<>OHJ{UJHU3tZo>9M~)n)+aZVz0fwm-gKA zd!Pc7{QjcVEQ~j+l41=OtZ)o77Nq_L;GG|N_J88tH%hGh8Vkg; zo>9NH=@WrUA7&}vb!hE`CP&&Y3Jt+;;tY!Ya8@K{D3Yw8;29JmBSLzrgT_UKoxe&p}tNiu&ulI=$=kJ`Q$VT z_2QXGpUs$oGlf=9=bwLmT72=v$?T!3A7?#bE8!b%xFKDA^))zuXh9rgzhgQDXHMou zP(EQ&&I&&Z3{rQbE!SU#T1cgZTES$-zp5{qY;YP%tLqnlJ%0dic9AxRM%9K4HcZAj zP-mWXW_t3;C*@3}W3f_lx7~I#4d8(AcKV6cuIQ4jE~-CU(Sb3Js%4~Tk>CGJjS?Sr z_~AG&>}8yBHY1&V?m0N8ax0vD#?=F6MWL@>ABG{N`#RWAfn{$LKP#b`=WypY{Q+Ql zBFK$swIM*t#o+e}6~g%KH{VKpXMXUZhj2#dBI)R(j!fV2_M?KRFy>u(&M5AcS6)ef zxc0jA=;MD&1N!$*gYi+&&O7Z~h{XmL<;l10Pg+$|wM1@#w1N#&#q)Zjlks1JcDnX5 z+R2X!b3TGhJhaPi3(Le?F#RppzY&zQQLLaNCbx0~Wf7J3C!T-#{bShhVb~J=Vw`O@ z3=?zM;tAW)*F!~Qr9jFSfI&iW2 zOiY7ogqM8U8Hu$v66N{F$;4PnI=hLp=GB=P!72%2kyl}i7MLwcz?gwNvV!9A$J54} zZi+J~bb_MT1_G}?^+&rYzm#JhU&m!&Nz zFH;I4V3DA6YsQJiQ zgt8tfwmfsxZq!~m@`2Vmmar+W#zZ5>&WR*m|AsH=967A?<0(@y{x?SsK3%9gzJ`$( zJZ*n!(>hs(5CqeqCDCQSm~xAx4AD=1)0oM~j=xHkvK;GmOi5uhPH!1Yqa=YL;{RNK z3NRU2g>$$FLm(}%)~~f3#|(oDc8m((BR;;OqKnu}KDVh{9vhdXM3j8-K!F8(=_pxV zprT?i4l?M697@jcMIIqnUU5ab850z}diBBN=Pp-XoHRR zB`QG-#Re@iHW5`C>j_4uR7NXmAWY;rFw=(XUr_lXW}czR2Eq-mGf@dg*p#aFruM%g>_$+bf2X39v6~5PPYYJV|AD~$rAmV@2fhmI3 z-)TLgEM|j%b7TVrEHapkSa}m;sSsM@0BHB0h>v-p50QGEnk=W(j2YYp(*c;tv$S&k zfx&qKEoC`93po^6)-M2sGG-8Y0j|&zw}1XFa?B=$fH!Bnj9&uZ)TIDjNvM8dHY|{_ z|C=sFtFS1tIbZd^@Csq(&rC&jkz)0yjdWI_F#}6lxYPy2=>5v{6B4K-izxrLJ~ia2 zbjhWcq}RE{6AsDVdB>d~fJ*VFWE9ZG29>L^kooT~`;hoh!H5GDvn=OpONxMaJ;U>d zUH^fb%poYo@I_Xve;_wIj3Ji`A84g>FvahDHj0U}L~*seJ%5nefB#z;0ia+Pssv6Z z`hjfmp(=B_nr`akJ%@o$C?yX-oyl~NSa5~ltA77*$(}KtuAG@-2k1t>`DWT@?|tNb z_5^J4^j)r?$ic)|YH+3n*4qA#V}?Opg^Xh~%U<00{-JaaJ@{a{{)QXV$k#`T7&fJR z|3OK}&&QRUb7tj@5C8yMpWeMO$#A2rtUT)bn4tLVv$W>gYo+tfJufZ31Scqh4f6b_ z!YFUrv}x&4eCY7dLl35IUAm@?HrY7swbx#%8!99d-%0@n%H0{?!!aXUxF*GJNvMlyv>|H>8(eemO0GFYtHWb=S1nW}9cc)n92f z4tC_MLp%e@+Vx+5{)?n_xpw_iJ1sR^m5#B(4?XlS*gZ7O#P&w#V1i=HEw{v0PhE3k zg2wN^_7q{P%Fmdg$gji0{YObd3|e=70rR^5=hIK8U;XM=X#hTS{rS(&N((K#5Ok<) zp_FW0m&qtTZ(sjR3lSfTe?Q%K{{!i{=bpouRrjLcl;7hg<^Ew)(N_FLPhO*Yy@aG?gmSE^L%V!pKBU+w?V$I4ob3%|itPI>+? 
zcch%hg)}QW?eT!o`6EF-qWPPTKVa&We@&5vodq4<7NazeixJUIHKYG}C3FKTWMSib z32l8`#A7ANLuRZ|e-`7#_G61zO7V6uqMx>M)IDxd;J@JF@hL~o+Z*1lhNpfEPNwo4 zU;W1oYYBrM&)wyTc+Som=xII7*5`&GKRB8YXGnf$Y9c<+o9gm4q)a^R#7eN+rhO*h zVKCAZKsu3JWu4<<^?2#ktI5%ET;z&%*|g$+J2k=X7=>KQ=5g^iQ==1nqU}$}FpW%S0ZNPk4 z^g`dW=U`etu}2!Lc%S1o4eAsNbVu51{sCFPY6xh*t@Gkox9@Zie%QDjmw_nWXK(Kk zc}vgM9Y0Lf%xpu3BioK$l~;-4+d8&kfds@x*D6^1BY1#|U-RRc$Mbgp6U{(}WzkfV z=Twu%^bohCy1qy-?~Dj}yvL&i)YzWbQDEDDqUlyeG*xs9ygXj(ZZNde)rK^EP>-T6PsB{8y%ez2*1B*zEQMm?KhCRkYhM z0=#9LT{}_Nxz%VNi-Fx`ci2`R2=f0&kk6z8{a1eA=+#Y5W!@}!vsSn9(DT4*@;ILE zEcZL{&^+D9lljVbxpQ1#-99`6ZG2n(J4xPMJlJ@0aE_BUtrtUbsN6*z{RoM*w{sRxWDA>zIw7P(w*Zs?L!i7cOIA4xl=S& z>jYA4KOX@wBUV-D2OGD*h-ka)G=m+Qai@)2|8>(A;O6A)zfYJ*)&m|zL|_o|O$~io=^$z9M{SlYGIjNqTC2cy93 z46K zO|Q6jWGrc3%&IKTqDJvdlQ_8_XFCw-Gr-Z8FVj-(!havAA zT`@d?2#eh&HeK9&$-!UUtKYSVXzm1ey)aUad$_#Fa;e=y_NauQ$n5-z|%L#R|)`eh&Rgn$MxPhHB}-JLpPyN~c;fh{!p|1P6|blADhB5^~6G6#nount8w43R`{Y?$dhh*#pAelcRlMkU`U9G za2=D|hlop}=6WPN5WoKwsH!~?Bl$xWX4fID5-R3_fcRk4l1057d`icIB=Ef%O~m@4!` zFp2*Z6_+)XDWF8e^|nJm@AI2FiIKE0I~bs6qUNktKOL;wEYStgdMNR>vKAy_LPDi*_9UJ6m|?$ReSDo8mk9()17Jw0d2ywl^?QSm`+7jHQy-12bxq zbon=X;JcA}oe(b3M&Yr5;T(K_`2MOr$Mr^Vf_tQ^l)?+DbHUjUbgB;+pU=Pl<;t4# z8+LvqVPFgs1Zr~h?v6J-UwU!EpAR@Wq1A{IW&kW=!IzW#mv#zx{>bVki9h&W+nf2H zXIlc8yw~OGMz>2cptbYjtH8FI;u*$O0pwZ^5qUTppU<1)P&33Z*@jI&Oq5H&&~u*Rtbpl?_av*(nWV(k28=vm=*hPwR_JDph|>};0BN}MguK? zVLh<>^}m*b56rE-n-?ywFNA(3apbiPDvIq{*=H+<}x*^Q{#|l@< zON+OI=L>Q|glW%n8$O7q)3k~m07Rz+k9xE`dYn!u8#Ouzzew&-UJO*zU#|xKHgmWJa#WG5O!ItR;n;l&Q#1tw* ze?wj0bC@+=55m4?+=^aw*!dt1oHyvXIjBeAkk&!P>;IwYy2IIgzqcw?ReRPhic(bV z8Cs*NR_#%#z4r=2jM}S;+M}phyNFt~_ugB?-Z6tne))W_>-R_AKk`TNyzhIi=RD^= z_kEv*ExoOiwdgQI%N#_i#uSd!7*JuV(Zgo&f1g_0?GO z^N|K1*^fGBY1@&CCMA+YeH~bBOhp5?5B4l_+Nj|>9?J5SNY41jxqx_k3*r)cB%oJc1Nu z=?(HNeOJM#^D>#RCR>x4PqVaZ<)3pM@;H@LzF2lm6`zO~(yJ#M+22iqp#h8K*stZj zyPc|hAX0Kc+>NmeCcPlmc2k=rPz!m%s2Uo1Wz~H{_)0OU#|`TpV7n~uMkAnk*sL$H zVd&SbeJ*$e@LZua^L?kTB)Y^ul8zksKCY8N>LpaIgh!C6z{R9Ov#`VB<(#-Z{j-!N zf1w%g8|!1Pu1^JNqYo}d$HXeZ0wbEqM$e6SSzt|JBDxJ9AqPJ^ISh?3nQ`3p@x%2F zwX(acUyFnA6F=?*ad>$h$p4x|a!C=0u?()g7=NU-VjUbn_`({6e&@)*)uhR*QQ?JM zJJZ*3s_GJq*GZZ9RxV{KQN44n!<=QnxcuW>Jf&K5;e?i32gDL3tq?_e-YRAI&?P`bWp^7J7|ypv;(#HaSh0?q-wv>$r;@`B`C-$i1?P`Z_pA%#{B#KC6TR{TEbl3 zshe`#wy9yaxFYEE+pF&kodx0zhTQBpld>D9UvUS#`qGkX6*9={Z$A7WiC+{kWD)w( zX-cW}J)UD|0|}c5{(OYCDX@git4@g+f4V;*eT1knGtK5L%NJ%D1XRmEUT)KG?y@d_ zd|dD>`V%%HoMiQ|7MDcdtY;m|r~eFw#j+b2KPa`6)uV0CpOp-hmHmXw_<2^yi48Oj zT=vv^blyn#f5!R_EZm#vRS8kO0++0}Y^p-LJpYTKepbR*&m3jiyT4_~fiV9RQe$1t zH-zZ?l%)hI;!d@*UaD;`c1M-Ryj(Fi+Uhb*Ef+5!ru=_5*-ZY^b&_Vfghyd~))kIwu!)v0$K|{_LiP`%@3fY8 z#Jj7Zlym1SRN&g(s(#=_0ntz^1a7*O7n);Ei;w(%nA(n_#!?KYYWrlaPg5;j>U3d3 zS2vFf#vsS%ba!z>E|^i5l%B|Ia0x9G<*7mr zwZCHNB(%*z?MM-mNZ=)aI+7srs z6ob;(vd>ZohAT#C;Q<12KX+p4y^OH-Hro_l4s#V@ z4(;*bWAq%C$5zr7A&{CAY(uj2Fj7h^=RRu(o2!(uYp&uG0Cx}#rnX&BFU!;w9oAo) zdiV9}i7$-y8D%3{g%0B*M;hmUOW*Giq~4zzu$p>e9mU&C;sb_iw%XNEMJp1nye9A0 zKNGgdx%OxXG)lcjs;4=*)DkmuT^`o5sG~e$O|SpXrCzsVSdQY;xuju)Tci`Y*{;fj zJDBD*jH&PafoX(E8y=uJxfqr|UA1;}3^h1IoZSeZly8{b7pQ!`9!4UXi1?G2w`Uvq z$H3clgQ@LjJ?_)zKpa{?AM_;T((CCl<9x5W#e$vUQWPE9Zxb4|f?rFxo$eD2KGV+S zIq}<(a^vc`c9dcA+x;2lBLqgM0=wtii%@tzk<0hkbD8q}gw|Gkn#SmW1MqzDOoIi< z)%?yTVda|JXVpnRc0$7~5$pDP-?Nnig~O_CnDwsa;W^@%c5+mEIb&#jN1xWpzFuQ( z5?bH%M#v=SHmpK>`TxBDnl5i_K(IS+=#*J*z%@VcpCaif?TM{8wNLoxh`!_%Tps~f zr<3p)l0yu`DpILs`TpJ3q^dv<-STWNM9QRPy&!W`>N+B=?gXCSC9-vVJzpSAdk$_~ zIX2>lo`SI`An_KwMK0K<;WT8l?d~`tz^&j62rb{H7VyXC@6NWPO@Z<^_9(tKJrzIt zoY z!h+*|N!A48+*=b;KMqhzd%MU(<@~+)4L3^KguTxYKYyJ6j1+CV&D zg0ggWkaAw@05;`@DNDo1o!`W{3WXqzUaZhre~Xxk_At#`oDD(RJlz!J&t~y{ 
ziuoP78cpmgUSPVocop}kG@Y#tO>gi+?PXs*v7FjV1ZNG&Zl<0S7b4x_EVK9ZGJc{f z$4i^ifWnsnVN!4_lcl!Os7%_ba3ocRKRe@w2%}xd_B)-m^A9Rq7BNKasDcOkhK?B> z(=Ybk&cF(vP`Fr@iC^+N4e zs)+``bBXDUUw7RKS@k@o7ZpJXx7|(XP|`HiMCrmcEmy-NpI9<#lASr#n%yrLk+@!# zTPKg5QTf3>zLuT)Bo|6ng8MI=Co))*Q3)Z{MDQ z-+JfWA~U0Asm$M=IdmggL1OxRr^-ccz;gjjr}E+L5W$W{3x?Reh%vt;a_MBP9N)=T z#f2y<>Uk7F|DC)wK|v8$~0Eqkn}9N?rn%gAGP*Rg1906t&5UV!U*-t=GzO(dy2-&PAiC*&o~N;9`~JEfeqt<@88WUU&z1IzUO?oqii{hG!P|DyurUK+=`y zT!(@tk5qb5nAxJ>p7d>IbkSiFvF~T4D4UE`E5ZZ6vfqWzENb1Jb|v^8mb-44XVjJp zW*v`zH5zeeDU#Qj!wn+VC8M?={%QoB)F+=gLcag-ftR-wc`8i_P%zV(Pz7n%KY2vG zEcW~up0xCMXLmoW(Vmiyya2o|<3hNQ{~K!U)-ruSiPL3ky^}g*Mfjv3>UuXoZMc*R z&O{{;P%5*rhl5gVXbolKZ0HrU(N6e&VVdnASsX%|AKwb9&*1)xb?GzvPFd19KEv|o zSw2}fVIG;vYg!h2Cwsw$8ID)P1rG%aX;?j!Y&s$}!Cyd~SWCJ14qy;4qJ0?KU12fF zfou{TcK>B7CCJ5~Xa|i~{TwB>GK-Ue&G4#4DK=2&eG+JR47eU2%Io@R4yyusl#str zmKB^-E^rsqUfuT|k*Fcyzc4h!FK!qhi|)L4l^)$CKn`tWuGdIafoIAGUDin+RSvXo z;zyHi;Nln&@>N3j;2SyG0B6nAEzM!pscYGM>FhJE!heqcHus`yK8Su z+SBkIl=s%}@CGrGYkbEXC}XG-@NV4}AZzzSEV3}f771v0@|jIeurgz79T)}A<@SMg zF}Z5NU()SFVBc18YCBd^sNT&UqRAxVhz*sO^H)phMv2F$y0vwP@ z%3RIk=AHY&6sQHP^~6`>W`h=s5DI)YWxgGcNN<;dl_m~GXWV{-ZbjbjZkC;m#MV;Tl?#j;ogyuvyzhRE> zkw-EK+vND_`5rj{wc8?-E*h7uY%^c&WOvC+Zg4T#tg5o2fFgE&oP*5zcv9>t*1&5#VnkH(#*4Kil|5dd>J8 zmH;k6b*;5mnS}W`OYh2)z9RL*R-Od17Y~|2o>H-UKS39BU8l@cG;Iu9{p`8Gz*x5( z{6rhTorG1w%8426KhqGI-E!!9x4~FnSYvD8$@L)<_en3^S^mHlh-&)kKWN}FriFcD zzr6C5OBefCJOG9a$$WuDvrjac&(;Z2)*#C#sk!S~dJuFw(Eh~YMq+k{2#D;1u3vCV z^EfiJ%OQS}!f|_Egx#&S+; zT(6ExdMesRR>_n8?$KN2buaD^^kv}3m1`Cw8s{s7!b?y4`wk<@!t8Y9KI#BkH>JL$ zeCc{ze`0np`Nd-4Yd2w&Zq@ep`u8FxLK-349Hoh3?&ixgU`!NpcM5Bt9?_4q$R+=%X^jzl9ZEOR&i$-N0@FT0HAAoDmF>&}SSuMyqO7hI}=Q)G3@a8MRG#x%!WLU<#ALx;CiYUIX2YG-qbc@d_8hjmfrbKxD=vor< zN>(O~$LW8Ms3^EYjD47NE%v}e@b!c!(hhej=&qV5Ji>XmAP{f8W45@@f`Yybd!-tm z*uifDJ-ySogtE%S%rA=$QkA|T=Kb-WJVY3C%Q&%2$&+CJk^x(p4hqI`QEM}9u;x8% zHH8BoR(ovgwY458CdzGRNpoKCqBj;HzQzTE4e5nxwy##JYDH45-UsjbdXC8TtP+Hu-HP6n%2 zZ_HP}-WnAad?u7Pte2ABukLguIVG^zKty%cEy}24b}AnmJ*c7f?!g9?wB*hU0pFFa57b0g0BNcG#y} z?1+XQASz5B?jmbYqY&|Z$1`L?ero!~$)XBslTxlN3C4tNHwl7bNrAhlY68HY792== zU>%HHBlhaD3bMkCf0+vcPaL^%byMb^aY%o+&OJN9pij1}1R#b@xF=udEfQn~9bDgL zjt%YrtIs7+`kw<*dKQkHeR`VgV(CRI05?grFvPp!9|x!UdsV1R{RMaeh#XE0rG&GC zz?1bLmR@=Bn=!1&>@lq{k|dNAeEQI;*{Ojqq5Ctfz(0!;&lW^yzwz4D?AcXgdsle; zi5uv_u^bZH=ru+kLhhffGPyc0+-yswrlM|C2Lp~hSPqGizHwekxhLJgePB2dd5fIe zV}8xU!0Q#n<5GcCTJ%6}GH;-}1as~m2j3;*HNA#zcC3&@e$!T(A}WDkgNaJRcQR>+ zGWSBE1;_9FOuFUW`oS79yp=4Ic9Z!uh+th{?M^*AZ=X z{;_|b*;?R1SjwOP%Z+!7W!JRUHMH&YzgC9=CDaDOz_a`v=^p0(6;yr&8@67=!7x`&FMIf9&-v3W5=LUw%Mq=iSL((`I#Z zoIpev!RTJ{Id-Fu@g!uuAieVM{=T~eKijfZ+UTO!3ci5D&h|C|2@|SK-ZiNwxbVf1!?BhLB)h_&FdRa z6E^hC+mPa~+J2)5p}S{a0jPKWs5u4<2{z+&F1~<fNC@j`1wV3Qtd^#aqjEY-LmWOB=x|qAUbH+Y2_y z0oRLFjDg)SrhkW8b}T|$>wjwEz9*F`(AUfTYr1SFVw0H;@?1-yk<8{%ib7C6Z;y3f zaoZY6CS*=C;9S~Ua z{EX21m=L;o8@JT&75MnefHN)j7Y~s_1lrkAS~Y;>LlKYf%2FAeriG0gos&vmP1M`% zc=78*(UeXMBu)BD!y^#WfU`GpFOHX})=mo&IkWU+4TvO#=^Li0tDn>N$PeCY(2EJl zf9L!Zh|g%_MzUy`@-_*6E!!dT(8fG>pNf5aIrcGS{LFWx^%_l$ar`-XCR}(?XOcin zrpyY<{u8hJF=45zQSkDzv}g;aYM<3qbb?LoqlV@*D0ChdGlWn@i&yivH+m#OlA3ul zoljM2@t5082t8|$3sNaAY6*N{RtC4MlVkg86dJ4rK2y^#<#erLKbf>wDY{~TitFG` zp&}T%Dwa^035V}%9Hg_yKfsDNzavK=cy>&hPye7QG|OM9Ss3=bW3S8YdXSS3u+7c} zpttj0_LUvKkl`Xd@Ieg_E!`3(cZ#Ru=UKFuHIQvxrpnM~k33I{uYaZVC8w;A(&XqP zieyFblyJ5AHMh03zXEq*y*r-fal7NR`f_;w`6KJH>I!?dQQUFy_a3N;gKnoc8k>&b zeM9hQ)|I4ThzWLzKmv#Mt^J{;h5BDc|2*n;qRH2G8K}oj4;cDOO3jn$0OUn$EKh#O zXq;+}I#WN}_59JlrSPbub__GXs?PZ8LRyfz)`1T_jIOGo+u$SX+QP|7@l0I79oqg+ zMf%2GC-;GQru`wLlWWpZS@u>!SF>|t$f`{2MK>=KWW|qRIdX-p9gwWSLP~y-4h#K7 
z@+>Q5C-381X{_#8)!MN1LA{y$e&g5 zHW$pvs%5PvwsOW?f6+ULM!GKf6h?7j!n9BW2ScstTY>tQCU)>K<^2{o(@C|m^ydf( z(C#~y|7h-3W0(^f8phMALEqXd>;~@F^l){}bzEM9FQFG$xP<>C%eD2Bk7mpE%+=eC zgVWj_B1G!lPL99xjqED9>w;qHMp%bFJj{4570Z4X0_p!Fb;Au*yNO9{eF}LFfn*h~ zl+8W8tjmUnyKid*78;PJ3x0EwydJyVuKI|ZZ$Q=%+OKM&wt>s0~y6X2iZ z{bSFG2aS3hk!ye(AHmNPl^k3rb|bq;``|i&%ZrY6%f=qbg+m^g8 zTAy}`lMduu0!sDfSuX-hv#;4fXmq+}KNBv$Cu9YHjhq7RIUc{2xhGv-=Uoq>d>>Yr!7Y_aW%kw5Xh_CH*q z*Uw`i?Ob6|mO1IKr8Jctk-(rJ;I@`sMS;$t1+n^6$hkTt6YwN(u6Zcat#M+^RnWQp z;gx`f6mkzsX`g?V@W8*|vjKdCCE@PAk4Bc~;0Ndr%{9v^{<*w#!(r8FAoUX@1p*7h z&p?T`YR?{FNo*D@cVf+<8(3As6M=zMf`oI)hdv}su|8=ix|3D8+#1oNo<%PY6f<*k z4-sU}B&?a2Brrn;tJ+W#J?{#pbjmc9K_B$mn3GgK#@f3-WIeVwG_a)Q_X`dPItv}` z{%sHDiZCc*w#3;Fotz193M^;J7}!a;q+~rvCipyp){mg_mI!>JSJ!VpM?zQ2eT}pt3ilUtnf{|R^_Duu z3=Q~iR8dCRf1`@DYs|VHdB+huQYA)ih(wchIO}}l&JXFerQj_Q?C9pUFMRf@Nky36 z;kA-*^fzv`*vIsaROE#`=07oGb+dl6rvBEWcdaLVeJL1IQLum2+3(t+VF{b<>;-aw z@8f1-&%>v4E!(rB>9PfFM=59YQ82 zqb0uR3;}-53hf(zw`)@XS16u8U3NrKD2o$mZ>rz&2tVcBtFTg>2sr_0){!!v9Ljs= zA>RmeB#~5y^#?t0H`|oAjza3N-8&#)skEc^;Qc;D^z+=_2!&9)4+l)&h;B93grC2C zSk2uQ%;<7o%;dB@U*sV_J-PDuMOXedd>CYcy@#)KRezLmS!}s9VlP?tv zud*=zY`|wo^}icyEJSb*r^?tds6K!4UN+>LJReLCa;FqA)zAMD*$-LUt2 z5{*{ryjzO{bk&SY*kwbWj|;k(o9nO|(B;xir7}i8WOrx_24d&@XvKT|C51Riv-XDu zlPFs;u1PJ*O)l9*J#E&!0}SGPTh^cXNA@b3H06@@;$oEvLRD>x9on%g+g8pLnIRi& zh@FBf@Yyz_yIA|6@$NJGxkhfi0!zA(>Kx>Wbg3Ep9J38L&3L8SbE?xF#0}8@R1y7vCa7ZzAPHg&r{c4Y zvFl^<>E_L83(}Xq$2UcMmElI>;a?HXgWzi6xu{J>)C@AQYXT^~Xj;%+{ASkJB|uu& zm5HAXXeq{9*%P(L=Mp${YGA^#ZQTcP8(%u*$BeDdXoEsFZlLnP_b=PRh>#KR{L=xh zY|Bi&lcpV0Z}QJgpTmk@w_Vj&5ktN*kTG^NuWs=}qfX^pe(V~w`0ccIy;!!`NL-qi zT0ils^1qSB@x7Q(CS^gc6P(Un+dN%bui9yv>v+h5lmZ1zLP)A^rskgN!*EXnP9Tsa z(%}1@aYXo&T+y4?&if`&#Tq#=#ayOAnN&O3KIqd~F_y?;`ZlrOb)%9ZQ#k|zJLCg2fK7Bc)cO(fbdo`kuTGG&K^Hg`wkq7swt%tuF zio^T1rpN%jij(&db)ynPSJL8Ckd=EJ>6x=sg8Qvgm$W4yTqPCI>5!2V%fQ|EFNa2& zAlH92A;M=Pd`iC!9D5Y@kLw}ypBOn>1h0OQ!U!`pP(!Cg%dXHD&XtalnV(TDcQ<1K?k=76xv0r9s;?X;|j*VO`UAl-CUTydx)cf@ceWBtBXOVZ?r(Ko{?sOw#^MQf!|{`uz_e}GavO2%N+8qk|Fo@I`OqtfQZB*o>b^*5VsqO+3~ zHGO#3W`s5p&Mi1Y#fCXI!!}Pd*I)jq{Nm~fPBQ`Gh}#MtcV=>LZ8@gBdG4V4BH~xT zV_he1W0FUt5gv#sD8AE9k2!07{cnRj(xz#CWxfumnp`OKu^h3<_ctW$HM6zmFD3rs z_-n2cK3V=E#ZdKk3BO9wUt)W@O^VD&{n&lSI*e#ClZ_NJ#kpNsM?_`CUG%9e@!d(2 zYnbQ3#cM0#p=c{>*-h1449OoO9$Mg{u6( zwIhnA$ulO*?D;lHfZoVbud%V~QQOqsC z*L)v3ZU5{1IVx6zRwY=}Jw+wliHBAVCn}4gN9ONhG&m!Nx{uv?No8(aG>?9e$Mk!V zF-@i5_{s=%WF12{%+4S8$UkoO2}k)W>t?(UZ9(U#O!)#^0si1UoeZAcUN))kkxKmr zPE(c&wA4X27D`5$e9giFr^zGaou$N27b3?w_LJkVPv)p!aXv0X@I49|maZ`p`&#FZE%gAulS2FDo=`!;Ok@r2arccHZ~FGb0W zubkhXPrrh1iNlg)*oZ$4N6Rh>?guK>n^+RD%@!#*Ye89CU%KNeA961yzhLee7Dm^c z`ss&dKI6T9qm8}(NWb}JjAVoq?>g(u#t-yn8y)2*zWrVQ+j_g8i;jc0)I<~dR(-1M z1Ci}YCG@G>Ti5#M@LTg5&e7`*5r&A5z5_c(F~&P9!S0f;KdDnk~IG{Mx$`|eaV;dt0?d7ePVcavu|(s3@!-M z^@CXTs6AViTlfV}K)6;z^oDb*ddZDSqHWllk5t%Z9{3mmEQduh5Qvp^m>a-Gk!{{AB86 zO+BsZE%!yX`SY3$>)$fE_y6p&!CY$3->vIk>Frxq8v~DfA?Lv^%z@W62|bF7 zapCRVz;wv1HdMYtU;5}0*Z*>~5n<0JE04IWVgf90@n^zQK8)4HZ0^sefOz0^s2M3t zZ4QCB1rxU5>mmdKR{1M|t`lKhv!pA7ra!wze(1{jVEI&`8^bKwc9{Vubz?C#HJ2yz z2T{l;JXbri*S)+svPgM<5z>g842P=DoM$K1G8C16&-P(WyHloP5N1>Oc^y`h4KBEh z%9QtLkHMY*caa>FNg7U^95l|IT7forL zd(eRDRR?elFc?ATHnxpDmwedlB;v1Tpkfg}>xY@{uC2AeUBWeBr71uQ2+{FE&4IRe zom#tu`WgTp3tNFkqJhu{o_9+tfgsqbzuBQrW6S|~Yqq%>(!=N6fT*5=`!Jg(WG#&F zx^+d~nYl6{GP!a&ko^F3(n^0C<5Y&iBW-N+S`Y*N9;WdXHTDb>8o-B~?g_fQlxsHY z;<6KGS*G0N1)>vRUM`^4iY6)Zt^*sH^YpG(!7;HV5FEx*J=d*|IXCeCJV$f|et^xl z;VZ9U;Rh#u8SRI(sj#RYT#LldO*eM}Y5LsxehM%+R=3FnyOTMy8Ooy)ASjD2|7w?R z0XONx=()`M=cdFxa~e!%ZSNv9%##jdYMx(vxO#+W+$=w+S}-?@&%NE`a)MCjpqidb 
z<>o=3`dhc$$(;0ZF61^AN0?rg=`V6F08nVsL%W<@_>4v4zux&QA!pf9(uI1&!&@U% z(2nG~HfN?wpw05PB}NVZ-!3(tA2y|TFAQbRM)*Y@mA&_>Xiz8KqIA;QNvyT zRVM=swEn}cRjZVIuKPWz-$1Q41o`GeJD;|;m7;G}U0kwR%nXOxY>RFg+jFk+Hm%u-#?rF21r>8}-rn!D{=_wUwd>nSE{Al5>v#+`F{foCD7=2gMub5t9-JIO-`G^mc zUA*}A4>FdQ`sl+Wp-?T9A(@aZL4u`YA7Y^HG%Za6llElhBVJklu&s^4BT-+i=&ZeV zoj>Ik6IMc3w9oiHf?~&5<@0LHG#St(RueaQV-Ji1a-@E1t6K6grJQRFO;M2$`dGcE znAMy)i1k$ZbBIjL?+wm2-5jmsu4B(z%;2<3qP6<2V)^@t)8cmZ!26o+!&rkkMJJ_^ zH=!bIr3%3%785r@O7H8*XllDMTey=xvZ-YjLd!cnrD;MEEF`iC9mYd6-b4x8eNRW~ zKBH-UoE0Bc_WJMR?;p+gkcSa@_S)k*iiJ7;4x)DA4y+Z6^l!3N>?+cxY1-Kxq;#Ih z4sg7=6~vxfogUBb*H8C+Y=+H?-`cV2(SDM!4kIfH?rR-zIE?k0^$Rf z^Pn_ye_XjrvxpW|mXwu$PTzh@7WhnJtQkR_G1Y$VWZD>E3+qhJ0kHOYq0}j;$Jr(zl@Lh~h z^t4XxXIlcKm^&7<8&WohY~D5WGJI#9T6C;0=^^-2NEOI$DYu=N_5va(eSs}BTv0vu zTd6?NmaTt{^A10e85wh}$Hene8;Hj(B&%x}2LIa)e~=n+B@Zb<%N~wN&>A6D5G{?Z zU$`AfpGYg7cD7>69aAq4PX2z3Opby0zDI7|-1%X{lD6ZNSFSP&|Gp|PPOTj9rsd>Y zdwu*pf6o58;=Rx)QWo59haB+T*?4>W`4IP3&lvDjL;QCf5oYcwORp>*CP@G4^z!49 za~Bu4>6>MY5Azp^c0`;@r#3S!i7}SR*${P2W9-Uz_fM7@5-#%^^b=*jdGn}6(%we;XZ7>mh*fA~uRN${>gD{2`4C?t zqiKjmOaNLEh=qgoxZ8(5pTrVvjrD{NH!l@<45!=)pYA0&oO%u_r8R1pPwg37oV%DR z+;f>!V_x(kN}Dllt#5X*J=ce{L|4toC!(ve?%1wtn~-BT0~J_p@)*>U1dvClEaUj4 z-e00AT|+m#svvyL_8LD>b8~3Qs%gJ^gimLA68ga%=Q3(LGx@TL+Gy_g5GY3+9(w{C zNNHz5GciM$P`A;rn|YzFy)h7t^B+f16dZ65cNKRIG>I>!^6m}as_Nmb zclM4k{!#%HWb{82gqk51&DfXoK+^?CydJ6FEWc&1Y7*dH>Min!Av4lt(AE@1BUarS z{?6?@J`;2vB`8b$XUBA`@1UC*C#K#p;Qkx{Vf0UBl>FU1dOM$KNzg2Ksr|d^y6tWS z)I%vYu-bq1%y-ba-KCGj#r1P67eutX9J=b$|J)n?G-hBMBepBF^UWbfL+^L<@!v!s zlj?*$VbnS4fH->M4{k|zEvsPp>y!vZr=6hqipOe7o)Q@OLIbSiffY6oz=wF2kt6eA z^-e(0|Ktd@bgfkqOZpaKXZJ=24yGi6_Y1%D13Y)u%nIY;&N)l|>=wGQC0%%Su6$Kb z`Q4Bq#qXD!W~F^*+_&lXJjp{Xn92jPj`u|kzV^BLBVLMO7*U5HyGJSS(jm6RewM_k zw}Imo9R1CRv2m3`T;v^_5cl|3XiFfBZx8!?|7{qv}r0aznWjYHPxx$;0sK99Wl<8}8_9Ct4`v*cV$d zKr8osZs$6pR`Yk0=tD)Yxw)rW5BePiLEbH}0_)#do~@v+NWr{DP616{&31()e)(gI>j$OX(lHem%kX`IfZa+>4oY=Q{8*UDEecvw^71i<3X$0(jXN z^p<$fDZ*#e{(mv1&61SS_~%$3YyU-C^4_7~Q`LTkExDp#$nU;C%Eha2smgpSo^+$Z zOZ}~vNm9?KO`D8f#eQhlqgp$6n0NhB(*h9rF`;O)i^tKiYN(JZ>|3PV?Y_gJH267Z z5!nc4N{;Lg@*BW)7|BNx^Op5~cm}r3jw-`Ckk;Bijwhu>pl*Fye*Y#eN*ef#ulYmz znRV$xiQ(m1`&KFq&-c<_CfJw?7n4tXVmmankO^E6>Xklj%v)-LUfnC{#*b_ye6GrRSjhrjVGfO^`zU&)h*f zFG+sRwwF11^A^`K;Q^=mt0SC}=EpXYVZuk6RX*j$X@$wz|5;gD)ues79U!hCw37E6 zfBDvV%eR_}l3wvmJypzTj{NJmgyE-Y+y;;M2p;A2iMnt0D!SHB4%LlFng|O7D|8X3 zH({-Lpa%ZL&NQwM*Y1H07+2o^QhF8mcAyFHEFQf<8$Jd>6Lo3S@`d&DfteMTl&0iRu z^s`eE)Ql|fyqh$4uKnt=3+^4rcvUfv2eCq5!Gu7%qq_Q^g$^$}X`c%80!z+k-uWtAnCet4}PDpAYpBSM6t9+{o_1z`J%Z2uo^p zWQb@-6T8s2p;83QNGj#m(9@!0vkqYBgL@$6QYu_qL&ghAd!4r)zASb4#b1=scXN%n zI14pmCbO)t$Xu%r$2yLzqJDIZ39?|WnhL^wEVQjn=U2~|ue-I0G5vL&rxGpZkyV0! 
zpZYT|6GBQcy>o5kV)n-gh`ajM+ZYC2U^sD?@ADjuk;AsWsUV6cl|>%yJ*JtNRXP_R8VMKZWNt_$)k`@w83+ox(u>4)%;hV zpi~TM|7l5kWXV|?h)&zD3%Ot^rp;fn1Ihf&P!TKAOSEE9-0&$OmmV~iGRysm=;&DA zU+|Ssj+59skjZg}bi&e|R!A(uK z@6@wwHSU}&h~p!1iLD`!QReb}=^BhejVH6DU>*PFT=hMh{PcP5#`KL3t(g|F#d0$l zQ_{LAz4<6Td^~Q9)r?;g42?97=0R3 zggIN;{HOHEO@4iq(Z?uH4VGfFO;RHN_+j`_VopH1}$+jgKJyl|tKxy{Vy zFz@})PqOlLFPu(~mL(og~h`e~r zk~;X^ktj{!<(jA4&v;J>w9p57yT9fVGg6n&86%Ml;UVS_5QJtv0TaA}7OzWcPO)&LWMIled2fqsG-%QvO zz4GGl%yA32&oSf+9WAOH*>CFkrNcb^Bwaf41`*LYN+>BbfoqlC4wwXd4|)QpSSO$r<%{+I)tj_ zSY^F?#%4=BmiOy}L}b>dsXl~Nkb;QDONyTj;w%+}_Pr&W^3&)8kKSSlt&w-p?syLC zW_qg zqBLZ_ZMt(!VvfdI9#D6|H}?a|>uSdZC*L`38b#D=jqG@H%g^sDX6~yN6jnLyJwK%d z>0<5sDn*M%$e8a-)U=3~CbShk$nA=IB#j#+ahELqO*?=ndjCu+jeO%z)_6%Ofn>Nl zNoYdbh&dGs3+Wo|EkeeT(&alqwtMRUfb~#1#IU47i#Pij4itp0T02Sj<^GU?n;z@`T+oYq$<~bWbTHc4AVJ#q=>++m4E9@V)5WxI27Y1SZl|NFc2PMIPD!Ed23Y>#UC(PrJD=tJOrJ)@^_*e{tmc_0!z@>EZ}pR}agR)+Agq&aI2WDhp8)1#uLQmb?Y5gWsITkF>7k9R|GdWu?bF7^ zi`gy&S73lRofuAA#2lL&Agu4On%#L`+EyIHwL=P&*iM5H08oiXH~->_eEy9*l%2rR zrXX=L+|?>qPR=5S?HJxOYKg8x? zgm!ztzU$iE@|?3K)@kSp%6eN5yN6sqkd{XfFLVEsr4w7Dg7iSv%&?5#zVOYR#qv62 z@{kgPlk{Qh^=jAoY1u361dN-Sm6oRUkaMRBgx6bA(0FKdRc}+5h`y;%76gsorI>1B zTcxzYOy$4+sAp!rC9|r3BS}c#A~$wv^|k?23v=swSRK*V09`o8!p0Uzi)BU_t|={M z1($a2Ce_$v%(Un4qb}Vc25GQSID?)?*YUN5%eV7DcYgkV0E(*YMF@09=2$Sx^DOGh zuQ(T;<-Rj=BH04Zh6p*v&nXAc(P9$*m3Kp*{U1|r8P(SJgpE?%-Mu))-HJQKp*Y3e zT?@3hLveR^DDE1dXrVYoLUAY|1%f3vzyJH*b?=As;jFCeGiUZ>@9df9dFHOka~XQ3 zwKx8iI@#}$>XIGQ@gYp=g;Fc_{iRhZbr@a9d$_bf>IhDEcn8j2@8Y(ga~zwlCQA7o96E zh2$KRC-O`Z@m~Oy7K?8FO;*H6xQE3aR1#^nz-{y|?{WT~tc8O$WKa6e{SUo5!dgF&Uo7_S=xQYD)Gez=4ts>Kf3DL!OdrySbaDZ^)dy%);B05J$ zThtEtvqon1xpgtsK?3^;hbtT2oxBi+1O>m}{)^ZV8&~xi@*SzZ(cxxo znP5zZG7u=MLuGUK-w6x_Bv}Pv(3I%86~iu> z-87ncFT;a~Cf|)CsgQVV;Z9OgGR$dE&~V1u-ueZTEIay-#`6VHN!z%o@^VW~HS=Ci zsVkRT|-4EO3}@3S)Op#Gk{B7Yzy9f^umYP8YzgDug?&HOJ0U z_(+Vk=p12NF;0Ef-rg>P>g}*f4jHtQySUVY`$g=@k5m<_Yb7{jhc%Ow&#$98@o!l+ zpR_0+jbO?_#=?|rr#<0g9VUEZLriAY=;z-}(BYCxphe~yZ}xnm z6udix>Xmi_Cw@J`cQnWeTIpSQrCM}k#>Ayux-oXXXHHxuTy~%H9O-L8XZhyV53fdS zqzs!3+1ryc#;ytDxoLR}WOZLl^N&Di5vkQk*o-!JbYs^Y`b11-DauL(YT0pr-Nz`r zJS%+UX<;nbNmCIOtm)D2>J<#E=%jahZQ{6J01+E|p84gC(tZ4!b5B03i2t0Tck2(I zon_D=?NC$N>kS7B*^Ty9q2<)*v=n)qQEwpY+8F1~~ z;PZvjHscAo8X8FL$zycxqcPdpxXnhXN%^$xKT}S1MkkR0DbS|j#c*%^UXu;+Q^cv? 
zD-;NPbH{a3iA1l$~}A6xCC!F zliq9%w?-i(0}L(1+$Z8#Q*bFQp0&6>$Rne$y6H%akY=o|p_$xXFb2~qpTUojLZRqn zk*OoJcmpFYDKy9Hm^)d6&M{I5VRfHq#?2~77u$wyiLvP?AQ8-=*-GB{*k6W(AZ{81HcpM%7;rRtx3zr@^ z5Ga8Pa!2^!B}m3fhb|{xk_dr2HmjMdl=TtDzrejtZZYk9i{ zb@v0m##rDC|NHi#4db8e2GN1&K=GLfwd0ZZ)y++;Aw)L`tniJa0O_IcWH9DA@3}hX|J4FGV2^8|fh&-Yec*1F0xR_Y4yLjkPL9Qc z19xP{p8`Lp!8YhF&oWa#zW@Uw9$Pf@@PDUXhlc|JiW3A}`EW^Ndmt-$aLOp@h?_ z&qV?@BD;gp-RULKs7%9&sTxb-^gSbSL}ouFC9m`J9yxRkPcHI2=k2Nl&o@{kYxH=O zh_LmjJ_ZMt14r>fmprGf{zg^>?AyTe=%0459RPCzjHg(jz^tG3&N){7ng@ z@=6v20Z6*{Ai|#zLG{jXBVQiI2Z5`JACGt3B-GVNKT`xUZ){CJAYRIXejk8KBxdg| zf|%V;U!?9kjW}*$r^-_Nq4>|^X1>eOq5VXuCIwXY$s*u;==h*pDt0roo_0$)QIv^9 ztGQMPEt;H<+wC!9;`qum(29$EYr_9t#nkw_FkxDvV?)-mC~Sfqy&Bu3@utKFN7qsMVi$cR`wqxSNr4Ehlj_UyG3C`6a1 zj37i=tml=+qG}YH;-L0qDHb&wNvB}fDYrAVAze9=%Cj)jyG6H%jzVm@*$l^hvI-+Sh2X6y4t4O7MH>&$zcFUx-^O|BQ2^fVlxg;~!%Q zhRPhK8U(y5AL6H^072sI?`x~77s5vuCTa}Ce1RzN9x`sOQJ4+(>W1jQ7sTc4qfkJ* zX$~dAFqR8%eBhf7gKuJZOGXbT$8_|e$6W8Y_P%OwkXH%gegP@8CyD{=PgEz>->i6Z z5PU04=5b3YLWAcVzua0D^RX0j=p~Y*D|v5q10iQWEw2MMho!MoriFS{U%aB`1c_YY zO7|<3L{yH4M%GZcaF4Sg!n9%?K{W`&JIZr8Wz)=rQn71}!GR8o`WqzdVR~BUAe&zBu~5C)R9v!rxFYQ0#j;d z$$HmlC6pJDlM?x_-P##-V)RBKX(iYr2?%dtTukVlM+55MFY0WwKTLK4rl|=tTVzs_67@9%1x&i_6D0c%mW_ zyC7=PIw5S>ZxC^MznjOi0!u`J@Kd@FLN{rY%pt@>gq_gXl|lJp@NG zvIY+!bOyc7D{e=EkFov44;t??B#LF@al`53wxnv;DZ`c3Sa#RG!N9zfZ3ED1javi_!zH6Q^)}8o{z3F1vCK6}k(8bV z6rYzw^76=k=o=~R{=;zYIHzCj6_A9;xqbNj)ivI}|U8r8$AJbKz-K4E#*PZEy>!@w*{grUJHHd88U1-pE1h z11A?_ARjzF;cJLYUxe&E4-LLMo8@Sxyp9@ngjr}kNnVn)4w;XfpaNhq!}!t{mp(U| zFq~Mo?rs0T_wWZAsR?kI zSwA=sxU%Q`ydj3Gdjqi`WN#5V3l|WDwn~Cpul%_y@9g_!sBb?HAP`$RNri*u}GUpQm{Pa;1ZDR zR$WJTjmwzB7 z&^}a%7&8@bLHZR+DWbauFxeq!VcI@)H{DlS7aMnI!$VnT2v5418Y3#cLB166k}wUi z80V;2cm<_iu}oBS?~Mw}NMpD_MK*j6X-(bZ4fQ3r*2F!cUeQBPU!I4yc9xyOR@GJYg~IUCcD!sO5 zv&or2R7&H=-pv%AFSi}bD2;zsRJ_}8UE7%j^(B~pf#L1UYnkpENlogMTooy8qZkJg z-b+X2pihIx|q4&rroXDp$FE3T$$E7~GlhgG|bST7$ zU9ek1m-4Xwkx+bMmgB*c;l4~OtOMb8h3!WigY!68lt z2j8p)U^u77#8Peafsw0Ih*vvLEsUD|zj1oDVD)6$Y3fN*hq*SP;#J5!y^v~doj+@T z>}NG2(pYtVj!Jx9DIM=kUZb#}a}c>Npy28o8#q%HuAW=cto3k0Z^K!hD=$ZR{#_Ez zLi>{>e&(Se>2E86i(CeK7@`UF1=*1pRH=X1>+1L{3f>P^ynX#Yz$qG!s|o5U8Fwb= zSUj=rM=+n2zLj&~CcMoY<~OKJ=7xTMUC1iP!^TRWJYp#a*fqr}=l_LUMholn4mKdx}wHv3%j>*~6lwAMm4irp6+|oyfkSMN0^- zhf0|IFZ!IEq$7#DC;YOd>_dy!Bcd zW_~TjkM21*p>F&qHrl2^%j&#Wk-uwRg6*Znk1=Rmtz5@F-+-~*p0%++7}e5XM2RFU`ZY9k zV}DqjYLZJrv4;GHMX0{*_s7+zBKa?&!sJYrf>Ijp$M$cU_=t8@mfA*hJxl_E`gAWY zyFt%3(3hI!wrkCLXPepPabZUCx=$NV7Q3CIm5}h1;boF-$A`Gxn9=RCFz^g7@EdtA7+mADZa`5sFKDAl7EB zA^C33J9bF+*R5cq+8=lfKh`4y&-0qGUTsnPCdgVB4S)J-BMZqKttGlzOX$3mmM7Wp zJ-)HGhR9j?;!_G9b*8>LX0kE-g14ixN!p}6L$a)fd9BT~COZa7HI3jDx=Fc$f_Z;X zzJK5}A|rEI|K#vu=$-=9NNH&9L*$?q5o$$y!&BJ+*qM94gbbmQT|XMuUdVKhK3!}E z-dIanv`yd?&f{vCn=B3w24LLq3m1&DZfgo5lqoMhTmN>B8diQR=Bsv)n^EcYS`D_D-Y2xEX z;0tB{=okpd5xJJas&$&y5_cPh5b5=aCsZeZ`2~!Mpyhkv&3M$whkI}MX-RHD%m%g| zv-8{`?9NmeSAz$|)p`%#^d5dcP<{Fr%QJyfb``^si#C=29NU-(a-dy%TYy6K2}rT> za@eUmu-W?N9`_&yt^hC~7EW#YN##VE4SOOk_ z=Lc<;ZR|3kPyo@;L%5_iH4$XYh<_otFq{p8HO3{HDg`M9)}mGcHDr)zGYSkU^4qMzc~AS|Eol)Q z{!;iS-yF?nik1ch9gooprPk8kcgm`f@(T#LTcK&~eaPBa`TE!xSpYwEUp|@$=8nYx zt}VggLWg`K!A7uEJ!Ry?CRvFv&%%)iI?EmMkk6K{CMip;qmXU-f-1w^N3BSeVTcyM zk$zcWE`9;K(!b}|dHfSn>UFslen&A(ZBa=le+pH?TUA-VgBW%$Z$J8lJ9s=tii+;z zGhZVdYNNGBf*MmXZQa3}059!&d3@_wl%f)0EB?yLcF%F#8r||DXI-2bN;csZoIa_~ zub*E0`eF>JV56?KNUna)$7b@ZxRynDo2rr9+NG@X3#Cu?Y^@~GY$4R<(yTCkX+=d@ z8G^QlqsZZ@G7I+Z^KrtTW=-UGmGCS|&BIpPR}#*q?rdgXz^kapd4kqJ zQ!CgX?nCO{_H7zk*4FI$Q2old(?XGGtG{!=-ASCPUWl1V=u6kMlhnX3!C0CKD4%9A zFaVe5y3Hs1$;j9Of8I@c87sdmm;g^?)ppA`WiEVL1h^IaB%O-QqJx`ZLSc{_CflaW 
zpI8eOrTt-Jclx$sK6BY7;X)RQx-Y`NGru2A!@hp$2z`F6A9N?+|43M&SAOd&c>ZyISQ)Ny9izsxRKq(C#6w`+ zYa%ak$)0?|n}a+6%wR4|0>ZM%#!m7^hD{+wvw2~!eh3?1=$h;vM910|#lmc8IOS|L z>WeHjBGxR5W=JMe+vY5fMe36%On|1L1s!u1!o?KN7IRe!7e0oAx!&$`)wugB&zDwj z;vgg@JbjA$s-a}Q2KaBg(;ast5nW&3ICwa*MF@5ROMXwGX)*MLnwj`ARFIhZT(4X7 zWEf5?F2eYmcPaT?!S9!KCi$=Bu+vP(FkeeJ{Vu{UUJuzcR`3R5yR<8)GiwfApyOi& zr#^po?Qx#A%4nA_5VA~~f*!C{*6uv;4`y=IAOCDiiX-m0bL%SB%(*=Oh)R6M3kkZuJ+3HyYE%lF#qBW}*-!{lBv zJoq+5|BztrMTkyF;#?bZAYHSU#!;(M?rmf!zvtli4dPpXx4YXt$my!8TR+sPiDohY z%bD_zS>Eey?7L$dOgyi7QfN8M3CY?W%5}@uB2_ZoR4Mw!6d#SV7i!gW`kZqoYQsd} zgYTmArL5S0(alI8{4+SsJd5U-vWb&{p*W6cRaWye{pTVq(qEKOP*!r7FMzGLyMIKzD}IpajdJspzs zK86FwC;wK46JIHD=~BimYQTQ7{Wgs7UB*vo=O&NB546lGJrn4jM2eJbz-T+h4?R*A zSDAXn;{fD`9HlsP_=qC%nuiOKp%r8wV(2i;D@*yyRpoq6B$d<+J973{djJ?J-VVe?sE-nxOF!CKBw5GRQ{4;h0lC^q?W z(E@~}D#=bts&gkKI%D+l=HpveQ=5OD69mc|M8sqVof`drp{obI;WzI?iirI>Ps_jl z$WG8I0{l$hnoK3A8%h?=shtz_)lL()NMYU3slQ^`ymNlc6hpcIPKB(O{GJU!B_~ zbjng$_c4mb@&3SRB=E#s=p>Q5_69_xWuBP-!>@N2xRHjH+|j+s!2HkJ#6SVk0B+WI zsHXrr(L{V5=a<%N;CgQP^3&)6DY2>Y($V|=S;5^LEhM5-1fQA0%UXdvC;q56)Z5%x zW8!ZJC|}fFDHl@1lmGG=b#U&&5_ln@p#e@2QqaJcyzb|t1lA7U7YZ)c+-l%oX|K?K zAPhm?Nb$|jFdNj>ht5PZcYgyET#aVEs+>~LQw7ckgn30sd{1*7V7L`q5QA45N;~0v zF&tfaB`WnfpI5Z|!)=%-IxxXS6zFR>1APIYm_Sw+Oto%)*AWo#ixL3j+-BF{8dFT`6aeYD<<C*&*R|aPh-OTEx;1B`ikz~ZA;%FAE40vhkWXF9ka>So7y=$ zNEWW!!f+lHx$b0uggrVF6fmG-$0op^wT3bAnJ?!bk<^jIn(|Z)31IeRot`#Q2*&mB zfVOL#mo)tBa^6)fi74a)Dxfi}T{OwaYihG9c%vdy_9bH{7zpTWa8yR)EX@$C8SSb? zhLqI>ruaPT7SQQufLKvqc|YFC2Hp4v`wSmfJ8{ybL}tAY_<>h>E&607OHGwMif}BF zRYa%w_J`&|akv<%Z;Ov&GQsaZDSWIzI)e0?1*ddZcsrXbKd6AXGE1-((yC)ftXXbD z8#U=9U8?kJn;~C{kkl)QNOi)rzGD0DtI1rqNQbqUhxZV&;SZF*A5546_7W(L>Xa#3 zJ(RWXDyy@ROD|8&$>ONQp_?kNPki^1-;!Q05wsyz{bufCrx3hNtP!Toi&I+FqpA+R zxWY`IDkO98kqHmtL*Q^TL$e&zpAR@Gi_z!;Ql~@3R6&i+Nffo=!%8D3Hy?LcdA&zaMP} z{3a+UK9b3;zn5+WhUvWreaP$I|Kk2vQEMk+kRFcxpJt5B@RUCudKhfE(z(e6Tspo5 zdo$la86(0x^>ZZ)Q-B;x>bSoq!UUGqE*qwZ>^R@m3BRj<&#lA~q47aL(A)uWN;CNJ z%ZAg{%Y^1Re7P9*jw z)u*n7SXg2Qdy~ELlR2#5OA=i|8aphwQ1|Tobdaxh$uI@oczZqHo&I8RdiA(8!adr? 
zk@laga+l0S>=IjKPxH~mUJ8y@O-D_?L3NMl}ej!^JOE&Lh`VU>8|mKk@jr(KPRi1$h_wl@*s5i?yg`& zwguTPl__#lY3aPUyZQQ&%xVck?dDRI4HYZEeHUAOYnf_WE@=|TGRoh!A%2>1WTH$y zL;A_?vN@;GX}-LTRTBCVhfccLdfvni|6SPnyb|r^fV6#W^63txioZ9Sm$_tT<_YB3 zzKgL$QKhYA7aU7Ty*UJ(5q)BPV^$fc58k^C569qQPK9>pI^O-PWK8{#|GD* zSiD{COJq2m`2&TPQuSKDVDA=%Bc@|0aV9wN3DD`!ze01VM8*+IeqenSdVC%ipVa3H zSK^?FH_A+YUThN)$Y;`VmEU;|u*y%f!)|nv6th8$?B(GOpsk%7xzv6v&h+YEjGD+@ z{+abisfZpzrzhs5Nk!897C&2@8^y&&gEJ4|3!C4}VsdH)Hb&=(ZRchMqP*CD-1CU^ z-Y==gL@7z2>8I-!?2RW(gi-LXErZ7gaxGSGYx%#E6g^&T5PQQ`MxsBKMi*Rz2(~Ql ztUtHXKO+Fjl39xy7wP4Laf|kru<97i92{3 zWhqk*HF2ie6K%K-kQX)#wQTZ>-8A$@b^HGpqYcl`^6nBPG{8C|H}53<&@kKJV`Oia z<;d?MUnJ0z#6Jqo`>lu5+e_7mLjD(=D*;eR^uaEL_h@U(c;7hlS#Ezky;{ZOa{Ljw zooVe7yg;-4A?)3{RE(?Qb5_3jUEVIabyHF;Ii67(fx2dnkIQB?4uh(G_U-n!nKvwB z*ebTo)z{z7fE_nhs7wccUP5A< zDxAXo@9^`^-dIZD{aqsuRPetZ5t%xMRqGpJor1{H|xNE>*)B-05S zhR(Odc$5S`dH!35Nl~Y=VsrQHj~%=lD0h+{h;dBhx)#1{eYJ_9@!zoO+1|ZQFeLIy zjdcp0`q*~2I5LOIO0$S@`4y7hc>n+}5|owYZB=FzO~tOJDV*Rni7J^|{w@`Dg6!|n zh!uLy#*e>V`wSmMhYaIW$^4b6IkuuM`NVDm3KJvwI=vv;Kwcu8-Bv@p>oetijD2OKXtbus3@O#{zABvKI<>``22)0J2`@6G8RSKIi%JuR5VPfi|&W!1_b zS^jzrV2z_ELT?N=Z%mo#aY7tRm9EcqOA;@9vUpRC5%(Q4Ut9sZjms;+=#(ruYzMmPKz#XiGMS712;w$U5S9hmP_WsLc8tT-6Vg|=e;e|6{X^CUutmOXd+suQi z4W@n=E$5;j>pVZ*o*%*9f;4W{d()n}0?K}i`@OwFRcbN1;XjkCh0)+q=yuzcx~M>> zp=TYm{~^u_tkt847nwE5YVx|w=UkrtVKU%*%9Bw%bET>`=Wf3r8!w8pRbhZM^|09o z67;?)hoRyxCO**#N{xO)W%6p;(Zh~x-w#9eQWmuiyb1Scz|nzr4H`arsl4dwT`v&k zU)l7#oL*4YRt>Vzef1I08`Waup>1-lNVbJo-%54TiNKd%ur9)#GMXQo@$G#!ZVt}y zgiqX`Qi!0vu_u%@n>6VCO{I%b=vYRQ6*|usdo4$?3c;veSHQ3=5?((c*V0ENYPI9t zC!1UB-mj2re4!8>o4hgG^Ym1dKOHbo4LxCI3y+TdDzUCu?-W*T$wmKMO2S7#Y7-C?!z~Y9E4RNsiyY@j)_5m*go%! z(ws((>Zkf8-Ol;Wg{x5gsrQJABw(2`YZ9qi-+rWRy~JmPaUtyji&JC|1*iYXnLG-_ z%wbbwdvOwwYe-7y0s(dq7x%hv@*jeAV4d$wn6!figzEeTs;nB(tjIS@Csn^1n0$-j z>txWeMt5mg6;ia$z;zM>P5*4WcBQ}g}!S|iQlra54d>D@d~2V#Hc;Hbso>DrF) zef2~u5U)qsQX{sS1IvTj!jY=8UQis_u@UwUhqRTXeZ-O8l>GrEHXt}kq} zDi`$|+sbwi<~a8)(dHKjqgzMj#SE2MkySo|MSdwQ`kj7pH$T?r2mC3 z$j5t5)gVSb3s8dLOPUB_$xyXZ>7Xyc|G<&#)IY-)IfMv_fp?WI34dCedOP&=_c3H% zvC7Zsn?W=Yrcw24Krdb*D(kvkF8X*|11SFCV=6?2K72U;%NH9APM#qHL7*qahS@J_ zwa`WOV;UJd1~F5M0?oFIhVyW97<|`dw__O!@Vo}^t+Rx*_OeLX{Ari^aST!5+9JF&mAp%Ssc^#WyJ(xy-&V<@6c<9{JQO8t;KbQA z3(b`BJFpfz*KN`qaEsIruJTDLPf$VNxVTj5?UJu6TMX|mjC={~kM=!E!=m;0%2eHR zo)B)quZTbXlL-qOiT=u|rbw6?QjG*B9aoLhYVGYll%iY2w8_Y=Y@EGw;sfZ*qOhy3 zYid?Ecbbf(U!WYyeB^Q>RktD_`}yY$pW~Gc3gy*TlNDKGx$AONfj+Dyrw}2@3d>)c zV_+F>f!?~L2SxUXEz}}i9GeR>c{#pB2xQhHH?3eue_oG(n~7s9ax+l%lHszZMlcPBSD#NIXTGm5)cVgX+F(7aoWhKJXOZLJ6^TXwpQA2#nwD4{NNGWq#YXzAn3lkZFw ziyjH(wtbDHqU`u@B?+{Utq>k}LOq4am*l?L&On+wD5%1p#Q7T|2~O+@yZn_tDYDWo zva;dQEr`b!*up{jIm6gpAd>!KoU-`LNouV(S5 z>M|QD1t6bKX509OKElj0ri|)7r^hX|yI;0Vu57_rzt^jk)FVFrks%v1csHScf*<7X ze;Qq=!DSAe^q8Dmnfn5QH%4OcLl<{OT0!GC?bZsMrEIHjD=ofXwpyAxdEdY0%vx|! zTHlk(lDy@-Wvhv@7-PpGMP&NtFP`NSknyE&eY5YaquJN(G)OnmI1_K&P~M#+tGY

A(C5lZs=*g*hoxzka$OiQ)(l^e4wKPC@02;JrLv9OP`l4R3g)p8aF1WcYXPYcYfab>*sIQ9N+H`aN|W(ys4L}3A+FvFgrk@(EwWsl|2Npt z1#7HVRsAx~(Y=Lg2u`Q??fM^AhGjEwc=!`YfeKzk#vkZCR4`%PIE+LNU1)lf-R%70 zU!LFQH1bJREF-Z$Fu~&nMfl^+DfELp-GuyVijc(VCGEF|EnxrJIj$(kq}tC8bjEIe zQHeOZ!+~0(Q^53jWZOA;m>H3WGW>?aJlr>3dUYlVQ-rGnz|+Rp{yClQ>{U}j@KSo6 zPGa&K)^dnbXGXK~?#9JlxJG1jl}+~C=%&{o3$R1L1V&MVqM}1cGjtceW@|uRd-)EG2?~OpsJtbW7`=K4ROB6d2$mNcJ~RmTUTULgxmhgHAohNeeT#WgoaFOv2wpMjc84#O%FP$f9h@LFla&MLo^|lmqTNP}L z=ON!cq}{DwQ^&fj5}*L{9600!rhtdYEi9O~cG`{!;ny|Cl9rE`KF&jzW`m z8h&xfIJ|2!xwkq1DgQuKmu5eFN*twtL3uS0R|Mm$O@dyIkw1w-MU|Rh^n-$bza^P6 z7-hrHP!UyT$*IE_YbXY(zSf%|_UUz|OS%|F3aUz>8j+9m#db%Ym(?H_%a?<>6zSAU zm+Bv7iVgT$ZIdPC5yd1iIUvZ7-Yt|a;uW48-1t5yD+|#Zw{p(7y*yv+D>F+v4yAk0 zAMx$@&(8apvnVGe@y4vRd>rPXiIupPL9c#sj)n$K)z;hsDp@a49@* z8qM^t)EXUX`Q2}cLNTVaiP}c{&|oKv3+vaA4Rq~Xxs>u2M<}3c-c1ae-#W^yw=GDz zH-+2RRm@?@x4Xmj_aXM6AYEdJ7Pw~?1UuthN66wNoQ7HG^fMkm0ljmwYg zKWS_nATb3 z%%D+=nK?=4aDM~=Sp9N~@T$dU7Z(ZGC3yeLy2BUa$>OG4}ZW1!*R<4<38Lg5FS z7|(k*7zsIlhb?}56!y0iUkR@xr4Ly`&a!^cR|1(+zXBL&EpF2DU~CK`7cz{wHz^U% zaR}f%puEZ9ewgqW=9&dr0d{1=oJ1^X0;Mz&?l|S%DXnx2M`33J(XbCf^%Nbvh&8;~ zOwex|x`5{P3|(sE5RrMw_R)4ET^Rn%Zm%RTsNnuIDp>QW5B9G&334g@<10^4(8{dN zXPRo3Le&z`l|(j>8!sNqu zoR2Eqy0}LMjA3B1Rrp-qubu!j$h$Z>PQ!EL`8#kM6Ty@dW|70vq`o6Shm^Gy1D)o_ zTn&EgWlnJ@93E@KjW(-qARq;P*g3VNw0);D5|P++>_m*5ulQ=Q45~4@T{KX9T@NK^ ziEvzV44rCho8gc;<7^<4Zrr!?ZR2=F)$uWU2lW6y#QvGyHXtwWr6wy1FHigdeU*Kg z2v1_lzYu`U`-K)Oy6;lvPOv!6Pn7s^J;GxrSIKAQZUy(bt^4MLFxs*Od?! zu`FFSD*=`G&F4-_wrnQJuJQ!D#7`U+`sh5naJwj0%@(#D^4vWmg$p_tyt9coo^t#M z(o2P?anIc@9^(^qK zb;;*6*6)hQU3cRt19LzY`mDRg~*7NP5rH%zG7Z7hR$RkNR&C0yx;AyonGbS zJ8!f)o2{_E0g6md7`@%swKo_@d!Ogi#yHi&igtF5(Sy1;@a%N$Z>*xLllw=Vn~Th? zFzeFT8AiiEQCwvvUyu$BRg^$PNEPm52^n&jzvDO17Eq~ZYl^;+5(aMjtSYCEZHyrs zGo3ASm04YrAo!b5)8}qYR7D_v5LFGr(oq;eVHN1A0Fw-2I5_I>S_}#aS!!?PW3t*? 
z;iFgd#mH_yQ|_ucHKyns_~s9;jzsQ+=f=dw~7zvf{3QL94e@y2I*GFG^b+6lL& zbLYQ0YkqxEP2XHC%c4@==*yjWcF_C-_U|%nUa8#RKFWRq;l%}_aI?`u-KKa1^crEB zM9=7jT}m{kU&)2MZ|q;9S$~XyKFq<`A~ws_x+~m8-*=+qrsOy2WG(`DK1FzG^1B$f z@uIP-YvqKCFAOY#-Hj#hd?ehgF zCSpfkMB}K;B8pk!+FPJ>J2poXPYN(~9U) zpZ_?-nH|Z{XgB^)^-%CEbCL}~w?U}QGXJM9YIKC7kON__vF9HjX@GuMlONm()d^qU zdyu)k{nIQd`?kvDt-VPsC#t3pcN?5ym%!@&%}q=D@}j_20GaHhTv1$ebWo!LUF6sF zBXPPBfip^x6AeFJWhV2UMG?AxQ(uP8Fn2Zp{j{^NxCBZ%K?C(^fzn!9MGdN`=~@n0Ygr zc5=~QNzBPwYA0vJb4>UQ|1(bG=k_@VH8obbzZWEe8`YoEMqCwRZLFDTC=99@2Jr*x zNwiXe04$S54Wvlb`E8Y;eE+}FM=O)23=(H+^8j+*U$&X#H!&(|QCn>*V-mKQnF?+K z6#p~0b3RZ`%$Coy-su|=JPl(N3&cq1IK)VJgMUxS^u-lO^Uym z-Wi~Y^*3!&#%c9fC;KbYZR>~fI<<*n-)RO*v6{+F^6md#M`K&9=O@4wKJxz_6elbs zFr$d0f?%+%AjLVBc$l$(0P6z&DsU$^ z z)a$jb_Mf)i*O$jglp>~LU?*_iwe0d&!6OD_E2bA_to8Oy>oLkTmAWz~u2ErdESwJp|H*1tw5I(U-#_?@`9P4F00!TDf1X5;#w)7H%A)^Lrn3!SI_ z_cB+mwwQkRCja;TqZARB?{GxF>kbQ-1WjI@lGulJeE5V(fVPnTe_wLL5-EErScDrX zkS!<$xA20kdQQ*FpNZ4>zqu($g&#HJwr5`=v?X*i6((i2l|<7lg$W36!U7@1WqV)<*9>0CI@2&U`qmkO&!Qgc41d znmamtt>&TMn!eZ7%X-(T#3T|~n!>CJ7CD6c##+s;Fj)*UlOWY){KmmM1E(NE<*U6S zlvE;_*FTuApRZD;n<4pzu{DE6Ni*a~6?v*+O>|upyiph^OtUWthWpB?h~o=Y7P4rp z#XuK1zK^=5W~BW8_aT3R|LxKE`YFA*O(@Tq4Gkwl$m92%VFWM;Q3_p(=*!IkTuxW$ zy`ABE*|t6C?#p-VJoXiA=M9!ss=SlhaT?H0S7^ zd^7nVI*ED3)sz#52fhcrwQzQl;Hnd1=tay-;`)$`mzRR%7$s=rJ-F)EqzNBONaiH= zkdn@okKI=1pKfq1bEm&-4CPacB0Bu9cU(ycl}*WYeCL1!+^tOix$HcPr*qQotsdN` z0_f*P220^qtTyMxMh+9Ip-#>p5-$iWnon8O5|UIjoxgu;v+Jb=?S5H5F!;Sb_wxUc z_f|o1cfq%4fDkmeYp~!JT!%n_;1+_rd$0r_+=9DnfDjylTY|fX!GgnJ!DVI`Ieh2T zzv{l+$6Ix(?)%iY7U~529mnkBRu%|BuI-Q{JLi9xCv``m~syX z*EOw|Uu=Blol9CFtqsMyP(BK6kV-q_=q&c9%DWJ~Ac7tE5sYKRkmMt#(xj#vVTTjE z>Es`!X%R(msJk&xj2$d(u2a6Z2c^Ct!GH7R?R2<{j31S$Y zxWO||8X~@+D*VS@{06_SWB6BzypXP?KRuRFF@MqjX`G@e_fVBLTZt!dhf!eyP<|h9 z>lO-Jd|;Fk_JG0rj~}&ylABugTwAwkHCK4&Hg6H&$_s#4FzUCU*B%t%fc2oU1$2iY zzbxJr+nJ%7**UtB+ilZw5Wi~WnXdd}37?x&-G~aJZNJ1lf9pB^pU(EW&4X2O_F2td zt65QsP``HyUly}%S%>qYQYhK&yDH7saSYOfIMp4_q+D{i0cCpZ>F)@Ob|OERw&IY# z!jze>PFE?zn*AitRlgRAn-X4hv^35-AK@U*L~h_FkNN(smd}Z8+R<;Jf|U3g(Ngqp zCG3*EZ&Yr#sWsmZ>*s5u>CTZBO6IZ#T;j8T+x?;O{Tb&`oMxdqiHu(~sjlar*B|~r zF+=XtZrrF%`q^Mi%dE7^3g&M0Zi8cMPWjhQ*1A3azt#NzOT761Uo(6~ zUyA>a7Qp{I%r(``ul9G}<+2k}HLKjpu4K6M^(E4hVOc+MCHy0How^D6=U?JNZu@Wl z+~wLU0B6j$AfA-uP4$Cj8H&J$)#3OVpXSqf;7tSQbQ{CaHi7G z1o>K-bQHy9lZ%{3)%%P+kb;WkGfZy=9#wf$q6=?^AxjNFq3DIml{@-v>*NO}RK zQscs>j9G?yzaguIY>+^%Z8+kZEli>S=bP^1U3+5$QokY{>R%5KC?4U^b!9UCw%W?u zehwUIc=k#C`sF)0ex~pMtn~)L5pl(;$I7~ zBOJ<0eY=BMV;8tlwS14G?+YMwIMqfT%c~t zsEyNx)dCF{*3!Ui*aiH?zc9Cc^dcIScdGhG7`f&9;Frnelj;UmZ&&;RhHq^pkEBJO z|9#jRhoiQk$zKIG!Q##un8qIjVBk$4Y#r}JO#}n1f48*-jD;7JI-KVC2hwlhpe8zI zipGoG1%2?FDE=1S^n1-1*{I0F#r%-kz;getb5o~4381%Rpgbl($#t)n+i@qr0g1mf z*9jlozkuhQ_aXmb#P66|-U$bob3XyZ<%Gpqh<6c9*BS>aZ80;39j}O;JGvDt4p-%Qd+u$odfX*t<-N|2 zpYL3vXS*(oP0irj5b~Zkg_=##w{6H+I-+xkgy5Su0#gc2)E{0-=PnD z`Z_Rh19}g=Sd;&_DTFxE^$j)*ku5WMg(W#?R{kVxWRev&f7E}E^E+;nu%53* z`eS>TO4Dp6{(*@+EZIMft+w=C)G=W%GNg+du8NE4e-1_6ITZg5flgZd?Sq~2doTuF zBGzX^;v4*ah%Osj<6P&pfNFuew|0Ub4G7qJ3i{>MzGs@_J#9Bd4ao5~J@=nkCGY=d7qQzrN;x|AYhF9k zaG(W(wL)Z&V6`%|W567F6<@X#TPq?T(lbQes>8RqWALHjZ-2#Y_eXE|@lr3uxPJN#SyEqm5A&9Tv z;f^f8^k?@M{%~>|7W)6a@B`G!>EsXKSA$Okh4mRmQ579%oJPuFQ|h?VD(bZ)vUdln z1@DF{I{e;CU8$rBVas~8CDA?EU)tx4c&#!8c-}Am6qqj-z&fj8$qC_EmCme7Yh;q^ z@s&A#KZzO<$}gGNYRgOpJ93K^IufXuWOt?(Q4_5!O3lel8!Ia$C7wwS-l>s>tlbEQ{NOW>mi<2vZazXI>`_eW2TT7{De?vwBRG!cTo8u3>(dkKvQ zz`lans&=O?Uxj%JCaT1*W095fsXs%(9P2aMWbcouV8K^V5BK7r>+wWj#4Z*9cJ+^6 zc4(`FBV5jUR_V^>?4BQ6F=>XJr&cLrX%RQ?2WPedj#c8M!(D>W$(tBOaL}>cQ&08moYyybfH7p3i{r0H)_SmS-* 
zZ2_WCk>Q0@=XpsO{_5QIKyUghgSsE4GEC&Ct~!v=*W-&J8Gx-+6;OtZ2$VngVSQr* z5wDrg2A|25fP)#!d}|Q1&Yb7Wem^QV-e=8lqf&t@YYMtrFS}~N1EwM1B!b1L{}$t{ zT09gB#-87#40Q@VTlB<>S?WZs?-cs!xwEQ~rgq(DMY_vcb>E&7MG$1f86Gm7#SZ!~ ztv!N@cRaL%V8{O^8~VBstA$%7mf~7%MYsv0jpWO*!M(NjzHNan3=^g|=T~cY?0%KM z?Jo5*fL{cwl&^)HCr+Kk@lI`ulh%4g`mu|Ca8m02Q#Rxep6HDFYjX3|^2drZ$|hj` zB9@X*LF59sD_5m6)-Wt?}X=}?U@cC!wRz7;}%%IwBITaY<4Zk}122xSk01Gq=4I|>v})k|;mG~~ z1aWn(0bDVl6(+^)EENt$Oh9CL^DKze|76gv=o%Un7qyZhyW0=DNlmM`g65J>2hZ*c zkZw{Hmo6SE5{06HpIBoo_@xs&0;xD}dzKh_k&W%jN*WT{_KeG5h=6ndCixd)X??w- zDW;ewe=Gmn%(HmjesW^`zUJIy2zkB&%r$Hf1KpR9Pl@qE zjCF04k(i#u{MlV6^8G!(y9J7&|LxTv<8HF;=vIk1%j?YVd|~CD=o&ko&b(E(PGIwTNW>X}Ho);2@!yoNX~ottFGe>;Qb} zkVS4hFwj>I6w<#o!wCq&`2OG<{3;o9IYhz88i#f~s_#Rey}VWH=u7NH&^4iLTu~tY zh_7gi-_S*?I&k7)eyt5wE9Ccf=aKxPeNu}l+Z)FO{o$5HjMTP1>5@ZBeeA16{b<8G z0<}1{p3p53g|m*M3#KOX*?tt&@LpEdldwI0v9IV}crmT+12@0bvTgLnpDlM_+>?X& zub!kU;n!+G!;8f`+(o-jUwTq)hRruKC1SO{1Tfb|(VvC?ii(Qb>dNguW@E&2^W98Yk*|Zu7 zY1MDPmLEz4?2?%n1~rpqT4J$(wr)Xpj2=^-yF9c1KhA8y>A2B?1nN|6(vtSvII5l>sD=1BPbN*#YqMnBQ)qP^ z)PbUBzEN33l_wT`s3f9Y^zH`6k+s_4rWNII1&4V-!I{-&EKxaV*NuT(E(O^AF?bOs z(U(r z9DN(%?YSX(C9SknVSmu zGnuJsU2zC3j^Y4cZFepcRaSqwp#K~*@1$gCn^^PHzH|3eqroux$20=S41r<%vkYb7$H_TA?LYoN^~2kN6{V| z8u+X^YGN{WZzKwGcO8fdnovJ5r6*Y>u4131R%sB7$#1z~h1fDZwl1*=YX`n2LQ3plu58ePF@wv=1IgS$*L-5+32b%m|=>$bv?d3@ngSnXM)ejIT0< zLO;B)-40o04~b8+^`4h0eShhhTMRj>kWV2)mCP3G7Fg$jXX$L?^z}#!z9opwD3EH| z-oRu?QU*?aKv!c=0hUa2ys!I^4cpD&?sdwe?^ioyv3d{+km{^FP;v;*XxM1X4(<^X~^7(Ek*Wk}^5%EHV`*j#H1D7ZX!R_mdI*g!r8 z)XnF`NBPqcy~UUOMgiDT;HHe}{!d3?&oA(yBn zA2cG2?&`ufyf+~o;$Cl-0Z^9sF&$!hEDZynANOqm>d5U=x87MG;DS6DeAuJPwznU{ zJnLH`efY3OYXA4wSK->-v8>F%DwsDy>D9+?f5)2Z`F!+D{yf5VbYrB?fg6Mi(}*qT zFGrs8E~5o-DyD&H_o_ejV+LQ$9fOdyI`2qFp)Dug zlTimCi+Or<=xPtCv`Sw4TW>4wE_jCM3@Jr^>Z9;ulPMkJ%C13ZKBKEHm9-J8G4R}V%<)&_TX+P$|9z zn8xEoAfBRkTFqv|j(GZm5d}vS8>xL4&RYcmqwO)Vg=%pAV!}M_;RkG)Ao2mtj_&}Z z&72rV-eyZV?OufN=3td(*gm#vePn0T@F?kqT0Kf;DeN>k^Km*sJRHB|h0V8T_oZ%=r=g{AG8?@oDfDPh8gC%;DAo0`pBHy*wbVW9&d#3$spTVl=w(_Fhqec0vh+t zD=boq^@(1DLfqwiP{)EPS(bjA-m9n0CgF zmHZN+gF!hFhx3BE8osf&MN*K=3WgtzA}%bTfYfuZzhSzwJ+@lT&4a{Y2exG>&XHm$m+$6aK+Z z7PzY;7{8HdsefCUY3#7LP=cMxXOSeZSdjy;;j!NAb!Jg~t<__xdMz(YbJy zMQvvHL&_hKFKm?!KKz?NrKUP5#tu)j4r6m)FtNEBkWYR`Cri{g-m@1;GizQd2tb>? 
zbI6c)y@2qk9l7QToth5-hwz74T%WE>o_`Cqq*cLtg!RBJ?gX}eyWKJR@tamndgpPQ z?Ul!4m{f+!P_4!`Nb-3@a3UwKAWb!Nc zTT;#x2Ae`%Q?^#H!@_%$G1eGY)|%j9%(5#7yQ5Ae5f`&%sB= zzv%{hOyihSo&m{$$$0!0`~|BlBdakUw|%JLTBFC@vD>U5KIWyPiyJHk`blNZCL27r z8@;l&o`VWLd>O6rlhf2SKR&7k6v-Ep$qC^2W)!(~%fH7z6!k3j%bY%Ogww%DVu^Rk zn1gv?f2|8-%BO_y8Tqc@`K?VdRApL2bd^!p_4gYfD}&#u`z2Yt0lm>nm|nyDqv?)1 zVr3On4#j9RXXIV>ZcYJT#%Pp-U8e3JGo&3Z8CplisCy@5Y!Tv6A1Y1@Di-vIG1ch= z{{h1$O$QisIq?aPt-|ST5pxdu+3WfsvNZcH!JuO8 zoVx9Tno441RVsXs=8ur7>-uT3wmiL`*SI^#k74hO`jcJZs?Y0mg=Ph@!TNjYk(OjS z>^E9ApsfW}3_1W*;B9?HCG|GBsi4>)4Y_`DUNJmQ{Goqf`ZF`&@Bslb|GnlfZ2tc8 z1}3Igkrps(W7^P9(3(9TVZo^FS2(#44U8>+Fupz->9h1!PH;M8?)!yB)(?T$1osXz zKIu$vA)k9JG-3h>vBCte|JL;@hx>(YAB>BYLM&&*GT;3kxU9OgAz|5zXKZf7*0fl2 zO!(+7nKFdw^3>>q{Wn$h$oeZRk@tulpn~%q^agtfHSfvmNC-Wi)1uh>7^$Fw-CxSwfxjMQjVM4_v{!I4n1WUEJZzamNaoUG9XR7pG53@| zD{W9lR)%B*qGX|czh|p>1i>*$wE(BVY!;u12sHFiIcgLnU+-tNy??ZAg63;av#V|- z8r#G^IDb2VT#OXQ=CHZWBI(vmtm;x2koC;LG!+uV#R`D_q)E72r!|$#cPF9p{irgr zU&c9Iiz%XY^N3UsJxhUf7RP@yGQtS{RrxOWIf%cqHU?T=q`LTswitkp`-#N4Iz#tuc)Wn#Z;H{tLXZYCN z-;Jn3-2%6ew^}GNAnPLrJoT(tA~@j(ADWkj9+?V6H34c0q++RL{yj^U@iCV!-<*To zgR?1w58tgL@<~IZynHs_S8mKXc5ehD5qkMWn zQmv0v?@Pez537rG_?0L-v>ceUvH~0&w3M!i#k#w_fp4Im$-=Z*nP2BMh~Tl{cOf-2 zEgEjPMcv4i)2{dyY`>sjN*sGTL*?%M- zR=~cETXn>U!^CkvmGi~8A?!Gib96GVB5QB2AhV1^CHMrJq^sf3O=yHU9T-NQ3lAgq zTS_kEtDjq7Z;H;zOg5zuP?rWQtI{@JqE;niC_!66=)>_{jPEIrJx;c;VQTs*`6gP1zjDkhrR`D~#$au}hjg}uGXmdS9IlR4` zJ;)F}0(Z}iea4XTFC}&8+Yvlu`w!p28=gf60aZf4;@B{>T0{BJiER7JA>HF=QGRN< zlIsIE1PDb$YMe6zajM02I+u%HK<=*{>G)Xq%&|ZlrMh&R8p4dskMj+fNeCxKf#pCC zew$s6;UfIU4#CZ3{f83&`NZ|Oa#C(KzP7fQjii6TlARQ^F#YT(>v2{*U-CPY#%5E9 zntkC1(6Or_k`F3nQN{2r{$y_d*Jauxa5!eWb>=R=n89@t#$80AE(}IiF)3~dm2Ie{ zP*(UA-*F>t#Ub6bzx18n<6iV=rM84<6JD3!J0cRkPAL~ld$tcn-Wb}xTpF8;i=GI0 zT*QL{9`0hYXsTJwuPx32J;jf(2>&A}rIp!a33k^!BYdS3+=DdSjV;WfVn#qA=@a+i z8C3$FZ`K4>JY+SADW4`%X5SG~K0G}1Q%#vRZps!Kb=HaCofY#8{drIipQq0(U;isd9Ujm}>)z-_jhZgKQd?ZoSG88P3f*AY@W|dIGUvs3XZQBL~ z@8xCog59$bv(JL%b~@}#iJVHq6LZ-<@LJ0gu2LO8Kq-s2WRVU}x&1E+>EVI813=l^ zfv_$3;uI+s5gzol2a#k7obOc#8=(Mpy2teIVj-^~T9U2pL{Zl^)@CJ&^)XEYUiPWg{jdGgEI{v z#O5-#?~B2Az7`UZ^P+-)$y{isj`dT^Yfn_-*cj7^Vxky>{3R&^C^y6_7wV|-!9GaE z&|!1k=%Pco$-eHl=Ox-%yAjaJQZpQ~>hXSjQp~FrDUb}qFnWBZK5{4*2I!npuG=h4 z$w2ov!b=K3PFo6FD)uF4$(;*+@+~jHTS7Lfkd9PSkd-+Bnvu4+w?iyKf-36zvM3Nq zMF|jj_NBfXh%4n>>BpCaM0W^Rsb{(`J_QF1CCpriNU+prNk((g$w~r`$m2S)aEQr z#!13$(k;#anbN=^J4W?G>l&{Cms@HbGK8n|U05))xc7Cc8Tij9*DNS0zO>qb#mTF*ZZB+;NG9Nqoa{}j z+QxP2Mwb|hYf~26P2XYhr^d1k^&qL`3@wDLl4-v3LyzA9kd;$gXv6*0iu8g$_o}b$ z>os(GjH&o5#|Gd_@^yJOf}U9=bI@UDGzHeyR|@ay6~0E*5WGJ^BKjX6+JT4$RQvkV zz)6&lY{{>uiO6RT)F&MFRz*W19Rxz`4?)qqLmS;IYIXHeJKvXY_0zA2w1!21@2XzD z{XvVnPILN#u-1j(x4o~42??`vX$uh+bA&j)Xym)`+7@eT372Du zZSkb02NR@Cg$XnsAo#QqyJ1bPl%Uw{gqPXj{7k+wgw^11`!CJW0sfcPRT}!*Qu}KU+wG4VSEV#3#wKEHmlyIh%kQn@T7OVe5uYmW zdnM|+NWaH4l|LETxO~LUuCVpbVP1e^(>O@Hf^58IoZX>2ekmJwgx`0S<$zm+#Zq{u zF1PoVmj^UXS0MC5^oO@-$8J|TtWjgw6gaBrxr${X4Px~Gv8GWQDiplWz18+N8*(7Z z4l{lxYmn8)4GLP+`tY{`RmgNRfu_mS2`YaptMrqwNpco{)#vi#fXi}%D@tF~q2aa* zdGC#Q6m&KG(5S;goXlqmGPP|BOV8XoO5N%UFlAVUH1S`9 zz({#B^xo6&jZYUS_2uZ7W0<(lNy*Wp^+VKkgk2ypG>v^%I_3d=KNaUZ23_1Jh@ zCP0-`@M<=CNVQbGjywdeGL_cJysW`9((zJeXwChKc!F~82E@n|R?{cx zb?vMZuldZz+4)I3pa>ci@&v$5)LQZ+on)fsRBl@S%Rh}1nt2ZHOBKP6y6sprmI*Ox zmm>_)eh9%?H>-@sOB33?GpPsTcYLpxt*HvI7~@ofYnPw%J+~^&2U&D}2*P!uV$_bb zf03acrZLV#>cF+fQwn#mPU(lx0VB|#sy~lg!HNBVtG#t)#$YP{5*+9~lcu3~0 z=uL6`cMph7cN;lJswTlqVI!7=uh@f{h+=|T8=F%a(XX@J*WC7xV}HLy~u!EPEg6IWgzRZH!Y`c*2Qm)twIu}!6H)&Pm4XL@I|N`jXz>As+4soXI| zpWl#3yt91THOfCbp;xoer>+DO=C-pXh-ygw9>t9bCZQgtK*|az&>p@6v$~$$z_0}o 
z;zM~$RduF*jgV^j*5-Y5>zSfDne|!nd@Z4Y^o{#^KM^N^hH|3~z(h?^IcLiQ>tYsRD-oq!H=53ep*(ZRO|g!ou0lT|X?i-8!;F}l8%5mC~U#@&y`qD}{Y z8H@ZG<9qmi+H#%Fnj&w}T9U?#k*z*H`dt#ZU&>!FDyxFosytA~%hTpe@MBrjCm+GW6IrH zL>O1L6}bwk?)E}KWV|_w{mU?{@jmD*hb6*72EGpq zWeioK^+0n4176Wabzs*T+M@k&u>ktK-qzFot-#ej7Ix@|pPIkn|B>mIX*}gpJc*`` zKvRngml6#PbL7^BhG`jxFBZeuvRT`zI5;e`YQ7@WC5Yao=Y&O$pZ8Ni1A?-f(3Wq5 zI7vsvBv%4IYAC10;nd9$V4zO!<9O`5)$xe*p#!L2oE4Qai^+lAPozQ7%Q}Q7>g!l5{JpW>xj> za;w2BN()B40f;cieQmoF@(f$J^m)aXdOIz?zslcjC)q|L_@3sBgvL6z=g_8?R#o~k z&2uvh7WB`mV=gMXm8{~N+Cx{ZLz6e0U-z;MlJaRkw=k^iwZ#$1f6J79bbY5>Fv&&w z9xGliR**CoqzuhUsh5j;>_!_Oyc;!c!54M*8G1n;da~D}Gu3UkKk-a7ROw7xo4I&sz4xBn=(N2#r?QCSzyT+{6wL-sx{}3|}gAG+6*LvG90V;GU|^7Vz;ei;cGzT%T^ zi=+)-nr92q|BN>`jP^a`Aj5II%E;=Q235+9aAsR%NxeBlM4^a&@|&+fC+-g0nB>qJ zb}4~BdO$4RRo!$`9&Nr<-Kk^)#Uj_-j zFGM<@_9K{2m0P0hz}T;wtqJIBBDz{id2CpuCQe!K$>ltphoiG4 zKWau1Q_7Tg$q|?*U+u>JwM+R{OYD_o-Wv$e97C2{b+KynghZ`3N>vdL6dglJPEjKq}VkgsqtZJFw9wGpSTuZ_tGgPz9du9i}3oTdNe1W{MsMZhxsdjpWr_7M+p*M0&0?@fIpr%Vb*8HwG`pML5 zv;!F3i{FEe>OY$TUrF#sUh*Ond<(@Z66@R>$J%GcRnJ@b{! z)CV)It2}ECv?&{aNCig;j3O8%4+NTyV%u)AEpi6WzYQ3I8TpLo!+#55^zsq?8r&%~ z2U6+ny%{fHly3Jwxf>l4_IdjMipg?kPjYPH=!ouPU>L-ex-}HLsB-lW`qrYB7~Qt( ztXb4CPsDrmxTe`PlKhg0d1qyr?nPX?=thpuo_3W)7Z#{E_ylTu;38fW*Wn-Ia{xCrA@X0xt`d*`=E`3zm?C+9$pC)r%DQIze`6 z04Y&NyX`Ut)AGhU|F9YtOD_2)a+U3I!QhG3Wg@1@M=i8DEvvM{9LuSaTESH}Bz$XX+Ez$)Rgb(1bkN2J4taqUJ&IhQ~y| z$Rs#Xpfr*y=2t`v9i}|(T7jN@4Xx|$98OG^sT%edvryA#NdCnTvnrQj;Aoo1kP{$w z_l3=gPe?Ui+AX!Iog^maf8nfe=UX1?iu@qkcvLpKbTk&dRIYCecF!Cg7RfX>NLpyF zfxj7hVnM7)f{ypf{snaxpK0|VR|J1;^}iKN6BJP%9TI=MEQ{tnkGLl=Q&tE+Zh(k! z$U-6a-3?c?WRTEZcGyeYkF-ir9}mPfG2>ic&y8ufc`hlGn*Rzjh!K{K%Omt9vf8jn zcqLmF=(;^p!S^xMkAMs8DLA>>Bhnrd!YPK`~&E&AyEEB-j{c(z%hw*+ArGP@6G7cX>s6w zf%L=+zC|4i~5*cwei8<1O zqtA8_{fxkgQocz7H_m}_QDQ-FSAeQB(Mhbyj-D_BYSH8}!+(ZmdYR7ZQe;B?=%2TQ zj8iF87Le$3ax-XTwoYgrM_*tApf=^5C8P^RD61=fEcJfbcD82}>o2-f=<=%7Mqy+# zNBV{)^^^UXQKvC*WXuNr>*!-2jkt|X0#6NfS1cYaECL|?JWV_cyo;VCv)=i_7~+AL z4j=pRrvD34NEsA{|67^Uc$Txtj^IsX=2Z_eFzz3%yCHW%;61U1rUO@a#c5s3g8)`X zS)reIfSN~Mqy>BwJ^_ocxhpPLh{b* zOu|k7$`d7st1N17S4dWF5=?UOY346C_)|&Nk#VbN>Mt{l89FcH?{T6a3vOJ;?q^sO z7AQNsw{(+G{+J(b-ek78O`@7X;IpIq%=&JD+uy_gutuc7{W|w2toYI2jh7`b>>0RQ zd8zbClgLhm$-+;Mv1uxMX3A9;-Ju)i#w#hE?}m~<{Z1H}2KZOAf^JE0`p#D2=GT*xStcmSo5aZaaK~*DuSd)%1jeXp2LeBzTut}$c(o zN&(ZIY35{W`o#C;ujXKm>e5d?m4E!&f1LfmZe16$5(@ILhm|=H zlI2Y>{jMao5c;e0+Ix3;^bS=;FxpI3Xb@aiN&p@{~?|@~>XK)z>M> z{yxe&ikXd%>MqJ|8Q6C_5_2pV9`@1o4!E1G67)`SEcq#Q_?ZSVtIj!s%?ZrW;4#Y; zl8=7%?7ovQtjs260`s?(*zxWLpnk10Y1BRV2tSxWzJ0k=&q;-fg>WRjU$vI?IJ2h9 z6ku~ZrT*!!o3z?rN?LYTv#w#fkrTkEfpjn&?}-8mdaC(s&jLR^j>iC-E}W)RR=BHc zzBOv0c@is-nmCZ%R4BLZda!qhn}mKYe~-IPWw-hjH=-fzezg&Y8Nm3mJlZ@?%CNkk zQ}2^p-(*TUPFMk4Y1T@?;QUEzI1dagn}(;s|p&!tM=I6TEG{0}sQRo^I5fSbmx{-P7{OYaWF1x=2pOXhgX`SdU_zQ9bXpMutt~O_jnOj&`*L6{D z=PldviNdT&osCPaX%g4cfAB^=;~qC72dhg|u~{l>0r`u`M)!OKaL9xGZOsvFr1Y1+ zvSQya#hFaRU&fE^#73d3$Vvl+qTl3G3cDFRYJqM!K_rpiJ|J43FJ-KTkNP}_1_nGI z1BAmvb^E?twjw>7$k;GN8?vu6lVFVqPzR=b0 zlLz=CGF$nLYNVC!ZC0JAKF9r?*5^DI3X;1z<_G#dUN`jjAXXpxfRD$sA)Z?E&D#ngeTVBD;Uu->h@ZiqC zH3nsP{Vs|z^jbrM_8hhrR({>%hddw?K8(c3Pwouwo3E+|Z?b$hd z)PZm=NkifeeXzW~X5V>upS8ag^wL}XJlDH&tSYA5{wnfl|BDW=Tl@)UzY8bq#^Uw_ zW+=Ptc$w%0fu=x+e_g@>^P#_1Pd8}mW46ylnVb$B$0Dcz9HRGOaf3s#K95JA%)M$e zQV)H1rdU`cXCmHCmR*kk>~_2zu2pn!{e&-n=`0qqjMc*s2v3Lhpv7(u_vd{g?_cP9 zZstH+uklWgLdb>$A!IWcsj$U@dj9FyM}OeEkoO;; z_QQwwCVp4Z2XT7~`tJ(BzQTF1q+-^rHGpN^D#8vPB7zDW3SB z8|)X2Px#r86Ll;);SOZyt$K37w%V9CAa?dJ$mVmM@wNI80%`CLN4V&MZzqC%2%qjM z8~K%llnivoSeod5AiFRq>7T3;U%^>w#ZA=~fZE|oV%qdr&LM?cq*M{H&Vf2{rm>sR 
zSp)|8RY)2vAMzo6jFD;Hie!+AfG>+ki+-dZ-Ig5F-3CrZ<#Uq}?ED9yqk=7+@gG6( zWXMu4b8#?!J=2RheZs0;gV1uXDVRy%Cdz6YY;y-^FXXnrKA%9q^7}GkP%t!1e0?ej z`P5;ekr0LVuL$P3%(Q$PgL5Ft&A_X#TGt-h@XPxhWk@`GjMwuyi0a#^>Ed_S^w+OR zEdA$N%~+&1PcN|wy!!08L+Xn)l7rP9>j4xb1bYKd{MErCvPsw=f4$GZa)kdY3g92x zJ?wVNv>?+C7YY}sBMT;122n8Md(7xy)LnHz9%c%MG1Z-^5Cth;O*zyi<7R*{k0aIU z3v$4`^k5T=VHC>R_Txv@lFu(9T1r;^-VlX=1X^}2ZBhPTdI@&rnqkEB%v_QdjqHt} zSx#b8l(5=lQV8oO5isnGl@cq!wpM;#ZH&haG`2YMf;Lhdn5R-h5P)KGm*8|aYWjN( z!xrZ{ki4+%%@JNGZ|Luu`8G|!_Z7MOGG&+^Il-5e;ajeJcy_8VjtO|!I=#5*zjhFY zzyFo{Y{3a%`?M#3r~ejgX||y)aEcHE22A<_P z^}U;>$r5k+u7#{trNkRUWhqPKeE3Lq@0vcKA8LQU8H*i&vKJ$2D(l`Slv*ZK4dA2@ zHCe+wyazycqn*ypS9)85DI>-TfPi?{=V_8;z_=%-6}fB)O%$k)4q`O>#gHBNk#A5R4*rJNu}IYG{^pT(%_N`8eBM|y1?;0&p%>>p|XIh_0b zev?OS;VyoeYX0?VTpfZkPjP^iDsI7y6WZOTph!_isR zkAO|f?4P5s&b1q)mSbmAxv7u#SjxV4(+*cRV(&fYujPc%XTH*+S3WzVBnxggvh6`& z>BAEFfT6h0o3kIJh@(IosKR2o3mD9%zW)=iqb*ht>#chO0H1<8(fW`5@JBnXuE z2O&HQT_uK|KjAqJ#2A%)RDzo%e|N`Zvy#E~&YaS77Tfu5?h&s1UXmbN@n`?2>ihZB z4BxJ!@A0s}!l4=(GRn6A401Mh#f=H-W2>J2ATn{H@v78{ZX0RQmgaT z-wR9uNUG>;+Vta~lDU^WrLhwE7^Qfg97o{l`aNuX!S}8NYLEvgn2urGqc|ySe~_ef zOse)|waHUZw^1GnDaPXOInH>Ado%g@x7KVt?fqP41?MR!*>Q4Z%4Algd+iO()S2)_ zU-*NjPkJZx;G}wa5{I+xadtoStJ;tM#noR%we>~Y!*E+#tY~q!7Aq8Y2~w;$6e&(| zEAA59A$TbkC=_@16n7}@?$Y8AAoApQ|M$Kh-Z7FfGEUC6wdT&r-g~aOG?p@+>Imdp zpr@meb!zG#NYP-8LfisK{VsQMpRW|KQ5%gYi6dJJ|C`5M1l*e*g4*&m1t!P5;cXlo zzCwA1L*s>lja7`S^ltl)A1Q*;9hME->_vBI+WZHV{PWAFPPe6(PrlR#IuR6JYEAlH7vFw2+!c~iT#Si` zLuW;SGWNBbWP17ALW;9C?P|VXR2~vv`{sW#3j2OBFppt&A2nM}*&r8Sg*SL5oHBMu zP)30#Ynd{J>(swV(&FP(;W0~a$|6sFYfiN@>bh7<)^Lb-DO~0)2s+9jx@6KwHyJX$Dq+RlOI~y$i_`7>H(}dV!chh{l}4Pg04mt{ ze_8-U(W@4Bo}?EY-WBXGu)p8l$KtNTHYy70W<^GiLrNAUjJ5L0ha)c(y~rbd+zcB; z0`HbhcTf+dlRHBcPwIRh6?0}%#%NuYD~;EuG8DhKOR?g>1B*7g_Z7x*Z3S@Q?{okA zF}R}e%#cK8!mCeZ+xIG%J*A8Z`(W`F-_W$ee@pSa%yBpb38UFepa@Zb=l)T8Af62r zn}h_VOfB_k^AG6s4s@{NQA(DH3f$4)5$t=fArClR=`}4^VX`7HnakD||HSJ&2kq^4 z`HF2Ggto3l!Ze6ZmjpuANlD9{lNdFK4f+^1POZQ7;nF34cwunuw+3JyzbMf#E#&w0 zl%87hfD8Ffucta)iGyoZjD>gxgLatYU09^i#ZB&2bQh`mVf&!EwhgW!ypbt{D*%5z zeM(2GSNDg5ua_Z1uV&MQr3+6Ku+7k3NOTjB`_!?zR4)vC|;5z<+% zd;z1WV9@-SI>3~$vPT)Z^AC+1#_5NH*cG=Kp(%Frsr`;a9^_Bi5#uiS!KH4% zQnRO^yp!^!CXFM$L*0;Hs&|(b``{;yZ)DvA>d)_@M2e>Bza8=fhYniOhTvY}qxe8; zj%4>p?zQuDufrf=zjd!m%prz@_~~JD@<_#9&#>8bICtH z5DD$rGx;Mwa8#c?IpGw+^M<+J+9yobvPy78=Zr*HyAgq@2>Fr>|#h z&ROrImn=<0icZud|Dv<8P*w{&naL6{c;tgu7p%dmKcKj!#{ zb!8a+wYndqxzh))g`JK$wOwqhd`%uH$MkXZ03<03KTRTKL8y2VC^4j&8Bn_Xay(BJWQ-bK*FB?31}dd&1>{zT+`&-|mHtbm~E{N~CYTojtL04QnoyT$`k?R{L$)Ye$hcgmubVngjvNx^JdNXJa+>DrOYW zM9;sE)Dj}>%(IUkPp2e1;3wLsRk<^yJ@BII92K4!RJ?-ObSGS=I*`mRNcx6T{LkwE zs3EZ~)C4}4zb^lyw{0hdwUPOza!|32RO0GvZTMF|!<=A*e1?(A@X7m*g}>}m0`(TM zlfR;pEH8`V43EGj-&&%_HtNvJ;K5yvOrfHJYYw~1FNrtK2)8sg^^PmJJ(`tz)ChLl zV%&wP`D7jiR{|Lg$(RDcr4eW;GP2q#PBXucJijm5$P_FbhNIm(h^ycliCA$qR`d`t z^x=Qq8F?=S3jSzlPt%TBIy57Cm52%3f3xbqv^WoFZt>jpNGszx9$K(jL=-3hj6T^& z^H1o+QTq<6RhE)yUKU*jrOk7shqZh`cjG>9iy^^g$SsRqIBZ{YoFUJvPtbRYLlYNO zi=!HSqkNP?&)n=&HE{A<7~y~Mh;~pOW;-V*kBZv8$P|qvb-QWcQDqW|aMgtQ@}CdY z%2vv&sGL05{Kb&4Os@Fl`u55$l+%rhVPU9LlFP=BmywPv>aJ{&?pTx&yZj{!JBybs z2dyCRhO>BAN8icGZ!x>}pTztfu@R2tp_qe6K#o_S%ni=Y%*~<>U$` z7sJS~PY-X+dtDM<%^tgt-?FkoyszJT@pJe9tq3bkl!cZjVz`8+GAV}#+E&beoTOX8 zVJs|n#y|dSjCwim^0CiX!G6~dE4&J$b_&4p^E4u&#$5xxybm9J+Pf7CihtSKUnI@h zGay(~zcw^tJ zXeY<%fEDlR@NPrl5q*lTR`Ux~aJj9lIHBG!T&;qW+7LNF20<-;{^M;hprX+g=q}N) z-y(ccA@!B#gEN7t(iuV3Xm!Plz-D9AX#X>mP87D&)e#|I|Qe*==L31Zg#J~_6%a5iF{ zhNRG4w0mXMCdt;CAMhBPjNq5HBIq;id$~qAF&-cc5Z~|OUwNP2X;NFROSuy&+wOch zOtv4RRSl#p9!iJNRGWI}<)m}I6i@Dd{|Q{M>$CBtLQ_JmUnf!_r1?H=$K(ywpGWz+ 
zNhZz=iROD|PNDL7G3#oG&8|30TA{d>)AH`1Xz@j>K2R(Z!2jArt@2t?eTeLBfQ( zZFY)$nCGO$F%ceHx{Wa~|09!! zQ+<+c6|L2$sdCW*ZBaKd@k+m441tpWeQz&-1{X(Gy!KrW5HxP^WjI4SAp%`9OBsVwkx4IYh60g;ka-FGtv3)aXL}Iy!5*dSH(2tYC*58@`QT z+B>Nv8kw-^^{)2(k?KC07&Q1u=yiS@b!ygVMZCV3Q!fGT!Pl#Hl{efNl4{D3PP*L- z%_zWEIHp|yeqAHL`(sn=l8WbAKf}&8VbXHXTxZ2}0PM^0IqPH8y!n3hADJAJ8Y6&4c+e}( zNbd0<5uza21aXZS$_@Hsv~r$ynY4Pm3+F?dA%;G(-ZX)(h_lk^RK;)qrUdvclbKFo*Ys^Kd1L6j~^jHfjo8OH8 zITow68;NUVC?aZa$?bs}ZPE=?;b+w8xj5^CDnO`C{L@jXuV8ME z3*VNY8i;N6Z|LSXkx90~O#7pph`MTdg;_zXCzFfSa90?*9J)iMl=hV3syJSw0^`~( zO8<(A7;=a~4ET?)Isl_0ZqtXNa(jhxf@G%CUBG|y#`=qK!hOv0`wxiNl}kG}w9=Ej z@W|R9?!Qj=YJ>C%Y#}nC*s@*VphTgx|6NySOxIO|!U)b)_!d zB`il?Yx9wY;{phG&ad)L7t>vJ99~US>l^$iua`P{)E>LLLFh?deBc=Px8Bs$NRS1t zYjlyq3yAzYSSeR_q$lUkh{5n7_vdl}iQKr1^v9lT3(ve$y`=`xx~z9wo`?^+Klkqt zOleXja8OB zS-v^@5?W00?m@&DT8QC$$yZwq93JDldHdmTD1KJd>QGnoVqRTh6!W#n5+35|2}F<1 zI-a7yUeY;kUixmr7M;8Nn@SA5gv##5+}1Hs@@TO@|7kJm6!4hK*);IKEg}R)3wT$E zl~MW8CpWr!@6AZ?E@G|p{$12`7;koYgp#JB*yM8DNzK+XkdvZZg*Be%Ej8@;m$|%( zL_w6(J*PI-zEgq%<-%==oUGMyz%TZ2{gPr?p?(+rC#(h-aV{UH_+VW=)^Lc^a<+!? zUAh96%$t{DrROye3(_FE9ARmezc_}KP8W~9@mRA8%p{a60O*gJ+!7~-m=Rh?+aj?H zN`{AS`3_nRzzNzpsC<3K2r^FZzJUJ5<}4nhCgvQjX<$xZ%Z<8VeQh1ZBq|8 zDhMsPiv?5t0`^I^_q=d{B96d~x-sBNeI%8;x}NEYvbN8i+AE1h+zoR1e5;)%x`IGg zb$$kmFPGWOk&1B{*$x?Ox7UN|x86lVz0EscIp{D`x>WS!N3UMUjQ%=V`y}8~?cqc_ zA7+vJO2o1#GA1S9bKUVn)JxUB$H}8?V|E@^VYL?>-EOQkA9sUg>U8h?hj}rD>XJgP z1yKaE-%GT1KdyMA%q1t&N?urh9;IFy>?-S7-Aqx4G#b@tFS=pl5>dC1KP0#<0n!?s zmifuQ_r`pW6t)?2UyE<0^Z7o{KzppOhoANy(DkP%hmh@ic{sksn1b)5c0SVELO(yqXV+z`}5{G~e7?)DufH4NWLin%H>ZWSf9R8?-YxcsmQ4 zfTiUc#&;ohzvf@(F2LAG0BGF@% zP@c|S@#E}}y4Ct*3(Ds0=a;lVnC?TgOQ!rBc84~w!cOUr_&qeL8=8vI*L8gk2g&&~ zp;ZXlp6H6`N8;tf!?Af|>#m*_;>a-^0Yq;8(OCb@0|FGn%9hrbn^ zUb_k$W>r6M{V{q^c!MAkTCr^YMuQLPfPJ`+R^$oNo_kP*ZD*et_lX$JA6AS~}0yFrs zu+eo%UoRMPQl;pvpG!9$69J^Ki&{Zyy@kN@rs$qF0dh(odEIaWnTyERlV`W|JVZ-L zqq9MfCi-_ug7xbCZ1^(_YWO2Z2SlR4R4gaQ*WF=!7a}ErH4zLiXe5YH(2@iax#oT2 z8$Xrx(4Vy(ab90TO7W@hm78he1^#`G>nI<$e3o0KP_y~u#5!e+EW$a;5jRGTP}kpo z=d-9sOfGq?EREO1yr7rZ1F=5%^YCa+l~4FvtHl8q07ojZ+|liDyPR90kU$A7z|y02yV{8%T^#U7zj=p`3{BYenJ0 zzs6r#%INqJCScNqXprr%2cJ$;=QixG zot7v{z}YnJt|779or4wEhZR&k^9Q%)TqQz_%PlbaSE>o|W}$FS6A076 zBa{72&;hfa5u?bYL&X;ki9BP_JUe=!$G7KgY4adI1$puy*@?#YO#U)wL+) zC+F?+0h7Vqel_clT)@`9zlA!7T%bEb;LV`RsmtdBn?Zq3mmpv`KC5IngP{H4{k<;j zpRy%58Dyp4uZ)W*t;H#c-xdif9b2m33R~ri@LXPuN@cD|;gLRDA<)zw_+Ph@e0L0I z)D@$zn_m%KRtc@LbSlWZ)#My##*HCgDFlz7H4CM7z%HO|MeA$w6$H*uBTV92JI`}! 
zKe;=lfyM7Ez`7)|Ngt%hVvebcV#lSOj4IO_p6t%b`Mxxm#`ezuMBh19n&yvr5NUIk zcWf~-F<28{vlTM^D2nb{0xt*hN>`DPXNW{;nCW1}$|8J#0WQzA4}qo&MP<&JQsH#- zJ~oI-c4}H$f6J#Iwf+KJVn)gOwM2hsW;L9DV0LU*evSh~`sUUjQw zlvI5-zW>0KyfCU}sD%ljs|L9)TZQ*?sf~tUR0)lQWauXxKq-MtkdBkO7+ZP0#>$#c zfTnH8)xt}<&!F~a%=I6YUnZ~9FQZE9s`ii-# z$MyZIxf}W=TRqv6xQ(fxWaF@fGmFudnM!s<ixt+uJb($qCzplPo;U%KR8rX8h#YAKjbBY)eNmLO2BXxEbTxhv3 z%WV;G)k!%wy99Yf+TaaEx}Z^|a?5p%)>d1bOcJWv`2So0a;c(XEGM8h{_&FPUi)cy zf1#68ieEzt?{IY%vA#Hsf%BEjrpB4_cvveIU5(1?JU&>hfFT*>Hx&_a{831^w^58( zPN?#r&mcN&POp%cSBh+aTTLoVE4xo{%BKyw$4bH^ZiO@hNx$jkB%;|ZI}8hp>urAa zH2?_ga6*E=6EGS3KFca3I4M_0{TeyazI&`VEW`Rmm0a$vmn#5S4w(V@GJ9EF&%@VB zLbLw%8To5u4}D;(U1vu$a#THFppysHFD37hCY(=h)<*Kk|Mwx=jq+)iH1LJiNuzf( zkN+IEtK%0uW-+D4)VO)l5o{L89b&WlA$+h&B?k-KL8JU*)tUiz@X@g63e21!{>*A)KQw7Q`U2QxV zjQo1Ee6a#QuD;zMR|he^(`(D6!Z=qE`z%JTs>65TZ+!hNy7fyCO-f&| ziV|8={U^@)RM;*X?Y&=;$F_I(c$YgI$or{w*tdzdfZ?PY`h8x`F70DjXjAfE39GZ_ zML{0w>fDwdYg7BJsXNlm!jau-6O$>_1W|u0$1M>ku{_nAQ5H)vffLyz2DARJpjAV4 zuW!Of1AKUbS!fHHJ%#{@yN}hMS#T5Q0+KIJVVB$ z;2)p-I(q&NZ*_({-Cl35K_))G`YyMzH*{Br?A!o7I{MDt)~PQ8l&VX0%W0l#mQDXk zzE){ypzIWz8NM5AFwj(6-6}ZWM3Vg)j<+J%!H?1JZPoAik z`y(-UNqvtr`B0OmuIwS+dO|zNUby7Gi!P~y$QUX>7!C>kE#u|n6F6%bUnzc}%Ud`B zg(GP{K%PXp?|dul7;D%NM9mf@$ZBIPPLcvnURU3U&BibW1uL_Qhp>da2`3MXlKLc9 z1V1fkepf|de}As?=%VFEQ+|GGk%MFX3M#SXYd#;43{B#iEx8M1Och~DHuc3KX&cUz z_lB4@yO7GR&A~j%+@hEk;Lj7xFi#5@f`q(TJkCdua@rJE&$O@}&Fet>smmUH7k7AP zJF8u(u#7jdTbNTn_RGH#DwzJ=wivMMP<4g|!18oP4>h^JhMK!yQCSLd9~FiRT_PFD z!4`cp)keueEya0R%OX$8f8ZA3g(TSd_4aXGh?blA6)ke)cE;}@g^8yF!P|;R(xar2 ziE|nGMnk6Byq(g+%>{J-iaTa1s4H)k%Dom_I2|Ko^=Hx_v^|mrja)alOmP5zhEzir3AeWokyXQ+;j?)9`1HM+>uh+G%fPJm%J0<_saU$ET|?(Xbs9V%6`;> zjDqLpz{-`u^XtQnG1b3fHy!>T01rX%zQ8+u{Ri)j@duO#SdAa+${P_^WJ6)7ATr@bnBk_*$jE$*pK!$A+x-WL z(>sD!Rjh|?mA?Kp{@@++Kj@5g*3^C-f9R_@$LdP0|JwZf`LBK5N9LJ#eYY?<^0LT| z{>s|Q$Fo?+tgXNBJUbOZTprgEE=RqhlFxqbKXx8}&&=ZF_-i74B+OFhe?`pm-^Vnd zFynPLMw|Hz*Rky0dW~I>&%b(iqiG~@{mX-`ym~z9 zyeC`Vd*SJZsZ-IA%Npfl;Xbebnwte3;zt_8NTAF3vQq~~J&+YOM_k9K@e@+zbQ$5H z&MUvne|osE&-;)4kAF(k#?NW!&rKXL;>IzLHV$Nc8Y@%X|HVHYp&QnxU~Hx;*Ge|C z`-+b*X+1aA6R(ksrf^XlqdP*w7_+WSHNk?~=l#dt*1ul=o^RH%{&larZG4W$QFlz0 z9j)=RC#&Yk`>2QY4Wt>TH8D!DzzyPl?>{(CEdBhmZm(+MT??tlr|dB2(zq&ooyIw` zUW}h%Y;aRNGS~Rtw>K#CfK8Qy*v15!kHUF7rc50Rv>DhnSsUbrcK!tyv@@TNAIZK9 zpP;~RP@H<|6AiK!KR2UYR#3jV@wT9K|q0F?wpSCHAQ8*8eUXFbj zzm1u(s4c$kLPMyb$QZ6xy;FSsMfv4dT-hG}h==2ii5uFR{^ZT=sH2baTyhgAXtcE( zl;(@rh5|ar_s4=mYK9&;HqV%rW28e)ea7wjDTdAV(X& z77{~Iz#JI_%!^GNY&LOL31IC`8OiKF`Q8MbFq);H6YU!)8C@PeyR}o1N4ab_TF0eg4_EJY3jM{9ue#(V&tzf5AXV)Z8#NFW3+N~+O;TaI>-LQ+T^=wV-AO6{LY9P`!6p?rociY ztHt#nUiK*kl{8^}!g+=O55pK5V!r@JQzYp{J(bJlGhNsQjhn(CGKvgi1Xll&~( z#_w@@Exc>Yy;f9ji9d`VbK&Dsd^l6I{pb8)iPeVA9RR9x!I)8Y8Q<9r3`fPMSS%37 z8>fhAkjZzC@g2@x#psBibgn5DAbj@?ry?(qddBZFKHXxM*_7UZ-!Wo7o=)9=(_S%z zR>xU55_eQ?N~*WR`bQM{*{FL}Ebi+j^0Lp08kUG@dUQ=qoD>GiYEygzz)klboL+9V zI2l13b}69jGd}m9W?IxeO5J}O_CM9!!!~@YEFD!ljbHoUXh?;BF(Sws zB-tR_TvT0vIeCZZ{zD`1C^c3cf6+6x6?mF+u`xZ^mybUY#h{KaEPed&?T@PkQT!YX z#X+n;6EEyG8XqmRm~-EP7SxAYYmL924GBg5koP)%(mO>GaU!QNhRrq(s*U#_{ZsPJ zO7`vZKgZ8paxRF7LL+mFPeMzrc0cP+-;AkTL(^>$fk4_;*FWZ;95!T|Ea3MxevjlC zx$GhHa>zS@c7SB zAME(^oCJ}dS3W=E{ofQt`ysAt&eHZ}>iHL21nn#lnKqqxT9 zQX3_zvi~YFpMRUny#JuA*I%J=j>=T!?eS?9x_?SeT3R+;|1tl7_!|1$(6cjoFKf~< zS{Z-ZXjz^AxXS2T$IotLZYGn-M!aj-8w%CCjgMS>T#658qej%o!!5wUfHx3e5A30c zZye0mIsFq9HZH3?1Vt9b_1>a(4_BKDD$BL_*Zj}NS3Er*KazbZ-k>=7O45M zA2fZ^u?gVE4QN&+9T{A4#TD(u6Hl~{J^nv``lsz^yg{K7A^oyqhh_4o>I%Hc@tSka z!M|6xN1XU@yh(&LAfA&RhoGHD@b!(_+Jzsyus!P;&uYiq<(t|WXPnUv9JnW`hEhZN z7+@~(EktW#G@HISq(}oE7|JY#_j*G(i-knTcuRaLG#=U*$L<$GK1_!&IS63s 
z2}Z}Xt5tQe;6N!}IsinS%1G~Hr7h!IN9eVdQJrutewY@H96R&K8%=D;!{R~ME=2<9 zoC%_9s?F=MQ~Y4GTnvj!*Qc+IuXNgo^r4wgRz|eUgBm)Ri;*n-9pOVEUu=1wZ=3XZ zyDj{E{p1A)iP5S{F3PMe1MjTopV(r?qz^BGa6LE>{PP$kl0<9!Jh02MZTyaKPU5xx z9>lqf14g?J>tnQ>H?-V0k+KIMX)f^SG~`MX6|orh<5&wo^*3-;m_HvpIs2A!`mbOk z90_c$C>YGKR&zaIRQi>rpy(OCXw2YN+e)-$0Cv3aE1u3e8^1wu@kQ;#<4jaco(rl)2{T;!)3Qo#sJEOBRgvGSY06ugw zhWWS$YnHT`y4$wRpE){o5R+2^+ttU9GEQSaWqekpXuV8O7&*WNfI7cq<%rB>xES2W ztzBl&oEdt0$2e`@My@tK!L6AZk zZ}Cq8>f#y|QKlER%&7VcJh#Bq9ERrN>G?{DldV128xE81BN7a(#EkFPId(vKycZMwoHE;JnwqjJ2}HRz;PH`hSHvh90}*qz4T0b(vK z@~Cq$Jg0!oMlzp2mb?FY1Y;jm{76O9!8V^$pkw-&(~`A`{`|Y_3InsEA6C%ClAxKw zKB>*u;}Kv}4V{e*m74=Z07T9k+uhuM9ENwV3{PJ#h0N%w^+@A8C{Saheh7q5b#*c@ zV91C--@n6GJUtI@P+WfbW$ols_!Uq1>wP>- zT8VE^%aiU&1~p}7SM@|!63SGCAI6|%mE>sD1_e$b+|z5=uE!1V;rWp4ULL<^<={HP z;N?b#`pT>D2E`*D(XRXIHSJIT?9c2I6rO$X@hZ`?-YLEwfUdamigwO9uff09wuhf^ zLVMz=rI|MJI17#n|?S|JLCHKw`u-;+#boZW<=##j6?IJfIp4biFyU8WS=y>WA7 znV;fPT51F3k5go+<6_oYPXXChXUd`B{4U~)!C>e(raOf*8*Dzt_urs6t6g--#qGr7 zj&D!Fk7P5GUY7A0SZ7PM(#QM#`k%-gKwQL6g#rEa&iJ;E$tsPHzYcsoerR z$5#qO1#M(UfT_7=cD4t`3|Wx_8E!iNVataM^Kp_s7gQo*Jhz!cbTm-8zcRQB7(+KP zd$B;jh~M|$a4SI<_{}%Hiq9N%je5{h1LW&Z9pfv8k8<3y7ASDEbBAu9|9Sm`mBGAZ ze2rZOj;Qa71Y3^EgW5heYRTgJLXkt8Mgs`RATfUPgkIyH&L6rF%m~)7;y)ih=a4qB zFo$9Px)~TRISWC4eW!gXFzNqW$qDx2FCZVa+amEx&M^8;xWTvMCgUh z8dTPj8v+7Z*HFH4-bYP=Goujfs=&5<{3lQ|W@k;w#lyFJlI% z#%DtAN2%;*fvqJGgR+gW^~T2JYBI3&Yt*_{MU<8J&a3_3J(Ac|ijPk0e`D;)hq_PB zI8ORGc+__S&%VxrY)pVM|82SdfZ0FMMJQ&%Cj*^QfZ6!?0|&Y=io15nwZUWD%l+bL z3*f*!%;_EDGBacY*Jz0_U++=WAn3q?zWEAo9kaL^b-6~z(J(Id27=p2ZC?gw&(OHM zVhl!9o`H>!(Lm`^)|?*|6Fbt+X>k%no0rH+U9@7PMFmpO$!c=~rTY&?cM6p`=J;tM ztMJ(Wcy80<(4LNv_`HVJIX-bLir@FY!8=93fCB}&+=FWJq6h5iYX;^?qR7Yz;NhNT zFk6|Yz?e-yrx^DC_VZ7UWC>^HjYOGewF=SizhIru-T>wPE1W#v{PjPAP=>u}3RByN zK}`wQ>t@Chi#XzlplV5{m)c~lG&w0@3<3T|MeK?AJT@WP1= z#1!*&e;R?cSP=P+I~{CGaU1~a0Vkl&M-mjq;G)=p!I-W!46dOdB0mkNjFajc+UKtN z9KPb|q;~a}ztZ0F*0AjAI-Ep_oN;xK;2+SHm~bq0`K z78?ROHXeTrfX(M*&5u5VuM|7X#mr^>;PNvD9UDJmvuxOZX6p|p(bdFl>W+E@?`sVc z`)IVD`gkdIG)T{`is9AS(r&^H`^N&+oUj7sSRN>+7?rom6U@46_TkzlfW_vYjeD3H zd-Je3=qpc^={omk#cWrf?RoEfS3CRc-)I+Ka#1_(gyY&%Pd^>*u?q+w*|Z4qr&VR4 zTPS*-gNYsfi~TQDoum0Nj4?e!N@E$gX<8^I73>KyQtz=|3_UhZ;)z))3;U!P&L0Z| z5)G)v2*2t+!&kzus|txkYbQcGf9ms($V)yXkLSh~r|Q=NFYHjv*T4B`{Frpxf54e4 z^__je3DcTR9`3o99{RHc+}SQt9w)SwYKJ9vJ`At5Z_g$u9(bEICo=RbC657 zE~Np(!H_%n*B*ZiU!01#B;SRm&YumMgys4l&Y#RHQ(UZd%}4n&jjFF{QO{!3>I@1$ zg-I|Sea5*-AF1xR9z$M=-<>U}VFsY3Y}K~CnW$ltk8?55qGNrMDc2#$`Q-Wib^xcCGF5 z{)2u)tf4d62u3kT!AVTLVPZXkJ#8g#BSSUJAC)2uz|=KuNm$<F*RZk5fEit_w}gRXP5@$6>coyri*!Gj8&?AxC39%~vj^txvj z`+v$I`J|r|gTDnrC|_z1it(FE_*M6Sb6f1K6q12s=y1q52at^AGx;dyZtp*ed(Rch zK31rw_>qC>EuMmQhL{^T#^S8TnenyXT`i9JXB=mf;aDe6cy_*O-erMbW4=1INg*$M_e|zo0jKaVlcx-enGS7^anE{5M_y z7Goulf5j77R*Y0z(bWn}eyFDdv>4{3qSCI_Fv!p zrgoqE-nTvBr+>QL@eX%vpa0?)+PmNV?)Ha&_=oL7`0Aspzx>s9r*F7Z`|kVPr#X^zrc@Kf1!QjKe}`Kkstlh_P_@|us!Cnk8NN6(wEv>-}<)r ziYFh#U;X7f+n>DojqQBKzz<;G>Xx@`-})`z(thlr4{P6dzwc|ezV)pzeTTIxF2B6J z<~6TruY29=+JhhT;PwOm><8K_U-^6O?=Jj+y*PK^z&+bZCq2A9;D>%Bm-WIA;47Y< z^~`q6HywjdP@K{3dCz;=7LDtgaZ-q7pXx9m4qY{_ED+&)j7eC;F@B|C~FW z!$%Yx5YL}@DrWRv1;Uz+E&%L1==sJ0nmLqvtQbfT^W`(9_+~)mTK24HjNUbc>L}Bv*aGAW1@n=;e=yCo3i~S51!FfSSL=H#^NRi_w|bP*IX7a z6U}KRjlTH+1+vb^KK?C+<4T#-672imqM-{*T+60OUT@;OYv7LD`dvA*V8 za~Q%7ot`VyKK{wh*efb3Ic65#f)0NzXIfOsVS~R-aANi{eh;8oXUnqs_)CM%7|KI| zpXJ8+$N#v~=p1Gk(2nh@j_~o*VrSs6k)SkSM|TF=u!`|1mi75x@e5VA_zd(D7?zrY zfST)?fOKfb{U@giF7gnfuSCp%&jV^JGnHb}DB1Yx{F@O~aLkimTsgs`rw7dXpzc2w z$!AV!Sxg;$;SxR`9_YF!5<(W1>HITYk6_&?hd$>K ze}=nLuCbd@*E}>gxJGZl^8l^3T)DP=4^Z3tRqDA65ITg5qq9~HiVuU0qMqCC9d(c9 
ziaO^j=U;`V>_XPef#aFiKk-8Xjx96PGoZv-nr2TtYLl)g4$1vT&m==sAs?u!(sS5) z|Dm<1Zbj~+n8)8O2A{f}0tWt%`L{n< zxlA7=4*X(Z5yX6e16k)|AO9A^ai!}13E{f`B4%xpD}}J4Zz_%-9LRA9Im0U_@ibOGxT(ly+b(D~>8T{{zB@pQ%IpYQ?Q$|LH$%ceumt+ebe7(e~c=zPH`!j^EIJ=@(znzU|w-4fm7& z77<^(|Be6j8|{l<_(J=Rdw*xU8@?v#vMVod?|skv+86Og!lQruvF(wkJQ8mp+?pA+ z4_lFdT=>Bc z`WqC-9E0DWcy2pz;DF8K8}Me%t6%l1b{ROo{oB92-3|GE`ZJ$tANacq+mW|8!hSE~ zfBnfHZ-+x>(d<*wVcieBZgnvFTo^78J#~W+7FsY8QJ1m$=2aZ~Kwk z5sUptQ^}(sX&7@bVrr3lq(59AK6+1;c0*Q1$k`15LDaE8m1fsio%3M0Kooa(auSYO zB!oCRe8^^p5-pV=fI#xBAC{^hWP3{Q?+TfS_*z+KkzWr<jxH%%`w`ZVmJq+Y|tDQsRD(31SE0DGE$|urbA_cI?nb#Ju%+#F-%wx7~FlrQ;`^9 zSvYqJtoTGx&B}DntN3mySCyTA)UIwIUBQYH`9xgBp|!^MAkg{h(W%C#Im@vA)CQ4- z@T;$A&CR0+scE{Ky50r`G1g!3L|)h9BjNUGD#j%41qLcPWxoCR)Ra~Ty^PdDgH_yyx!vc9U)-3k0?P7Gq z;ke8N-CTb&$qwf^zV$ngBcoq@DRS^ac0mcT-Ln4_XO2%==(JYrJ1}A(zNN%&Cc=)i zY}t*+Kks>Mrm&muK#)i=IVaVDOaJH>clQZy;xV4(<6nfZH|z>;sMQRAm2cxJSWd5= z_c08+axlMmy<-@pr3*WqkT_gOY5@VM3dWSFq4n zM;tkHAD3*1y#5p05ySpBzLByZEo|MJ=4)C6j`VXqppVg8o@?*RnIQv2^5iRdq}k$l zl{pwL5ON;(ZV zhLH`77N5R;bxyVjDe8zGaVKm6<(^Q&fNK7IBD$|*>i{Shdt{ceB@kkJHv*UV1~D5Y z7%YB0f9OjS4fx-DS5GI0MNPJ$@baRK^F7BmAl8;<{MXkQ3V|tZxc_jJpC7B#bT$8I zIhI^^8d-uR{wT$HzsD(}o?>uZ|M=VfaXai5hqo_(@k{M5|MD-}FTL=W+P(gld$nJC z@qcVb9(i=T?z-#RlTSOXy&qp~^{<}&uiE$D|DUz5yA|~}w735G+uAFC@Aul*fBhZW zPd)DO?Y{T9Z~GEH8S#g2{G)c(OU`QF^VJ4qJNKRE z;Z1{Ew70$Ot@a6uuU>Oa``@4V#P*TD|8RTG8P93o`@P@WZiN`vUVD9e^M8GFJLlD} zY2S40vF-7Xdz`&_!8a4mdG$H%WiNl3eInw7hacY_jvvcD0&gN*{pG9Md4GLgd*1V( zhw&e1zxzAC-Htf&D0|a{zd>=#U5;r#cgD}P1NXQ``vBfZ`L+M}f456MesOy&KGpGn zANd#UmPg;RU3cxZ_=L%M?RS3n_wdHb_3cT>?S9|)ecEBZ1Tp{W@K|E8t>8*uU&sSA zdfEnQG>{{+#+vHz898Rs^srznXL!jyn9$f6gm%P-ory6%$AqCttD%jGVs$rzX*=gw zO&eG>(F*IlRI)^TbTtpIM;(6NFP#wCu#nf{t0PCb6ny4Dtl?Pulbewv%;_MqQ7^4p z4?P7;qau3F*5mwRLoow8Lx^RuK}Bndi6rwfqn^r|n@xsGD?Bv^=k&Fko=nv|b`f82 zLTyF4m{juI^)NI|#SJnmk6_5x2i-|1^ol4#sn`-=E9xDGO!MVRA2VT@rijP_*{&PY zRRfF@U$N5D0O`+#C4TY|PLCbQVFD`K8*)pl+(6h%u|nxR#%Ij6^U6H~=P=)YZfNhs zk7U2}te3WnKYmd={$Y57;;DFpf@`W$M5@-}Q)w<}$MZ)dV|?~hjNZ*qxy4qMBL7W~ z=Lt1}=t+rIsV6US@t9sm`nSk@=q(rw;yIh{Y17on{ASwPupGbDBE4^A+G9}2lab#CZ+fb(S z2RP$b4uhX}STmIztEtCvB*0yUaSY*OOoHhgGPQupC9oDg9MWVd`TBS|VuXR9(xrbd zZjgDntf=I_yv~JkJ@*QWaxiN`LJ(mEnwKia$&Iaw@#ch3E7ZD}RukOO_M}{M98{>d zmweUIN|}|dvl)uDh=j3>sfHsOyQVY`nk;ug?L?weD|0@6m_lUfCYZHjh+q<-T-j&I zJRDG9-9%jYxl8G|f_0Y4LC5j&A687HVOX(P4({c(X2Z=^%YcqwVUhSa*csZf^0&n) zu_&@^c>HlAYWbUDzQzL;DGktY%ri0LC9@ES5sxKvI6?+jb~Zc_n@-=_FRgUWo>(ZR9G z<4kVy0KRtH_xDcm$)sXeIv115d}1%Fw-FhgelF-fERo}&3~M%bP8bEL*s^Em5B;^^ zYR#_7$4sA0Q$*0^o;F=Iz&P<0D?J?`rSh13gwrqucX#w`Z)iKERc?sUOR+-fJ#7Qq zj)D$31P(T;m4`uTVo5v{TvL@IQsqoCMo%ti=kv;ujtjJLh{5P(2#c*Mb^JFyo+s1@ zqNgU>OqqR_4d?%4j=}lt7}d&pMhNHCY)Ba`8DAX^(|1=Lmg*x#X^k2u6UaKAr;*R(hcnES*QAb%}~AKd-Y1mVVT;^EGwY9pzA(i64Vwf#XMI`@0Dku zv%EO2tbccNoY2E%MJ4~`buLusHAg|#KS3d!+5oIIW!l0#I*mO-+~WegP=BWV21Wcx z_Nn*;1+11+2>o4V_X}&s8CxzXK~YG+g2{wf*HJsdLj*3aP}ji?!pA=GvG&4Wd|~^m zcl}j+%wrzYe&Vq|j<0X}24np!-XP#tBmMp#{ZTvqxZ~RY_R~*jM;+zA0l}QFxZ;ZT zf*1Tkd;8np(O&+Wztz6=Tff=-&coL`U5OtBzt4TYr`_pJce2Fzk}1E&>BTR8ar@{; zF2UDF{gi#((nTM;sJ-I1UeV4y@7(s7M?I!J@?V~kpTzjeSH9fd`nI>VfAe!c*A6@4 z$oBUC#;5l<+dek%B$`!xfo^6jJ4T7JBBv5{-AFHnaQsks)9ma|x5q z^ictETp-d~DC0)h<;5dMZ-KiW^Wf*pGRc9{JgP=A3^Ct1i!^0w(ea3j+N{XaDU^eJ z#-CYo=Zu0bas21&p=Iw20h1`D6g5mN-kp?_h25kPk6~ERmqiD^_bit+TAGXA#ZaKp zzI2uW8mi)4ViTFyN=%WKf0e6;ROW(NQ03t;#bgYQw9iI;%A8m&gxorFkphiI@7@=q ziDNX8&0nt|eVHo_oF$-+YX+6=*-^AFi*QO&`PL+WBIA|bWwIZ*4dz3uCf zSj}fwgij}w$=g0Uve%%(Nk9HT+6Ydp1mz)T&2iY-u*8+?of;e(QH3!zJ>!vuxO`4* z&PxX43bkLV8!(IpQ#=%#TV@2kU!zoOFYwAlW>|moN!k=f4)om8M-zaJdU+Vma_B!; 
zmr*3B73t8@NH*lU9Awz6vC+B2dM54tl*ETN|EjSQ6p7>Z*;C#74x9VS1qlS2JS$9v zDjo##T+YbhgT71kF>Q=1q_?b5bbWgVwO!R2Gsi)3X6d^&y2V-$Rhsk??2`!OU_y2eby+k9*L0vT79b0fy=t{X#B!B zcilTP*87n=I=L;}Fi)k3(RP4ez3LM($~C@Qml~~IdP-0mw3#3K5y!7UGSu;RN0)kE zenv6yxW-ttoYjryxetYpMh?zV>+V7+%CO!Y;VkHDpxq?TMeC3re|8!B?D1c}|Ii3X zoLC8JX`BmeeEjg4XUoFbtBm5TM9h^qtyj0D1i_|2=T92oRtxm&_aEJ%OLCNkZIqbr zoW+26cy)kP%JM_&*1#0Ki+jKSpkICb*H&0ihhlU5ZZXazpq@p8>FbfBkf7EbdR_D` zh60VA_d8g7K`ph#*+A+SOe}dfp7L5XZgCa+{IAEyV6mvFDl#l@Ba|+YZ%S%6ndyte&KxJ*uBkjpS zflMrQPEBFeGPTOGHbaaHF4CDZY0#-|#_W}^USGpvGO@b(Y*B6G(f0L7tdcebD;D}J zi~Gbn$ect|+R%^pAL1s?`K^|}TH`I95ZSP3=XkH+pcs2pKr`GBbOa@qdj9l|+p-Qf zpZSra=F=_m3=NutzW*?edHf#7qfF~!rif43{MYV3;Dvme1hUMe1f7( zoO_19(o*)OMlOZEh~V+w+SJxe(#M4C8RX-ae60QQue`9m2cMwei|YJj0Y9z4UCSSe z{>|TfIewSogY7Z+TBaZV=l|Ru=4?Y_oxXbY)$N?uoYVfxD}Ebq!aTn{2)`F`B@E* zen9;G_rE{9IRALkbL|c7l22TMuWCBE-Qvi@@dm})+Y$JYZ2kf2SFc43d|HDqGJFnS zA9WRex8mxnzuG?i=})%Tz4mqX6-Z*Tm&fB)~>10L{z_K}Z#xIODLAIU!OKOJa4 z|MUN6`;#}l311)elkK4oeMmd{sH2c2p9c7x{`bH5@9iwS5ceJT`i}O3U-*T5qa|m2 zW&S+_A=q-{!Kn?tVgK2>8%F-l<{W*OrPYM&b#UbG`-y6w|HTV9a8*je?sRZDQ{=M8 z_zX4-kMWruNv&vNOe^K070R90u+p(*iL7I=^|8*7f)Dc+Nu|x1Sf^@q?tnxPz%s^9 zGc3tPSSHr})lUlquh)Um5%f}SDj0ep@RhS4)GZy&k6>v z?v{-6=9*d8b!N^=I<_p^=Rb#N0XdhN2lozjbQ(TE@y_-eFMVnI=tnPZC-6tIpZc`4 zxgdu9#n)U9q=m{yhZ^%~PZl+==^zBh!LqmU*Z9s9#O$DCINPU0^L(#GUg2;M@hu*z z%Ac2qg;A`CKj?aVTM;E*T#5(+_0$Gur7kGHM;6vwtA_;X?DM~P{Ha`o?GRrGwl*KU z5O`MQ14zlm#>zg6$Ipcm43;96-l!!*EyMLEhPpt~Sxe7l{FS8uPI1-Ns9l!Tr&+z? zo^gXwbFmV%co%|E+atS_RtUojJ2bH5ZbV1Gc{7IKIS|!I7&LUEU^~4lP!LBEeoF6 zq{7{&I<19ggb2YEW%LrCB8#=Y;@Q0s;*)#0{#;@A<;o&yIo zZ7ebVt>=$pEt|SQoxo~EQtOFiZv&F*9$#B9RkUhN`~eNL$U?amR*4zWgzU+TRywvU zJwC-cJd*`6KXV8l$zv;a50kwDl5RbIrUwrp-C4cvXBw#^g1SBq^-B0;rHy4Af0|** z$5O&bNNhfSP#HOvFe#dlJqLG>@T6jMe0sAGVUf7*OfWaal0P*jcrSJ0QB3P^kMFSw z6bstL{_AlvJyXf2dv}R8mfgqCvw}X=-FQ4!u9+4D%6>v$EVlV!u^L$fl{txVVXIz0xOUI{9sJFBO*P!=(*5DxbKBj?ii#jLgjEn-?t&{OobVVkwhdHYwCf?`{8UBu~UXL2>54wHL`xKIIhrNH*S}SR}fZbihV>^b}Vu z0bTCjlR9w%qV-mMZc4sc@WL0quw9H7k4}B^srDv=LjDbY44N-2z4txuX?MQ!o!f10 zb32&0M1Iino3gIJ8xWU%`qS;nPkBmv)T1Ab-?2EVUGlL@+MD0>C+%AU;k|TZ*O}G=HeUM9qxF0%mUxQ!tDgONcZ_Ke7=3^ zlb>q${hsf^r&3pVVgkmv#>DT=$ z8xbsdmRnDLYE7Bd#`w5a^xfZ^pQtCz5Q&{0O@9f|HPwsPq<@cioiise4?Pa~BvjXh zJV)?8YvnV}jOn+{Kj%K3!Gn#NXA<>Ond8&kN3~{!eb@10cX=Aq8haueBEY!3D%@iw zzDQ~u(^ZAM$5yWE+CCGU746d1@zacwb8YbM<5mI+#=9%?B0q1{Qx0qK&wbaq?X0uT zY8T-XbSIp+Jbx{|h|nresWQJAxR#w?T+08 zSNtc{(B%NQv9vB5CT`F3&wOerL5t`JQ=qbiTv!dWExumwwZ*VWv8qNdablVlR>f62 z0=C6x+ARLiH(eiY8fs1XpwztE_XXQ(4(6TWTMCx?dj6Gc9VS1tCXREsm-zkpQQy^z z_*5vQ8P!o+Jpjd|ZG8TPROj(mmMSm8ra~yS5kD4-&wrZz#?Le+s8`RQG5w)8MQ~eg ziZv@@LT#r$Hd&uvxb7l;XLMf%99Neb5p^`RDtn!O`Kj%!Ykw6_ln`p=>21b8$or2P zX^Ma@XJv=@usZ@+2$bn&^JxaRQoX4h^81fH#o18ct7UxJ z>;1>|c($9*kO1F0|4!a(%*B51KRAbrJM>NQ*N0(wy!2asRymj4)?xhGf5wAqSL}amrVFsi{1148 z;+J0d%k7h&`~-4)Qv2Z_en8^>=H2hcCj!p4uRh}ML);ZVD$UIJ&Z2#U%Xn}YKzYCW zeP8>Y``*Vsx$vSFy{NtSZ{E|s@z`V9cYVjb@x#~0w%g%NiL1ZzmG;Mf{KodFPkpNW z)Ds@xPQcfF@asSLl}xud;_&wLXFR>#3!gsd#oqvV$b%nZzwPjjx4#`<@pN?i#6SE) z`;`~{iv4cJ-R^q#cJJ@JS3CADcWt-7-EG@d_+johzVVItRK-7YhQnZ;w~5i zKS6PidmL!beActtJO2F7@tYU-!`DE43l3k*19tskhxsQt_#5F|hr8p=jECWCo{qfb zQ8^UNw-iI44;Z%O(Atv=p*FYj*ScHF!N^~L4HSb}9g;8+%Vo_m>}Yhc5Ao?|5L9^@ zt?>j*rl2(~{Zxu5l|1FLh%cZ3>Q8MvJ#=kiLtvw_nuD@H3oNHxt=w!LY(1!1k|=~( zB5Y?3meeO}3U0zFfcYYaji-i@uO&veYMdeo>x-8$QlOosP{>8&&U4$5QvfRE;eer+ zofO2V8@e4aT(@Zd*2HV|@I>`>^ZRpFb0@Br`;?CpQULZ1benbz5p7ZQpZ0K*2n+2%s@# z-w{LCI;>b?(!7)PfQMH7oI)sU2yz!78Af3+tE0i}B8PN=kn67y%s~L@tPLd^J$_WR 
zFBCL31`T8&$d2f3{D7*pxI__9*JJV|o^dRS$cCg-nVwb9s^H4I(lzF`PRJ89o65%Mx=Te^X~C1);~*f>E{m=l=r%=~lwNW)9g&p<_2lj*RC&oKGn1{1YDi?fC;w z5*3>ZXO$}36)qwDc~8%y`t%DdwJFHo#;%SYuM$;+DF?Y|-2KKpd;};IRUFK`{x$w# z60653%fH9x{7VaL5l2t&4Q6#n#zZWaHLtGIku}DL-6&9PTYSMpr}%WHAIruZ9L(AZ zgpKF#MviHEoDy^)Bso+YIOMVzzh@OxLP^>~6; zDGgvZNtNTrhFE|8B`SH0{?4aSQ9%x3%JD-n zT)G0tM{TS}dG_DQz&hyVQ`^Wxm)FMlSdWeOA8>O2X)<$EHQv!Sx_Z@m8)eQGbcNx))@ms;1MUTWMVy`HjLzh0{Xh#~utj3qinSHv<%o$BkMtB+*G)jXIda}Z#iYBVPnLdqJ*I)Pvr zABtQ;xvWySC;d2A1`(Np8!1Uv35;DXi}2P_=HZgzyP}{or^yf{e1ie#bsAqhBqiqX{YiN6v$Etwk)X$gE#7+ zwcYR7&Gm#wHxF`5OkbnGf#{Q;{D<~}U;HI&Pd)Xt_QOB)Lq<=;`R{u#ez)Oe_TAx+ ze)MD75B%T{S{i&FN;?Vpy~un}yX|dm)4uN3w{CxoAH06?uf4e4_1L?$$Kq?DzWv+( zXR{r7^il1Tcw^zLm%PM&Z2Iw!e>{GW`}p?mzrmXzXPu2VFTRZL+`aW*(_j2HE+V2J zB_J(b(o&KWkdl~)NC=GXl!n2iq(Qn%NoG2V>j4_viDtfB60f_v8Ef zcFuX8eqHB!UKbaHKIj!no{kwtIV&F}DXM)hUPz)H&IAbtzGHL-@p(yfKHFc$;nGV* z%$Gl|s*t0R%PY4MM~}c;Q++gnRA3={X6yntJ#=GF0e7<0j+-u-US{bx&2Kp7J-?cW z*l6O+vlsagxE=#?!p=jGvsWl6HsVNXJWx=W%$^x@s)`6*1rBiY#5<4sjd|eiv)4}J zXh7cb+L^7ZeFAE5pdmtbll6!vcF!#~|0KmG_-{>6D}K;!He}F+4C(*tcskFGNIo=0 zIzg$&{5L8puC|+QAOTOapG(-FEZxV9){o=NboV2i227FL%%`bnjXH(=x;|F8Hr4lA z;G_E?#7U6-bdk09>(rM)s{6sQtm{GQ?>QJaTEaYsIPLi#bBNg6X4%Ua*l^`O-qV!k z#WUpns%@tDgcZ$B()>rb(LI`zt3H$y%%1ju>x;nf!zT-7E#q}_{&UvQlbg;HA5rhg zw4i{Rj=8$LhN~r1;J6g*X3R%rChJuz*bWg~NO+-FUePjX~E>U3_`r7x>rm|(ljHSLB> z*iky*c`Z+N37xrSN;7{$RE?M8i15x<0kSim@RUmod$C%C%Nai2x+9HV>u-4eRhCjS zXSQ_$o^QY||L{{&+);^shD?^o?PDLcdp-y0+ym+ocXr|3AHTSC3`ARB^3MH}7sZf{ z{y+}=U^VyWMFCwU#%V%$o$yKl&^EQZufeZ^n#1qGHFMf9;zj-s#Gaq--170~fTsdP zNLD!qY7XOsEN5tY-w3@Ypr|mo>?u5EnvM%o$t9Ck4n6x-R_DP|-9pfliWf~1Q!3s1x zc($MbKg7Cd2_&_&<~5y#}i1nGpx_lqcGmn!1sj}6Z znW|i$QH9Xvi8i(>t~}@2vdM_?v`CP_D=p={RoJk*X|~_>MnufE$XNC;JY8+M1R3P8 zG1@QB!ZHG8xY9>CJaHbAqY=6ve;mK7VBn8kp0k|rXd^PKwj$J88{Y$cBhO|VEtHY$ zg!hs!isIPIIRH@=$qp04R+jZeS@)pUffo#9tvV1w>->P#`>{JX5tFi>mJnOuf?!al z70Ht)N3l};e|F{|T0d$fH72ZXECg#P_)?U7s6dSL%4vAKy;1nvBeyhD?Tb~2or46< z-Iu98XU5uKRhRBzx&irDt(GM*#KwLR z!wq-jAxZSWbC`=CVszQLX?0Bye*-O?X?s>8Jey&{(HS6VYAJOblHYH$o z6Ke0l(5tWxG%=X4cRuKdShBOc3qVqG)e1RpJ)*Ah?Yml#VeX+ZKrv9c22C?G>EHb! 
zbAsN@;U3p+vOE1yj@TOz{t6u1g@PqOK|xsW>n4ayZHo(zk!~6TV7;6k_Iw)l`DkO1}`?ruby@tH6hT{kn0nS&ocfs_U=j!hLT&}xw{!(TMGeWmZ9gc zg>y~8zCJ)k%9%e=XPFLwbb!Zw0`k;wH9s#_Kidldr03+L(ArAY%f60tS zp=Z)^jdf|uc_A&Aiwh8EcS5Uz@|?N{3U+B!x&|BU`YZOnRffBy2WK0zMMqoPkKC_c z6GeP|4VL}JjyLVq6OXK)6sUDP49=>qb#4<%WQ$Q*jCZQe{wrGN^PU-E7nc62J=ENeggH0Lz zGwL~HaxtG6%+*wzr-bxS7AGz}=5#+b9ma@&wQ$bz*ws60tidE1Vo5~a7MH-VEO-27>?-?u* z?Z_!O=)f1%YEpLn_EkfK0RMA;e_Qo2_G^YPp*5uRckKc3 z8Ov^&YrYqsn#*Pe&%D1V^)xu$%T)3Lz5S z31J`N(xO|2UtXpPFC3UgS9+iE^-(>Ux4ktCpADW%zF4gcUydTNWs_-Sb9qz)_<1!` z>YUpWTOB~s^U~f{z=X@aV%@I=b;6jdZZBpM%}e`ZT)Y3IAUEBD2}i~3qw>~7UlNj) zt_BgzH=j#K^O<@b=bZ64mTRpg!IQe7_^XCaOB>s)n=}M%acnx( zATr8p?2D%HyW^|f0IG+?Wx2T{ZFhJHG@8fb=U_(--ImUaoZ`kf4+=1mZ3iaf`I{5* znPVdmd0hH$3{x4ecArp(#;#ZX5QCPv+N`bIsx($KbaOWRC9QYtJL6k4?7iMg(e_bK z0;Y^-dY_n-8?DVrM5+OT!_OhBj(ctM7WC8n{-4XpsdCjdBupfqXlswX^!}8rWV}|8 z@qwh1n0He-23Q^O9NcqN8(?Y!x!tw7+bfW9X=5J`y?>EmlGdTpbk*q;FsLEa|JP{x z3l;@JT`$A1gs=v=+g)~KP^?U&5H`AI!hJR9+6Ur_ipodMBhOW@oyQ>Y*CH@|Z=0w{5JN`@5$r}ydOwo}Q#>8nGA zQzfwQSpaX)gM2e&c|H=_cmUYv^*#UhsWBURynU35^z7k-T&SLM&H-uzxAehVSI$1G zErI+v8iN=Nkq!NgE`nS@U26PiD}P?p+$`#UiiIG_ur%PMb&!kD?NZHZ=Q=~DIqFRS z<)$>MPO}EQ#n+UNDbHWZ!z0Dem1n!$WgX?7^m6lHZupu`u))|s)CCY#fAs0(uJMRK z$TwK;9~t(PtO?`~XFvo}LI&m#xP4#Aw+9YyYHHsSKql)VNuzgGz5oyx0H^W5HIJrw z%;C7pb;sQn(yI4zxe0tO>56;i0!2#NoaWNvcwyNrHGZRxh^-~83#wKKopj_Oe=XEY zQ5AwxCOQ2~$seYjq4V4`{t)^2d-$m9s~`~t8p+q^GH%Bef-?Fi<^=Ny)xy@Y;STPY zQ5pUpZ56|EwWyth+Vj}z40)ew3$gOj-++piiA#$Dj~4#e7wxK^aUl1Lt^o+S79@U9 z7?JV1r9{lCC+?V~vs_w9iYiN5zP$+W#ak~uMm_0M_$1GPbhTaOVQ}&X#ziB0;pEh3 z>1|(^be)x#gEEqES{ zLwaY>yluXvJykwTv==xiclVclIp)+Becb-Z_1z$K2u6xd~7eP*jTNNAbP-msb-Olg+BxB_n$)dM4Um z9{>> zviK@o$2DEa*Vt967||2r z5#qJ(zmwpUY1G|;21rkdmq?bEk}3vPC|0;-c=rr7-{Nh|)#sg8oxH`w; ztKbe_+Tr_!vIYhV6Pty)dh`=9T>cXJ%@Y^hF*=L=I8H!%oomDKI&XBxPSNC@e3 zpJNq1(AhEWv-__^Kpt}jzAEu@L;>dZL7(9u(v_$&BIZHazZ)CKPY5|=V=phjv{ep+ zgfEXuLYOeU(@j}~G2K~0*U@Iv3eGvarQZ@wUX_&T9c0js1>r*!_u4QR7;Nq8bh&Z) zq}dWxQFHgg4*s&Qh8TS&gl#*@-$*9B^X%FQ8ryNnNAKWvNQH}m^_X&)BPb8OKi}?; zdeKhje%%C%I*IV5cO!GcPQy?EIO5b9)tz4nv=wa+#`dk9HEjA`Ud@!rd!Y=$h7wr*+3Bw9S;$sp(~j&8u&^`1*|m?x769MFu3z;_ z%3JwiOQ09&2!=LEqfXpr4E(3@3ls&Gr^8|{t_<@p7neozZxG8k26Eofjw#k-c^sXM zWq>5@Lsv}ClG2^#0b936TCV=YYxRHfeOvKxoea47ng%`WNWUsB3S2$gu<-XB8CsGbJ&vE4c-vjp+ItS?P-h|X|CsFh$Mn%X@1)F8^!WmQ3(Rn$ zUv04A9NYj>W#Qb<7_-p4a(FJ&)k9nL5#LA>Er`B*wIX_TDa3|UEt}1nFHi|%znaCh zUF-u>wBR$^{I<9ETfYKD!uyU=@`M+~S`W%o$sqz)(6dvdD8u~VX9SY4#rs7vwR+Ka zV^8e5?Ksh{q*TrxKe}-h`{$qu@);KBi=T;uyC=PA*23h^c(y~uOGLtn0{=d=R^%LE zO7~`cIS?}sTI>5b<&d2wMVSWoJK(nI_zbMmW970L`&+(^*ehc`K62#!*+1<>2g`pv zRLG;tzTSRW<5&jULt2Px=D}lp#m1;~-_?rbO8QX?E2|*>PAh&gmFULi4Y_>C6?4q#K*Dro`G(d=&gEskFIqvZgZu~=NnNV5#kQvB%oN;V4T9_N zfha%hi2))1+ZB(KwTjxt zG7qd;4sU(vhn{h;T?V9U@~v$LqOS@-8p4Vpiuss}!ny0k^K>1)+1=a!hQknXY5X0& zYMY-d99=wl>B05Wqa4!`|DbGwi6wtZCV%IlrYR%2NJrW@GzKJ|Ej4Bc8Z*2EY?aXF zs@8(VT7$;IwtS^K`I(N+>>HP|qkbFemxMf@Kf@d-1nhP`2Zw|}W*SV)qBINP5wa8- zEng`p8T$z#dxb|OuwbFVpn3RmRa20_8t%oJXRaqFh&MJP@Fm60z|PBqg6MLC=RGuP z*gt$hW~f{i1-iQ*>;#6P)*>XS)?%55upB|BSgyN&$j7MoaPHH`L^JLI zn0%DSvKGjLvh&Stva<#Z)poLq=}SPLWb>0*-uoaP5?TO#^_S_(15{tI}>qQ&iSb8RV9`2?idpT+T->Bs55h=3V{- zE_Ck3V9u@}7-WJgG#Ka)hFwFL*ByIKArM|yiuWKV6y;e`4)r-^ZDd* zne4USa{52u2Sk8)(9`@@WEJc-_lN+Q(z63u+i@l9SSRrfg*Pmp1I}-jRo6$8C81kY ze^0KPt~bVI5aTZc=WHQ!S0oU4jq7-qkM=3>kz6cuDPp+uhP$Hk#uYt!l$>|YSUIr# zJjgd-v=Y>TUWTBEmjhs^|Iw9Q<3khj55c(cJ6UzK4Csjze|u|v}Y~EY7XT12e}D{B`4q%TP`-?P&qB9^t$TA zcUxlNUvoF!$ogUZ&MsleHGt~P%XAZ;13w;8%|COD{ z%_Hy~)4WGzTkr4Q;XrhqJPT(z@exqAq;F&1JH5E~bB-YQYzRW57J6;E5lfMKk$Rj8Lxan*Q?!Q8egKcu%eX(pw5ua{$5As!)0kx7YS22iD;p1bZVJXFOt~_L1GxM0|F+|7XxD=F 
z3MCh~Un98)Jj?_oSKM$O&>PpM_R&pj7~K533j_XPpn=!~jJsm5t`4A>Y-F<5c)yR0 zEPnoyuUo)?3?vy^93q?75{Scjxa3LP@qhTWb@@2njh#3_kyy+I?g*Fj!hO+T>wGD< z=uBUwoFRMwE>M!Yt0wIJRRD5^5a5m&v6Umf35pJs4$XhwVuyAX8C$84KVaUD1pyLY z#B8V+wIp=VgcQB2uKdC2RX;e*Ir=}{o-arq_Sr;Ppu9*cPte|UHVd_Y@(gB4Ie6iq z&g6XNseWEDw=%F+M*N`ASYJnD-!8dKBTK(wTmDMkgyqpzYqZgO)ofXlsrCx->)+RV z_Z4{N^=6W!mp1p3w)LuTbpmM~DYjI9Pn((K0;Y@AIE)4!2DZK?AM(O|g2zkvwHEv7 zIUZY{OcR9sf14H$fM5@r0uZ+Mnd5~9=eT*~KOio%{|xaaW5}zpgGkdG?gJR}kspz* z@_q$h`Iv*+|MMX+2mC+V+y8%VnzS;`u=0ZIYsy1=q_+L%I=bJZH&?Q<=K^B2uPII# z-LJ@T^xQqU>whvl&zD_VwmyQ#L!k~ z+ct)IHcvXdb#CW`U8WP_H1ZZrVTw(}s2*Y*scE%jv6$<`Kn%EJu8nrMt9KhRbFibJ zNQ#8gdwlY;PQ55>$F7VE4fgw7%SN9RH1CB=u2WL^ps^T?&)PFE&q0mc2|O8bdAiDs z)hTRFyZh|g5u>AMYT0{V_CjphJFW(R`A9(7@fESmUL)lGGug47=)kt&lw0)7=hWy4 z%_o?XDZG*#)_B6s+Zp|3vKR3E+}8Fk!4m!3#<2px)#jMn=m# z*{#-GfIj~`Hu)OHqcaoUlZ>M#uU$iAJ$kPn?D~1i&?sIeQY^e`H!^I4y>7M&q=Bq$pR zn}J3^HXg$g-OfjGPmuAL$3#zyd&qNJ7r)e%c+N^8&;eqviSrM%F-N@|Eg{I;?6^QF z0UgTa_Hf1x(kW^jCh>D=51_fR6U0*4bF@3$Fw(a1ehvs|B&)f!mCsx0JQ)^u1Pzq@ z%pZ#z>6J+MJ5`){%af@T_V4iRM(BVqVN1krAerrLb#n^x!m{ldYP{j+LF5YUOI>YN zPiO5F9pXrmS%NcMk?GG)zVJCtpBtD+4@4YcMD433f=1FE)z*^^YLe{q;~tZ-G52;d zuQh#O0CK$jgt&Rw7bpak#F15YOm35$l%qwAhw3y0LE2zg{|3EnQP~7*=}B}<9ss#; zISJNsI)7d_ETg9XFTn5Dz`T#%twULXS^!ac2bRsFv<1`eqEK^qN3hIs6jM$s^F(te z2a3i_?-U$0w^>x@uUUtajOXn%re8V?)Nt<`Ddof^&ZPFH3-v!~1VO)Jb!M?WQb8Yv zb?ZM@l%`&4FDVrN`}+bOdn2P7;~2x0?k!z&bu&F3{dVIOrk&f$Vyp2jt|`WMcQ=V% zpo7X!>|=gpwrSO`C;-hnMtdt@d=F$!k?n-B@hUa<2E%DHhE8b1HeSvqJ)rrY7U|D{ zJ0IHnm44X(Cui~Ram_EVnKMB3p0#(zjIZPWMJbP)NyL&kNsoGd{LjU#uI<_q(;9FaS{;{9U7o zxHG}mB6^!P*ki|hY}B2$syCLjwL z6ep6P5p}|cxZAT&^|Vfv+uppIqvE>Fkz1ae6(oy`KMZu|!S|H`>6fc_9@b4$AfJXwE7t5P25yB6)#o^L^Rh#*mhQ zb9S~(kdDTZ4jpHM(@|Cr|8Lw&&ZqmEUn~~kZwhrH=>xSIY8@$|=lV`>8y?OId1xs- z$Bjqb`%UO=UtG8P6i+GrYVAqr2lY%P%ix6?NEWftD|Pxf8&4PBfZSE-N1u68mWR?K z&o_KDMQ{;Kr*$jChYRQ4W##1~Ur8%APZdX1*pXOU+z|biXGn}tS$E)%hhO@Pb7dQJ zZYD$L-sH-Py_d}XbbFbIYxU&+cD;PiQ7A&v=h_U!cm^~eQj`pjlD8e_g1Emgdy>UH zTZO9#w=VvD_+_5Xx=&NBaWF>*J73ay>_0jWgX=%z(vuvEsv!^TVqY#b08bBm3s3G% z)Dpj<3s8NepCzV4T~(-QrTP%MUyV6u{+1Ayuu-GmZp)$SXII>iwF7j5%0I=ex~%Ru z`E|CHABefti*Sf>nu{bIXsTcIxR^K0GJs8ua@qI1M{n%}{MhB*0(0**2TmAk_%!$N z9VJj`%>{bI`N!9sMEaHR@0R3Z-tc}c_gP2zei3zM`)kHuy+9eDMT!dkLIR7?(Gv^r zV2d23ZT6wNWoESz$WOe8JAqCDK?f=3v65JKQkF2bxdW+rW4c2JPa9b}5hH%jGywOE zxhn#i=@ehh{3~s|m%P3HL74nbV#}x!uc_90EdcV4NEJbzy!7zix2jABLF-tLSGWgc zw$4e_yuNJuBB2go!p>P{aLFp_k43nYSDXqzsrqNryN9zF(I@c3S5|hV)RgO2ou_zZ zCBWF5L8*SJdKQJ&Wyq^1?+IuCJAC$LnoNxZ>{aEDn~Dp?Lu(3zzuk({Uwogr@D)s> zb$*46gNG?JZ=yg}ETo)aEvA2eiodlZ+W4|j3m=wLbADvlw}-pGNqOK*PEkz05s4N% z#7VN?4a`4pEE)jq$Z>sl3Y1-q1Ry*mF}=wJT)WyaQKJfq6rm>JJ9$vGHL%jQnTH(O zrzqY>QJ0*Mss&>(7OfMh2YFTYessQ%o>MIR508T8bUh7Y`@V;uVC>Dp)Bj>P!~1il z6!GLRPV)Abj-DY&Vwy{Z--8tQC|H`K;3cw1)cw!FUrK~k3Mc`{^ei~5cZl@W7w881 zB<2uN^-BkaySJQwB=@W3y4vHM{q~xE(FtgLXfsV4Y+#YowcL3Ayl^`S-oq+Yy?vhVh1SR3o|k;rSNLHzoRHJoy*)8^aC$xlYL@ZUd>!7> z&G6^&e&4GIkBtKiahVE#{}SI;Jfe0C^fF$#SRy>K$b6Q0OsRaMH)DdT{vnS=mHk_V z;PAfYPc_1=^4oBzLs|-NBxQ6mf*{_VlrUiMPH>jc_tSJkGv*smHhs$bkkpxe>2v*q z)Q|9r0kEu|Xxr;s5`z&PkiEvEso`j5Cyz)1o++`n0l$&2n+|}(FjRM~OhUA$_FY)+ z<*g)p8w+ze+0Og^j8I~v^J+_@1eu-8BO)rx`(iZuv`!Npod@*RKay`05#0JWimfZR zF>eqQ-Q&2kYNmM@{dPk&7r7$?#M~7rW_zw9&y?J|xlME*#m*(zXT2v+_(fZ_hG*a| z0eyQ7`g-{L6(CksNvw{T3eHIS>fSF$69D@kW66e7YIMm!M49_u*1z`Mxa-<|W;*L2 z)&-UlPuiM(bh@Wg+rj8;!!Pkplv-O&;f`url+Nh;HtZ>1-RJFwpzY0em{7+sTK%~$R{mT0rL-4#DbcWL`^)hm2ojPpeoZ!<(ou6(RDi3l%$ z>6zw+|K90$0p69q5VeJY+!iWygov?fJLAX`VyR3izEA2=M5!YCMxZ#A`wyPAJ{J*j z(46O*S(J@?sUehgBY8KrjTbht@GSeDV2nSSE0n>;6{F7fNtYjCjnDy-cLV#ajoP>{ 
z^rPWS+0=fT-vHaK7)EOi>XkbsD5*7{y#|OD+;fsvxdyN;vb|;Tqbv&bH%f z0!e^n2!v~c%6*)-@??df7(7f=Svro9Be~j)OAcD?Z))I%S3eSexC;4USVOjtLpWxx zZPLUvA`?TI%;H76LpaRl!}c;?JxW_(Y*Q)M^d~lo=C}AwQJtx>zi^uqps@iihsAyM zZWj=N3-Qwd7_*p7NO>p?8k43L{Jd>=g|lb})k(JenN9tqfWFC3X;c`RRb1|2uD8;5 z_Re?c#H`+H6vPP}oJ>zIAFC)cR@t&FdWAYTn8ma2N(AiL zEE2qJR`fPpsUYMuf4vBSzc#;%WIRM0N)R?Ci@ymGV8i2pBtCq0$CMgu1JS)t;5$*)v9}k|dmcv+l zAYGSfOZtY;l-SgmsgCdFb?U(_kD)M}!muLyyV}&yk1`M> zdw$yo34Yj`p=O^g^Q2>TbE?uJ+4TK5ouG0- zU_a;tQeW$?Ok&@+z6Pth$|JyY|)CJo9wV?w+ja1BsDL#JE2? zxRn(zFZ_o7vyDTd(W{zB&dooe$gsMbvz5%VrRc_DrPvYrn*IZ_2X9-syb@au$MU68 zcVw}t9pv=Zd*?5h8tmN1G?=Blxe31~M4O%+B(TXO#ddmq;(qtZZ4N z`P+%JL?p}u1lc5`;EJ%J*Eb!rw;bMq;vZ7>{96}Ub9IjhQoFSYPYwMcCZQW0{jcbX za{J@kx+Ar$?*qVwT%#g&Jr#jMC(hJa&N{KWh=b^oSLW{@lC-?HjjJ${iL{J;U}DPb zdpAlbvdbyy)2H`*G-X4?#(%+oZ}Q4&bNL{DB>a>%dgdOvhK!|m3T`Y)-{+ui@!tsn zZz7>$oP(v4{4X7yGV@!pvkX)g8yOD@NBoFv44yYCHXSOwbMqw!B?Xdoa%AHk>b7ZM z?W^v~Oru8TsP%?OQK$1o25o?Hp3ANCnjF4c<>y>(J;~+~_HLqBj|?RFJ)OH*t8*2c z=D}JiX%pHZe9|`>%M)}PR&e72Ncs@@!T6`An@lwDnWeG)yx-S@hv_Bd8V$wAHVC*% zs{dm)sf=qL!VydKsyvT~R6`Ik-MEtzUoJTaLlF5}?|IpZw%QKgNp78duaRDklR6%a_`y(lsft;B|Gg=kCF3~F{G#DTIg`SPPSe1WnxiPY523e z`5@)A`uhw6^mXIu$g!zg1;>AsG4G!%=T-Bj#3hx-P0b5ss3;|&S6F%~Gou+W_b@E|{1$G;xxkehnBY|yeb0aL z_R~mXELV35-_T5{+$@ftcmggq7%)q`wQCxdP`c&+NxOZU?)8)7K(_q)p~HeU6m#$X z4iFKrX{FKCvhi!6m~3_VatTtl8+aaSgvgfYTlE8-^Kx+H0`&93O3A_~5gy`d{~X#e z@6^W2FWEZ}2qiw)t1Ixt1%&QH*%;|MNDgv!pVQJTtp+{vH!I*x@*Jbo~jpr1v{skf_evv1>PpA&s$HD>GLkzc^Anx+-D!d zmxUE%w6NgwSTsCUeShRo|L1`6qrQ=XFAFmm$0jHd3S7KG$}q!Rx^Te7YEXc&8bwCS z`p{5}hQjjwhy~m*ADc4G(TZ#}CT{40Z8TcCQZVhsP7m9Ei3H%D2{a z1t+fNyV)3e-jI3*+j&cWS_P}Lnh*y~wIyRG`rKxIC(@sJT_LFdOv2y3koQ(HDj&^LUZgR7q6Q*sqZp9-!C!05+lim#FH<_Vdu-R?N zY-3;xqBj7DQ8uIa%T9&tkl3uH-AY@tT**wiW_SmeIV-Q_2E;X|PWg-#Cx}QV+tr5b z?Z5j}-aSm!S%C0)?54n**Ul+BU$LIK#*>`_kY4o_WKJFC|c@Z#4J5+_<5 zUvYvNc+=6YbYAtm{ZMPF5&Q3AR4oF0x{r3Xp?Fe|XIt^trIhjwQ?` z4>0lspIucfb%g4pWMWI;#YP!3#_*14^WFO%-|Yi3kB6sr*g>Bc;m@$`^UR&VpHdh_ ziF5NYKJg2sbQF{xZ2#LXTbNofW&GO1R*P;SbO4eyF!j&16@2&SCn-FM5l$T(qt*2* z>+|M=u4XJ(KRV$V0H2hu25{C?5haa7?FRea8|>{We&&1O{2Ww~(NdHB3B9L^)nSDT z<4Y}C2-{h$KP+&|LtczRwAYQG*k;&R1Z!71#bwwRBEpy@YX6|rZGz(RxI;Bc92F>% zlh!il=9WEJWlKuuK|=PkFQv3cn)i2_6v_BKe&Id%Fta@Nz_1P@gIil|M?TMs%~E3G zf*Mo#?UhUjA{4KuRkHS42+P(P*$6-7Q*@Wwc!+j~Q*L--y&B)NEnbDaf~MaOeX~=0 zPt4TUNF?gn{AYa8oY?2D5zaPUT9%AxjBU017n6q^$()QvIJSo07?V}}%g>56MhR0* zOOpvHe`eAw(sse46`U+^i!nk!W^}@z5)sS;66NAt|lm^w$^bkMpdVd9(Ow%4YWYG0LV8gAbmU zkENd`QXM{debbKS&T(1qE;og2Uz!i?x67&=vwjDL?c25#;QP#Czj(!wK_H>smK-?3 zyh9zAV8J?oYyJFRD4Fwh`=QL~x<2~PfZ6vaSk9->LT z5VpUVhll--UF*1_`}=b{`4o>|NZVKcrWiY7|1#>mK$gpA1NkG0Cqn5GE5LRU^p3&I zG%BLLEOmqR`w?N8AD%daj@$1#bLFSs9^Ms=`#Iz8uiw0mTX}uUqD32jK%*$lw;N>0 z*xJtQFh8HdZ{8F1zB*=^k{lq8TDQe#3!Zi~8H=CD{G;jaXh{U7;8lE9@9{9OM~p(y zIn9E|+~u6;WeN57hKx=+R)-et*9<+qp$y!OBGa~hT&gpRTF3MxN8&q4j=%@6miQLO zNE_;=;^Gd3qsJ(r*;y4-Ph=b|`RkSuS{`Gb$I0>p#a6SVc6)OA6%1^nyuDd3r+%|E z_rDQ$i3xpaKZ(EWRt{ucY<0t;YX*GYtu+0|mtKwo!C?wIjxVe%zrMuSh>Q|m5LU3g z&BssqaFAi%8vMo`d!GH2y6Q2p9wnt)+VLL=J?GMaLylv`N~6}|iD1Fu;@_;Ejyx=! 
zRe%jfB!!ovRzO%;**+-UP^yWX5IX3(@lzmGZL6*>RUbC|86fwweoLB zRovYu)>5=;G19NYdI!B7y!$VMSy1!L&;Q&W{$-+>FGtD*>W8i971v9B{)3tKAfcbL ze)`uNT;s-Yn+hPITDrldrqhaZCNQhp7fVk6=W+e3 z^~@Q(E;Tp+?5%h-ueABJ*I0pT#?wrE_8bqi_;#{IqVyX_k-(qg!jwxkREzw%%C3O0 z3SIc05?4XL<2J#Rx1${d|4ofAV!ubzC)&yjWT17X5EepX$ibP<3XRTTHGs3Xr1_9j zAn*rwK$dQ#g0qUz|J4F8(p+#+XB^_5EzM@wyCd1Etf>mv>)8wi?k5>0@ z_!<%|D&Yin%Ly6_)F##5>P-*D%%ZE@ykm-FwKk zgBq}kyveFXS=Rke5cE3zL3nn4f!ddma@yb75_vV^;Amc?g-BqJ5}6Au3P*a^#@!`G zo7QRr51g9_^~_Cvj}^+jqno=pBDccZdM5!Ro2TqYf!7XH@7)+ZIPrd=@2Nfg%hD0} zEJb0-eEJAxxYCE>_!~P^1Lx_npwa)mJ95cARqOBs_;Xd<$%H(8Ls)CZn+I&tTscu zJQOb$-T~_f?)8|DN`H01b5;Byn_wo9yLndlRf{^5C+LAKxY1iL&18r3eB@1%X=*`u zoNUzF;53m!i?X+2k5gTrWBXZuCUp4hPi7lF7ZBESatGy4=NRU70{d8_;P^PSAD3tl zfp{EEvv$#aqzw*Hyrgo0RCRqHmHfuO=#hDuO)2lAsMqE_-LYHz`B}newaSeaMbfNU zJ1XkRA=4Ft>N?$)ln>4LCKc~bb9Si`%2-vtNmC|?J)JjrrF=l2uB#L*5R;O|Q!UEy zBo~Tf407~I8kdQF_KtU&{rPh_@39vlPsE7;V*mUTf8NQ_ZPk@5%KQcF6M{+`s(na%fcki!6`N1n?=04NV{D5 zF{8iF_RQ3hlG-Z1;5qlV<0;w_R7#Wg4Jb4Y6@14*Q1KEGEwbg0tUH5tV>BJVsN>8y z9h*O5WFqfXHRWY{mN8x+vm@A}&RL2pH5qS!4GXsHRw}OKO`P3fIY(|Ec3Tv-qlqqz zd{Z{QsoJxxqZR0Cpf9PP_QR?Ax@cIoi$5lbvDeO4Og;{yXntkcS(%}@l@5KN#F|y> zcKy6?*k$!3mwQrCLu{u%*NV5e($UCuxGg*<%@+jQjt zor548?aX7v8myMQWH0;$mK+p4Ip`J+si<=KX0to#bc_FZPtG&%Eja8xv9aIIt2yRv z9oV$cJze$W0%=%t9Z9SFdQN+0K1yF2#ZsS1fqT#8v~GkiZEF*dOgtJx?pY=spgO9% zKZ>bK&*#)7YBxT$+lxe}L^`RwyV*v#hDEOzU2jc(&M1}=4R&#IpW6|$RoO-r&pv~y zdS?l?mAZM6NXAIO7+169(hX|>!W74wx4Ct+X$LDKxdRAxZV&H=WRx!pd0YQgk z7KwQ>r49eHZSrTNZwy^_S@!RE*I(1qOokor+WoQ#`xYxK{!xGIt+NDBm@R{^)%R{v zZnCQ3MQF=){ManvqLg|J-GfqBB-Mc?`CvU?Xx$vJKIFzO@P}0UkU6BpxOpkL zw50NJup&4YeM7r(T>my4&!?Z82Zyl36!7lLsJWjq3gl;)|H~XsvaCiGd_zC{*qZih ziz@BGyC@`*Ck>O-C!7hadhN*hHLl^^^HTDA$(#1ArOs>{l^7ZMhZJJ_RJ|2_1-4HE z!jmUH8ZM6Lsa^_+n$-8drYG@o&`PP;N;Lgttw%(-T8$yr)Yx}Op5!GPF2tu*V4<4S zS{Ht*0-nqF_2g5Y`bMfEN9(B9w({|Q;8@xl?gbBQ0=a;7QT&)|=TP_Gn7~bPN{y7} zMqhV#bRp?%n1XtMaApCsbn%$(FL^Jn%_kDMOJmfIyNJQ56%GeYQ7RlUn&g1--WSCf z_VUFx)k7?OzQV%pQ<0I`)`CL1^J4v}U=ge~OB=)Y0as$yFM)s;cT|;1RP6Q)f?g3T zR(rO^6aCD#8zbkoPfX#*0)|EBh4c7fXHlC>5kV#_)nuqT+aDq#PU`ErA~&>nmInJ| zgra}n-v3J{da?Qh)f(i#=x*?D@1a6Uc~No}ul1xFoUjRn;Lbzhj7}3|+gHORd5*h% z-dTZX-a?tuOv&Rdj>f_)t|!$zN8Ll7S@kjRz-MmCi2uNE%Sf@iYz%K)UvB1%<@|~< zz7NOW$UjGB^Rdh(*}mh=N>GZ5s>*!Z!1CLE8KOWC(?=MX5D_HUzVTx*)X8h__&)U9K>eR@)hF9{o79UhA{{2Xk>A>gSWF<9 zt&7AQ>Xrk%WS|%1AOC*5&v|n1f_on*qbc{5t6C}V-VY3JQgRQN>hwKjbzfY2xt zE-Bn`K6bXPU_CU26zjCpw<(m(#lPF6cQl-b;U*?WomE`liA+a6x9aIC=2t-qeq{KKR7ml*ZFl@#7dFS}SbObZysNINaH!U=CqBNu-W1e?+qEYO48YwRi zmL1I(Uk~512ikq(m)`(}VI)l}Y z1~nONj)@i^FS832&;N-9QD38PN{fYB=}WbRTaJn{D5t}ifMpI4x&N^UWu=igrSC52 z0j8|BTfYqMfSC}ckkv=qe^L??XJLV9jj5&&@bpj}CTX9+OAGKtGfbt#mNa~1=q@sO6PhNa7i9L(g56_fDLQJCwGbr~R0=3yD+vZu{* z2C9fTsV4L~?&A}IW=RQc3Upoe!j)ft4 z;3%$*Cbm=YY4Ig=WCkurXK=F7x8Nsgp_S208q38vDo$O0)Ug4T)G4^CrVbtb-k2H) zb^hrT78_iyo@ON*xQNr>q2bUlOC8bz;k@|Hr0Wk3cJ7j)#hW3gA!gy0-Vkz0=B_xH zjC*`Pm*h$YPDV4!&fFBJg&x>Q)8muZGT4!Xf8sf3tCQn%?|1H|4i3oqL(b%rcg3&s zM_c&j5OtDX?rk(^;G5dS*W(iu_wfyix8XY|Ui8B4?z`eUD1y(AOXOl6890{QiMzZ; zip&>CGDB~2-pc|=J@%hZKU`CKFL}%n?nqj*D2!Wtjvkx8?!PV20NtzhWsOh9j$C7F z-YF9gVZ_(-iVimZeqE3;8ARX4vuy53cU{IKJ`Hr|B2Qd@jz1lvEG4z%Otky2#p&x0 z1!LRa-HtrWBe;?>g_XLaAs^#Xi$uoX+t*WF1E8~!*LZm3pzH52{JA*!DKs|2b8hd+WI*i*9H~UDo*KE$uZ~=e2vfb+WnWS z8uvx82(0_WN_@e2?reW7G~dYHQbQ#aPZjnD1}1`ks?9#AtQuED>LKVp@0hUIkSLhC zp2%DQOt590oQ1u-HI}7p1mzOVgD9-aoXRs#OsXibMGel$!LikMnuay_ z7&KCMx(T(8bN%!uf&RBs{v~nswR~rYV*)ATwNggpMR*XCKPGP{*Lg?qO;sCase|uc$j&1!* z76;RGL_kEr^{pWn-d(0%D!G{@j-!OV+&9 
z9cyK+(eA$%r{&^-0RrziuRn4>igHMmM@b!!S|oDv^#`8|=p8E+J{-@B&sZ6wNyC)p*;M_@F}K+9U~;nlu{COGVQ}KC|_~7+^E_*0>grkCt(K(w6Yw7nf)}Q&3_hft;VC{bQ^G`he zlY6&EAAWSZ@kV?H1>c}pgJG^CL#?@=m^K;0IQybItLHt4dAzOTA$>JP#mS@Y50npBipS#d< zK6(AwdDeb7fA9PHOmAnoa_V-iKds8&49ohBED{v12M#%0lk)ui-{1c|$1~zHmgTUo zLuBaLaDtZgrw``)`wvk#S8^sUA0KH`6F_r}fwjjEpUxAy)*o}dhH*A=8j;t{RK9*j zsuM3PwXFr7$Q=TTTt8fL|5M*QB-&W@m)aKFvS}HlSlCcOk?|=*o%P2=pDZ=B86jDc z#Hy;{w_jZZse!J%cG45`j8E#5^{0`(8sigsm{t>@G^-Th?fmFD#fT4p?Pngh#wjsK zd=*kMlFMCl7(bwTPO*z02TIpjWK>V@NBoMb_+4`{z6_}M)p`BFq}U>PpX8RsbT_Ah z>u2uVk39dTAzJ{5v2|;#^AAn5^K%e|JSM%z%JUD5z>K2 z@tw)YYClXly3TI~+{^n9^vkaLS}^C3cvgI_1opnBok{5ESLqbEOQ+y8jOLb=gLlk0Ep4GU%eAzGv!Qj<$ZSKjs&G{joo7J9ZCbYPanW zQ1rdB+R`hR+dL%NSgVp+JLW-;dtvhpilPaJegZlNJeeqi_=7PN+Ns2eKE@axdc%X; zcYG&4LGg~qwp(9(>vr4ix4}aP6?|3!g29f#__F|}_71k)p&I!{KtD$5V*>feZBMMI zvBf}#__+=SASix_5p*#a&8}Y5W)4DXG?(Rt19f##?fBX|iN+FCVoW>~{or^G?B``>WWn+Y-ZE`^BB$ADUr{8suJ@(zrceisbI+B`_RDZk#};bCq$a3$SqkhFY+m z4mrz@2%Q|yRkUVcm$^3!b&N%qRHp!!T90YZtR2q_^PNBBYiu41oc&2v{!}@EDL7v{ z$es^qEMbn(!lmq74k{--V5*`m6r4$`a2nw$eqWE*(;`I;9Z@*9fjRcei@E0Dd+9cC z%Ds;B_S@{D-qS_Au0RY4^X_1VC~B(fu9}`BJo| z7?@Bb$vFSA+vovoO?mj-@&2F3Cn&!6zU>iyN%l=&v)y^u-L?wH0+aU&5J_e@HpPIr zyI&bV9e@^>d9b!|eP9;b63ccRbtLzCMvz{AnwK{Bt0hL9d~K8?f<)bAW=u;2!xK0= z_(MMe7e{G^w7Xx4%TOTHLN!uvGDrN2HHQEd)(^WNwg&pFdq5r9T)!wqypHDJ9r&^= zSaVffQgBs~7;e|nQ=tOkQyuUy3bUxSPc$=XUQe7Mem);M$R*>Oueqy~tu_iAc4Ap~ zQYT6(TCiF3{27ia`^H7TKFH1WX2xyJsWWQK3^(f) z5pr$JofAg|gNT%t#c)qk#bOQC;#X1;1vZDa>m?JQ00ski!0c5_D%g0x4xH~sU4di& z9bL4T`^t<&);4;?zF%h4n!?=e#Yb+O#CZG@G|407Kw*Glca6#`z$%lbGOMtrN7SrY z&~gv)f5Mdn6ytui%RwF*;Yg8g(>q;T#aGRbBDRPO2PEB{?0`? zu8E)$W5$O<$I3h{2lw+uw;ank;){hNRUI2(Q-$0Hawi`8nFH2kkLBTDiJ{Y!Bbz;6 z@%8cMFdom_^DlRdh{3VDUy%NXv*qS7BcRq9({hl4F0~%ho>@C)%Qf;j z9)hwQK12pm0)@4EmN02g6P$wcwL|3jkS@I|2;BUa9JGGKP)PF-uK`u!;qSD<(WE_N zOf=QZXR|k2;mJY&o2}K6Y%(6zjLW9H?}!(lQ~sgijAvAY)}`(3lNo8Ej<7f0yw31F`(*wZ*RS z6(iVp_bb_L^bM{$WYS(wV%T(OAv%`2$YG67ykbDy-LJYmeeIm>F zHH^tYTWd0}_y);q$2ep_kVvPflWabobD?I6T8N^JyZer;h%n|=$%^{5Y zAECAej@l2(t&Q^sy;F)fMlIB)==eYHhn%xPsH#f}F8Mlg*Qcj(yV=Dghcb-9=s0K| zG2El#ldab`_tV2zpYf#=Cw<$hR9{PlSk~RJM6m?iC!J^86r!Mj)&B0bPpxk?sTkc3 z*(j8Z791E&Z6t;){+wHU^6YC)9ci*=vtAJ)*TzJgla^SQh?IwMxu>TD9R4?0TdONF zg5}_43*KgO5e#|mnTKaH&Od=Xh*5v7-~G`?{OkA?6v|&lC-hlAC|xhC#(e--TK@EX zC_X{)IKL$O7Jq}HZdPJKZq3!=lZb_b;ploLp3TFBcPD{unHt`72ZpX1>xaSOcYrbF zy)R|}MBx?qz{O1mhOQd%!JhT%An2Fru2?&}d=3mT)?9(PK>gumKEn}86%nHDi54J=WAJG(^Pstu`1F?Z!b>$FHKreNB!R~(r7OS{Y()5%RU0fWM^9xV;U3NXZpAOr-@Cf{>D*17mlia@Ke1gmZ>?K zjvbo>^nKR#M;M!zzBz^1G!9%8%hWKYTMd}^>q7DA zpsm^kkdGU7W-8V&sXH(ntzUSK2gk4WQpSaMnUZGjW7?_j>+c+0rbphfWYE|-L)KuB z1xH&~UHtx+8903>SvVaq*C`#x?rQ62#qodN7vY7~DE%N=IE68vo#R3LasA1Aj=Qs| z2mIha2;(Up7`ke=|1xTfG?wWs7K}=#XKg+^#|y0g44*Wm;1O+l0UgmDwp+gz&_Roj zaljtiW*!MoA>y?85%tOV3wU=n7v6S-<^m0};4F7^c^d)og*abPa8|Bauat!e!SoU~KT_Q(D82Mba62`^TZ%<6O1_Fr)txruFV0nzn)#l;_g z=h;4W{!)*){dpE+D-|E3=lSDRga2h$o_*Ze7R8vx`3gXihe7Yy~_>9Qr zHK0^EbRSxp>1bNIqA(r1uF6kPxV4*B=z*>8WWpVneZxHX;6vNX?s@6<&d2Z_6t~{8 z-N7%(Zt3(Jqw=Yj9c%pI>Tv6ULrf3ra#qwRr~{BYU}JNIqT*3sfYoANX-*-1e8Zq` z>rWrP)a}2mYX)|81f8)*?Q%Huu$ikod5{j`(=|`iAfO?Lbs2g!I%5EQSlvzA(U8~m z6QlSBZ6wk=JAwF5@2g8nqo-R}OBgO8K0R9aWipJFNra&lzPgZ`79uOpd z5#ZW?&x;OhBsjDbEm{*>ig8iRYfX^uBXfvm#a_QNAdgmlkIT*2V?`6-X4a1x2IW=4RFT(kEDY$3mWqz zWib3(d<@?7Ci+~x*56Pw?}+5XK+Twat@0t9t)IcbDZci9Fb|AVc;8V;9stajvvgDv z)yYH9hxvKkOvJQ5=?wCUF>2iRL*697KT>K8Z=cux*Yl4Bh~)ZjwwARdr_`JyTigEw zl|e>r0m{FNCoZ4@WBAtT+AUS{>5t6<^zuj-MR8B`>z>hWMm80=FT zJwBaj?yo=6)eNxpJ*j+pZe*~QzMN^|WAT+5#pXFEdUB220CR(RRM*jv 
z&x&u*MzY&aWpKFqE052lTX>sfzBI6DKL*e&yL4S^KAt(m9rIGFY{qHo|CD)-J>v6_2*Pro#9sAL_B4Sg8hEDj;b3T3l z-9P?#X2p{8+&^l-n~(~gDs%D~dxEdp>yMNBKclbf*Yi(HN1gV+788~|*VzAcFY`#2 zpZ4EP(62s>l<}32#^UkR+FZGsD)b!KY`cX$z5hZS#~)VG+QD&4XnGTUu3r1!P*^_lI(7f)*Pm+6sG}B@o=JzVKjJDps!?$D zoSd<1D6^?fn6F%d+c+kX`SeM|h1cKrBfLez)ViGWJo(5R0Lys+BL1bl{z!5P-bl%& z^7t(G$s?jac>SUMU$O$js_FAyiwJh0G7hZS|H20;k>Hs*_Ncmht4En+9sF-?hy~uW`d72U}FoD4ln)L^MB}zBI?VfAvTEFQjpv z8^5ogH~wC2&&nT&oK@VJ0Tf5;m;KV$-{Z7sw$P}X$Tk5uC_D6i}Ow6rd?2C;bTn zEwslP4CrXLs4||Z=Ryw#a~x+T>%qYs(Y5%FagQ~=v&1TG;u`jBRc&TDHDxt}b@q6` zO=$aHRy>9t?Em@e*J?dC=UX$3DB#Ec`AL8toARaazcYB`HU7E}3_!epC|O%DyxRWj z{Pp$g&w25rk-h3qxicM|ML1Y|bO?U`1Nak9_u@;kAHjD}+;k)U#1p@R0@0Khz|J`L zUpq}(JTe5%(NLpx*)^%At;cBBhxXq+$U&y_&cCAdm?vazK3m53shtTFv+uvN8aP1o z)2_Hm*19Xt9Vt}gW8r~5d{Q*ySES&C@6rCpikAJ4c$I)l21aM=-(&1kBHUFAS^IC! z2Cu9*y}bSJ>z_Gz{aFXzGil>4oW1{!V#=PIS8MVbk&_vtuTOiG)(h>wM=Bq(^!@k9 z=f&U0=1Tn>%0>>ZDgN9MrrLj6*oZ8dQumciWHbY#Th4!)af7ZuFDGWi;C>LaoPR~| z$ftU-V>C*1yZGU2_a6%+Fvf^tkidm?J%4#tERg%Thllnf1&R96kQ! zzy2Wq>Ghj6;!|c!X3l@HN?V9^4F;e;_g)OMAk#*T2b`dR-aiaEGfS$NGLg8gsIR=G%4Ax)R=%mDF6UXj3Yd>Vl^ zg1XW}i($V0s0+#71y1t)d*#~DZMzw4L*ZKvY9A3baRBoUip zfF-U`5*oV2r-xc!zau@q>&KBM(Lk9Z8- z8#ci9;OLj~fa~k`s?s^GBTyba{u5DIt?OU%;Pe8oKZ=j|;Dl1~mq*5f_$O*%@zH@Y z>&g((`ud6RJZk;@`3G(5!sz3;zr$tXtYyhamlWYPMGEySL`;?w5Hiq3Vf`_FT6`L92A5iD_ZXYGHUO#@qc zwjY#Ey#CPobFU88=0xm|;xiU%w_88)UPu1~h2a8B6GgUNtsQfe$8Z@Fr(&-!$$kuf z;^`LriKjc_4GQOTZtKz682hQm*<#vD#M8@;b)IBn*?}F~mS-@>rbw^d`gc^3s1NP} z1}01*yOTR__oRj`=?&tz6dS-TP(YZq2#e`mHcpCc8NjFNcV=^S{}fO8J4{qLQq4Bw zuV9}KES*eUw$ta2;NzALt0E^XlY!v$P{xCT%*0(me9?N?e*U1z8-h>_=HWQz_L$i* zDGmT9vn68)9N6qu$wT53AG3F`JzfK%+v=69U(^*J8k?Zw`Yoh&0o8-Dn@!m;DbkPy z3fD6R`gtt4*#vVFL}JXwFwP3i88j-A{szBL*Kau)$wKEWd3N-6r+2NLpdBDxPYT0j zOyoi(62ibIJd{4{LaYatEASyT5fZrGL&iTrlC>Ez}C56-^@ap{yY{oezu2=s6z9jqJm%n^__>s45FS_YP+nxLqPmF4g z?fjAB#N&s!`Zz+4Nz{U1=yhx0G_QXb%Ssa`*Yg}IVSE(m1@IZDxqw1n$75l z6sb=DCv3^$)7I0%W-epmdiL_(na$u@&J(aR5P?0DY0ENeqt$P(02X>Xf~>{{uMg46fP&e z?K#(v&B$N)H?k#&MPj$tQ^R&S@kyr_p?2wsC&?gK$bDQ3iq7N5J=G_Xj1V>M%tqVp zi#tI1p2|bF4BJ87WGw&Y8{&>W;xl2hp_8j!A4y@rqw<#@9gPH|NYdYho!MxeTcVA@ zbvg(4AuA;|3z?I$yVd(NOKC92rU=`NEATS9TZ<0`9jVs^z7&Ecwk(fvSZ8i=D{278 z(>QZ-AJ@A)*B^e)s)@}wx#akCijWk0^YeFQ=bsrFJv+8c(5KlSY=3{Qay3u-eY-hv|s!X6l=DSj*rvF| z`1DM62;(`$EO1TXz`QYzW&fV2z{Fu)sVz{Dt1G_$2kPE0+zfu*Gdwe{!CL~ap0hL(%~M#0HNqbT@PTq_r(jXzmv=5Il{N9VfC{coX71f?!O#ZY}YvPA_TMSRmzW~P870Ad-uVv(cqlhIQ_F_hTt zg$QIi%KV4@;DBZ!4ALuQ3K%h}kPi1A>cjXTwf!@@SDb)%4`6g4-|jq_I@j-SP~3|z$$oTu;f*(L-q!d#z{2xdMg*xSevESzXVh)bN#NB4J4?m*MY^wyoQW0e01wFW#h)Y zuO-P?FObfZBalU)jD{pktN0D#AtP|WjnFW`5BWmp&x4_lcovmP1V#vch%{z-se4GS z0?mtMn9&`^cf?ui_w0#_g+sC*pu4N}woI=M;ybjNU9{Yf_CG#VG#ok!Wi}N)LNtgW za8B$uz%cO=tN`l0M!sIzFS!4RAGe=cV#M<7z=OcH2brp30Abi(Y1u9w-#Hk~s^J1? zd_RIuv~H~)dRu><4+6dQtSnJe+e#n_my4?Pr$6#pilZqZmJ$>mT0EwO^$GlUy`F7! 
z#wJsgU+_UA1|QKQfXO}eCi|Dl#s$>8=lU6|Gjqw-Xz_XU4xu`mS$2yaJ#@{6; zT&q7-#j-qqGQ6+Fxp;r%ZYaLuiCFXQQ?`^2*8l&p_jYNv9YwNW!uT3g-$m3Ax`FRP z&EQJ(BrZ@RLh614>ah^@ibRb7+Re<(e`2k@GxJDPb$xXfD8hft%{@F~t-W{V$xNRH z%(yf{A~+4;#r0wNv#O+wOt?pbs&>)mUrBPTsL(S%fz z72yIQu1?GpIrQ0~A>c8)kqXopX7^vn#0USw_son$FPI$!vkaAX`}w0Hx`Hq{e-)Jb ztNUPD6|bd0Mk+j`LXNf3D+p`Y=Q+S`9V2rH{9E#aeB#!>rp}yBZF=b#BQ}cJ?eCz; z%ffU0^Ie%ZX9n@k#_N@%G1VuY(b%sjV7DN!;d_*S_J8GP*f?R482`w6WDdCZBdl0> z-lMEnWJURw{NnBmIM3SO;0`l9SlnFSiW#9m*V+)9?>`l7;J{?P+k!ykRDL9OWWu%N z9O;Au1rnHSpkaPH8dNpH#A{}Nj4R@zw{QOZF)2{UhwOi3RrQ%aOPQZPaQ0AIzn(}( z#JX6*TxkZzqWY_Xj<63pDgb{ZN%QmyvGeP^3>3t;y#K1g{=qL8kntb(30DT+q7Boe%Nt6UO#VffjHLl z2Qj7{euK0x#u)he=Z|>q^A{(H+i^#NHn-%&#}oO8l>PU-*>m3o z3wsHN3^jcF6_PwNg5j{%`sL+V@hjqncKey+$*|C&P`}NOx)z$OSIb?p166C;9rOD4 z3x;24k^joQn5}po`|;>Z6cp4@aAh9H5VL>)$?(8zci|9mcdd9RxN4oKK!%&v)1&ys zLi@A9>jH+6-VY^P`JwP0ebfE-ppG(^5P5aal?63;;bZ-<(KFn0ZEd#XIZ;+Ui>)Zi z3=0%oPx8BN0mnAeU^dT2J)RfR`B4a;74mYFxUxVQ=Eq6wG1zlN`=yc6dX)tT*IWB9 zbdOHrIXzZ$?h3*W8?Gn)7of-UMY%nYj%3?O%?K!8{uVz$@tc=F{^?&|KKbO6mtXw+ z=V6WY$Jyjtm8y%O927rU+V>WTkq-Ag5P8%WFl7H9{B0VCzMjiS&x-J^U%gjKI{hUF zM(nUkcbeCJ7RrLro_mKfp@7wA;W&243YzB5;!|zQ<9WqH@x>Z+x@g83n6Ka2xE1f% zH_eJ%{elVq`KuziDO?TVikb9R%k=sjRYSKkPg$NYP5VTrui(=M@V*{-+aA(-{vlpAzVy|DW<@YbmAg~kYHPohVJtVf>&0Gr zum4Kf)eK*r&0$&MtYO7N!Bq>&{4gybQ5@Rbk{qEwITJ>EE;0x!KXxIWz6Kt%RfNrT zL4V8jL7zWH(jh)h_hdg1pDW~Xqq^4rp4Z=5&6JTM^qc)KiZcdWPxCuM&?{~G-?AT# z4!%&RKg_?-EPxi9J*%`gjVlC7T+cs$tO(Vsu$!^5mtJJ!8iB+~5$l(Ij0@PQE@m4% zj|wj}WXjcUP!9v{J(`M-Y7H_U&O-v0SR;cpq;i^`AvaN;9B%L@N` z{m&#L>fU02=dmByWj@{1=%IYCxq=6ehG z40c@3pLW6>BSeidKX4LdpXUl)-cle$e)UBPBVYaeanDcQHEdiy26d>6%k^JAf1n@7 zYuIMnXVY{&8|^vqRa?@r91OFQwP2)9NfmhgzIMi@%p`9BC1VRY@`%XW9jOEP4l-;SU6_YRYpw<{Q>PE4*=8P=;UU-ah1zh-g{(wnw94{2Z z$D|j)Dy8!*b7n{zC~(dFkD9pjdeC&9D?BX6{UO30J6g7-qu)sW#`7QFpkRdi)LEwF z2S;#w4_DHOJ>|w4J|rr!Yy}rS)X};y z;!wu)*8V%(bjXedKWcHh73QEK3Bx<`&+z{JhJGnB=`qis!}V$F1lX(e_*KgMqRzH^#2T4wTDyZt=B*O6?P~%KU90veXi?=AqXnhV9+^pY{0l7v+zp zr1yc%-qHUv|9n==k}EU>?Ss6`Z|9uvg6+R~|1sguYG~X$_TPDPCV$?X=lT!mMZL@4 zHYdnAn{)MtCEx$OmjCPpm_@tSf6Y;TW}lpUG=zW+zPh_MMX6#UElr_X-} z0DoRCE-(*2e>{k9^V|F1FRA7K_usV0j;c|2 zou`@Bz_4AeC;9KfXiG=?pP#>T{j-LuH^{jDCME1pXXv1Peg7Z!Hr791f8Vz#5RTZXw!GIr!znkgTxB})^9%=S z?L&bp{GffF-zgAO=gm}hMvjMP#MooLd;MXlXT8t3U&((`%@W|lwcdZpw8y6;;e6%u z$0a-sZGYbXr)Ow<{3;>>2;|5CuYb2^H{zV)hN7LpGtXbu zcN1ivXC(MGUw@d-KXBc?hCQhxDv$rY?dz|sxeD**phsxny6A@Qf{j1-QJ3 zmN(*ZZ8Y76XDtc6hr|ZpMJbBFcr;2Y82eG_A%Najk!McYQ=YWb4Gg(q+eEO3Y>k6^|C82|J ze%Jn!s9WU}nCo9^Onx|=zr!Zta{e;J7^&Bhewck7>;K}*FXI~&@hhJG7~i1y!4F@4 z{!hOcJ3Nx`UsfS_5KQ;>IGg4t`5WN%pKn_-$-DaB=^2e!-&mvkIum|+3|KRy^jiLi zy&g?w&#g=HZ?*s6Gl95WC$(*|i|4wOx&AU{U`%dPxvDqetk@C?_9!(0aQbrS3P>fM zF@K9c9BGI{IKS5a0Ij&mf8>`d##(HGr)eb*ao=tKvj&j*8lp`foS!`rVJ)ReTjKgteOLa$iq~K4$Mr>q z!~@xRE#2my5h>?m;J(Db&~fYaPCMh6BX0HoCaI;)7qom0c@+HS{B!-y&BgExq5j(i+^?U3EY8x+`dwV`hPXd}j2hTW(0q&h zpu2tj!?PtE%>N+6oARsP8oeJd)D_C5hl;r7Pdj4o@=td_)R|#)PMCb%wl8C1+Wkk6 zeKGU1`)vb_a+W=V!q?2tHNoH<$nSMHAm_XF4@fc$3q)BJ8ZyYw@+WUXW{E{e9QaNG zA()9nMWm3VHs6znHyjDQhlH~n7CK=|PBdmXeL2)R=Zt>%`6K+5;Fh4>i#L@czo@_& zT%=2KnBsr_V*jF=`&5pM3%#Xswh>NW-|^R5AAD>5!=AiX zixb8Sr>~p*>^b~z`2117;)#4WS#8o6WPyr|E`G0$6dDv&v5Ja6lKqF5&p!YB<=?;l z=H*BK7(YSrlb?jgs1W&{F!b?29p1*;$U4przzJ5p<;U$U%edbhpUCnU2z$~lsB5*6 zvx8=UUV!pt$WhF8rd;bcFF+|z(==+6BPY`yHDs`3T}KsQM->Ahh?ZUG{e}1h80T5g z7;Vt(5qp74qhLBZK;VKWwoMGrAKD;HC(D_+R+ubttg|uT3&Szvq1tzrUbGO=9^d;mj zjC^;oEXp@lZR$v$RhwCJ0-@jvRie8THUa7OuuZ8cP>=`tu@tNY?s_1)S&P(NB9L{7 zSfij0$k{5lIh3PkUWj3yi+34xchWTM5=U*>t!kVWR+L`ald2ON%)R+xLnd5tCNlqQ zX_^G_2HEHt-YKVI6K0Ut{b2{}GcfJbJ>|msoNlMw;Nt@H{CAt?nmQ#2aHW@gBGp(K 
zGbEp+#;rbzNx*v!AbuM^LGhbk{pRJXf5SH@e(>^(U;NLo=9^H2;TjL9%qY7HKZ{Fm zPXQSBxZ&lOJ68>FUN`HpR~<^GoLo+Ksfa?*dBQGS{t@NBe%Js2KmbWZK~#X_3AYk6 z#kEtFLa4+78nFGmO5fn51z1!wX(zNY1jqqC4={zzzZbgh#*FtdWXd%NkgzMQ9gImV z?hoW;oegLSU}JMV+c_#2&y4K+AR`ed*%r@5ybMD@!FcwLhsE&5d5fQ9s$aw%9y+`0#UBo= z85hRMLE|zE1){H{9p3PyW+P`S4#cd^`(zoRIY#d{L+lM@h?b|R0HBEf?dKK9+zUhj zqx;`=&?rYko#!!6(NDlj;Cb(^J= zH!@bAOxJyAVE7_lOe4b(joqfu5Qqg}-?!5ts3iV#-a4u6&p1rLs!O7seUjI84o!hD zOs8~?Ge0sQ2QH2E`jObGAnT$zhiyvD#&L9JjMwj<-VxkEfgGY0nwDLRZGwbr^xY;9 zj%ViLjELqyd}zJ->4iP^D}mW17z1712`v|FLeBzLd!5m4&w%k-hEQ0KW{9|4!-9Gx zHhNZw(}pI8-8F|$w+9ozgDbNc7#5433A2^P7*-#J5JY`)tmi~DISu0w6|e+%oXFY> zyZ3{n=$RmDb9sgrU>Ts~brXQ<&>eLs10C<#@ioig@-DD6toIaX9PPOm$>mthscqwd zggVe6vTyVLa6BW2LUHJuNj&I8LbSe#x2(Ce5V$HY&5qmrFgAcv4e;qc-RBtgc`4p3 zo{P2&h-n3XZJ=Mz<(qeZVIzb`r9)@(GaceS^z#Rp@?iOVIl0QghzqYj zs^C)QxaDe12)O`lGN*WTQ-&??v9w~!OGKvgY;WbEV|a%stXBXHPW<vhXJ;@GGAF^7YH7pT-}_{>e{;#p!X7=OkWg zDw>v(XiY1Irfosn(Yi_t6;~OgJ{m6%h8Z70I~!O|i}V%dLwiEAh>0bHMZk%o_jRQ> zEv-9kl~{(&X$_IMaQofmhS{$(1adgBtwS=T9(pibmnB?`DzQ1vQqBj*qyB@iSVCA2 zy>e9A65w)Vl}WsUuA!%;^^#IpwZn$1l1LS_B4r%o@=#fpK(=0zg_zL?T$R>7nKQbL zEeWvJ0MpeFMzL3{1<#`Dao8me15y;@l#R@nssfeJaW;9*%u7v0)ACfEu;QxTF~hJh ztYb^E;F&BvOtacKEscY8xrNXE54~6{c;1xM?BYB)PAL4iRu9I_#h1N2WHbZ3$YHp@ zn%eg%YNyaS5WDVzt8)8NsSb%dsunJt?^P#y0Cr3|$JO*Ap9l=~KHq4j(cO9MX zA~l<8hmMOHI3ce+R0KgW%x=(w0T*JXOyhMBo5tz2#dG^kOIJ`Ht~<17NN$b7*E(cU zOhtB@kYL5N!%l27K;nCKL+XYgjR-fH`kP~-)};HR_k$#BL!4~XYZGQ%GpBv>yqe!| z&&*N>4J6B>Z4Xnu|3Gd+8b=+W;gfavU?11?ENnSS=t{zb5@D!THLTea_wfCfD311t zh%JRUl2i%{F|Sr7d=l!2H~@fsT|Kh2J)}%hHVZjhEoK^I)vXe8HL6e)F0tL^loAnB zt<^!ElX9t4eD9`b0TgsKZ3`H1kht9;a$gXd``-rfoui@$=MaHC)Ei4P0MGn>?p}id zS)7E7=Y(_2 zF|YlD9ar5XUX25+3x=&;@i4DN(GY_Oi^YOxz~(jV5{DshQ=2j<*bx+ki&G;?vfx?h z#cM^70~mN%e095QO$)<1wj>LlO?1xTCuIC1niI0lAHfId*pKp9@O+TMj0>wEWN^?i zhKZFqEbCCD4z3_%XGjV|{Fkj#>>lC9_3-%?)#2-6u1`*8tcV5J>#L(Irkfnu}qh4rB+Lx$c=f;!uiFO9xNhnY#6ZD3 z-iMvoW`Okf`24ZH|A6(c_ljK)BwY14b01{Z*l_An597d_6DGAUEOpRe?O02<=V12U zz=|FQP>ySQB&VeuC3GcWVu|gVWnOV@8JQ(c3RwEIQ5nr1DIDZkRQN}TXwAHguo<^= z+)!??-4W7aW2bepLe@`EeD?B}`WqDZ35u}`BsA;6`7}S+eX<-6+rGq@4+p~Tu(k=U z`a*s+zwwys_Sgl41}|4kp|pXs4KpsFLmv5uIJ$QiM}jpNr9w&@7~5i|c+KymMk?5v z=RrBd@`j+FY=Co|f$9*DxV}yP!I6n`%C(iIR2*%?jO+FMikNY8J>al!NHnz_C|$N; z#x?SXMsLy^Nm%9BR{1c>US0~N5hiWkM=+24DbiGo3?16$wAXVzteEBwYa3=<@6X>a z|6-V<*kj~PqfC+klyVJq%jlMonperuj5;sf%>p8hSq(*C^kpWp!$9@Q z953rR{`QOCzWnoV;*VtGS3La?KSA+N1Ab_;>M)kRb-fn&-*Ua(O9?`@bMYV8-SCLk!vOP=`3Qv~Fkk zb(NVyX?DkK8)jV2A2U8hjGJyEb{t~vFS9F6X#;HAWIy{ajFAeq=6O&KvAh&YO9XUW zx#lT*-4#-=q(r^C{)`*p>L+t01t%jgvjgpmri=6O&KvAh&YyFtK*>n-aC z&Rm|S4YIECQYg*tm~F$1>)Yf<{N!nNrExCceAzbRLU!Nn4tU?|2bMLP&$9w6oI+^^ zk+s%L>$hG1pdRes&H7cTGI%YXcV27i848JY)mO#r#q4;@xISe4jw8Vu%a|Hq# z@on;>=KIU+TBe_w6KjO>|NYz7SLU2@<`=!tE5$3ZznWsLoZ^_<+j z^;eW%)IN-Kmvjvsf^^r=AxKIiNGsh9LwAdWl!yWflF~7Bm(rj#4AL;vz{ESB=ULAm z@qO2tUuM?5?t9KY=iX$7z~@{AZobT$qTqi`2}XRmf!_=uPmqvvAbGV{yVj8( zHX_tyL~1`MJPK--^E+qV*jn5Yda|PMpEVtp-3QYq5s7q7f6GaI-mWub1T4Z3pCDe| zkBY=gCEAt>o6GcPl+O~-hUYcC_YOgbR2?p-N7NKeHh-S|Z>|}IS8rtp4g$=yfb&%D ziU0Fj8YT62$BSV43CKj&a@%0J3oNL&YpxuHB;I5~Ttz_lh;!RW7);0(35THl0tkLN zIUC7<&h{Qgz#Y5(yHEpLB5&lpn0cBw_$I#m?(+R9i(L%_UBHVPMS|o(fOBBa_rvr+ zMZk{FR0R39mz;j(V?W*VPvllj-0C*B(aj0}2QP-?19s7|xbEgBr8Gl+vz+JaZDabS zbVs(CuT!UGbAO$oGGLG(IC8SSjA(3Ap6+TCf$ospvTTFuIRm#(UJL1jC*HsTIh&}^ z+WEX|Bs_&TCUq|Ig$NvV22XeS{{8f0W3dzC%A}DO$@H+nMMM<>_&!iE#IlNNa8nAk z8=8fh+>E3L#qNNh=w4Jjypc$#Z7qgL%`c&pb(*z@h>!em4eh!MgOu{6&AOEL@8t5( za0Y65>=AJ@|Lp^~skOoPYsl(G33yqBFM(_Z5Djlr+B89ht#bWnL6$6?Z_=1)Ctd)u zQL&Gi#{MVZBW9xZr1VczgF%I;oHP6T^ABLNV~wBjM#O+eR2dw3eE#ro9B~KtLG`Qy 
zW<;ZC%WSSVe5XP!Z^fS0ImvCH(&q5&{R~sKcaQ7K#+LJb*i?nBqOlcf zTI&Ued!bbnc(}g%!3N&>EvVM^)R>kPvza3!2$wsQ>J=KO zI~;BYNSiJTnby$D*8BsMFPo0T9)VkoRAu0B1Oz-(AN=rZQXlOUyth+mf5;DzC$uM5 z9fZZV{v%gS55sZteA3u{P`}38ONn2&HUR)dmx2Y)Zoj%{WFN?4#5mul0;r8J9q@?= zPAy?XrkIxuXYPX@6miW+Km7qYiaVIHq@fw&8nc{Vq2K}#iP-3uZ~#Dpn4ANjxg5^c z_1l1ByjR%$U8ew&k^L^Rf_$a2c%fUYS1%avcDO++$#mrzCxw{@rhpf*UxvspQc@;6 zz9>k8{#idA3+KvTlra0d(5O%i7DQ~^U-L2A20X9*gme`lFZzRB+G+EyTlBk$?7WKMH6pb zBh`fED_ntE0b<0rl+WL`Y8)8^qYSgdRT(NVmO?r;$7EM@1fsKG0%7wHV$aB89xfZm z9P0T!2x7Hc$!5qZ*BLa>7NLW&Y4CimIzxpFv8ZS7Z3};XArNh7oNB9IGdtB-AcoQt zsc3cC+`!VLKoiSOh=$D4>BFKt?as~AVA+D3{IA4^-rVQWd zutvJdf!ZyAZy=T~RPWC4$k}PO|63*1U;lmF{3++0MSt-oL;6HQ&>;MH7H~FbF zOPYxpH!GIyMD5lG_zQZH&Pj0S{2LyO{*PCpn(tb=kklG~TD>0+@nuFYt&0v)!}ms2 zBDkYSB3NBRe!d)i?nP-GF3KC8m0K`SO1F3W+vL6rJJ5b)pE-A_!pDxU5qqFAaJp=x zV1&ahC*Zr)yM}yo6^Q=41nT)aI;lj1S&sun2bWh7)8hD<>wCP88ZT*2+PeW*X7&VX zdZ#zDfaHu}_;XKE7q(vvQ%5qzTzTHp06}lc=xvfRpq8&(6o#3pw$UM%lEufnq$Knb zIAHztcPrs@0wG^Cg|QXv^rHKo%%S=bDZJ87(D1V0o5GY==h_9m6XdVFH}|=Q@Eg`r zAPQq>n87)B>Tb8sa3~7(@P=dv3fICAu7gPN_^f62+t_DkeV;1tQ2+T7FIz@fzN7{4 zRCr(6=rhjr68TLf{7Yu_BbLk`2=WN!H7o=T4DXl*DvtZ%M1~P z2$UgTTs;-J=iN>p&H*|Sa{c9Q_SMya2F7cqX@L1a21YTX(DBiwW)tZ8UzHH>Q5#N@ zVqQKukT%(}E_f_6@yPU4gka8D@}p6Wy5dV?K_!Cibk(5JF6Q}YJ@tqL!ZWJ$?kac}18D|(#_2&hv? zzB{8fZ1c;~pU=hPOdhmy31Ur@c5w9M`D#ie`_dTq&GGNnA8!Q*k}DNsjeA92w>y&i zqD%I+<$o=Bk)GqrEQK|drOgA)jnF^fP5qh|cX|xlxFbBx#2lz|*u;48gT|v!MY`-c zCVly;C51eB5+fon@?gbwEO_Smxt}hr@!L(*c4ye8D6I02ZeB_w3mYw5SiqlGz~Nr1 z=I6c*Azbt%qi|oA5y6GT9j-@5kfZgzp|C{25E|2;M-rq&s<5eo=|iT1L*i`8?R+B3 z-_vVY@6rntPZ{SqWfb{@3d{yiNI|>~H2!dnIOf9)k>hj9JRctjNFp};ymyao9Wi*K zd29|J4YTP1iVrBrCB69T<7A`KH2h_`i~MR^MZeQ!O#EetJ?Eqx%RiisR2}~}3$Kr)`rdu? z&CVAt<(-XABmWYeYx&%CGr${m71B^=(Jw{CnD@~e)<+WmczG9$ROHn<2sCnSaV`CC zC~+4f><7?^up+52HnyZ1dcGFRm3KMvqcMMl^)^iuEvm~BrP6v>mc#dR_8r5cp1S`4 za9eNQJZ}EI(@j%jbA9bmvGB~GyYk({Z5My9zGsHFY9eQy^OprzwyJj%IK0XmkOIsg zlwT};e*gQyW0k$1Qt!D!%Ip)2yOYwZv{x)fF1$?NNqG+1=Plnb9EV`=F`S0wK9B@| ztPH;jh!nG-m}f`dF7C;4aG&1$IR>0!wD6gl3U}zA^ON=NRk1hDMOWi@!H6zFKlhqr zf~Cg5Jz->S`LenJu_0=jl29^U3PQF8odwwYeqC~|lY>d3LW7I(UD_RDPgB5}gjKI{ z_&XN~|94tLXr1|RzIcEBwz*aHk3wTg^0WBunI_#V zbRFiGxNEE2*Xo(3O+0J6EBK&D?pzZq@y~go2XO~SZ~i^-eIZZ?>{n(a6*@+82>@+q zR*Oc|vLsRJ6pX8Sc70h|m-6sXNE*BGxQabjobS2uLAr*5j0ed6ybKs`mm#?o5E8e3 z4B>OD2U1X5VmgEcJ8BB`9n^4X21^c5T=J3{@8}5F4PXU22*pK|?yp!zt6rqbl5@R~ zUce_vExUT3()6noXJzf@G`vBRjPQ?(3Iz+%o56z9`Ru?9w z{xhM{)9MbS%Oen$Q}Z(Rx5limhezKi(kvcF@zE<0GVZXdI4cLPoo`##*YD5~4w=b3 z@m_~muvx83{|IA6hXb4v0~tDxCcK18@DeySPt&&q zxDfal^6F|`2=sV0i)J$+??-_n=oecR?f>pMR(%vQE{@No-EaiiprN6TWrdi#%TTtD z<&bkud8-@4!qAplw;A#c{M#4i~rJBEi3wiOTm%dz?4wObHdl(o);WD zxy9Q;mnkik?%Qel?GjTSeH}*rF$5=V3ZiD%d%NIf#goAbV0)gy-05BP(3-rSH;QW@ z+x2_Rik|jj=D)e34_>L29cezXV&v;}LX|8wcdgRr!lu07i6>y+C^p;rA^}d9D*?!o zb5t29`U3i$^ths0Gw@;!0<(yGLtg&)b))RnTLSiPm)Wn~p>W239misYLA4YuaWup; zA#VnK>;GDjv1^pgg4wTQy^UXsdg{e0Om<0V_I*blBY1wUq+;e{jJC6^K53k79;glR z6h);BZ1Xk7B^ZY~CeHWD4%Mr5JD^^kMzBF;sCbzoKx?6&4ZY5R-OZ?;gp?|SwGVYm zU=gchVMomwB%GqN3~P8XSUYoNt7W!XfbJM+#Qs z^Ov)Q-{E2s`{&%CdD-_aA+1YquXSuu$6-B|D+9#%FcGUqT<)RHuVma?;I3ucy&(xj zkR$zQApXc@1cLT;jx)R$_-8cvUtQV|pA9tVN6}zoMnkX5XmDQ8yTsqKs31jD;Ld{X znT2!E*9^Umo{yqw&Lry-i??~i-rQm0R$|{F%TQZ-$@QKiS zY!V%7@`w7kA!{DCv_VFTz0sFUlBmxvoepC>-jJYT$evJ3oSv@ndfS&pN4#3`e_Y^q z(>cv1w+G?76e9b!?97XxDABT}pJz8xB%Ic(Z7FVX@MRy4qapk;HS&H&vajHg%x}Xe zghM%|ow&}7>Ewo%PQu&gU)AHf0FrfBz1=paU>}j@)5xrkZ)TmBmS27eK=uv1TBo%J zr4A>aA*G6Ts~SWxNOqc}ZkZL{wpf_YSc9ocI1P0tv@WW@3w2WR3rv;NtNm)W3&oxZ(oR{tVz z?2Q;Jn+C(+(T_ORvdsr~#kF7aw*2!uD6*_&bCk4&7PCT{C;r?KM~gU485J=fbiU2= 
z9G_6fSSh1U5sI5T!}nz8u3sN_mpTBiDXJ2kH6v+q-Z*P|0Nk>geF+6zJ={pT#VSXC zR6W%gq))($pWD{hPE_n^mm25~m;zdyncypcIv(^CdArAchaG*{eIFVS;?? zwT3Ch9CuT+dDG@BXiK?p_uOh!uBA1_`!hX?(qtq;`jMiISxN28qf6M!12(lN`q)|t zJ1UYg-_Bg!D+O}cg_y;?{0$8&F80I^s`5LbX!qv!nB@;YJ9|a;&h*+Hx`*4dJ{B?s z&2ym&r9Xis{mn2QK~eC{qw@=>rdJ))=%L!9}ir zUmELKLDP&9`N#3z)NuOpSx@pOAzaAGl0(J3!+5!ORPPW${Y7Zn{pr@$$h@15wd6eO zuiszPcJEX>tXnyPL$Tu($(*Oex&LW41azE-pPO(+R z0e)|qLL#*65DnQ z946`C7N2Wh>xF))luB*?s$|qItmf2$Dp#A`y@VF#r?)ZOelJ-(ODj1VR^13?;Y5&PCB0Ypw_>a){2%LnAQ`R@5cIonJstP8n&K&I~O}@BFI_dTS z%={~EFnS@{{``$XzgOPTB5ugWcVd`opju-q0yC`E#4a^1cM9!6>rXyCP_o4?QPiGb9RkWB6mbpuIhLvP6{A)h{H$q9@rB1 zpEbQQ7F_&{Y3j?iD-Kcu9JBAPaJ-#9&;iV}xbWr|k@XuS-I<;Q-I2K$_A}V+Wo*t_ z;wAwASq^6YvTq-*bEc)dEJ+}i-!hqvu9Ekoe^npEN;fHrZ|5?17knD{dRAd+vIY3N z317w^694fwflrcn0rD|Wdj{W4qlr)|r}I?`IMWZk75uljL{tfPZ>N(<{SN#PI;O}D zRSQU(H0R^kw=?3rf1ifUPXF67;)a!z-rDfC^^Zd1=MLrC5&9{#s}~OSy5Ee-MxDZd z4?o2Uwt5XoI21h>R`GG`^K_MKDuW6~p*;B98O3-C9;QnDJ8Z`@{_a|zwX47;`=z=y_GMO8yzm%?bE;R8pJN*XGx2lmGdFfI1| zrjz~2m1vLM!{o^FShK`regD1p(vwdWtemTuG*WBqV z^Uj?H3>+jHd+Ps3AAXpIzw#0uVvbJ>EwoUYamE?VgWA4%U|oznt4C6!4`flk_@4nF zgWJ4Xfz=cP8V$s{K_e;aRHEnpx*OXQCQL~0zWsK3b;O7?cI?>nb@YACIp)Aa+JJDpqL##?&{it)Jbvagi|lKlk*pR1G3b`~SZti^ zWyQlC2Mb!tLf&G(xA|wcBk@KZ@$tyUihnI6_JS9M+MpW9y9$>?|NTpdleBCqjO|!A zG)SU4=$|lQTzcV!;pxq{-b{-xx=5OBwmA@v0D!3XzeOXs1zX00NT$M*CdksgN|82l zE*}6*IdH@LFXE*p`Vq@A9*s{t0t{YDb<*h3W7CVnU&OuQ-8Abgv!}V|nMB2a3D>mhN=%9(_HvMHGD2k96WQC90J z_X4%#_N`SBvt(QU!yn~cvLe{aH5Ubf>rVDKX$fkJeZT*@jFwc!+4Ua_+_Pu*WQfoPP09AQv*mtt~Uboq@{)^HuKg_5VCYCKj z3iZjfFV|RB=oOYsuTI`cd15D1&K4i;a^=!Xye}BaBP`zr@@)nXWV=4>sEuTMr{dJ2s zDX|^v#lD2MVre7i(uEu(73?McA)vte-GpK>7OKWWF1E4{ZkZ#A)W`96siu9l3y%sz zU$RZoJr%T$Kj?nlI=<2WOokMFWB|pA3?s+|17HBON132WX!{YMXgm<jI2{7Hi>``;z6vTh^J zJ`-}5mNndXO7z_Czm;J8URU{06uLYdt@PGlws;T>mufB3+W*$?4KxT@5qICBR*!@@rdeTUqI;<& zRYI4mGnC(dvyfD%j46Vnfmk;;Ym$mvoK#c${h#CCr_Y~iJi3ilQ`fAUXT3gu?!N9^ zxM>4zAu%R3Pg-Pmg;#N&m+Ma{VyWHnE*rEd3l#N$Q4y03l<0MHfdi5{r#$uF>Iy44 z9YT8gnP=n-3LcWZ*%n)-y=03g=mFnXspym@*q16YaYa9<$TQE>q~$6DY#n<2+8lA6eYV-nZ?lx`Yvi7O>dAEWIp?MiMvY4Q?YD1Q za;c@;<4Z{W^ zXP%YTUT^Jm!2SnHD5zDe!PMsxi%dZ!*bmk5!p#}#y{VVMUN_D z&Q$j{esK&&6)JDaj2|~HO@+5)1`Hf%)Vfgnq)~IRBHBt8I4ZB!x^v}*+~O}^N=R0b z3~1|r?qwhCtIp5U=ugI^6HYuaU4Q-c>A2&LO{=Z8y1YRklQ%9@1RvV$GydbpPe>CH zyMfaTgzNTjA70n-uM`>65!OXQ=-?#gzoG?fg3u%CC6f;wYIf~P)jZ4MKJn^nucj-n zyfWQ-^DS7w?4O3Nwp!YG=bf<7HfM%qC7{GrdNoLxSqb)8W$M+wbAz1pq1pf7w#1)u zqPSrt=!u+C7l1mZ!Lm9Eu;kV(U}SxXzkGO7#iaYMS6(mnsYYyD1@<3!P!Gv|`Q=x! 
zK(SHU8)s1TJ^pH|Pw}ToPZUGH^lxRCb|zVNx0#IiQ(C?0&WmQN5#ngvGwVMB&3+ym z0Rum&`0HnKpYH!|Kie?BtZ2LbCvBfSVqcg%rWy-VTZ%UOp!qMg`(z&znRA(`wia!c zdZ|Hx4Q9>$gG3*c6}1wNw#c!{N*}*}W$&oNlITI!wD0VmzW#!J&!nC?R7+~X_EaYX zy!Jnef_;Gz$9ZjU)r$IJi4gB$rfML>M&XJwMVoz&zsitTQtrQy1dN(tML?wr2W&K| z_y=6kY&8)?7UrM8o$c|Lwt!JXFS8sO>W|i#E4%Jd)T)>aHbtcv$yQ@U<4+1@i+#~n zvcOSkW$}JRNtX@w6{gSDWBf(XnPvQoR@O|5ggy|p-{b3O35~D-ijczXg@;Pf7%l@WH7mX%4xg75w(675D}wj9XSMKZi*oF zm`(?f4pJU}X$x2x#ARCJ`UgvAn<*i-Y2Qfsp3<~0GQqyId$X^$ycgPE@ng;KJ3FZO-@wOVln&G>_^=bCL6gdpqF-yaGFUiIi36k;Qo6NMQ=?IkI!t6hmyjRKWJ zvVZZ5^d8;<*kVgAQ2bDtGOiG^LEx`d@@)iuHcb|drN%+#1t{HWvmcbu94B_ZQ3+20 zqQ)(@#JX72nyY2Wo3MP)O9Snuyn$KppMTJ*Ic2JS2arZAJVkW^wkEps-&mm7|7U5~ z`WvM~4m~(cJ#gyu=%bIN`yaeNEwacWY3ZewN`t2xEOK4;m71P>>Zx?@x#y%&?~h9R z?zdlB1~Q7Z%UuIxI@AXNsEejWw@SMGwtu8S0|ulemsvW^FvIk!hB$Vl^$W4~oqhKE zrS#d}^%D~!4;>6pCNzU-Ec1pi#X_$%(6$d2gQQ>n@|Wqh+iy>6tg%Mgc%u#R2Flmd zw1WrBD}4H;iYheLlzH^AN7G$*-IW$ybkVfdnrlVH*_OBmY&7XK2?jd%%(Ks?=bn2m zeQU{Yr3DvQuwdOWpZ^fhs%dC;S;%TB%wEx=RJh2Nj~^O*v*wv+p2nLZbEc)2S*9CH zO4h66&$j4^_61X8MVVz}^PkLVR+u2~V$#-u7MihztRNus^>1>G@lzeh>6a)j+NqlK zPd@o1opb^hD6UJ#9e;dUZPnG}Eu-8O0C}TiDl|4!fvX)iZUV;bsr20Q&!@H4S}V;m z^UNY6SuvYw5^~U~TDnP2Hel`=n&M2E?M6Y#RaWcz#9g}l{BIEF8)cd2} zPmewEXu9^Azos|dcq47I^|oo-ZMS8JbIQL86dkH4R~;4bbuS#CrN*Bgu>W=q!5MR} zfI;Ao`iT?O1TZz?uQbW4O!YOiqAC4C>AxBhgc#`dfxZ%N;WdCrl=#!C$%_NZu6&6N z?Z0)y1lm53A9&!wbdGHC^m5t+TRi<33l!RQIz_+A{im`|oxumK76NNbIi@5`snmQ= z1JiL4OIq1dBy>CWp8_pOWI=bmA`R_0J+dxwuL8C4A3bgb1v;SsWd}BsJ zAAB$|F(&$0%N-Q5#Y`hbPi1-lqb8`q;?MB_y$@zhvFe~bniPEuLR6sY!K7|Bh^29< zqM-oEod%ZpVUXTpAtK1cBuch@PP@35f+b&3RsT7TcG(ahiu&S1(Wx@XMPB?cJ3>Py zW6W47QwU-M8B4-hXvWH)pUXghU$kz@uiH#s9dK#ItS#n}c-rpZ4Ed{dNe=}m&sod2fH2M(eB+GXSAE4&^ zAwbQr<|y%78|D6!ndXC_&6Wtq4D{+dg$6hnh+uI+GQT}Z%=`NHN9b?t9?71H%k2(3 zwp{;k2My}WvChi8!ZPTys#Bs)r>R1;yrZTnV_hXnb4FwZTAsTZAkdN(lPYH%v5&gwY`Gca`zinxoAMx3e}8a1 z*bz}?2AW0igDlQ|3Wx}+{KxA@$jn&P@!(u>wlN?ozz7!!4XB`c&9qNp{u5pKuRf4i zYQa6XZ%h$oCIeC_s{K&6MJKDWcZXh#4g1uEf3U-Xp7#Ck4>JO}o>HFw0O%Zoj7&z8 za1|)q{{GO<-*EjAZ}-1!vzJ&9&W?#m+CbCibde`f{bL+7sw9vuS^g?2G6YZ;O2x7b zLhP3SRRyb}#%Us_dW|GSj+L`O5F5y3=xBn-Y&1%v-r@sQF^NdkP~R-Ff)O@t5S+2B zg!mH){uJ0_O?+ZolLAYfH6VEfH2z@7y4{NOQYEV#Ku&;XvBlGoSfCh%GblF40>uya z+!K;^QD=b``@zGe38E+B!TmV=N_{H)gn!^5{z_cJlr`!gM@w#l7fZK&jeixr97wUB z#d%Z=t$~0QIGbajIKZkbz;sJ4C4UG}4*ZgWEcD2i36aDvy3x;n-*ay|;Ai`#VZ%0% z1&TpaPp$Tu(~Ybw=)nzFe{BOM08c-|1&Z_1d+)xRe!B0^(z46ygKPCcX{Qc&e z)8Fur`a3JHn0CMzsckZ;lX3Zy{TEHvAG=57?$Hbr*m?d5p#aM(V8T!^eUsEf|LHgQ z{9{}#Yt9B2C{8@-#B{@TH>6{JeM}m<>QKBv(XYDx=#WGndOlA$w}rOcq-lKj-FMQT z|NQ6l=p&D$!w>&?nt%Qw8nOsQU61g=n1yS^RDf#j+z1O4 zen_^DKI z^9Qv&gr;0IxjqohRby)bcB_4OFZX{F&)^&dGAxvHNOAF#)`9i>Q;Dn;P2!l!cN>*- zc@K-V3t7cH2ff$vcbt?_7wb&gfA-Z{C=@5Tjg65M6mx(k%f8LEw);=GJBfwhh1}=z zSBEcSKaV%HGj)%@+Vv=yR{^eamV-W7{ii)i6hws=NH7t&g3@~ysyS|y5te3O#y&Op zz*{w{R=31S^Hoh&`IoiN&b0KOHq>(SK4$(nfVlAYhfA@HS0&+v=XI8_-|S-5HZM?= zdzk?!4&VI&5Sv-q7q#!IQRWwKm5f}`$ip>ihBAhr|LNZ!MRT$5K9U!@C{S@PUTO4NlTg{z;Ghy4j_X7J z-G0+H%|MM`|Ek?>G>%3Uvhz%kMbI6&j>z8`Bh%|Fk=(wu;1b8(=bwWGiOhpx!e7L` z8q#p0-n8$76h}oxV6(bUkUE+1S4AcF`(LG32TI?Kb7<(VkV2c);)V;@Z0UjND6n(? 
z1#0*le+`pHojfM>PYo2qE~yxoGis9=K$EghsEWH1mX`e5E^audmT}R{b@|YLAsMjc z{^K)JC3!<*9E#zl32nPRgR5}K!Or~`s%wEY^sRhmk{LC!Zpr43OJ*`Et=FFeSVCxW z?W-8C4kq>=O8%e!{ZWsO`uHXLkG^RirK8AVgjr`ogthzC=#ZmylZm2adIG)2`1fRA zbTMPn`4=t&(A1d1kaH3#NhGQa^vSYsb4@!(O+|P85%1$62GnF7Or=e(ecF?R`G<$l z5Vt9gTILyqMLW5L+Cs|m;606Ba{uy3yg`9q?r*-uW@)dzcm{=bnW8k*jI+?ea+e9K zQzzYf?|tdQ3oej{&Ie-qqo%{uA@Xj7t*lTjNDf!Vj~|!DeEMnX-+w@w8jBr%KxKwQ z7at_^py{z=#^Av3_u=ytX|`EsO#`sq(v)_fW%dEsiTLLS?Qq}L4?g@TjUMxH8a(~< zY4EhuK?aA|(j}ek(mjsKgmIsxk=TZbhhon;JGO1Y!~Og3yFcx_@4jihVZ+j4haQ@y z#sY;r*tY%3uBF2LUNPqOvN3sP}e{2gh>#Vb-fjH}ev0$7S3+le_{`=B-Sm^i$&S&`E_S<8x z`?)0+9$aE3;@pRiKKeL)GI}(&Qkpu=JmXC02M+S~@5*ufCvw8m_J?U4`aF2L=_ICn z_#0owpkDryF#)4_i)l^(@fq3XTWpaAV2h>S{O*J_@4WM9l%R)-EF*fezL zP&;Epm3J`5MvobbYjBh(o?*rr(lp%S3-d^p3gHuD_rmipq>C@UD82R0yXlZa4owTr zKR*tp7l&a5zqH2n&wryZW}Fi<;~bZ%c$Nw01^(p(o?rh$X8f5l1b%@g!$KD`9d&Cr z$(d0gsI{jXZ@3|yeA;Pg4aDK#gAZ0wLbDy!4mXvQ=|x{V zm0aeZvSTj1k||5YP|uZIE0hM5O){CJpQRx(F6gBqvtDfmX0GN+#ukcy!m0Fw_dg)p z>%H>o%W30HHcos0iQWRvax$VjZh!}Ud zM&tNrq3-?$)+Az~$cXQrPyr)D5ztl*lx(k!)>AOT9Z);tKp6woBsnvt#vBX?VQX8e z*&ntdUAYlsToz0|{w5_{SKy^06_IJVV%*wpH#hRZnxN~Q?<;S zw5rVpliTWViQIczjDlNgOLid`NuA*wQ z!Ib-;4Z&1AK}lB0$7p0mu4HbZ)b|eos7W(GB)e08{$xeMYYtb`F5(r+xviy|Ybt~} zco6SDRBH-ck6TiD7uDE>?zL)INo30NOi_|o9~25>?s^KvYAS?=Da*iI5g=PkKIFn} zwI4vP6C}acDyc4{z|7TLu_tUn4<%t=5g{)kLAT>7Qr!@_ARMZcmbnE9he?@3rd;QR zZo7o}X(lV(wHw)hD=}>Gz*Mq;eRV;()Rb=tY=qQmvXuxSp$8_aYRhPB16ICgDxEnQ z|C7HzM5XE#wNWqUID3x&wKh5ZFFL7rl1bnp`fPjB;KPUB#vg9tVVsKfW}6RD_3>TT z=bPK>N!)J^bcAP6aDjpk^|!#o;vf1JPgGLf92(k$2Z-#yAhC6`Qr>&-y;z{QD1B|A zuciYIJWwW{YhcO5$CveLwbDI*{<-JV)mLAY?z;OfY>)B*9$F4c%Pg~O+8Sp%EVcB~ zc&3g$=%H*3&Rlp5TPpqW;)~OhPd|kR@Drh;lLif%I(_Fm%cp(z{%M+R&ey@v6{TsPsd_ez?fBZB3 z;o?7}x88gM>ZeFE&oT?PCE7TBW08f^&v0(Uy6di=4m<1+_&H6w=Gtr1l~-Ju)?9P- z^xaLqi-Un@qpRtWM;=a>TzYAGMZVofXrPOD>u2zWd+lop;{BV#iPM z2F0?{x3Qm%P5-##AL!d3)61{C1P%DmLOvbjhT+T${sMoN*=7~m(MKNxAMe0{yrbn@ zg+WtKgSP`#PTOy{9TtkdmOl94gY>U|-I=cZ^PkeoFTI>TpEyCb+L`w&^QO%<+bnI4 ze~hWk)AZp7AErw#yCmIy>+R_+{Cd5Eg_QvV`r#n!uczGDf2PaEOa&)-=18|jQQPM7VQI@sWS(7^s_BW&BU z>-Tq0Gfh83y7SIE(}fpan4W*`1=*f!>cP{d8A1R4?z^WAHrPNOY;)VQKjXZBd$7nh zdh{pa@L+6}wbF_!rX6?OF)gsb0-#~;;u^f~-uu(B#~+tA-E@<*9NNi zOP_uAnJlQSzQ)kB!}dGiI`~?8_|ZqxeBr`r22&W;asreJK6fMA`kuMZihbZ#hc$>z^H~KB&NsAj1k ztKz{t&;8)|Kl7jeq|0zE@>lF114j?|2GX*)zPH|bo3zw2*y;>OjyvBv;5EvFqqu-E z1mm#xPxelWF1{GfFnJ@Lf5G{l@EG}I*)&ICh0>|BE*z1>}2kA<2uPCHZX4L>{J z00DHxm(+&tN^_6)-Cjh;dgzPkpzRu=I)Vx9v#VwaroVbz_dkMSy;vwUEGXfw?9W9C z-Q$sbM4RP1 zOcNs)l_@R$7F65#JI>33u4{$wz7$G9f`@_4i`HONbF7E)Z_`wCYOP>m{|hza-G&h= zgbxmgKK~-$>t1G^s1zFe&whE&3PE^N?Cji6ipQ$e?=V?0 zIS&vY|B8)9j0t(Jx$W~m%I3NYWXa5dvNCt$_yh zcyt53#0N;TuR%+$x(4@w_IvyLL!7PtLUs*qA6DT<^ZQpo^l`=b541%_h zXdnNhkXXj+1IVUM*AmCy`|rHUhc03uVEpCg2vWRE;*bS&t6J|r{dBCCB2YD4g%NH1 zX*}Aerg)#AR8tMxOwI9koR?jKDux^g0eSq}W^wID)~V7qUkla8s!#$j+YObj{#TM2 z7^zyk?Y6y?VFr`@OcO~GK#~5VY8Yo}*(E_Ch$&cgDu#7Wl~ibW)ytmk zXN0S4l>ECDKl~D-@W*-|eaHe(75i3c?zR6FGOP27bLBA?VyLm`9^+u5dyK$re*U}u z_R_6#6x`ON|Ne;mk9ym_e@1?(7i(_0P#AwC7-{{OFTm_`CZYfS{iE&kzt|T8$b5u# z%(d01H~q-QR9n@w!C~R5-GZXqas?NWXrFkrgAZn+QslB8e+GtQ#Kgk+LTJsR&GH?l zK!XpxD0BPX|7g1xF)>p-V2ysaMGia+>=*s}qc}ja64wdxAAvvUb+vZP5X>~p+xU0G8&a1u;0A@xE3#I&+A@B z8o6q}+cS~m#EwCYKES?T{}JYPymZZ8lFp=RoA${GlA%hDr;%SR5w$d%0zz-0Y!T@nl&_qOHP#jE zD{-Y>x}cQBU+TW+-~UeMop)XuJ$7_j;oB?V*Y)$J;V%xyfu{G%!UG>Vu8%hs_)F;f zVe2<|!2KD1;lCyxTralBqNsl+5B#a;YhV9bI{U11aCr4J;=^^9O>n=dVahny4e0RApKz<%z97v$INyY9NH{BnLA9yZ@`#~s)@=>)O8-g@h$ui}CK zsF5Sn9aywrzaGcg2y3siE*2;blLd+^F25pO^oI+v?bG_$Ykr$F#~icc+<|A(d9eHV 
zlTW5)ms=)%XNBeEm-3H2{+RG4U}0s@z(MJ!SnOC13lw9=j!pl%^PlOsV~tFvW57ai^WD{AWm>v4>z~aOWIEa`J$mg4Hep&QbeDTHc;QU)Skhx#F z_HWmwKm6ej(x)}oSPPi1q!A-Vq}y=@#HbJ6$3ynb@u2)iGPazj2OO|}dhnr#(lU5h zyuu1Ah!3y6`fB>;KmV0R;>?m=ciu&Qam)poM;?ASop=8E^47xch}+QBSI4N9G0k<3 zwg1rv_9j^9n11kd>G)qCm*$ygUV*jEEs9|RraxhE<bp)wL%a@`I3U*xTjRaRa#ErbWu z%YAzpymhj0y7!)Y(uIi8%P+l@zWL2>rWL-k0v5bJPY*x*ki03gz=HGR+>Bpgd!hO9 z0rGje6N_hu9(-6@X{GO^|6txMvDjj1QJgz60leF9yDhzfg`h3B-ZE{8Yir`fiRnJP zZFBt%*QMdZU&8n-fqAeT79tixe3wuE`OkBB1LqK#hrj*pZ!tgX2j}d+UPG-HkVjiW z<(Z9uhCd=I6+|I1@KV&bO(6=S*dIQ8c)A#Ix(5r9OMG()S-csD1+n4(eGzAb+$jq* zJK!4K0*i?sW0B`%9+Hj4jAQu*#p*+4KJpxnW6*vB3x2Dvwn|#^n@gsCQ}#;_J@P0P z3?EFh&NfThd6!+VCDdx^*=L@WvAp-bd*zLop{uWvhF}c7v*HR8mILvi|J4z%rM1^y zJ1w{La`MoEi?4DhH5T1?)(IB@xiIp#zg?65c*&({!66H#op#w-&NI;fq5p`n-}p7w zIfBCba`D$l-gx5;SVTJ$zOIQyw*Abs{n-X{`97XaTjdLkzmHuOr7B+$5T+YpiG(@O ze*J;(Pz{3OL!Q}zma9=5#M%ZWjR+(0OHno|5M*-is5+ps6vw}6T4##-_ z0Dc7v&YL7zpr|mWnXk}g(-lB8PmcYJZC0x8owEA?06+jqL_t*fmzmlDHktH8m=$Sy z7ST@2rOT2~i%H<(oQw5*uclm{L{N~8r&*6l=~1NCJ&rIfhjOEBJ+>+;$7B2 zIgv4JW>|$}Perj)P^uh&qs9k1%mh!j?* zk1}@VL(AxxZzR9!H z)A&2vN>OhnD^sqAWoEEJ+boorzkevTKXk2ogX>fnd=?f#0>&?zsn( zVV@#dJ1V(OhR*{bI)^0^Vo;Brl9dOHbkKVlcofCICMVO_FED!+xBf#Q$3F{OWitC; zc}EpYEUWvE8!@$n; zZ;qpZ|A&8nWV6{1tDuk4e`#hW;Hb07^#APd55$PJ>%mi>kVkJb(JSsd+j0>oqsZ9z zClGp24+fZ7mVWf4OLbQ{v7WLHC$KbS=;HrBe}9V51#L(CZS7Fmx23CatD zQ1qiF!y~cHz``fVQ*ZI78QOPjEX4p7>F0F5j|-;E9~Ex+@CEE=4PI%Tf6ioh?E7~n z<%-76`_kSyaKV5`@BQc4*cMOL@2+SCvw&F33zTyIaAz`DE0QB~C-@NW7r!_%y+3kP z+F~obL9xf4^uPe9Y>m$R~K9WZ&2)m1&Za(g_qx?2OoJT9em(H zX|+)1EkS91b8pMYePrhpn)#L~Lkd`Q^Wzw%KM|JoKMO9_Ze4 z@4wTTXPhNl-t77PAEa;LA%4GpQ_0&VufF<9I{H_~rIGKylg>H&>@@$7`O<5zy_WX) z!Jdh4R~&$czw^#FFXq9N*g|J~dgRf^(~URYC=ZDF(0`t}=Rqt#z#i_eV!NMlX~>Wv z*n()=Z=D(EZ}{_nU8$U;pon^D$(rAzTl9gXh*; zZ%Y?lcz#-Ey`$f6754TpX7kZim`aou$|g8~Z@YYZJKTRa_#ZJO>v{C3%S zr!;K6_0!C=&Mdc{vE#<3OE0-BUH7-YrJ<{>o_0oDXPsqcIqT)zbI!tu%MdtEPjn7&-uNN-#~pR%W;3?T%aielzCFdTCXheZei&-hT5DUrSTrkm1fXPl8%M;!Lw ze}4smt>et58?~AZlKVg|lRl=|NSI(f=c%zNd z-g|S4CwmCw0%jBBEUP9Jkn8TGst&pOK?2=2DQjf{Don+wcT4G-dx*7VST9gk59zWh zRycR{G@VRVCD9}(fg7q76m;#X0{jQH6peI|5r{JUk6)Dv8mqVkZc8*Vi>gLJNl{=L#Veg><^jhw>v6>>Yu9Vd*>CC~ zi%H@}4yztSHk3jLQ)>&vIHnFPK0vQEs5#6AfyN=PEY!wat$T;I0lYw2#$4HIRssIK z-{5MSxf7&BW}W#We}9xd7xgsI*S|lqGxUXW=#Y(uC{#^OF~_aQ{SZkTQj=nz{2{uj z&atYe8MxjFo0p=(GB>4uk_ca%=w&N5+<#Q6Oz%oAG~9k5g(|dE7f$iR?6XjO%t1pl z&XfH4A^0#-6k}qzMMeFqf7jkq|3y}^di+gQ4m0IFDW=Un zJJA(RH(^ROF2O>mx8Zc$319{TTMZl7iTPC+Sj4}bPnDa(-R!H_V7 zxMc}PYgxAxBs6CTDKxC=X2{lvI|d~OC@1*PxL*NaT>DOomP-dQ8)s6COVw!kbaEd^ zV^VEe%M;t<0B-ckb(&_kAbKP-h8B(j2l{&Y`=i3TNLBiEObd?AS9a~6%L!63loBzP zp)-V%u5mHJ?%p9c;r%nCQfL@?tx=lc!)XCwT{J%fEOQHZ-ZNhwYmr*L-2^7FT#sCbzwgXoI zm65;=RRASr5-P5bHZonq5u!~o<@%U;98_awTQh5Nu?%A+aM3`E6VZtAex)QwH(F?N zzT|o6Fi}J+HsLd!G)-~*#V}*X#G(bSB$bjnG6}a~o#rO}4ktPdBX!GaQc&XN-F2}1 z-ldK)*ZY!(P5nh2;82ZqB~WTr_aWNP5p-hXewFRO5N*2$4tJgEhk9AAQo^fF2)vfH zmJ?YbRnu81$cyIew}1s;{UnFgp8H?kpom5rK{A!rZ8vP*xpD`gbX6p2U2}PZf?GU| z;u{n=B>RVcNH+N1OG246zSa>XG5-|jLp+LK?GGRRU+~biiBqJPUU?}!_}~L+7Cfk4 z6A$D?1?2f_`;}H)Da|%Jw z2R(}~u^4{UKYjWTZxnF*qG_j{I?XZroKzu+Zy@l2`z4oMnhrYnkhBRNKJnq@b=O~q zU)-OGg@ozSp@$rT2ggg|O$K%je!nyv50!tPUVQO|w9}3|r42XQP<~Z^0)7EL!}K$x zUmSS^9@fvFV=)?g@1J_wsaO=aDy_TD+VbFg8vK&{@+oI!^7-tv0W5x9eeE2 zc$oT7+UtjVrHwb_ivm6Z=9`)~ucojK*?Q?VfQLoA+bpWee+7~k7|2b?!C zApPz)_%%PywD1^xhDC@m*p7$WK~0M@2l%V>iFjjxTSjr5`c2ti9#+STKu5QXF=ydh zZ@ry%*kK3x1^c0Rb71y4=76}*Ww_^XFG2bf7GO?3_0+T>76J~%8x%Zef^Sg#=6AnM z-@scfXW~tY=`l{8-XHaT+G@+K(g19ybT$^H=EIvf+`8(ZLk>!By#A)VC9=vYtBNsh zGjzcP=i@Ae&(rt6kHsmxxxI{gf9_pFXLRYs)?&Ocw?TG?*9?PdER^%c>A 
z9Ui*Qn`Qt0JSZC12d8D|zxT^!d<+%RBT!UOQNHnzB5)dDDzaXSUp~YS1xkK97HiJO z0_4my&zyF{_E>y7!AEZVXXDeTk3PVnPyaOIOfzDE;$u0uopbD%V~9yz z#v2#=?1QbI7Fif$aBdnjU|{YJ?*ZImYHHjsronkDJT#qevP{HW<${}RH6@=+-s>z{ zDq!U=A|VE)>NCCm=nf%`Rcyod54;6*3i`Y_7GjUe^To#_d*T%<-Ab`rY6@|ILYErI zek)fFaZDUBpA#mFNtpk@adlp*P;0Ib=1kp_w)=Co#7cc|xf1pqw8^sX^O^dJV5yPL zR45PN42pAb?eYzZO|bZ!w|MIDX_{;B*txc%QxPSvIMP6t2ZaPC9S@e$i!Pj^VlEI6 zwDeXtqKo4Lk{M+@a?N>OMzxK)zAQ{)jK70?mPJLV=l&B{cwXze|G3YtHk6bpfBpo9 zRb6M{1d3FVnLn8$B#d$Nvj-nk#`{^UcrEM9{mfBTi0c)bIuo)#;K}R6uB@eW`yT(M zC&fO6+4AJ$l?|8g#4hBei+Jfiqpgq{7=5TYRM6;ujtg`FEVpEbWCGS}6Cct~h36 zf#WVV%b?rGSRrb~=U{;4QFDB-Vz6RFy+qruyQtP-K{<8OW{zAdruM4&m^=ED#eLp6 zp;=ZwP`?AbOK96)y$uck#cM?ugw~M9{`+MbY!-7K)uv8@vo@K^McnB=C$wE8T-5z2 ze9_@eE7b#XS{y|j^LGjnX66c%5XU2PyyD}~kjYKpG*mrbD5}a8cH5^v$_=CFa4D;X zIvsL_Ah+>$mc}LMj80dvQvJmXXlOO*M7N-&hA8q%z0ygzfLV!S74ZTppL>NxMo5Jr zH6N!;&XwGiA`G6e-G^>$*XMr`4mFjFd0<-AM?ptfzg?L<_c?f&JwZjj_xp9&tvFNE z0-M&KtVAKP*^LEP#8ft8y@K*f{rw^GG~o*AQ>D%`CiQdnow@n9)l2?SX$^WS|?ZJMjDI`886fMiA)k6d$} z{~cDVtXeAL0u>95+DqjE>-`cjW=uWLf91F5D&4j0kilX?T3++gA*G#bsC3&# zrOL3MdGsno)|sg}%1Uv)V$-5v@yYjLm+e;8b^SFVb(<|uVg6@(uupR3liw#?t}a!7 z(@K%4Ih6JOJI71T7WN;>D_)Y8iPFeEDx?A$T778am=#mx{+I;{Fi9L(77e8)2=PJh znOz(Ml-_1S)ekpD?xg2`IfEi=bHlF4ysj=V1s5a(!<2idfJXi_-U>JpZ%}-Q2f3SV ziZ>|m&^uqa8C$d}$v#-@=Ro}RJRerxj0eRGGt&e-XytzKQ%%_q2j@-$m5_nIvtz-6 z2g7n(7*DQUW7fgL%oF8Yh^_DobDoPZEB5L4e(+b(zkzro0OwG^=y?3H z|NW8gVPWJI93=fg`V?n3e1?S+Zb5VRU3aDZ_uF3%WbTg#^V|mMtTWF_i{hc^Uf9xT z$O1z&R&eBvH($pB#kqJ(;E}ZB4m+kTx7;$_cIz$as9zm}g^X{?HctcaHit|j^yf1i zntkoHf5R{Me~kr-b<&}SA1=Slz5)vr7hG_GoD;F#Hd|v6=d;9Zd)|Kg?X=gA_QJ2H zSNHBqw;#d6#c8LVj)SN_lS8tX|Ms_y-@(D$QyU6EeJ zTMMV2a#~sjTNL%1Vybin-Z0?yQY+%E6FDRsZ#TRJ*-J0IEZun1jcK)2R+G06*kM^U zgmpd?e*x!xoQ=htb=O-r{ru-gAf%o2Gc060igOdbzR=gw8rTwQX`BbZt&OI}0*u~3 zpalQxxT(7`mi2%Npxi2G7dKKYXq<<$B)HWM-&EKdZ|@AAZhGj;!q$HlPQw;YSfDr+ z2Mo&=Plq0!KElKATQKiVKJApW>WZtRqjAOoALh_4%5mGQVZ+wLT>Mnd9T~E~5FEz* z0TvYw#sbCbaz4t+tKhs0pm`R_1sD82&4`EYn_%m#Ip@G)mvx95N51!Nx*TuDy#3}o z>CCfFPebNgQ05o67h2+5i{mYj<3-j(G8(=ee)!MR6Hh#jU;H10w`tbHSua?iz_oG* zw$<7WXM=F7vfMjVHPC9K>zPai!HjCZ0WW1QcI<|ajwHOg9Z_Vq;el44kw*(61KLoEuMxB#aS$m zq$7?v0&id}h%MmoHU#FC-gi{yMJyDai*4Cn8a_Pjvg^)r9o>%u;5qhFV}au5T%eeL z0a)my5wDF%`|ZD9dh3lh)6&=)ZJo8(!Tn`%c|)lm7D?!b%s*8vp2aMarIK5a(dcY?>+`$7V*_TF1wHuD+U z8y0p!mV2()pa&qdbh=1Wx3!{hp%Ne|%ZMYn`KJ;=&V5w{StFZ{(n@X_ zQW0ey0}qIQ{)pNj5~@(=qaLY2tyMEuDn+Uw6`>qb8GZaQVn6N6{(zHpiLkf46k-!3 z-)2DcF0-2()-5Fj{%ySq;UArKsb1{qv} zI|LcrHNfER!Cl6l`QP_?&fD_=&RJ_-^=hf=?y2h9`?`J`0lQW%lk^l~nlPz~v$aPf zhZ5mZ%?1a&XPnwo^=c;cPIKJ9vy~wb-1~C?0=kkC{P04h*dRGq51H_41KEV>7s`(# z3E)wZm6<{l5oo{duPFZ*W}TK0aL0?U>gS}=>857IQhvYF9R*vczyEdMT>5nbFYWsL zhlxF!)9ZAp%kNz{b^e`@^fU!P$y_-R;WhzYwX6F}1Ee6jC@7VV_GhBqVbJ&AkD(l^ zf5k2pf*iA2x0IRMFbDC{g_Qqr#Yq&-<6 zvTs96va;Tapu*k!i1%xaK!}pAm2m29((A2){0hj>vCMIJg3YRPSn>~&kYTT83qVqs zqOF(aud)@MFqO$ZbTb=1fo!7(-5oZspgPcy$+z)zLgdGJgXj7Mxm>LyzhS7wx3ZQ| zHF7z*`F9%;rvwTtJJeW%W}uLIqlmnI<`_()UEq@B9xf}SqCQto&?$!DL!Cc_pki4MXVfYo_tOU2&A5a+*;{ zF%0XZRh-+c7iC9C$rWuSB53wsrm3&pb#BD#X16R$vU<2LRd<&WJ>%=yy$%nw?_B9h z?DmRbT=s;wj~dugl<$6?#%QMpZh>XO-Ys*H&Bd&y9kaH4E9V;9^GXQ;c_Rz64aQ|% z7eCB$snRW7#j^`CeoMAO%PD}6zIgl9W)6?gyUE`eO#tg?I)Sd^jt!+2PABsDBDD@s!<+qMSrZ@^~RwT)bq6){4l^X3l-HdZ{TWCmg!|!3PiIk z%Q}Z>>+$aCaZ|?nhkT%XJPYzXa_7wr-Dh(~_ttqD)$a-udD;A03^M`zdtZL!s5V67 zwhqmP$T>rl3=3l;aAm?_VXZ5n&sQX+1?Qiceazis9<(^!tk;-)p=gK>C>cj_jom!l zlSL_N-y*Y~=1+iHdG_puiY{0*aG@r>Cjxbmo|=Y9Nhz*`$cOfmR;OPF9M>5@7217J zI94Hnbg;~%Z+G~$OS7uTRu4iOEGb^qaTSfysY)=k_Zq|xwUJALw%s|^D?5eh&gg#I zecY72Qy@n$;PsIyuef$LQOkuAHD_+uwB5_a>Z8()zmZy!cQ0rIK{?V-5AxaRwSGpS 
z5pMGLFR1Te!GCV9)Mm%}0LdbH4_`;PiZJotd=vR>MiNq*6UyHF?MZljID_5v2S)UN zk@Bjj2I8(3c)X~8;MolhzChO-tf&q5Sv&(y%52*V4@`BQj>|6uoor$wGXz~2eO}D1 zYbYIO1`gN+In^+=^HmbxXAr%Et@eS>U!0m%s!!eXLaZgr=+%L-dSqYp@oq!!*sn1_ zH8{GeT~qw>+YKYk=rz_iFXNK7gIrv2S_}Fgg+|j{V}l&j_YZ{bJL)4S@rf7}RUP9J zDV?orZJ@QsqUo29JjML($@nDp$}apuBSx*JRg#MrnRv4mNmn9|FU=a)y@Z8j8C0!T z`@Sato?cO%JPM;)%!NO_u8sbUZ&-x< z91I|gF7uQ-G)6FSc}cmN*?!j@r5p2|wHl(}Az8{!#4+tPtVoJz8=?)Oj`0Expxq`! z<9D_3&#DU)xdn$iTWZ^SQ+o#hc=3HEiFED z@X#Xx&EDTQ#E>|igT%h;dO$HJ_1{C|=z z?{3Ky$^`ZlYq9^%z}r4%WktW;r3@P@{>EQSad~FKo)*ab65zcH04B15(DIQFpL9%{ zRnE@9U-Nq-jVJQxxIlDb9Vq)+vERC5TFJ)5`+$T6nUo*W+7^K(rd5hYr<|pM6d1Qp zH763Uithn3MQr+kUlm5o5-yE3aou*d)fYD?(tmL zNQA3%SwA?(i(9tqC@r-6`SOkCN=O^R6DD8jwDLPA00A3*=KBKgvf_x^0ELw@k4EDr zl4JK~wnZ;vt%;2>B9vswc7ki(J9YLjnTckE@Ccj)6-E}M8nom>-rB#D)hXU^FkJu%$hBJIC!KZq zQM}GqpCC={+$w(W1rb6>1JI`WmZAlChR5SnZWvKyP!lM8{z}!pi)kuVFR;3J@2$s55rbgw9w}AVtraS`j z?4}^_x`SU9Yk<{vQV_7xkELn4amR(nNh}AbI-B$?UYfp*L)_%_ioLuejJ1j9>OPY4 zyeBkQ%du^D;-Q^jqF%WcwG4e+6|CzT2L^=qPDDMe4wEwX1>`te8re&i#or)&I+<;r zaet&njov7icnVkUU4Ng~LRwBS(bDsgreEbBYj^k`-|x_^-*5MvqI`8x0;K-RcZki$ zbT*URW|w%)s4|`7;AbIRcc6`cA(~l5Di>62O~A3GH{DhQK3c)E6Ze&4R_3xBm1H|e zX?e3x$NyonBqIti1pvb1M6>UWs3kiA%Z@QzpMa7k`;XXmC|3vmzNsGVtI3GSo!s?) z`D3rip63{o1dG^rk7QUt!>KBR&%J$6V$JU73J;$}Q6ePCSC0ouFlsl7%FNJ!-K6@Q z@jZRBc#P^$E+^HvbH?u35HxZ&)?gMb2n_^ zrIXg$cT0PYy>kt}xsz~{AL6J#o!8Nl3XdpD@_g6*86^;g7KVKc#P}ll{36y<~!AnNqoovM)dUI0kMR`n#J*E_X*!oy&8{-JedLE%GZ#L|Dv!= z>(mUmBpfw=qBB}4SIlz~xN>cCe3}KlX4RlfvdVoR<#ybG>(o?V5)dupB%)F^f`?r?KOV}M;9+nZco0$#@WekDw1&2 ztVz!LJ&L2_KKQ#r^und$xP-s_(ZE*UEU?c1{%v*n3@BFPdQ!&iiElk+TlkYn(MN_| zPZ%9-$gZWEkYz}G6GmQYjj-s_XhP{NU!-#(23^+03l%zZ{LEZ^UeZes361rzU^FaL-F>uC* zMw3l^i+G~8l1dPbEL!xYDL5>nT!cw)W52UNTeY=-Oll2 zRC`_OB^PAyco8yGa!*rv`PBuxdc)~ z(#Ss35iZb}ig2l8y;Bw{HN2%l|NYl{YnuB`sG4H?IVW3A4Vr?ra7f(okn%&Ip_8J{tQz8cM3)W&5DmZgQT@5 zcVFERJIbjKPSo%J>QcBfs{T#dizHnZ6ga`U^@jz0vUF5OA#+}+F)oKo4%h0QSQ}Kg zrj=DrvnI5S1fW38?Vo>RqU}l>77rJ`6U8tNcV=OoJ12$w?H!a)5s;aTBu>^t~Mwah+D5 zyghr*Mq0Ma(;S9M=7CGXpzd8L&l4{O_#nuc9er(7cm3&CT^X;vH!A=g&+uAX!5bFz z3;AeO{AnwBNr&La@*X|m-z)+QhXtq0@MQLc`Xwo=kpr@f(eHI9WlxHv$&?x_4Z7r{ zdGLc<^>~WZ1kXL(X7`sw0rKC!Z(|TPqy=m#!)RXs-Q+P|K0-~>zm};VbMp*(AqS3p z-K``|)=jVZou(Ltz5{Zpwm8Hxmp@~MnA9c(UL+;hE zsQMC)ni170uXm102VeWvDS7(=)#E(bp|3#{Z_{4XZy?ul(ZCvCF z&GxB?`lcf|6e{uZ)l3O_gi2z0;5^{A;-T6_f5!M|7&);)_b=_f%dXv3Pwt)6>kS6_ z`~3$rgFLQ(lxZ)c@AT(Oe=8as`9=uKHP&*^Y5f$jH~RaaQn9Mc}se<;^fCqWo-7( z($h->GcE!58+1kdZNC?N+s)XC#AHbbdFz8|*clrbM32BSjug$&9lt+AHSwNd=`cPJ zCMp=t;IN%@k@AvaLo%WcuQ%wlUwSGSJAZJ@`f`b)fZoP`;3P7a=bJhMu+g$dhj)Blluxo^aA`Jb%tj7ReUZ-#4hMZO`Djv|2 z*VQkkQ}TCJsc69b_DF%MjA1EgfSV}Dgvj( zQX%LMcot;Qo>E*(j%a+iz0tj}J1Mo=OMlV_T*&mC+y@?yCE@VeBlKY7v%)2VvOZk@KQ_cni8+{Qle6qKh31wmNzj+@dcUOrB}u|i zG&`ki#+80`q4|_2%mJJ9E&U~qlJnCq0<6<5E#}um&c*#Ac*2io?;H+te8X%M|172> z!v6#1?h;x)4SXQ6lEx&+lzBR2l1vOGxNJwmmGj%KiYwmRx|GA^`t~5S55iABypXQ% z;n4(b_uO&#=Y`DTi4;>ILw|A-h4{^$mmFackOhxN)BV1{;ry8GyUe!O><~ho98xB^ z*-3#V4Dx+C@)W&8$UqHABF~905`#0SGsYi9tzS0#_lgl>?WiZC7%?<%GSnHLO(}3m z4i8b5szmPoW|Qm9?C3SW4KA+M?y#57Y-y_nGJ)AU?%^04=3C^UX86+EL=i1-g*@q7>hStSFH(#e_!^x02XiG6%QK z(N#dQF7=es22B%2ViMy!fF z4O~l4N|75&cB1e!*v4O6zR1s z!vlbq%vZMacTuy#=81Y

Z(Er-5cRYsXQI^p2Pn8($hDw1llSdPJ-H$YV(`1e1TBpDMs}Pd zm#z;3M{pZan1$$y~k-oW(c*ZTRu9_DgCR6ff0<8ns2^B<~Sy$fUQmkBFOs=qX(w#+yvuLMPY?xq4 z56VQ(Vj;$4a?H^eA}28&%&!V!3Fr#KAb}Lx^^bnwMV>xml^LAQi=dLm_m$E}7fB59 z5p}rjB=8nqYP>izgPwo%mXLWK-fpZAUO=g0t-+rY-=SvHs?-<)#q=VJ;e| z*%Y#Vw!J~O6m=AJXIeTgx~^t$gc^};Q}nf#u_Odi@@^PS9z;S3wdUQA17p(+5^}) zj2x8zvkut|U(ag*9dd}!eFapMs#corD8Raw+EO*7H>ddFTe?Ry@rg}5a zWE%k2m3m_Haft!WZ?N0uI8IBtKI<}2-Z?}BmsARf7_j|$5gEH6!!4`PTg~@qM$AxM zIv{PK5H6%Vj<2G?XDicSXu*%cxtRF)+CD%%u$IS?(ZM!nGdjoaZobf2IL~24^5OZN zY0?k5r1?Bu$Kh>wx?sr(!Al*%(CkuEGKcwvF}QwHIoG@s<_uvw#5?G94QkPAJC@;z zbsw6zfCW`>Yl9S{-b8dcl?vFx-947r6(&<^@K>;m=Y~I0wy>0YI&>ZwyoHMml?oWu zqh2}|OfhO%ee=mc>b<;Prj`_3*KUlf`LL4Ddtx%vE{Edb^T6zpGuQDLDcS(pW-=mV zN-Af~Z>974c!Sz5x{=kOYGC#2BB(1|gj^g-R|`CdAkX!x_x66c*vi=e`MDmTq#7rZ zNSIfSv0Hmp!{1R3zq>`Ngh`e{nZaw7-nwsmb+ZA-6Cx7E&TRp9nKo%>CY~eAIa))Z z?0;N4j?QT~L%lL()(@!z`jc`rMAjOwQT9$f@3rtQXVm1JljFDC zcD;Q;9#J9(>f1k@w1}j^q#E-@QC+$xSwOL55Sw(sdXL#d4l46hca5_A_@F2Uh4{_S z{#DiuQsz`t|M?RxAg+tyDY<#N`T~S<%uY-Suei?UOG%oa!XCANXO$h>(K+%2Qgaha z=2|C`sE*{sK{lTGrXQ_(ZFCxL?4<){dLFTJc7X8X$zvunaxrJN*&Z@28klFN*KU%M z?_ewiZr~5eQ}E-UaSk~GDrcAda2sIPAN4w?{rpe+^`#OWV@Y)J%i~4O(uS3@b`?nk z{2Z9|&a3YI+J|PFmt6Ehuz{@{p#`G`uWmFf@Ac4QnnDp=9cBLC`X(Lb_`Z|a9*DR0sC$L=MlTpBG7 zCfmyY7JHQPG1otN=C`cy_wYBq&C7qjvcW%ht(qzzi9Lf4YhP46x@#gz0eEiX~ zU|vVvc+z6&$A_Pj(lT%dlr*?c9OVYB816u>LvuFZcYZmj?CB}&GJ!lRZXL&wpN_Xw zh!nIKU&X5{)c00x>l0^Z^(w9LUkxQc8Ici9ym<7%O9qkP( zuI;{|tp~IP3At;2=OH<#EgPDLJe@bmq7wLbX8xC8nrxwPE5q7+a?4%6%BlS8LGVV@ zL(M{u7(-4v0CGv*GwM_^x*CNkmQ6lV-|D`K@HM@{Z04`=+)5*WfwA`L&p@X-j$GR>jxbNH zCHoj)obSM??m=3zXhWhx%Kn=@_J$?kkaxPFAvwn#%aqVlqJpP_wOWPwx6f~u6dCeA zQ)X^_jIw>U=UDCCbKCywHm^&)9xpXsU&FRZH>}E}imAwYte7`+u%#>x?r0@}Cp!gE zZy79NkP%%2|ih%jU4*`F*|qu&K`rIvn*kEvRge*M)+kL&pwX;T_ndPN%}QQf=? zh>-DBYEK)Zc;Mvocs&P*m(uGrr+nTD^0g$wV~qSJ+LTu1Y~r)za{F`O94K`+@dNj9 z^h;4J(Ge?$8}*V(p`xhR@9J$etyj6Dd2dn3i*FZIBDl~E$D&rAOhdgF+9<-SLJROD z@Edi_FOmb{V$u&Z#;SIKOB34B0C@Ln_6wtT%}=f}lUrr^4_kKv%zd5;26YP1$9U#f z2Z1ssS+kuJoiARj$EY_I!%h`9It z5w^1Orb?<|UcR2ga}FlQH>nYG91R$D(Gl`73b{gVx8=x!KIWp={H4&?Tj4nWPVP&Q3qUaG0|^NO)$eez`DxdS1mfPcAK)_$S5_0^5c>n8SU z7*)%~aj}vAXhUvhM1fmvjcd&-D7%{}%Y@)gtY>`pMs+(kd`}Cg8N>NUdD&`8s@8Ox zV^Mg&RS@dj@E_?@kgMaO*T3Dofci`Dt&sj0L`(96U%IKUO1lMBs;mxyDsYJBY^s~5 z4n;{QzX^eT3hjE_C%?Bxx;Q&{-vu0)^FT{yHT`GJ<+HzD!f<&uT@rDL?pF^!3#_;_ zRAmeFgx9pC!$w#@oGW}NL;t^eQ;4AGhbL9%OYbcO@~z5m77kfeEgB6I(#GOgSaoJG z4g42*!lz$asJfy>2wK@hS_a#m8w*@$YFBDC(7B)EQvu%7h`i-NmXxMH~KTAhCg z`v2(>)ye<)#yD5ta=0&Hm`q~G9DqbwCMn#cUn%RG`ozJ+gZTRGN^|^Fak*}MI1PON zhWMibmy2{UfQANulZC|ZP=DeNEgpePrqM0j>oj7Y@CS2)a@#h?s-ms|yXw&vi@)rJ zsdc59&1kxr=|eA{xp$R^MGiYG*`%fRfdkH75?t)(j|3lEeR7Zt4FKX%T^`Mm^&a>pZusDKkB@N=e6!#%V!2pp6^N8Hp!4ldik4f=S2 zO|?2#-x?Yd@*eR@>oyxYc0CH}g1a=AUtKLy?L8gWVGHL9ieYN~k|cfHofX2RtDELr zixzq;q(i*aN(jc)ikDnBoA~w<&5R(Nh1Gm#q+PPSBLa-F2*am`cWhwjO`@aCNC^@t&J6_+TN+-8*>}Or+pwYMV8M-_7`POuM%~gx9kg1v*9$QF=pJ70+`d*g zs=C;nCAQr~Ei?2}09PdRLjY%Phf%{m$dQ`-UVVw820MrV5<6L)cJS+DnNa!wMn%uQ zkX&s<>_?+d9|O6iwFn>Y<~n@@5}o4v3@*W9+h z(E8Z%(kba+!^Cq@Pbh|%D~3{=2v6lbT%W1_aH;m1u1=L+nPGTR5E|lfWRk(T6VMw< zp$!zbc)bg%*WJm%_h)Nd4~gU6!TM5tEIKz|ddrd`z%iS{ciigjsy_Q~`q1D_{%R3* zTBs?}w(UD@4Ljd>%53||vQRO*Ygt>QPPOYKojvj~ozHe9{NFn<=y+qqh8}kg)hx+( z`(rIuyELRHw(vV*$;QQC4Q=_x+XjKZtfz@(LuCyt|MCa0l`ZjRAbz%TJu?0Tkf*F{ zq^zr#a;9w}grJ0Y=2VgI?9~i}jh10So*Tj$o8@CY$l}N3 z8I@}zu`+fuOmEvKt&R3Ka3mwY`krc`41za5UcaN2%*mUprsTYZRkcI8jN0gJn`C>G zX8$ZZs#@L-f@;?P_Ij&GvN!fkwb=M)em-GSF?k(R5DSiV2@O+DW5bB1LYC$Yi!MLM z?A(!BQEIXp=`UMTyiR!N@%P*DQSWDA?niHJ!^A!q%WQr4U7V#Jxk7L9i!W2y z#`1XAy8*)-c@KDV8ti*VS@emU#`1dzg?DHc*KtBOJcFg54JUmv&LRIPpddfaajlkv 
z%s95R{$FAQ6vF*OXbnFdS# z=97LGpn9omJ5gZTQ%o^SLFpY;csme+wC89{urZ%>_CN`Tfr?qQIUZqF$xv|HLG$U~ zHa@O@49ic%qnW8kwj*o`;^gm+MXUd7gtD)dk6oqn@4xZR5C8jkm6{fJ67EIqhw$FU z$Z8fbcFVq4{^20a(@z&T%&-U}M~%cdDI#5$dDCOK0vU>&>oU1-Sr;tM!%{E09^!uaH6uF}*2 zb4h!()t^O&UpZ0N9?K2{W%$ip{$F8(bMCwq8g)Je;pcL{`xaA3thvVZ= z!Zs!<7Mi}W^Zc?mBH{fTNk+2NikdF6b0(yy@S}7~$G`bS^qqjIhPbkX0geFtK844C z*2Pgc11e0n7FLRTvIafgWV~4Ddb+KLHlR$BQj7h8^H+~k$gEE8mtaC3o&mhL+XbNL z9iwOib?D;O9-|5UT40vTD3i!Wukd5n-ExJ?j;-rDiY9u$EHMpw=rf?s+B$YwW#zp%M18$F5?W61oZ*da9u+SfSkxDv7JH@1T*O)SHzHPdtg4zI{gjd?yN43Vm~1Wj^X1kAEL#lGjEWt=o@a3ZA!dC2S*BTT@wm5(*E@lgdtSBgTG_+_ z$i@|c+#k4m_i3|QP(D{_*lSm#lQ6!H(#jti1$)SrsMDOqVQ3gGcQ1m(tw~C?^-|2u z%tI^jc3~q=k}N33Jw%3JXKNjsuAig7RVbTU<^Qq(k{$rC0SUqdrdegj=amjabPK|6 zFV5f{ontWUUMq3~K z1k6bNCfg3uIJnyjIv$vLdM0Z1EqLNrE~87qTpQgSjfg2^0G*jzvvFp^0tFw44!S6_ zRMFf#WD7~?o@m=k2e}>R)O^{-JP#HkzSrN4!JmVsx}kvKd>@5y@Pp>Ezm`Vqh&}U$*1Expb@Qi@ziu~K9$S^JQ}-bsXb{y zcD4R!Dc-Yf-EKrwx9u-q5iD*Zvc0By>P7;B-|-J7+$!va(><(so?H$s+a62}wORVO zCgWo#APYSNt}a`#J-QJ*TnPrP9=F<~d_&Y$B0>SN9S2h9n1I|l@iA%EZpu*e#b7<; zjSaJIxO?cFz{6mg`f0cIBH;{V!vxVFw}!D_ z6+s?Dv<#ZZ_GKWd!ZwYP##Ap7B$qf`r;SFRQ~%Eev@)z6P3Hrp#xMw`AF>?a z#q*+KzyApefzcm9O$eXtBKw?^AqYwnD-ro!mNxrx>^DTD(0)ic-e8!-Em77QW4htt zn@R~p51M=ZcIH6q!<$X|7gePxXfj#tG6YPq9&;Ky-K4UN`H|>Oyu5G+gn2cXOuq0+`Jb|rvsB5sO%*d05Mpn2(zgDSrSYgw zWpz4gDh)Nc9+ZjGKDvEhv_6`hmgs7SE3uyJyu4got!ZR_tJ#y`Br8%r4ieLNA&I+^=vk$vAw zcrV$yc2_5QR798NRNXasM`++`*2B{!!rFdPuNJ`QN>mvRkiOpDHM9eT17?6j!k%WK zJstJ@cUX(|KjX$4+!?%gG-$OEx3eHNax?d?txFub4i@Au&v6m38SRf4*wm7f7Hl+l z?O|X@{-U%a3tTNCPMfkV7cL{J;pWQXCxrc9BECy}iuTqOu!JgHl1{)IcRy9LX zVKQmU+4@_EaVnKt2dx&PNn~6PC?N*2lD6Us#tU-JHi?HLfZN}@g>3fp^m}WT^*L9b zwX@`ow~~tBwnp})>YJLS_AlahP?_Nyt`4}4@Yu;o{mkq_TjNk@?ik6 zR)aHm?QXAJE6CBX^++zLej7Eo*8;wKZA3`9GubPxzRAxCMerS5&QhZCyBA}RKJE>u z!6nph$(?CE&-{o?^&fXH&yp4=pif6|bz}1joS-&KTV}v^7$Ej^SkL6jOZ_EF)~H!` zmzX88>)@lwrdf5zQJ%=s1j*Hr!5z@s8r0+Su=kDl)`* zH!Uk@@aHpqfQQf>Vv$O0X$FGiF-K-tb^XA-Ik3P_YY_6?{94oVr=vdjX%^`y!fNKV z`PJrUT!0QkDbrrW+tKs{gr^vMNJddztLNQWdzz5D*N+_;j2-A*OG1xFEw*cY=Q9y` zNQ0!A#~!JFFQ)WaTYJa9)S5w50aCed%^y98?`yUdS3t-C&=XR{Qg_sf_WDh`itoA9 zy+z>-9n%6!&mgnt;CXX^(U&Z8T#>a`&@xF1Dr2{VESw61^$Q%4@s*?2$EyiXraQSM z|DCwH^Wke8H`mQ3`#o=Jue))msr2raI z*_qaJp&TSrYOdh{{)uuOm7T<81-tcV7~Pvz`6%`+f4k4HOlP1_+urbmWa356YvK*r z`TecQ=(L>xXE@@;PEv!wNkNtx38ZS|WeuO!+P+w3BiS}(vKc$_8gvomLI&ycpImL9 z(#88)7&v2qhWc4j5^HLIAJ|GhMgWJoZyy?N)yK-(?F9t40CL>oPeZ|+&?l;)X|Mf@ zDeJj7rL;Bh;97@pbp61CKBBJjp+$qElQc`DDl_wV!zXZ}_@X8t^NLt8+&h2i2}Uu?wNtdns{Am(5Cn4?>W>GXnB*kh@q&ocIm;V0x=!%m^V zN1FQ%i<&*U7qN+jFCQY&Z5&BM>1k>pzstc>*yxw@#Dg{9m!Hd+j z83DU{J?Y;#_Nm7Ob=Pd$(+hvN;xj_3l$b+dv(J&m1l9toJhHU{hT~++`*!h{==ODJ z3(!)2c*(ZW$hf&te-8h|~ve2}fa1P5vXK-XRJ`X3~P+;gDHr`#@N%4qpom zS^gH%@Wwm_|Da+n13{N`Ox{uGb=qF>SeGThaksDOy651g4~QOC9cM24!goVFuOish zF5pbgao7gfC7ebpUQURJ#Kfu_zLd5FK=}9glM%(6$S4W~UIIOB;yE2<3Kri9*UaO& z-yT$nvm%s)YhqHX#{b_}uC+K-FM-s2!uz!451v0y!2LX6w|<`dHj86ftWE}RO4X9o zGptGQQ0no1F8DL}-zW#}oqKm<iuA?Q`d+4v!^5@@W133f+{c_OrGM6TgXD|6ge2 zs$Y$5n)Jp>vRuFGV^t+EaI!CK%`2pDKW7DKH*#?=UX#zm^llt0sy;zN9-eHN7x!eR zSkJ!4l3&%ip_@51;^(Fq3_UM6L7`!|(zjASiEJu9cVnpPge=>{a0w69G##Yky5|SR z2|-wK1L}y$l>*Ri#aCY#P?29RnE9>o1ACswEDWLmL#^fRdTqEh#zqdQdU*|Xo%oSn z?pUs>&b2+5Ha;u7fv-dBfC+2WUTt7M+eh#nM^9TczSnYFod^WZb zf~i7Z+sx~IZYs6lZa?uf`G9DW3aG~p&Fm7t7$+Cu+%H)|CB=k1eS!>W$;0X50c+P` zynv;m?B%?qBr4wcF^`6Q)Xm`b+tR-ej=%$92D~ua>etsSzi3ifFiq&#Jo<+6A`*C9 zsf2wF{yr44{>ER_Gx!hyCq=bJ@Cu|gXyPNX<}Lu~)ARpiq0#&I*N+0v&2j_;_2s%KCY8Ng={_nz~9 zJzPK0)XN9u7(XAZNE03mTn~=n@{s?h85H_38GjD~?gAR_NrykE*EK0}f!@6|SVvb+b$$ zhp@E_N!G;qHx?ycGCbQ}WJqg|dtaOg$;nO_OOR`GR#e5dZEv9H*~7A6!UmwJf1&~C 
zGgfoLhP|Rp-NQ>eId^ZH2xvFBH-Fxzn(1qmk2j1U1wSO zNaa%Hh8Ui>h%WItQK6_l9;-WRqeWGL63WIB9{jca1gP!B;Uk*lTKLEAKd>ZzT;(Qa zDcC3UZ-&e60GGC=Wh>P*%N#-Xy);P>xmYm(tBNd5f>wh5nUYgiprP}M>Ity*Onh91{qX0F6_ZG2LOHRT02f_F zs%SNey@JR_f>-L+pDO|j3h_)GwL)^S**y+D{nq4oK#R~t?$G%Lg>T3CK zxBvHxp1V}bV|@EK>10NgI-_{c&Olo~e=od{|6GpsK&U2e)V?q%m5^ZRUr^TZM>K|% z>t73sHjOi#s!@^7x1AF{WnTzK116jb`IisPnEU0MpVs%qb>F|->-hhh_y4WCm)o(7 zhqIS(xA+-wq1N`{RFvmz=&;S8kJ(59CZ4DUw6M@)!^K*jji>O5aSJS{>B_C zVn;kLk3b@mfL)(YXjkVRy0N6~YnHbvf(>YYA(6|E2O6)t&f;pQpDrS$V_6c&w)da$ z!S^f=(1$0RDmC^6O5@AkU9s*<`tkZVkZXB>vR-f4|iBcpgy0mrcl05 zf*fF5GA-V!LaRi|sr2byOrS1UUKx7`auKJc| zO~!^I?(f2xJbGbhmoayS(Ws z`|yc*B(@E{5lOvNRqC(Gf3#i$BN!sEf{p#*K*Arv#8fdG-%5oa#KpVPPAxm6Pe^%? zAtzt!0H*qEbfzce0%6vJtB(mX4RXv;SW7bSX@!uP53P9HvUlg}2Uk;6B%uB-`QR`) z5mwGai&QyZZClCrkLfB+V&7l$2+-0amo^e7&(zZ>kn(V+C}+bzIx*i!BtPy<2|I|S zCb0^nt%8x#%tw@&Pa`%;@${K9(5d8Myf{=bGaCtnhSZXO_ti$6C|0&5`Tp0-bAVz4 z*AiVLld|p2%M&KuEgz0QMtmB2E9H3f|2e_8xifc7Pl zGUkL}`DF4#=H>f`b7>RI$(e~W+b66;Sp>8tVx|l$5V7Xj3@+Nia!nZ&7a2FqcvW!q z3DjE7kGfyyqcElmEEm`tx0YtaIV}*GnK;BR^tkz7Qx;&Q`tS%|!RLcN$Dq?`-a{uu+s`P=ACOfi56a-PjPsbACMFTFI*$mMB_@gY z%^ImM{q`!&Rf;(!DU{jsm5Zopg}3HTy9dNdB`=InoG;dTkL*pESs`Oqvd2r6pC`H| z$$PR`vZ4~4LJaS`zNl>%623Vg=2GPf2=P!!!=ia17^@jO$1+m?c1MdWj}KCm)BQr0 zTg~##Q3!-Hm*VarA^kTzZ!45;_2Yv+_zy;n@^VoJjFhMEIKsEO2Fp>naV}eDBI<|h zIUVcI2_s~`md!VqN!lcEyq@c%L@kC(H!d$nuzw^s$5oib#Xc^DsBgGL;uIjz4M|9# za&5(~(KJS@!8Pve`q{p8@z58Penwp}!-Pzth*~S@n02lHS5iRxK`=#k@TIT8W z-0{(^dC<(dF}-%kvTa(6Iy>Uel282+QLhxBE1}c6D?CKjwL6zAVI`pSs(u4#vlKYS z)nHqX9wP8HQ>o%%?YH?1F?yA9dY$dj0?4aqEfLQ~RfvCU7+3tI(mNIAEIr2H+@&x8 zPx8n&Bg3mS6SAt{a}42^2hf<`@Q&>}n(4}y3nU6T(Vf21Us+hWJ71sz1X22uG?n_2 zO;fxI7UEWgytx@@o?L;hnL_y(OsZ9Ug^cMD!poP$i}ch@Zy)`vbz-LgwT)fYTvnz` z3yH!4ddz&Wx@Xzoo1Id_1(!6Pcm5xFE9z<~N9PwyG46wr@OF3A;Wd>WleZ~J>vj_bw=40)HZ4u@<30`f4>{KZD^!ZchKs9@e-8Z( zEUG6ZR@zJEBoGIYOgv9rb&ZenPh&?TB@1(;4lfyHKKuQ*Ug(qRj)-C3?B*i)bMo@8 z@UX4?)0XhduOsGYf33^hbN9;A;J(3G(}`EBqJx^LYW1j>^~J&i!^hXDp{tDUThUv?G^zUZDVEB#lj$xp$@H&f@QfDPp;O@ zz$HsHD3|;pHfoq>pxaJy^k7De$@ zsa3S3Lj`jU&&2nexbTH4_op>`LOnfPbeKZMd#E{$<*v;z=nxb#X}E`~(lkJeev3Ik zbrH?-!HW*YIiYeMK*X)xnxZH?yvu`8AX<{1-*FkAtCT5w0b7vtOH z@cGK$_P*&fs|~JSNuwkvE;%;RNd149`s$##o?vSvI0OjpA-KD{L(t&v?j9h(;_en) z0|a;1;4Z;kmIVR?4YJ7Y+uwWjRek?$)mGi<>DzZ^x~I=MY7e8eE_)Gx`WTS2xVr%5 zHwVtT-_8-_EBtU?_ehrU+)vk(8)`}!$Ed(8&5%r7^xeuTSaV?ud9v#8yzn{eR=9bC z*dw!u_yd=p9bm`Gx|rwW713zmrOU0`p!EKGv%y>B)<<4sPQSw{|9uBn-27FUQmQ+} zugsL8j#~mNg`5ijxVx)6`mOsix8DE?k-YMoqPd*ocpcif_G^x$oL!H+6XG2bpDZ5`!3c64WGe z89^q6u)@#Bi}wdD4{n+KvzO9qo-ls5vG@BlD_W17 zeW6#efmFSTZjZC_Fd^Y{&5_(9&0}^veisTv>WW{fb+`7k|E2~-z)tj&jBn(?6sStJ zY;>UTxFc5K04Faghr}ut$ab@z9qpDNtmtBG1_+a!DyE3)P~4@~P)poz-ISQE=~z%? 
z-0L@abA@*;j|19F$OPh^4xH-mlDjBQXM7-3(s$a~^VEaSmaFY|Gj4uv%_0UvmNEtH+K!?0%!>Uj3A+QE%@FcH$4$L41ftsFQg?(P%zhTZ%B{N z@%{H0&5v3T9|%aWT2z3WZP(gtX1A$R<@4hk;z^2}VDv&kGWNtv5JV@0?dG4G)-3;V zjHqH3C)DMG!w;=GOy21~tXbHBUVoVqWjm*~At?J&Ux||+k*Zzxen=hxuHO*-w3^Tk z{Ho!H@EkVNgSecw1PZVu?0%zcc-vb?b5$_s6fP#;18XMhR=}goh#hq(IoTr6P)|!P zNKVny`v&V-u3#SFCdh|rPu%G6tgNBj1cm=BL|3N2eX=v9!4XW*9)lU3d$UQ<0Zxbe z%S{OYHhrwuZ*Vp1a)!h%hAf~q37s@0fBG#aVw|iT^!h8<5YS3aU!7->jDXM>4kJ#lTlU3Ov$kb%!KW>bKqlqKAj@1A6YnK(}?&`dm z&OPXPFZh2`c(4kGyui|c$$S;rluY7Vh8N4jY`8yr5@$D3JRhd{CI6zud2@a)^znA& z+Zfb1=V#tZDS)v8e>=ez`>`Eic7NHkCk{&Nl(HZi!qV`$lS1let)|$_bkWO8@>hc*tQh1^MVHvwI3YXWb z7uZfwMfh%)by3SKxAu#e<)85eZ})It(~~?Gp=o-JOdFoMQT$(Yz2s3dXw=K^M^;^$ z<*_{INx+Ivsm>Qg`Ip2$&t^ZDRlLn}N?v0^g-iLcweS{u>Cy8VT8Lh17ku!Rs3A?u zxU<(B1a0+8YpSb1DZc>h8JadG7nk?<48TOj;A{a_VUkK-tVhZ*dup*n7V z|4DI;6mx~iFYGWqFl^|dc%Od5&Byp9ifa|Ivipzq8LvSZt(H?7t#32;`a(Ecq-$jK zUR*|3J{hn$~gceNeo@d>+fG~!(Yc|tFsU=5ZUDl?N+~eB2A{@sZ zNi%^Qt-3w;F*39dGNHNzO3%7|w%@gZ{M8EDGaC6pw%yElb3@*IEAlz+Xf_XEYUjBD z@pOjQrT*S3IyZfouSsOsm7!o6wIPl%K!iPH~VeR=?c;iDD<-c}yma!e7y033kn^{bHcDNlW8l$x07JbExndsPb)Djx>L& z;-W452Ly7GzQ6vibOxUCrRZzE_3|3d(z#dS;q#BOLzaK>=lFpK=j%xgu`ITx z`rifimkfg^;{DSX1)Ww~Yg#VelGe;P9LID<#h|k@k&z6hB`Ol7gj)Oy{zCJ}HF;P- z@W?yp(_4&wP-ZWy}TG?Sv9SZpqH}M`wpANLzBXYUyZM**V?YS0CXe&v}D7=tWzJwS}=wWL*0zWigV*>-9vTR z)o~kHG9u(mAam>w)1}l+Y%-wK*b__#lKVE$*$dtmpjqjoFhkBfnLIn8QF#ck7O@*t z=$MYY-+EFI#(Q!VV_gNr(`sFghvyqZo5=>Jh#w-!juI~@Hy@ifw;?4&d$bly-78sN ztE-ycxEOl*5sF_LCEqqi#t2uyG&~8&UrL69!>06GvCfsNX91G3yg$97f({JLz1*eW zm6A&He1*Ul0AH7es7HSfsvf6J1~9!PA0x`|%>OO|uuXI2UM>4ST-(2eR{0_!7+)qKbCvF7g0%_^4Ko+4ZRB!BxDSlGnb9dF3FfMekz>n^r4a7D!} z^uT}2SV0TF5d={ur}8i?uld0|ze_JghgTUcJP=BmeGgOZ4hO$9{&0p4%)JyN8b3%R z%r5vltTv+PE>OVchQ@~2HgERHxWRFJ^sHKB$)1oGW7%-~G1(PTlV{&-z#aXTGl-lS zx2aL};0X~S5$-3isi0yFMxaDsK*;+iu#Cv~^0nZH!|jo$Ae9BjA8k`$Le+lA1f=S$ zISzGy!rta534)Css{k$Y3ucRN;AO@tW1Z%)Qi+IO_Qz$#1fha;k@%avS%6)uVKVX9 z?ie}P?Sq-7SMtD1ty?v>?pBle%p|i;+*p#t3%T04%0fx1g_+x!`HMc>w}-p;46v_m;9|Akst zo_AYEZLin<3^TjX$sVdfaJs-&$sws{kooahurXf+6DAhJk4yZW+A%E=O9@i|$y)S} zR#0^|7m;8Nv9%FVCfF*vmLCGYc^%Ol0np4mNz<-;+Fl(j?Ott$Ep4-t1v&DqzTz1a zEh*KFH4SBm#oeJmu9)f4i?SzPRN1k84EeWVPHGj7YFc-0E>z7mL*2~G<2@~-Q2XkM z#-`w2f%+O!B((~p#r{ zuwo{HzR}dh_Cgf6La1M8rCxg4(2jnTDY~-JnBR29o_ea{!cn-zzV_gK4c%OgzdMo` zUu}TKWb87$T7^*!UQt~Rtx?fv=l*-VZcZ$UV)f3h0s89NgxM+aCASO!I{ip)ujaPB z_)0_nJT;=>)XxnFNPq4mhF;1IPY=65Tx{Ue$g>x$V2m2<4;b67nEr@d!caw2A{uTk z;vcYxs;=#xD8QQ@?7uua>xL#`j!X>X+)Q2Aa&t|zvsd#%wSd@cQZfVyLT>_HTi2U3 zX}IkpLTiQVpK>b3>3+g5-MD2HKMn>%i{lT#5~G;SJ-<>10JQo^0%JBZq#?^~S(#Y4 zR0hO>0uXdr8Qge2hc}%F3>}7wbm4twNlzyO)dVwQB6$b}rUgv$vi7fVRIPFWLY8pM z`;%r5h{>>{BrkOLN&-9cJ2HDiHyY|Q!J7F04f7mSzl*() zpMg@@k-7Z~4A#|f&S1?g1r+Lx!)Y`TYpk__rap9$%*v;4F}4Ei^Eo!!Wqo{%`}(zf z%O9RPPIu%Kll+8KWUj>P$ituIX{(D>5NeVE7fVheAdg7A(hZMI7*j3;K|UTfrQi1T zb6+eb^SfDzlDuPu|$bg2Mk2|WxQuV zAmfxrS7s=Hoa6GjcJ=QM(@%NHqhC}f8GWK%xM>z!0pB08e~5mxE47hi`zPCMlN0nR zr0J)?p|r!xdZnpURJrjup{(`&ShKA_%=_ZWiR#Lt#*VHs7Sr9j&f~cB6SCL)qX^X| zgy}ZypGlcbZwche3_{%cK#lL|m#fCgC(;kMP23J%4%zP9qEIH}^0C=+R8MoBAR+sv z1@vpK`%{BHQ3xdurmbsL3DTFmZ0n|{KnA#p{5)EZ$nvW4TEcF%_Q0A3o=`%0O#_5v z8kTw*8pj$sUy@h=qG!PA-bZB})sc?_j_G}4pGGy42OuNnX;C_mgubWGagOOx`n&v< z9&;)u%GGE^Dl<~T!mGCtp0;4eVj&5xqkDnq^wE)b*qqkw%`J|NcIBq;OvdwN<7~p{ zzU)n{Mo7;)is>l@>E{QjUG5|XaBCU)#7=v!g%95C;Utp!pAysL6R!qTT1B>ZmJK4{ z%7(>#H^@nnLgtCV=}H`vEaBJKi9->9|G?DQk~{cwmB4w~5o~jZ z@CQ@0RV*@d&RPUlp;2-s=oPJskn)2NXnquNp_nWnlgc0U*Fs_u6jOY9Rj~u!?hLKe z*IEV>K|c%h678h6y5dGTApPY$;H+dt$m{n$YsEh&(-qaaBCT8j&A&AAlAMYEf)Be7 z69Qu1>w2Bh;w}fa4zzB*Z?iaz)G^t zX?^p-F9!tb092m8yg2-9mJgfM+uwkPw&RBS6s@>LCO2SAEGCy- 
zCzs9UyRY1UNCIOLpHnhiG1Pq&_z|GMi1bXh=Q|}DXMRgYIlw1ylw)R1^jWFreBWc% zx66JM32DQA)@+GALmLtEStdr075Wln+-%Lu?sN8|wkQzGTn1({ASo`ChGd?S=C@2H_0HG|=x_Arb!?3fZDs_U@kS(NgR#zy0;q z-)0nvW%=%dp3N}!xOrh(XTZbQlM*p4T9&Ori>&M5dTe2*qw49(yXk+<{pp;F{^x7( ziCjzYUgzP4-Dt#j!XddO)@&0Whlqq=UDbH&;fI!@Qa7bUy*S@A$VIu=d6#lOR{M$| zAqphAucB%Mx7L)4^XXTpoINs{WTj!O)%u)J6?nf+cGpbx1bEGcTjds63&>_R(q~^| zZ$jYgMZd&__>MY>b7z+8uw2;M?Az84*ve!W2COA8D*P)1<+!CyI3;O)R&61nPA;ba zpb_9&Hze5=q-Kiz;*O(`t@~PrnQQ*aUHIMztMkc(HuTU6K=AHkSzV3O) z;8!_??E+eSE&z;WjFb9dG*v9^;ghHlp#=9R77e>|8$80w`<>@JZUnk}{~(kk`)_zIyMsH3~Ax%X#gQg_nsfLvQ; zFMF98s7FmA#gBwtMddY^E8RjOTC(71jArvgDxwVVWN>yxr8GeDR}72VI})HJkP`Su zb4v5SDmt}FHlnU^o9$pt_Ppuw#Rvv>%&w!&V@v8QaqinM1}h#EgEL~G3oWx=8!)8g zUR|VZBL8_c%t6VgWF-^>AvNZqg-{396v4w*wW(GzR6Ua$74MLHy`VAa#2HR3So6x7 zB>AiLrZNzTJtk&ENc=8Aa&Hs?M|R0a@APDp3_?(SO|qt|pSG&AvMO846UvG#obB{h z4yTe#fH6N%d~XmFoHlYAA^y1LAAw?u?=Vn+Uz_}wuS0m;< z2&erubTe+oA|APqJj2=bY12yE1ydSX`FAU3ZBrQDl+?Q#k7wiTg8H<+AJn?~HR#An zogc%0k_Hjd>TGohV2u&m#qhFI*HrLLexA_+#%jLfUERoCHh5T{QTNRB=CCy_odMe5&zh8{9$ecbk*dy~cM&QcvhUO+SMkC-ec zkDq6)f{r`(ODX9Zin1#fGETxL@`bso8&2fl%1dnz>oJ87ScR;P5RkyC(h^~L7CrZ0 z+?e64QEXq2adbXp#COajuA*Odk2l2>C7}UsI!yeFYzqast|LwA4qXc8L)c1&=__ys;!S(J|e{ycjcND8>(n4u^LwwucmIu zr_2T!BikCdg>eCM%u6)X_CJ%_oV&EMJHdWMKwqYU{OXg)sFT?Sg)PmAnLm;Ig5k&T z8koC!MV}Y9vStOg$c3DL^Usl6l<|OsyiI@tC0#m6Kpu2ssb%JVoOh|XIayesJsauk z@r`6Vl6PohEjOv3)IS=qSWMUZvvGzIYFY&IfU@4vg&2Ajj(CP;UbbymP)zD;-GEx$D$A!u`TkK zc`l}?cIh@@f;yUr9_(|ffrG}w7|Kdx5Nd=*9^1n|Nfu8j{MNtFfEg}^{eV`hDWR^p zr;;`2Xu;>7{CN(H2ZxX#zY9v>iQpgOjWoGmb#)YppSdl@>u{V6s(qSpm)s#F6-f$? zsMT4R>Sd)L9d*&z;J8h0h+B&Q8*UH0Cv!H-sX;IUWu`XR) zCS{8t741x%;=NN9uyKzKESfhK3^gbx@MY`ZTg*Q!mi6h~CEsy)OTuu zs}{DswBG#nZ|dE-9~rf=GnYX`KIUtKF>hemjHma#lBz4`jz3b;$)1;me=*_mW>Zws zsw^MFHtS2_moNj;&6jJP@jaSqj}Y0a@1o5|Xv&XQl+jPA+_$2^0kAG%>4bRn6KF5aaLkVyRwR!mo z*`_Aq!Q(cqDskW2xSVia^jqm2iC*dRD!ze#2SUwN9Oy;VGZH@+pt=PNG+P5JKVjNM zILB6I1%jkS+bWiPLh`|H;gT_2WE|3dAF8slV9__l=+XrzJs$4xC1J$9p8D|d zG7DO37gg&?)#p6b;8$N|>Qbj9Iy#sw`FO`++!nzO5<;L}X5RD|un8mFG}b^2dKsuE zyQ7vJj=SYwvx<)^Ya78+pjy#%A?JAnbBIZ$X0}AmeARP7B@I+7=qae4AH&1H`h|zz zagB!Fd-w+%JD|)IjOy10{}n>%YN%+2@#zO7#a-gV9PaoCr{zvAE{gr)@5i9{OEv{^ z4?m%^pX$I&?U(NY1E5(+ezPgWl~Ib{7YguBPxgtPZQP^9!uCBCHg1vk2SrIZfVJ&aQ8hK03;p9=kAPZI&D0qz6`jt9cMar;m7}R6PiKC1f~w6DQwk|*|xn~ZuJ2EiW`AY#Vy99vcu;_c=nsu9<`PRSex`*_Hz!B$B*@qk#Lj>S3+VLU}7>w$9b~{KkdD) zSc$R_Ql2&7gSv)sz$6_Ai6fAD_0UL#Sb;uCCRzEsdeeOPHwD>TZZ&9QOE(Wq*BmzM zl9{gHRO3)Cjw*FejcsMC>V~Ye{4o#6_#M@`Si-wSaymywoxQC_-SK(8N=+IbbHn6U zgq+>H&DqX`X<3)b^}{Zrf1G~C-!C8#E+6H%GBHCEr1h}O^3bK&OPopZ_`Fb`{GJ4U zSGlbd$zslj?ws6_eQP!?8$NGcB$@W(BG=BWygGODKs`I}!?HH7I#z9zuBwHZPX!xN z;r8dr#)mkG@DZNErDkwsA3nMP zD@~Fpk!BA3e^JQ%y5$%i_9Y*}S1@#61KYj(2Lwbd8w-w~F(XqDN; zl+FX%;QNz{+#!j2K@pbLjhkASs|6-HZ=6=atSvg@bmGDObjdWU-Tw-mBFp0(+5=)^ z`@%jRi!N=1F$rK$qQSMZi_)}+GC9hNvapDYZz?xwLtXMO&D!o3ho8xed^2*~Tl$k2 zQbyj9E)lOKs&obL2!uN7h7m+K%vKxbCw}7pFYp=mnW+KC{$B{!xrtopCzMv5)LRxT zQ7P`y?`uX#E4@_h`hD|R%+7L#W2^mrv7}!`X};+wVGLvP*OZtUEDE$m)#%#tOGk+l zd$3LrlCbXV>FdZK(>IHY5S3WlI>eTB=olA_|C_EaFl{z1U;GFDuE#^6v+8X-A3Jc7 za`oaay7F=x!Y{!OQB*s|E9YA=`l?+|^CRi!s(T>{6141wOp>u$S6E-l+qY8K$~um6 zXs_;y3Vby~?q3eHVwnCU!WF30H`^u?77k8iieiM+5B3V0@o2F%Tl`7Fvco#B=Ca;W zNpr}8CoMctna>|*M(Q?9|5CspzdD|vk*eM*l<5C@JM7~CIEFiC3EDTiN!TG#5T}EI z5`y3Opc%~{d5~$7#B7-xFf5IwnbTJrbfk$Dg4$W9px0Mac!nDF2aBXc{l#>048|XU z1pInSFDJrz!jMpI30;erX9)ExX0-Ce>Z=!t2U`bfHM~>Ap*?re8>^K>Cb}v)4UdRD z4)E1#1{o)gE&eVD{lk1@5Zg*2SszaL>Dx=^>oq$5zcW7;mSe=wHpo+1x~) ztc=Ow&{<`ZciEp_ekc+$3pFPTf>9{gyfj@tUNBc?Z6tjeuLxyjZbymy@3{Q!zYc+7 zbe|Xd*<6kPa_c-Yf$zq&Dp^C;iNF{jC<-GHbK!i%88A^{gWl|XMw~M 
zjkV4z_M&sA6|CbY1IG?IF*n&{6q=VjYqKPs`5%c6zaM}7PLm{D`a1nZA*)jC?;p^8 z10ZO_`=H{%kbF)+`Ju?g*TaZ1!Ah84fnO^avj6}A!*G=OU-*ADfWPketex!vCu9WR z2>}pkK9Jehus0c9U?_li>G@9oibcQ}UmFno4Q|VNYP+K zo2dbH+G64k=qEg2E2p1Iu7Tlg7hkn4?IbJmh!b!=aG?;7K}8$rmA$qib;?nl*N*et z|JdIUcp|d*>l<{>_~8gEZbcWVW6V6R?$-auINEHC@d zFFj$qN)|^TWN&({A98=2FIJ?r3$Fj#!!e2~HqS@VINUymrj)@CH8eY}MPGF99BWn~ z{abakX%{56kDeRllf$p@3r5gG<*b@xNK6Uj&4IioW7MKVcQV$KQPAA(XWoRsdZd;iLEO z)4wgXqn*8DU?bs<$WzQZF$9G85b5ywS6tq?HRXcNigI|xx)T>ManR8uM?}D2Y?^-d znVj?ZR;d}Az9EJ~4QE`J`(g2uVdM2;bE>9|CnvS@2gPCmeK0XDqO&y=*i{YWl{+!y z4aAmNcy8fcld}t$bBEmv-`@-y5)*{_%416Fch)*i*+)1Hjj0D`mm@2SG;~VL0@|}3 z&Wa^stYSXO0xRVXUDRaw9F`|;8;ua#XX=qj)HACS3{Q z`xEE#hC2?>+LK7b5=ibQ6u^UgKYED`{V7x#yZ~ItSF24CQL_?q zvSLMuvd{&OfDZ1WMJe!e8bMaO> zjK?A8t%vGjn#cS|(oT@4%QM-|A_n5{Cvu@B;w>E0yl@{h2SYHt)D6v8$>R=VP# zKg71*kr1;AHfImon|>80mgWIwf^T`I6w?fxiF*?z?xargXQl?m4Vx-}HA|N0-#+!W z8M5*W9vq5Rf*axOpv3UUFi48^Xj=^%l*|VP9u*?%ywJ5-=S5hwA_mh;kEy%iAg%In zM77QEF-(nJEtVj2(0P3xh>i78NJJs@%c6|4Fh1Wi#!-)6UEQdE5`APBNc{YnYva2V z8GuaT>3d#N)7(HL9@md6s#zN%nMS$dS)lRi6%}Pi!t*-ch^SIwWvW>x$mZ7O!_JK#)+bfEr-r)!5Xz7I$h)fZ z{h$JSnpyw#ir@y)=%jNyz)cuKM* zR{mFrP%!Ez2m|YVf1dNEATNKlOp{fc|1N|YLRQjps?9Df7si}* z9snm4U$geMpUE!5IjIJGPvrcJNbE_lo%}q0&oZfuDqyDl0pMtt(0%Rl97$DJ*TR3f zie_NE7Ji?3vFB%zF6M*m@_zbQ^L8R*X87IU4oR`aPB=$=Map&*K&X~6VUp0PF>m%) z5toXh>YFEM+%Esi?tRkNEP(-V@oj>gcug%#$N8G`!0cBxI|}1Vn+{$D9Yfl~`f9p^ zIKnV{Os>FMYtoOZNN?{?IM|$BiGrX#n$F{HrzzZM3WK@VKl4*8W_M?|+j~HyK$x%? z#vb^QVbXt?{)Jv|AouyWqyJ04=RdvarK0D5x3_xz#x!5G%D@agqxdRtQ8 zZDBO7M4+H6fY}C2K6H8udZcban)9g6NZKZ@IvzM7pJ*`$P8O7zl&LgP=Q5nq&$m$K z7MC2rheB2ZWstW&abbOvW&M%cx-c#BCl5=(X)g<*B10%2vdm~; zWK%BLgTbf8T^o}UV?!b{Zwr5d)dv`k*+syDFBrwDc5pLQk;jCDyy`$aCZlJh49lks z>5CU@4W&R6YO@H`M`}=87-G#K(%6R2Ago8I=CV&>SnG|w%qi|iBco6eV^@WSE`&yS zEf%Ymj1XwsT_bE4`?1i444Im``M<{T1RnXlOi(bJyVOaQ1C=c| z+7|ZBJh6YiuS>#S7nkX>LsxYLR+vsn1pTHO7n~mObXbU^+rdw$usd#_)*JQLUTAAt zz*VLQA2Rp=Edz37m?zgH+a)=EXD{Vc|EFlsfCm<^nbO~uFEVM&k(m-Enl-J3j+HhU z=9BP8F7`IgYr5^GVX}&_4h6VrJX*EpPe)U)C;^V6)i=i^l(A9C%wBi|$g$p(D-kST zR~r^lc$Gx+&HGSD~CZ^5BzyEuQHfDb3#PiEjWg^Z%AysM`j1Pmxg;C6IP#DkjYK_tp<3yU z@11iY$`oY7ZEaFYAWX~be`Y&R2$hB8uoiT~7Dtq@t*_q-F@OnkQ0^0X+h;q>e|gE} zNhg*V#YWKf&nl8Qgw02P!Nt!-|y;^ z`3nU)a>H36o7lgO)+tzIF>NA6%V0fS>5dO^IvMP^fgqMbX>k)AR{o1g^Fq&nXV=@$hCbdoqz}I7~x`zMFdym9Wkoe~~i& z7~iYxifZht=JM|ILo=e0vP-b4kzm_Ma?${^%AH&O!j0hAO_*B=-b9$1Skz{AK7|E; zDdoJ+h4%dTfIAcNqk3T3yFC5Vs3m=}dJX&I+-TM@ z4Ii5!LtR!^nN$&MJ|CuYW_p)q=nUHSnq!aNM~nLpnbQ9u=9|Woo%T?UP6KNj4HS#zl&P{%GfDK!Y)aQp zXOaqfL-p+%-fQSg*I@f%jN{t^jIXO^x^2x^>)3+F8B+abfo;@6{)>SzN%S}Wy*RM| z<0ek3^T6@327|~oZTk&lnX~JyA_ub>@8>6D zIT=-9U|~|44!q6Qfx4X<+EjxCHms@ndmb>VlguLYX5I0wzD<2*skcAR`9|+f?cBK= zWmBH1$M_WnRR`TjP3xhuB^Q0!^P2B_BDgns*aZ{l8fsj2zRh`VfWB{NeSp!}x&BA? 
z!&8S1-P?WG^85z~>>`gM<8u=}owe!MDU9kBopD9JSvz_(AKpQlSQCZ8fjqSuNv`@p zSw^nxdZzw1U1a|Zof>oqU7MA@W#aa_&$13VC0+L3%{7biQ=2s+JiLRuyJcw@gkY}$ z53h1$b^y2wr>Y-x!CYsG&KD}CevpnR6y8ETGP7mF=46e z^~|y3$Xj(bC%iCs2_o%(y`7#;RUi+IkNKVf_1UM&sJwXAz9SlMyMK=Ja-TQ(YWcU; zzN-t{f8Zi+dP=opTXWUuYhUwjzy-wmg zMG11mhiIliw)xDM6ZRwbJ|8MmeuF2}Z*>}^rd{8KMuhb>PDnVCnAePb*j6|0ZD6$Q z9AvJoUkn^FGyiU43P5c8)kWU^-F}azjMLK-tLeRW0a)jSG=!}COYR1X+_j-La2nz^ zgbdjTia@ryFBC5O$df$;yB6?@*pv&&(%YusPhY6^>Zh$#8oqGb>w612jRnB`V>P4q zee0@sCuvKSfBPq^K=SVPdygwX(ZJy!Hk5(#3vj;JG;^m^$v`R*HOJbXpG?>BnUgc# zAD>kN6#uQ_+QyLa%&kt82H)<7+?pwg;3KLjrOJFw$lIQ{o~{Yd!X`6QcP#eZOXmXG z6x%?5xiMh&e%<)(viV^Xwr03dCQA?*1#vigU|qkqn2#dqLxoZP`5%+&zxNt!x-)Az z*eK@GWX&>SD99mhX0Drdp9`3!BG>016#}8w-q*6Ssvg{xKnjw~8uh3qS#ifQO}z8d zNe;0ZBT{s}>y7MJw~cM91wlG9ko65m4%a{@mNFB8Eovpl5{NGY>0;1~p4p0`yM<5L zPOp%6NM<+QXfWgfOX2SAamUk>0~OSsXS@*V&gowhq9R=EC2CP?8T0pOOu?0F-L>~! zDI>?_UTK(85HnoL;m1k9&Sru{@AlPl*OF>+I}#jX8?jkyoka3B*x+-@qr!vMhggkC zA4i-tZy@4|~%8!3!p5B2WPC%GU1M6m&2{5@g!8^?diCZ7Q zS1rtpZ3?-2uKbY`ZQ_0V#AIjsAx6x*4WZTuG|N(T`4mEMs}J(8W_^_M5Qb{m_A6n= zef!j+`O7`bs@!!|0F61(jM;J4aAqJmrPnz;s5=Ae+)kcvOkVEUmWyf1ZH5Pb6Fk11 z5U6a|xqCL!r5ut*HmxyYiEKR@6yf}Pz$#CEX|Yp90uIg1`aC`Eipn!i7g(7OoH%a) z+xt%3Q4%`3tLP(SJ}xf?tsnXdDY}i-IP43=MR)R8h0vh-+LJ)q|kLs#FZz@TdP? zr&~HEOM86fBe;aci*0dqS(%#J*_oZ59;1s_M0<1%_o^;9jCnPdNDyv9M>0Pkc~SvP z38=9w%?;Dn$IMb~yTa-mGHDiyUS1SBF~jKfHJ6#X?nTW&$4TaQhy~^;e@3+q5hwr` zO`XXVw2rO(bU#TZJY#3nc^n@|^o2E>Ae290c~ei|QqHI>vhrusB=bvHQnhIxsA_%O zm*-UUsbc&+G-94{S|YsWT+@PH@S-DTbbdM)K87PMh;R;&PsaSyg1QFG&~8MuOxKb{ zD0QEb2)FdL&(7yV%uEw@GQg}sn!jgGDKO=)SNQ=xhfzF9c=FAfAk&gZ$-UX4xUJWc z=!f~=x9|phXFnfu&J-JVFwe*tWp@mvxu5SESf|n+TauJCVS(wuwVIzD;HD+ER0c@G zgvJ*!*|l$A!{EQ?V?XqfaT+O##!KhT&Ul@|Dc^zfm8y=DdE>pklp#0@w6NoawhKfl zxnjz_cE+_vem$!sE;1=m3qF!=O6&xGG38j|miu=q^3Bt@6xM1{4X>Q0t|VqwQQl8! 
zs+JQO;Z>)Kh<`qZx@@kbS?@PoBQsd?Fjt(Ryi0S?KCA~r2f`X6JGlLKUJ6R_C7%%l zT0w7qVM9GK-sj`^yjkDO;dW3daD0Sct_jqmVPvt1oj9FW5Lo|U|G~#mv$AmAA#62* z>kRf3jp2ffbTwhLD;da?*SzG%)z76sl;f-uaU5L|!fYq<`OYO_Bhb^aWHZE;ydS9c$J zKc70-&>5e0$VEP}_dgxpcFhCoU$3JjSUdJSMA$VmZm**o%uIa;r>8K?dOJ=KVDh_r zfyLy>ozRPUNbT!>C)NJYpX4dqJhA521F~I>`hkOP7{M)sY?o`mz#lwq1`n|XiR?jq z!KVrmhQ59mNxrEMjp`J-?7tfP{nfK>oZCRUos)MD%^+Q)eLl^{ETRFwBG08+7+cdA zg^i8zz8NPV^7agd+ZTm`=UIxzAcTQ7bD=1dsN3<4D5IiFVDS7rdy&G01mxz=LpNM~}nP9{ejy31a`CAw0@xv$iXEu_M&;qxZ%aCstZUp9&E@ z()VN4{X|_|_mPp&y`XIkL3M%9Z+jdauV=P^9Rl{l`j*vA5J}IKuIWwOUPm5T?~j1_ zoySZ>=surc-7Fu71^`WM{>#6$Z;Q^3%<&zYfG2*6Xc@Hrl-KakeHb@A?PT`Y-8{|S z(^+ez>{BN_q<50eyol~s{8kf@sPBE|L&+L< zemwPj{v*L&^mbu4T?OMd6o@~z+FdiS`+J*jcNJQx{eH~vl7)zdBbwr29<;2b*4D@T z6py@Xzv;X*J5AMp<730ntLubxj0hA29cfKfir+Ii>q1VOXUp1|7z&?cMDK5{w(xVD z{PPrCOiZ7De?K_>&4X@r$TFSu_R9m*ZsgXICtcIfB){Q*QgDs_<8d?gjjutt(Kr(0 z2Jq4W2-o}Sl1#=v^G$4BQpjMuXvrQ&LJNl7FhH0`7w<(AM_XoY*VS#PU6K2qgXA?^)zL9FrT&3D zO8>XlpAum~sMf6VM@OQd(Hsg4erHt-m7{H_IpQ!U+u#zcA168J73lkZOJv|Mwp(!Q zzJ^WUZ+l?i92Zekoi;4}FG1rw3wN>l`RqKH@BfOltI z?d=3XF=ob2G}!C?-Pteau~!YZpaae* z4+!4OaZwPJ9@+Jvg0O&oE&an0@nqbnf!bQO2|);ZQCP@)eiZH~y#HukOvu7lH8rIO zJvmn~UR=V{07y;Y4g=T_P~fU+4;l@G$wkf78!94u7%$@jWCtbm-EDf6Nh6juNRFcd zC!*WaC>|Av853q-@PuurxM+JUyN^_6#}k0bg&E2`$Kqnkbl2F5MiMfCruz>3Kk(RM zVt(a(!S)&>*4kw~`dKaIq+jJOzJN`YSu{+;^6z#0-9t;%!86Zx1RIY7a>DI@n;*<0 zVR@rbGCK1)>_hpgi2z5|3hcV~4gKV}KiYCMrMYBU4~5h3IJS);>2J9Uj$c!pBk_Y- zYU={^!_?khWa?E#4O{x^qZZBj&@c!R#8D=HYkpAj;B~~<^02V*IG?X8tru76b2OqM zWm~iU+f6DQQ!@`hi6QEO$vG+K@;ng0KzKpYPvhf#&K4%k) zpvT9Xb1a9|h&n9w{gN!1D^ylxgS9e@kU59Y!VzAj*#IGXqzj>^gSL?SA(87 zDx}wz?ULxyu5BmwN0v+jfMs=|j-v4*he+=KVl@vc96WpkXOys_y3<4N_JWbL)AP89#(s-G;XtPbEe4Awg za>0|}N`XjNELO0U!|$zjjlWh602b(#-yNI$4^Lki)NV z?(R-;2=4Ayq{WH_cL;7pTHJ~j*Pua?+`Qj+@BNkMnGD06y-#Mgti6`cp4YtD=Y7Bn zJ)*wlqnXi#Q@qg}>ffAB^L~73NOsIDMTa}%(3NL6+LwxFQK3+D*;dYlywQ z$nLphrQ0ADzOLc>H7a=r@fiFnl=Zg=GTFG$YM7bt(%unxMrq2mWD=n2x5s9xZxTw0 z*B$ey3nh8J?|p(@{i_g1S0gftG!1ff0-gDd@D=G#i(Vi2l1yn>|C(xJ2D)uIPVBdn zKYU$={>DU13wG~EI!dx28=KyJpyLZ>fH3@}5domXspD2>AGnvP+h7U>J{)podd|S) zRy;vH6N2r1NE-5Ub()Tq{k;poRqE9gD5#}-I}tMz%?%}xCb(fbzFofmK#t7Vh^Fm& zdhbYW6E{GoMm_l9*L8i#UE9Q8U;nLJ5K_xr!dEgWU2gBYGAAcy@3BBTVS`-9ZCl&> zyU`44@8#Z_`W>acc1xgo@LhUQ29x+h>|Q3Q?*yqVh%J;*$A-9bE#lgI91;5UqjhbO z^B5FKNu+D~m4tJ$C`3YNnp17lx$P>+r+aX}P|fu;o>EHJ#NArtb-syf8_e5UvT$*X zVUuBBbdQKZo+`NXsGGDw(mk7Lx!aTEN?9N1v}r4(kTf_|;?r{tw46-l(%n`pFfMG5 z|I58a#J2gkFZ4f{%2U_?StgQpd*&Gd9ktB>1>UmlWgmRgExd#xS#Twtb&Uu%Cr5&j z@;({(7D=4lTi>sI*JGB0X8yW2IhI}V)x`fuhGQ3I>6cLXkCn8KH_Pgy2;ut@ozTL! z5iKwR=&xs0p|yDzGwut(K!ZE4%a#Njl02B0D72cGQuO?koc?~Nv1jfg`#m}oU(7Znq*t(ufZO=VU^e;YgO z7-b~`59)1xzUdl+=c)laseGOx))mw~>@$ATG``Mr$E0GeX2$-IVPB;!pgA}{$lcSK zp?elvh`IOLdA<0Oi5W?@cGRvoM42dFPK$_OvfxT#+EvWYL`nh_7$(7Hb|>D~EWu1V zi~d%&-*r0vYD*Rwafw;KTiZm&>__j!B^osSFp@OtN{}3HfI%&x>}-jGVDG$x>VZ|{D>L}*pB3KHSZ!(G%-n3iL$Ay}`M9+BV zhAy2zk0L$JF6}X`jE95M^~MK&Uwd3tSD1V4d?UGWP+{K1)~&r@zhB_`sB+Kdn{Q{! 
zFu3|$MDRvlL$Z;*`D2(isUZz;FTaYteAC{3ATvd;CzP8tu*67T^LQk`&Smq;pm|t+ zviFbyTWcQ`An)D(HGfSvLJ$=hd?Oc96_ttGmFYm4GY&h5Uy?r=qiWPpc}T>6aT8s# z1)LT(Rh4r|T%Q+(Nk#+)HAl@q%;n=O*rMlso63#*KWW(ri4>iO71T^<h#^P1j>0WxNGyiFa~R{8LeE3MrYXQ?hr2CQ zI0i@4RDO-ueMW%2{xfJUuVyV$h#=YQ>75#wACSCO=&bWsI09K8o8My<)9f-Nlr(qq z6Gn)d-!J_76Hn5i*Y$_3xIBOQLOzh6xe4RHzExv%@BZKC4?*+#PX_Xwt~2_yn*qCY z87q$MIp9qFzk9p)EEPH;A^?75CzI-Px;W1#(q%JrYv(ZX8mO*q`rD^(<8M&>^68`aBiY7@MVvRiT5c{p=hHJ% zz?Mi^V20`!p*(T+@|{CtZjd3T=Ranz{9-2)b@p|mvVEnRNbdt)o$~-&li<-+B?+|g z8qz#zp7jze<+abkgt9ELAGvDDCQL~UE3-tJ$Ep2DrtuQrm!?2PGP4{_{4FO%o4Cre zUXzRW`@BZYhNz(!jby`e(h`w`h>&O|We-Nfd>h-M;j zjjvVE=cgpsEu*iW;1a#HJyE%Lv&LBgNBDjS2){od#NbHkE`zU^9(*oS9MZ?>+nk#m7Y= zLWbdd;sS}^Gg;9}lU&6pVuo1T&5Fr2&+|mMIM>M-Iwmsnw-l-Fi3-r>QZ!Dc1nt-8 ztdgjOnEc|YG!?QXfpfuOpg#(jcJG_XqU6^JU9Z3NRvZTVUemK+Y%J#W{Aqq`l&t(p zM^;j2ZZ_?jXv(s^9a!FT_v@d8%*LvQeDaT0^dr`i%U*c%Nllr#)@%$FDrqL`Xo;b} z)`C$`r)nW;?}}2)%FhVdG8=nc|Cx_WxgJ^XtPLaCkX0dR=zizcIHmPa{dGPGayE79 zVDkBQim+cMU+y(uuF0+UKU|2cfx)wqlXbMB9}x$t;wmXcSxTE3> zk7fSp@_OFbATg#=_iYg(rT3i<3hX#c+Ks9^3y}naf0rCuwn(ieLMYL+>M?x&rqI6~=yK zbe>FMss;@!uj=132dwy+G*M`^-Y-*5IH|Er$N0;|1vo!^693V%f z>HJ8dJ5hqHFXG+5M`*0;k9aoCC>gV^)HyY=js{a-AAz$q^XSyaqH!{6bj`o`!JR)5 zV)?7MwglDWnc7!wAu^7m2cJJ;KRTP}M~ZkXq3OKKEQW;B%TqVkU)onP#rRH$uO+A+ zfBq2TDH|ED`B^7_;wrv_cy(t9w4vFte4Ec7S>c{PXA!ws$a)dScI8|rGhg-8x;6bM zLA)h_SNQ^nf%YTqnP|J7KJzbbB9(tE=E1>HJ@xKN?v|k2_YU zC=K$b^!bk`wK>e|`jc;bm1bM$w&wba8fK{}xt?rdly}~7(|cKe8k?z93Y6uQC3me| zQ7c1A@5!kqRF2pDQ*G8j46@ddT_f8p;;z~=E5`F=K+a}XV-QVPD|nfF0mLnQjM3#< zrMEFxCBl?7KA_P13>gd2EANznC7^c&-T=VR$$|kLeXtky>-rT>|N1R{g5sC_6;Jph z*+c=xFILW?z{tgC9R4M(9qc6!a}d`+4Akg@ojiA%@*n5l6pnp|^mAxCgmg4EfgS^hMzhwqj7Gf^^49zMV!RT|*r!ZHU{=pi#Ed-`5Cf9Fct z>LHkQ_LMm|hP5u(%*~#@s(FkTO>2GJ|71bBNRs`MhdJ;M-|A3~mZ3ix2_t;4TTPebFuXQ@a#enfV z0)HeMu-rbqDO}enE4MyXRbOk0Nt(UU+JEy27$$TfBYg6!CML;H>r=g(KjXmX`DyfX zQv>DtG1gz-xlzSxGCid7Tz-4!k!$hb{3BV-e-DWQYAuvM-v9V|$q(@I`m>Y8I5yDM zH)e(sU@xA?sg5yh41=!oM?=6ZN2=DFFXu=e5+xsLv%#?CKSBUjemyogSH?_FAMul; z30S^00Jr?+Y*jyPJ?GEjT7?4g=-WmL4(NJp{)|wIFbrb-De+(bnwHuWV%XmL{wHme z&aT&A>!sp=OVxVwMb#Kb-HV+GU=UyBI|)C3&+b2Sk)-#TIhe*?e>%y_@;~zY5%+@2 z`#;w6^8HttK7URuriRi(=_#ZkdijEYfHnWP=>uD@l`^i<^B>nAH2O*D;I0Izvq#(J zpAhtKBR}a`BqkDcp(+Eenb?oFv;O=1nT0K`KP1nrVL(^jFy7Ap+!8#y{>qh`qfyzsN$O>#^?-v(Wbz&L5bx^0C#OuX0et`GbS9+d03&1$*`Ug8*zd z&R^WmxA%XY6N)N*ZT_4G=SbE%f7|oN<^7KeF!|d2q=-32A;`~l;G{G$8tNw}h`rU` z#`DK=|H*|xI<5_h>3Y(%jA=6be*LrNbX(u}^2Qh^6ta&WZ55gMGAQ~~35ce+dH(^k zVJtGu<_lnqF-gc}x+JDyITOAZZ$JOKwtGRDPkoe42n_l1@z;O;U~V2${#41^SpR+g z%k>w@I9VoTi<$Si`7!dZ2O_gp+8Uq=l)?4?clZ2J`w_c<`Ok+Uy*05$wM)?JRVWR$ zei#ga?JeE(fw=d(LN;-I{>adXYl=vcsoF*&4j}RZ+Hx&Gf|nLyu*CjT;=kkb2Y{#d ze@Vt1&|ZkL29i#>f)y5sSDHn(vP$SkNs4s_)J}x*M z{rYUNq1t#$n=|0XKm)}fN9#4_8-jlE5z+1S4@A&7m*O_r5b?DEz_Pii!-Wr6ZE6Ce zpROa0%7xnvFU+qy080XmF1BM{87*C^X1F!~fZU#cP#2~aM%k{?)4@Be)Yx$THcXfe z8NfK71`IXFX3bFVFHSc+u>ORzo$}8+g1Mz6dU{V6)-mJ?JTU*hJ|;HT#Kx0x)h-Y> z)Js{IW@nPX#vRfMHxg@N<2CDyE)X|7u>NG~5GcBLhpio%FE5I~3pTMT$z#_1?jcFP z&1^?2Rg;ZHzsn264Qqaej@Q29wvDa)X!)o7`}65kb8x()WuXxH2C?>Wv zmMaz4Ph3Ja!(L;XmhkPPq5Z9_^EdOvmv-^F;ky14zt5lJxLYM82F^8+i*|vy;j#QW z*0vDq{Fj8=U`dAoHvtz%ULTL;e|G)1l!Iq3{L76mlp9{2f3C@z%Sygi^49ZnGBJbc zt~uS(_ih<&{&+o)-%H=`uY{qjwMn(s&|~3v^!`hZ-UH)c4h-WRVEYMXdn|wNhvSnV zdrub@H@tlRdm=%TT+ZmJFX=9ZJ%2G3ch9X8v{L8F#+SrpxIkdA7pKUvU^IAa{d+ww z>Grv)z~T>a3q$Pd@+(VAP3LHMCO=}|xHNevB|`qwPT&Lk2c7#Qe(UE z{u?*O{#7r{(Mr{1W6$aO1u_-z!8(4b>VhGG;o9Fy=6}QIkIrGVQ@Z8v>%)Mdq6>-h zL={3Ot9ioTqnOvAUhw=u3V*!|-V3=WO8o+HL(Q>GHs?py-|O+r;On($ZUNk|=NE?3 zYUz+xV!fC3=+MGIL=6^_5I0=&kN279k5^uQRd`i<%}+;Jtn=2NSx!NKSGFSSMS=;9LVimvl_+9VLoTy9};;A8pc`NO{T 
zA!wY-lbU{kz<|xj{dg?@Y?+aTYGX?|co=txUc+GTpAyOp&Ewn zQ&Whfb0|mqoWQu--%6E|W3xIz>gBW#D6TkWXrGg!V?q=?9J(Lm!IGStSr~ua8wfE#?mgDsu9HoCLBHe zn5|jU9h`|1uVevCq{scCv*W+zgD9aO;|`f)_G~l1!Rb`Ul1T4|L-Ao@ryx1nAm7B2 zv0;YG+ZL(jbP_vlI25n6K|`P%#+&NI%9YS&kQwq;1;e2<9o-6Qpw5mTK$faEyyRw) zil2oWEfLu#f%>bbQLO+={bv5;dOm;gEDs86{zwB@aC!bnVTMq1%${w@U$5YbkW8eH zQomY&bYldXfwz9nqwd{wPyT#epiL2yv`wRxZSGtgHMSL6+03B)jIG3ze{j-%|0Vtg zMg0WD`#;04c>2drl9D$PT;#$nS=<Pb$&YUUjIa^ zWwy=$UV>JuWCe5vcYV@!H8K8+R>xE5Rl4s7B-haW?gb%DgifcFv zWott!Jq%Y}<+j`rGeH$U_ip;Nen#FA&L3PfNO_az4{)$rwo`sh;gUZh(A72W{?=P* z@MH;J0^)n!kbK?&O`tSMF^v=l;%fqpZKcqtb^RiE#g=Qks~7@y5Y!p!zif1W(aj+2~oWN!5KwJHdQObq4=<{W0NrkVu6es z73A^#*WmPeDNE2SU3%OfIy)J=1{vhbF?+U#bs#CS0*g#=_rsz1#$JfqQ?*U(?fj@K z&MoUxhE5J;IOOdttq5j@P;<;V20KK)&gGa{q@~n|9<{5dQLO+=o!xSgCLmn!s{YA3 zS|Z7@x8Y#?!gUEUL#R1s&-QqJH@-{|kMI_m;OHmP z{jIm65Hm1EJ;yEY8Jah%QG}$JMrN(8IpF<-*ibTo~jcQ@_=z~%Lk`0R^=@r|yzq|3l&2z6o^8uO2CaMC1=tkm?3-&SIb zjn(}$OPyE(dj0D)oid6H89TzkB|eb<#3j&z*K*)==x$(X9~6 z0b8NMmw?R!OxlSSETj_9(Jir!D@=%VHFEA|Few1_P-PqX36vH`GGPvcB@x0A5;}i^ zqK&N-Lm%)ys^TetiAyYeQ)$Hg@#{Z4eTok#zS9R3{2LUwo9H&)=|pRWrf)@{lDl1F z$w;C>$yHqE_KN(k^cxRgSn;?mlUN;c%97#WVx_@Eq6QFrfmcJCDdD1&y(cC0Eh) zC`9=QQ(L~KEF-0;Wjl^#Ea`$e+j7mq_|W_jS6sot?l{(cq@1FkYhnfL6%zxkNs8-Zf`mOor01c%EJVouq1LQXl{D^d&x^`26$F@GGA16BO^^ zCn$b{U-6_eq3CQy)bd>`HI)Vzuf%RYj%BR-U=3xRLW=Ya_kW!WIgu&Wb4~nq^RL`m zo}8O9tr%IKpFfbC6r0Xfj$&$ytw{?IV;M`XA~~G<4cC9KXTGta60gVLFbCkCJ^wyG zEe2$zXeGunmRvgi7yiOq-xY-p^xj|;GvA` z&g`MuSgB`%wHVF4KMo?et}A^qVlIP`q!R+)>zzBqs3S^TnxCc8Uk>d z(nITC^qo(6CE*WsI$65LhPG0q&&}WQt~2~Nma*h24lw}4!rb{9L6v&WC#UMUk)LUp zXTP^RH-DplNq#*ADMnRk+_W#v&%jtONQ(`=NTN(7S8-$glTeZ3)mZXcyVb%10w{FJ zRZM=t{wnuh+jixR1!K)HyvhBSJiQ;(m7wG*X8w$0dnkXB2&xR{{g8J}EbBD}9?l;^ zk8e?z<`jeKn&jSCppvWT`3v>f{O{$n0!hk=#D2!0CI#VUpIbY+G*gVbsC%!OsW_P_ zC_~YxZtZE3N-Ny>H{%UE(1GoOY* zd6VnU_A-}JciuIz(%qUrXLJ*zB_M!JIXfWDNg!j%Rag?m0C1T9@?5RTL|vgWYxwf~ zAPi1Sh$=)(ZO@|wyZtzpvG$>cvR-S(Q%Ze4feHYt#ZXR42CHf9ccM%-4Uw zPf+~s)2F|D_Z@zMf`5YoA5dsVBR>N=3kS_OIl{bj;~Q-Oi09%q!=3rFB10y%8AcuL z$IQQHnbLR9R1uzw+YCc8L725ZsWqheQyt%EJ19ICw;7K4XWb3CQ|I2C6EybhHr6-D z-|uV1VYDjX?c_fYBf(flQ(noplV5DBoUEg*h^q18HiOn~3lu_fbH{Lesr9&(zi$4U zHCjE1F~eJ&|KDtWT_`>6R+S6S|Ka($INM>@j3oO&nKHi7%pGcY zE^fPCbQZQig-CAh;odai6cy3% z%BPu;k%isCq73E9g-Jz12oX%|;r^pzp1ON~q1e!G$6D>;Uz&!gUtr>r$RM8PY;X*` z9RjRP982%+M9pM#RDQ;fj6k+Nl=?wINKx48;*T9?r}J6n;}kn!EFc}ewSj_Tm0803 zCCpdUZoO>llYf-=m7$4jypz~p@J|m5r3gGDKP*P;`LmJou}z@-^tdPG&Gu1;37s^- z+p`7a+9L=!M>lK-P<-mpo|*S*6$zyLv|b1Xf}m&r;ZPp{K*aR~61IQna0dT(xT7sw z?ZgYQENPnPS&T}LupAk`7HWxkUR1py))NBnv$=M@Udc50hoL@C^kDNff%c>Sf8O>Wx$v__aJ{K zZKB@y$_yn5dL-x619x#LyVAtGPivioK2_rYRpC3#;GvD;`^riWFx*lbI7P}GexvI? 
zw^hjt5fKx*Dfvb$g*C>$w8-p$PzHU!RHw5NU5exRT6f9NQ%7C0i5i+F3hFku zDc>$l#W9~hq+HR_PV2Tx`ZV2FkAIk5T0cI+)-04)!E_(WMdnD6kgFVq?w!EJCWc z)BkT*h!IjB%+iON{%6^;5_P4KK}OPl{nkgh{^x(L0}FZT%p>1bu(zdE*K0WIZl_Ck zAk3j>eopoqG^UM)Ri#-X@&EPf|IgTLV$oB6r>=B;yul3X|DS0-HCy9%VnSKxZjMBx zRRq~Bsq@iyO9HRJ7y}LU23d+TA7?9Gw4+*{{PEUHZH%Pzf3BK_4Xt(PU}cM(IXCH6 zd))74-Z_vHf=pg@v0uw+t9Hya{`)%w0?a8uo%gbCh zyQ8lhN-d~hd;Txj9!Cnr;*&_L>{>DU__vqa=?dg%|Aj}i<1gdsOO#M8mQrQfPhxQb ziq0)lw{qw;F6xi85%%~Jg;x*I1|XkEL5K)uoxtN%gP{;ju zo#o!}E|?pni;u+(bG}ROt0T>8iMq2Q@cm!QJ(>faGc!sAT>iaR8&fiD9R#N9&XqV$9 zHNF`%&<*#jk5ce|;qa|%>=poEE^YUwJKxDUkJq+c?my(Y9!U9d|L?q_<8VI%1V2)H zR_*f3i5>2&#wB`cv|MA-6vThgAo-^CZ6- z?xQSf_}V;gkni?tm?$gmcbdIy?r()R7Z-zqBVEupvGN?e+a6air8Ent@tI0JoOb%& zd4pilVehYe#*BwS3v+@5Ra@0fgCR3Ih{uPWq0;<)jisgr9*CsHO5oEY%`Cd}q=D(7 znQf3Xkn3twOdkI~9|kYMdr;B^FD`IXb$aIdlcJen^wQYe6AJ_}OeNJq^%dE@pJ=Ga zXXue=F#9RMVaKF111O-NdSV60W##-|^gdpxbo_wu(s-DXR@fr6gwSW$X`#VT$GuIN zTnr|EpSI=y21_kWUe#Z&9b?S>-oUgo5A+Xm_cqaXMOq=p{pP>;mA^gUXf%)WGuLM$ zU5|Qs{(oW)bK3_!Dj%z_i4cBpN7)*aLJFloM!zw40!)ajGiSn&PctO^(*`e}J&pwZ!KC5ndqFo#cVr95Vqtd6i2CMnD2&fKsP z7{oq$_SggCO=c6VO?Y-W%D1Y@;^bu>XL`k4V{tVkr0Gtv!^61MrPF@-2RO;WIVz{; z!+YJLNafgt@7Jtkd~4gYRYA58nYb;K}2eC~<(Jj4O6|XPx?nLHNbV zj1S`cdM^j{Z@d2#iQ;2G?G!{G9A>_whR~{sm;!6?qbBFaTBZOyb?}#Cx_|`)uZKBM zFw<=U=@Eti{Q=ztPhk4|N-`8qyQZz}Vl$&Y3Pv2{5&1aFm~?$xtb32A-qwY*&V?T3 zBZFhRi$opPF7i}*QW<{ABA^SFx_qNl^+y6j5M`x+wD{6~KXecX*@UFxp)LY>nYR=6USpo$+ z>_meN+v{_3^?2ghATjR|*NqNu!loirG;+qP^v>`gF(1N{ONMK9HLAHs6)o;|X+%Mb zzfXwdLlCBME_{NzO*aVWUm<+8*kkPIlTZZCkGtjcNfB;TtWYB%fEQ_}o4Fe}DNOF8 zuB2N!_*eU15A1pAUy&R2_?~f=8P1dxJJV_C4Ra!mubAX1M)9L7r+PlrHw>1t=b-*U z;7~i!M(Yp{LU{8Xnv$~zdPC*2?`+TUukYsj)7cQg_fQ#9_2baVI>Jj%=v07W=+End zI>I~72M6W>|E!{`Zy|`QP)d$bh8d3G_f#!qR0%wqcvC>@mjm#pGc#bu@wYV_R6^(N zHy_c0{#`N9{6Vs(Io<>yO#a@n)qZu9b7%!mN%H*zq>Ad|;4vigdRB9{40gObmB^om zw5ontbk^zF|2q$_wjPtE?!C0llJ;>BBAylGJ8IhI;;!-@6!5I?!`j+a3in=?uR;5C zxEsp;$Q5j_TtAcDt(TZh(S z7YtvutXl-0E}y6sNmHe)u#{d&ubCikNR9l8nX~mi)3@2YJ8$ByrJwr*Ad-Kk zg$oB@WrjTX&EDQLCD_}Bg)TmLvA~Lt`G3!5+>OpyL#df;hNT*^hBN49pcI-C1WWPa z(NNDfX{SH_KEDgacSc=(Y#HlzIiJLKWQqCaiEZrcLZu0OPsFUVX(-*3f`VL+a-mj% z*BDvUFRHw)>yGDz1KHQGXQwM4d@NiM^OCjgxCUa5Mg zY3Cy|uJRI{1-Jhq7EvlL+xG1algVxA<al2GYQ0?E ziaQpB_;d%4b!>y6ZcUYj;p@Dr^e$wVAH&TTj2wqya1n}5ciM6D8?mz zEv3NC=Bqn(DS?WtmIk>)O7uJF=VfqrJ>H^S+AtfFC~tmbx6p@ZT)r!hVPVm4Kc{uH z9q)vI|eBh%nG($s38%3vY`*|3$rb_BQ>?>&#J) zqE8{3hp+#QMZ1CDDW|_N7gh+81`rM}58R6Mr|-F%zYug>U4tDXrg~FLX`f#<`$Y^` zNlxXz58_N|&RVZ9kmm90=wOa8vfgrleB|!=Mw^77G$W&QC6xvG79}3_p6D>4v>+mG zWXowSE4oF%VQ(8bomunT;{s6B-~k}nu&1?lZ6NZ>QWj3Y zth4G|>Ifh*K~<0n$z90aD_p92AHzdJu^ev`yybp*j%hQP0$EZpv18{rSjsS~Kwq!Y z3k!~^GacYp-eTd^ELoO8j&_*P!ZsZNNzSQBuCJ&DAtQyex{c)vzu zOb?t;qg0HWOKsmjqq6_>yC5=HM8c;?bl^Vi802wlMBPk`bQ~QDZ>=r!1zov>bI^_~ z;9oF)r@6)z-0Esji3^~{BC3RC@UfLbj$MfKH_ZJ5KFfCRMQ&w+)>U_ob3S|5JTUSa z*&9Wd(qZm3!Y~t$N&OCgzRrBR${-Fd;NZV4As&DqGjDcDY{N=C3+KOq8$6cox`ZBT zpFEmqE9X*WS1Kz}upLX-)hq}(3<9l(Y{(YB#U`*7tW)vI6@a9@P8LJU*Ry#o?>rI& zPulYB8`L@%D{w`VX+u6|5VUicFb>%tlVuR^BE8l}^}~;+*7dmWV;-o63aA-zCPA+!8HQ5Kfp<_)kTgBH zew}QHX4jH0Vyt|2fTaPgtb_9I3YTFmJ%RG)Nh(OL@p(imUNu=pRF)+gmW9b99Sin* zXbR`^`>x~qNPb}p!p0W3W)-YY=OOkTS%WI^=nYhA{$}zM&cNoPxj!-c)E*cJx2IZX zrpOWWV#vfnO%vupUkp4){%C$~_AVTdmos2@2yY9VdS|>dz=J_j&7%F#VTpRZ*=u1o09n3mvYvGa0pS7zkPVMKB7NU=5gO5LMQ=Rfhkw0*2xhmy{3Zi?SpI=Mj}lYU{I8I4lC^t_*tSvGouoIz{^z zV>;L!HDpYH%Gb0}MTUB}eHELjDRn?a|4MFMFR;g+wkDgy)o=ESiv7bulU-+{IkaN) ze*)6!0KL+B>dN*9*oA7iR_PsQLgSKi{MO%1ZhpMf-qVMEL(^)5TffZQ1me#s(2cLu zf_B?WckteZ0M(B`ccdT}wEWZ$q&a#{> zo^xND{l}?GLGa>YbKj3mf>2p7M!NOJaRWx_1I4@XGAQx^mIaCCIU?oe8y+z%#-JM5 
z!uHr*A!vmj12(z15yB{Jnk6^{MS*mIK3I>W2z@B;C)~I!8+|uDhHY6?qdQE`R{Om^ zSUZ})w0ln;ge#^Q`w-jH_0W`JTHP!pG$cq+OXS(7$8VPRo$yQ5%0o=X_$$Jq>Yw;r z#w5fXKLswx3|)#%JUrJ;kkPz-oH;DGu6Z8!1iqsn$D1hager*c{sm<(m@Cpjkl!Bp{L37q(6?(VULh00nfVHFNRDTE|KyN)g z3^gx#8!5GS%=E;sqJHYz##8SZ7JaaQP>&k`x6#G4HeuIusqI+vdvtg^2E_5nCWbiez)8J{&ZCt1B3*AJ36_|Db0T1rB^nsQKEOvYf4J-3KRJw51wViM@mCah(M~frM~b3K z9b0>> zD~C^k90`j3ss9$e!(!{-=D-HxH+A^e)}sF~bT<8MMfSX^{j>xD8KtguBMYunROeo} zW#m&#xZb{d=MsJ&O}yFq6^gy5s+nw;Q4>haaaId$e(1Dttvpu1XNSQ48!p==78X-E zFnZEbzGEu0BXVG0obrr?(ly0Do4{~5yjwv1Tc)d7PAx|4K+~wF+;6mv3z+@*5Cd%` zPT}zu>8H=>y252fTg+&=D+#IPjs>B?`mGN&j?$F@Z&jG^!jx8nS zxzRaHFRWhFAoP)R+8#k>m& zfge&Qc)L2Nx|>#t-29W2WBeuRm9&2|HS;Cd(!zay?Vk=;5D}zFs`Ry>_;Kr8LAmMvJB&`N% z`1-HQ-=(`d=6pV@i@*Lr3G0M^?2ABeKI_7^nG1ID759rH1Z!gNglnc|QKD$q4N&=X z7J-JePs~OzM%#O4f=%t}pz!4f;N0hDVnd$2ExI_LCAW59hjCr_`zC1plAP&YadE$S zlszQM+@lv^qr^U*8GO#rs>{uKG<_L?MkbrlJ8+2)f-KJ^TjoAD*2)H*_>HaOZ%)K+ z+~;As%DcQbmI-}G~R7Ck~iaBy1BQ@ zSHcyL=Om9E%+)3QSr-5$_a@jg0L4`P*6uDXCH~<=Pf4Qkn|+y>*n#yW4yb2NnFnUOG_b;~a>09r1V?SC9~?qy%tMFkF%8hF917 zxeuOAW1FrgmS~5LF4h!tJ-8~5kUz>+5H0%r38_c zw>ifHHy6)Cr)v#1tU+J}bBCtIl&p{Y13h)C7QY1niN@X^JF+(PbfJYizI-uP!SLlT zEARay7kX-q$Z|%?aB0Oo0SR#3PPYe#y9&f7HLj z=TcR@$sO)ODi`nOC!`>Ox9c2B+4f5$QUM!W+!wkkpPH&{b>0@fg`*wSo7dywgb>2c(3EOrG7`L`$Lpx$s>?X%MZ*d#CUCYg%GAQYeUc3NZ zt2M)lrjUwjb05DpcIzYx#qR$SJkVDPb@r<)YOjtdkX7VQKW<@hunw(9V>w0%7*^Vl zEAOUk*9)j72M~&xKaz_m;nI8SxPIq*@>pd``R5MNPBBKd*MV2=hn)or?=%$ z@Vqyx#!WMyUC0>!lL?bTo*Xor5^n6rDMh-9H23r(Ba`j`;YY0JYYs*Y`uf_3c%VvG zO#g+Amm%!mr#9Xt#``b9vBdQ>_IxdYLj(K{;a@;Qwn68TvdE)fvHiQgI5TTBBZDYf ziEX{#$P%Ru+yQ;3AJu*39lI*-uzcp8J`c$GrtImHhb~`VZ;7;b%^`N{_E*nRz8i5> zTQbY3{YE(%H-9ym*B`$xen)(laX@Kt;Ltslyv{Sz`@A7YmE$Xp^FzWPp5h=KHQ?Ki z94it#ImdN)*()C3%A83n`MIG)ySKwbbk`d}bdvP)JkbY?#QoYikh-3M^=P|1Lf@}H zaqp+r-p=lyW3NfRe8KyjyXE6dBMNb)L-A&Z-!bq5AnaQvhWY96TF*+V#6e+ycShkw z2r(7iZ!a;Y@6oKd6>@9$s=p}Hn5HbBbyI`pgk1)fzhWQP05}~{oe$>kBY67p1X`29 zX&!aSzKFQINh7&JxU^_G0q@{!TWWpXJGANqG(X2>rt@ADi-{U>8bXlow-TxPuhS|L z0)xu(bIG9U||6M@mvsE7C=qGay-6F3e{(w?@P3NObAWAz0#>KqPz2=Q6F9 zDZKmec)WI}Quj&XA8P|Fo|isXzbhYQY41@gNjDzIC*&;{ajT8I)&|4Hk!!%G74SVZ zGHi|ho@35%b$%y7$Q>^yn&{ju194w9v#UVJY|vk!r$V%aH!_aEV3820d~d0bp+G&5 zD&DcJ0X1TIB_c?}0UxG1ay6Ya7%H8gg z^M_6giPrR8JnV6_>O%M*l-OMQ>IqoIt?*T}U;t-y!i8}sX#(?eL<#fy?KGq3GJ_vEzb@P??f)bpOt#aae`N2t}aNJhT$wgK-@C???d|K%*?8F$ML%sK1sacFEQ^*_55F0HF^b#J zT!0tgTmx7N0=lHN$E3ivE_W)kIwCp3@M`CDUd*k?G{GurG5uV0G~*x#I9tGP*4~AJ zrQ>eQ*IZq{4RbK3IHpDtUUt+1ik-ulZS!6X0-B zJv}!oYmY6pmXq#?;j7$Iuw{9HIh#P2dkwLuSe4M#2&n@c9kd3+Rlf6v|HX>*o(n%! 
z+4bDVadfYzwIMRQffkWv-hp^`IRC+|#KvEFCrIq3HC3?a+o5+Mh*O>b)3_kK%5a=q zAwfsI58imEJ233-;5oplCEgFJ2jEbTa#p zlGDnGsd4&IgExZ^()=~q0s(o?#rEx5D5vH8%q^O%|M~&3f2j@0+1++pB*`vEs`c=a;l zo0HAHtmnTQ%Fj?s%7`qK&Op0Hl0%{-Kq8m-_O`31^(qdR`4_Ct)D;xB5tP&3gkmdm ze`Ty@B-A$<4%OGF^c>|yVbm^iCr;A6N_7e60o}hbXhBGD%3VrB6Rr(ai}Y`QET1mn zQi!n>zOWkid+OZeqR%=b5@dQ>%vA;Mrfit3MW@K0laskj6Tg?f{G@#P^rtca(SHva z;bPgh*v$KSsUWWL1FJi{$c!&yYD36QMJuk{SMxE=J<=_z?`z>JI;*l1LwT5r<1tf!2AqnI8J;on5zfd|^oDWGc(i;^P~f z_$wd4XRz(s_w8gVPS0J0wj-cDcVtIVcev9af=HX5sB!eSFE!VvtGhbztu#K|IJj2n zQ8CzDwwi6~XgKeJK=ukjCcByvSa+e7q?4Bz8((wt6VdjW**W@nO4QFn_l&mWwIO|O zMczPuY|~?7_#h_{bPDukVK9(O z;*lWPzK_~aok8~A@`Jt>>{6>&TK~F_|9CoIo*{)PY0Vi-t_rqe|N|8aO{z_ z8ALvHM;xj6b4tr~veev_{fK;Sx?{zeXO<&fZ$7Y+q5$=gx4%dQ5 zE$0zL{y?%ZYKM6f%el9tWxaERbkC}-@LrtQhMs|YVLANp&HjAvciaS`eJKo9sieKS z`!qParH{=ft_OoL$*cv&1X9tAQV;4>P*Re3bQ#03uPNL{mP6w=e^LtLB;>2$+O6Ji zxfm8A;UD0-2^%|=673gz%Cda2;<=KJfQDH0ID+#_*Gm_D%R9k@wI4B6Z>23a?<0kW zZuEwcQ4-Ghb&Edv^81%~U#Rw&SYrWOj|(}cn`AK^=$P;v0OmWD)7*ka5`<@3fgb|v zZ7AA7f*b^>>=UXL)MA57V{Gu+7x+kgBkKL>QQJvS`U@4vrVfNP7L8+J%kYdCGD&U0 zEPkO@W+{~@6{9J?D!tzQTu9Ax&+iZS_0GGPg#L4`f|3ng`axT>fo;+eM_V&}eOsgV zoXNd(saz5*;oL@Vvh`Fl{51om5hP-EX|B2FleqsJ;hpUB#9W!Yr@ zEprxhId%99D5r>c8EXbk=MI~aW zFLFAsn(WVwH_#gS#&^{teH-YdLu$$Rj^|M2YVD@-_cWQ{1t8@f2%kbqhQGZ49OF`Y z0kupOM@5B(**|lZj?XGfq0=zAjg`CqRLj6xt zYC0Z)w~&sWA6qXCTA4anFv5?Q1Dot~y)5Z@r?T54-q@?wXYMw1C*nTENmp#Lt*&9r zcKQ(|C-*+^mX8%=>Bdg*Uq7qZ3r`B~6IBSzV-joC8my*kL~AD#{NkG0-Ix0XL-v(u z&ceiN7f3Y=!j%7fXgZMI{oCMhl}DIS@Vmj8#p+;>yJc28G(XJ}(7>!KF)`@f<31i? z%#-7XpeuvuceEqYdUkzjiw@aPb2H4Y$`6+UHW2)kta-dD;J!Y-k0vt&L_=qG>BFrji!xgZ zp((PCyuImVAOPu$p+OG9i@ZEp((Q*>Dd(;BNx-zDdUl2M&ysSv^qC59oC5Xd#Or6E zkq!hoS8ry&#QhFTO~!RxZptC}j)Nu1b+n9)ktCL*tehD0Y;8K3p?G2K{ZT(oY!yJp z>F3T{^1#K5T6S84(BKecm;*^+FkN0Tmy6lcl!N!y$D(nk4WL@8$&y4n=k7Gcy(hwP z8?H9x>uue)beR`va{~Cw9hq~%(>qC-{UR|EYk5T~?1A3jS9>7i!fBI{&U$rfDeNeZ61+ zG*6Jb7VN}kH_})SkrMhqkyr2>Ugw&`T;`-y&w>Mt!H2ifG5VQ_BrykUpB;eMq#= zm~EcNF_FFeW)*jZn`t49FX=s<_o?XU>wR?${_&4xJR&{5=d&I_WU6p~v0NQZi5K7w zxv~yWO$miOPH52sOP)5b0Jl*e_D(G~0NZ0#87#{^NNM0WCYVN8gIf~xTwQ#Y{wBpG zZOHdIsnmJ;a*`KYMgw+%ReM_0+`%qDmHg-!P|>IRLtocXPBDK|>$9@%klzba49Be8 zMs7X9J4^o~YuaN{6=$|08<}pPCt$l)`i>`ZMv^|V!%-8liX7j1DXVtAil>c^XRw5GfzGq;#Y3yUH(KrYsr zDkask+4^xcOoaJWbK8z&%wo6YO2hONF#ZDS`DF^PyMY!HDU20VTB*&Ty2QwAQdqc( z3@(ZMdJ77k)9}+j-Hn6XtlZZC_!Gxhrfv02OlW6kNN+|o!}k@Q9sNKLHdo1Sd~RmC z0C+#++#wb-LyM{Wbhc@=ZBaHjR`|xZrqX*)LuW`^9HMFYSCNb?>PJ%7Z@GudSnotW z@!{d3g-53fz&%H4%CkgLQJ{`GVD7_lB^e9W^MyA!uL4_;CTa_ooe>!>zM=IXGorE2 zx;FdtQjX6Oc#43@p7YHXocN4a`&~Ofc+7ubZ3Aa~zAAiXE&PrUmbH!+NbhYx=cjB8 z=gYmIRY|XI;oGKEG(gN@`RwkE8<~E7Z=CuyncHRSUXn*TG%s9wbbBqV9MXY2qXW+q)4G zP0o*cFKGsg2UH7P<|J*%r$-(v5r)V?59BQJ9Duk5!+rp%@YCLOm+?hsYb>%#($yv5 ziFGDQod z(jUGEXdzmfPiGBf)3{#q@8kf|90-#yWAi-QB* z98qy7j7`HBWV0lcH^$P^82+_-Rz)|INoI)aG+lwW$&-lYsfRLv6}^e zQJrLTulX}vNaL*z6aW`dBzbvz(s#iRL0-$)J+vf`zv{TNZ0-bNo7ev;@e2*qnEM!C z*DXtaFigjb`7V`g3XIo&_j~4BEwf2HY>;%zh-@e}`x%L=1+rkS=%WlB{S%oaPFgUg zB2Dl`&%vW6Iy=zU4+-*`A`e=l?h4jtomt8704sOn+Ukc6GkOg=5NVBp2*h}723LvKcxz^8 zC~l(-+|-L%OyA1PasyN&>qnzwY;&Ql^22SNpI0jxWd*1_|DmdD1I&DrcGe{|SFw(f z)Gwx9GbU7G%{slJww0XRjWxCMf&XSBfdq#j&b*6f-4z9k}fU)XDhyPfnyRcgI<4a0rAu@-|w#BTHtN}+FLZBz17Xd<4o zGKM_k3ag&MGB@8$;uVgT>MbS0`5-WWO8U4X{}A09T~+}knPdU!;k#orb45^4TGMmu z3Y(TXy$vL~*$r6geZRpUa&}J+3v^wGH@TAEwqj+vix4KI=I=pGr6T%@y7$kF>Dk*4 zq8YC!siC+aW?5A?Fy8z}ScB_#9!pbmKua5vab%liU&6Fo69m)VJr%U?wGr zOLYn;m^K--$IeG;UHQN%JCViC6;U!^(1|g7G5hw{WqGNsPUtb5&FrAyq%;*p%w7@Q zV9jmJskaEge5N_&zVtX`+ZmCuHSSvmOYwx+&5`bQ`ftP=}uKSz*Ln|pJAe7 
zGxNc$etb16q6~Xb=KQw`LPVw+z2fS{V~CCYsvE$yNh!5`%&lujy9slk*#RJ6ah6%O z%I~e>>@^XtTf9#;7hRMmpE#$OCF9G z3x+n?HR>)+`L4g_X6H$MK7l2RACZX(HrksU-e^pz$_kVc!h%eQCam04ve?X}Byzr*%#V8c(}K_;fq(nU2XC1s`^v&%?FR3@D7?w^ql ztCF5lNq*4Yii=Gqp|e4H6TEx&5U+3XYqpM*@w`#XvZ3e|u5>Oq0rS4j%^BNmP9ZvdTgiXygmxW*jO$ryf8GE6aDI&1bIO zgThU~?l$9;?yrQrtp-1w4OFkw>V=N$VY6t@Ok1)O-TmCY!u1|E1SHzPvrOj-p&+xz ztrV;U?BlUAs-X_8n|>uLxl~zSEUEUaQ>n*9bS$9i0CTptOCSnHIzgq(n8uwpuHrEJ zCBr=E_ZY^fOaRUMLNw|(T^-CEWtr>>8fcKd*Y=@%M8+SKq|ey;x(tuU|E498jH9m_ zef>Bh2Q|Ju2%K;t%oXpHMEhs&xOU)+j7dOX5lJAIHc*VCh*QAR)CXRnG*Bl_geb%G zII(W-ze67jV01IKC0>`dprokjP>#I4wS5O3cL$#6B#Tc(vxq%SVWMgFrK{g=jV@qkPv!z^`0 zBE$yqIW5?>g_2}9sTB+NWVip7{I0Z4OSH`6A11P)p=0$RQCWkaAub6B#>>BReM>Ah z%H<{kI=;#H(G~1?2t71K|GDg{l#l$$`*HWJ#Gf<6C}XvPo<*@#G$TIxae7-m%mGu!_FgQvs#(XFG(z^yEKZt^AIJ{(s$TVm}OoN;ZuU3 z+c+=*xDYz8At$A+JmG9!XEq@4X2L3^c6^$PxBJnEUmalkR}cov)G;{eHs!#J=gz`=wE)wX6IH-i)G# z39hre`w_$%$3`FS7iVKlUeZRk150dUJhA}yNZc;x&r#XyUw-_PbTZD?9LEsGp3wxC zke`PFs0bFHcstD4Fa}4G*&w776U)DOR0a?I{-*Qf)*m~V+txN}O)jx|*U0Ynwk46; zmHA8wCAlT__`p8V1n%jD&P&;-ByJu&f#FZH%MRCO;D>!Uxbz6-;#b(jEJ8`+V$a&Dbs!HlVbHa_21P1 z%Az(Iwwq!z@C3^P3ji1xg@!TPiu)%F#TEdW1L}anNgmIe;F{~?(qu}iaA%d`M%8@-H{#ysZMc@xv=k--1%&tdS)&x}WE)7> zgBvl5!n2fj%3cLcX||T>GaP^*fO;VL-#=Y-DJkP|M0N}B)c%rPX=Uk8TczymNZob4 z$?gk7lKSpbRCA;>T3CPNfCwvCmE=z`Cu{YKhU099&*;7;uyi?oCO>fJ5tj5w$LE13 z%O#~jb@#G2AeGRDs)3nS_&cWGq^*@UBdp*yu%dYJju~TO<_`Rg4L@8`)*cZYi2U?Y zzCxSCljikX$Q5AfyUJ-2v_rzL{vUD7m%aLhjLr1&(&jt`sEV+S$0?!)S05lpaV z)D3^vUoZBW$UtkcyLf=mXBk7%hC`3U3E>HFHr@waj3RX5=+&S$10B3P6%tYY5hK$W z=_$)}Jd0BNf=GH9 zfQ+D{g`mjseN>x##SbXG+48V7D~9JQZ@#*^FO4dGOBqcidf&>w3gGN_;9lsz3O@3W zRZ@OgdI^0v#!F||k`nwI$JpKS?5HnBU6gl6A^tmTNikOi#NnLurFME%Jw>46J`_&& zE1S7dhS=}6RISclf{QShgvJOcMhv~F1UV0|@m+l=oDuDIrWKTN9h&3*DJR$!ZeQwY z0Yp3Gsr5nEoY~c3so@{Q|GPT2!-C~(+{8)3S{RN;ffFJ;lxaEq$_3Bc7iKs52od$N z045s5iaOPOXZxG`F?Yslom3@%Ao4OuE1;LO4lD&$3dR!`+-bn(%g%kF<;ZXYooqI* z0~^z`{4H!Oy2dnQ#iqFbm?PMtyo6r88rcoAHK8o3yTG!w2zj^Jg%Gk@0JkAFM<{tU z^E9~i@M6OsW``^vomi;pe)1CalRHJ^M+`(K@81rRPW6A3DA6mL z3T&x<{VB^0Rp=1t^jr%wtBRUBvXQ04<}XPle_lX?$iD-e0PDb$90JB=RjF0}?Qyfd z8BzKRKE4z{nmOLo2%N&3*xuXZ+@TzOdC!`cwtqJmI&rKQ8#{4E!;JS?u@GpPk}>%i zUnvk>b4c=RN6`oVbSKrk{*kiHXA2KyeQpcP!SBqeZPM94i#CpG%B=4S==Z6$?0nPx z_ixcPVJWWFN7ZL<$!BxO!Kl#kio!^+c>HOj@dVd=$X`sd)nHRJ;$uyc|9=DR65r&- zce z3&fiD0jDRin}daPjqZT^i$EsV>gR zHGI+5fw~ZBkI! 
z&~-)1R-(O$rrbuwmZ_k3>3!kjC7xXP7Do)^hk``I&==sE5>8wxpkwn{!)@qTgu7zr z1-bOjtbrz}3mJ|WsH^Z}5GkulqOQ2bApSp;vis|+OkE z7VCC{cDqc%1i?9aEZlTq!SrxVZ!h0zcehI&Yir%T@oCOQ;ef{nMV@|WyWHMD9_bA) zH43@w@n`GE?6xh)la8qzL99Om3rg4>3~xi@ZXO*DV_p{bm`?LMFe2k4E6iO_b&t7l zD)IUpLL*Oo@Dd>7AytTL^!YN$8VzYpjs-fj?@yM-rX7FE4Bvg}1T9qp5(8W^L^F2Y z23Tl7c(A6N4ud{kw&TtSU5c|Y@R_aFHaD)e-I4LGwvT3kE$C^f_ z_SJ{gdIsQquDI7a9h-E~MNgDHu#x#@Gl8&ma(m!SEawj@qj_lk-N*}DX@JV|(siY~ z4jzh31ZdP8y zW$SBTjK&)2N<@+GoAX@O;LiN|Fj%71aRYk>>RCz-6v3o+}uo#6}a1|FN% zq21U)%$C;aM=12MF)}vBWbHnWzNHfxj-Xvg;7(?D?!9<8vzop7)z18ONWmBa3Z-^# zn$J7Pe}oY;U(NV@7c7K~(gK$%cE(1hE8n`8s83#{{j3UA>ul++*g{<#>Ezp(+{8vqrZ}8Z~ESm z2%lyOBZAcAPsIw-aOym=lw6a$?neVoW052ojHe|@cq33_cO&Gn*FypUTHTk+O11- zQQ5mTJ8c?5t~!LeN4P$&)UlA&hF#}2RK8ut#?xaobO}*7K&qE|GblON zE{?p%i{Zt!g4d&?WyyzLwhn(itSqCRe(yf+3CZ6YLVNWj70m!qC6*AC^~V-F7!*{l zarANED-*@2K_q;p@3GHye^?=~XV`le3mOHayZ0TplL0 zYQ^{CU!9o&heV=kAoaIzf~^PCzoHx-$Wsj|oguvLo=gSAbA-9vlkYgj0ajh|y33J0 zrQ>{Tg}twsBC(TsBT0J?bKBK?LLI|Ui^7Z{|gamm7Zf{q(DFaR~3;{*dd(^>@+Nl(}$`Hz8kgfpAGJ zXhtzD`%=MKw>YaC6cYzFQ&RP@E!KfVHnih+H=-HO-pxE1$d%nT*GV)#*S<%5bl;o> zjg0lP>OD~RM|0s9g~r!ygd|-sD2vKPe$8i{u^p-j@VBVAS=W2>`llF}aN?5F@r9TV zHMHifdqgOVyog@W6>xi$D9&?JAYOexuetS=uC_}TN`=CG*=5bXBxx@`CfwSED;KoADL2+BLDqeN&V@`z*e z>as?nB%}~xGBPUHVwPsFr0D%`@FTe!jl<8`_@6HfvTz=zm<2*Q5SUHa&K!_OhWMO$ z`URCJTqVfOI%BBD5wBUSbNk!K{q!}cI7j)b^L$_|u42%wsBDMETd?$t=Gp{r(W0K{ zaM)uwD`I6a?@_QRtyr|w{K@8^@c3Y0(>s7JfTbO*pb$HW&#BIVc zQR|voaeQT$cUo#*c(BO|4_lRT!76#cQK{p$QSqq zzE@~r?LI>P#yZ(R32K{_+mfZ;SSB(?;_vcZE5b}oG+CL(BXVB3$H5JfpFnvIQTCzS zjio<@vb<4!nI=X*+o)F zi-lReUNIZng5KP@$oa~Ao3qNH1Yn0V5{2?`wouG}?ALZRz{Axk2{WiNDm8g5%#6k` zZe>@k z*pxeo;Bt?8(ydR?Zx;M=#KSy3i8+*(7JtFO`Xx_3=znG2{C33f{tJe_g!d|uEe!-q zo|NI+7nspRy)d6|7=8mPPer`Le%KKPuLzMJTrte_TM>}Y?5K=T^V%wns0oX36_;h< zU3}bVpGXy=CsqGE^eobw@>;X~hk|NbFI=!0!T>&HYTG${WF}ntaw4wV5$6!+Pr-dc zzOec=nqS3KL4tM4t0?W}AIz(qmSs=eZl~q5IC$q!n41NKL8u&}xy&9^HNiX}LGdzv z&Gj#(Hx(wQj>yL3dxnJj0FXs5(+obheYQZ%VxNWNgHI-kHGP?KwJDJ&B;&A6Go7?q)| zs%+?2c$WyV%ToU9{Ldp%;BVNdjx=*+4}S+tuPR)BcC(|3?QYVxsPw?F(&Kn8Rbo-; zzI$Eo?zFr-Lm}j63agr8Rj8gMN|d`FXt_KLPW#A|<@&eDU5%%Hj4ugoGN{hA@}R6tUEOynsvXQXWGCF@@ zW@SPkzWY1keR+o^_OT8yZb){ilYm}uW!Zu_;~Of-G8ZxP=tva#1rj;&JAa&9;)$4n zYO{O`Fj}gh^>y`T!q49F)m~uyNcG-4`3K<|7Z8Ny6m^Bs~efZfTC2X=%EWBg*Gbia*f{r_^z!%{v zcj)miE7=h>DwiQ3bZA|ddAWHTfK|y#t!gDM*Zm;1tvTTf9QZlYCH6hWIRDn`_sgMj zb)v#&Jx=B>H$p>A%rednk%h25mfv1Xm`A-PE`YB=UuYars!0?q>0_}J_8D)ZH*$S0 z=zF{dU7`|nTauViM~o6`@%jC!YQHD5CYq5bz5H7#?*#2z>-8xwhLO6|FB0r@Tt&6d z)cPj|I3gkX$?3)X-RQ?NYCRkJ0>XY&GKyhr>gmLY*+}-LhqAw2+klT>o)pmyzEIEW z1?abXW}_vcysl8DE?+Fcl@Fu35gvu2gTt2D?b9AVICPG3a`$R{{MjKK(QkdU`}wpx z{u`J`Z5jA&KrvC#m*$aZI$ZQ;DYZ@IYprDTMDGJzqXho1;-RQ?8HTSl9H&(qcc3Ns z_KWxK{evQ#Wi%0`ddH1l|M^k?JZ8))V_QSL<7b;Z|1i@Xx29S4Tg)4mblqsol*`RcwEtRrdEQWHOg;$+|qQ@{9O8Ec#fwIx45w%jQ2| zdB0LZr~@l&-pUuY+GLdD6KsM_yV5QaY#&yZO6L zr4wqrLYrguB0RH{G;1jn$lQp@&#>$-E)=zRmmf+o&RV#YxaU6W@8HZ7`73aQOr)Q) z_s#JHuc@Hx>m&Nk!26(|HAH8dy1rD5VL`UtaDmjBuT^oSU0&aREfS6xw4q6wuY7vT zQQgBj-i&c5(pO3I_39`%75$rz_HI18k?Y8{W`e8kcp=ohu-3vX3e>M_zGm)=Va#1# zcEqYTIy)cZgc>pWKsO~sz6!REmpTc~&67lck0rlB1Q0(Xq*NstT{!8Y6PNr+m zUm@qF+LrO7*G5l8;xr$&vueqz z_wQ>jQS~@K1}7Wee$~OkbvFwxU0ri2_9({GYAW+xDXPYQ@oy-ieQ^pEM>PvfdY@`! 
zaUB;yy55SkQfgr8r0R)&bM-bwHksT9JIw>z38yH>_{VtmYh_7;BiAiX3FQyuQW(Qd z^gmv{z96O}myU?g$(kMQ;UkyYQY5hY4;SwzPS81H{&UYFtn&=b9R=hWbpqVt^auQU z<$dW(EuM<%hazV*E%d)=i}ZHKpYY`KEuULT5+kD>hKZ!r(&O^w+OBeS&yC_7t~`OU z^H9{Z9hTJiJ={xWu!fPTk$N6dlu(H+e%>vb9a^GrnNZr^y8BtLfoZ$crN(UE@u|?j zk060D!p8$IWvI&P-=CGJWG|k_DCSK4Y>#X9}ZfK>4!vW>OFJ^kBEIe zERi^1ZLk0zEWD|_6|!!kI6iJAjQM|glr+2CH4SFR#zI*w$FntEXocUE@--kXmdY;^jfCUHI;*I^o%;No zd0@(fEH_4s6A}N$^mdSRSGFSCkg0e2{;F_K{#D~>_80iP!or(>>{g!gpPA4x7MDP9 zB?5ueD?KH~zNJ z8hbA{K3Xnc^<7~3FXD>ODJoiZ^$JDKs<3QyIqfdvpMnhZwzd#$kY{AqpPFb6^|x`s)=6yqd-LQD{S|2sQb^7}d(_r)8>-gEG4_-J5<-vQm<;hko3}Fc;Tr zyxLzoYTzNx76!-SUUnelt*kE)8>mgygfgu|((-m9zQa&8+TIuj+ zKsyG%su_|d^f_kH5wXx4@vzK+@~4UODdKz?{<;><<~&H?NK}}Z zY1R|V`=(?Lz@+ulQ2Rdibnf$eTJ5P7N@i{JDu9w4i1K&Gxx%~WiQgEBMH_jxbCG

^a1?$p_k5kgV=k-f&H24YXi!_QuD&+;s&PG?MigJ@LepUhi ze6zWRL5RL}glTjzS;b(wr4CI~cAB)aru3PDyw4V*@@>ThhG_0USa0>K_IGDqNnumV zxeIY98(jVvv}xazkmw&1J9vj~1xvrgR?d&Y{!TG^?<{&jPus>icyvz*q~4K3#~eCv zIlDRPqriXae!|EXCr{d#uC10(ZcgwTvwf-5y5C*dmlWzDLsw%&EFmerZ2qEps~Y*- z{3rEE-}W>ic(VUANqKT2j^C?FX2uVuBF^`hbjzps6W$d0cxRquE!gALA?$J4_#j^* zUCCODEW=&j_8^zrLVXoN&tn99VMb)_zE3isU%e&+#S<7-$(M7_?+64vf#N=0Mnd&C zMyBo!a|~jJS$jCUTD*6aadS2V;k^-dEdV9>p9_=ozl07p=+%5v7Mf%>7r;}ap%^+} z;b|n`UhE6PHL(vVA)VOE4W`BBc7F8uz&j1782|u8IZI63Ju*}iS1dOF5Zo#L_gHw` zrIvo{yMbX!{jtG8x#NK!1li|E>0C9BmZYK17$pfOsjq|x8Ftr4D>w# z_!IY^e!1ZVjBQ_cBLNZo)Q!q+z+Cp|@F zKx~M6O)i^Uzly%&GQW7>dEc^z59m_NOM$oFjjfT`=ZTl4Q&6T#Qje>^qDi~6NgvZ; z5yAQPS|RcErAn0d$4jjO+V@S?d5R9=jSqNXz2Fr3$vdZy=Svz<27E&wd+l7qS~Ue{ zu}fN)fYmNTM=h>fz=^}8xI0OwFi?UT$6lm!KCdxFC%x`pU4*vB)Z}a7z9#cYP0)?c zbFA!46WsA)3(*t&D5kHw_*%bfx32E}v+r;G_ZXA+*<&+sZv1@mY0?P8OWcV@q5C{o z;_&HoVi!#}-05n<0#w!v_9*WT)=|H+`u2|#fH3bn_H!a6M%fPn_u@#xD@aFWI^nsY zVE>Y>M)0SmDIW4rE&XKt3xr~BLP1j%pH*0w=pA9?kAT@W@yU05Z+ zmFzK zR83KJC7n!ujPnn@Dp2W%MIz&P7b?;i7>__!MbdRpktNXicuA1`hZmi*M9 zfSeYe0)i*l;1jQUdyfY!2PWPNCK)@#0i9OD4JVAAyL^}Y@T~8a{F~u%2>27o(mz=; zhL~ox8$NS!GZ?3E1UVM%0Zw71KIw=h&L}1dV%;Ez56GEhzVRsU`Lhf)3+je%0}-wS zG^-LKHuIg_%{PyLV8p*0jis(b<#p>C(BLEf1$_>qwWZJ1TjnA_TK?V@oI2kjD5PL{ zJ8-GnqH7(I(7TfNcN~T|hJh!Lytbd+nCPlAt}?{%jvh^*d`oertB_^A!LjSDc9iKO z1V^v*-Kei)3c%eJBcC=CnLm5vkGMH6GeT<*>Kk}QO{(%%C<83rdp`#N8&5PexjfWV zRUzV-vnz7GmRpNl+2u-TD|JJ`bad{(NQd^)NfNzF$~XGU74M z^~Zx%ib%TH;=7R4aX;*_SDo3Z|KcY1UN#b@LNpWZbnXrZ(isdV85V}~@LI)dK{%S_ z>!Xd1Na^2C0~p&AFIK&lS1Zx5I7zLqf~!@7!dS`%(%+rgTZvcnoQna&U+2b_y+ISN zS2g&GYZYEk?=6dJ#9csuu6~gnsW~5RwC34OALnV9C^tX)_EPR9dq%(kv5Ut`z zH`%Yg&j*eQ4N^FYYl}byMhiP}jCP{z43ags7b->r7KYSuo`H#t^P?hn%tDLnCNbIF zyP0_2Zib0>s4CPia$ygQ;d#=X){*PEy|g9`)*J(Za;&i!-j2L6+ zX8&TP)=ho^-F#I>Q{jKP^8CQ)B;M!#8cr17Tm_P&(v0E$FN&1J#~F`it;g+joH3}S zD{@(k%1hRo=7aB5yb2}U>xbQ>-?V?KePE1J2CM!28agHKe($HRjP<6lWxEyOBke&wovOcpG)A|34Q1x4FlvNp^PX%sXWRYQekU!Ws(g@6tSF z`Tv-4R?@6nKQS5WqN_H ztgtVQnzBpgAW4BC1H{orGkx1gv0)D(Raq;9Gh-=_^|O3 zB@A!BIFspDJOfi^R;G zq?+S&_#>-+G-`o)$Q|elAja%Yh!pVQr3zV2usy5ne4>P)xL4vOr+O2MzWit<_B%C| zdNA0w1iXYrnAiTm?l))-%#z|CqkeMjOs%q@+VP7w!wUQ+R4-pe9keDFaN^`;n27$; z^ivwwZz&*VeO*jZC+SvnU&ZMUO9%ZFk{6%nN#2q43Qdd24b!O~CErlxT3^O;sdBz9G!N%z5UHHh$7Z)?o9ahD-8lkX%Z4<5)lK>5Gh#QU$ZK^J zz>|ci(i#7(=6h~iY@;ncx;@cB4lC2EZc?PEv!LSbo0AQ>hhK>yhJw_~0Q#TVk|@Ly z&AYod<>!;glY7{uVpOGje>I_XM`IA6bo$mN(*E+ApqN@8Tw|1@l9P8()jIWZPy)-TjP|{pl-eiQSkB7ME zQl^yFDS0bkMI1x8?*~_+>2ESHHmzXaIlE7(BTBp0fvBw-0iR;hUf-HMGb^S_HBkl> zQ*xeWzsGT@-Oi^{5Z5rui)L=ayEL~YmmV>FYz}5x{aan?fR}ID-Q_mN&&dCXr}sfn z;N8FSd$n1aIS$Le^}#jFgf6z2TSbcI9r+sPK+A074z6WsrO1<9j?ETDYZaHe?R@?s zo89)J^2hCPHe>YZ?UuvJ`pkv5gW^I4eV&GZphoz$yB*88#IsXa3?S-iFH&;zSNPGt z$Z)~iHKnT$%bT}Yfh?=!AC8gIR_~UVUt-A^Ah^B^v(Y>7$Bw4X%5^!Z`n19E35!m_ z_PZ>)A%wCpC#uUa2Rq;Duftkft{8jqH=+~#?qN?FRX`TN7x{*?RLztim5Zok6hwR6pgTaRE5fd!g)5k zpeZw5)0Mt6JzWT5pQUYlOfanNAWwW zABpOW;d#1ie2C?QTJ1 z-?-eCz)Ou07q1m%4fy-VtkC(8vO)d6-}Ig*n5kdF?P`F3#z>BD3;j=wN4A4yS0z3i zzYR{oypBurcpW5j2Pl!Mb1qtu*b?uHM<#`wbpFn57@>9VOL2!uVY$$UP90f2PcW++ zKWWl7E%a0alNa96w<>?ZJN^Pg(+rxisa_c2^0IkG-}J)0>rH>Da3>&>+r3450~dnLdPVh-+%mh9WS^;e{JanYrr6q8D^VpI|G6$^p%HU zrk88%@N?c^u;QlP7`Rc=sG*-Taw)`gRhdlq!?3 zQCiesnELUX&2H5`Cgo=F$2WfX`xZl{v97?VI8EkijsTD?K7QY9ZsT7^fQ~9`~Fj+$JL8 z!_Wxtc+2LY3g~f0jlztRlgdNvWPT*z*c399`)Dzq0sDs^4oBydeVZLPQ2=fugnDTT z`z_-4k*NM)w)vRwQ1S0$Uc*PasIB`?TErMdHgmy0cC!)w3N|Fpkc_t@Vl z!_7&FvS$&lThi!SKag?wt|%yBTRZjFr5b^Qn())(ptZ$@%+qmo235>v`7Y7U=}UmL z9<|-e5kI)<*2n5Tm+pcNgoFyH1z=rR2BB6madi{b!|D)PYUuKZ}BqFI<&1+%2(yt^pCE76s%inIG+! 
zY8|f-#sJPAA=2J~i8y}q_yN(ZSoBS&{2P71=INpBmr^=tMz2^c121^Lh!z05)i5nS zih{UVR*euB!HBQ2`FjPA{E?Jxd4*_4xeu4g&QvJ-{BJk8)H!q`_^Db+o?a1a+yBWt zb~)W4JF@&{iH{|Aot2BrpIv7x&56LhQMD)zk|!3IFp4=-jpnYKp@6O{5vc2= zY)XG=V^_iY<+*%2;28uL-D0L?coKB{l~NUH#?|n$-{0F1=oIDqnYEL<*!R3Ev{)HH zwobPF5&lQpc<|#{a#e6Q(s~v0nM(~IFW7Ugg+YhHJBOO@=tH&W{GSzkb$o~VWtLJp~Y}=`W z7Pl!4H?V$x-Er&Gg?XmTUmO%61FpeCT$o35$FdE`Is$@0H^=GU7`lQ4^YOk#HqT>F z784GQ5k)Qnt|(E0zhBV-Qn-SlCo-S4)vqWA9-))`hA%eblKu6&wQs^YY`9_!7=3eK z(K-Gi=YGYd5}ozEBh9LXtld|o18u_;Fttx^CU%3QwZw!_T%v()lNOwCmkMpWIF=D_lq}HBqGqZ)>pEFJ}BV=0GUGdOqWO~F!!z1GGS6fgbo4$wV z4c!mdzpH!UV79Y9@Z0F9qu^gC3xNApPkMJI?gbs7?v`YK6^fS1?8Y)qD z-6q@<{MfbsEeJX7VedP5!!G}N=f55Z@?L%hLc-<4$RGEiKW)~)uu7%TlgZUfZv?U zmY7>;(CvLF)GNh|KuSq+FP|8En{{suwM`ITJLGo^u$F3>NUc+~sKYhzfx7uLkTb+dOwrQzLqo5mO_AiNp@m`Fq-krgLBer=a^N9Ftrrf;HW>UHSyO zY0{$IzV-9i1M^)0>Pqm!jc9GOKE;CvBO&CW_nx7~Lvner-MKAv-ZlrMd688(^SAv0 zLb8Nk^P4rS_mK_e>5Rvk{5bBQR`oOi&zZMK3S($-qKiBUS-*UIWP1zw1kdSOAaT5+ zT$0YAvw*#@lNZ=eSx(9CZuJX{Aq?nM=rcxi+)^>O`&_gi0%Kh28iJQsU$u~1NRy+) zaLk`~fc~vE&8%Lw!yhE`%Tc?fR#NK7FB(nLhyxCoe255$hdX2KjJJd_&u~f? zZZyTuI#wlL1waafgzxh^zMtuZG6pgQ_Y6rl-E<Bwj=TRqabb%Kp>#j6^aC^u5Q+M|+z=(cKCA{N`tR$Bvbd?PqizGl_S>bEMWe zAlEZJ)le&h2LNdEC28^$P>XnvNg=4{(L??laz!~JokPI8w;V18O)XemYvm6Rc{se{ zz4@vas+vjgqu@Gh^^o7Nz=n6p#IS!BX3g2N(CXJ6YeGjPL)^!%n)#k%{g8hlKx7k{ zx4+bJX)`vY9c=BeK61LcFA3IoWUFI?O*jSSYy6>lEdYOsj`%s%o<-(^9f@1s+)$9X zV62>O+F_Bb&||EPrkvFGZ+VFYHypG?j4f`xb26>ILR4^-0sa1FPdfO4(pvt>rk!eg ze2gFC`l7n;17lVNSnUHqci@}(QIyp)Z%@iNj2bLx)9NNAM-0O$ybpWhOI7e!gJ>N~ zGniE>-JIcf(hWsHnJlNK2_D?SF2ZOuwmB?>PWx>+&NA^{2fTjt*vpN83UT%9)@;@E zaqgwS59-g3C*8z85YZRls5;0`+c@GY<)fK$J^5^rJsz3a_ij-N$c?|<8{Du_!#ci(G;$I)D!jG+U9;m{d=Y#zcrKbX=ts$yl;5Jt;eM?-@3_pTxrfORZO zxT@>LFq67V|y~QEiUK)(16&@i4=G7O7rxb44nN1Rp zNKnen-whh!mEW-mlz8QYttoR7ZAR^9Zaqy2pfU+Ook)(!PS+eIl_ZXvdVV`-SE6>6 zGu`Lr#IE^}#K2ut@xR}4Iu8Tls;F{#ECJ0=X;<(L@>uBH&2KIqus_hSp5ITYc2 zF)Z1YK)EW`YwcPGo(|-NPG0b@LNw_g1;q|S`JAoMX!oo}-x=@a%)rI#?65J6M-(;9 z0|5wo2_A}-KGtB~9g>`w1p21TBeMFqQGCOV#G@w{iPY`PlwKY#^&&Ezc|9O|I&n@6 z?{Wvt-zUQvH&oE|2aN(-YJTuwqy6&Ll{-Bu;oKw-xNvQrir$tg{&(g>JG@m2WJ1W7 zL>(Z}=*DX?lZrfDZQ-X)xc9_CSSRVslO!W3O^sQD<23qKfCqSe9*BagUU5%m{AB?h z4M=%Q%BGB+ym`x7714G4sNYheuSlSYdmq7TKte$UG@y4v7n`_otLj_M_NaW1nB$y=k!~i-pT|B7uaKZDfk6l*^B)RDa_#6 z-WUy;)@oTbN%fUFm?8}{HZnMK!w}JLo9`_DP$ct&MJ#cPSQ=&8&#yYs-9@H9Pv-d7?lXT^#s}mMqDfCbHG9&x*w|n^h{|} z^m;(h-C*3O2hQqkXi}nax@YM12hHrJb)_<{|M_UbLllRTS7Z->$3Qye=9>y$MvISi{^kCRf~!lSr~co-TkmP&ag3Cl?26FK z_Y$iSDkv#^;dhBSuTNFHt!$9A=EdeoMmOnG2X)-kVe+Z5$R@!8uW#a^C+V+pDi`eF z(NIBT>22AtSOL|l1S_AT=gg&*WJOGx$2AqiDTdb;w+hYD9v9t+=3VN)7m%t2ffq6`U}!BPOx7 zgb;6`O8b5Ofr~}0af@2sOV6o~oN_~2eR8$fjSu}tbzj8xJQ~PaU)m$u?_)+nq5#HN zi255z8Qfj!VG|43R@6pB-=9a&;6mM9VmtiWMf2YAw(K};VbJDu_1J?H>}Xabp0AUU z|7GoGpI;RIwk1m3@<`o%FNflZVH-Q-Sbl=Uyy&6s5g`E-BZ26HS3}HCE?44qmv0ad z_!R{H1i3m|KDwxz-hPzk15+^%%@XF&$p|5Co}rKj@bVQXcxfOU`4X~xbyRnB0c~|7 z1NH!){So&!a3}-@UVZ{0Ei<{Gf2il%YLKIW)x!`l800e1av40xKESS)ef8y2CW$G1 zTh^zHpZ;`xUX5~>=ymx=k*X+VdvO|JXVcK|nRy~8y|i#X(bfDNPw??zI&h)GP-RpI zlTKXLCf=CEpE>#Yaow|uLIy$FWzl4+>E(5_W~U0y>fi+t)($636<3e zP@{U9M5AK^)NMH8a)aEJKJX&uA!4tO*lsv!h|_&4XnWXW-;KXv z^KO6dtj)g+v<_|yelq9czXN;TZHP^eC&lPO`}Rbaf};CqO2t%0?=1i^2%-qpuVqZ& z?}vB1;6l0Ix)98FLZ>V3DTFlxr8{k6{U$Yt&3iBY4K_`vQ+~|!P&vIp?PsBs*jA6= z^6bpW`)_f5f6EP3C~p7`0KPVk)hS}3cJg7@xqC+*f~Ps+zH^rik-4T|AKQS|_au3p zl{mcD^k;xvp6G2NBOPsMaNu^G@dc1|dqCns2Was874k1l?2y(?m5t>J`{oIP5Q9^( z&wsU7UkO|Z)_Eplk!$bEJe0nQzvn!Ved9JkdqKlX^mgUz_k@c?udIdAcz3tdAD#Rz zpwzimhTQrNxL=}P<~tqd-1MF>!o3194;}$xTtB10HD?(_JigLScY4yB!S;9^IbpI| 
zfG5~zlS;QB>fqA=Cd9`z?!dgCncoIheim>C@W@R4q!zwRD4#Zady&ZdX5LDom|PcOjjMd?0*Wk>8N|@s%goqjG-S|*PCZi5_gz=ZG9o&E&n@;#zc})Y zNU?uQLoZ>35EXe$?F>trZv)wQ=6`i#$6 z;|~bD&ZQcujmlBJSA73#3iF2Tg!KVuJHTeqoWD;D=oCr}5`i@-R0P#5T6dR(d2{LeNfnW<%6!;~kN5A?kxpp9K&0vM4E zva|>QWE-=A0D?Gdb|UXXgB##&Q`;+vg55`67d+@M3z9U3B6YjbO6!)q^m4yn`-Xm0 z0QCj-_O-yr78x^ih;UX%j!xS8fZrhQU^<&JbK)axkVY?X#;S&K?7d(IR=3@OLk{F! zel21u>FZjigzK=D+~1L0p8u^VW{Vxk!*#1T{k2lk2UKX5V{OCKpy6*eux>(%JYzEg zxy>4yd!Kw|zS!MmO5+jZ7*uPSI+d)%p-X{V{0AP+-A^FT$bWdcdRoxMm?tfTIKXCSM2bW{0VfIT-L;q8_IM$-UoN0NICPTOF7P%cx>6@u|eV}Q!UgZX_ z6+26jJWUMV-m{HRfUl5t;(xuV@^zBAIV6Y&yn!}N6%jG2DE?BZ+ZLgrfTTmW9qwM_*(dm?{Ng1qHqDokir{*D4O% z1W9G>Y5|Rynftz_nMVGvQ%8*(LWea>h2xyT2eOivf_ULWnN2nUF&aUEpK!>Hw2)Is zs4X6^v?En=ruqX0!j_o=YK1f09o7Sbnw>sPyltWOfqToAB9NTx+yDC8(ktp!V)eSz?Q)9wMY-o=yL22|e_Vwdf3bcD@l zY^~&=)Z64h5q;P2{)wYcsyvKKcUd=Gugm_mzlau*DLlM^#&N5bk$BuXHo@~(nTuYk z_uyw~s+n@RP6o$Fo8^J+MWJZ>qr(b&3V#S=nbLHh-8JVw|?U!~ee-elJ0 zB%NX$v`2E@wT-a3wEmH85co*}?SZO+zG?^OQa7U+NQ#%tKYpO44h~Rs&zsl%#iaI* z$XqU6=)7>@Bvc&b2DJdh6NF>2yD%;8C*b@nuP?qmcn7lK=@miBnc5r>IM$4tXLP_j zl}x!I^k{5a) zs`?}4>QOLshJtkE?{~hRMmB9xT zy3+Wwx7Nnj@IoNW5zy%Iow~chcA7iBZ$Dn0#c4>b8`{`9*XE&>f2EqK>WD80klzyQ z^~B`$ax6<&9o+ay+ulrc=}pbG{kkA*^)^_C<*U|Eu0EtA2w(^lAF%ktLb;r9Zz;+S zk~S1?4*q2^xR@hXQY?AX=E3Q)F~}M$O~Hs36v3a%p=cXj4tG|kb9C*A01_bvAE#+X zhYuaVadp%FoIa)wybfK`Kx6r^sx6iy+mP!>{_>^ZR|Puogr5}Is4@GC8$W4)dsow5 z+-XuThiq^*xSlVn^?%}@wZdlt646xG;h5z?|+D z35U)|#KK*oE;K7P2l8YMJ2))O0(%pbI*;0I$e%@W$ovBCveQ5n|DvXB{-j-5OZg6k z84WUze0aq`_J6Jzqfz{f=(0MG#LzBjiO>fA4~VDtEOZLZN2QADKT#@wcgqCqzFLk% z26eaQo|#oCVRm6@YpP}=w5F%p>1EaQz?CGDfzolPf+69WNyaF6?q>Od{z(Hxyeb3Y zqmOP~RGjzte?YB))zS2b?qs&5{uwQ-MzU1U+_};til-}vFR&FO8&y0wEofAuxZ64c z|1G2e3Q!oUAtz#**KSuGP$@;jz@LQcRE0^6xz-~eHENDY`a0Me^``O8wH6u|kF-C| z4(@f)dyHkw3Si1nZ3$w25?M_Agm``&@L;6Ek(u#;DJ8{MsJxpM?Ikv0nsvm)7TV*_ z;yaBnKSTfc!R&opK3QnkQfo7@%gE8=huw7Tvdal6C$v%+gBI8Obh~t`w+z!)N~GBi z(m%{x75M@cqE>Fugx!A}@J&y+)B$!51pfZ$U!G~^=0^GI8E-sMs99%N{AGUGK5^c< z!_J^_vRGzEEKArMzp$3@o#ejlNFtbY>`NVxf6`dW-;Us8#bm;&JPNba+k+3Xy3a9x z@?4}|Dw39kkW0M~+#_^y%SiZU;HC|Rv#Ip0-C_vkaX>p9Dq zp1!_DZ}w;JEh2w>Ue$^lO|b$A;|wNfIooHZD<`Jln0a@`FgT%CbP^&NN;z|;H920t zS*!;Id7(I^s6x2?O8ou=XU(lU69hQ6>X6TDs5JeSi%8fSzFr9(cCRx(u1T~^Qp2nZJ3^<9RWj*RZe zF|hMH*8M8Ee_0C>Y?8ltlqVBr1IM`p+b=V{1u2p?_T8qG}mPG;XzEmdzai$-n6HqYV;~c_GG~m>>=+bFy3G z;6$Tnii=0U?-HXL30#K9R1g(+gmh7}{$wyenL0)}B$ESr;>&mWN4yX5|BgL4JPx&5 zDav&+F{~9W`LW}edkc>$+D~mw4Q%F*`6i9i=bwwW06*lb_edB>-q+=cPk!d5v>exu z8#>!_%am*8w7I(K4%w}Q%L%+{B0tEnGlO4V5el`y8zR5INf#XmcVhP>4gK=o2sz>E z>f=@cVuM527Lsq=IN3GW8FbHU+V*Gpk5*b!#p-VaMjoj*7ty_kGukAIrPl7mzY5V7*=HCxtJOyDIiLQFaJ8Vnvs#1EdaQE^JE^vc zi*qObX<*Cx{O)3jFqZ0yl&fAz18=?RZFK(c#4W(kf)Pj*;N(E;a#Ybr5>tEN?0sw< z`tw$)l$h>f%5L>7>0x^g->G%ZT~*{GGMB8(LJ9B8)3{Zb`1V)U5C_@B1$;|@0|Kgv^r9r96cNsayE&U14X( zG{L3v-{PA)wGwgOS?6!kC0?We;Pzp-&?V#^MQZV+g6I3j?BvdrOU<_Wku8!`qRx%- zQ{3(^Y1k96F7CxFhfi7@h59w02= zN?Z(aS-iYen@~*rZoeDZ`;&F1{Y|z6JpSedU{f-e)M=8q5NY1WiZvOc9z5=JYvK@{`mG$)PXkl;1y++dAe>+ z+Mdz>3sV@Z=cPM{b1K}!{g;qgaB{QF{ivw}3!(0+X`7|T&mTK$^aNgyHrd+JDD6_oMdW8t9slNyZ4b8Q&}(t0-1z z^aMO5mQdtZO_^E%sN;u+{WN-Fk8@sn7>Bk5Yi<^j7mYUCo!1?Wt2R0`*vV_=rQz*W>`x;b0hJ~%tHKF`wh#{p);=KBfi;ezK~o1MqUk9%?;Mq|Jr2ZFrFmmyr49&^sZQk;(!PJoA+ZKqk|hsizAH5S57QU2lk$Fc?C8hs}t)nnPeT7Q#y0vDRe#|hO9dquLB40aTAM-cI%iiAmF^movL+;`l z4^7Fxe*ZZCfTCh|Eqb<7hLu~QfHK+96?rJhjyU{Su{Y8>dY@* z?qL`R^-%Mu*vwBi+w46I#{v7a+YWAmN>#l`+4Zn!I9pb@^S{u-Z+>_MaCM6w%?n5< zVDIP8)QlqK`~0jEZvL;@%b%)G=JrE0xIAzCGPC2mHhwJCunVpE?%zfL_|y86EC>n; z!!F?Zg=0q(3WoX*)&2K{2i*di8)={|q zKj$aYoSTRI$9?RyBO+a?mtlZRLSL=F@4wNOKhIwj+f5*OwX^WeZ1x8`TAcU!hY*lY 
z>mTVVB=);}zL2b^r~J$Puk+uTg5NbBHAIrKAKoqkk%NPZ3F^w$H7GsEIe#{-qT(R?nl?Lu9zj8fwj_XobP@k?s zdxL~y$6JaYi$a}q794Os&OcKZyn`b4usc9Tbea?gtl<6UpYj7UKl5|_irj8;#bS-E zRL=7z*%hqGgsLz9v~->T-5#fV>;rQpG`u)@e0j6 z<=Q^%rG|kh4<)(UY>rdWdxq6ey z%#OI6f3LGVa?W{@sSj|mmOl=glNJJ2?BzF?u5kp*E!`g_s#nUsTqPP z&MX~YuGc=wd0!tqBCQ0%b&`q+T0?DmlfU9l@Yqm&=^(Kp$&RLn^_K@7j_>c@Kfw9T z{=ChvYMfp53d#*oxy!$x&;+h=sbQemLrHdRI@aGH_*(t#{+qetCVKMMHZJPx&%a;1 zf7Fxbe*R#>UdyjOfA{l8OOXrGYokSU7q(KvK#Yfy?0*0LQGY;TB2iR#nqnl?V@kHz zlrntiBi2s?n<8qKTD}?Wqyu{z5%7Vx!aAWJ01x?3Q;b4Axavm1)F!Lw`3c!cHA;M%4O2(#t{ zTg>f@iqUy#gY%85(1=f%&6f6I?2>8$d>;T1)&Z6d{G~M4Bx5{R9Qk0=cRyob4^J`sq!S~xghcYkcqi)|7Jf15ZF6oU~X5Avj&G)4ldSPwg9vm79c#b9HM8%uvIE;9L!A}vQ^246l*#e zWw$A)ir@S1Xad=gOfe2D>-2BT9W<~}jBn{-POY%x0TuLlFwy&dEg}jyk1U@}WMmU+ zI$D?qZERk68sFDI@7v~5ECc8ruY5DwY0P{W^gs|?uWIAHmJi`HvDK^t5NaXZX9V0GD8(}mh+n4*1VKvbTM{idd;y!3APOmXutvh;1Z^bm0k#$Up zsgZhiY*)Qy%sv^QKkWz1bCvv4i>@J~kNKoSdK#QRZPp?08p3tY$F5?GQLAH-G9woN zX&WWm!-56Gr4`ou9RSDM1mLKnRP=M7Am~h(4Z{=R&p%q(8lRwYT>TRuZPp?Er(q9| z7o&^%(h? zS?(|TCOnW%0?7oL6Lin*Q~_x`r7;KJhk-&JpEz?_rf-&^jS(-^W6LwvqwyXw_Cgx) z;>Z2NJnv`I2|I#=^cU(21r2QP-#^anH$g>x)hndq9G*5U3YP}+A3_6R2zA_Jd~^scXiBJ(&O!nc^#3v=Rcp-Fr7db5RSKijeUTerhNXg3@#QBUW)*S8nwJp z4GyZB&#rz&63E8+L)Tt)4L%43c+)%4<+j-}+%}zmgJJ-7w}C@e{_@4N%>Gk!p(k_{ zMAl`qY_^%&{`-%PX$j#yn9oQhiB3?*!9Fo-E6v$G>UB2NoL2hTvL{~yYhiVsU0N%LYL_`K!4r&d-CbW`7Ks0}b)(2cD4Tb+yvA4-CP-|62^Hh~JD#L7 zbq?e}H&mmHDXJXXCZOgR%kt`eFhSBcTu44JUCg;bA~0J@I#jX{>^^(<|Mbwf4%9Gg zGs(4Cvt;DJnx=#i4_>TKs4lgzK&4B)T;to*hEzF0?jj1_Hd3m3TpE6cjvoaCAvC$L(aK+f)dBM z<9F&jb;5Mf}X^eWK%`os~AACV|0K zh=z!O{D+C5)N^8S)-1`GdS?jn;5|$+ka~|RgJ)}q--ns}_)Xo&Tx#(J>-&OD?vqV! zknlG|*;oUcuLKq8vI!~w*`0QcP7~m5xSht$Z@>ME(^3DJlwHh~`kDKU_b~uQ-HUi$ z4Q`alwU)}yOj;c|Zm&FB8r+Xp@QxjtQSj)-?~UPX@?10HtUrdsDVJj`H~mZ^Oj0L~ zBQQVQ6GCk7S5I1M?$Yk==lp}1{5`94LLxTSlE%~x+E944{OaU>4QLJ#1>2~jOsFk< zcJm?SwU{ghFha$Q;i2FvtH_^lK9l=23l#1|{S!|l_H)APun+OhL6N~>E_-kcjtbk( za$K;cF4)n*i}~+<&X*urU&WA9ZusWJ%7{Cr2xP z`~AbYEGz%61<%gefE1Yao`-`i9iVX-M;%2}hC5Svjj=SUy*Ly4PC@enWX=VhB1Sj3 zQO1-<_|@y&ugU)Q`^RgBw6HQZuT+iRzJF*^JDZv8Dx(GJkKR8Nf0b3_#jKtk+aJGw z3_kX^JrlkO=fST?8HcFGW0nVA%aE+HrwWZT| zh7;4XI>`>K-@~{zJ`>G;3eABgRam}BxX?`<&YAz7341NcP`Xb>a}EHC<}n5bY;uPH zGM_)RO^B7vVGCjT{&CEc&LzPe`4`KEV*K4dNrY{<^>b4_7NfZZE}Ni-tSO(yY!B*T|-jchrgIj&RduMYq?gDat+T?r5`8 zve~}VO@zI+RZR7mKl1zkpYR#cdmVXI^$N{@oQ|MaRs(V1$vqkLwG85{IZ zvs+|FQFq(?w0Qj|7ZUkSDrYBt%D;?TlRp@$4kBFbqCJ0F$jc_whg`1DbyU>$Ylr3u z%-OE5ZN!5R&b!EZ=Sz-Hg!Nc`RrGZ&kE8Q`lvtezoC?D%;&YRl!KkY zkA@c~Cyx-$zkCmfp|p=utqn7O+bIoRC5>T4emwu{U((34vSg;RH2orKOKS>-_GW%!z8C$DX zb2xJe2S+`BD$2P|jy>!C1Ava&wa$P;H1Wj8j?4(fqYRjw+A+FXw+?rFdw`IP14eX7lgqFgj5_ zf5l0U<{wb2mc&@Mq>asEufcEMKfIsCR{d~d=)HNeImW&!X6HB64S*5pAH>W<2;l3P zyKg|Iuyk%q+h68CGf-*}Kj$|O<#0jwA9tU&M4vj?5rZfM9N)DKgX7ctNBj+nPATE^ zWJ!CBZSO>d%D8r1Vh~W~b6c1`8nUBgILqG=8JLUw$ZxRag2n>n%uf%RjDw;9|KV7U zX%wQ{{K4j%BDC=p_?q8&_L_Ku-{o)G7f}_RbrCccDDU$V#m4K{6`-X04XR-f;-{|A z9mB(jo2E++)93t^yZF5>9DUAzwh zpNYY$Z!tTY)vf|Q4C`be63JDKI5Q7){}e(zon?}Z@*&H*3cWF-f3gmQJ9fFu(mqG~2iN&2F}e&VCS3w!Slu|GnC(u6FD<^Uv(gcT4hZepZ-y{G4)K za1Yo_IjbjqanjI{f=!~wZ9+Nxnu4Z085?#5uaCpeZL!kO`M1el=U>=!|H-Hgj>tfH#t^*5LcUptiGnhZG%@V+PwT?9KmolXvkjpjYS?-Xw|MoB z&jCK$v#&hY#vGhs|%T&>J&a zo&|y1BQdDWH>TbXCulkD%g$l?tk)LklLY`+wGn;JK0JSy%}iUsr?n{bJWWJ1--Sfc zd=kl>_II=CssQ~|!$ueI#P+)CC+|;wf~~k)KCBb0NzPNK~xI5x|!XaxwRB@#!$}nZ;$M(gssgK()Vq|7hN>Y(`Mt<*&~;25@yc;s~V6D@WY-ZD_EpzkUCxdV4>y` zE6j?8pzSnro>L_dX=j9ewnHun0Uz5`fUr3WiZCAr*Zr^Og7Y-)h_UZu>Wt6=?00Oq z{5%x>Qx0IX {utterance} \nintent: {intent} \nslot: {slot}" - total_virtual_tokens: 10 - answer_only_loss: True - virtual_token_splits: [10] - 
truncate_field: null - - # SGDQA args - encoder: - dropout: 0.1 - - # Zero Shot Intent Model args - original_nemo_checkpoint: null ## cannot directly load as .nemo uses the pre-refactor model, therefore transfer its attributes over - - dataset: - - ## All tasks/models - data_dir: ??? # location to load data from - dialogues_example_dir: ??? # store prediction files - task: sgd # [sgd, assistant, zero_shot, ms_marco, sgd_generation, design, mellon_qa] - debug_mode: false # small number of examples for debugging - max_seq_length: 128 # the maximum number of tokens per sample - - ## Dialogue S2S and GPT Generation Model params - input_field: utterance+response # passage+utterance, utterance, response, utterance+response, system_actions - output_field: fluent_response # response, fluent_response, system_utterance - - ## Dialogue GPT Classification Model params - field: intent # [intent, slots, service] - few_shot: 0 # int ; 0 to 10, for number of examples in prompt - eval_mode: ranking # ranking or generation or binary_score - binary_score_subsample: false # subsample negative examples for binary score training - binary_score_subsample_ratio: 2 # number of negative examples per postive example - prompt_template: default # default, prompt_tuning, i_want_to # "This example is" for zeroshotintentmodel #acts_slots_values, slots_values, values for DialogueS2SGenerationDataset - target_template: default # default, with_description, with_slots - - ## SGD task specific params - system_utterance: prev_turn # prev_turn, next_turn: prev_turn (default for sgdqa) takes the system utterance that precede the user utterance; next_turn (for sgd_generation) takes the system utterance that follows the user utterance - num_tasks: 1 # number of task heads 1 for DialogGPTClassification and 6 for SGDQA - - ## SGD and Zero Shot task specific params - preprocess_intent_function: default # default, lowercase, description # remove_domain for zero_shot task - - ## SGDQA model specific params - subsample: false # balances negative and positive training examples for improved performance - task_name: sgd_single_domain # or from [sgd_all, sgd_all_single, sgd_multi_domain, debug_sample] - state_tracker: nemotracker # or baseline - use_cache: false # uses a cache to store the processed dataset, you may use it for large datasets for speed up - use_fuzzy_match: true # Whether to use fuzzy string matching when comparing non-categorical slot values. Should be set to False when conducting multiwoz style evaluation. - joint_acc_across_turn: false # Whether to compute joint goal accuracy across turn instead of across service. Should be set to True when conducting multiwoz style evaluation. 
- max_num_cat_slot: 6 # maximum number of different categorical slots per service in dataset - max_num_noncat_slot: 12 # maximum number of different non-categorical slots per service in dataset - max_value_per_cat_slot: 12 # maximum number of different categorical slot values per service in dataset - max_num_intent: 4 # maximum number of different intents per service in dataset - num_samples: -1 # restrict num_samples to an int value, if -1 all samples will be used - pad_label: -1 # if -1 not slot token will be used - ignore_extra_tokens: false - ignore_start_end: true # do not use first and last token for slot training - do_lowercase: false - - #Zero Shot Intent Model args - class_balancing: null # or weighted_loss - num_classes: 3 - - # Mellon QA, MS Marco and Design task - dev_proportion: 10 # These datasets do not have a dedicated dev set, therefore need to split train into a new train and dev. Indicate an integer (5-90) for the proporton for dev set - - train_ds: - ds_item: "train" - prefix: train - batch_size: 16 - shuffle: true - num_workers: 3 - drop_last: false - pin_memory: false - - validation_ds: - prefix: test - ds_item: ["dev"] - batch_size: 8 - shuffle: false - num_workers: 3 - drop_last: false - pin_memory: false - - test_ds: - prefix: test - ds_item: ["test"] - batch_size: 8 - shuffle: false - num_workers: 3 - drop_last: false - pin_memory: false - - optim: - name: adamw - lr: 1e-4 - # optimizer arguments - betas: [0.9, 0.999] - weight_decay: 0.01 - - # scheduler setup - sched: - name: PolynomialDecayAnnealing - # Scheduler params - warmup_steps: null - warmup_ratio: 0.02 - last_epoch: -1 - # pytorch lightning args - monitor: val_loss - reduce_on_plateau: false - -exp_manager: - exp_dir: null # exp_dir for your experiment, if None, defaults to "./nemo_experiments" - name: "SGDGEN" # The name of your model - create_wandb_logger: True - wandb_logger_kwargs: - name: ??? - project: SGDGEN - create_tensorboard_logger: True # Whether you want exp_manger to create a tb logger - create_checkpoint_callback: True # Whether you want exp_manager to create a modelcheckpoint callback - resume_if_exists: false - resume_ignore_no_checkpoint: false \ No newline at end of file diff --git a/examples/nlp/dialogue/dialogue.py b/examples/nlp/dialogue/dialogue.py deleted file mode 100644 index 3f4c5581eb5a..000000000000 --- a/examples/nlp/dialogue/dialogue.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This script contains an example of how to train and test dialogue models in NeMo. - -***Setting the configs*** -The model and the PT trainer are defined in a config file that declares multiple important sections. -The most important ones are: - model: All arguments that are related to the Model - model, loss, optimizer, - schedulers, and datasets/data loaders. 
- trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs, - precision level, etc. - -This script uses the `/examples/nlp/dialogue_state_tracking/conf/dialog_config.yaml` config file -by default. You may update the config file from the file directly. The other option is to set another config file via command-line arguments by `--config-name=CONFIG_FILE_PATH'. - - -***Model Training*** - python dialogue.py - do_training=True - model.dataset.data_dir= - model.dataset.dialogues_example_dir= - model.dataset.task= e.g. sgd - model.language_model.pretrained_model_name= e.g. gpt2 - trainer.devices=[] - -***Model Evaluation*** - command as above, change do_training=False -""" - -import os - -import lightning.pytorch as pl -from omegaconf import DictConfig, OmegaConf - -from nemo.collections.nlp.models.dialogue.dialogue_gpt_classification_model import DialogueGPTClassificationModel -from nemo.collections.nlp.models.dialogue.dialogue_gpt_generation_model import DialogueGPTGenerationModel -from nemo.collections.nlp.models.dialogue.dialogue_nearest_neighbour_model import DialogueNearestNeighbourModel -from nemo.collections.nlp.models.dialogue.dialogue_s2s_generation_model import DialogueS2SGenerationModel -from nemo.collections.nlp.models.dialogue.dialogue_zero_shot_intent_model import DialogueZeroShotIntentModel -from nemo.collections.nlp.models.dialogue.intent_slot_classification_model import IntentSlotClassificationModel -from nemo.collections.nlp.models.dialogue.sgdqa_model import SGDQAModel -from nemo.collections.nlp.modules.common.megatron.megatron_utils import compute_model_parallel_rank -from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy -from nemo.core.config import hydra_runner -from nemo.utils import logging -from nemo.utils.app_state import AppState -from nemo.utils.exp_manager import exp_manager - - -@hydra_runner(config_path="conf", config_name="dialogue_config") -def main(cfg: DictConfig) -> None: - pl.seed_everything(42) - logging.warning('This script is no longer supported in NeMo and is scheduled for removal in the 24.11 release.') - logging.info(f'Config: {OmegaConf.to_yaml(cfg)}') - - try: - strategy = NLPDDPStrategy( - no_ddp_communication_hook=True, - find_unused_parameters=True, - ) - except (ImportError, ModuleNotFoundError): - strategy = 'auto' - - trainer = pl.Trainer(**cfg.trainer, strategy=strategy) - - exp_manager(trainer, cfg.get("exp_manager", None)) - - app_state = AppState() - app_state.data_parallel_size = cfg.model.data_parallel_size - if cfg.model.tensor_model_parallel_size > 1: - app_state.model_parallel_size = cfg.model.tensor_model_parallel_size - app_state.tensor_model_parallel_rank = compute_model_parallel_rank( - trainer.local_rank, app_state.model_parallel_size - ) - - if 'bert' in cfg.model.language_model.pretrained_model_name: - if cfg.model.dataset.task == 'sgd': - if cfg.model.original_nemo_checkpoint is not None: - model_class = DialogueZeroShotIntentModel - else: - model_class = SGDQAModel - elif cfg.model.dataset.task in ['zero_shot', 'design']: - model_class = DialogueZeroShotIntentModel - else: - model_class = IntentSlotClassificationModel - elif 'gpt' in cfg.model.language_model.pretrained_model_name.lower(): - if cfg.model.dataset.task in ['ms_marco', 'mellon_qa']: - model_class = DialogueGPTGenerationModel - else: - model_class = DialogueGPTClassificationModel - elif ( - 'bart' in cfg.model.language_model.pretrained_model_name.lower() - or 't5' in 
cfg.model.language_model.pretrained_model_name.lower() - ): - # please use bf16/32 with t5-large and above - # see https://github.com/huggingface/transformers/pull/10956 - model_class = DialogueS2SGenerationModel - elif 'sentence-transformers' in cfg.model.language_model.pretrained_model_name.lower(): - model_class = DialogueNearestNeighbourModel - - if cfg.pretrained_model or (cfg.model.nemo_path and os.path.exists(cfg.model.nemo_path)): - if cfg.pretrained_model: - logging.info(f'Loading pretrained model {cfg.pretrained_model}') - model = model_class.from_pretrained(cfg.pretrained_model) - else: - logging.info(f'Restoring model from {cfg.model.nemo_path}') - model = model_class.restore_from(cfg.model.nemo_path, trainer=trainer) - - if cfg.do_training: - model.setup_training_data(train_data_config=cfg.model.train_ds) - model.setup_multiple_validation_data(val_data_config=cfg.model.validation_ds) - else: - logging.info(f'Config: {OmegaConf.to_yaml(cfg)}') - model = model_class(cfg.model, trainer=trainer) - - if cfg.do_training: - trainer.fit(model) - if cfg.model.nemo_path: - if not os.path.exists(cfg.model.nemo_path): - model.save_to(cfg.model.nemo_path) - else: - updated_nemo_path = cfg.model.nemo_path.replace(".nemo", "_new.nemo") - logging.warning("nemo path exists, saving at {} instead".format(updated_nemo_path)) - model.save_to(updated_nemo_path) - - else: - data_dir = cfg.model.dataset.get('data_dir', None) - dialogues_example_dir = cfg.model.dataset.get('dialogues_example_dir', None) - - if data_dir is None or dialogues_example_dir is None: - raise ValueError('No dataset directory provided. Skipping evaluation. ') - elif not os.path.exists(data_dir): - raise ValueError(f'{data_dir} is not found, skipping evaluation on the test set.') - else: - if hasattr(model, "update_data_dirs"): - model.update_data_dirs(data_dir=data_dir, dialogues_example_dir=dialogues_example_dir) - model._cfg.dataset = cfg.model.dataset - - if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.ds_item is not None: - model.setup_multiple_test_data(test_data_config=cfg.model.test_ds) - trainer.test(model) - - -if __name__ == '__main__': - main() diff --git a/examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py b/examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py deleted file mode 100644 index 53a7ecfed2ef..000000000000 --- a/examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import argparse -import json -from ast import literal_eval - -import ijson - - -def main(filename): - with open(filename, 'r') as file: - objects = ijson.kvitems(file, 'wellFormedAnswers') - valid_old_key_to_new_key = {} - new_key = 0 - for key, well_formed_answer in objects: - value = well_formed_answer if isinstance(well_formed_answer, list) else literal_eval(well_formed_answer) - if len(value) > 0: - valid_old_key_to_new_key[key] = str(new_key) - new_key += 1 - filtered_data = {} - fieldnames = ['query', 'query_type', 'answers', 'wellFormedAnswers', 'passages'] - for fieldname in fieldnames: - add_data(filename, filtered_data, fieldname, valid_old_key_to_new_key) - - with open(filename, 'w') as fw: - json.dump(filtered_data, fw) - - -def add_data(filename, filtered_data, fieldname, valid_old_key_to_new_key): - with open(filename, 'r') as f: - objects = ijson.kvitems(f, fieldname) - filtered_data[fieldname] = { - valid_old_key_to_new_key[key]: query for key, query in objects if key in valid_old_key_to_new_key - } - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--filename") - args = parser.parse_args() - main(args.filename) From fc809b8026da88a75a56d78fe36db7b12ab4c3b7 Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Date: Fri, 13 Dec 2024 10:59:03 -0800 Subject: [PATCH 036/128] add JitTransform (#11131) * add JitTransform Signed-off-by: Alexandros Koumparoulis * fixes Signed-off-by: Alexandros Koumparoulis * add JiT CB test Signed-off-by: Alexandros Koumparoulis * remove stale imports Signed-off-by: Alexandros Koumparoulis * typo Signed-off-by: Alexandros Koumparoulis * cleanup Signed-off-by: Alexandros Koumparoulis * add jit callback test Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * fix param passing Signed-off-by: Alexandros Koumparoulis * use sgd in test_nemo_jit_cb Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * add thunder call Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * Use .compile method to avoid changing module structure Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * Apply isort and black reformatting Signed-off-by: akoumpa * Use JitConfig Signed-off-by: Alexandros Koumparoulis * thunder setting Signed-off-by: Alexandros Koumparoulis * avoid reentry Signed-off-by: Alexandros Koumparoulis * remove optional Signed-off-by: Alexandros Koumparoulis * rewrite Signed-off-by: Alexandros Koumparoulis * refactor & module_selector Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa --------- Signed-off-by: Alexandros Koumparoulis Signed-off-by: akoumpa Signed-off-by: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Co-authored-by: akoumpa --- .github/workflows/cicd-main.yml | 11 ++ nemo/collections/llm/recipes/__init__.py | 3 +- nemo/lightning/pytorch/callbacks/__init__.py | 3 + .../pytorch/callbacks/jit_transform.py | 134 +++++++++++++ tests/collections/llm/test_nemo_jit_cb.py | 185 ++++++++++++++++++ 5 files changed, 335 insertions(+), 1 deletion(-) create mode 100644 nemo/lightning/pytorch/callbacks/jit_transform.py create mode 100644 tests/collections/llm/test_nemo_jit_cb.py diff --git a/.github/workflows/cicd-main.yml 
b/.github/workflows/cicd-main.yml index d3098db1701c..74f20ed52392 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -4063,6 +4063,16 @@ jobs: AFTER_SCRIPT: | rm -rf ~/.cache/nemo/models + L2_NeMo_2_jit_callback: + needs: [cicd-test-container-setup] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_NeMo_2_jit_callback') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + + python tests/collections/llm/test_nemo_jit_cb.py + L2_NeMo_2_T5_Pretraining: needs: [cicd-test-container-setup] uses: ./.github/workflows/_test_template.yml @@ -4786,6 +4796,7 @@ jobs: - L2_Megatron_GPT_Reranker - L2_NeMo_2_NeMo_Mcore_Mixtral_bitexact - L2_NeMo_2_PTQ_Llama2_FP8 + - L2_NeMo_2_jit_callback - L2_NeMo_2_LLAVA_NEXT_MOCK_TRAINING if: always() runs-on: ubuntu-latest diff --git a/nemo/collections/llm/recipes/__init__.py b/nemo/collections/llm/recipes/__init__.py index f0eff2ac0347..7dce9a8a0019 100644 --- a/nemo/collections/llm/recipes/__init__.py +++ b/nemo/collections/llm/recipes/__init__.py @@ -76,7 +76,7 @@ t5_220m, ) from nemo.collections.llm.recipes.log.default import default_log, default_resume -from nemo.collections.llm.recipes.optim import adam +from nemo.collections.llm.recipes.optim import adam, sgd from nemo.collections.llm.recipes.run.executor import torchrun __all__ = [ @@ -141,6 +141,7 @@ "gemma2_9b", "gemma2_27b", "adam", + "sgd", "default_log", "default_resume", "torchrun", diff --git a/nemo/lightning/pytorch/callbacks/__init__.py b/nemo/lightning/pytorch/callbacks/__init__.py index 031f027e63b2..b3a3074f4992 100755 --- a/nemo/lightning/pytorch/callbacks/__init__.py +++ b/nemo/lightning/pytorch/callbacks/__init__.py @@ -15,6 +15,7 @@ from nemo.lightning.pytorch.callbacks.ddp_parity_checker import DdpParityChecker from nemo.lightning.pytorch.callbacks.debugging import ParameterDebugger from nemo.lightning.pytorch.callbacks.garbage_collection import GarbageCollectionCallback +from nemo.lightning.pytorch.callbacks.jit_transform import JitConfig, JitTransform from nemo.lightning.pytorch.callbacks.memory_profiler import MemoryProfileCallback from nemo.lightning.pytorch.callbacks.model_callback import ModelCallback from nemo.lightning.pytorch.callbacks.model_checkpoint import ModelCheckpoint @@ -38,4 +39,6 @@ "GarbageCollectionCallback", "ParameterDebugger", "ModelCallback", + "JitTransform", + "JitConfig", ] diff --git a/nemo/lightning/pytorch/callbacks/jit_transform.py b/nemo/lightning/pytorch/callbacks/jit_transform.py new file mode 100644 index 000000000000..cbfca8a25d88 --- /dev/null +++ b/nemo/lightning/pytorch/callbacks/jit_transform.py @@ -0,0 +1,134 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
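+# This module provides the ``JitConfig`` dataclass and the ``JitTransform`` Lightning callback
+# defined below. On train start, ``JitTransform`` JIT-compiles the submodules selected by
+# ``module_selector``, using either ``torch.compile`` or Lightning Thunder.
+#
+# Typical usage (sketch; the ``torch_kwargs`` values are illustrative only):
+#
+#     from nemo.lightning.pytorch.callbacks import JitConfig, JitTransform
+#
+#     config = JitConfig(use_torch=True, torch_kwargs={'dynamic': False})
+#     trainer = pl.Trainer(callbacks=[JitTransform(config)])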
+ +import re +from dataclasses import dataclass, field + +import torch +from lightning.pytorch.callbacks.callback import Callback + +from nemo.lightning.io.mixin import IOMixin + + +def extract_module_attr_name(pl_module: "pl.LightningModule") -> str: + if hasattr(pl_module, 'module'): + return 'module' + elif hasattr(pl_module, 'model'): + return 'model' + else: + raise ValueError("Expected lightning_module to have a .model or .module attr.") + + +def listify(x): + if not isinstance(x, list): + return [x] + return x + + +def get_modules_from_selector(model, module_selector): + if module_selector is None or module_selector == '' or module_selector == '*': + yield model + return + + assert isinstance(module_selector, str), module_selector + atoms: List[str] = module_selector.split('.') + tmp = model + + for i, item in enumerate(atoms): + if '*' in item: + # handle wildcard selector + # TODO(@akoumparouli): support more complex selectors e.g. net_b.*.net_c.*.conv + for name, module in tmp.named_children(): + if re.match(item, name): + yield module + return + + if not hasattr(tmp, item): + raise AttributeError(tmp._get_name() + " has no " "attribute `" + item + "`") + tmp = getattr(tmp, item) + + if not isinstance(tmp, torch.nn.Module): + raise AttributeError("`" + item + "` is not " "an nn.Module") + + yield tmp + + +def compile_module(config, module): + if config.use_torch: + module.compile(**config.torch_kwargs) + return True + elif config.use_thunder: + import thunder + import thunder.dynamo + from thunder.dev_utils.nvtx_profile_transform import NvtxProfileTransform + + # With this setting, Dynamo Graphs inline all the modules (so Dynamo FXGraph just + # consists of `call_function` nodes only and no `call_module` node. + # This is the default setting in PyTorch 2.5 onwards + # (see https://github.com/pytorch/pytorch/pull/131275) + torch._dynamo.config.inline_inbuilt_nn_modules = True + + xforms: list = [NvtxProfileTransform()] if config.profile_thunder else [] + module.compile(backend=thunder.dynamo.ThunderCompiler(transforms=xforms)) + return True + else: + return False + + +@dataclass +class JitConfig: + module_selector: str = '' + use_torch: bool = False + torch_kwargs: dict = field(default_factory=dict) + use_thunder: bool = False + profile_thunder: bool = False + + +class JitTransform(Callback, IOMixin): + """ + Apply JIT-compling on PyTorch model + + Args: + config (JitConfig): The jit-compiler config to use. + + Example: + >>> from nemo.lightning.pytorch.callbacks import JitTransform + >>> trainer = Trainer(callbacks=[JitTransform(JitConfig(use_torch=True))]) + """ + + def __init__(self, config: JitConfig): + assert config is not None + self.config = config + assert not (self.config.use_torch and self.config.use_thunder) + + def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: + if self.config is None: + return + if not self.config.use_thunder and not self.config.use_torch: + return + + attr_name = extract_module_attr_name(pl_module) + model = getattr(pl_module, attr_name) + + if getattr(pl_module, '_compiled', False) == True: + return + + # TODO(@akoumparouli): you want to concatenate (via regex OR-operator) all expressions + # and trigger the compile if anyone matches, instead of iterating over all O(N^2). 
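+        # Selector semantics: an empty string or '*' in ``module_selector`` selects the whole
+        # model; a dotted selector walks the corresponding attribute chain, and a component
+        # containing '*' is matched as a regex against ``named_children`` at that level.
+        # Every module yielded below is compiled in place by ``compile_module``.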
+ compiled = False + for config in listify(self.config): + for module in get_modules_from_selector(model, config.module_selector): + compiled |= compile_module(config, module) + + setattr(pl_module, '_compiled', compiled) diff --git a/tests/collections/llm/test_nemo_jit_cb.py b/tests/collections/llm/test_nemo_jit_cb.py new file mode 100644 index 000000000000..9d7bc2f4a4e7 --- /dev/null +++ b/tests/collections/llm/test_nemo_jit_cb.py @@ -0,0 +1,185 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import itertools + +import fiddle as fdl +import lightning as pl +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader + +from nemo import lightning as nl +from nemo.collections import llm +from nemo.collections.llm import fn +from nemo.lightning import io +from nemo.lightning.io.mixin import track_io +from nemo.lightning.pytorch.callbacks import JitConfig, JitTransform + +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' + + +def make_squad_hf_dataset(data_path, tokenizer): + tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) + + def formatting_prompts_func(examples): + alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. 
+ + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""" + instruction = examples["context"] + input = examples["question"] + output = examples["answers"]['text'] + if isinstance(output, list): + output = output[0] + text = alpaca_prompt.format(instruction, input, output) + "" + tokens = tokenizer.text_to_ids(text) + return {'input_ids': tokens, 'labels': tokens} + + datamodule = llm.HFDatasetDataModule(data_path, split="train[:100]", pad_token_id=tokenizer.eos_id) + + datamodule.map( + formatting_prompts_func, + batched=False, + batch_size=2, + remove_columns=["id", "title", "context", "question", 'answers'], + ) + + return datamodule + + +@track_io +class OrdTokenizer: + def __init__(self, vocab_size=30_000, num_reserved_tokens=128, special_token_names=['bos_id', 'eos_id', 'pad_id']): + self.vocab_size = vocab_size + self.num_reserved_tokens = num_reserved_tokens + self.special_token_names = special_token_names + assert len(self.special_token_names) < num_reserved_tokens + + def __getattr__(self, name): + if name in self.__dict__.get('special_token_names', {}): + return self.__dict__['special_token_names'].index(name) + elif name in self.__dict__: + return self.__dict__[name] + else: + raise AttributeError + + def text_to_ids(self, text): + token_ids = list(map(lambda x: self.num_reserved_tokens + ord(x), list(text))) + assert max(token_ids) < self.vocab_size + return token_ids + + +def align_labels(logits, labels): + logits = logits.float() + n_cls = logits.shape[-1] + if logits.shape[-2] == labels.shape[-1]: + logits = logits[..., :-1, :].contiguous() + labels = labels[..., 1:].contiguous() + elif logits.shape[-2] == labels.shape[-1] + 1: + logits = logits[..., :-1, :].contiguous() + else: + raise ValueError("Mismatched labels and logits shapes (" + str(labels.shape) + " " + str(logits.shape)) + return logits.view(-1, n_cls), labels.view(-1) + + +class DummyJitModel(pl.LightningModule, io.IOMixin, fn.FNMixin): + def __init__( + self, + tokenizer=None, + has_jit=False, + ): + super().__init__() + self.has_jit = has_jit + self.tokenizer = tokenizer + + def configure_model(self) -> None: + if not hasattr(self, "module"): + self.module = nn.Sequential( + nn.Embedding(30_000, 512), + nn.TransformerEncoderLayer(512, 8, 4096, dropout=0.1), + nn.Linear(512, 30_000), + ) + + def forward(self, batch): + output = self.module(**batch) + if self.has_jit: + assert self.module._compiled_call_impl is not None + assert callable(self.module._compiled_call_impl) + else: + assert self.module._compiled_call_impl is None + expected_cls = torch.nn.modules.container.Sequential + assert isinstance(self.module, expected_cls), type(self.module) + return output + + def training_step(self, batch): + if self.has_jit: + assert hasattr(self, '_compiled') + assert self._compiled == True, self._compiled + else: + assert not hasattr(self, '_compiled') + labels = batch.pop('labels') + loss_mask = batch.get('loss_mask', None) + output = self.forward({'input': batch['input_ids']}) + logits, labels = align_labels(output, labels) + return F.cross_entropy(logits, labels) + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--devices', default=1) + parser.add_argument('--max-steps', type=int, default=1) + args = parser.parse_args() + + tokenizer = OrdTokenizer() + data = make_squad_hf_dataset(DATA_PATH, tokenizer) + + for use_torch, use_thunder in itertools.product([True, False], [False, False]): + if use_torch and use_thunder: + continue + model = 
DummyJitModel(tokenizer=tokenizer, has_jit=use_torch | use_thunder) + optim = fdl.build(llm.sgd.pytorch_sgd_with_flat_lr(lr=1e-5)) + + jit_config = JitConfig(use_torch=use_torch, use_thunder=use_thunder) + transform = JitTransform(jit_config) + + llm.api.finetune( + model=model, + data=data, + trainer=nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator='gpu', + strategy='auto', + log_every_n_steps=1, + limit_val_batches=0.0, + num_sanity_val_steps=0, + accumulate_grad_batches=1, + gradient_clip_val=1.0, + use_distributed_sampler=False, + callbacks=[transform], + ), + optim=optim, + log=None, + ) From 36ee6f3398eda2423abad89368ff1b3dddc9e03f Mon Sep 17 00:00:00 2001 From: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Date: Fri, 13 Dec 2024 21:31:39 +0200 Subject: [PATCH 037/128] NeMo 2.0 documentation upgrade (#11235) * update attention Signed-off-by: dimapihtar * update docs to NeMo 2.0 Signed-off-by: dimapihtar * update usage Signed-off-by: dimapihtar * update parallelism Signed-off-by: dimapihtar * update parallelism docs Signed-off-by: dimapihtar * update parallelism docs Signed-off-by: dimapihtar * fix style Signed-off-by: dimapihtar * update to NeMo 2.0 Signed-off-by: dimapihtar * NeMo 2.0 update Signed-off-by: dimapihtar * NeMo 2.0 update Signed-off-by: dimapihtar * remove deprecated file Signed-off-by: dimapihtar * update in respect to NeMo 2.0 Signed-off-by: dimapihtar * fix hyperlinks Signed-off-by: dimapihtar * remove deprecated Signed-off-by: dimapihtar * remove deprecated Signed-off-by: dimapihtar * update documentation to NeMo 2.0 Signed-off-by: dimapihtar * fix typo Signed-off-by: dimapihtar * fix punctuation Signed-off-by: dimapihtar --------- Signed-off-by: dimapihtar --- docs/source/features/mixed_precision.rst | 90 ++++++- .../optimizations/attention_optimizations.rst | 35 ++- docs/source/features/parallelisms.rst | 248 +++++++++++++----- docs/source/nlp/nemo_megatron/batching.rst | 40 ++- .../nlp/nemo_megatron/gpt/gpt_training.rst | 150 ++--------- .../nemo_megatron/hiddens/hiddens_module.rst | 199 -------------- .../hiddens/images/hiddens-wb-logging.png | Bin 139984 -> 0 bytes .../nlp/nemo_megatron/retro/retro_model.rst | 167 ------------ 8 files changed, 328 insertions(+), 601 deletions(-) delete mode 100644 docs/source/nlp/nemo_megatron/hiddens/hiddens_module.rst delete mode 100644 docs/source/nlp/nemo_megatron/hiddens/images/hiddens-wb-logging.png delete mode 100644 docs/source/nlp/nemo_megatron/retro/retro_model.rst diff --git a/docs/source/features/mixed_precision.rst b/docs/source/features/mixed_precision.rst index b1ec196c567e..020a97ef465f 100644 --- a/docs/source/features/mixed_precision.rst +++ b/docs/source/features/mixed_precision.rst @@ -6,20 +6,61 @@ Mixed Precision Training Mixed precision training significantly enhances computational efficiency by conducting operations in low-precision format, while selectively maintaining minimal data in single-precision to preserve critical information throughout key areas of the network. NeMo Framework now supports FP16, BF16, and FP8 via Transformer Engine (TE) across most models. -Half-precision Training +Half-Precision Training ======================= NeMo Framework supports half-precision FP16 and BF16 computation training via Megatron Core and the distributed optimizer. This training recipe uses half-precision in all layer computation keeping the model states (optimizer states and master parameters) in single-precision. 
To avoid repeated data type casting at each layer computation, Megatron Core keeps a separate copy of half-precision parameters that is updated after each optimizer step. -Half-precision training is enabled when setting ``precision`` to either of ``fp16-mixed`` or ``bf16-mixed`` along with ``megatron_amp_O2=true``. -The parameter gradients are computed in the same half-precision, and the precision of gradient reduce-scatter across data-parallel GPUs can be set by ``optim.grad_sync_dtype``. +Half-precision training is enabled when setting trainer's ``plugins`` to either of ``fp16-mixed`` or ``bf16-mixed``. +The parameter gradients are computed in the same half-precision, and the precision of gradient reduce-scatter across data-parallel GPUs is set automatically according to the trainer's precision. + +Implement Half-Precision Training +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + import nemo_run as run + + from nemo import lightning as nl + from nemo.collections.llm.recipes.precision.mixed_precision import bf16_mixed, fp16_mixed + + trainer_args = {TRAINER_ARGS} + + # Set up trainer with bf16 precision + trainer_bf16 = run.Config( + nl.Trainer, + plugins=bf16_mixed(), + **trainer_args, + ) + + # Set up trainer with fp16 precision + trainer_fp16 = run.Config( + nl.Trainer, + plugins=fp16_mixed(), + **trainer_args, + ) + +It's also possible to change precision for a specific recipe: + +.. code-block:: python + + from functools import partial + + from nemo.collections import llm + from nemo.collections.llm.recipes.precision.mixed_precision import bf16_mixed, fp16_mixed + + # Load recipe + recipe = partial(llm.llama3_8b.pretrain_recipe)() + + # Change precision + recipe.trainer.plugins = fp16_mixed() FP8 Training ============ -NVIDIA H100 GPU introduced support for a new datatype, FP8 (8-bit floating point), enabling higher throughput of matrix multiplies and convolutions. NeMo Framework uses the NVIDIA `TransformerEngine `_ (TE) to leverage speedups from FP8. The following table summarizes the FP8 related arguments that can be configured in NeMo (`example config setting `_). For a more detailed overview, refer to the TE `documentation `_, specifically the FP8 `format `_ and `recipe `_. +NVIDIA H100 GPU introduced support for a new datatype, FP8 (8-bit floating point), enabling higher throughput of matrix multiplies and convolutions. NeMo Framework uses the NVIDIA `TransformerEngine `_ (TE) to leverage speedups from FP8. The following table summarizes the FP8-related arguments that can be configured in NeMo (`example config setting `_). For a more detailed overview, refer to the TE `documentation `_, specifically the FP8 `format `_ and `recipe `_. .. list-table:: FP8 arguments :widths: 10 20 @@ -27,25 +68,46 @@ NVIDIA H100 GPU introduced support for a new datatype, FP8 (8-bit floating point * - Argument - Description - * - transformer_engine - - TE and related functionality can be enabled by setting this boolean argument to True. If this argument is not set to True, all subsequent arguments will be ignored. * - fp8 - - Enables FP8 training. For transformer networks, the QKV, projection, FC1, and FC2 matrix multiplications are executed using the fourth-generation NVIDIA H100 Tensor Cores with FP8 support. - * - fp8_e4m3 - - Training recipe format for FP8. Activations, weights, and gradient tensors use the E4M3 format. - * - fp8_hybrid - - Training recipe format for FP8. 
Activations and weight tensors use the E4M3 format, whereas gradient use the E5M2 format to satisfy the additional dynamic range requirement for backward tensors. This is the default setting. + - The training recipe format for FP8 can be set to either 'hybrid' or 'e4m3', with 'hybrid' being the default. In the 'hybrid' format, activations and weight tensors use the E4M3 format, while gradients use the E5M2 format to meet the additional dynamic range requirements for backward tensors. * - fp8_margin - The scaling factor for FP8 tensors can be shifted by a factor of $2 ^ {margin}$ using this argument. * - fp8_amax_history_len - - Window size for amax history. The window size determines how many instances of the most recent absolute max values (amaxes) are stored per tensor. + - The window size for amax history. The window size determines how many instances of the most recent absolute max values (amaxes) are stored per tensor. * - fp8_amax_compute_algo - The choice between “max” and “most_recent” specifies how to select an amax value from the given history. - * - reduce_amax - - Indicates whether or not to perform an allreduce on the amax (absolute max) values for the FP8 tensors. Since the amax is directly used to compute the scaling factor for FP8 tensors, setting this argument ensures that the scaling factors for a tensor remain synchronized across devices in multi-GPU training configurations. * - fp8_params - Indicates whether to store module-level parameters in FP8. Enabling this option can reduce memory consumption by eliminating the need to store a copy of weights in higher precision for cases where these weights are externally maintained, such as master parameters in the optimizer. For more information, refer to the `fp8_model_init `_ API in TE. +Implement FP8 Training +^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + import nemo_run as run + + from nemo import lightning as nl + from nemo.collections.llm.recipes.precision.mixed_precision import bf16_with_fp8_mixed, fp16_with_fp8_mixed + + trainer_args = {TRAINER_ARGS} + fp8_args = {FP8_ARGS} + + # Set up trainer with bf16 & fp8 precision + trainer_bf16_fp8 = run.Config( + nl.Trainer, + plugins=bf16_with_fp8_mixed(), + **trainer_args, + **fp8_args, + ) + + # Set up trainer with fp16 & fp8 precision + trainer_fp16_fp8 = run.Config( + nl.Trainer, + plugins=fp16_with_fp8_mixed(), + **trainer_args, + **fp8_args, + ) + Resources ^^^^^^^^^ diff --git a/docs/source/features/optimizations/attention_optimizations.rst b/docs/source/features/optimizations/attention_optimizations.rst index d5ffe3c6fae8..4666343e5971 100644 --- a/docs/source/features/optimizations/attention_optimizations.rst +++ b/docs/source/features/optimizations/attention_optimizations.rst @@ -11,18 +11,18 @@ Flash attention is an algorithm designed to improve the efficiency of the attent Compared to the standard, non-flash algorithm, flash attention applies two techniques to lower the memory requirement and improve compute efficiency. -The tiling technique decomposes the inputs based on the shared memory size and calculates the softmax one tile at a time. Instead of working on the entire query, key, value tensors at once, it makes several passes at these tensors and then combines the results in a subsequent step. +The tiling technique decomposes the inputs based on the shared memory size and calculates the softmax one tile at a time. 
Instead of working on the entire query, key, and value tensors at once, it makes several passes at these tensors and then combines the results in a subsequent step. The recomputation technique stores the softmax normalization factors (linear to sequence length), instead of the softmax results (qudratic to sequence length), and uses these normalization factors to recompute the attention scores. This saves the amount of data to write to global memory and reduces both the memory requirement and I/O traffic between global memory and shared memory. -Flash attention lowers the memory footprint and computational complexity from quadratic to linear, and greatly extending the range of sequence length allowed in large language models. +Flash attention lowers the memory footprint and computational complexity from quadratic to linear, greatly extending the range of sequence length allowed in large language models. The flash attention algorithm was first propsed `here `_. Two of its implementations are `flash-attention `_ by Tri Dao *et al*, and `fused flash attention `_ by NVIDIA cuDNN. Turn Flash Attention On and Off ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In the NeMo framework, flash attention is supported through `Transformer Engine `_, including both of the implementations mentioned above. Transformer Engine selects the appropriate implementation based on input information such as sequence length, number of heads and head dimension. When both implementations are applicable, Transformer Engine prefers cuDNN flash attention on Hopper+ architectures and Tri Dao flash attention on Ampere architectures. +In the NeMo Framework, flash attention is supported through `Transformer Engine `_, including both of the implementations mentioned above. Transformer Engine selects the appropriate implementation based on input information such as sequence length, number of heads and head dimension. When both implementations are applicable, Transformer Engine prefers cuDNN flash attention on Hopper+ architectures and Tri Dao flash attention on Ampere architectures. To disable Tri Dao flash attention, set the environment variable ``NVTE_FLASH_ATTN=0``. To disable cuDNN flash attention, set ``NVTE_FUSED_ATTN=0``. @@ -54,24 +54,37 @@ Enable MQA and GQA To use MQA or GQA in the NeMo Framework, adjust the ``num_query_groups`` parameter in the model configuration: 1. **For Multi-query Attention (MQA)**: - - Set ``num_query_groups`` to `1` to treat all attention heads as a single group. + Set ``num_query_groups`` to `1` to treat all attention heads as a single group. - .. code-block:: yaml + .. code-block:: python - num_query_groups: 1 # Enables Multi-query Attention + from nemo.collections import llm + from functools import partial + + # Load train recipe + recipe = partial(llm.llama3_8b.pretrain_recipe)() + + recipe.model.config.num_query_groups = 1 # Enables Multi-query Attention 2. **For Grouped-query Attention (GQA)**: + - Set ``num_query_groups`` to a number that is a divisor of the total number of attention heads (more than one but less than the total heads). - .. code-block:: yaml + .. code-block:: python - num_query_groups: # Enables Grouped-query Attention + recipe.model.config.num_query_groups = # Enables Grouped-query Attention - For regular attention, set this parameter to `None` or match it with the number of heads. - .. code-block:: yaml + .. 
code-block:: python + + recipe.model.config.num_query_groups = None # Default setting for regular multihead attention + +It's also possible to set ``num_query_groups`` directly from CLI: - num_query_groups: null # Default setting for regular multihead attention + .. code-block:: bash + + nemo llm pretrain --factory llama3_8b model.config.num_query_groups=8 Adjust the ``num_query_groups`` to explore different attention mechanisms and optimize your model's performance based on specific needs. @@ -80,4 +93,4 @@ Implement MQA or GQA NeMo's support for GQA and MQA is enabled through the integration of Megatron Core's Attention mechanism. The underlying implementation details can be explored within the Attention class of Megatron Core, which provides the functional backbone for these advanced attention methods. To understand the specific modifications and implementations of MQA and GQA, refer to the source code in the Attention class: -Check implementation details from Attention Class in Megatron Core Repo: https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/transformer/attention.py#L49. +To check implementation details from the Attention Class in Megatron Core Repo, please refer to https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/transformer/attention.py#L49. diff --git a/docs/source/features/parallelisms.rst b/docs/source/features/parallelisms.rst index c14f94eac6a0..12ed55d338f7 100644 --- a/docs/source/features/parallelisms.rst +++ b/docs/source/features/parallelisms.rst @@ -41,54 +41,83 @@ Enable Data Parallelism In NeMo Framework, DDP is the default parallel deployment method. This means that the total number of GPUs corresponds to the size of the DP group, and training an LLM with model parallelism decreases the size of the DP group. -Currently, NeMo Framework supports optimizer distribution only for Adam optimizer. -To enable the distributed adam optimizer, set -``model.optim.name=distributed_fused_adam`` in the model -configuration. It can be configured with the following options: - -=========================== ========= ================================================================================================================================== -Option Default Description -=========================== ========= ================================================================================================================================== -``dtype`` fp32 Optimizer state datatype -``grad_sync_dtype`` ``dtype`` Gradient reduce-scatter datatype -``overlap_grad_sync`` True Overlap gradient reduce-scatter with compute -``overlap_param_sync`` False Overlap parameter all-gather with compute -``bucket_cap_mb`` 100 Buffer size (in MiB) for internal state and workspaces. Larger buckets have lower runtime overheads but may increase memory usage. -``contiguous_param_buffer`` False Allocate parameters as views into a large buffer. Helps avoid some data copies. -``contiguous_grad_buffer`` True Allocate parameter gradients as views into a large buffer. Helps avoid some data copies. -=========================== ========= ================================================================================================================================== - -See the keyword arguments in `Apex DistributedFusedAdam `_ and `NeMo MegatronDistributedFusedAdam `_ for a full list of distributed optimizer options. 
- -Implement Data Parallelism -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -DDP in NeMo Framework uses either PyTorch -`DistributedDataParallel `_ -(default) or a custom implementation (if custom multi-precision -training is enabled with ``megatron_amp_O2``). - -The distributed optimizer in NeMo Framework is built on top of -`DistributedFusedAdam `_ -from Apex. - -Fully-Shared Data Parallelism -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -NeMo Framework supports Fully-Sharded Data Parallelism (FSDP), which shards parameter gradients and low-precision parameters for computation. This is in addition to the model states that the distributed optimizer shards, including optimizer states and high-precision parameters. -Since FSDP shards the entire model states, it ensures linear model state memory savings with increasing DP size. -FSDP is preferred for LLM training with unbalanced workloads between pipeline stages (or Transformer layers) or with a large vocabulary size, where pipelining would cause significant computation bubbles due to workload imbalance. -Additionally, FSDP eliminates the need to search for performance-optimal mappings with 3D parallelism (TP/PP/DP) because it operates within a single parallelization domain. - - -NeMo Framework uses `PyTorch's FSDP interface `_ to shard LLM model states, flattening the parameters of each transformer layer and partitioning them across data-parallel GPUs. -FSDP introduces collective operations across data-parallel GPUs, including all-gather for parameter computation and reduce-scatter for parameter gradients. -The all-gather operation occurs during both the network forward and back-propagation phases, while the gradient reduce-scatter operation happens only during back-propagation. -These FSDP communications are overlapped with transformer layer computations. - -Setting ``fsdp=true`` enables FSDP. -The mixed precision recipe can be set by ``precision`` knob, which determines both the computation and communication precisions. -Also, one can use ``grad_reduce_dtype`` to override the gradient reduction precision specifically. +Currently, the NeMo Framework supports optimizer distribution only for the Megatron Core Adam distributed optimizer. +To enable the distributed adam optimizer, set up ``distributed_fused_adam_with_cosine_annealing`` optimizer recipe from ``nemo.collections.llm.recipes.optim.adam`` or you can create your own optimizer recipe. + +.. 
code-block:: python + + # Use optimizer recipe created by NeMo team + from nemo.collections.llm.recipes.optim.adam import distributed_fused_adam_with_cosine_annealing + + optim = distributed_fused_adam_with_cosine_annealing(max_lr=3e-4) + optim.config.bf16 = True + + # Create your own optimizer recipe with cosine annealing scheduler + import nemo_run as run + from megatron.core.optimizer import OptimizerConfig + + from nemo.lightning.pytorch.optim import CosineAnnealingScheduler, MegatronOptimizerModule, PytorchOptimizerModule + + @run.cli.factory + def distributed_optimizer_recipe( + precision: str = "bf16-mixed", # or "16-mixed" + warmup_steps: int = 1000, + constant_steps: int = 1000, + adam_beta1: float = 0.9, + adam_beta2: float = 0.95, + max_lr: float = 1e-4, + min_lr: float = 1e-5, + clip_grad: float = 1.0, + ) -> run.Config[PytorchOptimizerModule]: + + opt_cfg = run.Config( + OptimizerConfig, + optimizer="adam", + lr=max_lr, + weight_decay=0.1, + bf16=precision == "bf16-mixed", + fp16=precision == "16-mixed", + adam_beta1=adam_beta1, + adam_beta2=adam_beta2, + adam_eps=1e-5, + use_distributed_optimizer=True, + clip_grad=clip_grad, + ) + + sched = run.Config( + CosineAnnealingScheduler, + warmup_steps=warmup_steps, + constant_steps=constant_steps, + min_lr=min_lr, + ) + + return run.Config( + MegatronOptimizerModule, + config=opt_cfg, + lr_scheduler=sched, + ) + +For more optimzier options, please visit `this page `_. + +.. + FSDP is not supported in NeMo 2.0 yet. + Fully-Shared Data Parallelism + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + NeMo Framework supports Fully-Sharded Data Parallelism (FSDP), which shards parameter gradients and low-precision parameters for computation. This is in addition to the model states that the distributed optimizer shards, including optimizer states and high-precision parameters. + Since FSDP shards the entire model states, it ensures linear model state memory savings with increasing DP size. + FSDP is preferred for LLM training with unbalanced workloads between pipeline stages (or Transformer layers) or with a large vocabulary size, where pipelining would cause significant computation bubbles due to workload imbalance. + Additionally, FSDP eliminates the need to search for performance-optimal mappings with 3D parallelism (TP/PP/DP) because it operates within a single parallelization domain. + + + NeMo Framework uses `PyTorch's FSDP interface `_ to shard LLM model states, flattening the parameters of each transformer layer and partitioning them across data-parallel GPUs. + FSDP introduces collective operations across data-parallel GPUs, including all-gather for parameter computation and reduce-scatter for parameter gradients. + The all-gather operation occurs during both the network forward and back-propagation phases, while the gradient reduce-scatter operation happens only during back-propagation. + These FSDP communications are overlapped with transformer layer computations. + + Setting ``fsdp=true`` enables FSDP. + The mixed precision recipe can be set by ``precision`` knob, which determines both the computation and communication precisions. + Also, one can use ``grad_reduce_dtype`` to override the gradient reduction precision specifically. Model Parallelism @@ -116,11 +145,22 @@ To enable TP in the NeMo Framework, configure the ``tensor_model_parallel_size`` Set ``tensor_model_parallel_size`` to greater than ``1`` to enable intra-layer model parallelism. - .. code-block:: yaml + .. 
code-block:: python + + from nemo.collections import llm + from functools import partial + + # Load train recipe + recipe = partial(llm.llama3_8b.pretrain_recipe)() - tensor_model_parallel_size: 1 # Example to enable Tensor Parallelism + # Set tensor model parallel size + recipe.trainer.strategy.tensor_model_parallel_size = 2 -The configuration file can be adjusted here: `NeMo Megatron GPT Config `__. +Set tensor parallelism directly from CLI: + + .. code-block:: bash + + nemo llm pretrain --factory llama3_8b trainer.strategy.tensor_model_parallel_size=2 Implement Tensor Parallelism ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -129,15 +169,18 @@ NeMo Framework integrates TP through the implementation from Megatron Core. To u For detailed API usage and additional configurations, consult the `Megatron Core Developer Guide `_. -FSDP with Tensor Parallelism -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. + FSDP is not supported in NeMo 2.0 yet. + + FSDP with Tensor Parallelism + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -NeMo Framework supports FSDP along with TP. This is done by restricting the model state sharding to the data-parallel domain. -Using FSDP with TP can be helpful when the model doesn't have sufficient parallelism to deploy on a large-scale training system with the data-parallel mapping. For example, running a model with the global batch size of 1024 on 2048 GPUs. -Also, TP enables FSDP feasibility by reducing the model state size and the activation size per GPU, thus lower the FSDP communication overhead and the activation memory overhead. + NeMo Framework supports FSDP along with TP. This is done by restricting the model state sharding to the data-parallel domain. + Using FSDP with TP can be helpful when the model doesn't have sufficient parallelism to deploy on a large-scale training system with the data-parallel mapping. For example, running a model with the global batch size of 1024 on 2048 GPUs. + Also, TP enables FSDP feasibility by reducing the model state size and the activation size per GPU, thus lower the FSDP communication overhead and the activation memory overhead. -Using both FSDP and TP works by enabling FSDP (``fsdp=true``) and setting ``tensor_model_parllel_size > 1``. -Unset the ``CUDA_DEVICE_MAX_CONNECTIONS`` environment variable to set the number of GPU kernel queues, allowing the overlap of FSDP communication with computation kernels. + Using both FSDP and TP works by enabling FSDP (``fsdp=true``) and setting ``tensor_model_parllel_size > 1``. + Unset the ``CUDA_DEVICE_MAX_CONNECTIONS`` environment variable to set the number of GPU kernel queues, allowing the overlap of FSDP communication with computation kernels. Pipeline Parallelism ^^^^^^^^^^^^^^^^^^^^ @@ -157,20 +200,45 @@ To utilize Pipeline Parallelism (PP) in NeMo Framework, set the ``pipeline_model Set ``pipeline_model_parallel_size`` to a value greater than ``1`` to enable inter-layer model parallelism. - .. code-block:: yaml +.. code-block:: python - pipeline_model_parallel_size: 1 # Example to enable Pipeline Parallelism + from nemo.collections import llm + from functools import partial + + # Load train recipe + recipe = partial(llm.llama3_8b.pretrain_recipe)() -Adjust the configuration accordingly here: `NeMo Megatron GPT Config `__. + # Set pipeline model parallel size + recipe.trainer.strategy.pipeline_model_parallel_size = 2 + +Set pipeline parallelism directly from CLI: + + .. 
code-block:: bash + + nemo llm pretrain --factory llama3_8b trainer.strategy.pipeline_model_parallel_size=2 Interleaved Pipeline Parallel Schedule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To minimize the pipeline bubble, the computation on each GPU can be divided into multiple subsets of layers (referred to as model chunks), rather than a single contiguous block. For instance, instead of each GPU processing a continuous set of four layers, it might handle two model chunks with two layers each. + + .. code-block:: python + + from nemo.collections import llm + from functools import partial + + # Load train recipe + recipe = partial(llm.llama3_8b.pretrain_recipe)() - .. code-block:: yaml + # Set pipeline model parallel size > 1 and enable interleaved pipeline + recipe.trainer.strategy.pipeline_model_parallel_size = 2 + recipe.trainer.strategy.virtual_pipeline_model_parallel_size = 2 - virtual_pipeline_model_parallel_size: 2 # Set for interleaved pipeline +Enable interleaved pipeline directly from CLI: + + .. code-block:: bash + + nemo llm pretrain --factory llama3_8b trainer.strategy.pipeline_model_parallel_size=2 trainer.strategy.virtual_pipeline_model_parallel_size=2 For more insights into this approach, see our detailed blog: `Scaling Language Model Training `_. @@ -194,11 +262,24 @@ Unlike other model-parallel techniques, EP is applied to only the expert layers Enable Expert Parallelism ~~~~~~~~~~~~~~~~~~~~~~~~~ -To enable EP, set ``model.expert_model_parallel_size`` to the expert parallel size you want. For example, if the model has six experts (``model.num_moe_experts=6``), then setting ``model.expert_model_parallel_size=3`` results in each GPU processing two experts. The number of experts should be divisible by the expert parallel size. +To enable EP, set ``model.expert_model_parallel_size`` to the expert parallel size you want. For example, if the model has eight experts (``num_moe_experts=8``), then setting ``expert_model_parallel_size=4`` results in each GPU processing two experts. The number of experts should be divisible by the expert parallel size. + + .. code-block:: python + + from nemo.collections import llm + from functools import partial + + # Load train recipe + recipe = partial(llm.mixtral_8x7b.pretrain_recipe)() - .. code-block:: yaml + # Set expert model parallel size + recipe.trainer.strategy.expert_model_parallel_size = 4 - expert_model_parallel_size: 3 # Set EP to 3 +Set expert parallelism directly from CLI: + + .. code-block:: bash + + nemo llm pretrain --factory mixtral_8x7b trainer.strategy.expert_model_parallel_size=4 For further information on configuration, refer to the following documentation: `NeMo Megatron GPT Config `__. @@ -230,11 +311,23 @@ Enable Sequence Parallelism To utilize SP in NeMo Framework, set the ``sequence_parallel`` parameter to ``True`` in the model's configuration. Note that this feature is effective only when the tensor parallel size (``tensor_model_parallel_size``) is greater than ``1``. - .. code-block:: yaml + .. 
code-block:: python + + from nemo.collections import llm + from functools import partial + + # Load train recipe + recipe = partial(llm.llama3_8b.pretrain_recipe)() + + # Set tensor model parallel size and enable sequence parallelism + recipe.trainer.strategy.tensor_model_parallel_size = 2 + recipe.trainer.strategy.sequence_parallelism = True - sequence_parallel: True # Enable Sequence Parallelism +Enable sequence parallelism directly from CLI: -For further information on configuration, refer to the following documentation: `NeMo Megatron GPT Config `__. + .. code-block:: bash + + nemo llm pretrain --factory llama3_8b trainer.strategy.tensor_model_parallel_size=2 trainer.strategy.sequence_parallelism=True Implement Sequence Parallelism ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -254,9 +347,22 @@ To activate CP in the NeMo Framework, set the ``context_parallel_size`` paramete Set ``context_parallel_size`` to a value greater than ``1`` to enable sequence-wide model parallelism. - .. code-block:: yaml + .. code-block:: python + + from nemo.collections import llm + from functools import partial + + # Load train recipe + recipe = partial(llm.llama3_8b.pretrain_recipe)() + + # Set context parallel size + recipe.trainer.strategy.context_parallel_size = 2 + +Set ``context_parallel_size`` directly from CLI: - context_parallel_size: 1 # Example to enable Context Parallelism + .. code-block:: bash + + nemo llm pretrain --factory llama3_8b model.config.context_parallel_size=2 The configuration can be found and modified here: `NeMo Megatron Core Context Config `_. diff --git a/docs/source/nlp/nemo_megatron/batching.rst b/docs/source/nlp/nemo_megatron/batching.rst index b7d6ea213067..c545b6ab6dc6 100644 --- a/docs/source/nlp/nemo_megatron/batching.rst +++ b/docs/source/nlp/nemo_megatron/batching.rst @@ -3,19 +3,41 @@ Batching -------- -Batch size is one of the first parameters you should play with. For efficiency and convergence reasons we recommend you first try maximizing your batch size per GPU so that your GPU RAM usage is maximized. +Batch size is one of the first parameters you should adjust. For efficiency and convergence, we recommend first maximizing your batch size per GPU to fully utilize your GPU RAM. -NeMo Megatron uses the following concepts. +NeMo Framework uses the following parameters: -*Micro batch size* is the number of examples per data parallel rank. It is controlled by ``model.micro_batch_size`` parameter. +=========================== ================================================================================================================================== +Parameter Description +=========================== ================================================================================================================================== +Micro Batch Size The number of examples per data parallel rank. +Global Batch Size The global batch size is calculated as: ``global batch size = micro_batch_size * data_parallel_size * gradient_accumulation_steps``. For details on ``data_parallel_size`` see `this page `_. +Gradient Accumulation This parameter supports training with large batch sizes while maintaining a fixed memory footprint, though it requires additional compute. The ``accumulate_grad_batches`` is automatically managed by PyTorch Lightning. 
+=========================== ================================================================================================================================== -*Global batch size* = micro_batch_size * data_parallel_size * gradient_accumulation_steps. For details on ``data_parallel_size`` see :ref:`parallelisms` section, but typically it is equal to the number of GPUs being used. -Global batch size is controlled by ``model.global_batch_size`` parameter. +Set the Batching Parameters +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The following example shows how to set up a pretraining recipe and batching parameters for a LLaMA-3 8B model: -*Gradient Accumulation* + .. code-block:: python - * Idea: Train with large batch sizes with fixed memory footprint at the cost of additional compute. - * Do k forward and backward passes through the network with different batches, do not perform parameter updates until after k passes. - * Update paramters + from nemo.collections import llm + from functools import partial + + # Load train recipe + recipe = partial(llm.llama3_8b.pretrain_recipe)() + + # Set micro and global batch size + recipe.data.micro_batch_size = 4 + recipe.data.global_batch_size = 16 + + # Set accumulate_grad_batches + recipe.trainer.accumulate_grad_batches = 1 + +Set batching parameters directly from CLI: + + .. code-block:: bash + + nemo llm pretrain --factory llama3_8b data.micro_batch_size=4 data.global_batch_size=16 trainer.accumulate_grad_batches=1 diff --git a/docs/source/nlp/nemo_megatron/gpt/gpt_training.rst b/docs/source/nlp/nemo_megatron/gpt/gpt_training.rst index a5914882da76..dbe8707c027c 100644 --- a/docs/source/nlp/nemo_megatron/gpt/gpt_training.rst +++ b/docs/source/nlp/nemo_megatron/gpt/gpt_training.rst @@ -4,20 +4,16 @@ GPT Model Training The Generative Pre-trained Transformer (GPT) is a decoder-only Transformer model. This section demonstrates how to train a GPT-style model with NeMo. - - - - .. note:: This example is best completed using the latest NeMo Framework Training container ``_. -Download and Pre-process Data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Download and Preprocess the Data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: The example below will take approximately 3 hours to download data, pre-process it, and train the tokenizer. -1. Download data. +1. Download the data. The following step will download approximately 20GB of Wikipedia data, which can take several hours to complete. @@ -25,7 +21,7 @@ The following step will download approximately 20GB of Wikipedia data, which can wget https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2 -2. Extract raw data. +2. Extract the raw data. .. code-block:: bash @@ -36,13 +32,13 @@ The following step will download approximately 20GB of Wikipedia data, which can Now, train_data.jsonl will contain our training data in JSON line format. We are particularly interested in the data within the "text" field. -3. Train tokenizer. +3. Train the tokenizer. Below, we will consider two options for training data tokenizers: using the pre-built Hugging Face BPE or training and using your own Google Sentencepiece tokenizer. Note that only the second option allows you to experiment with vocabulary size. -*Option 1:* Use Hugging Face GPT2 tokenizer files. +*Option 1:* Use the Hugging Face GPT2 tokenizer files. With this option, we will download a pre-built vocabulary and merge the files for the BPE tokenizer. 
@@ -52,7 +48,7 @@ With this option, we will download a pre-built vocabulary and merge the files fo wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt -*Option 2:* Use `Google Sentencepiece `_ tokenizer library. +*Option 2:* Use the `Google Sentencepiece `_ tokenizer library. Google Sentencepiece is included as a dependency with NeMo, so if you have installed NeMo, it should already be installed. Please note that training the tokenizer model will also take some time. @@ -72,11 +68,11 @@ Please note that training the tokenizer model will also take some time. Completing this step can take some time. After it is done, you'll have two files: ``spm_32k_wiki.model`` and ``spm_32k_wiki.vocab`` corresponding to the model and vocabulary. -4. Convert training data into memory map format. +4. Convert the training data into memory map format. The memory map format makes training more efficient, especially with many nodes and GPUs. This step will also tokenize data using the tokenizer model from Step 3. -*Option 1:* Use Hugging Face GPT2 tokenizer files. +*Option 1:* Use the Hugging Face GPT2 tokenizer files. .. code-block:: bash @@ -92,7 +88,7 @@ The memory map format makes training more efficient, especially with many nodes --append-eod \ --workers=32 -*Option 2:* Use `Google Sentencepiece `_ tokenizer library. +*Option 2:* Use the `Google Sentencepiece `_ tokenizer library. .. code-block:: bash @@ -106,130 +102,24 @@ The memory map format makes training more efficient, especially with many nodes --workers=32 -Train a GPT-Style Model -~~~~~~~~~~~~~~~~~~~~~~~ - -Once you have prepared training data and tokenizer, you are ready to train the model. -The configuration we present below has about 124M parameters and should fit on a single 16GB GPU using float16. -Let's go! +Create a Custom Training Recipe +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*Option 1:* Use Hugging Face GPT2 tokenizer files. +To train a model with NeMo 2.0, a training recipe is required. You can refer to `this tutorial `_ +To learn how to create a custom training recipe or use an existing one, refer to the `LLM recipes `_ developed by NeMo team. -.. 
code-block:: bash - python /examples/nlp/language_modeling/megatron_gpt_pretraining.py \ - --config-path=/examples/nlp/language_modeling/conf \ - --config-name=megatron_gpt_config \ - trainer.devices=1 \ - trainer.num_nodes=1 \ - trainer.max_epochs=null \ - trainer.max_steps=300000 \ - trainer.val_check_interval=300 \ - trainer.log_every_n_steps=50 \ - trainer.limit_val_batches=50 \ - trainer.limit_test_batches=50 \ - trainer.accumulate_grad_batches=1 \ - trainer.precision=16 \ - model.micro_batch_size=6 \ - model.global_batch_size=192 \ - model.tensor_model_parallel_size=1 \ - model.pipeline_model_parallel_size=1 \ - model.max_position_embeddings=1024 \ - model.encoder_seq_length=1024 \ - model.hidden_size=768 \ - model.ffn_hidden_size=3072 \ - model.num_layers=12 \ - model.num_attention_heads=12 \ - model.init_method_std=0.021 \ - model.hidden_dropout=0.1 \ - model.layernorm_epsilon=1e-5 \ - model.tokenizer.vocab_file=gpt2-vocab.json \ - model.tokenizer.merge_file=gpt2-merges.txt \ - model.data.data_prefix=[1.0,hfbpe_gpt_training_data_text_document] \ - model.data.num_workers=2 \ - model.data.seq_length=1024 \ - model.data.splits_string=\'980,10,10\' \ - model.optim.name=fused_adam \ - model.optim.lr=6e-4 \ - model.optim.betas=[0.9,0.95] \ - model.optim.weight_decay=0.1 \ - model.optim.sched.name=CosineAnnealing \ - model.optim.sched.warmup_steps=750 \ - model.optim.sched.constant_steps=80000 \ - model.optim.sched.min_lr=6e-5 \ - exp_manager.resume_if_exists=True \ - exp_manager.resume_ignore_no_checkpoint=True \ - exp_manager.create_checkpoint_callback=True \ - exp_manager.checkpoint_callback_params.monitor=val_loss \ - exp_manager.checkpoint_callback_params.save_top_k=3 \ - exp_manager.checkpoint_callback_params.mode=min \ - exp_manager.checkpoint_callback_params.always_save_nemo=False - - -*Option 2:* Use `Google Sentencepiece `_ tokenizer library. - -.. 
code-block:: bash - - python /examples/nlp/language_modeling/megatron_gpt_pretraining.py \ - --config-path=/examples/nlp/language_modeling/conf \ - --config-name=megatron_gpt_config \ - trainer.devices=1 \ - trainer.num_nodes=1 \ - trainer.max_epochs=null \ - trainer.max_steps=300000 \ - trainer.val_check_interval=300 \ - trainer.log_every_n_steps=50 \ - trainer.limit_val_batches=50 \ - trainer.limit_test_batches=50 \ - trainer.accumulate_grad_batches=1 \ - trainer.precision=16 \ - model.micro_batch_size=6 \ - model.global_batch_size=192 \ - model.tensor_model_parallel_size=1 \ - model.pipeline_model_parallel_size=1 \ - model.max_position_embeddings=1024 \ - model.encoder_seq_length=1024 \ - model.hidden_size=768 \ - model.ffn_hidden_size=3072 \ - model.num_layers=12 \ - model.num_attention_heads=12 \ - model.init_method_std=0.021 \ - model.hidden_dropout=0.1 \ - model.layernorm_epsilon=1e-5 \ - model.tokenizer.library=sentencepiece \ - model.tokenizer.model=spm_32k_wiki.model \ - model.data.data_prefix=[1.0,gpt_training_data_text_document] \ - model.data.num_workers=2 \ - model.data.seq_length=1024 \ - model.data.splits_string=\'980,10,10\' \ - model.optim.name=fused_adam \ - model.optim.lr=6e-4 \ - model.optim.betas=[0.9,0.95] \ - model.optim.weight_decay=0.1 \ - model.optim.sched.name=CosineAnnealing \ - model.optim.sched.warmup_steps=750 \ - model.optim.sched.constant_steps=80000 \ - model.optim.sched.min_lr=6e-5 \ - exp_manager.resume_if_exists=True \ - exp_manager.resume_ignore_no_checkpoint=True \ - exp_manager.create_checkpoint_callback=True \ - exp_manager.checkpoint_callback_params.monitor=val_loss \ - exp_manager.checkpoint_callback_params.save_top_k=3 \ - exp_manager.checkpoint_callback_params.mode=min \ - exp_manager.checkpoint_callback_params.always_save_nemo=False - - -Next, you can launch Tensorboard to monitor training, as follows: - -.. code-block:: bash +Train a Model +~~~~~~~~~~~~~ - tensorboard --logdir nemo_experiments --bind_all +Once you have prepared the training data, tokenizer, and recipe, you are ready to train the model. You can follow `this tutorial `_ +To train a model using an existing recipe or a custom one, follow `this tutorial `_ to train a model with a custom recipe. Next Steps ~~~~~~~~~~ For more information, please refer to: -* :ref:`batching` section for batch size adjustments -* :ref:`parallelisms` section for understanding various types of parallelisms +* `batching `_ section for batch size adjustments. +* `parallelisms `_ section for understanding various types of parallelisms. diff --git a/docs/source/nlp/nemo_megatron/hiddens/hiddens_module.rst b/docs/source/nlp/nemo_megatron/hiddens/hiddens_module.rst deleted file mode 100644 index 0811252393e8..000000000000 --- a/docs/source/nlp/nemo_megatron/hiddens/hiddens_module.rst +++ /dev/null @@ -1,199 +0,0 @@ -Hiddens Module -============== - -The hiddens module allows to add **hidden transformations** and **hidden losses** to Megatron encoder-decoder models. -Hidden transformations are transformations that are applied to the output of the encoder. -Hidden losses are losses that are applied to the outputs of the hidden transformations. -A common use case for hidden transformations is to train a Mutual Information Machine (MIM) -or a Variational Auto-Encoder (VAE) models. - -Quick Start ------------ - -Below is an example command of training a MIM model with BART data augmentation (i.e., includes masking the input): - -.. 
code-block:: bash - - python examples/nlp/language_modeling/megatron_bart_pretraining.py \ - trainer.devices=2 \ - trainer.accelerator=gpu \ - trainer.log_every_n_steps=1 \ - trainer.val_check_interval=10 \ - trainer.limit_val_batches=2 \ - trainer.accumulate_grad_batches=1 \ - trainer.max_steps=10 \ - trainer.precision=16 \ - trainer.gradient_clip_val=1.0 \ - exp_manager.exp_dir=/results/megatron_mim \ - model.micro_batch_size=2 \ - model.global_batch_size=4 \ - model.seq_length=128 \ - model.encoder.num_layers=4 \ - model.encoder.hidden_size=64 \ - model.encoder.arch=perceiver \ - model.encoder.num_attention_heads=8 \ - model.decoder.num_layers=4 \ - model.decoder.hidden_size=64 \ - model.decoder.num_attention_heads=8 \ - model.data.data_impl=text_mmap \ - model.data.data_prefix=[1.0,/data/wiki.txt] \ - model.data.splits_string=\'\"800,100,100\"\' \ - model.data.whole_word_masking=False \ - model.tokenizer.library=sentencepiece \ - model.tokenizer.model=/data/spm_64k_all_langs_plus_en.model \ - ++model.hiddens.enc_output_name=z \ - ++model.hiddens.transform.q_z_given_x.cls_name=cond_gaussian \ - ++model.hiddens.transform.q_z_given_x.hidden_size=64 \ - ++model.hiddens.loss.mim.cls_name=a_mim \ - ++model.hiddens.loss.mim.loss_weight=1.0 - -The last 5 lines in the above command enable sampling with reparameterization (`cond_gauss` hidden transformation) -and MIM loss where the hidden part of the loss is weighted by `1.0`. - -The above example will produce the following plots in Weights and Biases: - -.. image:: images/hiddens-wb-logging.png - :align: center - :width: 800px - :alt: MIM training W&B plots - -See below detailed description of usage and configuration format. - -High Level Description ----------------------- - -Megatron encoder-decoder models directly pass the output of the encoder to the decoder. -The hidden transformations provides a mechanism to add transformations and losses to the encoder outputs. -This is achieved my naming the output of the encoder (`hiddens`) and any provided hidden transformation. -Each hidden transformation is defined over expected existing outputs and produces a new set of outputs. -This allows us to define losses on any of the named outputs (i.e., the outputs of the encoder or any of the transformations). - - -Detailed Description --------------------- - -Features -^^^^^^^^ - -1. Hidden transformations and losses can be added to any Megatron encoder-decoder model. -2. Externally implemented transformations and losses can easily be registered and used. -3. Transformations (and losses) order is supported to allow one transformation to use the output of another. -4. All transformations' outputs are named, allowing for easy access in losses or other transformations (encoder raw output defaults to `hiddens`, and respective `hiddens_mask`). -5. All loss outputs are logged, allowing for easy monitoring of the training and validation process. -6. Transformations' outputs can be used in more than one loss. -7. The joint loss supports weighting the terms, and is computed as follows: `loss = hiddens.tokens_loss_weight * tokens_loss_weight + \sum_i hiddens.loss[i].loss_weight * hiddens.loss[i].loss`. -8. Detailed error messages are provided. Please check raised exceptions and log outputs. Errors will be raised if: - - * The same named output is used more than once. - * A loss is expected an undefined named output. - * A Mismatch in a transformation or loss constructor parameters. 
-
-
-Configuring Hidden Transformations and Losses
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A detailed example can be found in : `NeMo/examples/nlp/language_modeling/conf/megatron_hiddens_base_config.yaml `__.
-Below is the content of the config file above:
-
-.. code-block:: yaml
-
-    # this file main purpose is documentation, and it should not be used directly
-    enc_output_name: z # name of key in hidden transforms output to pass to decoder (default: hiddens). e.g., z for VAE/MIM.
-    tokens_loss_weight: 1.0 # weight of tokens loss (if not specified defaults to 1.0)
-    # the lists below are useful for adding multiple transforms and losses according to order
-    # if order is not important, you can use a single dictionary in the list with multiple keys
-    transform: # a list of dictionaries of transforms (or a joint dictionary) to apply to hiddens (list enforces order)
-      # - : # name of transform
-      #     cls_name: # class name
-      #     : # transform parameters
-      #     ...
-
-      - q_z_given_x: # Gaussian posterior with reparameterization
-          cls_name: cond_gaussian # class name
-          hidden_size: 512 # hidden size of the encoder
-          min_logvar: -6.0 # minimum log variance
-
-        logP_cls: # logP classifier logits
-          cls_name: guided_cls
-          input_name: hiddens
-          attr_name: logP
-        QED_cls: # QED classifier logits
-          cls_name: guided_cls
-          input_name: hiddens
-          attr_name: QED
-    loss: # a list of dictionaries of loss terms (or a joint dictionary) to add to reconstruction loss (list enforces order)
-      # - : # name of loss
-      #     cls_name: # class name
-      #     : # loss parameters
-      #     ...
-      # below is example where order of losses does not matter so a single dictionary is enough
-      mim: # A-MIM example
-        cls_name: a_mim
-        loss_weight: 1.0 # weight of the MIM latent loss
-      vae: # VAE example
-        cls_name: vae
-        min_kl_value: null # minimum KL value if a float is provided
-        loss_weight: 1e-2 # weight of KL term in loss
-      logP_cls: # logP classifier loss (cross entropy)
-        cls_name: guided_cls_loss
-        input_name: logP
-        loss_weight: 0.1
-      QED_cls: # QED classifier loss (cross entropy)
-        cls_name: guided_cls_loss
-        input_name: logP
-        loss_weight: 0.1
-
-
-Listing Registered Hidden Transformations and Losses
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The hidden transformations and losses are should be registered in the `hiddens` module.
-To check available (i.e., registered) transformation and losses use the following python code:
-
-.. code-block:: python
-
-    from nemo.collections.nlp.modules.common.hiddens import get_registered_hiddens
-
-    # List all registered hidden transformations and losses
-    print(get_registered_hiddens())
-    # {
-    #     "loss": ["a_mim", "vae"],
-    #     "transform": ["cond_gaussian"],
-    # }
-
-
-Implementing and Registering a Custom Hidden Transformation or Loss
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Implementing a custom hidden transformation or loss is straightforward.
-
-* Example for a hidden transformation: `NeMo/nemo/collections/nlp/modules/common/megatron/hiddens/megatron_hidden_transform.py `__.
-* Examples for hidden losses: `NeMo/nemo/collections/nlp/modules/common/megatron/hiddens/megatron_hidden_loss.py `__.
-
-Generally speaking, the custom hidden transformation or loss should inherit from `MegatronBaseHiddenTransform` or `MegatronBaseHiddenLoss` respectively.
-Before using the classes, they should be registered in the `hiddens` module as described above.
-
-.. code-block:: python
-
-    from nemo.collections.nlp.modules.common.hiddens import (
-        register_hidden_loss,
-        register_hidden_transform,
-        MegatronBaseHiddenTransform,
-        MegatronBaseHiddenLoss,
-    )
-
-    class MyTransform(MegatronBaseHiddenTransform):
-        ...
-
-    class MyLoss(MegatronBaseHiddenLoss):
-        ...
-
-    # Registering a new hidden transformation MyTransform
-    # e.g., class_path = "nemo.collections.nlp.modules.common.hiddens.MyTransform"
-    class_path = MyTransform.__module__ + '.' + MyTransform.__qualname__
-    # The command below will allow the use of `my_transform` as a config `cls_name` value for a transformation
-    register_hidden_transform(cls_name="my_transform", class_path=MyTransform)
-
-    # Registering a new hidden loss MyLoss
-    # e.g., class_path = "nemo.collections.nlp.modules.common.hiddens.MyLoss"
-    class_path = MyLoss.__module__ + '.' + MyLoss.__qualname__
-    # The command below will allow the use of `my_loss` as a config `cls_name` value for a loss
-    register_hidden_loss(cls_name="my_loss", class_path=MyLoss)
diff --git a/docs/source/nlp/nemo_megatron/hiddens/images/hiddens-wb-logging.png b/docs/source/nlp/nemo_megatron/hiddens/images/hiddens-wb-logging.png
deleted file mode 100644
index e9016a26d28fbc94a83f1cadf8affc57b8dc7a47..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 139984
[binary image data omitted]
z=X^e~eX>1mKDPgxg$%}o>xZX=BGzaQDNV{j)-~zL?Ioi1g^rnH@Rj8$v@g8=;TIO* zw68p;DHDZ%L+vdvCz*zTo~d|YU#gmIJ~SU#qY(DIOUGmyd0%zGC;s{O&{@pc|D4gB zlZ2Q4CbX1(E7ZIfG>+QaPeKDw`1b*%bn#xAf2SRSIS$6o&f0X9Put{;-W(MUcb{B& z-R`Ppa)ZnQFv?tVNwFlPIOAx}cECRffC-t`3GFZ-Hs)XoCU2Ygw=A#YgMg-oq-3@z z_;1x5wBSpVTBng)ZW#ty{}H9)R~S!-x*?Zl3|F)XJ$IEL8#i$f2lqBWne4+`t@2N_6 zZBSd5uA8c0-PHi5c63n(@;f;-z3%>smj#IX^@F2>FaQx8CUo=6F8se00&-gkkR3b$ z<(Y=)|GWU+ZuRizvNI{QYBX=D>W1GP9*)U+hLq#R5(VJ5^RuY}9t&&pKp&s$L7)o= zo5uw{A&5!VPO5ISFU>ZW@LtuqF^p+g>UcQjF0=Ndor=a9@5NUS=6dfwzvy-`0V*Jt zpx~%!X>%p|u6N$(Ie*BS#@Vvi>qK4Qk`?mX_YVFimU#luEhrsqz^90wVMm8(Etv>v z+cmdCla%WY6AuI$(~{{5*(z6)PXGa=a0JkJ#MN&NO7qmi5zae!p17cxm_`}>{5gAD zLAjqx_RdWvt><;RQ#-4EbuXW0b4?xn_hJSjk!E_6smZOGEcR$$h6kWSK4#dkc?)6GiHZE@hD|_jNZpOL2*bZKI zkRf8|Mmvj^_214DX}DlS2xDR*V>n0_?V+vveZ^FVj5p;ca=tyVj3yRLg_jcnGN!fm zzDj)@qH~Q?LYI%Nt(biOIKcP9RolA zKv++d;bS>AT)Sz(gqTk?U~cxiJqBtRr75iA_y26%3u(|mmChhb6cCtb2llvMFupBr zE=GjLx2S3v$MK6KXq{)cH61hHGSTm~7!1&0iN^bcg|6%HK#IG#ER5MhiPpA=YOmS)EIeO0lEN^}xCTwQ|z z3{m_e%fN61ijh}T)NfWL%U5P>r|vU_u79Vp46E%eq_|Tnz3c|}z}dFCU_&aP!15f$ zzMP}>p`h}%Sv-reA8?{A{$WvvHYzXkI9yXBA2MK_v=>T?6;i4@g%AGqFNtRcCvov2 z2%N2lhNDmtsAsB-unIM6QO^F)9~u5T2-_Tw601@iq*^2}G15lXSbiS?WTmg63t9Lp zeu{ac^qdWdvkp585gm@Oa2SRb4@gT0sOceyVpSo~`L%-(fe)$OPD`4b z3-AKFld~oo5^Ygvlete}LaYuc?1XVcukjR5O&ldz7N)`46oKm^^ITjA&2#K$|8wGk z{Y6`BZvo;Im9;Hb`Ez~ui8&**GK}dpjV$bEvt_&I3vUwZPRdP*a{5^=!4J}dq0HX~;q8TNw-gxATq?<*6`iO- zZ0x|6`li}iT2X+rCSblY-D!}*GYuFuTu}V@L1$!OppA`_ijCDd>_O+Gc_Im^5QNK# zc!vZ>inLoBHas$Lc2iVRQZ`URq`B{Zv*pBu5ei*EIMj=2nj9|q?D$owRePiUCtEuZ z(Vf3`r1Ep+F)h!}vmSl|W|6|w9W}9`p(B9(3&NC;P~}I=hY3C*`p1b6umYsquQuS3 zES{4@ZB_erqO#{vN$SlmG z2$=-N?)gR5mX?Ydo9q~RdcHe5v-=Zk5!X511VG@##3J0pbQuGSwPh!PXb_-c*mI_j zoK13FbFk=2RFzAc;o&Qhs_DyhH2shlhc`XN)#u!7Ob3w+^Pe&-bo}Jtny2>`mI5hY z6A%*OwhRtRL+I%hKYhv?+1!|%jA~fn-J9W~R02kIlOHFhxZCZoT|ZBwe7sz16Cfdt zD#wfKuOFhPnM-_?X;b|l3Vzx>VodK&Tr4jma~r?)TGd?eQBlBtyHEpN`X6Vsvaww| z$c#uC8f>#4X>IoWRRgG%VOj9&*B=*`nhl+uhwlXW_{zQwz2XS$(X(@PRaI0Z@IUxj zPi#TH3nn4zLlYEh8CTy;NST+-nRz2btF44H$@2y>v{xAxLFD^a9QbLjS}9oJEK0|7 ziWg!ANYvxUFVlWnfe{h?IkOBz5s?C9HFxd%lE?l@NFe-fiv&7TlZr5Qynf}gXM0;6 zbMeh>ciVkq2+QzLev#XyYVuwsGs}QQQo`{}7rRt=SZwSTu9pzkY$}ZKsiVQf!WtEt zth4`%fHI#hEMgcF`BHdu#3=Sf!N=oS{%5Uc-*1Sn8;V5*wO0CuxMZTuc};C<9}$cI z!w6#7E~C$dY@MjR|9yDK{q3!DU9fLKC)dZ&V&=lqQmE+ua2r}Fftag~@!TCiG_Lug z^!UG*%nk0`Ky&CZK#^@|Vto{$3ANadWC%n^wZBzfB{X*5L*|@J6*W^0Lf1e4F< z0EQ;;krDvnZnBkcEHFspJ6!5my8-Gg?m1ELDrSj$*GU!-q)+zRPv?LNb`GA;t*uqS z=uIoDI%%-)+sWK$3D?qnz67mm)s=I8?4ScT%%+{zFYkd|uoc^zM>x*EYt1P3^$4m^ zw*L4RhM}UvCdMgq_s?YH6PRoIg@gz3PBRgTNhw8%{8Kqvg4za!Qn z2myXvFlpy)bID7ItZL-4lZ#4DTbSXJ|NF6S}ZKs30{cr=YN}W;Y7*UVf2916UXy zfwQ{1LQ_C?x?01-aW|2a94~H4>Sfw?_uqJsNWNHWj!95!PJgivXunPZJ08SMfB2#k z#;gxK!s0h)F&XNv;qt*T+X8Rqx~onuLI&LPXO+oAd=K`1Fpw(hP(qdeN^-UTQ z6cQA~HiUmCpHPc4++14G^b<$p?agia2GxO>F2k_!IVFiWs z!EN89o`G~vS0x=6qm7_971h-eWxDNDwzidf892BkADrXhBq(HW-$)XK%<;+*Ewk~K zA=pY;{d*MCqNf{*Ph5wCSGc^3S$o^vpG5Xqyu zMnp$Zl%ZDb*+uYq3oMb>bAu0by<7W6Y%UoZ{pZj0h~vZ}{NWbh_HA!(FXiGg;FDrc z!4Gu0I8tKfFP5kj5-O(bMtvumfEM$WIo|`h!0NtbY<1D8s~oAqXu+^X3oW5BrH>w! 
zo>TrH@I$FuHii?i$*;spAhfP64kd7T;js;Y00)hXtj5tiPPaEq7>|dZ?*oU((F{DQ zq4C6q$PsE5rjC7(g_|PRR(+3QwCV`&c5EA-44XT{)7papuDi$bv_h$C?R`;LC4^mh z>G~qryfJAj?yuwv$lc@K8QRk?djtbEt~5+cDr>$R{QTB`1|tb3>Bocs11~q8}L)yq5VEFpF18r{9S6(MJjk<9O@2H;%6UQYmEFZSX8e zUMw^m%Odmw&EV$ZFk5?vO;h~*G-Cg=hhc67Colf4Z8)(vGSuwZt z%SWV@qaei2ZTI?zoqX0HAoST|_Z?!_GfF^?_Fd}5Z#x>|F;$)|3RG}p-f(qFG)SIW9#ZTo`^fvSd}Thm zV|lRZRPSE=r z&(=BtY9Bp1DoW&fGkPpkXn%S-L>UStE70l0WV54SUG7%}WA4Y~Xn%y&Q-!T18b^<9wS7*r+<#8**y!%ZVyhrRg|hBEzkjJHPO0?vG-5n`GY zx5lK0Lb(xJ=KqTZl52yp&&9vt-q-HWu_SAe$>+;w6^khYY_L=jTBr)7YjEJB53ozf z@bH1#3{vtRccKk_=q#ECiz$B{ukTje?dhl9>0D@izqqB3#Ve2=GGw2iXrOZrz66w{ z#e!weEP%lRHa%cH;rsQ4^Qg?~)?G`qLP`^J+jq2hc!rKfQsH=15iccGS=0;<1&TkN z$1b8seJq9gvN$Oy5Lv51Hi3Ac*J5NABKYfj&q|~>wka7g+czKa`&6B=k@)iXlt``n zd%lA5;oE;1%o>(YOpKvzW0b6jq)hV6;V9eBEH9v8*W6xnJ$CSc$U0e%8e|exGtYDW z^9IoagBM|-xHZYNxSIj3Mop*?QQdwz zV8?h<-mBcbJWHB>nt)h-BOJYqu^96e*y_?DImdBIMy9XhCiW{f#{$-?oge4_r4oGb zyYcGcss|3m?`CeBv-#<-#NZ#^gTf%=rtf{nt7Wbyi<7SU=8JF^uaIu-wWbVOv?v)q zY)f-Y!ea?VmEX*%SJy%GSb#Kfao@E9Huqja7L>?MiL+MBSXMR|LZpIZY?1>t@ziwK zb~5x-#ipXl)iC*{&BoUE3{Ri?@dti-zr}kaJC*43%_2s))#dmf(hWrmG3@8~4{r=xo!LtaRL8ed0o+F_KVUKo0aI~<4BcoS(YIShXj zN|0;?1bD+&v3!>U4vJ$JHq(tNc%AvkhY<$;E`32 zHJ)z~a`mx`x{j}^3Y=;q%kFRX5id4+HMcG}E8o30^K;e7*|+xBj?x}KwfK+v0#<*X z8;&Q?S>`!~k7~SYQ(aLFh(yk_(6y|6#xSRMfSI>Et!#&DK(%S`$?b$B1SG+Qw9!`z z1ZVgN;jGcWzqYiyL`ea0e6dEkEsT3KCk-unr;1+O{rv31MOfT4gQF*|9eAj!qKWDX zP&H^o|70%aMARCl&6Z^x-gjl7-8ZR0o%`W0!5ZDfZR+^~6~0#V*_oT1>r75l$=ny^ z@Y)7h(OGPSw4MKe+&}pGLk2VnAA#GkpXAzjSJZ?Zzqij#&|BAY73ZZ92+qcze=q!D{=K+c`XAc?{0XM^agMeAS_0zKHc@6FS2Rxr zvLg03^q+CE?G(PK@ziX@l~$SMmc5iCNhxe9D~yuOwqRXl7TECsqM`$NMFTxdW`^TU zXD;9HMFJV0!!AdRpCW?jZ#~kLEUeAu^#s{#cnbsP`nG(cSngZbO=-ul)1z@^`clPh7RYr-mUP)?s>oY&okwTn2G54jV%mL7uy*=B5(qFhac zDTMPY9-6|FtrZL}05MA%w1F!le^^4Komm>*)A_7=YBIlX?jg~PI{H#wAwSY{aFT5- zF-Eyi(R%Jq%z+5ydHwIiN)6ca0f;bpX9cifoF%6BDkREcy;f;pf;EL5n#5V~G)$sqH8|dR*KT)fd9ThjLYGmoD15~;UO&wKRbyUoS zFSLJt%*Sd$br^=9cE2aV|98!xW~p2sFyTUPK ziNf*I4M{I?sUv6s8a9E@$hm_`beDVvvpI_>0eQxUN3hE&N(Bc`Tph^y=4%V|myx%` z0bb%F#*f=FiOAS2rZ({W{uS~xI!6E4-VsWaE&z_ZAZ{`fZtR;At}#by1ir;VCzA9fDbX;bo7nL~x3l=p_Vi!A zJe1nL5H`nm^?r1WocgCpn(A@59%FzPBRyB0YQ6~poZBvo93vw#yRG(!olKsiKci^r z&+bi>6l9-dkCihPb3<(%-RR?#!Wfe;=Lidy|HCj(ha5f`6jIhpPwI`|M-(_otoRuk zqJUXASIi=r>#@9Ig;u^$bQs((AX(Un{Jl~tCCp1rk^&PJ{E(rMJAeQWsJ>*?PXO+T z5h+#u$}=uJzeAiZjE7ER7@ApuaUXd4Y!iChc z_Ky}};R#ic=hh8z2M>kj(S}uO`^-`8^~q*0Veu`Wxy*DBk)}ofBTJ+tKXlhqD+39e zk)@^9V^}=k%!yL#LkDWfdAny0ZS=myE<6L3LzRbAF3bbbO-!zBoIv$Y9)tg`$0If7 zk&R@N_};n-Q=+jJJNsLPZ<0m1FQ~>H{Z22U(_SRbylO)}+{iP{Aq^?(mjlLFN}}6@ ztb5I+aa{sIGu8YpsGIQx50f8>=K55MFCD$g z2kbO5yKXS&!cnj{RsbmsAg<5_$hq7~TgB1z@)*E0z441gXrgN=FnNd*m*uL8%W~auI%WP2#`bQK6&M zhODb(pHr}bLg!%FQ%(NVpDPZPJv1ywUVyDBMO3RN(uoZJ>Ic5_ffdqiV$~#ID+vgH zSpBa6UYkkKmTM+k^YNg)Kh0Dj9=aESkqfx=(g zP(Ygf(WSXns@pR~F*c4ep5x01qrLB_F?o$}^yL*(=$R(h+DK7=RhlvL`|X?Jg+_Wa z%)hSkC&qp|<~lmHAt51yo0~NyB_)GVIxO}4679*bCbkPlmDCkc8=>8cPDg5b=qWjR zZIV%)6ulD1))GBF=o?S84yiwV{XZ2s3?TY+(0HHvZ;>dLq!kmmQAocKhw`Yeapi2F z_rK`)d-H)*05cQ6a3f5Ms{yaz2SW}bwu3wTH5||PiU{u&;MkkjGj!d+p5YD@?D<7r zpI07@M0$~|#Z)xfCJ8@`WcdYH9kVu`jf~6%j|OVCGNeXM@G-P98<@Won=AvAgxB9_ zw{J44<*L8u75x!zLtXh%MDi|B8sFKtoUVr258t>dvf=rnY(l0#7>-Y4Jjsu;-qv2t z^SVNn=!gFdh{W7J4KvL14Hxw*KIMs|KWK>4qoZV8Wo1BFKQmL`wowZhzDBjMFvTt; z^ct9(^wr@?V>b5G-xZrAkD_q#zSEPdj9cdW9$fn_Gp8a>Lzw=ei_X+2>r+iTqGJNb zTMI4vIhi07Cdga+gRFNDiH&2SQ-E+&xN8w+afBQqNb%jjQaoMyg|K6WK~-;mj=sD1 zDQX|~d#Q0(IZm>Q(f)!denOzPfedJRiLHOSG!Y$2;3)gJzkV$uF0?c8;NhVbK4)r2Kfy)Eup}Y2=~q3^QQcx^z&=J!5q=BHY{dF1cvy 
z_LGiJj#%+KZKtNZxwwDa*P1i({JZ#D=g|8Q3C;;}O6no@CzJ)d|?o$Oi+ritc^V4*=Ov z_!vMy)Ojo{_(N_Jb4~@LRzF33$yn?a9X0URe_@vY*jH2ylWOo=*m=)Y(Ip&!A5s$W z3K{y0VKA6TL2$J~iM_T)Pe3 zRO(v*fd1h#BJ^10xv=|J^5n-nk)<+oD|r0OL{MPx+wpMS>Zj&yH`^P|Bx@I&!0b$4^JKM zp1Ri{6@DP1q#2cqmT61^!@!@VK)@ih?u~gFKS0sGbhG5~mXk9pEp(qqHO;VvqIJ%~ z%xunR9VkT5oP1r@aJ*gBAWzEF)^6=DD=~AC6rlYW9ZR?@@oiy?x3~tJH35N&OEPBG$DD^y9i1Z`)t`=} zA*U2<+YGixfG)92%3;im7;pc}?Mhqy8xe?(fkClWyXS&N&BiS-ovDE%+3WgG!O}La z@dXSyb(Dd(x3>WX>qBT{5d6k(D;mb(%x~k!>Cf6$>MkBU0S|gK#&x*HTH!gI#C`pq zb0gE0LpKXAWNmT+0?ym{k@4WqmxJ)DrN0*kIxQ?tXDt?Qv@;$L9ZiTgz%5X=h=5EI zvT}gtt*WRfASzA#WzTnI*ujIwU@z^Z>NW?V`D?tO4hUz|L@NtC90>A&CQv{L0B}uz z{`@I~dbXK;bL2Pa&()(cWf5OUy+qKqAQ1LAMEbejm>3h2^TXAZrP;&!b7g@Ph5q=+ z(9lA3rb9bp$;OF?7g%rHzmp8%b6&Zdo|oI^BCwMWVxT@Kf%It=YLuy*Mjv#>l{8)V z1I9dWw7bQxNJPdKJ^c)A>Jp_JBA|x5hZ;KrHxD_8wY>FQc$(E z1?_%MHm0gB-Mxqo%?!_ae1lK6_6#k9mp4(U9_T-3GYCFJD+NC6@a0BpU0s+UvBnj! z`(ba}rpEWep`i`{4yvGmb0(gh7QiH+V$l_1sjfbU`A4;U0U(B zU#$so(rUNVRH9#5I>FDM%kW`_aFc}qJ;QnWWCR3puVx4VyKRo76hO3Vyz=J{Cs#&Z zqKB_v@9)aK5efriAjg#f&uM2q%Ji6$;1KSP# zj^TY|Y0=H-YM6to6BDI70>z=Ob-c&^{&{kE<2b#xHf;z9F2;_y&!|#3+PXt5JUx%v zu=&+@a08DciAI&RwDbg8B3=&t(p5{aj*=twd-sXk%Axz)SvXgHsN>PqVDXlVlg|Kl z^OED;lXdAhURN^W=kDI`GkJ1}>AKT(j9mgdKl=LQ+(6d%H8slTghXSeReSryD$%a$ zOT+3MQpn|G0_Q};n#w<1HRw`5%_L}8a+XeP1Cl=}b@fW1#+`h4a_39=a)PY7`b*w@ zNtw`X&wHR+F@7j6OYE{O8$_gwGjat?BKcZ}oz?)95chmuz9z8Ax))Clad2WVy_z5w z*3{I{DA<`Ye5g~)VCvx;pD!yBFj!o@yktHk&j_zDU0gmmU{nfN3F|#3$7yh@pWRI~ z7&tlEO$#Lozv@r`4w;WQsLH6&oU?a`zWwrWIP^d}j7bAGZ#%$C4Mb^#bIBmxTiLkQTWGvy0 zqZx`pDW%JHqRbw_#`s0t1p!;$p;$71x%f7K*w0%RuCu-N_Q=fL_{BKB` zC38rFDK*>b#gAXNVJAO0^iyGxZY>5yPtP9_=bIqGtK?+CPzpq2f|5=(e{sBTJe(RQ zvc8#Y%*^7fyWJVj)A9uc?~UJ{2>0s2k%bxLgh4R()jo(eExAr$EaS_|v0kn_oh&`vT1Y68~@+Dy6bHa56GANoagG(5J~ zfpwPoljGx-i*5wg)XFb@BqJyJR0>THX} zYaj%ljrnT|dPuO7%IbEpuE$4eWUlp+vhdwHV(zwxIi1_!y3bPE9BBU^dv6_8<+g>5 z(;=lG-7V7HB?!_Y(zOwgO(RIRC`gCW9fC^3rgPJXG;F#%H{J1j(R1!S=iYPw`+fg> z;~T@V$9BKGE9Y8kuKCRQ%)*L_GUw%&9r%cfu5O~FQ zcD`0Mx^xth*w(hswuX};+F{1dZu`Q>DA#o8&G};J)WMLFF^^6O$D|k}`q@Y*SVT<% zGpKUdIk)#Fy%cn~gC*C7+X$vh;k#F_i$(Bh1@NISRO}g%MM_IN z9xkMeR0a49I8i`c?Yxx8PnEMWDg81DePxDmwWg*9p_>@P5JmQ364Yywt(wUIZ z?%vWI<|L&b{)_;!5|=lm+Bxs4Mj&+?p#xOg?p#SOO9rN}yy}iye+-7nYqXzlB4q`KfPCX{fy^HTOhIDU1+OLVtW=a`& zzM+kvFTJP|W3|Y7Vx|J=9exxjY>PKa+3Dukcy)02S{WmoAx)mEnGXNHDv^D|ahk5n ztw;)guyvgCZ;DRP zU=wM2Tn&6aMRjpgbnRS*lUZBR6q2~l<|;6LzDJ?B{9Rcy&+{lCN^WvcQnY@du)!ju z8XE4&2$1f(UG%wZm3Fs*Fyso`R?mFJ^Ng9^m**n=BDIo~Q-y^d44yxEicROIxukh3V(Tnud5g3oe%tA0ek_?*p}rU1j|^hv&jw&Kyiz=U>6ek06|= zR6OexpGJ4J-xUl*wSOgE&l+@+dDQ6YuZ@F-2xd5r<2fC1DfflB?Abw;nU;}3IkH_0 zXz5x|i9r&R`$6x}urfajp=~1B>ynr)B!)oIa}ekEO}o*XmmcD9x5&+tqV$uw!9i2I zyAs|K@4wQzN}oQkL;D5>?+X@H@IR~Ae22&$);vy4w)*+=XFwATd1`^cnTSY-t=UYTer& zT!fjiPr5weLB!@#4}t5Ps3M#aFhOp&e6KlI$!@RKn&JQlA9uK@8-!_>C5{=hU{gH! 
zQq15Ioo$LhN4?k!5^QpjY_YF`(0}2~kAybV@V@bRK}3ZYp<9b;zJCS1xzw!JnjuTL zqoSN;+?BplWc_n>MvcjvX_CTdbIM{`?y&?@8J{uOBMn|MsL)cB8Zq-1K`YIh&hXPP zK4?G}zk7CtG{sY0$Gl?s5J4CNt=Nkn72Zcj#^U(S0W#T4@`28ruf)JH;+sPFOD|<5 zvl`yZKNbVc4L|DCuYxDKJiI8l}o zGjA_pbVD)y)CYk1+0FypDI)t@f>VqHJ%(GEa@tTyvo9U0FYZw3m(@v zWDbpV5*ycXE)fjxYOm{T=9P0!3bzG7ML%!q;1VK;9+%wH(AxIeaS;wkC47y|`dBe6 zo&_x&BM5?~)ZYFfm-b2a=Y)03`8{$7nJC=3us^LTN#@A2_KjQ3wc};>z z#5`*3gnh%rKE+q}vGAzhJL;hZ>W<)0RgnnMA3fL#`tX5_NJt>gG$U91bfP@+jDRhp!7E6yzz|58hnU}dXDe7 zlk_Hmh2!zVu`wh5;7l4p2No?xyO%zor&CRssSlOA6+dxwReyWntlOa58b4)~;f@98ua}$u0V0^f;1x{bNEb zyulRw++!BTU8haA?6#i8WctqonKT>UZlqXZ4Wbc#*Am(_=|TtnM3+y=XCY=(=}do9 zOAVO=rvrXzHj}DunG*SzExfN@QijxHrL}&PkWLN}Al56}d*9%)Q)VeYC}*r5X%NOT z_*&ko?B{^)GQN6)ez==-9d;hQ@3qPVY~Y>@LRmiam^77=45h(-s?`_G_^l#vpv{{+ zrQh1kX(QR=%D4x6Cr$qr%H@EJ{`R5}$&?NaO!CZ1vem5)R04K3SWvJc1PrdA63K_g zHiK?0AA|iF$%L`jxx4({5K>Wb5h^R1bOvm;||K54=#>7MQJ9Lb1!LYI37~!-)Vd>HqMI(iDVNcK%ghl({*9;bKxGpmt)Np9pr+6+M)Q6 zVX)T7U2BT2f4M^U>#cH*nZnly7VWps#65{(P@erYXwOov!=UH2VLx$t3RlS#o=+sLkjA!b*nN8L{8-0WFTO8L zHQ2=p-}L=nzQ2$6e?;UWEptm}N z3XIl?Lv%o-^?LhL`)T!^F0k=|OQG4zlc4@eehA%L^gu1QF6d-9qC_|VBPo?eFx;O; z8KALQYcNx>)4|y>QqV=NDBNnrLZ{*uVnR#7kd~!KN8X(%U+LHjhS!sS+c~Llt~nKe zy(z_gO(9Xj^<<&9<&q@fi!2fcNr*RP&m{U>WPI~GCU<~x$tVlL zdu(9W*d60hFTXqs7zo~dXod+iU)DEFA;VYdyie>j>c^gD$zvl`ObniakJDHL1IG3o zf|yWhz+x(;ji)EG04pU>d9lE~fF~q@8X6kCZ2=YH*d1UXh74|4S+*yEHVNI_ESF2~ z8!N3pZS9{TXx4dvhBb|h-q2JzCwdo1K;s`kFusxa{-muAj71kM(&)4~&cV2u* z^k(_>5PLX}o;qeKBfl1hHByneH@v@#C`k1JDkLf_$*Dq4X7ZI_*{O_UQ1{z`u6-4o z)h)HDA0hv{q77GT5f*G-t`v5<5t`6b2k&+;RU`?ewg`Q8`;PK-0aIFe^mkVB?Bg)l z>7yAYb$WrliN|}&PxtNMd#74NakRuhZcKdJ6PWW}sw-(i>Y$k^-Nu{9)TH{Rs#Xgi z8l0N|vrx)_;;lr+sNaB=T0%Z$N$D?cYKIGy`AjQ95`zaZh;T|}n^z}qtK@!6x=6yo zY}^UrHc-&R*{2jQsp6IwDsNv}lv$&rFY8 zvd*BC^>m}K#hIKrJ8Q-Ja$j+y8}f6l*ck`!qHz@TX5iXE^bUJ0VMT;ks88O^e_8Fz z`>vZ9S6YKTTck1BNj+S0!^r=&D9|ko!d1eNAMG+xFL{Hlrb_}^Tj0(gLd!mMLgLh8 zxNh0sn|Y_W4bmrh6>g$6mp#%uAH9#1+5WmkaN*gJf`+oj}CRaBv zB3kcnbFTGy+!0^@V%~es|AXn<0zvW2Ju$ubQ3*%^RT)=KONLc9Q8RF_(dpsB;+*Ww zBInq2S;BJSDKBHV<{jQN3Ede5%$>^TsS;eO@;(H++=j8l1b@=+K{&Dggw1twK_&jg zjeXfJB#s zBKhPHVi4UZE&M-2?UTvF-ta5aYfobaSMrs;^Jf>=i#`Il7EVs?{NA}pOQTCYBvEOF zo=vtK@F9}8gSv3>w8ysP>Dtwb+Zky9-F0buk6|epLHB%h(VC^~d2xQc1op>S-A~?y zJ?6jw{pY{?KT)D6wwnw+rS&@GRg8iUJ|8htJlk&Thzrz0c>MT>U~owWZ$e@sN7wV- zKjV-8vy3~p4+9jVF4oWGNqbm4$elaVo+M?!X3xaTjLGw{g*O&yKd7b+z*he1GV|B( zSIK@`35@q5-bL8r6bcEOVVet6AwAv>$V*iaqMs-WeWxvblnsr&Fe zY)X`~NCBx+1;J4yWqgL*yr1E07KXE0K}XkeEZooMR5ycsHM1l4aqiQd)7fnraL}8W zs4?wKxbMtg6M3`8r226sz%X>ZTsTx)#|^5_d0MIZxE^G+An0<#KHdTX4rvS-^j zxLN~ItM(4AS_T1kFzr!{%#$a_&M)MT`jP!9P6w^QHg43(9KMbZ~J|1%kE!I7&-EK;UOzTwI*FgM&^-C$pKev+b05 zkZ75&9S#)`p;lI1UCmBxe06nbgJ-mLeC$k0P9ERbC=K8`H2IXz7pummrZ@qB3d!z~ zlhffH#mw+9ujmbj>o|MU9eE9nKM*~5c&Tq-Ff^BL!Hk6!y6PG-mvbz7T(RvJbhOpW z#GKdbG`lGSU_D7b?LU6}SamA0O)~nGb&d0|` zCob;&uJN;`*WJ&$TZcfb*!()?yl`M6i;pMlkV>AEJUME2*GO0YFpRWH-{pwE3{C|_ zFJMyz0+pXt5!CdP(?;Uc1S>7W+Rjc(Sa`g%G_${8LWp4#&d#g`ppWxY6O z&`J~v$@6Ztn@bZbK|w)gATK>WC8c-5jKh>EjbD(iwssZSOXgk#9)Mc-m8t3SLhI|{ z!FE3ywx6^>q>385lL$dXrrgP#xu<9R$jFEiU;+o=Eb;aa1!7|*-@a+0D_&bQr$-MA zJYOMX0n%L87gtU%auo>xiKbKo*G`F;XlZF*lagtY zx=u!TAXreXz2q~%sDF`y7Y(pQnv6|}g3wTvl$3H)+Yr3Ac=oVz9WPJ3c@qPr1uk0G z?PnUjBTq--k~2z6+m(;hRC>GC*Q*&Ag~JD(x2{_~t+1*!&YZlw3_7t(MO1~x?cHr` zl!u0g)iPJbmZdSLn{Jzh?$Er18%u!Ojk|WX$vBT7%DQ7bDLHw^X+k=3qj;(U0_hQj zdp%m~*G_sfaF-_sdk{bbVqz9~mgtOou8zG<_2K8|?&s^-YO&$1Vu$7BycKg?O!e_; z48fil*QiJGK!zTO8X;%nF1c|}+B5->*afMzwe_M-b>ftEeX!Qv?4}&i2N-phcXAKV z_6G;KDKV70x3}v_xu7I|P^LJ0!$8EBC4_EgxHvdl*eiYq3H+(yg$7-UVT1Qmxkz>n 
z4;)d@q#fw;^L)9vAT3LRjEuT-#NkzZ_1)Uq+KjBM!`Lkg(&2LpT0YP-a<3$sc)-yI zzv^1)@t<4(ex-asTp%Qrt9G#}2ovvWc4%1RxT?m^CochGqW0%h@*3FMyuSg+oJ9}` zCSUk~XP;MDscK5mT25^g%%ys%_qv}~Bu#I@2#R&h4)|u>#gw9)!Y+eOKd*zzdX6Xu zICnNSMlUU(Yu5zZrz88*-= zIYWM)E)?0u+ z=agJcg~Y&+Bi@n~kdv_Jg7WgUZ{}x(Xd2KAvQ!_E3~ugFX~dl821N{}&)Y1-ZmT%T zpR5IZAl#)cA*G<;NfUL0E%Cy?<-rlIP6pO&tgQtJq8>b0P@9pg&r4hFU}SK>M>#lj z0-~HoWshlU(NRB9t*m^h6>E1BTylq7PbiG^<90Te-<0>;DX6RaZ?tBsCKP<|CsZW_ zuCRfT#pW(wZxu!}zkO>Da|e8>dUN=(3x5f&>i38|ZwjvN?(WZ?r<-8}8n0iY!M_YQ zK9cwHBGZc``AX?JHalzbgnkgAnZEtthctailZnkt?azy;h&3k)=gswX9yuAe45=!- zw5D}s9g40^Zf6p5t^jgkVvDek$jAX5$YBLkyk!z5Fmkkzy*&)mln#3w5m9SvszNQk zvUP}ioI9krz_WMJawilHXe9FT2(yniHY{H|ptR0+1g;4NbLqOJELGJylcsq>Uku!2 z9^!Q)fa7Ge2?%V!^iafpj?kb{CO1wjB2^(Z2dJz-mMBfdz?dLN%&%%z66&MoPdF({ z!Z6VP1cJUxYkUff#&{aw`Ze!{IZt}q&79s--fzT@-ZGaU_C(s#ukl;4`RflfXXt^_ zVWhN)fzSBes9Q2ytndcc(5a1k-XZj_vb5-;JdkB!Q6M8dY!*B8o)K(}v)yHAmdvpK zaEa!PE{mHvbLc#GrbjdnMp``rgK-j+HZEmpp5W}{QykMt>5%*U1T%1RNj=Kw4COff zW{%;Fm5a1QRPQq2pob^ohVp52*j4)N+s~UJfytLF^z<{>am|qhX|GW!^}#MrTD$2> zxvYJhtgZ7`1u+6}u|@Z?X8$6XR7e0*dx!Nr5RgZ(8a_vj9eO7I{eG8&$v6l6b}!c$uYXh(We@vyuT`zrsURn(>m^P}lmz1@{b7@3(#BV|)0(ryQsPAb?i_H5GV=q|X7+PvQb z!^diT8eVLkEQ+~@m(VQNbTD00{yGKY8`H0!5hIvDs+?|eIEYT&Cq8u7*D?p~#IDNp zejNmyVgvW%Q0=L(AMYc)m*R&V8xh%b_cqh;kDAwGnIr_hVPj>TG5dV9`b6thOHf8{ zP|1A~bdwyZbr~6%LxR)$y20GzQvE~$3hOuT2wUtjyLrW~KAdyQ%p&(Z$ldZTMxC;E zfU5}rx+*#|LUqEANL~JPzX*D&v9j%yD%_PcnIS=t4R*%)gsYwb0 z&eL8ngZYQrz-Qnf?xMw6|LD=N>m_Slt{Dw6ZG0gdB(5?#S}nkm>UFW90@$q;0SzD0 zTmLOB**p1M+zW86yGZNcUZ_SPMSTbfCfSlQCU^mQMu91_HJVTC#c}r-#?5I|0OM8( zs0`L%LPGQMx|8BmtyHy(Y7i3rQ2=^4lNCmEXMMct#bXNi{-1gYu0MlPTEPT#1<{@4 zHFE`f{quo4BDqAw2V!ja1O#E|cw)NT)$cp+wN%e%t~X$pKZm{?n1~Du;|q}c3Cnu$ zL3>z>!;F#XAr&&i)$SWLlPF9X+Ffe6=f{G!E0_b`!c7Yt2hEa!ycwJ64B8dqi6b{! zc&b-&jkdE_pO~1)oo4A~N0pNy6ReN~;_0G%KWu4-oahHY+@DgQ8)1U|?rui&iGczL zwsZ`rZ2jJFbLjHmJ({droYOoGnL&_#jL^t=~)OB*Ewx zP@5uom36&m<0SONn?v-REWPi+ATe6}`ua119skDIU+UoSxBYHvLQ@<_&^qhQ%+~&c{17!{A7{WV^EH^ptkf()rOK5W7t~e4m-w6CcAJo|3+!gkbd84Pnuo z(72}n{_cE+m>Af)!XJ=G#rJti?SzIFn?u{~naEM0Wty2EV$F?>S%`=O6o=^FEw#1d z$%dW67Otd3=jHMg=Wi|GjoC}J#WHjDATC}6v6Mvx$FVEL)oG-d z88SC^_txpc3ti`WJmF1MF#Vd-*Nj`Q83luv=z5-VI2xGgHxR$r2DE1+8Yz5)K~G=L z+l5t0EHTM-3_<1{HI45mX zV1PG61Wa=Ke|Q<|4+a(;^yM~+>NIrD#U>WV2KB)&kq2}?=L@EiGRlzh~rsFDlHYkcW*Iv@##T0DLPRYTD-26;q{>H!VTpBoyrM!cPEn)AsG zJ4=)h)DQsV5prYm)NDJYo~lDf3`?7kENBFcY%_!AP)4Aj0F z*~a*%r-&%8H2tKk3(|BWu>SeEma?DJR)~5z{Xdiqyzx;J5vj1$K5MpM(AAr=7vu}^YBN(x&;L&I)uZ4!X)KKM1IIv@RJh$o$Y z7Fl2=;s*u=5g%6%+Nq+Wp_u{qAbfm$q9HVx^snIoX`*hwvhp92qMkb1@lhkqdi+kC6c$P@1_HkiR2;yT&THYfd0VPO5Fvoi{%*#2L0y-lmih|4_a#Is|4Zq5kMy#XK zi)?R4QP@f2a`Ppb=8*i|fsagxb)1mP)(-}F@eBiGDmoCtf0niTI#&K4i05=S<4VMT z56*u{1&tygFmfBg*%QC&{I8Gu!bm83^lJtK`2SqM#}|M+s(UPg1@5}@t51LXkc~j_ zhjj@PLs0&?iCHg!11c? z&2N#w&~IMaf2IcndhDEjP<9dD-rbD@jx?k~EJQQPyP5OXBrXYx02F|jn3%G!L6P}B zGZD`O*sRH z^?-RNhZ%W6z^^H5w!+l0$5zVNSYN+iJjN=KZeNkq;`4{3>iK^-jrM{^$;-*L!A}8l z)5%B#9QnJVF;I|VqN3)1%(K00<gS-m@%_1pvt{STQ9 z0c7@>Fu@-+{g>coX#p}jk|shH@jqmi4v^UweJDHs;{^G7C$klK5rO|hW^?Xj_A{@v z=>L#etGgw;^DTSfA6FdB|Lgw$<+C4?Rk^|^_8s-&;v%Q6P98{T&j8roV5W)IH8(dt z8@n6PK-k8qt*P1Dmy^ZD#K7^CsH?lEa5cmGp=LI@f|m8H!=SSw>NSu&>{+rN!}^bj ziHXHy58|>!LS2iZLvd|nxnr-$@bH?a_{Rp!8{C0uhc&R3Unhuv*Mn-<&?4EV_+(ZE zg>qk&RJdcbli6Q(cN!`bA7by2pzIYAdb#=e%+>?f+a>BYpFaNHis|m*yp<_6F&1Xk zG>*aBQ%Yz}uv)hJh8Wt<#zaYg$_}Dylf<#!GScgG)&u7e?g= zn^*0y#4WG>(U0Y8eLbO^4U4N|M!qttn-*Ewa<+t?q4+rB#hYX}Un@s<}slf}LcIa7~%}l3aIhcTFi~j}>IQ zVbG(AE1cy;? 
zEhd%zUc*58a23DZ*4H509&CI9{FzGI)QwJapWsch>A;=N)r{1gscDPk?R7v^VNmby zJoEIIpNgvxACDNQoGQh|i(J29GPo=VKk-?A6SQmn<8?~~bkd3)D7vJ#vS_D=3HW2z z+-vmy)4i^ev^o>fnMKe3?n1GiThEj8HNz-=NzO6bI^)PwD7wVs^qUc8Mov!qj*5aA z7oWm#X!qpnbp2o3t$#b#c+Ci<*L0Q=Wj(SRXWd!Ouw7)f&tPlFv7RqG|?8=9+uWCq9 zw^vsd+e3HJa;9T>lVaWH*w`BKFG|P9LxZymNJgX9V~Sc@t6@_~$4_VE zm6Y=9ulsylTeS=uwC)`oGO|ZKUNQet+mBsk1?1WrF|5wGZ2B(U^`JDlWS(!#ljx<5 zl^txUI!Af}^P7?Wmnn89Xopffho#^V!wk$%r|Kl+z>$TM(@D?@MmY!D7(762Z*MQF zv;E7VX|&-{YpH8k!v5i2xa;*+{HN$p`klp>Zlk#yo@ zwM_-YV(Tp0OJUU^AZ`<_Y!v~IP_hD6w^VZ7INf^_bwe=VI9znpj@?+xb@7bu5 zh2rs>zf|IMq2b)@kdiu zRm7XMx2I*r!9E=TK@`X}x{yLt1YB%4oOZpt-pu9ld6*2$fwaH;H z;(Lp37VcwQJ?mmfMn*p4T=k+j$oetsOoemfRC=R9s8MJiKqH-MXt*m<;$v8G>BE&f zRxdZx4@}-JS-txbnk(hb_O|0#-pPrJE6z+#ab8Z2@*JD*Uk=3kalj#5Qmx}w>u3I< zzTws{Md;0o#3%WiyNeVPmpU{u z!dB-xv-mFBzVZAd46aMlKY7-g=d)eQqUQ%gdT)HQxMnEuv#RMb2PUDX=PUw zK5*hQi0WrCeGVZmHmX}y#7r3)!iuvr6U>ZU7d}4=!27Y1+cVh*uaWwSYPiER{w~t@ zYE&|XA7r_6z0rST$ou({VIrN)Ww_R94fp)I*h>Mq%JiV^)0~sEJ=W*R^JfDmSF00i zX1Uk;8bSP6=Mxs`uFkG9Rp!zcHB3D|WP|f1U$iX%NT~U+`n6WSz=W{-%?D!sd41*+ z&q~|Go&IR^1Qu^Okgz>p8{VsMzGZx!QX16%VuNvH@Nf9;H7j;q_c-Wl=RR2kwo>)8i;t<BMwYGJPq(x z>NvGcoZ(M%m;PuhoJz^B%>q^9GS64_?E#!1^JYiazij#cF>k<~AEolp z5%j4-Sf#$2jc+e|vLnYYk6v*xG$sj0`e4zQLfErhsyubBY~OfX*>iP$wI;#pI_;C} zwH6HjL2hNC`!#Og6j2K+NuLRh=W|AXp5Ov4o)B?D5U!TgUqfto<9$Ye7fgD+I$`mp zX!qUt8r06FP=B4;vIJInNPt73zq+q{S+%3PH>R9tzZHN~xRb>1y|-r~>~U31A>t+b zc*LVF14SiUBcrrE9RD2S}Eq!Z&2XD+7 zi+3K~I;y>3^!D3y71z31ZMrT@5jEZ_ipH8)A-~`{?iw^b$h%!oTRxdO=@M)_wmo0r z?e9txuUok~Kin1VGQFS~_V8QYq|tXt{WfYuIaOsF8!35|VP^ii2F{A2OPtj&@^7U% z6SyYG@Sk~0Bf$#A{N;P9r+w!QPt#^(zW|4V-HsJ?pPAq* zM#grItu~}8*o-F#w3n+go@}h`0+cbfEIz4!Rj~XiV|-W;h1~{=;^{AmU!dN# zgcfJMtFIUd4=RrQolmRmjx>`flfRHAXcrp|+v5E!iK|ZT9~yeWt`hZ_bc3`^wN6%# zW@-`$I@`4>>8oDE=A87?(OS$%n2Qo=K&n`Q^JumgS#i zFlHcXN;IE8$5o#I=WDHVq3ZWof|Bln0`fdtr=shrGG5yrtDkxu-)AWL1$4DGLphrk zC7Aj3t7^##VT8^_8?|x1CEI$J2D5dp#rm#AEAGGVuQy0l+tVy_w+k8<^M!razUi2by=x;eK2(R-vp`jHY32 z))W_Wr~eJ`2yLwpkm*#QhhFYB$Q!wBFj~T_J7?>5hnnEdU&1jeIhL_YaQ}6mlX?zZ z(Vo9c$091Ro7~FJH;l`2R_gET8=^4^#&Wos50ZpQC*ZH}PBz@`TwfYiNnkCb+M8{k zIxt~DP{(i#`;lU`(EF@1Pd39f({CWkipd4u7z5bs38vlJ{PDDZYR}pz^y-oNr@weXD+^ePdT)U#z6OXfd!3 zWFt1P=V&a{Mz0@x_ju)v&>3mGp3~eGg1QK_Qj5%c^I4 zys>u_JX3F(eSPl#3Fq_4^dTo0?Zo{2h(6sBx{Y^l)n{H#_t!;#K2XZXCyiCo6i=^k zE%=zCq;~TJ;Y3tVYjwYq>Qle>3GadN&Bd*YzfPUp#C)V3O#$AS&;8mI!>R&{4~A7c z(3E4`QuGh7o&w5t2M~qmSU{oZx7xr_GcpB{FCjp!O zb3#Hse=+NT(EKNcTVLxW#NSU)0rkYGa16L8*V#^iR2Y*qMfk*)(cY9HmetnQ@tJC% zfAioxvm}kB+R3#)R>-r+>#K~cF{--OEczBToJ48L|H|0&8mCH9KQE^m3_G>SV8;4c zgP6rP?Bq3F#>D?=Sgv+#yJv!_Cu7EU-tVoIfX!%U1r&T?B{7UQbH)ZQsH z@3Q$#CUK*5T*u>+DR@^g#3yZ3r^4=p$gI$Q0Q%&pTJmr_``}pe8qb5Ec4lTzZz7v}&a?7*D$9kVAm744|HS<mUm_xNsp~kLVrd-N6OgKq-(_@suuF48=iA3-&+uvA=koH~})mwO$!mAO|SA?2xr-VeE0)M9EbdyQu;OzN4T zXW2k;l$d_3?7q+*pOalK6VQ}w7;bl(`m}gS`QKY32AiB;7_5E0bQj0pIcd`9d`NftS@2qx=n);m0Pd9p}ZQ$;2 zsi{ZVUiQ3qS3%$Lkdd#Q`?}+6fE=z>);yXxn#|jiQz8BJF>4Q_Mr3cTpk82KbhiA= zyuMZt3@IUwMdZ4JOTAo#y@aH;P`QDYZOpWiTs8ULy#r@zOuJ^zaxAL&oiRn)akZ&H z^Wdad;c;sLzfV&U9^>3P8A5N%7+yF{e-q5?=M${Es1|6Bi+$mCQnR)K!KEluMNhmK zX-yxHmikd~vg6!~vFTW$yK}}~-dTICe^O-5qE@eckER+g@-?fP4q8m*%684+i~JcG zz&qM7@N24dzA3rKbmeSeooik4dg6)wv1egVUaaF;J7@Z!?U%0XB->h}I?q05FM+NQ zwJ9sz_RLSISgeIy!@{NgIXFqM%=8K0+jfBP)OYXF_>KShx9yby_I=U2EwA|rirpae zL1Ad4VV>ZoGqi}kYJ-0mDMRvnW4P#H-^KBkfr@P;vx0TdToI#t(n^_8`MrwW+Brc@ zv`5wUb->+h2D*~;R+x3>@M)pwO^<~SyV~t`3hSV0#VC6=tVpGQqd5p##&3MpV&UxF z6emzi*94m-Z>4V)@mq44Fw$6h*95)Z`|Az+0|o=Q8La2VTHmV!YI|oC$I|@1Ss6)& z?DG+r#?Hf4EdPVmM5tCW&E2esQra~6}n>pYn|sr~yOipZ~K6zL0K!kCr*>4WqG 
z1&HLARjpQH9A;Jn1>*To8$8(rVCmW+ZXv!JGi04J#oKzqytVNPkDC3N)fjA`+aqNJ ztU&Jh)uLzOxP$7^q}iDJRPMDPNqy18RwZYCw_S0N4{@n=lHutN?aYzDxQ8-ETfX0@ zW+m0r?qMoWXFhu8Z&SIH5Rw@rFuT1B3%_L7pI61j)XYiAI-f$+r^P(>J%@#$%m=bA ztOnhC-oCkW0aMtg;`%e>HyE2T9wP#>()P4AIT&#_n6&shi z&jL!qYhaK%WTveBVR#~q1I__W#jC#t>DQ(LCoV}tE;r;Y5DY+7?gtlRZ7tjv(R~i7 zwF<|AOgd(MGHl&mSUu`KF>iR9t5&|_d*OKX7et`h3$+WJTKuqw_C7k&%#66>-hc^l zNd@`vW`$;%k`nr|Wrj;-H=2}n?KFe=ih0r2noSRI4TbTq(VlWH!rgE!fyC$H6cl*YN1B>vief1Ahuyh41m@J|kX)n1O$ z`9G|_7;J#fv4ymtgY)Mr|6%^cxOX3nqxjcV^0z`Dlg+6qbDLMMOvpj(e=&>`(MCZ&I_m-uQp-cPEysp>17DNAipF}| z_=4^qK65-{mzQRsx3cALWdTabwl_@%o|RFJp}gsmyJk5*}e7vQ6zj&?enfXibgSsXDZMK`IodXx*ZH6(4c{(+V-V6d%v2o(|6_V11}-tKku z98}+UEe?nly=-Q#vWp*PA8T9Nua<#`6uSHSO*i?J*E`&VdAZHV2~STyOH6%shA@Ek zr!>&W-J!?p!7C+n8qKu=ll4*E`7`u|p=Kdt%s#y@RUA-J*!V7QSQN^2J{`&NK2^dz zixDFOr@Ggi7I(A$7Du+G=2t~QE{5gS8L@~ZTZUP2xCyppX_N{cJIbThd9ehbzd*|% zVW2{eJdP-=a{cRm^DnFEFVu6-2Urzw!3DxUC-PtG<@XY#Ve|j`bx?$xn-<9M8k3MP zU+c{Dle^YNN4~%1->v`Kw`PEq(<{UI1^S_-wRL=4oB&Q4lY*sXshL?0Cp5Dh^MBL? zvP7Yi7!cv(W6M)Zk7b}VtONjxJLt0~E_@Nwv`q&KHu;B{-Wv%P>Ys)^=sFAL{#(E9 z+ao=o2HV=hqkjHW1}w0#eSI9HWMnZnH~uAMdh!+9&0Xt{pfI<~w@&qdp@{&gyW2-w zfQbn=FyME0|JD1Cy8a*WASXvhA*0}mGDMehTPO?)%`8DGBuMHN5fh^W%>CS+bi!Of zZvfAU0{MWkt?<#-x{d0B)AsfCgt}GKakCtltFN$!_*JR*b7Ql*^7qtEKHeE7BrIhV&!|K=U ziL@}2Scs%=XBWXIXzUzda(r^4prw_Y^98lVDtIQRvnSj`hl?%jS97?LPNeqRf_Z^W zjg*D$r>j4#CqDU0ZQRHO~ z*7w<}*Qr<6{({TX$6C6d64|>=UU&7Cvd;e6a&+hbz`c!e5rCirrgl0Km93eV3)*SB1J}qVaDofL(p$31e zk$?1Hs0oYMM-ABF^yk6MgPA8VB4q&hjY zc=4Q}>6<9CMRC=S_phsY{yy;-(`-^1$FK2j{y5P6Es-`Pn!H?kTsEjlAQ zQS|dj_nH4zmsCb<|DWfVS4K1Y(=U2YK-u*U-?V%jFWwt$Z845t_*=34DPO-Dw9gRr zPz}AO^!GvP*W7|iHCJm`8-%_xR$;-&!YwUP?q2s0Wr%^B)?|U)T;*@a6O9~T`|s!q zUgr8e#5#8VpBaj;_KT+4A44RZZo<%X2>8cqRSqh3VV5`j1*zsfEscV!*Q>XLBdBVJzp5>3eAV7mF5xe=HH;EI72d{l}}1 zh@TyMtVFuZGHG$mZ}4`0@qMd9HTn~_2*KWunjl-Th+q8+1P0ve@6@B05y!vV{m~SW z*rs7cpm20E+Bij&F`Iy(C`?M%Bi9X|)S5Tq|0C-=`@Zh$eqPV( zd0&?_9m7H9<#^s6a|0EMy_zKH?|7y0>&g6Ajk<2*xzj`vEB&`P9zC)(lxl!K*Lq}e z&KAnRYxD14mbq}SCEiELM;!@yE)XPRtoBycp^^ErSmp$Y)Xulj-PnbMGv)Ip`u`F~ zU^~?w=vU%ybv{0VJ@*NXc$t3?lOrJ>nB~OVsnaLV1Gk{naq5N)M&Mnk{=bVAbQ?gK zb8bpjRKJJP%lYFqMUc=9v0S1@iH@|$Foz*NVuWBoL9d6H^`XZ(Hl-(=V7&KlTsl-2 zfm7rSF;6L8S*W7*$L4DZu*jQHky8;N2JHMoV2v#VjDByd--k8g@_8W1k>iMx`@MQ5 zC}N(ST3@Mndctr8D`iJA=~yO!Fz0B15ou;*?_-e-D=`i;$pD z?U&;W(@(1cOmea~J9gW1`W8edMT|ptSIqqq_5XaEDwuF|@xxdB=_9zSXO1}AVEpS5 z)-EzJ@~X5L{62(qdy34!O-aQ!B7Mu7?^I2@%A<|H>kNKsj0CF%)ld~aL=wONWC=Qq zP;U!7I0-zyZpv<9n|G#>B}!Epm0Ilr0_+^@F=w$XmyXngM-Xuyn40_^=hTTKTeAga zBSh{!guIpT`chk~Wt=75-IiHPxTS}eAJW&l3U)dh8{3_A*&eZfmrrJr2<$cA6I2dI z*3|fw*!4neQ0G{)oKx-e%Y%j3ZLV=>Z^l&wk;lfw(S_d$Cbu3JNZMoiPgDWcH1z??SHc3I zF*kMA(1?sDhnt%I2Sgm~H*d<;8u5>jrTvVOk3W*moh$OkB`;J%OCmL=Ea_|)>1iVr zM9}xNT`AxOatWLNqpeI;U`Zt`VD*eb_w`+aLdQT^X)aK?28il-?Ccho;DsWJO3K0P ztgMorPpmnAFLW?f5oyT!iN2ZDarYCVVlYZ}1-wwQyF}7mTOYWt3g&sP@ld^TiH641 z+%1l&iBN`cHT%J@Bpphn)dhJY66^oVH#}jddVn~%2Th}nBDmR=EPVxB64HRy$*U$y z;q3WpYP?1}zsK^@r)_@{$iMaB{o%Ym<_2*I4;~+vQBh%BQhF<+w)PJ1`E&p*jd3x% zyAqsST$ShiLv*sAV;y3@mbq*#{ghh?Bc13IAT*e+&xJ?Eou{JAIGKts>jTYG|Df!z zst2iw{+RXiw%_^0BLkvpCd^~9Vq{<#n4BvDc@lHNe6mNn zx?y>3p!DNu-aIs{3<8QeKWZQ=r=XyCdeJKJakYm+exIKLp7m~FH&DaM<<`Q0GpAOrAt)EDj){K&(_*g#0{T^;L8O@(Rxe=u`lpZ zUpJHOxyc+C1{LcrmCq9?Pjr1zdHynj zrx~QY|8rd$%_O>$*=kc*w}=eGj5K=&4hr=VX8O8)V;mRaQzU)Yw;wz^hait<LO0}VFSkP}xu#PqH;VS=RKf)oTNV-@}Vc4AM*b{?HBPYBP!7U@7t}BZD3Jx$!(*5$A8qy{chiV$x`y3mH1$^}`C@o~1-m zW-Xkj=H>b7MatgVJwGMQiw0`BkUM4QBe|#yV}Owvg}fZj@@;u)Un7aE7LP_ z|DwIP`1o!h@*~pMgU2Ahqb;hTc8_Umz1MA;W=6FwJ66%%d)1XwwVP#-u7e}V>@N={ 
z!*YiBDp!wKT(WuT5iFxpBLuZ{K~2ti0o^*W%UY&iwS4*%m_YW8L=UQ?X6TZ}ucRC& zU58gy+}N2Gfm@mzlAiXuA6Ni3SLB5(qKuFIexwZCzkSx>X6n5vvnBxPy0CZ{%>*eE zaFdz9_2f8k+spV*Sau0+(YwE9V@KGUlX<$0ZgUVpEN0)q;~$wBAx@UPB9wdd)c3zl z#y>O-@X9)JEVaG2bkd$K{7Dt4A3!nh;_sJe9PL1lng;0~)w|W}uK)^+xY_As{=Z&b zN0X(#@1c(7Tc6|8C6zc|s}Vr!woeJ(A~G0CczAy4V!_IsicrZ-5#wE^`e7crH{^dX z5g1luz{yrCC>1^T5twa+or#9|V5A%D3cc;X79JVDHgUVmn8L_9QkEAaEso*E;WP4| z0Vs%8w>_}2nZDaMQgree#JpN~|`#*+G6=VzGS%iVtW?l%|)wY6Yq_u6*pOTTDTOe>cr1jNXNR--7CO!-)){?+za$ zk3BX!A^Y5$70Zfo_Wcnkvn%hfPha*{7M?oy72pjx(xxt39Wk@NfB#M#n1)_>y%q1D zSNQR+9Tgdqz%wE0s$LIYgU(%M2=~IHCRy}B&_sVPxPoO^D`AbHJ zmJL=$@7&-6y{mp6JCZA7-=0tDqOztxD#O2SxIgkX)mZ88n`(m7qw-}k(zkYtomv@4 z|9w_|05A$|8Oyk%@f4L1#=IF?f%CFEZo*0p7i{AYz31i_X(ODm2Gj8h6!)-`Dc)tC zfseML*sol9XincU{;!#^Tsw$XK3Gq&9DO<&c+yEjW17_li!&8gQ^7+SSEhb8uhdaL zpWc>f@b=UUxd4mJ<3I?k%d+|&1xSZh+qiqOGWc~ewepdv%bYy}DE)}SKRDRivV`qp3K#3;kn2Fh z6UG!y)ESpy>3;7M2ZwIq!v)&E4*)Pu{y=G;z(x1+Xd8BLhGz$w__%k9Z3T}k#r}M8 zdMlZ9A~s8vF)FG+Hv`q62DrIc=M7u>|9Om||1ZY}zJ%7taUD-VBl4?2+YgQoHC5)g z1B~8)lw=hTR+>tBiPF$iXVZbPa7K{%5!M~o0g{3tu@xV@%$<+yI6_XZEK;| z_4qM5i1`J>;;mAEA3kG}6D2uk#D}MPRYs=9Uvjl6sh%rrVbyW<|DO!iR|7Cj-pPKU zhrj?h+V5Aw3kcBs4kPR%XI@UXQ9qwD00&vIO=pR2V8O>Z+Q`I&d8ISyTdhusq#VKj zxJRle@H%EIWq+89A_l?{vf%0c&$Y%zxrI>^14%IrBm32PSV&|NC$n*d;!+`mf6jN` zY2mHizqTjygbW0MKdA4l9?cm`$;o`fMl*=mq$Yj&Jx#T+1gko(LSe&jX56GcEA_XP zAKRUgN_&`AG3zptzl(>sNChHj!qx@Cqt8s$SU7uGZAwbUybY|Yymw}6_&S6-lCLsD zN_Bw0xA-`&KNIxNI=zOnJYKJGCq4SQj)4gX1r9Ea0X7!gI)gfc?x&+R-@bjz8T@4{1z*juu28SFP>p_)sXC4A3rj zeOa1qXMHhdbaWwOly?O3q%rjU`K?_{;!3lo!nDJQowZ)?Qhi{4(aKfPCZ>}Fl5EjQy##SOF z_j|=m-kelTl3+bRArMG9Z2@Ow?4@wG3CXKMLh(YgkVEx*gH5?|C9&X>jX$FF>-OsV zCRQL8$Mv$XHFw7aAMs{-7ZE{c%Us%)SGA7D9*p(GD-5s~UQ4#Vic%=<*FxAy}rcFihVsb3aVUo*&`p1q> zWcEJt#<2sQI&;S`1udZ4osbjrtwpU3>b4jGE*FAa30x%nl0WJ`me)WSmdP!`8nIKH zYJRv1%2!OV)$iYX57l}EO6)8sn<8{U5EE#oe*%MivGi%AB;oz*$hD@7&imZlCTPfp zmCj06iIb=|NBj@k)!8R4Z^s%I%-uNn;T{`bxm*M+CvwAqalBP_=_;CKrA4V zm)=@p|D*rWPMC)&{Wgpe?zF(SfeZ>+ZS_(*wXAA%Jv}c}qJDtFt?lbm;gH}l)J-Nk(=V?}0TEE#Ibt42Vwf!9d#~PkA!Ptw z)-hAQ6rP*Upop0i`yUGDPFQ2EUtojx)=P_Gqf2=$undBH7tj^EP)|jtW4xGNsbi9s zmbUkE;sf2VE?D2wTSJ}J0deg=zq~%o(17b&U(T*v;IuQ!b=7<04@BE(OLUjTE>E$B zNu*=t^E&2F+fe0az-UJF#Rzs>OA1o-e$99JtgOAEUI}*2O3%qst}lXTEpYP2o%LJk zTYSa)FHPH6awgw11>XD&Ry+i>jUX3h{eA8Uy`m`9y_m0G2}S*W1(4OQ|IP!^!$lcl za_qBCLYi-y9kO!Zx=)b+0Z?aP^Rq}b7fxLu>6^+E->AHgSx%O?y6-Y;XFb0d`y6BE zL}c#qKEIh>T!7*9KiH_3mFj_;wGLmSMf^cZYjo7M)1@=&(E-y7V(p^wRURv8{VfN$G`&7gj%mE;Zu_m04@%^lm(Q*RUeP~34}hx& zHyH0-W@0kZn(m8@y>uE}ER2dO&3|q(O~ez}B5z`GCqCZX>*0&t@}+!0 zqj3NR9=~k5_1dCt>H77E`{V&^^Kw?liQYq?6m%A`$?!>3V8NRBcq(sX$$(tJZX0rB z+JSj9+S`1Guc~i^)o-KA<`A!HSUx{%6UO|19z)P~hxMC8&zWNYZ=!<$S-LILPF^S7 zExN6_IqH?skIqizb=pi5wb1fQ7Ze+DF&7dgPfN%pUyKkr9tSu%^(+9`={WoO`)@t~ zOnJun?9ZQL51^#$rI=PqHKqtZ(cRF41O;lSs^o#+C6d0;6n$%mXJgd89(c~^&|hut z3BuKLEIne(?6H5S@~d}P^l_L8U6iWu)b|dX5E^q3iZIlB8h*OQkM0}k6<+4tQ-UOpzprfP~UEBp&xS~3yE-~0M3P5^<+cU7gZAO8gqGy(_?RYw!% z37cTJftjG6ks-?(lWcUS4&aQoYiK%5k>G#kAFwoF&c3zl{FyWDb44oz#>1|`-k$wB z9(Mx^q;Eggf`M}+B1EV}q0}I19MCAzL{iC{lHQDlG1MPbkG~3NDOG<5%uP2XQI_M? 
zlrdbNZ^7Ed!WggPYFa2eC|!Q!2kP=EXG-7wbnO!~9PVuN&bCjs%t?q<_@gqfu9V2} zXux~{d8Z7{bWQJ%iXzAn5mH6GqU$!w-2y=>K|wZ-nX%VHuglmb$`X}?gOGn1FATI< z1o3L^fFPqpaN+ZFzDm|i#}*0_e`kKEi2bK_w)0QS9;?&{tMqkOLB$7-EHE)e?x}=` zIbvwHTHGcu{x+c!OrwcmpD+4)ZE7I!?SC}yi7&W_5u4!8RudD8+i$$VL$8!XraI6X#X-RL%!{g6b zR*3t&eaS2B2OtGcSH1xkvqd(8`XEztkhvNPkLQmiR~ALIN{5yGjb%*m2NIKCqAA43 zbJ&%S*?tL%>r>#-xe$uoUjJ19JgJfj$Do%ECH990ia zvRALd6pdgqfRt|?yQp0vC9Z2b$x~veJ9nqp;CPBoAc79^ArG_2-fhWjhd!n zTmO9IJ6X{oPnB_WBaR@E_Zxm`MQG(F`6py&*SS3WyyCr}girndmJ86)150owuvGl` z`7CtAJkIb!i{jzY(Ggy0X}5Or1o*5{l(!aXe|15+VLPgeo1HlI=R%*w@mGMyIS2$U zytMs&{Tn!?AXY@*+Wjq%iJeZf5wU8-r|-`1T6@UP)Lpch8gGYj-czCW;IBf*&-Shm zw()%v3Jtog_4j$VFMuIF=SYFR7z)>10-oXv^K%5YQrhPlv zuk`nYJwQa2C~}ePi1!pD1VIa^0>G)L$fVelf&pmjRY=H7wOd4zOx^Q>FYB(}2$TUv zL)tRz-Y+g^X6BTN&)uC`d%QLNqIm_@e0NCOkW2iflHjV4N}tdq-H+`L9hO_yM1K9D z_o;%U4q{xluHOP38HUsMVOZR!PuFOflfze5G-#|)4PJkQDS{_DrX@n9&OdO=Zrk1( zO(Qb5>*S9%BB{Dvqu}MLupX&Abd+2<$cn8RknzotM*GQg&6!eWd4s_;BacUnffybOsFg z!Q;19YzKZ#M&b-;m)HvMVn%xUJ(7t$4}If_>mZoHqyDRMEznjz_3DLr9pu38r`}}| z7ro2WbDl_eBP`U45sO2Xq3@Ne{%LO9cfu8JgxEf%|RV=^E&ws#(XO)=GCYs*D&QT%*H^bSb z=BtqF)@J*8Ea9_?Q}2ekDod4rTiJVXF4Kix>Nv8n{|G^`6Q}Jp4_&^LjOpsZ(cWHm z0JP)AU_u7G@o1$pdObo3ZRK{jTUV&834O_{$MrR?)SOXHFaK6uJ2b!oPh2#8gXD)+ zvt(G)uC$16w5|u3Dk`Ri+N38+50};wO6^o6Jz1K*Yk~XiR(Ujlqqs_-|C=-h@|?;TooHAv9-Xxu0*Yri4I(gXok8sKC_~GTMZTy0#@;33 z>Q`zyxJSQ};2Q#oIzv`IfMiy83q@;O#bRXLv^Eln(^v?z`{UU_2KNGR$Z`Kg{Qit5 zxbd}xk*d{RFI(qV#aE?t>9H9QL&I4^-e7b)5eSR!0nJqHkN0_fquzIMWtVEsV$GW@ z(@xz9mwAb?I&{~T7Ht(_jL{CvKDv;d9l+e=(nE9VUNE&OD)xFJ>rcT%RsI}oR6!hw zARvPFXNs*egVGhOCn(rB>eIA<>#LoEiapgXUSrOwlU3gX`PU5eiQ zZ$^Zkzv>%PFX=hjUyp_=G9gPmdC|KA{_Z*7=vI?YQ~Wvff&>p<3xaJ0x358Ky&zD3 zPFYp8+^}JMsaj=tl6f5>FYlQy+IMi+nivs&+rH6;Zb=xRd$aA8u7>$=+6MO_z1)qf z#)W-YB3Cx*fTA-_lIA56yXv8>6S_N;RP)H;Bi&E7C%R$aS8Dopiu|LX)NiPQ6cC2w zslEW>lIJ30szY7lhK5sz3do^}D^DzHATJPV`4c=gYZD>S>1@TedJUNNAF$Zl|3+jT zoCi@yEXs4L^)yzpuUdW`kFTKzwFkPZd!r8$q>ao5<+d(wkR7L+%g@P}d?Cc3M*^Ih zCPxhoN_f^MoxZ)TV^W=c43jReCqB_^;N}@`OMhuIVOZyO!Rz{rs(q%OS9?;OJqygV z5lzJpu)MxYLOL7JTT57(EL4@+h^@YvYgepUN#6-3@+X6%g*&B94sShc#wwYkpp10t zReXSGWYa9gfQ&y&J@IZ5TTWM(v$MMCJ<{jmy`I`Y1yi}ZQDGd@_v$_;rm2)mh`ydn zTmeKsY|>OgBcC@@sQ*db$EqFVo_Ki+mhLGl2d5VmHIi@lVlYUdIRTL6)dCfBAAy2N z+dHK1+hM4b#&ip_q5aoXL2!hLiZacOg!MO|8)UeDY6sGrp9O|!ugc6}nbJCeF298p zOeeTQ){YN>$UN2(6cE9EWgSj3p(-?vt4Oh#bz$Ail&zc-662l4ZWvMZ1R$vUF{sZ9 zVX+Lw$^BUJvFA^%cy0QKF9^l^x_jHMo1})W$7jEj&P#stXav1jIB=rzg{#CeG3$(2 zQjb}1=7gpYj%REpPB3w7Mz=f5^%WNR^J_jd5p%IW^qS}};D4`$=MUw1DYa)@YiK1l zG_KX+utZk0;zDRXmP~P%luSqADq0uKFM?FH;~Qk_KQVAo^#5dphsVb9^z(f*bS+oe z^(*WPyt=E0AT)~fXFmiK1P01|A%@^BeD0SrWRgCNJs)Xzg(}ql>3OJ(;N`V4lPnTe zhpyZNPdiWbeqv>~$Bu*n#%lJ_2+9+5hbc>WH>O9z;nhtT6nj@Roi&CsM2%R^PBeZI z1#XsHBMtOicb37H^4ur0g|OhipO`fqyU76`dP;h#;F_fjk)yRy!}uMWkncV6^UG4? 
zUf(x0Mj5ts0!U;u;ah@?Z^p_W4yn1`SH3ya>0P8EJo|D9HpD?T1KGfGQYMpK|6p1q zbYW#~m1Ek2JbG}Z{JNwZ?95BHv1Ll=lL>IdAvR7AHpEoK=u~>>HSD9^+}ev{xt;En zPBnmJtAyXrT5jH=x}rO{%4;wWf+^*{o2*E`v`HtQy3vQSlXP1U`!rD;uq-Ul?{ z_-pM+WpUSzF&j)f9^s3{ zi>ToQ4wI>0PHp1pDsGBQbW+*~=MfwDd4GSsI}$;p5t%f{^ft!aqHR0DaOkZuk4H|< z(p#D?DZNWzdXBRkDF>;25!>ub@pdJ$g`iJydR|_Kd6NDoBVXTG!y3#yNps0YzN-y{ zPGqEsy0!b`X}un2EuuIK@t8%206FGTqS0!9Ku+ux7(vhXai-#Py*tnR;v``>4{8Zb zXq!-_73%A=akAILVu5dOie>vz&i%3-(QHy*^p&!6q)-kmGf5}D;f#tke7obrm10ex z;gC9$8Cd?A(MxhnKm?w2CVgIwJYbmo8Z}`rj|u9c-6278iQ7a!Z$nF`Z7EVi)T>Tq zMkfbX%h!U0M$GUh#03D};2v76Dy>XI1Mf-aZ30WYlAr$q{oE)w>0GenCCriP>HwDC zM+Cim57;#&SJ&79tjJ69S135{T26j`PoquP0LI7#$mwHYBK&))pPw|^66yqFiSG-k zGt=>qTckpGRmpN8ZvW+x52sFZLAJf7A;yI+l$0vOazZy?1P^dX#?KTeyK>6)8r!}} zn|DnbIHTm41Si%WYuWAqnYU!Gt0%Ka?(@pI2}y!&zGix5tbOju6fa=62s z+8KkRR3Qk(@31^{cOUc=<#&f!TSxd?j7?8X@wn6X>y~-uBBjz@ohejr)m`!qOa94j zLeT$!+_M_Fvt5HGL)rG8BYzbNbNltB_%Y256icpoIOJMxh^akJUk0V4z_&aw+$c;5 zOYCDK4~H}kkch0z+akIJ3y7@Wq`!mC@Lu=SY}AS?{;Zitt5_shFBQmOXM09aM5Kn) zLtkH?Osw(QGZId2?kG@3gt(v^RH!Se1}bh8pwEmd^|)0e$3*DND6RNiQ8i5Ll*kdb z4QmXrZeBUee2u^rDjPyL9Im0Rp7PC%t!4P#V~HbBRwHxAyo@AF*!tO07Z{geQgSi7 zNrWF+DkR)RJ#eX+?j9qfU!ih?YgGl`mQHE0msKmVPP|u5#=AVt{O{IrBx(vSV4^S-GQqZfL?Vy-+a+bpD{A@c#;B;zp}(VrAOZs1MKf7+Lp9FGqpVauslnYS zw8Aikh7S#Mpp7snJKMDPcDh=-Ay}^w?#YY4%rZ%J>>xmT4oDT@N$Nc1w+V2eSX}i= zSf%~1^&0aGi0-d)U&a$ErJCZ;NnHFnJC)F%wZMjO%|Sol?raTLsd}oBAQkg;-WTSG zViB@?9AbK$os}_3BD^Fs79PKB(cxr=UglRYK(->j%Wv)MIatNoL`dk>^`b2+mT#=9 zyXWeLbW1WM&DiUm{o}V-ULFVlf3^jeWrj8A^22BA`1Ua$Cj_(>Ej|QfDIkS90O+jB zhj%Z0XM3RV6E$q159xNa(?RBA<7qmSP-nH{$>1zGE+h#-W&QWPeh)Uti=e}a7lSCa zEtf5!fZ9baGfA`UcXM}FlYSJwoQZ>{P1p!qdmwnXp4vCwrx%P8xchkjCjp>s4!>^- zhy$Cm$)nR!We+;V7HOHB;93RXYEU}Hra&vt%p&lfp!K4bm*0Ox1#NWdDA_cn{#)j1 z)>Y9~EN~T_P~1yP*3G}7<=*Sz`KZZdK4aE{r08O%XVvH!yALZXl0z-cm)W!6XC%e} z-wNc&PHuN&yk6Vl2AeHJyo6S@OO5SV^!{))ClIqh)Jr!u<8T)u%^8K{_G=gyJX>jt zey~vi-;#F6ml)11Mr1D8(^!#jdPf#Qw|S$?)D+)nS!cpxNVM`woY(M;FU#T*{{$f1 zye9B3(wP}V|8a09x>RK;kN241#6jd*;n2WkCRUW?qsxUR?F9UmMnnv6%o}~aZ)n28 zOAaUjVjb7UUx)7dXuc0P$6jZoPFV(K%uj|iqht~`GwCllukwDb{E}O#2|}Bj*ktJP z(_wZbP@}HL>b3!)N8N)~*R*A%mDv#OF`Y++AXJ2yHcC2To_86Kmv^93E1IP2>Qjh( znQHqEG~wLT5Z^YEQLgURff8lv$TtZK^_q6*NTMOJR?Ey-jS{eb`tO-Kupg*C#-Cb0;4*x!aIoV# zRy>>Udi>N#9GzDZm~Q3U(@&Xx-RIM0Dys{t`P4-ERPI=Gl6??v3uVC`lP3r{ zag+9B4dpP3Iik_yM!cksD&yAnE^nxXO=92%UXEkehJZ!|CuAe^QQ5BOV_WH;lAAaD z5IK`JGJ%Bz1#ZTC#$#0qMs}4fg*J)U#PpuAN}VlVYP|7I<< zzw7q?Sc3y-mkv<15U#JsYL*sz?q)Uuy#&OYv`mpUK6OsWI+akA;bU_G`A?tLf<0c6 z=sF)N5?mKQmCM`Y|J`Dz?mUypm0I(?#~d&sW1z!2!4%%m>;Ayk>3ikw?Apzt*9IvB zSPj5L%mtL{L+n{zPs1pO74Wh(_*QuR z%DbGqdio~gyK;v|dY*9mg{4TWVE>mNXbai_0wl;;08xu=#C(F`27ArzkjD9!Ner&X zTI^Fi@BE3Ot7T=4tn?M_92f^z%7J7t53QseNmf98LP&W#;fO&U#?gU#)=R{F(>68B z(IZM7w5=|uyNQnS{lFn+9a_Th!U8=npdTe<_T^VD zT&&Qo34L9R=k2j#<`l~fH6Yw8;uHb(JCb&$ZN2h#*VZ2AEYH{849_7pR|9G-Kd+sO zyce}uDRIqic{B5??xPDTYoDFfc>aum<%cYwNTlywC!DAl&f8;UbwU*?1m<9L6T6Q`QnoFb;wa=Dg}$mXk^jj>=saM#8<>?`O8OHZJ0PEO zk|7-^L7?(Oaho5~DNqTs+01ckEHuotTTT3N3vNz1-7yMFO; zRw&6Ir+ALl1ku~MJ@#|IZutuKT8Fyb{9J?%qXZu25ViwK2W4d2`WFDcq(HyGh_j>B zl?VpyH_}@78HftMUZdPTy-~jG3OBj6CoviJn)<%n5w>8tdXTyLVD0Y5nlB-<{G*6O zdt@5*EZDvrf#edZX`DxnYd**@c3d5AxLSC4{(OjT1l$f&ce*=@+riW{K)f7uTA}Sf zdn>t{gQ=jj^*|B!gxf^6zx^|h$aAE)*iAmtLcz8X6>=vnQk_mE#fZhH^cmNAp};jv zZz<1A7aH%P4l+C5-h0<{T^j=`r3&z{CIf?s^MpK_!qQY!9hrYsonP;Q1O7?k+TXcm4`b@q_=WE3J-1U?hyGQL>V+2Z4aV~oZHww7(%3cbCu@5~+ zyA6PFWAM4lv>v8m%o~*{^9kD8+Ik)yZ#ujA^}Kdi=@}VO?d@msa&k)Ui<~%wp+&TW zo=Wo}+a&uhp%ba`KkHaq7CErUVXqDQ;lL)TI2@{{WRyI)WYjzUb$oGB@y!Wk*U!-I 
zUrohP>z>d+HH8$}ssQU@pR62B1YsGiy_EGSLfT_-pG+=UkAQL{>+Na2+oT*E`96em-ALI7`#re-)>m4WM@*gv~IO4URY4jHY9{s z>WAmnN}R8e$5yCgQ&9y77Y3TYmcl*|i4lT8iSvg}1tL?QNFeVz?dHXAK z3a*c_MmxPZ&N>EK3r=-iZ<2aEZwt=0FkN?%#FH6z@0!L1$b36%Wx_}P>Ejs$848tug;p?D6CZ-TAhhlU)PHO4eell9Bedl1MoC)U>JwJt< zOTjutd6xZJ=)$hkxH$|q>`&6HyEaQDldg};6e@=ap&k_rDzBPRc(+K_!QVr7bZ9O` zI+?ssp9;HEKl6>Ji{~h$A0z~z%D|t&f!fnhDe%nb(&9Q1g`qP7Bbgz-5Ttq+@y~=NBO%)9tEa|!kZ^*B&KaG=oKKjlyp5r!O!`R)vYI`2M!#!03VmU8Q&&969o%L-TO@4NE z&zaFlfvBh`DcwA$itkfa;rVMHTpJ7AddVS&#RqDw5Q)T3*O!g_Y|x#%?>c6-QOe2q z;7{V~9N5B21nSbB40K1i1A3<5j5Ld_uTHEQZwdvOlI9JZJssy=Sy|UmW$AFUyGxcX ze1qB|Z=IM;el$mmKe}-F0R6ML0QXw)Bb?NlFuHrbu13U1HP6k^H&6PuXZr+d;(h9G zIX~h8)$HP8#m2;bD+S~5a$t_q6?Nl={*1G;rMB398&}sZaOISmqRgV2srX~NCO3o?Qi*XQ@hY{9427BLd(mRPHelqOBvR*{9W z78X=gr=FL-9p~Bjy~exnY*~!0!kd-f2>yo?Sx?_^z_+}bC8b@z7D)j-cEe#vH%xqQ zQfGgA)MgzrX76{ilK7(u^*=RR@K4mEHvIkOq9~t}`Z!<>_+~w`3P;FWV;r&!`LJ{%jXKpG z%fs{I`wc1^3*8RJ70NXm+f2yzRnP4FKFTuGDCWWwZT`XYB2>EXc5<5P1v$g>m`dqh zbHAqXc)tH9kIYS$4lZRoTvs$uQw#HZRh*x1JI(ua4VrP<%1^+SxdK(GMG(SQ7&){- z!MJKo*gkrf?eIL5aXRJV>RLUTrLO~lR3iydC{)#WCLa0Rdb~PT?a`Ht3$NofjdWJ< z>|Z}wo|BoJzWDlWJc>J!(Y17=ScsAwg6r{Dsl@#drMaO^N0@QBr+Zd0=TxYc))2Qn z)$pn~j<*FLpe+{gE=Q66_2r7@r8+U1&)Tqn8^nh3Z{OzU=C+Tuc7%NY>UOkY^#V^w zLa_d2Nnx{B?Rq5F(*+6kJMgB9OpK`DnvFqbCULvx?cCT>r6U1KBbn8dHF?l)Uqy9J zeYszmd}o*YhP4wY7x3aVeRAS@4(T-uOGDrJNhsbhv7(y^HIzJ7ZQ%#dS4?%|*Vciu zDjrPioM+9Ynqq2S$AxI->~ajTp|e1&?=qKT-T-l$z5mF4kxkut=1QB@{);CAXSwtY zzqUum$RaV83K0{(&+mJDhzwn_Htq6go)Eq#NtM9`HG6gI!K7{r!AJ$E{dqt4Rj5~; zZ-se_VM@l7s)p6kfDf3*NW;m-P^VjUIu7o*B!je2goaYwr7^#@h`hUwGUPIkDn+Af*1x32os}MDzIywe-eG$e+ zfur{Fp@W|gzB}kctBizu;gZ7k@JJTIOXzT(XR6`o0q~erJo|9#AIp!K=>@6(|yNxw8XAq9)YFMn6 zo2Ud5)i*y4Kw0M6EZT|87HCjvRW;hVYVm7L`)jA@s5NrZs^JE(Ev z%&v^Le=YL++0Q4&7ueMpRqSt#>^Fv1myyizXRF+q_os3HuNZdd&f|<&ay-kpQil8c zE$ixhq#7Z*x;Urliw4FM{>g=nHqbFuZazMxYNFLgp2Jf14#m<9@UiL^`LU;HspM;u zH>?RGm-HU|(d~e>!d!KH=NgxY%z@YG)Ihn4IHV?XZG1A$Z|YhampY^1vD1AdR&SHy zOU0LR%Db5*U%AveCh~{sXjO;OwD_nEvD^?_aB5r+4b$)5%qFkYGCT8miJQ0d?R!%i zN4SS6H2&V)DbG<$`!!enRg;Dedhsu$peOE9XvD*jWlzwS`52vt8xksNi>s-szLD4K z>gYfL=;urXA3_lAEOaG$**AvWr>FJ!O0xgD)U1WVKz;!FWsyz$ZPq!r;x*^EYm@xT z<1REx#Ourxp4jp1iRO>pu1uH9|1H~e?bP*f&n`)%gJ;1F>l+27eAn}^($cFheTIMb zj63+6>@NmSR5Il|+sp8YT#v)$yj@Bf?Rc<^&SuIv(`QlHplsM4z@e#GpOv>#Kks~| z$D-1FZR>pkLvk!jv*cXMF}P^s2-iaB;H|X7QR(R)!kperpKsq-U*BnPTg)we_wHf5 z6Acp(`<}+bLfGoU+PxbTTV&m$cgCQz4Ne4@WRY=HKU#b-Of|Ve&OD+Cxh8e!)*3&Z zOr=kyH=tHNVvmIgJ@_s*R%|e>@e}K}KwsXR)?;)Fw4AOI4)y#1Yb-GF6cjTp;FE0Z znM$Wj=Tfr;$H_&DnL#gP!OxEGJHnzOxKG}LW~A+0N$aWPu`|cl;@O@zmXks_#y)wPE;V#y0>8{>FcfyE z;P0|$>ekMYwt$X{6wDQOHyUiQ9WnIbKcip&uF4Jkx~*3VDlM&JcvvjIq-3qqK3J!(-w?|>J zwvcf7CW|c;TA+4|s%}(g7pX;%#+TJM6E?iSzclJdx%7H3p3Gs;fPBaM>vq9)qLIJa znSXymOkKE_D+AuW9$LGZ%c&w$txSx zY0J9}!|~tNFO6s$!uA&iJj68~pknSmG&lqh|KC61>4b_Gt;kBTy-f*}B0FCFB|X>)^k1 z4jA9xky7cY`kyk5{ye)@w%3s9wf@*3udZgspR(+`xy4ESDDD6tqNmW@(mQ)v3UZWZ{6$$;Ol38nry|D~zvit+Q0UTQc9_`X+ZDQIN*VqlDZ ztk7iScO@F|GE+3Cd{VS&4ECtMt@n=@k904M7L^LY*ZI2)i^kZ+Bll+?fbL88xCn&m zL7f<%9%o)++~Q&>sQd)NTZ4|4xV_KK&lfS{zgjIUERuFUV~rfn|EMzdakp*|SyEmUbfN#l^%5P^eB~#pJ@4^Fu#J zDwYA(Oz!ONmaZ67VD^E5$}~%LGY(4g&3Sdq`HogrgHpdXo&C}TURU%TJQYH-d}KN@ zD`bzZ`JFSiJxzLzW1-s~T1xqrc~IPIeS}gi2^@u~G&K(};OabohCiK~+tO&o@GzaY zi&Xb^ zHX*@fM9&4+zS@C#2jW!b!(yq7HkWA;`9U#(@g`i_{HMO#=>%ZrO@^Eqe5TRnlFO(H7gQ{a= zQ8cWsKPHWg{pMD4DYgB8aGsw49mN!pRA>PT8pOuLEKV>pGrJLdm8}Rl%gxE@+L5^< zrh|L1moDaoQryYJXCt*mM~0cIwu^q|Je~i~M;`A1nkY|*_!wbHJ=@p@XVB#z5909y zLO0x-@Et{2R}ES1fis#Y7apouXDVY(60?liB5rewzJ8AIRQeNtx>&$T)Zffzff*P) zm-HRAxp>jM{;{xx#Mf~z)0mhT*R@$$><<0~<*TKk(hiaL+uJ0&-$F%2hw5c@^z=+B 
z?8iz$BWY}-azzwJ*wzLr5ooArK1)s#zCqXjjGJ3goM@Pss>oTi+WsVB1m6$21G#w0 z=SRxN4R38Z`;8?}V-laG-ZVa7_ZtMG)EuG1^*%LwzE<^gH5_V8@Q{J?J}yH!rK`m` z91$9143eI$8`sb9ynOj`)R$swya+bPB7lIt*DJ2R6f0Xvy2TA^ET1k$!5x&2jzW1=h2SE z(W-uA#MTSW*<1w%^fyg%WNhqnDGo#VXPyhMhF?%C*zW(T^R4&X&5jG9bAXjGNmO=| zK1R#<>{gCwa!uy#I)keCYvvkheD{ddH8t%lyOY+)q4=qsTrM}9y(2Ey*}QJiYh!9^ zY7@`1!5m}UH2Cba?)|sUF%r7>Di5vJRdn%*qHFCGKMgN8?S=)E(l(cDb#`{#n z#;COT)yE8XF%^9JK0XC|YgvBNNky;M;1Tx2bI-n6Ci{HB-6w`#Eb`tMQ|4oV%;PYL z;(0zSIwE4IP=zoianxPxE#dAxmk2cxRC)OhQ~AX1_q)cHew$xfkV`o~mUFtqe0PI< zetv{AGicYj?ruIbPTt$bCT~~Y7m?L{%Ot&Yb4&)^&&!2L(fDD z$ePD~tZ49Q+i=|Lm);}uV}c?k;m)&RfhO$TmVMJqJrV7mzirhP}8Q_r$GPeWJ&`B3u`(w1=+B ze!0qQrTs(4`b@OEOqeASqkMhb_01vD4Sz*wtz6%azg|1fH}(jSi>qq7*Wv#2w0N$N zq^s7pD?T8I)Xuh7hi1jQD3PaZU)7l4+b%nqyCcZCyjA6#<*laP*-zfs+xvjI2<;lE z*rbmy(H={SkIr3nY1WJ)%-hbNR~&O6=zK6OsykMI&pF!*cPr|eO{U>X_z8cvl7&+w zUo=*4u$C?xj~7|qjt>WD*Vj+W5?0exf3nUY4coDN>`LhfyOClVX|?e>?bdF+X)-p* znrzVr+GcZVL~QZpFQWAxIDXmSk%>P4aeU>!cEu{b=?i_D=AGXooVgNIVQidX@hk5? zQrt(6?)H$6@9DSEx@y0&-kR%z?%ZZb*D>!*(^1DZBx&PjT}4;j0zr9@YkgnF(AY)rBH=QOUkJJGB`H2*uQ%n?{BZfH62o(;VIQxg=BH8RoqGC(2wp?n4@+v z6Dm@#Ljrr#P1)1k%a#>{cc$$1;t}o0EZw8}O)@M(#QzxtSDYn>^s6^8@)VXeU)0X= zSQCCX&}3CKuaZTknYl@LMvK!boh6!Y;7u$|kBmw?NA9P4gV%`V&?juHt(_%hI=to7 z5lSlaL&Lp4TS*1cn>y-R2z8-L<{8ONmVpx>uDzAPTIQ>W8Ubn8k_%^ zmQ)C1gUL`P?r(SX2&Z!F#jB@ji$aA`6nuOVDf?hGCF;ve8Pry`w)ikxSjr827rOjn zw6>aY%w7I#C(AWi1X#ub_TBCl!M9l7EuZn3nf2m1&tmYa^r5f9MBJIFr@jBLy(^E0 za((+nR3ti-kR?t;n2v3-4AZ8NeXQA9WS@~Wld+ag;z+hIDBIYV3I}5+We+o>WJb1O z&=?G3|2^V3=ltG2pZCxApLhP6nP;B+nde@f`?|j0>-t_=`WGj^XffZGEXh$ZBqSwy z$(H~d%t7wBqtFIJx^!n3zCLP7M_oV~lbR}f)|9YjHQ-}bwP_&KVFLs6jMmOWVQwi3HcQU$1h5B94-4!Z8@@47q}zu}Q8zT1EOnTB zFD5ehY@r;BK7>(iCt*PH%AZ=|ILOfXZ5!Mlu-uR82ANR{msNHAYDRS_ig`%=21Ar) zur7x})~X?RF=RP?x^EoV1P^{P#-a>T@>dP$FPT~zGV6=J>uMsdQClppATFOk%3W=L zKWJJ4-{T0K52?9GVl@50S+NxxBIZroq#gJW{tsOXiF4bz2j!N~sKtXZyCbn{G)f#1%^z8b!G=gu>YqY0ouCn|O!o5~W$NHX6}zI~v4 zVCnnNhSxRN7HYYgwB+~k_*_4U1{YGdwo9qYAr4h~j_Ol$4$cfB0!N)Vzj&W`(z5G% zi)bCVbj&g3zHeEp_57N}vT^0HVjqBJpN{lS&r^99YIo-2%C+Bsv;uCH{^zjbm$1Z; zRypG=>_;QL$h(F9HDzVEfPL~1w)`rYWBT+ZHdcyO{DyXz#}y%}$tq`}EJ{I{uPs5^ z-hfj*i}jO!SH^f$c$Wk^e)%x*DO&&8CMo4}m|PM|{-Q>x8b*Z`S#u-?i4~u>-jbV< zU)uo{pm~QMiT0>mP`*`R1;TGWy`OwP5>xa^!pFyN0|@l;m@0a;JD%IEJ=RHoB0_b{<<1_g*{bg%)tH6*|7+ zX~4BDK>3K*zY7-f`zN{Xt)k$TUsj3&12{T%(?f!JWw^qjE&z}?wM(S}%a0$m zc&$vL8kN%spiiy0ap$y^y{97oTy!-zH(!L1L~Gv1*S6YrA8<$BEA2(BE@uT|+PJm% z-S?AZv9u=JqT?mPkxx{ih9}o|;zXcEY{2a%#m@7v?LZvfhi1N*OdGK-uI#r}m@i|! zqNC7bY5tO0UchVoE=cotvpE&H>jJsV>2B$P1jkupV|m$Sd!f+fD^1PoMZwcvdf#}M zxfd`-2LTB(RZIw()g)r&Y~UW%qy>VW6P@Bb#>d~i}yZ8Bgm z`E&C6-|;6uMy14!Y0+^=38}P`Dh3^4dil^dI{Y_48dQ(#>m=CARlTK*nAEs?;GpPhBh@9wq#`Vg^>Y4MHY6Nu^ zSCDhmj5{2g7N4zF?y@GGwiikA!Xn7jQcgHU=Il)$pQRJ7XXxP9tXZtr47sA6?1Rzh zQ)pB1d8!oVp5}W|j&?n(&?}qjSTexITH%CTXYzN^nzz26dygl}ixaQ+i#+k?$SN(#4G*0{9DT}7(h z;H$rW44e-ryxLy~pJDzdX!W@2&{&mK`8<>*=lt58@e~9ibs(&8^{>Gpb z5Oa2Or2;FKGYsA#{+cLUIsgA?C%c61DkjHIUtB5dYX&TquV*aWxF%oHpGa7kbr z7lPOazC8EL)$98WJw1!zU#RO(!Jq?bIzIRB-NTEEizlqFJ@egcPOi9nbsdN(OBUr? 
zu}ShE-FmBEM=l!yz@IJax)QEj?u6tD$;a1U$4ypCGE&V-9URDCeYeK)7K|Ku!B(&o zW4Wib*^N_vcn1A6xo^Q}Z&kvMe_+r6rG_*mBL;dd6 zRTK;+fHQBC?e9LCu5F2t$ROroFx_w7ytyH}v4J)&-LiS#Dp3FkQ>SP|p$tSppaf5; z6Ovk+LO=*gM8uIm*<#FfF)=YyE326;NP&Z2{AZQm+S-8S4cru^&d_YxekEbEu)Mtd zCQuS{>!!Co)(@y$e$%Yy*MS#s6=EG}hPk&2pPc-xKib+lN_W2#E`~Odg?WO}>-TON zZG~*2`2_?j($g;?GF%R$Q&XC&qG@xts_RmdQd0pW-ro!)%h*1ke5MHz8X9V9U@$z$ z{?S0FZTF!n1wg~PSs7(rZve@OHP#^H&9Bu2N9XmNW9}QP9K3UhbgB~$SkrJ~=a~iI zF|bZ1R8GZO6;xNZ25d}sySln|I6LR1DPeKXUt}?bM+teaynnEW9T{ni{Ys+qSVD{d zDB&Jk`NY`WwP6?%nKXp;d-!psN!i{9pxgtAlVLCbSz>RrUlR~!o4AsuaZ3Y(_Y{IJ z$sMo+-&9a0V2BJt(U5yX|Ec8-;shek#58`;%y!5fV(GB10~S9AY~Lv-RrN`Jw~~1L zH-|$xQA1{c=Sit+#SaXOFcqx$tSvzX0V#39lhOsAAddD-m(XsFtwuoL{A*d$pK%G) zIdH`t-NhPVRnQGRUEMCuoR-{ug|PhlBQz8&D#L`2?% zU9XZLTjJ?Z=5oknjd+&r*N*P)6-`4et*3P#1o`=wqO{A)ye((c=o6B%vX*P_jn-qf zQOjdJ6#iJ{ZE%ws*}>xu0T9rQ39y;}J|!O_gJB*{^}F3dotmPv17!!yy}je8pE8L( zrDRSrthwx!2+7&S#Xrh}#qV~vFbn+-`SC1Td*;Yk-=P9qD8z__oaU$t>FvF}4NB^t zK7ZkYD=(;aBJ^$@P}B%jujrGS%5}IeGueqP8;_gyA}v$~5O89EsC>qE)xs|%1*0f4 zmV!IvscKp2nn1)*l?w+)HQG2buu%uh=PfUX|8+*M>Ra*j0hV;{CR?zha$4hJWMl2l z9>R4rcutLQ{EyMf5u2l&8nj3)$STG^x@x$yt!?q*)$z};IccBNi6{0()J6bhUSkiW zkC}`buHgEUwd(R4a#GP0mi2Y4LnWeV7$}(1z`iif`R$7jl;bo6UvuYL zE{;I;g2;NH<_u~)`v+imv>;}+ZLz4IlZ+DX@qhfN3;rQ)}IjYL*E z5@VLy&XN7kTSBtE7lbh-{`n=lf{TwX2lekieW8mIhXe%YHq%WZz6x_tEv-^v^fhKa zz`eBbF)@jgeKO5VcuW7R2H89juQkO4#F33ZRLtEk3s#Kfk?yDRcU zq|x54V+|+41%!l_YU&h3wC(M4sg-=F%|{#P7dl)9xX~9BT9!>Ott;)pLZg>nY4IV~ zIH)`Gm0UX;6dk<<&aDSr_T1`^N@H+--Rk;Vce!3ZSSZg!57wlFgs44Sn?*3}@|b$_ z#Eu7<+H`)0bDWHyorOs#DfDZAK{tZ&tE-m6k4&MK;4Uw9$sr~^o$lK#v*Q^-Z;LKt zva>hUSy;$@Mv{9X>#!4t^a&9#b!`FZYRyKIIUUHs*lRad@QaR_tlqu*4p3Thqp!xv zpYuu8jNxq0)XZQC1<1{{1pYsq56ui#v}$}=qP3>2gxv#BnAxGhxvxJu!TuHTQ(K|W zRzS>F&Ffpso@D)j?%aq7uQU~C)EA1O*2Y?1UU=YkpRo1S2`_m+WZ^l`S8v9~_*ygQ znC3rUDO^9<{8UUG)gh5)MQgai!av$MNbE%n;pwQXuf2KPCc=~~;*&h?xD%RDAM#}l z_L-7wOBc$EG6(CmBm$vpVM+1w}jK>skTM{)JjGB60a547G|WN{dP z&eD6L6S5yHyvVM<348v4+-6C3Gdvb^TV7k^HL;Kl%UXjSNztH9oM}1wD3dECC8daP zKc4#e&s1x;i;HKjg=j+#UAkLt5X3chJhF%zxVOz0aKvGla93w%wVn6-K9%2y*jbrz zJiRMIJ@G77q>J92rj-#7( zcf)sVoYR10Lolxtm!KVo5)WW6*_a1PJN6p6X1;3&sq4s*zJds7xlnCB_k`F zM0rOC9G5A8Lnag661@Rs#vgY&DESI`!~2^irD;}LV{msaRkO%}ErYOFwdB-PAfnq4 zuuzGUlpW#deZexkFVScP7HzOm)Nvl30YKOf^zhz9SUy1pY!~{Y(QhYuI9j(Y+s;nF zu8eTMQ?k6%tp&y+0DH?zs4TF(aeThH`B8B!PVDy`emxk*{h_VxO=IJw_KzPed_h8x zq1Vv-kn+`(z2iWuBO|wsYktI)Z~GBX*T`+%3$RlO@km+V6R1OE?ij{pDw diff --git a/docs/source/nlp/nemo_megatron/retro/retro_model.rst b/docs/source/nlp/nemo_megatron/retro/retro_model.rst deleted file mode 100644 index 5bd7f03f77ac..000000000000 --- a/docs/source/nlp/nemo_megatron/retro/retro_model.rst +++ /dev/null @@ -1,167 +0,0 @@ -RETRO Model -================ - -The Retrieval-Enhanced Transformer (RETRO) `(Borgeaud et al., 2022) `_ is an autoregressive decoder-only language model (LM) -pretrained with retrieval-augmentation. -RETRO features practical scalability to support large-scale pretraining from scratch by retrieving from trillions of -tokens. -Pretraining with retrieval provides a more efficient storage mechanism of factual knowledge, when compared to storing factual knowledge implicitly within the network's parameters. This approach significantly reduces the model's parameter count while achieving lower perplexity than the standard GPT model. -RETRO also provides the flexibility to update the -knowledge stored in LMs `(Wang et al., 2023a) `_ -by updating the retrieval database without training LMs again. 
- -For the legacy native NeMo RETRO model documentation, please see `NeMo RETRO Model (Legacy) `_. - -Quick Start -************ -The following instructions demonstrate how to preprocess the data as well as train and evaluate a RETRO model. - -Data Preprocessing -------------------- - -For detailed information on data preprocessing, refer to the `Megatron-LM Github `_ repository. This repository contains scripts and comprehensive instructions for the entire preprocessing procedure, specifically focusing on `RETRO Data Preparation `_. The main stages of the process are summarized below. - -The outcome of the preparation step yields a processed RETRO data directory, fully primed for pre-training. Specifically, this directory encompasses the following key files and subdirectories: - -* ``config.json``: contains the hyperparameters used in the data preparation step, which will then be retrieved to use in the pre-training step for consistency. For example: sample length, chunk length, data splits, tokenizer files, etc. -* ``data``: contains the original data before any preprocessing. -* ``tokenizer``: contains tokenizer files used in the preparation step. -* ``db``: contains the chunk database of processed and chunked text used for retrieving neighbors. -* ``index``: contains the Faiss index of the chunk database for retrieval. -* ``query``: contains the queried neighboring chunks for all training samples. - - -The data preparation process contains the following main stages: - -Build Retrieval Chunk Database -############################## - -This stage involves creating a database of text chunks from a corpus such as Wikipedia to be used for retrievals. The chunks are non-overlapping and extracted from the original GPT token dataset, with each chunk traditionally being 64 tokens in length. The database is stored as a 2-D array and is not a relational database. - -The main output of this stage is: - -* ``/db/merged/train.hdf5``: the database containing all processed and chunked text. -* ``/db/merged/sampled.hdf5``: the database containing a small portion of all chunks, only used for training the index in the next stage. - -Build Index for Similarity Search -################################# - -The second stage is to build a search index using Faiss, a library for efficient similarity search. The index is trained on a subset of the chunks ``sampled.hdf5`` from the database. After training, all chunks are added to the index to enable querying. The index accepts 1-D floating point vectors, so chunks must be embedded using Bert embeddings before they can be added to the index. Particularly, the stage is comprised of two sub-stages: - - \- Extract BERT embeddings from the sampled chunk database (``sampled.hdf5``) and use them to train a Faiss index. - - \- Extract BERT embeddings for each chunk in the all chunks database (``train.hdf5``) and add them to the trained Faiss index. - -The main output of this stage is: - -* ``/index///added.faissindex``: the trained index, with all chunks in the database added to it - -Query Pretraining Neighbors -########################### - -To speed up the RETRO pretraining process, you pre-retrieve neighbors for all training samples instead of retrieving them on-the-fly. In this stage, the pretraining datasets are processed to find and save k-nearest neighbors for each chunk in each sample. The neighbors are saved to disk and labeled with unique properties to ensure they match the pretraining configuration. 
Query-time hyperparameters can be tuned to improve the quality of the neighbors. - -The main output of this stage is: - -* ``train_``: directory containing retrieved neighbors for all training samples. -* ``valid_``: directory containing retrieved neighbors for all validating samples. - - - -Train RETRO Model ------------------------ - -Once the training samples, pre-retrieved neighbors, and other data are prepared, you are ready to train the RETRO model. The training process will use the output directory from the data preparation step. We set the path to this directory at the ``retro.retro_project_dir`` argument. Many of the data hyperparameters will be retrieved from the ``config.json`` file in this directory, including data splits, sequence length, chunk length, number of training and validating samples, tokenizer, etc. - -The table below lists some of the common architecture and optimizer parameters that can be configured for model pre-training. Many of these values are set in ``examples/nlp/language_modeling/conf/megatron_retro_config.yaml``, which is used when training unless being overriden by the running command. Notice unlike other NeMo models, the `model.data.data_prefix` value is set to None, because all data information will be retrieved from `model.retro.retro_project_dir`. - -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| **Parameter** | **Default** | **Description** | -+==================================+=============+========================================================================================+ -| retro_data.retro_chunk_length | 64 | the chunk size used to retrieve | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| retro.retro_num_neighbors | 2 | token sequence length | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| retro_encoder_num_layers | 2 | total number of encoder layers | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.num_layers | 12 | total number of decoder layers | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.encoder_seq_length | 2048 | token sequence length | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.hidden_size | 768 | model hidden size | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.ffn_hidden_size | 3072 | model FFN hidden size. 
Usually 4 * hidden_size | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.num_attention_heads | 12 | number of attention heads | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.init_method_std | 0.023 | standard deviation of the zero mean normal distribution used for weight initialization | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.hidden_dropout | 0.1 | dropout probability for hidden state transformer | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.attention_dropout | 0.1 | dropout probability in the attention layer | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ -| model.ffn_dropout | 0.1 | dropout probability in the feed-forward layer | -+----------------------------------+-------------+----------------------------------------------------------------------------------------+ - -The following example shows a RETRO pre-training script. The rest of the argument values are retrieved from ``examples/nlp/language_modeling/conf/megatron_retro_config.yaml``. - -.. code-block:: bash - - python /examples/nlp/language_modeling/megatron_retro_pretraining.py \ - trainer.num_nodes=1 \ - trainer.devices=8 \ - trainer.precision=bf16 \ - trainer.accelerator=gpu \ - trainer.max_steps=750000 - trainer.val_check_interval=10 \ - trainer.precision=16 \ - exp_manager.exp_dir=/path/to/exp_dir \ - model.mcore_gpt=True \ - model.tensor_model_parallel_size=1 \ - model.pipeline_model_parallel_size=1 \ - model.megatron_amp_O2=True \ - model.retro.num_layers=12 \ - model.retro.retro_encoder_num_layers=2 \ - model.retro.retro_num_retrieved_chunks=2 \ - model.retro.retro_project_dir=/path/to/retro_workdir \ - model.micro_batch_size=4 \ - model.data.num_workers=4 \ - model.data.data_prefix=["none"] \ - model.data.shuffle_documents=False \ - model.data.dataloader_type=single \ - model.data.splits_string=\'98,2,0\' \ - model.optim.lr=6.0e-4 \ - model.optim.weight_decay=0.1 \ - model.optim.sched.name=CosineAnnealing \ - model.optim.sched.min_lr=6.0e-5 \ - model.optim.sched.max_steps=650000 \ - model.optim.name=distributed_fused_adam - -During the training, we can monitor the process with Weights and Biases (WandB) by setting ``exp_manager.create_wandb_logger=True`` and set relevant wandb arguments. -After training, the model distributed checkpoint directory can be found at the result checkpoint directory. - -Run RETRO Model Inference -------------------------------- - -Once the RETRO model has been trained, you can put it into inference mode and experiment with it. -During inference, you are not limited to the indexed corpus to retrieve relevant chunks, but can directly provide any relevant contexts to the prompt through the argument ``neighbors``. -When performing inference, the input for RETRO differs from that used during training structurally. Specifically, the model’s input consists of only two chunks: one for the prompt and another for the answer to be generated. Unlike during training, these chunks do not necessarily have a fixed length of 64 tokens; instead, they match the length of the tokenized prompt. 
Run RETRO Model Inference
-------------------------------

Once the RETRO model has been trained, you can put it into inference mode and experiment with it.
During inference, you are not limited to the indexed corpus for retrieving relevant chunks; you can directly provide any relevant context to the prompt through the ``neighbors`` argument.
When performing inference, the input for RETRO differs structurally from that used during training. Specifically, the model's input consists of only two chunks: one for the prompt and one for the answer to be generated. Unlike during training, these chunks do not necessarily have a fixed length of 64 tokens; instead, they match the length of the tokenized prompt. When context neighbors are supplied for a prompt, these neighbors correspond to the first chunk and are processed through the RETRO encoder to generate text for the second chunk.

The following example shows a RETRO inference script. The rest of the argument values are retrieved from ``examples/nlp/language_modeling/conf/megatron_retro_inference.yaml``.

.. code-block:: bash

    python /examples/nlp/language_modeling/megatron_retro_eval.py \
    checkpoint_dir=/path/to/checkpoints \
    checkpoint_name=/checkpoint_name \
    trainer.devices=1 \
    trainer.num_nodes=1 \
    trainer.accelerator=gpu \
    trainer.precision=32 \
    megatron_amp_O2=False \
    inference.tokens_to_generate=10 \
    inference.greedy=False \
    inference.add_BOS=False \
    inference.temperature=1.0 \
    inference.retro_inference.retro_num_neighbors=2 \
    prompt="sample prompt" \
    neighbors=["sample neighbor 1","sample neighbor 2"]

From a3c377c3c2418b5848ee366e23ba0a4546f1ee11 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Piotr=20=C5=BBelasko?=
Date: Fri, 13 Dec 2024 16:31:50 -0500
Subject: [PATCH 038/128] Remove auto-import of lhotse when importing nemo.collections.common.data (#11578)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Remove auto-import of lhotse when importing nemo.collections.common.data

Signed-off-by: Piotr Żelasko

* Fix test import

Signed-off-by: Piotr Żelasko

---------

Signed-off-by: Piotr Żelasko
---
 nemo/collections/common/data/__init__.py                       | 1 -
 .../collections/common/test_lhotse_prompt_format_data_types.py | 3 +--
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/nemo/collections/common/data/__init__.py b/nemo/collections/common/data/__init__.py
index d4b43d2b4edc..7a103094f3cc 100644
--- a/nemo/collections/common/data/__init__.py
+++ b/nemo/collections/common/data/__init__.py
@@ -13,5 +13,4 @@
 # limitations under the License.
from nemo.collections.common.data.dataset import CodeSwitchedDataset, ConcatDataset, ConcatMapDataset -from nemo.collections.common.data.lhotse import * from nemo.collections.common.data.prompt_fn import apply_prompt_format_fn, get_prompt_format_fn diff --git a/tests/collections/common/test_lhotse_prompt_format_data_types.py b/tests/collections/common/test_lhotse_prompt_format_data_types.py index 5d1bdc600aeb..ce2026829bfd 100644 --- a/tests/collections/common/test_lhotse_prompt_format_data_types.py +++ b/tests/collections/common/test_lhotse_prompt_format_data_types.py @@ -17,10 +17,9 @@ from lhotse.cut import Cut from lhotse.testing.dummies import dummy_cut -from nemo.collections.common.data import ( +from nemo.collections.common.data.lhotse import ( NeMoSFTExample, SourceTargetTextExample, - TextExample, get_lhotse_dataloader_from_config, ) from nemo.collections.common.tokenizers import SentencePieceTokenizer From 9d3d36e6a399db29ecd76830f68711bfec2d5339 Mon Sep 17 00:00:00 2001 From: BoxiangW <45734921+BoxiangW@users.noreply.github.com> Date: Fri, 13 Dec 2024 14:39:30 -0800 Subject: [PATCH 039/128] Fix example configs (#11571) * Fix example configs Signed-off-by: Boxiang Wang * Fix line length Signed-off-by: Boxiang Wang --------- Signed-off-by: Boxiang Wang --- scripts/llm/pretraining.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/llm/pretraining.py b/scripts/llm/pretraining.py index 1b4a33832a56..c08ab353bc3e 100644 --- a/scripts/llm/pretraining.py +++ b/scripts/llm/pretraining.py @@ -14,7 +14,8 @@ # NOTE: This script is only an example of using NeMo with NeMo-Run's APIs and is subject to change without notice. # This script is used for pretraining on local and slurm executors. -# It uses NeMo 2.0 recipes (https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/llm/recipes/) and NeMo-Run (https://github.com/NVIDIA/NeMo-Run) to configure and execute the runs. +# It uses NeMo 2.0 recipes (https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/llm/recipes/) and +# NeMo-Run (https://github.com/NVIDIA/NeMo-Run) to configure and execute the runs. import argparse from functools import partial @@ -66,12 +67,13 @@ def slurm_executor( time: str = "01:00:00", custom_mounts: Optional[list[str]] = None, custom_env_vars: Optional[dict[str, str]] = None, - container_image: str = "nvcr.io/nvidia/nemo:24.09", + container_image: str = "nvcr.io/nvidia/nemo:dev", retries: int = 0, ) -> run.SlurmExecutor: if not (user and host and remote_job_dir and account and partition and nodes and devices): raise RuntimeError( - "Please set user, host, remote_job_dir, account, partition, nodes and devices args for using this function." 
+ "Please set user, host, remote_job_dir, account, partition, nodes and devices args for using this ", + "function.", ) mounts = [] @@ -149,7 +151,6 @@ def main(): pretrain.trainer.val_check_interval = 400 pretrain.log.ckpt.save_top_k = -1 - pretrain.log.ckpt.every_n_train_steps = 400 pretrain.trainer.max_steps = 1000 From b8f9b0b84e40b5a9617d0a746fc438719e4dfe90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Fri, 13 Dec 2024 23:55:39 +0100 Subject: [PATCH 040/128] fix (#11575) Signed-off-by: Oliver Koenig --- .github/workflows/cicd-main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 74f20ed52392..3008ca6d2435 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -4027,7 +4027,7 @@ jobs: uses: ./.github/workflows/_test_template.yml if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_NeMo_2_SSM_Pretraining') || needs.cicd-test-container-setup.outputs.all == 'true' with: - RUNNER: self-hosted-azure + RUNNER: self-hosted-azure-gpus-1 SCRIPT: | python tests/collections/llm/gpt/model/megatron_ssm_pretraining.py \ @@ -4041,7 +4041,7 @@ jobs: uses: ./.github/workflows/_test_template.yml if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_NeMo_2_SSM_Finetuning') || needs.cicd-test-container-setup.outputs.all == 'true' with: - RUNNER: self-hosted-azure + RUNNER: self-hosted-azure-gpus-1 SCRIPT: | python tests/collections/llm/gpt/model/megatron_ssm_finetuning.py \ From 7bc32cd5a7761e07767c49aea1b92489a18eec76 Mon Sep 17 00:00:00 2001 From: Jan Lasek Date: Sat, 14 Dec 2024 03:27:03 +0100 Subject: [PATCH 041/128] NIM supporting changes for nemo.export for NeMo 2.0 (#11488) * Move torch_dtype_from_precision for independent export module Signed-off-by: Jan Lasek * Apply isort and black reformatting Signed-off-by: janekl Signed-off-by: Jan Lasek * Remove unused imports Signed-off-by: Jan Lasek * Fix too long lines Signed-off-by: Jan Lasek * Apply isort and black reformatting Signed-off-by: janekl Signed-off-by: Jan Lasek * Fix signature and default for megatron_amp_O2 Signed-off-by: Jan Lasek --------- Signed-off-by: Jan Lasek Signed-off-by: janekl Co-authored-by: Bobby Chen Co-authored-by: janekl --- nemo/deploy/deploy_pytriton.py | 2 +- nemo/export/tensorrt_llm.py | 66 +++++------------ .../converter/model_to_trt_llm_ckpt.py | 31 ++++++-- .../trt_llm/nemo_ckpt_loader/nemo_file.py | 74 ++++++++++++++++++- nemo/export/trt_llm/utils.py | 35 +++++++++ scripts/deploy/nlp/deploy_triton.py | 1 - scripts/export/export_to_trt_llm.py | 2 - tests/deploy/nemo_deploy.py | 6 +- tests/export/nemo_export.py | 1 + 9 files changed, 156 insertions(+), 62 deletions(-) create mode 100644 nemo/export/trt_llm/utils.py diff --git a/nemo/deploy/deploy_pytriton.py b/nemo/deploy/deploy_pytriton.py index 1e1333f03b55..797f805f99b9 100644 --- a/nemo/deploy/deploy_pytriton.py +++ b/nemo/deploy/deploy_pytriton.py @@ -35,7 +35,7 @@ class DeployPyTriton(DeployBase): trt_llm_exporter.export( nemo_checkpoint_path="/path/for/nemo/checkpoint", model_type="llama", - n_gpus=1, + tensor_parallelism_size=1, ) nm = DeployPyTriton(model=trt_llm_exporter, triton_model_name="model_name", port=8000) diff --git a/nemo/export/tensorrt_llm.py b/nemo/export/tensorrt_llm.py index 864876899398..d1b5aa0e76d6 100644 --- a/nemo/export/tensorrt_llm.py +++ b/nemo/export/tensorrt_llm.py @@ -30,14 +30,18 @@ import wrapt from tensorrt_llm._utils import 
numpy_to_torch -from nemo.collections.nlp.parts.utils_funcs import torch_dtype_from_precision from nemo.deploy import ITritonDeployable from nemo.export.tarutils import TarPath, unpack_tarball from nemo.export.trt_llm.converter.model_converter import model_to_trtllm_ckpt -from nemo.export.trt_llm.converter.model_to_trt_llm_ckpt import dist_model_to_trt_llm_ckpt, get_layer_prefix +from nemo.export.trt_llm.converter.model_to_trt_llm_ckpt import ( + dist_model_to_trt_llm_ckpt, + get_layer_prefix, + torch_dtype_from_precision, +) from nemo.export.trt_llm.converter.utils import init_model_parallel_from_nemo from nemo.export.trt_llm.nemo_ckpt_loader.nemo_file import ( build_tokenizer, + get_model_type, get_tokenizer, is_nemo_file, load_nemo_model, @@ -54,6 +58,7 @@ refit, unload_engine, ) +from nemo.export.trt_llm.utils import is_rank use_deploy = True try: @@ -95,7 +100,7 @@ class TensorRTLLM(ITritonDeployable): trt_llm_exporter.export( nemo_checkpoint_path="/path/for/nemo/checkpoint", model_type="llama", - n_gpus=1, + tensor_parallelism_size=1, ) output = trt_llm_exporter.forward(["Hi, how are you?", "I am good, thanks, how about you?"]) @@ -138,7 +143,6 @@ def __init__( self.multi_block_mode = multi_block_mode self.model = None self.tokenizer = None - self.n_gpus = None self.config = None self.ptuning_tables = [] self.p_table = None @@ -154,14 +158,11 @@ def export( nemo_checkpoint_path: str, model_type: Optional[str] = None, delete_existing_files: bool = True, - n_gpus: Optional[int] = None, tensor_parallelism_size: int = 1, pipeline_parallelism_size: int = 1, gpus_per_node: Optional[int] = None, max_input_len: int = 256, max_output_len: Optional[int] = 256, - max_input_token: Optional[int] = None, - max_output_token: Optional[int] = None, max_batch_size: int = 8, max_prompt_embedding_table_size: Optional[int] = None, use_parallel_embedding: bool = False, @@ -186,22 +187,20 @@ def export( fp8_kvcache: Optional[bool] = None, gather_context_logits: Optional[bool] = False, gather_generation_logits: Optional[bool] = False, + build_rank: Optional[int] = 0, ): """ Exports nemo checkpoints to TensorRT-LLM. Args: nemo_checkpoint_path (str): path for the nemo checkpoint. - model_type (str): type of the model (optional for quantized checkpoints). + model_type (Optional[str]): type of the model (optional for NeMo 2.0 and quantized checkpoints). delete_existing_files (bool): if True, deletes all the files in model_dir. - n_gpus (int): number of GPUs to use for inference. tensor_parallelism_size (int): tensor parallelism. pipeline_parallelism_size (int): pipeline parallelism. gpus_per_node (int): number of gpus per node. max_input_len (int): max input length. max_output_len (int): max output length. - max_input_token (int): max input length. Deprecated, use max_input_len instead. - max_output_token (int): max output length. Deprecated, use max_output_len instead. max_batch_size (int): max batch size. max_prompt_embedding_table_size (int): max prompt embedding size. use_parallel_embedding (bool): whether to use parallel embedding feature of TRT-LLM or not @@ -226,15 +225,8 @@ def export( fp8_kvcache (Optional[bool]): enables FP8 KV-cache quantization. If not set, autodetects the type. gather_context_logits (Optional[bool]): if True, enables gather_context_logits while building trtllm engine. Default: False gather_generation_logits (Optional[bool]): if True, enables gather_generation_logits while building trtllm engine. Default: False + build_rank (Optional[int]): rank to export the model on. 
If None, builds on all ranks. """ - if n_gpus is not None: - warnings.warn( - "Parameter n_gpus is deprecated and will be removed in the next release. " - "Please use tensor_parallelism_size and pipeline_parallelism_size parameters instead.", - DeprecationWarning, - stacklevel=2, - ) - tensor_parallelism_size = n_gpus gpus_per_node = tensor_parallelism_size if gpus_per_node is None else gpus_per_node @@ -259,22 +251,6 @@ def export( self.model = None - if max_input_token is not None: - warnings.warn( - "Parameter max_input_token is deprecated and will be removed. Please use max_input_len instead.", - DeprecationWarning, - stacklevel=2, - ) - max_input_len = max_input_token - - if max_output_token is not None: - warnings.warn( - "Parameter max_output_token is deprecated and will be removed. Please use max_output_len instead.", - DeprecationWarning, - stacklevel=2, - ) - max_output_len = max_output_token - if max_output_len is not None: warnings.warn( "Parameter max_output_len is deprecated and will be removed. Please use max_seq_len instead.", @@ -292,7 +268,9 @@ def export( ) max_batch_size = 4 - if tensorrt_llm.mpi_rank() == 0: + is_export_rank = is_rank(build_rank) + + if is_export_rank: tmp_dir = tempfile.TemporaryDirectory() nemo_export_dir = Path(tmp_dir.name) @@ -333,6 +311,10 @@ def export( reduce_fusion=reduce_fusion, ) else: + if model_type is None: + # For NeMo 2.0 models we can get model_type from the model class name + model_type = get_model_type(nemo_checkpoint_path) + if model_type is None: raise Exception("model_type needs to be specified, got None.") @@ -499,7 +481,7 @@ def export( if tensorrt_llm.mpi_world_size() > 1: tensorrt_llm.mpi_barrier() - if load_model: + if is_export_rank and load_model: self._load() def get_transformer_config(self, nemo_model_config): @@ -937,7 +919,6 @@ def forward( self, input_texts: List[str], max_output_len: int = 64, - max_output_token: Optional[int] = None, top_k: int = 1, top_p: float = 0.0, temperature: float = 1.0, @@ -959,7 +940,6 @@ def forward( Args: input_texts (List(str)): list of sentences. max_output_len (int): max generated tokens. - max_output_token (int): max generated tokens. Deprecated, use max_output_len instead. top_k (int): limits us to a certain number (K) of the top tokens to consider. top_p (float): limits us to the top tokens within a certain probability mass (p). temperature (float): A parameter of the softmax function, which is the last layer in the network. @@ -979,13 +959,6 @@ def forward( "then it should be loaded first to run inference." ) else: - if max_output_token is not None: - warnings.warn( - "Parameter max_output_token is deprecated and will be removed. 
Please use max_output_len instead.", - DeprecationWarning, - stacklevel=2, - ) - max_output_len = max_output_token if prompt_embeddings_table is not None or prompt_embeddings_checkpoint_path is not None: prompt_table = self._get_prompt_embedding_table( prompt_embeddings_table, prompt_embeddings_checkpoint_path @@ -1373,7 +1346,6 @@ def _load_config_file(self): def _load(self): self.model = None self.tokenizer = None - self.n_gpus = None self.config = None self.ptuning_tables = [] diff --git a/nemo/export/trt_llm/converter/model_to_trt_llm_ckpt.py b/nemo/export/trt_llm/converter/model_to_trt_llm_ckpt.py index b0e134ab0c35..ca725f74d2ef 100644 --- a/nemo/export/trt_llm/converter/model_to_trt_llm_ckpt.py +++ b/nemo/export/trt_llm/converter/model_to_trt_llm_ckpt.py @@ -14,17 +14,15 @@ import logging -import math import multiprocessing from collections import defaultdict from pathlib import Path +from typing import Optional, Union -import numpy as np import torch -from tensorrt_llm._utils import pad_vocab_size, str_dtype_to_torch, torch_to_numpy +from tensorrt_llm._utils import pad_vocab_size, str_dtype_to_torch from tqdm import tqdm -from nemo.collections.nlp.parts.utils_funcs import torch_dtype_from_precision from nemo.export.trt_llm.converter.utils import save_scaling_factor, save_val, split_and_save_weight, weights_dict LOGGER = logging.getLogger("NeMo") @@ -38,6 +36,22 @@ } +def torch_dtype_from_precision(precision: Union[int, str], megatron_amp_O2: Optional[bool] = None) -> torch.dtype: + """Mapping from PTL precision types to corresponding PyTorch parameter datatype.""" + # Copied from nemo.collections.nlp.parts.utils_funcs to avoid extra depenencies for NIM. + if megatron_amp_O2 is not None and megatron_amp_O2 is False: + return torch.float32 + + if precision in ['bf16', 'bf16-mixed']: + return torch.bfloat16 + elif precision in [16, '16', '16-mixed']: + return torch.float16 + elif precision in [32, '32', '32-true']: + return torch.float32 + else: + raise ValueError(f"Could not parse the precision of `{precision}` to a valid torch.dtype") + + def extract_layers_with_prefix(model_, prefix): length_to_trim = len(prefix) model_state = model_.get("state_dict", model_) @@ -220,7 +234,8 @@ def handle_model_level_weights(model, tp_idx: int, pp_idx: int): # Let's rename/map the key to the old layer name previously. You can try printing out # the rename_key output of the old llama checkpoint and compare. rename_key_dist_ckpt(key, 0), - # Since the state dict value has the full layers, let's select the ith layer weights/biases here. + # Since the state dict value has the full layers, + # let's select the ith layer weights/biases here. [val], storage_type, None, @@ -238,7 +253,8 @@ def handle_model_level_weights(model, tp_idx: int, pp_idx: int): # Let's rename/map the key to the old layer name previously. You can try printing out # the rename_key output of the old llama checkpoint and compare. rename_key_dist_ckpt(key, i), - # Since the state dict value has the full layers, let's select the ith layer weights/biases here. + # Since the state dict value has the full layers, + # let's select the ith layer weights/biases here. [val[i]], storage_type, None, @@ -322,7 +338,8 @@ def dist_model_to_trt_llm_ckpt( reshard_model = True else: raise NotImplementedError( - f"NeMo currently only supports PP>1 -> PP=1 resharding, other types of resharding will come in future releases." + "NeMo currently only supports PP>1 -> PP=1 resharding," + " other types of resharding will come in future releases." 
) num_layers = nemo_model_config["num_layers"] diff --git a/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py b/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py index f4ace00292f9..827bbf929796 100644 --- a/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py +++ b/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py @@ -21,7 +21,7 @@ import shutil from io import BytesIO from pathlib import Path -from typing import Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import tensorstore # This is important even though not used. Otherwise zarr raises error. @@ -358,6 +358,78 @@ def batch_decode(self, ids): return tokenizer +def load_nemo_config(nemo_ckpt: Union[str, Path]) -> Dict[Any, Any]: + """ + Load the model configuration from a NeMo checkpoint. + + This function handles both NeMo 1.0 and NeMo 2.0 checkpoint structures. + For NeMo 2.0, it reads the configuration from the 'context/model.yaml' file. + For NeMo 1.0, it uses the UnpackedNemoCheckpointDir to load the model configuration. + + Args: + nemo_ckpt (Union[str, Path]): Path to the NeMo checkpoint file or directory. + Returns: + Dict[Any, Any]: The configuration dictionary. + """ + if Path(nemo_ckpt).is_dir(): + nemo_ckpt = Path(nemo_ckpt) + else: + nemo_ckpt = TarPath(nemo_ckpt) + + if (nemo_ckpt / "weights").exists() and (nemo_ckpt / "context").exists(): # Stucture of NeMo 2.0 checkpoints + with (nemo_ckpt / "context" / "model.yaml").open("r") as stream: + config = yaml.safe_load(stream) + else: # Assume NeMo 1.0 case + unpacked_checkpoint_dir = UnpackedNemoCheckpointDir(nemo_ckpt, load_checkpoints_to_cpu=True) + config = unpacked_checkpoint_dir.model_config + + return config + + +def get_model_type(nemo_ckpt: Union[str, Path]) -> Optional[str]: + """ + Determine the model type from a NeMo checkpoint for TensorRT-LLM engine build. + + Args: + nemo_ckpt (str): Path to the NeMo checkpoint file. + Returns: + Optional[str]: The model type if it can be determined, otherwise None. 
+ """ + model_config = load_nemo_config(nemo_ckpt) + model_type = None + + if model_class := model_config.get("_target_"): + # NeMo 2.0 case + NEMO2_TO_MODEL_TYPE = { + "nemo.collections.llm.gpt.model.base.GPTModel": "gpt", + "nemo.collections.llm.gpt.model.llama.LlamaModel": "llama", + "nemo.collections.llm.gpt.model.mistral.MistralModel": "llama", + "nemo.collections.llm.gpt.model.mixtral.MixtralModel": "llama", + "nemo.collections.llm.gpt.model.starcoder.StarcoderModel": "gpt", + "nemo.collections.llm.gpt.model.starcoder2.Starcoder2Model": "gpt", + "nemo.collections.llm.gpt.model.nemotron.NemotronModel": "gpt", + "nemo.collections.llm.gpt.model.gemma.GemmaModel": "gemma", + "nemo.collections.llm.gpt.model.phi3mini.Phi3Model": "phi3", + "nemo.collections.llm.gpt.model.baichuan.Baichuan2Model": "baichuan", + "nemo.collections.llm.gpt.model.chatglm.ChatGLMModel": "chatglm", + "nemo.collections.llm.gpt.model.qwen2.Qwen2Model": "qwen", + } + try: + model_type = NEMO2_TO_MODEL_TYPE[model_class] + LOGGER.info(f"Determined model_type='{model_type}' for {nemo_ckpt} checkpoint.") + + except KeyError: + LOGGER.error( + f"Model {model_class} not found in the NEMO2_TO_MODEL_TYPE mapping, " + "try providing the model_type explicitely for exporting:\n" + f"{json.dumps(NEMO2_TO_MODEL_TYPE, indent=2)}" + ) + raise + else: + LOGGER.warning(f"Parameter model_type cannot be determined for {nemo_ckpt} checkpoint.") + return model_type + + def load_nemo_model(nemo_ckpt: Union[str, Path], nemo_export_dir: Union[str, Path]): if not os.path.exists(nemo_ckpt): diff --git a/nemo/export/trt_llm/utils.py b/nemo/export/trt_llm/utils.py new file mode 100644 index 000000000000..bb30048b96c7 --- /dev/null +++ b/nemo/export/trt_llm/utils.py @@ -0,0 +1,35 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +import tensorrt_llm + + +def is_rank(rank: Optional[int]) -> bool: + """ + Check if the current MPI rank matches the specified rank. + + Args: + rank (Optional[int]): The rank to check against. + + Returns: + bool: True if the current rank matches the specified rank or if rank is None. 
+ """ + current_rank = tensorrt_llm.mpi_rank() + if rank is None: + return True + if isinstance(rank, int): + return current_rank == rank + raise ValueError(f"Invalid rank argument {rank} of type {type(rank)}.") diff --git a/scripts/deploy/nlp/deploy_triton.py b/scripts/deploy/nlp/deploy_triton.py index 154ffc90dc9c..e133ecb8cfd3 100755 --- a/scripts/deploy/nlp/deploy_triton.py +++ b/scripts/deploy/nlp/deploy_triton.py @@ -319,7 +319,6 @@ def get_trtllm_deployable(args): trt_llm_exporter.export( nemo_checkpoint_path=args.nemo_checkpoint, model_type=args.model_type, - n_gpus=args.num_gpus, tensor_parallelism_size=args.tensor_parallelism_size, pipeline_parallelism_size=args.pipeline_parallelism_size, max_input_len=args.max_input_len, diff --git a/scripts/export/export_to_trt_llm.py b/scripts/export/export_to_trt_llm.py index d9e846547c68..66b9547bb7a3 100644 --- a/scripts/export/export_to_trt_llm.py +++ b/scripts/export/export_to_trt_llm.py @@ -44,7 +44,6 @@ def get_args(argv): parser.add_argument( "-mr", "--model_repository", required=True, default=None, type=str, help="Folder for the trt-llm model files" ) - parser.add_argument("-ng", "--num_gpus", default=None, type=int, help="Number of GPUs for the deployment") parser.add_argument("-tps", "--tensor_parallelism_size", default=1, type=int, help="Tensor parallelism size") parser.add_argument("-pps", "--pipeline_parallelism_size", default=1, type=int, help="Pipeline parallelism size") parser.add_argument( @@ -181,7 +180,6 @@ def nemo_export_trt_llm(argv): trt_llm_exporter.export( nemo_checkpoint_path=args.nemo_checkpoint, model_type=args.model_type, - n_gpus=args.num_gpus, tensor_parallelism_size=args.tensor_parallelism_size, pipeline_parallelism_size=args.pipeline_parallelism_size, max_input_len=args.max_input_len, diff --git a/tests/deploy/nemo_deploy.py b/tests/deploy/nemo_deploy.py index 45f2bae3425e..647fe52c9bc6 100644 --- a/tests/deploy/nemo_deploy.py +++ b/tests/deploy/nemo_deploy.py @@ -28,7 +28,7 @@ try: from nemo.deploy import DeployPyTriton from nemo.deploy.nlp import NemoQueryLLM, NemoQueryLLMPyTorch - from nemo.export import TensorRTLLM + from nemo.export.tensorrt_llm import TensorRTLLM except Exception as e: run_export_tests = False @@ -240,7 +240,6 @@ def run_trt_llm_inference( trt_llm_exporter.export( nemo_checkpoint_path=checkpoint_path, model_type=model_type, - n_gpus=n_gpu, tensor_parallelism_size=tp_size, pipeline_parallelism_size=pp_size, max_input_len=max_input_len, @@ -460,7 +459,6 @@ def get_args(): "--min_gpus", type=int, default=1, - required=True, ) parser.add_argument( "--max_gpus", @@ -516,10 +514,12 @@ def get_args(): parser.add_argument( "--tp_size", type=int, + default=1, ) parser.add_argument( "--pp_size", type=int, + default=1, ) parser.add_argument( "--top_k", diff --git a/tests/export/nemo_export.py b/tests/export/nemo_export.py index cb2b3619e4d3..5f3be23209aa 100644 --- a/tests/export/nemo_export.py +++ b/tests/export/nemo_export.py @@ -807,6 +807,7 @@ def str_to_bool(name: str, s: str, optional: bool = False) -> Optional[bool]: return None raise UsageError(f"Invalid boolean value for argument --{name}: '{s}'") + args.model_type = None if str(args.model_type).lower() == "none" else args.model_type args.test_cpp_runtime = str_to_bool("test_cpp_runtime", args.test_cpp_runtime) args.test_deployment = str_to_bool("test_deployment", args.test_deployment) args.functional_test = str_to_bool("functional_test", args.functional_test) From 929d643fb16e036f05090425a7eea77c329bdac0 Mon Sep 17 00:00:00 2001 From: 
Aleksandr Laptev Date: Sat, 14 Dec 2024 03:54:22 -0800 Subject: [PATCH 042/128] AED greedy confidence estimation (#11573) * upload Signed-off-by: Aleksandr Laptev * Apply isort and black reformatting Signed-off-by: GNroy * set prompt confidence dtype at initialization Signed-off-by: Aleksandr Laptev --------- Signed-off-by: Aleksandr Laptev Signed-off-by: GNroy Co-authored-by: GNroy --- .../transformer/transformer_generators.py | 64 +++++++- .../parts/submodules/multitask_decoding.py | 152 +++++++++++++++++- .../submodules/multitask_greedy_decoding.py | 39 ++++- .../asr/parts/submodules/rnnt_decoding.py | 5 +- .../asr/parts/utils/asr_confidence_utils.py | 46 +++--- .../asr/decoding/test_multi_task_decoding.py | 22 ++- 6 files changed, 279 insertions(+), 49 deletions(-) diff --git a/nemo/collections/asr/modules/transformer/transformer_generators.py b/nemo/collections/asr/modules/transformer/transformer_generators.py index e6775a48f635..16d8011ad0f3 100644 --- a/nemo/collections/asr/modules/transformer/transformer_generators.py +++ b/nemo/collections/asr/modules/transformer/transformer_generators.py @@ -13,11 +13,14 @@ # limitations under the License. from contextlib import contextmanager +from typing import Optional import torch +from omegaconf import DictConfig from torch.distributions import Categorical from nemo.collections.asr.parts.submodules.token_classifier import TokenClassifier +from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceMethodMixin from nemo.collections.common.parts import NEG_INF, mask_padded_tokens __all__ = [ @@ -29,7 +32,7 @@ ] -class GreedySequenceGenerator: +class GreedySequenceGenerator(ConfidenceMethodMixin): """ Greedy sequence generator based on the decoder followed by log_softmax. Optionally supports temperature sampling with ``n_samples`` and ``temperature`` options. @@ -51,6 +54,37 @@ class GreedySequenceGenerator: n_samples: number of sequences to generate (requires ``temperature`` to be set) temperature: temperature for temperature sampling. Even with ``n_samples`` set to 1, enabling temperature will sample hypotheses instead of returning the best ones. + + preserve_step_confidence: Bool flag which preserves the history of per-step confidence scores generated + during greedy decoding. When set to true, the results will contain additional List of tensor floats. + confidence_method_cfg: A dict-like object which contains the method name and settings to compute per-step + confidence scores. + name: The method name (str). + Supported values: + - 'max_prob' for using the maximum token probability as a confidence. + - 'entropy' for using a normalized entropy of a log-likelihood vector. + entropy_type: Which type of entropy to use (str). Used if confidence_method_cfg.name is set to `entropy`. + Supported values: + - 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided, + the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)). + Note that for this entropy, the alpha should comply the following inequality: + (log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1) + where V is the model vocabulary size. + - 'tsallis' for the Tsallis entropy with the Boltzmann constant one. + Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)), + where α is a parameter. When α == 1, it works like the Gibbs entropy. + More: https://en.wikipedia.org/wiki/Tsallis_entropy + - 'renyi' for the Rényi entropy. + Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)), + where α is a parameter. 
When α == 1, it works like the Gibbs entropy. + More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy + alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0. + When the alpha equals one, scaling is not applied to 'max_prob', + and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i)) + entropy_norm: A mapping of the entropy value to the interval [0,1]. + Supported values: + - 'lin' for using the linear mapping. + - 'exp' for using exponential mapping with linear shift. """ def __init__( @@ -66,6 +100,8 @@ def __init__( batch_size=1, n_samples=1, temperature=None, + preserve_step_confidence=False, + confidence_method_cfg: Optional[DictConfig] = None, ): super().__init__() self.embedding = embedding @@ -77,6 +113,11 @@ def __init__( self.batch_size = batch_size self.n_samples = n_samples self.temperature = temperature + self.preserve_step_confidence = preserve_step_confidence + + # set confidence calculation method + self.num_tokens = getattr(self.classifier.mlp, f'layer{self.classifier.mlp.layers - 1}').out_features + self._init_confidence_method(confidence_method_cfg) def _one_step_forward( self, @@ -172,6 +213,14 @@ def _forward( decoder_parameter = next(self.decoder.parameters()) pad_profile = torch.zeros(batch_size).long().to(decoder_parameter.device) + if self.preserve_step_confidence: + if encoder_hidden_states is None: + raise RuntimeError("`encoder_hidden_states` must be provided to compute confidence scores.") + # start with prompt confidence which is always 1 + step_confidence = [torch.full_like(tgt, 1, dtype=encoder_hidden_states.dtype)] + else: + step_confidence = None + decoder_mems_list = None for i in range(max_generation_length): @@ -198,16 +247,27 @@ def _forward( pad_profile = torch.max(pad_profile, (next_tokens == self.eos).long()) tgt = torch.cat((tgt, next_tokens.unsqueeze(1)), dim=-1) + if self.preserve_step_confidence: + step_confidence.append( + self._get_confidence_tensor( + torch.nn.functional.log_softmax(logits, dim=-1) if not return_beam_scores else logits + ) + ) + # abort generation if all sequences end with if pad_profile.sum() == batch_size: break + step_confidence_tensor = ( + torch.cat(step_confidence, dim=1) if self.preserve_step_confidence and len(step_confidence) > 0 else None + ) + samples = None if is_sampling: samples = list(tgt.view(orig_batch_size, self.n_samples, -1)) tgt = tgt[:: self.n_samples] - return tgt, samples + return tgt, samples, step_confidence_tensor def __call__( self, decoder_input_ids=None, encoder_hidden_states=None, encoder_input_mask=None, return_beam_scores=False diff --git a/nemo/collections/asr/parts/submodules/multitask_decoding.py b/nemo/collections/asr/parts/submodules/multitask_decoding.py index 715ee7168037..790c95afbbfb 100644 --- a/nemo/collections/asr/parts/submodules/multitask_decoding.py +++ b/nemo/collections/asr/parts/submodules/multitask_decoding.py @@ -13,7 +13,7 @@ # limitations under the License. 
import re -from abc import ABC, abstractmethod +from abc import abstractmethod from dataclasses import dataclass, field, is_dataclass from typing import List, Optional, Tuple, Union @@ -29,13 +29,14 @@ AEDGreedyInferConfig, TransformerAEDGreedyInfer, ) +from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceConfig, ConfidenceMixin from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis, NBestHypotheses from nemo.collections.common.tokenizers.aggregate_tokenizer import AggregateTokenizer from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec from nemo.utils import logging -class AbstractMultiTaskDecoding(ABC): +class AbstractMultiTaskDecoding(ConfidenceMixin): """ Used for performing AED auto-regressive decoding of the Multi task model given the encoder state. @@ -62,6 +63,52 @@ class AbstractMultiTaskDecoding(ABC): In order to obtain this hypothesis, please utilize `rnnt_decoder_predictions_tensor` function with the `return_hypotheses` flag set to True. + confidence_cfg: A dict-like object which contains the following key-value pairs related to confidence scores. + preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores + generated during greedy decoding. When set to true, the Hypothesis will contain the non-null value + for `frame_confidence` in it. Here, `frame_confidence` is a List of tensor floats. + + preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores + generated during greedy decoding. When set to true, the Hypothesis will contain the non-null value + for `token_confidence` in it. Here, `token_confidence` is a List of tensor floats. + The length of the list corresponds to the number of recognized tokens. + + preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores + generated during greedy decoding. When set to true, the Hypothesis will contain the non-null value + for `word_confidence` in it. Here, `word_confidence` is a List of tensor floats. + The length of the list corresponds to the number of recognized words. + + aggregation: Which aggregation type to use for collapsing per-token confidence into per-word + confidence. Valid options are `mean`, `min`, `max`, `prod`. + + method_cfg: A dict-like object which contains settings to compute per-frame confidence scores. + name: The method name (str). + Supported values: + - 'max_prob' for using the maximum token probability as a confidence. + - 'entropy' for using a normalized entropy of a log-likelihood vector. + entropy_type: Which type of entropy to use (str). Used if confidence_method_cfg.name is set to `entropy`. + Supported values: + - 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided, + the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)). + Note that for this entropy, the alpha should comply the following inequality: + (log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1) + where V is the model vocabulary size. + - 'tsallis' for the Tsallis entropy with the Boltzmann constant one. + Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)), + where α is a parameter. When α == 1, it works like the Gibbs entropy. + More: https://en.wikipedia.org/wiki/Tsallis_entropy + - 'renyi' for the Rényi entropy. + Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)), + where α is a parameter. When α == 1, it works like the Gibbs entropy. 
+ More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy + alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0. + When the alpha equals one, scaling is not applied to 'max_prob', + and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i)) + entropy_norm: A mapping of the entropy value to the interval [0,1]. + Supported values: + - 'lin' for using the linear mapping. + - 'exp' for using exponential mapping with linear shift. + The config may further contain the following sub-dictionaries: "greedy": temperature: None (disabled) or float, specifying this enables temperature sampling instead of greedy decoding. @@ -109,6 +156,9 @@ def __init__( self.log_softmax_module = log_softmax_module self.tokenizer = tokenizer + # initialize confidence-related fields + self._init_confidence(self.cfg.get('confidence_cfg', None)) + self.change_strategy(self.cfg.strategy) def change_strategy(self, strategy: str) -> "AbstractMultiTaskDecoding": @@ -132,6 +182,8 @@ def change_strategy(self, strategy: str) -> "AbstractMultiTaskDecoding": tokenizer=self.tokenizer, max_generation_delta=self.cfg.greedy.get('max_generation_delta', -1), preserve_alignments=self.preserve_alignments, + preserve_token_confidence=self.preserve_token_confidence or self.preserve_frame_confidence, + confidence_method_cfg=self.confidence_method_cfg, temperature=self.cfg.greedy.temperature, n_samples=self.cfg.greedy.n_samples, ) @@ -223,6 +275,11 @@ def decode_predictions_tensor( hypotheses = self.decode_hypothesis(prediction_list) if return_hypotheses: + # greedy decoding, can get high-level confidence scores + if self.preserve_frame_confidence and ( + self.preserve_word_confidence or self.preserve_token_confidence + ): + hypotheses = self.compute_confidence(hypotheses) return hypotheses, None best_hyp_text = [h.text for h in hypotheses] @@ -255,6 +312,38 @@ def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[Hyp return hypotheses_list + def compute_confidence(self, hypotheses_list: List[Hypothesis]) -> List[Hypothesis]: + """ + Compute high-level (per-token and/or per-word) confidence scores for a list of hypotheses. + Assumes that `token_confidence` is present in the hypotheses. + + Args: + hypotheses_list: List of Hypothesis. + + Returns: + A list of hypotheses with high-level confidence scores. + """ + if self.preserve_word_confidence: + for hyp in hypotheses_list: + hyp.word_confidence = self._aggregate_token_confidence(hyp) + return hypotheses_list + + def _aggregate_token_confidence(self, hypothesis: Hypothesis) -> List[float]: + """ + Implemented by subclass in order to reduce token confidence to a word-level confidence. + + **Note**: Only supports Sentencepiece based tokenizers! + + Args: + hypothesis: Hypothesis + + Returns: + A list of word-level confidence scores. + """ + return self._aggregate_token_confidence_subwords_sentencepiece( + hypothesis.words, hypothesis.token_confidence, hypothesis.y_sequence + ) + @abstractmethod def decode_tokens_to_str(self, tokens: List[int]) -> str: """ @@ -338,13 +427,59 @@ class MultiTaskDecoding(AbstractMultiTaskDecoding): In order to obtain this hypothesis, please utilize `rnnt_decoder_predictions_tensor` function with the `return_hypotheses` flag set to True. + confidence_cfg: A dict-like object which contains the following key-value pairs related to confidence scores. + preserve_frame_confidence: Bool flag which preserves the history of per-frame confidence scores + generated during greedy decoding. 
When set to true, the Hypothesis will contain the non-null value + for `frame_confidence` in it. Here, `frame_confidence` is a List of tensor floats. + + preserve_token_confidence: Bool flag which preserves the history of per-token confidence scores + generated during greedy decoding. When set to true, the Hypothesis will contain the non-null value + for `token_confidence` in it. Here, `token_confidence` is a List of tensor floats. + The length of the list corresponds to the number of recognized tokens. + + preserve_word_confidence: Bool flag which preserves the history of per-word confidence scores + generated during greedy decoding. When set to true, the Hypothesis will contain the non-null value + for `word_confidence` in it. Here, `word_confidence` is a List of tensor floats. + The length of the list corresponds to the number of recognized words. + + aggregation: Which aggregation type to use for collapsing per-token confidence into per-word + confidence. Valid options are `mean`, `min`, `max`, `prod`. + + method_cfg: A dict-like object which contains settings to compute per-frame confidence scores. + name: The method name (str). + Supported values: + - 'max_prob' for using the maximum token probability as a confidence. + - 'entropy' for using a normalized entropy of a log-likelihood vector. + entropy_type: Which type of entropy to use (str). Used if confidence_method_cfg.name is set to `entropy`. + Supported values: + - 'gibbs' for the (standard) Gibbs entropy. If the alpha (α) is provided, + the formula is the following: H_α = -sum_i((p^α_i)*log(p^α_i)). + Note that for this entropy, the alpha should comply the following inequality: + (log(V)+2-sqrt(log^2(V)+4))/(2*log(V)) <= α <= (1+log(V-1))/log(V-1) + where V is the model vocabulary size. + - 'tsallis' for the Tsallis entropy with the Boltzmann constant one. + Tsallis entropy formula is the following: H_α = 1/(α-1)*(1-sum_i(p^α_i)), + where α is a parameter. When α == 1, it works like the Gibbs entropy. + More: https://en.wikipedia.org/wiki/Tsallis_entropy + - 'renyi' for the Rényi entropy. + Rényi entropy formula is the following: H_α = 1/(1-α)*log_2(sum_i(p^α_i)), + where α is a parameter. When α == 1, it works like the Gibbs entropy. + More: https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy + alpha: Power scale for logsoftmax (α for entropies). Here we restrict it to be > 0. + When the alpha equals one, scaling is not applied to 'max_prob', + and any entropy type behaves like the Shannon entropy: H = -sum_i(p_i*log(p_i)) + entropy_norm: A mapping of the entropy value to the interval [0,1]. + Supported values: + - 'lin' for using the linear mapping. + - 'exp' for using exponential mapping with linear shift. + The config may further contain the following sub-dictionaries: "greedy": - max_symbols: int, describing the maximum number of target tokens to decode per - timestep during greedy decoding. Setting to larger values allows longer sentences - to be decoded, at the cost of increased execution time. - preserve_frame_confidence: Same as above, overrides above value. - confidence_method_cfg: Same as above, overrides confidence_cfg.method_cfg. + temperature: None (disabled) or float, specifying this enables temperature sampling instead of greedy decoding. + + max_generation_delta: int = -1 # -1 means up to the max length of the decoder + + preserve_alignments: bool = False (unsupported) "beam": beam_size: int, defining the beam size for beam search. Must be >= 1. 
@@ -476,6 +611,9 @@ class MultiTaskDecodingConfig: # preserve decoding alignments preserve_alignments: Optional[bool] = None + # confidence config + confidence_cfg: ConfidenceConfig = field(default_factory=lambda: ConfidenceConfig()) + # compute language IDs compute_langs: bool = False diff --git a/nemo/collections/asr/parts/submodules/multitask_greedy_decoding.py b/nemo/collections/asr/parts/submodules/multitask_greedy_decoding.py index b38c02574d5b..f67cdd9f7944 100644 --- a/nemo/collections/asr/parts/submodules/multitask_greedy_decoding.py +++ b/nemo/collections/asr/parts/submodules/multitask_greedy_decoding.py @@ -13,12 +13,14 @@ # limitations under the License. from abc import ABC, abstractmethod -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import List, Optional, Union import torch +from omegaconf import DictConfig, OmegaConf from nemo.collections.asr.modules.transformer import GreedySequenceGenerator +from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceMethodConfig from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis, NBestHypotheses from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec from nemo.core import Typing, typecheck @@ -27,13 +29,20 @@ def pack_hypotheses( - hypotheses: List[Hypothesis], beam_hypotheses: torch.Tensor, scores: List[Optional[float]] + hypotheses: List[Hypothesis], + beam_hypotheses: torch.Tensor, + scores: List[Optional[float]], + step_confidence: Optional[torch.Tensor] = None, ) -> List[Hypothesis]: for idx, hyp in enumerate(hypotheses): # type: Hypothesis if scores[idx] is not None: hyp.score = scores[idx] + if step_confidence is not None: + hyp.frame_confidence = step_confidence[idx] + hyp.token_confidence = hyp.frame_confidence + hypi = beam_hypotheses[idx] if torch.is_tensor(hypi): hyp.y_sequence = hypi.long() @@ -122,6 +131,8 @@ def __init__( temperature: float | None = None, max_generation_delta: int = 50, preserve_alignments: bool = False, + preserve_token_confidence: bool = False, + confidence_method_cfg: Optional[DictConfig] = None, n_samples: int = 1, ): super().__init__( @@ -146,6 +157,8 @@ def __init__( max_delta_length=max_generation_delta, temperature=self.temperature, n_samples=n_samples, + preserve_step_confidence=preserve_token_confidence, + confidence_method_cfg=confidence_method_cfg, ) self.preserve_alignments = preserve_alignments @@ -176,7 +189,7 @@ def forward( packed list containing batch number of sentences (Hypotheses). 
""" with torch.inference_mode(): - best_hypo, topk_hypotheses = self.greedy_search( + best_hypo, topk_hypotheses, step_confidence = self.greedy_search( encoder_hidden_states=encoder_hidden_states, encoder_input_mask=encoder_input_mask, decoder_input_ids=decoder_input_ids, @@ -191,7 +204,9 @@ def forward( hypotheses = [Hypothesis(score=0.0, y_sequence=[], timestep=[]) for _ in range(self.n_samples)] self.format_hypotheses(hypotheses, decoder_input_ids) packed_result.append( - NBestHypotheses(pack_hypotheses(hypotheses, topk_hypotheses[i], beam_scores[i])) + NBestHypotheses( + pack_hypotheses(hypotheses, topk_hypotheses[i], beam_scores[i]), step_confidence + ) ) else: beam_scores = [None for _ in range(len(best_hypo))] @@ -200,7 +215,7 @@ def forward( Hypothesis(score=0.0, y_sequence=[], timestep=[]) for _ in range(encoder_hidden_states.shape[0]) ] # Pack results into Hypotheses - packed_result = pack_hypotheses(hypotheses, best_hypo, beam_scores) + packed_result = pack_hypotheses(hypotheses, best_hypo, beam_scores, step_confidence) self.format_hypotheses(packed_result, decoder_input_ids) return (packed_result,) @@ -222,6 +237,9 @@ def format_hypotheses(self, packed_result: List[Hypothesis], decoder_input_ids: hyp.y_sequence[: prefix.shape[0]] == prefix ).all(), f"The decoder input IDs were not found at the beginning of prediction: {hyp.y_sequence=} {prefix=})" hyp.y_sequence = hyp.y_sequence[prefix.shape[0] :] + hyp.token_confidence = ( + hyp.token_confidence[prefix.shape[0] :] if hyp.token_confidence is not None else None + ) for hyp in packed_result: ids = hyp.y_sequence ids_len = ids.shape[0] @@ -232,6 +250,7 @@ def format_hypotheses(self, packed_result: List[Hypothesis], decoder_input_ids: break # empty sequence if pos < -1: hyp.y_sequence = ids[: pos + 1] + hyp.token_confidence = hyp.token_confidence[: pos + 1] if hyp.token_confidence is not None else None @dataclass @@ -239,4 +258,14 @@ class AEDGreedyInferConfig: temperature: float | None = None max_generation_delta: int = -1 # -1 means up to the max length of the decoder preserve_alignments: bool = False + preserve_token_confidence: bool = False + confidence_method_cfg: Optional[ConfidenceMethodConfig] = field(default_factory=lambda: ConfidenceMethodConfig()) n_samples: int = 1 + + def __post_init__(self): + # OmegaConf.structured ensures that post_init check is always executed + self.confidence_method_cfg = OmegaConf.structured( + self.confidence_method_cfg + if isinstance(self.confidence_method_cfg, ConfidenceMethodConfig) + else ConfidenceMethodConfig(**self.confidence_method_cfg) + ) diff --git a/nemo/collections/asr/parts/submodules/rnnt_decoding.py b/nemo/collections/asr/parts/submodules/rnnt_decoding.py index d3a63467c485..18fcc57e5184 100644 --- a/nemo/collections/asr/parts/submodules/rnnt_decoding.py +++ b/nemo/collections/asr/parts/submodules/rnnt_decoding.py @@ -681,10 +681,11 @@ def compute_confidence(self, hypotheses_list: List[Hypothesis]) -> List[Hypothes hyp.token_confidence = hyp.non_blank_frame_confidence else: for hyp in hypotheses_list: + timestep = hyp.timestep.tolist() if isinstance(hyp.timestep, torch.Tensor) else hyp.timestep offset = 0 token_confidence = [] - if len(hyp.timestep) > 0: - for ts, te in zip(hyp.timestep, hyp.timestep[1:] + [len(hyp.frame_confidence)]): + if len(timestep) > 0: + for ts, te in zip(timestep, timestep[1:] + [len(hyp.frame_confidence)]): if ts != te: # tokens are considered to belong to the last non-blank token, if any. 
token_confidence.append( diff --git a/nemo/collections/asr/parts/utils/asr_confidence_utils.py b/nemo/collections/asr/parts/utils/asr_confidence_utils.py index 20f75baf522e..cecc3c497429 100644 --- a/nemo/collections/asr/parts/utils/asr_confidence_utils.py +++ b/nemo/collections/asr/parts/utils/asr_confidence_utils.py @@ -215,6 +215,7 @@ def get_confidence_measure_bank(): neg_entropy_gibbs = lambda x: (x.exp() * x).sum(-1) neg_entropy_alpha = lambda x, t: (x * t).exp().sum(-1) neg_entropy_alpha_gibbs = lambda x, t: ((x * t).exp() * x).sum(-1) + # too big for a lambda def entropy_tsallis_exp(x, v, t): exp_neg_max_ent = math.exp((1 - math.pow(v, 1 - t)) / (1 - t)) @@ -230,36 +231,30 @@ def entropy_gibbs_exp(x, v, t): # fill the measure bank confidence_measure_bank = {} # Maximum probability measure is implemented without alpha - confidence_measure_bank["max_prob"] = ( - lambda x, v, t: (x.max(dim=-1)[0].exp() * v - 1) / (v - 1) + confidence_measure_bank["max_prob"] = lambda x, v, t: ( + (x.max(dim=-1)[0].exp() * v - 1) / (v - 1) if t == 1.0 else ((x.max(dim=-1)[0] * t).exp() * math.pow(v, t) - 1) / (math.pow(v, t) - 1) ) - confidence_measure_bank["entropy_gibbs_lin"] = ( - lambda x, v, t: entropy_gibbs_lin_baseline(x, v) + confidence_measure_bank["entropy_gibbs_lin"] = lambda x, v, t: ( + entropy_gibbs_lin_baseline(x, v) if t == 1.0 else 1 + neg_entropy_alpha_gibbs(x, t) / math.log(v) / math.pow(v, 1 - t) ) - confidence_measure_bank["entropy_gibbs_exp"] = ( - lambda x, v, t: entropy_gibbs_exp_baseline(x, v) if t == 1.0 else entropy_gibbs_exp(x, v, t) + confidence_measure_bank["entropy_gibbs_exp"] = lambda x, v, t: ( + entropy_gibbs_exp_baseline(x, v) if t == 1.0 else entropy_gibbs_exp(x, v, t) ) - confidence_measure_bank["entropy_tsallis_lin"] = ( - lambda x, v, t: entropy_gibbs_lin_baseline(x, v) - if t == 1.0 - else 1 + (1 - neg_entropy_alpha(x, t)) / (math.pow(v, 1 - t) - 1) + confidence_measure_bank["entropy_tsallis_lin"] = lambda x, v, t: ( + entropy_gibbs_lin_baseline(x, v) if t == 1.0 else 1 + (1 - neg_entropy_alpha(x, t)) / (math.pow(v, 1 - t) - 1) ) - confidence_measure_bank["entropy_tsallis_exp"] = ( - lambda x, v, t: entropy_gibbs_exp_baseline(x, v) if t == 1.0 else entropy_tsallis_exp(x, v, t) + confidence_measure_bank["entropy_tsallis_exp"] = lambda x, v, t: ( + entropy_gibbs_exp_baseline(x, v) if t == 1.0 else entropy_tsallis_exp(x, v, t) ) - confidence_measure_bank["entropy_renyi_lin"] = ( - lambda x, v, t: entropy_gibbs_lin_baseline(x, v) - if t == 1.0 - else 1 + neg_entropy_alpha(x, t).log2() / (t - 1) / math.log(v, 2) + confidence_measure_bank["entropy_renyi_lin"] = lambda x, v, t: ( + entropy_gibbs_lin_baseline(x, v) if t == 1.0 else 1 + neg_entropy_alpha(x, t).log2() / (t - 1) / math.log(v, 2) ) - confidence_measure_bank["entropy_renyi_exp"] = ( - lambda x, v, t: entropy_gibbs_exp_baseline(x, v) - if t == 1.0 - else (neg_entropy_alpha(x, t).pow(1 / (t - 1)) * v - 1) / (v - 1) + confidence_measure_bank["entropy_renyi_exp"] = lambda x, v, t: ( + entropy_gibbs_exp_baseline(x, v) if t == 1.0 else (neg_entropy_alpha(x, t).pow(1 / (t - 1)) * v - 1) / (v - 1) ) return confidence_measure_bank @@ -295,8 +290,7 @@ class ConfidenceMethodMixin(ABC): """ def _init_confidence_method(self, confidence_method_cfg: Optional[DictConfig] = None): - """Initialize per-frame confidence method from config. 
- """ + """Initialize per-frame confidence method from config.""" # OmegaConf.structured ensures that post_init check is always executed confidence_method_cfg = OmegaConf.structured( ConfidenceMethodConfig() @@ -305,8 +299,9 @@ def _init_confidence_method(self, confidence_method_cfg: Optional[DictConfig] = ) # set confidence calculation method - # we suppose that self.blank_id == len(vocabulary) - self.num_tokens = (self.blank_id if hasattr(self, "blank_id") else self._blank_index) + 1 + if not hasattr(self, "num_tokens"): + # we suppose that self.blank_id == len(vocabulary) + self.num_tokens = (self.blank_id if hasattr(self, "blank_id") else self._blank_index) + 1 self.alpha = confidence_method_cfg.alpha # init confidence measure bank @@ -345,8 +340,7 @@ class ConfidenceMixin(ABC): """ def _init_confidence(self, confidence_cfg: Optional[DictConfig] = None): - """Initialize confidence-related fields and confidence aggregation function from config. - """ + """Initialize confidence-related fields and confidence aggregation function from config.""" # OmegaConf.structured ensures that post_init check is always executed confidence_cfg = OmegaConf.structured( ConfidenceConfig() if confidence_cfg is None else ConfidenceConfig(**confidence_cfg) diff --git a/tests/collections/asr/decoding/test_multi_task_decoding.py b/tests/collections/asr/decoding/test_multi_task_decoding.py index 056bb90b2d8c..17d618ead9c4 100644 --- a/tests/collections/asr/decoding/test_multi_task_decoding.py +++ b/tests/collections/asr/decoding/test_multi_task_decoding.py @@ -77,12 +77,13 @@ def tokenizer(): return tok -def test_greedy_decoding(inputs, nnet, deterministic_rng): - gen = GreedySequenceGenerator(*nnet) +@pytest.mark.parametrize('with_confidence', [False, True]) +def test_greedy_decoding(inputs, nnet, deterministic_rng, with_confidence): + gen = GreedySequenceGenerator(*nnet, preserve_step_confidence=with_confidence) output = gen(*inputs) - assert len(output) == 2 - best_path, hypotheses = output + assert len(output) == 3 + best_path, hypotheses, confidence = output assert best_path is not None assert torch.is_tensor(best_path) @@ -90,13 +91,20 @@ def test_greedy_decoding(inputs, nnet, deterministic_rng): assert hypotheses is None + if with_confidence: + assert confidence is not None + assert torch.is_tensor(confidence) + assert confidence.shape == best_path.shape + else: + assert confidence is None + def test_temperature_sampling_decoding(inputs, nnet): gen = GreedySequenceGenerator(*nnet, temperature=10.0, n_samples=2) output = gen(*inputs) - assert len(output) == 2 - best_path, hypotheses = output + assert len(output) == 3 + best_path, hypotheses, _ = output assert best_path is not None assert torch.is_tensor(best_path) @@ -202,7 +210,7 @@ def test_transformer_aed_greedy_infer_strips_prompt(prompted_inputs, decoder_nm, assert torch.is_tensor(best_path) # Now run the underlying beam search generator that doesn't trim anything. 
- (untrimmed,), _ = gen.greedy_search(*prompted_inputs) + (untrimmed,), _, _ = gen.greedy_search(*prompted_inputs) assert untrimmed is not None assert torch.is_tensor(untrimmed) From 82544dfe35312329af13ddffb9127a500f0b5a67 Mon Sep 17 00:00:00 2001 From: Ao Tang Date: Sat, 14 Dec 2024 08:12:08 -0500 Subject: [PATCH 043/128] gemma fix (#11587) --- nemo/collections/llm/recipes/gemma_2b.py | 5 ++--- nemo/collections/llm/recipes/gemma_7b.py | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/nemo/collections/llm/recipes/gemma_2b.py b/nemo/collections/llm/recipes/gemma_2b.py index 8bdf89696d56..3b43bbdb0e62 100644 --- a/nemo/collections/llm/recipes/gemma_2b.py +++ b/nemo/collections/llm/recipes/gemma_2b.py @@ -51,6 +51,8 @@ def model() -> run.Config[pl.LightningModule]: >>> model_config = model() >>> print(model_config) """ + # Disable cuDNN attention since TE 1.8 does not support head dim > 128 + os.environ['NVTE_FUSED_ATTN'] = "0" return run.Config(GemmaModel, config=run.Config(GemmaConfig2B)) @@ -274,9 +276,6 @@ def finetune_recipe( on fine-tuning LLMs with NeMo, see the fine-tuning guide in the `examples/llm/finetune/` directory. """ - # Disable cuDNN attention since TE 1.8 does not support head dim > 128 - os.environ['NVTE_FUSED_ATTN'] = "0" - recipe = default_finetune_recipe( model(), "google/gemma-2b", dir, name, num_nodes, num_gpus_per_node, packed_sequence ) diff --git a/nemo/collections/llm/recipes/gemma_7b.py b/nemo/collections/llm/recipes/gemma_7b.py index 46c91e27575a..40e43bda4d5e 100644 --- a/nemo/collections/llm/recipes/gemma_7b.py +++ b/nemo/collections/llm/recipes/gemma_7b.py @@ -51,6 +51,8 @@ def model() -> run.Config[pl.LightningModule]: >>> model_config = model() >>> print(model_config) """ + # Disable cuDNN attention since TE 1.8 does not support head dim > 128 + os.environ['NVTE_FUSED_ATTN'] = "0" return run.Config(GemmaModel, config=run.Config(GemmaConfig7B)) @@ -277,9 +279,6 @@ def finetune_recipe( on fine-tuning LLMs with NeMo, see the fine-tuning guide in the `examples/llm/finetune/` directory. 
""" - # Disable cuDNN attention since TE 1.8 does not support head dim > 128 - os.environ['NVTE_FUSED_ATTN'] = "0" - recipe = default_finetune_recipe( model(), "google/gemma-7b", dir, name, num_nodes, num_gpus_per_node, packed_sequence ) From 3483208ae062139490a6713253a3b9f5d80d7e8d Mon Sep 17 00:00:00 2001 From: Huy Vu <86480512+huvunvidia@users.noreply.github.com> Date: Sat, 14 Dec 2024 12:22:27 -0800 Subject: [PATCH 044/128] Update T5 DataModule regarding Pretrain/Finetune validate (#11584) * update datamodule to have mbs/gbs * update datamodule to have mbs/gbs * Apply isort and black reformatting Signed-off-by: huvunvidia --------- Signed-off-by: huvunvidia Co-authored-by: Huy Vu2 Co-authored-by: huvunvidia --- nemo/collections/llm/t5/data/mock.py | 2 ++ nemo/collections/llm/t5/data/pre_training.py | 2 ++ tests/collections/llm/megatron_t5_pretraining.py | 5 ++--- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/nemo/collections/llm/t5/data/mock.py b/nemo/collections/llm/t5/data/mock.py index 31198a4446e9..7ff78bcd9d4b 100644 --- a/nemo/collections/llm/t5/data/mock.py +++ b/nemo/collections/llm/t5/data/mock.py @@ -50,6 +50,8 @@ def __init__( super().__init__() self.seq_length = seq_length self.seq_length_dec = seq_length_dec + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size self.num_train_samples = num_train_samples self.num_val_samples = num_val_samples self.num_test_samples = num_test_samples diff --git a/nemo/collections/llm/t5/data/pre_training.py b/nemo/collections/llm/t5/data/pre_training.py index 4bd6e5ed5e93..15ae0f7b5c25 100644 --- a/nemo/collections/llm/t5/data/pre_training.py +++ b/nemo/collections/llm/t5/data/pre_training.py @@ -125,6 +125,8 @@ def __init__( self.build_kwargs = build_kwargs self.seq_length = seq_length self.seq_length_dec = seq_length_dec + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size self.tokenizer = tokenizer self.num_workers = num_workers self.pin_memory = pin_memory diff --git a/tests/collections/llm/megatron_t5_pretraining.py b/tests/collections/llm/megatron_t5_pretraining.py index ad63ae88fb73..960c074eddba 100644 --- a/tests/collections/llm/megatron_t5_pretraining.py +++ b/tests/collections/llm/megatron_t5_pretraining.py @@ -23,7 +23,7 @@ from nemo import lightning as nl from nemo.collections import llm -from nemo.collections.llm.api import train +from nemo.collections.llm.api import pretrain from nemo.collections.llm.t5.data import PreTrainingDataModule from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer from nemo.lightning import NeMoLogger @@ -148,12 +148,11 @@ def get_args(): wandb=wandb_logger, ) - train( + pretrain( model=model, resume=resume, data=data, trainer=trainer, log=nemo_logger, - tokenizer='data', optim=opt, ) From 6f084ddbbbe6c01b7c9f1ec490cf5cbda2f3f51d Mon Sep 17 00:00:00 2001 From: Ao Tang Date: Sun, 15 Dec 2024 17:25:45 -0500 Subject: [PATCH 045/128] fix llama3 (#11580) --- scripts/checkpoint_converters/convert_llama_hf_to_nemo.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/checkpoint_converters/convert_llama_hf_to_nemo.py b/scripts/checkpoint_converters/convert_llama_hf_to_nemo.py index b963578ed874..106328a4fd0f 100644 --- a/scripts/checkpoint_converters/convert_llama_hf_to_nemo.py +++ b/scripts/checkpoint_converters/convert_llama_hf_to_nemo.py @@ -108,8 +108,12 @@ def load_config(args, llama_config): rope_type = llama_config['rope_scaling'].get('rope_type') if rope_type is None: rope_type = 
llama_config['rope_scaling'].get('type') + if rope_type in ('linear',): nemo_config['seq_len_interpolation_factor'] = llama_config['rope_scaling']['factor'] + elif rope_type == 'llama3': + # Llama3 in HF actually means rope scaling for llama 3.1+, which uses custom scaling + nemo_config['seq_len_interpolation_factor'] = None else: raise ValueError("Only linear rope scaling type is supported now") if llama_config['rope_theta'] is not None: From a3eb2804dd24febcfc8fe7f2659540022a48292e Mon Sep 17 00:00:00 2001 From: Huiying Date: Mon, 16 Dec 2024 00:00:10 -0800 Subject: [PATCH 046/128] Add Hf nemorun tests (#11566) * minor fixes for recipe Signed-off-by: HuiyingLi * add peft nemorun script Signed-off-by: HuiyingLi * add sft script and data module Signed-off-by: HuiyingLi * Apply isort and black reformatting Signed-off-by: HuiyingLi * clean up Signed-off-by: HuiyingLi * add disable ckpt and data config for tests Signed-off-by: HuiyingLi * Apply isort and black reformatting Signed-off-by: HuiyingLi * add tests to cicd yaml Signed-off-by: HuiyingLi * cleanup Signed-off-by: HuiyingLi --------- Signed-off-by: HuiyingLi Signed-off-by: HuiyingLi Co-authored-by: HuiyingLi --- .github/workflows/cicd-main.yml | 48 +++++++++++++ nemo/collections/llm/gpt/data/hf_dataset.py | 38 ++++++++++ .../recipes/hf_auto_model_for_causal_lm.py | 25 ++++--- tests/collections/llm/hf/peft_nemorun.py | 71 +++++++++++++++++++ tests/collections/llm/hf/sft_nemorun.py | 70 ++++++++++++++++++ 5 files changed, 243 insertions(+), 9 deletions(-) create mode 100644 tests/collections/llm/hf/peft_nemorun.py create mode 100644 tests/collections/llm/hf/sft_nemorun.py diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 3008ca6d2435..1ef4e5e7c034 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -3585,6 +3585,17 @@ jobs: AFTER_SCRIPT: | rm -rf nemo_experiments + L2_HF_Transformer_PEFT_nemorun: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_PEFT_nemorun') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure-gpus-1 + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/peft_nemorun.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --disable-ckpt + AFTER_SCRIPT: | + rm -rf nemo_experiments + L2_HF_Transformer_PEFT_2gpu: needs: [ cicd-test-container-setup ] uses: ./.github/workflows/_test_template.yml @@ -3596,6 +3607,17 @@ jobs: AFTER_SCRIPT: | rm -rf nemo_experiments + L2_HF_Transformer_PEFT_2gpu_nemorun: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_PEFT_2gpu_nemorun') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/peft_nemorun.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --devices 2 --strategy ddp --disable-ckpt + AFTER_SCRIPT: | + rm -rf nemo_experiments + L2_HF_Transformer_SFT_2gpu: needs: [ cicd-test-container-setup ] uses: ./.github/workflows/_test_template.yml @@ -3607,6 +3629,17 @@ jobs: AFTER_SCRIPT: | rm -rf nemo_experiments + L2_HF_Transformer_SFT_2gpu_nemorun: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: 
contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_SFT_2gpu_nemorun') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft_nemorun.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --devices 2 --strategy ddp + AFTER_SCRIPT: | + rm -rf nemo_experiments + L2_HF_Transformer_SFT: needs: [ cicd-test-container-setup ] uses: ./.github/workflows/_test_template.yml @@ -3618,6 +3651,17 @@ jobs: AFTER_SCRIPT: | rm -rf nemo_experiments + L2_HF_Transformer_SFT_nemorun: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_SFT_nemorun') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure-gpus-1 + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft_nemorun.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 + AFTER_SCRIPT: | + rm -rf nemo_experiments + L2_HF_Transformer_SFT_TE_Acceleration: needs: [ cicd-test-container-setup ] uses: ./.github/workflows/_test_template.yml @@ -4754,9 +4798,13 @@ jobs: - L2_NeMo_2_llama3_fault_tolerance_plugin - L2_NeMo_2_llama3_straggler_detection - L2_HF_Transformer_PEFT + - L2_HF_Transformer_PEFT_nemorun - L2_HF_Transformer_PEFT_2gpu + - L2_HF_Transformer_PEFT_2gpu_nemorun - L2_HF_Transformer_SFT + - L2_HF_Transformer_SFT_nemorun - L2_HF_Transformer_SFT_2gpu + - L2_HF_Transformer_SFT_2gpu_nemorun - L2_HF_Transformer_SFT_TE_Acceleration - L2_NeMo_2_SSM_Pretraining - L2_NeMo_2_SSM_Finetuning diff --git a/nemo/collections/llm/gpt/data/hf_dataset.py b/nemo/collections/llm/gpt/data/hf_dataset.py index 7880e26cf6b1..ac03267e9527 100644 --- a/nemo/collections/llm/gpt/data/hf_dataset.py +++ b/nemo/collections/llm/gpt/data/hf_dataset.py @@ -263,3 +263,41 @@ def map(self, function=None, split_names=None, **kwargs): if subset is None: continue dataset_splits[split_name] = subset.map(function, **kwargs) + + +class SquadHFDataModule(HFDatasetDataModule): + def __init__(self, tokenizer, **kwargs): + super().__init__(**kwargs) + self.tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) + + def formatting_prompts_func(self, examples): + EOS_TOKEN = self.tokenizer.eos_token # Must add EOS_TOKEN + alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. 
+ + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""" + instruction = examples["context"] + input = examples["question"] + output = examples["answers"]['text'] + if isinstance(output, list): + output = output[0] + text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN + ans = self.tokenizer(text) + ans['labels'] = ans['input_ids'] + return ans + + def setup(self, stage): + super().setup(stage) + self.tokenizer = getattr(self.tokenizer, 'tokenizer', self.tokenizer) + self.map( + self.formatting_prompts_func, + batched=False, + batch_size=2, + remove_columns=["id", "title", "context", "question", 'answers'], + ) diff --git a/nemo/collections/llm/recipes/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/recipes/hf_auto_model_for_causal_lm.py index 5d2bea23686c..6eeaedbddca2 100644 --- a/nemo/collections/llm/recipes/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/recipes/hf_auto_model_for_causal_lm.py @@ -21,7 +21,10 @@ from lightning.pytorch.callbacks.callback import Callback from nemo import lightning as nl +from nemo.collections import llm +from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm.api import finetune, pretrain +from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule from nemo.collections.llm.gpt.data.mock import MockDataModule from nemo.collections.llm.gpt.model.hf_auto_model_for_causal_lm import HFAutoModelForCausalLM from nemo.collections.llm.peft.lora import LoRA @@ -55,12 +58,6 @@ def model(model_name, load_pretrained_weights) -> run.Config[pl.LightningModule] def trainer( - tensor_parallelism: int = 1, - pipeline_parallelism: int = 1, - pipeline_parallelism_type: Optional[torch.dtype] = None, - virtual_pipeline_parallelism: Optional[int] = None, - context_parallelism: int = 2, - sequence_parallelism: bool = False, num_nodes: int = 1, num_gpus_per_node: int = 8, max_steps: int = 100, @@ -105,6 +102,7 @@ def trainer( trainer = run.Config( nl.Trainer, + num_nodes=num_nodes, devices=num_gpus_per_node, max_steps=max_steps, accelerator='gpu', @@ -177,6 +175,7 @@ def finetune_recipe( num_gpus_per_node: int = 8, peft_scheme: Optional[str] = 'lora', model_name: str = '', + max_steps: int = 100, ) -> run.Partial: """ Create a fine-tuning recipe for a HFAutoModelForCausalLM model. @@ -208,24 +207,32 @@ def finetune_recipe( on fine-tuning LLMs with NeMo, see the fine-tuning guide in the `examples/llm/finetune/` directory. 
""" + tokenizer = llm.HFAutoModelForCausalLM.configure_tokenizer(model_name) recipe = run.Partial( finetune, model=model(model_name, load_pretrained_weights=True), trainer=trainer( num_nodes=num_nodes, num_gpus_per_node=num_gpus_per_node, + max_steps=max_steps, callbacks=[run.Config(TimingCallback)], ), - data=run.Config(MockDataModule, seq_length=4096, global_batch_size=512, micro_batch_size=1), + data=run.Config( + SquadHFDataModule, + path_or_dataset="rajpurkar/squad", + split="train", + pad_token_id=tokenizer.tokenizer.eos_token_id, + tokenizer=run.Config(AutoTokenizer, pretrained_model_name=model_name), + ), log=default_log(dir=dir, name=name, tensorboard_logger=tensorboard_logger(name=name)), optim=pytorch_adam_with_cosine_annealing(max_lr=3e-4), resume=default_resume(), ) if peft_scheme is None or peft_scheme.lower() == 'none': - recipe.optim.config.lr = 5e-6 + recipe.optim.optimizer_fn.lr = 5e-6 elif peft_scheme.lower() == 'lora': recipe.peft = run.Config(LoRA, target_modules=['*_proj']) - recipe.optim.config.lr = 1e-4 + recipe.optim.optimizer_fn.lr = 1e-4 else: raise ValueError(f"Unrecognized peft scheme: {peft_scheme}") return recipe diff --git a/tests/collections/llm/hf/peft_nemorun.py b/tests/collections/llm/hf/peft_nemorun.py new file mode 100644 index 000000000000..ef34d4d39a11 --- /dev/null +++ b/tests/collections/llm/hf/peft_nemorun.py @@ -0,0 +1,71 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import nemo_run as run + +from nemo.collections import llm +from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer +from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule + +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' + + +def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: + # Env vars for jobs are configured here + env_vars = { + "TORCH_NCCL_AVOID_RECORD_STREAMS": "1", + "NCCL_NVLS_ENABLE": "0", + "NVTE_DP_AMAX_REDUCE_INTERVAL": "0", + "NVTE_ASYNC_AMAX_REDUCTION": "1", + "NVTE_FUSED_ATTN": "0", + } + + executor = run.LocalExecutor(ntasks_per_node=devices, launcher="torchrun", env_vars=env_vars) + + return executor + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) + parser.add_argument('--devices', default=1) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--max-steps', type=int, default=100) + parser.add_argument('--disable-ckpt', action='store_false') + args = parser.parse_args() + + recipe = llm.hf_auto_model_for_causal_lm.finetune_recipe( + model_name=args.model, + name="peft", + num_nodes=1, + num_gpus_per_node=args.devices, + peft_scheme='lora', + max_steps=args.max_steps, + ) + recipe.trainer.val_check_interval = 50 + tokenizer = llm.HFAutoModelForCausalLM.configure_tokenizer(args.model) + recipe.data = run.Config( + SquadHFDataModule, + path_or_dataset=DATA_PATH, + split="train[:100]", + pad_token_id=tokenizer.tokenizer.eos_token_id, + tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), + ) + recipe.log = None + recipe.trainer.enable_checkpointing = args.disable_ckpt + executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) + run.run(recipe, executor=executor) diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py new file mode 100644 index 000000000000..a3daa66ca774 --- /dev/null +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -0,0 +1,70 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import nemo_run as run + +from nemo.collections import llm +from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer +from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule + + +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' + + +def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: + # Env vars for jobs are configured here + env_vars = { + "TORCH_NCCL_AVOID_RECORD_STREAMS": "1", + "NCCL_NVLS_ENABLE": "0", + "NVTE_DP_AMAX_REDUCE_INTERVAL": "0", + "NVTE_ASYNC_AMAX_REDUCTION": "1", + "NVTE_FUSED_ATTN": "0", + } + + executor = run.LocalExecutor(ntasks_per_node=devices, launcher="torchrun", env_vars=env_vars) + + return executor + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) + parser.add_argument('--devices', default=1) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--max-steps', type=int, default=100) + args = parser.parse_args() + + recipe = llm.hf_auto_model_for_causal_lm.finetune_recipe( + model_name=args.model, + name="sft", + num_nodes=1, + num_gpus_per_node=args.devices, + peft_scheme='none', + max_steps=args.max_steps, + ) + recipe.trainer.val_check_interval = 50 + + tokenizer = llm.HFAutoModelForCausalLM.configure_tokenizer(args.model) + recipe.data = run.Config( + SquadHFDataModule, + path_or_dataset=DATA_PATH, + split="train[:100]", + pad_token_id=tokenizer.tokenizer.eos_token_id, + tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), + ) + executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) + run.run(recipe, executor=executor) From b98160cdf9ecea69a4b59e72076ab1eb2ba763df Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 12:06:54 +0100 Subject: [PATCH 047/128] =?UTF-8?q?[=F0=9F=A4=96]:=20Howdy=20folks,=20let'?= =?UTF-8?q?s=20bump=20NeMo-Toolkit=20to=20`2.2.0rc0`=20!=20(#11555)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- nemo/package_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/package_info.py b/nemo/package_info.py index a60316270d57..1d69deff96d3 100644 --- a/nemo/package_info.py +++ b/nemo/package_info.py @@ -14,7 +14,7 @@ MAJOR = 2 -MINOR = 1 +MINOR = 2 PATCH = 0 PRE_RELEASE = 'rc0' From 711176a98aca521f6500118d1e850e8561d2c86e Mon Sep 17 00:00:00 2001 From: Jan Lasek Date: Mon, 16 Dec 2024 13:47:04 +0100 Subject: [PATCH 048/128] Pass the number of experts to modelopt layer spec (#11607) * Pass number of experts to modelopt layer spec Signed-off-by: Jan Lasek * Fix too long lines Signed-off-by: Jan Lasek --------- Signed-off-by: Jan Lasek --- nemo/collections/llm/quantization/utils.py | 6 +++--- .../megatron/gpt_layer_modelopt_spec.py | 12 ++++++++++-- nemo/export/quantize/quantizer.py | 7 ++----- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/nemo/collections/llm/quantization/utils.py b/nemo/collections/llm/quantization/utils.py index 57022d9d3e98..aa7fa61f1b38 100644 --- a/nemo/collections/llm/quantization/utils.py +++ b/nemo/collections/llm/quantization/utils.py @@ -55,12 +55,12 @@ def 
quantizable_model_config(model_cfg: llm.GPTConfig) -> llm.GPTConfig: get_gpt_layer_modelopt_spec, ) - model_cfg.transformer_layer_spec = get_gpt_layer_modelopt_spec() + model_cfg.transformer_layer_spec = get_gpt_layer_modelopt_spec(num_experts=model_cfg.num_moe_experts) if model_cfg.sequence_parallel: logging.warning("Disabling sequence parallelism for quantization...") model_cfg.sequence_parallel = False - # Only custom ModelOpt spec is supported for Quantization: this custom spec is largely based on local Megatron-LM - # layer definitions to avoid Transformer Engine implementations that are currently not supported. + # Only custom ModelOpt spec is supported for quantization: this custom spec is largely based on local + # Megatron-LM layer definitions to avoid Transformer Engine implementations that are currently not supported. # This layer spec also requires RoPE fusion to be disabled for tensor view operations in attention # layer implementation from megatron/core/transformer/dot_product_attention.py to be functional. model_cfg.name = "modelopt" diff --git a/nemo/collections/nlp/models/language_modeling/megatron/gpt_layer_modelopt_spec.py b/nemo/collections/nlp/models/language_modeling/megatron/gpt_layer_modelopt_spec.py index 046e032093b1..514ef62a9ff3 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron/gpt_layer_modelopt_spec.py +++ b/nemo/collections/nlp/models/language_modeling/megatron/gpt_layer_modelopt_spec.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional + try: from megatron.core.extensions.transformer_engine import TEDotProductAttention, TENorm from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add @@ -37,7 +39,7 @@ # Use this spec for Model Optimizer PTQ and TensorRT-LLM export -def get_gpt_layer_modelopt_spec(num_experts: int = None) -> ModuleSpec: +def get_gpt_layer_modelopt_spec(num_experts: Optional[int] = None) -> ModuleSpec: """Mix the native spec with TENorm and TEDotProductAttention. This is essentially the native local spec except for the layernorm implementation @@ -45,6 +47,12 @@ def get_gpt_layer_modelopt_spec(num_experts: int = None) -> ModuleSpec: prevents the apex dependency. TEDotProductAttention is used to support sliding window attention. + + Args: + num_experts (int): Number of experts. Defaults to None. + + Returns: + ModuleSpec: Module specification with Megatron-Core modules. """ if not HAVE_MEGATRON_CORE: raise IMPORT_ERROR @@ -79,7 +87,7 @@ def get_gpt_layer_modelopt_spec(num_experts: int = None) -> ModuleSpec: # Helper function to get module spec for MLP/MoE -def _get_mlp_module_spec(num_experts: int = None) -> ModuleSpec: +def _get_mlp_module_spec(num_experts: Optional[int] = None) -> ModuleSpec: if num_experts is None: # Dense MLP w/ or w/o TE modules. return ModuleSpec( diff --git a/nemo/export/quantize/quantizer.py b/nemo/export/quantize/quantizer.py index cbf3ea39921e..6f7027f12be8 100644 --- a/nemo/export/quantize/quantizer.py +++ b/nemo/export/quantize/quantizer.py @@ -164,10 +164,6 @@ def modify_model_config(model_cfg: DictConfig) -> DictConfig: if model_cfg.get("sequence_parallel", False): logging.warning("Disabling sequence parallelism for quantization...") model_cfg.sequence_parallel = False - # Only custom ModelOpt spec is supported for Quantization: this custom spec is largely based on local Megatron-LM - # layer definitions to avoid Transformer Engine implementations that are currently not supported. 
- # This layer spec also requires RoPE fusion to be disabled for tensor view operations in attention - # layer implementation from megatron/core/transformer/dot_product_attention.py to be functional. model_cfg.name = "modelopt" model_cfg.apply_rope_fusion = False @@ -248,7 +244,8 @@ def export(self, model: MegatronGPTModel): ) dist.barrier() # Wait until all ranks complete export_model_config step logging.info( - f"Exporting quantized weights, model artifacts, and tokenizer config to {self.export_config.save_path}..." + "Exporting quantized weights, model artifacts," + f" and tokenizer config to {self.export_config.save_path}..." ) if dist.get_rank() == 0: save_artifacts(model, export_dir) From 7210212ecfdf61e33888049f5216b7f1f8914fc9 Mon Sep 17 00:00:00 2001 From: Ssofja <78349198+Ssofja@users.noreply.github.com> Date: Mon, 16 Dec 2024 19:25:53 +0400 Subject: [PATCH 049/128] Adding changes to asr documentation (#11397) Signed-off-by: Ssofja --- docs/source/asr/all_chkpt.rst | 236 +++++++++++++++ docs/source/asr/data/benchmark_by.csv | 6 +- docs/source/asr/data/benchmark_ca.csv | 7 +- docs/source/asr/data/benchmark_canary.csv | 2 + docs/source/asr/data/benchmark_cn.csv | 3 + .../asr/data/benchmark_code_switching.csv | 6 +- docs/source/asr/data/benchmark_cs.csv | 4 + docs/source/asr/data/benchmark_de.csv | 16 +- docs/source/asr/data/benchmark_en.csv | 78 +++-- docs/source/asr/data/benchmark_eo.csv | 3 + docs/source/asr/data/benchmark_es.csv | 19 +- docs/source/asr/data/benchmark_fa.csv | 2 + .../data/benchmark_fastconformer_hybrid.csv | 16 + docs/source/asr/data/benchmark_fr.csv | 20 +- docs/source/asr/data/benchmark_hi.csv | 4 +- docs/source/asr/data/benchmark_hr.csv | 9 +- docs/source/asr/data/benchmark_it.csv | 9 +- docs/source/asr/data/benchmark_jp.csv | 2 + docs/source/asr/data/benchmark_ka.csv | 3 + docs/source/asr/data/benchmark_kab.csv | 4 +- docs/source/asr/data/benchmark_kz.csv | 2 + docs/source/asr/data/benchmark_mr.csv | 5 +- docs/source/asr/data/benchmark_nl.csv | 2 + docs/source/asr/data/benchmark_parakeet.csv | 7 + docs/source/asr/data/benchmark_pl.csv | 7 +- docs/source/asr/data/benchmark_ru.csv | 11 +- docs/source/asr/data/benchmark_rw.csv | 6 +- docs/source/asr/data/benchmark_ua.csv | 6 +- docs/source/asr/data/benchmark_uz.csv | 2 + docs/source/asr/data/benchmark_zh.csv | 8 +- docs/source/asr/results.rst | 280 +++++------------- 31 files changed, 474 insertions(+), 311 deletions(-) create mode 100644 docs/source/asr/all_chkpt.rst create mode 100644 docs/source/asr/data/benchmark_canary.csv create mode 100644 docs/source/asr/data/benchmark_cn.csv create mode 100644 docs/source/asr/data/benchmark_cs.csv create mode 100644 docs/source/asr/data/benchmark_eo.csv create mode 100644 docs/source/asr/data/benchmark_fa.csv create mode 100644 docs/source/asr/data/benchmark_fastconformer_hybrid.csv create mode 100644 docs/source/asr/data/benchmark_jp.csv create mode 100644 docs/source/asr/data/benchmark_ka.csv create mode 100644 docs/source/asr/data/benchmark_kz.csv create mode 100644 docs/source/asr/data/benchmark_nl.csv create mode 100644 docs/source/asr/data/benchmark_parakeet.csv create mode 100644 docs/source/asr/data/benchmark_uz.csv diff --git a/docs/source/asr/all_chkpt.rst b/docs/source/asr/all_chkpt.rst new file mode 100644 index 000000000000..55f918a40d92 --- /dev/null +++ b/docs/source/asr/all_chkpt.rst @@ -0,0 +1,236 @@ +All Checkpoints +=============== +English +^^^^^^^ +.. 
csv-table:: + :file: data/benchmark_en.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +German +^^^^^^ +.. csv-table:: + :file: data/benchmark_de.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Spanish +^^^^^^^ +.. csv-table:: + :file: data/benchmark_es.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +French +^^^^^^ +.. csv-table:: + :file: data/benchmark_fr.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Russian +^^^^^^^ +.. csv-table:: + :file: data/benchmark_ru.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Japanese +^^^^^^^^ +.. csv-table:: + :file: data/benchmark_jp.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Chinese +^^^^^^^ +.. csv-table:: + :file: data/benchmark_cn.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Georgian +^^^^^^^^ +.. csv-table:: + :file: data/benchmark_ka.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Kazakh +^^^^^^ +.. csv-table:: + :file: data/benchmark_kz.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Persian +^^^^^^^ +.. csv-table:: + :file: data/benchmark_fa.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Uzbek +^^^^^ +.. csv-table:: + :file: data/benchmark_uz.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Ukrainian +^^^^^^^^^ +.. csv-table:: + :file: data/benchmark_ua.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Polish +^^^^^^ +.. csv-table:: + :file: data/benchmark_pl.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Italian +^^^^^^^ +.. csv-table:: + :file: data/benchmark_it.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Belarusian +^^^^^^^^^^ +.. csv-table:: + :file: data/benchmark_by.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Croatian +^^^^^^^^ +.. csv-table:: + :file: data/benchmark_hr.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Esperanto +^^^^^^^^^ +.. csv-table:: + :file: data/benchmark_eo.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Kabyle +^^^^^^ +.. csv-table:: + :file: data/benchmark_kab.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Dutch +^^^^^ +.. csv-table:: + :file: data/benchmark_nl.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Catalan +^^^^^^^ +.. csv-table:: + :file: data/benchmark_ca.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Hindi +^^^^^^^ +.. csv-table:: + :file: data/benchmark_hi.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- +Marathi +^^^^^^^ +.. csv-table:: + :file: data/benchmark_mr.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +----------------------------- + +Mandarin +^^^^^^^ +.. csv-table:: + :file: data/benchmark_zh.csv + :align: left + :widths: 50,50 + :header-rows: 1 + +Kinyarwanda +^^^^^^^^^^^ +.. 
csv-table:: + :file: data/benchmark_rw.csv + :align: left + :widths: 50,50 + :header-rows: 1 diff --git a/docs/source/asr/data/benchmark_by.csv b/docs/source/asr/data/benchmark_by.csv index 750dfd82ff94..31594fc85885 100644 --- a/docs/source/asr/data/benchmark_by.csv +++ b/docs/source/asr/data/benchmark_by.csv @@ -1,2 +1,4 @@ -Model,Model Base Class,Model Card -stt_by_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_by_fastconformer_hybrid_large_pc" \ No newline at end of file +Model,Model Base Class +`stt_uk_citrinet_1024_gamma_0_25 `_,EncDecCTCModel +`stt_ua_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_ca.csv b/docs/source/asr/data/benchmark_ca.csv index bd7e174b922f..62d985407272 100644 --- a/docs/source/asr/data/benchmark_ca.csv +++ b/docs/source/asr/data/benchmark_ca.csv @@ -1,4 +1,3 @@ -Model,Model Base Class,Model Card -stt_ca_quartznet15x5,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ca_quartznet15x5" -stt_ca_conformer_ctc_large,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ca_conformer_ctc_large" -stt_ca_conformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ca_conformer_transducer_large" \ No newline at end of file +model_name,base_model,link +stt_ca_conformer_ctc_large,ConformerBaseModel,https://huggingface.co/nvidia/stt_ca_conformer_ctc_large +stt_ca_conformer_transducer_large,ConformerBaseModel,https://huggingface.co/nvidia/stt_ca_conformer_transducer_large diff --git a/docs/source/asr/data/benchmark_canary.csv b/docs/source/asr/data/benchmark_canary.csv new file mode 100644 index 000000000000..271eb5819c01 --- /dev/null +++ b/docs/source/asr/data/benchmark_canary.csv @@ -0,0 +1,2 @@ +Model,Language +`canary-1b `_ ,"English, French, German, Spanish" diff --git a/docs/source/asr/data/benchmark_cn.csv b/docs/source/asr/data/benchmark_cn.csv new file mode 100644 index 000000000000..fdb58f7b88bf --- /dev/null +++ b/docs/source/asr/data/benchmark_cn.csv @@ -0,0 +1,3 @@ +Model,Model Base Class +`stt_zh_citrinet_1024_gamma_0_25 `_,EncDecCTCModel +`stt_zh_conformer_transducer_large `_,EncDecRNNTBPEModel diff --git a/docs/source/asr/data/benchmark_code_switching.csv b/docs/source/asr/data/benchmark_code_switching.csv index 1320f19911e6..62f5c880b2e6 100644 --- a/docs/source/asr/data/benchmark_code_switching.csv +++ b/docs/source/asr/data/benchmark_code_switching.csv @@ -1,3 +1,3 @@ -Model,Model Base Class,Model Card -stt_enes_conformer_ctc_large_codesw,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_enes_conformer_ctc_large_codesw" -stt_enes_conformer_transducer_large_codesw,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_enes_conformer_transducer_large_codesw" \ No newline at end of file +Model,Language +`stt_enes_conformer_ctc_large_codesw `_,"English, Spanish" +`stt_enes_conformer_transducer_large_codesw `_,"English, Spanish" \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_cs.csv b/docs/source/asr/data/benchmark_cs.csv new file mode 100644 index 000000000000..6b0f4e0d3e25 --- /dev/null +++ b/docs/source/asr/data/benchmark_cs.csv @@ -0,0 +1,4 @@ +Model,Model Base Class +`stt_ca_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_ca_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_ca_quartznet15x5 `_,EncDecCTCModel 
diff --git a/docs/source/asr/data/benchmark_de.csv b/docs/source/asr/data/benchmark_de.csv index 6084e95c37c0..08939c20a431 100644 --- a/docs/source/asr/data/benchmark_de.csv +++ b/docs/source/asr/data/benchmark_de.csv @@ -1,7 +1,9 @@ -Model,Model Base Class,Model Card -stt_de_quartznet15x5,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_quartznet15x5" -stt_de_citrinet_1024,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_citrinet_1024" -stt_de_contextnet_1024,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_contextnet_1024" -stt_de_conformer_ctc_large,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_conformer_ctc_large" -stt_de_conformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_conformer_transducer_large" -stt_de_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_fastconformer_hybrid_large_pc" +Model,Model Base Class +`canary-1b `_,EncDecMultiTaskModel +`stt_de_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_de_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_de_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_de_quartznet15x5 `_,EncDecCTCModel +`stt_de_contextnet_1024 `_,EncDecRNNTBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc_blend_eu `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_en.csv b/docs/source/asr/data/benchmark_en.csv index 1669ecdeefb5..343cdc49d8d2 100644 --- a/docs/source/asr/data/benchmark_en.csv +++ b/docs/source/asr/data/benchmark_en.csv @@ -1,41 +1,37 @@ -Model Name,Model Base Class,Model Card -QuartzNet15x5Base-En,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemospeechmodels" -stt_en_jasper10x5dr,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_jasper10x5dr" -stt_en_citrinet_256,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_256" -stt_en_citrinet_512,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_512" -stt_en_citrinet_1024,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_1024" -stt_en_citrinet_256_gamma_0_25,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_256_gamma_0_25" -stt_en_citrinet_512_gamma_0_25,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_512_gamma_0_25" -stt_en_citrinet_1024_gamma_0_25,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_citrinet_1024_gamma_0_25" -stt_en_contextnet_256_mls,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_256_mls" -stt_en_contextnet_512_mls,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_512_mls" -stt_en_contextnet_1024_mls,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_1024_mls" -stt_en_contextnet_256,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_256" -stt_en_contextnet_512,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_512" -stt_en_contextnet_1024,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_contextnet_1024" 
-stt_en_conformer_ctc_small,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_small" -stt_en_conformer_ctc_medium,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_medium" -stt_en_conformer_ctc_large,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_large" -stt_en_conformer_ctc_xlarge,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_xlarge" -stt_en_conformer_ctc_small_ls,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_small_ls" -stt_en_conformer_ctc_medium_ls,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_medium_ls" -stt_en_conformer_ctc_large_ls,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_ctc_large_ls" -stt_en_conformer_transducer_large_ls,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_large_ls" -stt_en_conformer_transducer_small,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_small" -stt_en_conformer_transducer_medium,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_medium" -stt_en_conformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_large" -stt_en_conformer_transducer_xlarge,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_xlarge" -stt_en_conformer_transducer_xxlarge,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_conformer_transducer_xxlarge" -stt_en_fastconformer_ctc_large_ls,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_ctc_large_ls" -stt_en_fastconformer_transducer_large_ls,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_transducer_large_ls" -stt_en_fastconformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_transducer_large" -stt_en_fastconformer_ctc_large,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_ctc_large" -stt_en_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_pc" -stt_en_fastconformer_transducer_xlarge,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_transducer_xlarge" -stt_en_fastconformer_ctc_xlarge,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_ctc_xlarge" -stt_en_fastconformer_transducer_xxlarge,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_transducer_xxlarge" -stt_en_fastconformer_ctc_xxlarge,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_ctc_xxlarge" -stt_en_fastconformer_hybrid_large_streaming_80ms,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_streaming_80ms" -stt_en_fastconformer_hybrid_large_streaming_480ms,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_streaming_480ms" 
-stt_en_fastconformer_hybrid_large_streaming_1040ms,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_streaming_1040ms" -stt_en_fastconformer_hybrid_large_streaming_multi,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_fastconformer_hybrid_large_streaming_multi" +Model,Model Base Class +`parakeet-tdt-1.1b `_, EncDecRNNTBPEModel +`parakeet-tdt_ctc-1.1b `_, ASRModel +`parakeet-tdt_ctc-110m `_, ASRModel +`canary-1b `_, EncDecMultiTaskModel +`stt_en_conformer_ctc_large `_, EncDecCTCModelBPE +`parakeet-ctc-0.6b `_, EncDecCTCModelBPE +`parakeet-ctc-1.1b `_, EncDecCTCModelBPE +`stt_en_conformer_transducer_xlarge `_, EncDecRNNTBPEModel +`stt_en_fastconformer_ctc_large `_, EncDecCTCModelBPE +`stt_en_citrinet_256_ls `_, EncDecCTCModelBPE +`stt_en_fastconformer_hybrid_large_streaming_multi `_, EncDecHybridRNNTCTCBPEModel +`stt_en_fastconformer_ctc_xxlarge `_, EncDecCTCTBPEModel +`stt_en_conformer_transducer_large `_, EncDecRNNTBPEModel +`stt_en_fastconformer_hybrid_large_pc `_, EncDecHybridRNNTCTCBPEModel +`stt_en_citrinet_512_ls `_, EncDecCTCModelBPE +`stt_en_conformer_ctc_small `_, EncDecCTCModelBPE +`stt_en_citrinet_1024_gamma_0_25 `_, EncDecCTCModelBPE +`stt_en_fastconformer_transducer_large `_, EncDecRNNTBPEModel +`stt_en_fastconformer_transducer_xlarge `_, EncDecRNNTBPEModel +`stt_en_fastconformer_transducer_xxlarge `_, EncDecRNNTBPEModel +`stt_en_citrinet_768_ls `_, EncDecCTCModelBPE +`stt_en_fastconformer_ctc_xlarge `_, EncDecCTCTBPEModel +`stt_en_citrinet_384_ls `_, EncDecCTCModelBPE +`stt_en_citrinet_1024_ls `_, EncDecCTCModelBPE +`QuartzNet15x5Base-En `_, EncDecCTCModel +`stt_en_jasper10x5dr `_, EncDecCTCModel +`stt_en_contextnet_256_mls `_, EncDecRNNTBPEModel +`stt_en_contextnet_512_mls `_, EncDecRNNTBPEModel +`stt_en_contextnet_1024_mls `_, EncDecRNNTBPEModel +`stt_en_contextnet_256 `_, EncDecRNNTBPEModel +`stt_en_contextnet_512 `_, EncDecRNNTBPEModel +`stt_en_contextnet_1024 `_, EncDecRNNTBPEModel +`stt_enes_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_enes_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc_blend_eu `_,EncDecHybridRNNTCTCBPEModel \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_eo.csv b/docs/source/asr/data/benchmark_eo.csv new file mode 100644 index 000000000000..d8fae94f1650 --- /dev/null +++ b/docs/source/asr/data/benchmark_eo.csv @@ -0,0 +1,3 @@ +Model,Model Base Class +`stt_eo_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_eo_conformer_ctc_large `_,EncDecCTCModelBPE \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_es.csv b/docs/source/asr/data/benchmark_es.csv index 0fa8b0ecedf1..500869ee92f4 100644 --- a/docs/source/asr/data/benchmark_es.csv +++ b/docs/source/asr/data/benchmark_es.csv @@ -1,8 +1,11 @@ -Model,Model Base Class,Model Card -stt_es_quartznet15x5,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_quartznet15x5" -stt_es_citrinet_512,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_citrinet_512" -stt_es_citrinet_1024_gamma_0_25,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_citrinet_1024_gamma_0_25" -stt_es_conformer_ctc_large,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_conformer_ctc_large" 
-stt_es_conformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_conformer_transducer_large" -stt_es_contextnet_1024,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_contextnet_1024" -stt_es_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_fastconformer_hybrid_large_pc" \ No newline at end of file +Model,Model Base Class +`canary-1b `_,EncDecMultiTaskModel +`stt_es_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_es_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_es_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_es_quartznet15x5 `_,EncDecCTCModel +`stt_es_contextnet_1024 `_,EncDecRNNTBPEModel +`stt_enes_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_enes_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc_blend_eu `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_fa.csv b/docs/source/asr/data/benchmark_fa.csv new file mode 100644 index 000000000000..f8e6f434e63d --- /dev/null +++ b/docs/source/asr/data/benchmark_fa.csv @@ -0,0 +1,2 @@ +Model,Model Base Class +`stt_fa_fastconformer_hybrid_large `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_fastconformer_hybrid.csv b/docs/source/asr/data/benchmark_fastconformer_hybrid.csv new file mode 100644 index 000000000000..1feeee875d4b --- /dev/null +++ b/docs/source/asr/data/benchmark_fastconformer_hybrid.csv @@ -0,0 +1,16 @@ +Model,Language +`stt_be_fastconformer_hybrid_large_pc `_,Belarusian +`stt_hr_fastconformer_hybrid_large_pc `_,Croatian +`stt_nl_fastconformer_hybrid_large_pc `_,Dutch +`stt_en_fastconformer_hybrid_large_pc `_,English +`stt_fr_fastconformer_hybrid_large_pc `_,French +`stt_ka_fastconformer_hybrid_large_pc `_,Georgian +`stt_de_fastconformer_hybrid_large_pc `_,German +`stt_it_fastconformer_hybrid_large_pc `_,Italian +`stt_kk_ru_fastconformer_hybrid_large `_,"Kazakh, Russian" +`stt_fa_fastconformer_hybrid_large `_,Persian +`stt_pl_fastconformer_hybrid_large_pc `_,Polish +`stt_ru_fastconformer_hybrid_large_pc `_,Russian +`stt_es_fastconformer_hybrid_large_pc `_,Spanish +`stt_ua_fastconformer_hybrid_large_pc `_,Ukrainian +`stt_uz_fastconformer_hybrid_large_pc `_,Uzbek \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_fr.csv b/docs/source/asr/data/benchmark_fr.csv index 0f17318caead..f2bd5ef8d01f 100644 --- a/docs/source/asr/data/benchmark_fr.csv +++ b/docs/source/asr/data/benchmark_fr.csv @@ -1,9 +1,11 @@ -Model,Model Base Class,Model Card -stt_fr_quartznet15x5,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_quartznet15x5" -stt_fr_citrinet_1024_gamma_0_25,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_citrinet_1024_gamma_0_25" -stt_fr_no_hyphen_citrinet_1024_gamma_0_25,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_citrinet_1024_gamma_0_25" -stt_fr_contextnet_1024,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_contextnet_1024" -stt_fr_conformer_ctc_large,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_conformer_ctc_large" -stt_fr_no_hyphen_conformer_ctc_large,EncDecCTCModelBPE,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_conformer_ctc_large" 
-stt_fr_conformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_conformer_transducer_large" -stt_fr_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_fastconformer_hybrid_large_pc" \ No newline at end of file +Model,Model Base Class +`canary-1b `_,EncDecMultiTaskModel +`stt_fr_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_fr_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_fr_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_fr_quartznet15x5 `_,EncDecCTCModel +`stt_fr_contextnet_1024 `_,EncDecRNNTBPEModel +`stt_fr_no_hyphen_citrinet_1024_gamma_0_25 `_,EncDecCTCModelBPE +`stt_fr_no_hyphen_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc_blend_eu `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_hi.csv b/docs/source/asr/data/benchmark_hi.csv index 4d3df532ed2e..3350633f3496 100644 --- a/docs/source/asr/data/benchmark_hi.csv +++ b/docs/source/asr/data/benchmark_hi.csv @@ -1,2 +1,2 @@ -Model Name,Model Base Class,Model Card -stt_hi_conformer_ctc_medium,EncDecCTCModelBPE,"https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_hi_conformer_ctc_medium" +Model Name,Model Base Class +`stt_hi_conformer_ctc_medium `_,EncDecCTCModelBPE \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_hr.csv b/docs/source/asr/data/benchmark_hr.csv index 35a5b5f04f39..25288b724bed 100644 --- a/docs/source/asr/data/benchmark_hr.csv +++ b/docs/source/asr/data/benchmark_hr.csv @@ -1,4 +1,5 @@ -Model,Model Base Class,Model Card -stt_hr_conformer_ctc_large,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_hr_conformer_ctc_large" -stt_hr_conformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_hr_conformer_transducer_large" -stt_hr_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_hr_fastconformer_hybrid_large_pc" \ No newline at end of file +Model,Model Base Class +`stt_hr_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_hr_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_hr_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_it.csv b/docs/source/asr/data/benchmark_it.csv index 230194966573..8556b4ce7b10 100644 --- a/docs/source/asr/data/benchmark_it.csv +++ b/docs/source/asr/data/benchmark_it.csv @@ -1,3 +1,6 @@ -Model,Model Base Class,Model Card -stt_it_quartznet15x5,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_quartznet15x5" -stt_it_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_fastconformer_hybrid_large_pc" +Model,Model Base Class +`stt_it_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_it_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_it_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_it_quartznet15x5 `_,EncDecCTCModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_jp.csv b/docs/source/asr/data/benchmark_jp.csv new file mode 100644 index 000000000000..600216eeb589 --- /dev/null +++ b/docs/source/asr/data/benchmark_jp.csv @@ 
-0,0 +1,2 @@ +Model,Model Base Class +`parakeet-tdt_ctc-0.6b-ja `_,ASRModel diff --git a/docs/source/asr/data/benchmark_ka.csv b/docs/source/asr/data/benchmark_ka.csv new file mode 100644 index 000000000000..ca455d0b7266 --- /dev/null +++ b/docs/source/asr/data/benchmark_ka.csv @@ -0,0 +1,3 @@ +Model,Model Base Class +`stt_ka_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_ka_fastconformer_hybrid_transducer_ctc_large_streaming_80ms_pc `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_kab.csv b/docs/source/asr/data/benchmark_kab.csv index 76a54cfe42de..5bdfee862921 100644 --- a/docs/source/asr/data/benchmark_kab.csv +++ b/docs/source/asr/data/benchmark_kab.csv @@ -1,2 +1,2 @@ -Model,Model Base Class,Model Card -stt_kab_conformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_kab_conformer_transducer_large" +Model,Model Base Class +`stt_kab_conformer_transducer_large `_,EncDecRNNTBPEModel diff --git a/docs/source/asr/data/benchmark_kz.csv b/docs/source/asr/data/benchmark_kz.csv new file mode 100644 index 000000000000..daf41a18ebfa --- /dev/null +++ b/docs/source/asr/data/benchmark_kz.csv @@ -0,0 +1,2 @@ +Model,Model Base Class +`stt_kk_ru_fastconformer_hybrid_large `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_mr.csv b/docs/source/asr/data/benchmark_mr.csv index 00ae7211bd75..bc35c0c99378 100644 --- a/docs/source/asr/data/benchmark_mr.csv +++ b/docs/source/asr/data/benchmark_mr.csv @@ -1,3 +1,2 @@ -Model Name,Model Base Class,Model Card -stt_mr_conformer_ctc_medium,EncDecCTCModelBPE,"https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_mr_conformer_ctc_medium" - +Model Name,Model Base Class +`stt_mr_conformer_ctc_medium `_,EncDecCTCModelBPE \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_nl.csv b/docs/source/asr/data/benchmark_nl.csv new file mode 100644 index 000000000000..91eb151c09c8 --- /dev/null +++ b/docs/source/asr/data/benchmark_nl.csv @@ -0,0 +1,2 @@ +Model,Model Base Class +`stt_nl_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_parakeet.csv b/docs/source/asr/data/benchmark_parakeet.csv new file mode 100644 index 000000000000..93a75583a659 --- /dev/null +++ b/docs/source/asr/data/benchmark_parakeet.csv @@ -0,0 +1,7 @@ +Model,Language +`parakeet-ctc-0.6b `_,English +`parakeet-ctc-1.1b `_,English +`parakeet-tdt-1.1b `_,English +`parakeet-tdt_ctc-110m `_,English +`parakeet-tdt_ctc-1.1b `_,English +`parakeet-tdt_ctc-0.6b-ja `_,Japanese \ No newline at end of file diff --git a/docs/source/asr/data/benchmark_pl.csv b/docs/source/asr/data/benchmark_pl.csv index e3ad9bdb50b7..09f257ccea48 100644 --- a/docs/source/asr/data/benchmark_pl.csv +++ b/docs/source/asr/data/benchmark_pl.csv @@ -1,3 +1,4 @@ -Model,Model Base Class,Model Card -stt_pl_quartznet15x5,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_pl_quartznet15x5" -stt_pl_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_pl_fastconformer_hybrid_large_pc" \ No newline at end of file +Model,Model Base Class +`stt_pl_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_pl_quartznet15x5 `_,EncDecCTCModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_ru.csv b/docs/source/asr/data/benchmark_ru.csv index 66b9b321f5fe..5a2e79a414f6 
100644 --- a/docs/source/asr/data/benchmark_ru.csv +++ b/docs/source/asr/data/benchmark_ru.csv @@ -1,4 +1,7 @@ -Model,Model Base Class,Model Card -stt_ru_quartznet15x5,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_quartznet15x5" -stt_ru_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_fastconformer_hybrid_large_pc" - +Model,Model Base Class +`stt_ru_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_ru_conformer_transducer_large `_,EncDecRNNTBPEModel +`stt_kk_ru_fastconformer_hybrid_large `_,EncDecHybridRNNTCTCBPEModel +`stt_ru_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_ru_quartznet15x5 `_,EncDecCTCModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_rw.csv b/docs/source/asr/data/benchmark_rw.csv index 0264fc8a70cd..598de70f1b75 100644 --- a/docs/source/asr/data/benchmark_rw.csv +++ b/docs/source/asr/data/benchmark_rw.csv @@ -1,3 +1,3 @@ -Model,Model Base Class,Model Card -stt_rw_conformer_ctc_large,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_rw_conformer_ctc_large" -stt_rw_conformer_transducer_large,EncDecRNNTBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_rw_conformer_transducer_large" \ No newline at end of file +Model,Model Base Class +`stt_rw_conformer_ctc_large `_,EncDecCTCModelBPE +`stt_rw_conformer_transducer_large `_,EncDecRNNTBPEModel diff --git a/docs/source/asr/data/benchmark_ua.csv b/docs/source/asr/data/benchmark_ua.csv index df1b6c383d3b..31594fc85885 100644 --- a/docs/source/asr/data/benchmark_ua.csv +++ b/docs/source/asr/data/benchmark_ua.csv @@ -1,2 +1,4 @@ -Model,Model Base Class,Model Card -stt_ua_fastconformer_hybrid_large_pc,EncDecHybridRNNTCTCBPEModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ua_fastconformer_hybrid_large_pc" \ No newline at end of file +Model,Model Base Class +`stt_uk_citrinet_1024_gamma_0_25 `_,EncDecCTCModel +`stt_ua_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel +`stt_multilingual_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_uz.csv b/docs/source/asr/data/benchmark_uz.csv new file mode 100644 index 000000000000..510fe4ef64bc --- /dev/null +++ b/docs/source/asr/data/benchmark_uz.csv @@ -0,0 +1,2 @@ +Model,Model Base Class +`stt_uz_fastconformer_hybrid_large_pc `_,EncDecHybridRNNTCTCBPEModel diff --git a/docs/source/asr/data/benchmark_zh.csv b/docs/source/asr/data/benchmark_zh.csv index 3d98f2fa4cec..68a858e92a41 100644 --- a/docs/source/asr/data/benchmark_zh.csv +++ b/docs/source/asr/data/benchmark_zh.csv @@ -1,4 +1,4 @@ -Model,Model Base Class,Model Card -stt_zh_citrinet_512,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_zh_citrinet_512" -stt_zh_citrinet_1024_gamma_0_25,EncDecCTCModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_zh_citrinet_1024_gamma_0_25" -stt_zh_conformer_transducer_large,EncDecRNNTModel,"https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_zh_conformer_transducer_large" +Model,Model Base Class +`stt_zh_citrinet_512 `_,EncDecCTCModel +`stt_zh_citrinet_1024_gamma_0_25 `_,EncDecCTCModel +`stt_zh_conformer_transducer_large `_,EncDecRNNTModel diff --git a/docs/source/asr/results.rst b/docs/source/asr/results.rst index 3ad1ad0b5091..2e70b8b17c8e 100644 --- a/docs/source/asr/results.rst +++ b/docs/source/asr/results.rst @@ -37,14 +37,14 @@ Due to transparency, the ASR model can be 
extracted after training/finetuning se made for convenience purpose :code:`hybrid_model.save_asr_model_to(.nemo)` -NGC Pretrained Checkpoints +Pretrained Checkpoints -------------------------- The ASR collection has checkpoints of several models trained on various datasets for a variety of tasks. These checkpoints are -obtainable via NGC `NeMo Automatic Speech Recognition collection `_. -The model cards on NGC contain more information about each of the checkpoints available. +obtainable via NGC `NeMo Automatic Speech Recognition collection `_ or `Huggingface `_. +The model cards on these websites contain more information about each of the checkpoints available. -The tables below list the ASR models available from NGC. The models can be accessed via the :code:`from_pretrained()` method inside +The tables below list the Top ASR models available from the datasources above. The models can be accessed via the :code:`from_pretrained()` method inside the ASR Model class. In general, you can load any of these models with code in the following format: .. code-block:: python @@ -137,6 +137,56 @@ For more information, see `nemo.collections.asr.modules <./api.html#modules>`__. ----- + +Inference on long audio +^^^^^^^^^^^^^^^^^^^^^^^ + +In some cases the audio is too long for standard inference, especially if you're using a model such as Conformer, where the time and memory costs of the attention layers scale quadratically with the duration. + +There are two main ways of performing inference on long audio files in NeMo: + +The first way is to use buffered inference, where the audio is divided into chunks to run on, and the output is merged afterwards. +The relevant scripts for this are contained in `this folder `_. + +The second way, specifically for models with the Conformer/Fast Conformer encoder, is to use local attention, which changes the costs to be linear. +You can train Fast Conformer models with Longformer-style (https://arxiv.org/abs/2004.05150) local+global attention using one of the following configs: CTC config at +``/examples/asr/conf/fastconformer/fast-conformer-long_ctc_bpe.yaml`` and transducer config at ``/examples/asr/conf/fastconformer/fast-conformer-long_transducer_bpe.yaml``. +You can also convert any model trained with full context attention to local, though this may result in lower WER in some cases. You can switch to local attention when running the +`transcribe `_ or `evaluation `_ +scripts in the following way: + +.. code-block:: python + + python speech_to_text_eval.py \ + (...other parameters...) \ + ++model_change.conformer.self_attention_model="rel_pos_local_attn" \ + ++model_change.conformer.att_context_size=[128, 128] + +Alternatively, you can change the attention model after loading a checkpoint: + +.. code-block:: python + + asr_model = ASRModel.from_pretrained('stt_en_conformer_ctc_large') + asr_model.change_attention_model( + self_attention_model="rel_pos_local_attn", + att_context_size=[128, 128] + ) + +Sometimes, the downsampling module at the earliest stage of the model can take more memory than the actual forward pass since it directly operates on the audio sequence which may not be able to fit in memory for very long audio files. In order to reduce the memory consumption of the subsampling module, you can ask the model to perform auto-chunking of the input sequence and process it piece by piece, taking more time but avoiding an OutOfMemoryError. + +.. 
code-block:: python + + asr_model = ASRModel.from_pretrained('stt_en_fastconformer_ctc_large') + # Speedup conv subsampling factor to speed up the subsampling module. + asr_model.change_subsampling_conv_chunking_factor(1) # 1 = auto select + + +.. note:: + + Only certain models which use depthwise separable convolutions in the downsampling layer support this operation. Please try it out on your model and see if it is supported. + + + Inference with Multi-task Models ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -187,54 +237,6 @@ Note that using manifest allows to specify the task configuration for each audio pnc=True, # whether to have PnC output, choices=[True, False] ) -Inference on long audio -^^^^^^^^^^^^^^^^^^^^^^^ - -In some cases the audio is too long for standard inference, especially if you're using a model such as Conformer, where the time and memory costs of the attention layers scale quadratically with the duration. - -There are two main ways of performing inference on long audio files in NeMo: - -The first way is to use buffered inference, where the audio is divided into chunks to run on, and the output is merged afterwards. -The relevant scripts for this are contained in `this folder `_. - -The second way, specifically for models with the Conformer/Fast Conformer encoder, is to use local attention, which changes the costs to be linear. -You can train Fast Conformer models with Longformer-style (https://arxiv.org/abs/2004.05150) local+global attention using one of the following configs: CTC config at -``/examples/asr/conf/fastconformer/fast-conformer-long_ctc_bpe.yaml`` and transducer config at ``/examples/asr/conf/fastconformer/fast-conformer-long_transducer_bpe.yaml``. -You can also convert any model trained with full context attention to local, though this may result in lower WER in some cases. You can switch to local attention when running the -`transcribe `_ or `evaluation `_ -scripts in the following way: - -.. code-block:: python - - python speech_to_text_eval.py \ - (...other parameters...) \ - ++model_change.conformer.self_attention_model="rel_pos_local_attn" \ - ++model_change.conformer.att_context_size=[128, 128] - -Alternatively, you can change the attention model after loading a checkpoint: - -.. code-block:: python - - asr_model = ASRModel.from_pretrained('stt_en_conformer_ctc_large') - asr_model.change_attention_model( - self_attention_model="rel_pos_local_attn", - att_context_size=[128, 128] - ) - -Sometimes, the downsampling module at the earliest stage of the model can take more memory than the actual forward pass since it directly operates on the audio sequence which may not be able to fit in memory for very long audio files. In order to reduce the memory consumption of the subsampling module, you can ask the model to perform auto-chunking of the input sequence and process it piece by piece, taking more time but avoiding an OutOfMemoryError. - -.. code-block:: python - - asr_model = ASRModel.from_pretrained('stt_en_fastconformer_ctc_large') - # Speedup conv subsampling factor to speed up the subsampling module. - asr_model.change_subsampling_conv_chunking_factor(1) # 1 = auto select - - -.. note:: - - Only certain models which use depthwise separable convolutions in the downsampling layer support this operation. Please try it out on your model and see if it is supported. 
- - Inference on Apple M-Series GPU ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -259,183 +261,47 @@ Inference Execution Flow Diagram When preparing your own inference scripts, please follow the execution flow diagram order for correct inference, found at the `examples directory for ASR collection `_. + Automatic Speech Recognition Models ----------------------------------- -Below is a list of all the ASR models that are available in NeMo for specific languages, as well as auxiliary language models for certain languages. - -Language Models for ASR -^^^^^^^^^^^^^^^^^^^^^^^ - -.. csv-table:: - :file: data/asrlm_results.csv - :align: left - :widths: 30, 30, 40 - :header-rows: 1 - -| - - -.. _asr-checkpoint-list-by-language: +Speech Recognition +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Below is a list of the high quality ASR models available in NeMo for specific languages, all ASR models can be found in :doc:`All checkpoints <./all_chkpt>`. -Speech Recognition (Languages) ------------------------------- +Multilingual Multitask +^^^^^^^^^^^^^^^^^^^^^^ -English -^^^^^^^ .. csv-table:: - :file: data/benchmark_en.csv + :file: data/benchmark_canary.csv :align: left - :widths: 40, 10, 50 + :widths: 50,50 :header-rows: 1 ------------------------------ - -Mandarin +Parakeet ^^^^^^^^ -.. csv-table:: - :file: data/benchmark_zh.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -German -^^^^^^ -.. csv-table:: - :file: data/benchmark_de.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -French -^^^^^^ -.. csv-table:: - :file: data/benchmark_fr.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -Polish -^^^^^^ -.. csv-table:: - :file: data/benchmark_pl.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -Italian -^^^^^^^ -.. csv-table:: - :file: data/benchmark_it.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -Russian -^^^^^^^ -.. csv-table:: - :file: data/benchmark_ru.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -Spanish -^^^^^^^ -.. csv-table:: - :file: data/benchmark_es.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - - ------------------------------ - -Catalan -^^^^^^^ -.. csv-table:: - :file: data/benchmark_ca.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -Hindi -^^^^^^^ -.. csv-table:: - :file: data/benchmark_hi.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -Marathi -^^^^^^^ -.. csv-table:: - :file: data/benchmark_mr.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - -Kinyarwanda -^^^^^^^^^^^ -.. csv-table:: - :file: data/benchmark_rw.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 ------------------------------ - -Belarusian -^^^^^^^^^^ .. csv-table:: - :file: data/benchmark_by.csv + :file: data/benchmark_parakeet.csv :align: left - :widths: 40, 10, 50 + :widths: 50,50 :header-rows: 1 ------------------------------ - -Ukrainian -^^^^^^^^^ +Fast Conformer Hybrid +^^^^^^^^^^^^^^^^^^^^^ .. csv-table:: - :file: data/benchmark_ua.csv + :file: data/benchmark_fastconformer_hybrid.csv :align: left - :widths: 40, 10, 50 + :widths: 50,50 :header-rows: 1 ------------------------------ - -Multilingual -^^^^^^^^^^^^ -.. 
csv-table:: - :file: data/benchmark_multilingual.csv - :align: left - :widths: 40, 10, 50 - :header-rows: 1 - ------------------------------ - Code-Switching ^^^^^^^^^^^^^^ + .. csv-table:: :file: data/benchmark_code_switching.csv :align: left - :widths: 40, 10, 50 + :widths: 50,50 :header-rows: 1 + + From 86393b518c71786e913f2788529c73a092c20911 Mon Sep 17 00:00:00 2001 From: meatybobby Date: Mon, 16 Dec 2024 09:59:53 -0800 Subject: [PATCH 050/128] Support Cosmos tokenizer TensorRT inference (#11472) * Add cosmos TRT * Add trt run script * Apply isort and black reformatting Signed-off-by: meatybobby * Clean code * Fix CodeQL --------- Signed-off-by: meatybobby Co-authored-by: meatybobby --- .../video_tokenizers/cosmos_tokenizer.py | 33 +- .../common/video_tokenizers/cosmos_trt_run.py | 93 ++ .../video_tokenizers/modules/__init__.py | 62 ++ .../video_tokenizers/modules/distributions.py | 43 + .../video_tokenizers/modules/layers2d.py | 326 ++++++ .../video_tokenizers/modules/layers3d.py | 966 ++++++++++++++++++ .../video_tokenizers/modules/patching.py | 311 ++++++ .../video_tokenizers/modules/quantizers.py | 512 ++++++++++ .../common/video_tokenizers/modules/utils.py | 117 +++ .../video_tokenizers/networks/__init__.py | 41 + .../video_tokenizers/networks/configs.py | 148 +++ .../networks/continuous_image.py | 88 ++ .../networks/continuous_video.py | 97 ++ .../networks/discrete_image.py | 113 ++ .../networks/discrete_video.py | 115 +++ .../common/video_tokenizers/utils.py | 33 + 16 files changed, 3094 insertions(+), 4 deletions(-) create mode 100644 nemo/collections/common/video_tokenizers/cosmos_trt_run.py create mode 100644 nemo/collections/common/video_tokenizers/modules/__init__.py create mode 100644 nemo/collections/common/video_tokenizers/modules/distributions.py create mode 100644 nemo/collections/common/video_tokenizers/modules/layers2d.py create mode 100644 nemo/collections/common/video_tokenizers/modules/layers3d.py create mode 100644 nemo/collections/common/video_tokenizers/modules/patching.py create mode 100644 nemo/collections/common/video_tokenizers/modules/quantizers.py create mode 100644 nemo/collections/common/video_tokenizers/modules/utils.py create mode 100644 nemo/collections/common/video_tokenizers/networks/__init__.py create mode 100644 nemo/collections/common/video_tokenizers/networks/configs.py create mode 100644 nemo/collections/common/video_tokenizers/networks/continuous_image.py create mode 100644 nemo/collections/common/video_tokenizers/networks/continuous_video.py create mode 100644 nemo/collections/common/video_tokenizers/networks/discrete_image.py create mode 100644 nemo/collections/common/video_tokenizers/networks/discrete_video.py diff --git a/nemo/collections/common/video_tokenizers/cosmos_tokenizer.py b/nemo/collections/common/video_tokenizers/cosmos_tokenizer.py index d81097e1e6e0..03e11e8c4f66 100644 --- a/nemo/collections/common/video_tokenizers/cosmos_tokenizer.py +++ b/nemo/collections/common/video_tokenizers/cosmos_tokenizer.py @@ -20,7 +20,9 @@ from tqdm import tqdm from nemo.collections.common.video_tokenizers.utils import ( + get_tokenizer_config, load_jit_model, + load_pytorch_model, numpy2tensor, pad_video_batch, tensor2numpy, @@ -44,9 +46,28 @@ def __init__(self, cfg: DictConfig) -> None: self._device = "cuda" - self._full_model = load_jit_model(self._full_model_path, self._device) if cfg.load_full_model else None - self._enc_model = load_jit_model(self._enc_model_path, self._device) if cfg.load_enc_model else None - self._dec_model = 
load_jit_model(self._dec_model_path, self._device) if cfg.load_dec_model else None + if cfg.use_pytorch: + tokenizer_config = get_tokenizer_config(cfg.tokenizer_type) + tokenizer_config["dtype"] = self._dtype + self._full_model = ( + load_pytorch_model(self._full_model_path, tokenizer_config, "full", self._device) + if cfg.load_full_model + else None + ) + self._enc_model = ( + load_pytorch_model(self._enc_model_path, tokenizer_config, "enc", self._device) + if cfg.load_enc_model + else None + ) + self._dec_model = ( + load_pytorch_model(self._dec_model_path, tokenizer_config, "dec", self._device) + if cfg.load_dec_model + else None + ) + else: + self._full_model = load_jit_model(self._full_model_path, self._device) if cfg.load_full_model else None + self._enc_model = load_jit_model(self._enc_model_path, self._device) if cfg.load_enc_model else None + self._dec_model = load_jit_model(self._dec_model_path, self._device) if cfg.load_dec_model else None @classmethod def from_pretrained( @@ -55,6 +76,8 @@ def from_pretrained( load_encoder=True, load_decoder=True, load_full_model=False, + use_pytorch=False, + dtype="bfloat16", ): cls._hf_model_name = f"nvidia/{tokenizer_type}" @@ -89,10 +112,12 @@ def from_pretrained( cfg = DictConfig( { 'checkpoint_dir': ckpt_dir, - 'dtype': 'bfloat16', + 'dtype': dtype, 'load_enc_model': load_encoder, 'load_dec_model': load_decoder, 'load_full_model': load_full_model, + 'tokenizer_type': tokenizer_type, + 'use_pytorch': use_pytorch, } ) diff --git a/nemo/collections/common/video_tokenizers/cosmos_trt_run.py b/nemo/collections/common/video_tokenizers/cosmos_trt_run.py new file mode 100644 index 000000000000..9e10801a0fe4 --- /dev/null +++ b/nemo/collections/common/video_tokenizers/cosmos_trt_run.py @@ -0,0 +1,93 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
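# ------------------------------------------------------------------------------
# [Editor's sketch, not part of the patch] The new `use_pytorch`/`dtype` options on
# `CausalVideoTokenizer.from_pretrained` (see the cosmos_tokenizer.py hunk above)
# build the tokenizer from the native PyTorch modules instead of the JIT
# checkpoints. The tokenizer name and input shape below mirror the defaults of the
# TRT export script and are illustrative assumptions only.
import torch

from nemo.collections.common.video_tokenizers.cosmos_tokenizer import CausalVideoTokenizer

pt_model = CausalVideoTokenizer.from_pretrained("Cosmos-Tokenizer-CV4x8x8", use_pytorch=True, dtype="float")
video = torch.randn(1, 3, 9, 512, 512, device="cuda", dtype=torch.float)
reconstruction = pt_model.autoencode(video)  # full encode + decode round trip, as in VaeWrapper below
# ------------------------------------------------------------------------------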
+ +import argparse +import os +import shutil + +import torch + +from nemo.collections.common.video_tokenizers.cosmos_tokenizer import CausalVideoTokenizer +from nemo.export.tensorrt_lazy_compiler import trt_compile + +parser = argparse.ArgumentParser(description="Export and run tokenizer in TensorRT") +parser.add_argument( + "--tokenizer_name", + type=str, + default="Cosmos-Tokenizer-CV4x8x8", + help="Tokenizer name or path", +) +parser.add_argument( + "--engine_path", + type=str, + default="outputs", + help="Path to TensorRT engine", +) +parser.add_argument("--min_shape", type=int, nargs='+', help="min input shape for inference") +parser.add_argument("--opt_shape", type=int, nargs='+', help="opt input shape for inference") +parser.add_argument( + "--max_shape", type=int, nargs='+', default=[1, 3, 9, 512, 512], help="max input shape for inference" +) +parser.add_argument("--clean", action="store_true", help="Clean all files in engine_path before export") + +args = parser.parse_args() + + +def main(): + model = CausalVideoTokenizer.from_pretrained(args.tokenizer_name, use_pytorch=True, dtype="float") + + class VaeWrapper(torch.nn.Module): + def __init__(self, vae): + super().__init__() + self.vae = vae + + def forward(self, input_tensor): + output_tensor = self.vae.autoencode(input_tensor) + return output_tensor + + model_wrapper = VaeWrapper(model) + + if args.clean and os.path.exists(args.engine_path): + print(f"Remove existing {args.engine_path}") + shutil.rmtree(args.engine_path) + + os.makedirs(args.engine_path, exist_ok=True) + + min_shape = args.min_shape + opt_shape = args.opt_shape + max_shape = args.max_shape + + if opt_shape is None: + opt_shape = max_shape + if min_shape is None: + min_shape = opt_shape + + output_path = os.path.join(args.engine_path, "auto_encoder") + trt_compile( + model_wrapper, + output_path, + args={ + "precision": "bf16", + "input_profiles": [ + {"input_tensor": [min_shape, opt_shape, max_shape]}, + ], + }, + ) + + input_tensor = torch.randn(max_shape).to('cuda').to(torch.float) + output = model_wrapper(input_tensor) + + +if __name__ == '__main__': + main() diff --git a/nemo/collections/common/video_tokenizers/modules/__init__.py b/nemo/collections/common/video_tokenizers/modules/__init__.py new file mode 100644 index 000000000000..f4c89696ff74 --- /dev/null +++ b/nemo/collections/common/video_tokenizers/modules/__init__.py @@ -0,0 +1,62 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
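# ------------------------------------------------------------------------------
# [Editor's note, illustrative only — not part of the patch] The TRT export script
# above is driven entirely by its argparse flags; an invocation along the lines of
#
#   python nemo/collections/common/video_tokenizers/cosmos_trt_run.py \
#       --tokenizer_name Cosmos-Tokenizer-CV4x8x8 \
#       --engine_path outputs \
#       --max_shape 1 3 9 512 512 \
#       --clean
#
# wraps the tokenizer's autoencode() in VaeWrapper, compiles it with trt_compile()
# under outputs/auto_encoder, and runs one random input at the max profile shape.
# ------------------------------------------------------------------------------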
+# +# ****************************************************************************** +from enum import Enum + +from nemo.collections.common.video_tokenizers.modules.distributions import GaussianDistribution, IdentityDistribution +from nemo.collections.common.video_tokenizers.modules.layers2d import Decoder, Encoder +from nemo.collections.common.video_tokenizers.modules.layers3d import ( + DecoderBase, + DecoderFactorized, + EncoderBase, + EncoderFactorized, +) +from nemo.collections.common.video_tokenizers.modules.quantizers import ( + FSQuantizer, + LFQuantizer, + ResidualFSQuantizer, + VectorQuantizer, +) + + +class EncoderType(Enum): + Default = Encoder + + +class DecoderType(Enum): + Default = Decoder + + +class Encoder3DType(Enum): + BASE = EncoderBase + FACTORIZED = EncoderFactorized + + +class Decoder3DType(Enum): + BASE = DecoderBase + FACTORIZED = DecoderFactorized + + +class ContinuousFormulation(Enum): + VAE = GaussianDistribution + AE = IdentityDistribution + + +class DiscreteQuantizer(Enum): + VQ = VectorQuantizer + LFQ = LFQuantizer + FSQ = FSQuantizer + RESFSQ = ResidualFSQuantizer diff --git a/nemo/collections/common/video_tokenizers/modules/distributions.py b/nemo/collections/common/video_tokenizers/modules/distributions.py new file mode 100644 index 000000000000..cc2b12e30e6a --- /dev/null +++ b/nemo/collections/common/video_tokenizers/modules/distributions.py @@ -0,0 +1,43 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ****************************************************************************** +"""The distribution modes to use for continuous image tokenizers.""" + +import torch + + +class IdentityDistribution(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, parameters): + return parameters, (torch.tensor([0.0]), torch.tensor([0.0])) + + +class GaussianDistribution(torch.nn.Module): + def __init__(self, min_logvar: float = -30.0, max_logvar: float = 20.0): + super().__init__() + self.min_logvar = min_logvar + self.max_logvar = max_logvar + + def sample(self, mean, logvar): + std = torch.exp(0.5 * logvar) + return mean + std * torch.randn_like(mean) + + def forward(self, parameters): + mean, logvar = torch.chunk(parameters, 2, dim=1) + logvar = torch.clamp(logvar, self.min_logvar, self.max_logvar) + return self.sample(mean, logvar), (mean, logvar) diff --git a/nemo/collections/common/video_tokenizers/modules/layers2d.py b/nemo/collections/common/video_tokenizers/modules/layers2d.py new file mode 100644 index 000000000000..e1f24aa6a97f --- /dev/null +++ b/nemo/collections/common/video_tokenizers/modules/layers2d.py @@ -0,0 +1,326 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ****************************************************************************** +"""The model definition for Continuous 2D layers + +Adapted from: https://github.com/CompVis/stable-diffusion/blob/ +21f890f9da3cfbeaba8e2ac3c425ee9e998d5229/ldm/modules/diffusionmodules/model.py + +[Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors] +https://github.com/CompVis/stable-diffusion/blob/ +21f890f9da3cfbeaba8e2ac3c425ee9e998d5229/LICENSE +""" + +import math + +# pytorch_diffusion + derived encoder decoder +import torch +import torch.nn as nn +import torch.nn.functional as F + +from nemo.collections.common.video_tokenizers.modules.patching import Patcher, UnPatcher +from nemo.collections.common.video_tokenizers.modules.utils import Normalize, nonlinearity + + +class Upsample(nn.Module): + def __init__(self, in_channels: int): + super().__init__() + self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x.repeat_interleave(2, dim=2).repeat_interleave(2, dim=3) + return self.conv(x) + + +class Downsample(nn.Module): + def __init__(self, in_channels: int): + super().__init__() + self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + pad = (0, 1, 0, 1) + x = F.pad(x, pad, mode="constant", value=0) + return self.conv(x) + + +class ResnetBlock(nn.Module): + def __init__( + self, + *, + in_channels: int, + out_channels: int = None, + dropout: float, + **kwargs, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + + self.norm1 = Normalize(in_channels) + self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.norm2 = Normalize(out_channels) + self.dropout = nn.Dropout(dropout) + self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.nin_shortcut = ( + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + if in_channels != out_channels + else nn.Identity() + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + x = self.nin_shortcut(x) + + return x + h + + +class AttnBlock(nn.Module): + def __init__(self, in_channels: int): + super().__init__() + + self.norm = Normalize(in_channels) + self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # TODO (freda): Consider reusing implementations in Attn `imaginaire`, + # since than one is gonna be based on TransformerEngine's attn op, + # w/c could ease CP implementations. 
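        # [Editor's note] The code below flattens the spatial grid so self-attention
        # runs over h*w tokens per sample: q/k/v come from 1x1 convs, scores are
        # scaled by c**-0.5 before the softmax, and the attended values are added
        # back to the input as a residual through proj_out.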
+ h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + q = q.reshape(b, c, h * w) + q = q.permute(0, 2, 1) + k = k.reshape(b, c, h * w) + w_ = torch.bmm(q, k) + w_ = w_ * (int(c) ** (-0.5)) + w_ = F.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b, c, h * w) + w_ = w_.permute(0, 2, 1) + h_ = torch.bmm(v, w_) + h_ = h_.reshape(b, c, h, w) + + h_ = self.proj_out(h_) + + return x + h_ + + +class Encoder(nn.Module): + def __init__( + self, + in_channels: int, + channels: int, + channels_mult: list[int], + num_res_blocks: int, + attn_resolutions: list[int], + dropout: float, + resolution: int, + z_channels: int, + spatial_compression: int, + **ignore_kwargs, + ): + super().__init__() + self.num_resolutions = len(channels_mult) + self.num_res_blocks = num_res_blocks + + # Patcher. + patch_size = ignore_kwargs.get("patch_size", 1) + self.patcher = Patcher(patch_size, ignore_kwargs.get("patch_method", "rearrange")) + in_channels = in_channels * patch_size * patch_size + + # calculate the number of downsample operations + self.num_downsamples = int(math.log2(spatial_compression)) - int(math.log2(patch_size)) + assert ( + self.num_downsamples <= self.num_resolutions + ), f"we can only downsample {self.num_resolutions} times at most" + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, channels, kernel_size=3, stride=1, padding=1) + + curr_res = resolution // patch_size + in_ch_mult = (1,) + tuple(channels_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = channels * in_ch_mult[i_level] + block_out = channels * channels_mult[i_level] + for _ in range(self.num_res_blocks): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(AttnBlock(block_in)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level < self.num_downsamples: + down.downsample = Downsample(block_in) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=dropout) + self.mid.attn_1 = AttnBlock(block_in) + self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=dropout) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, z_channels, kernel_size=3, stride=1, padding=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patcher(x) + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1]) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level < self.num_downsamples: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__( + self, + out_channels: int, + channels: int, + channels_mult: list[int], + num_res_blocks: int, + attn_resolutions: int, + dropout: float, + resolution: int, + z_channels: int, + spatial_compression: int, + **ignore_kwargs, + ): + super().__init__() + 
self.num_resolutions = len(channels_mult) + self.num_res_blocks = num_res_blocks + + # UnPatcher. + patch_size = ignore_kwargs.get("patch_size", 1) + self.unpatcher = UnPatcher(patch_size, ignore_kwargs.get("patch_method", "rearrange")) + out_ch = out_channels * patch_size * patch_size + + # calculate the number of upsample operations + self.num_upsamples = int(math.log2(spatial_compression)) - int(math.log2(patch_size)) + assert self.num_upsamples <= self.num_resolutions, f"we can only upsample {self.num_resolutions} times at most" + + block_in = channels * channels_mult[self.num_resolutions - 1] + curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + + # z to block_in + self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=dropout) + self.mid.attn_1 = AttnBlock(block_in) + self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = channels * channels_mult[i_level] + for _ in range(self.num_res_blocks + 1): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(AttnBlock(block_in)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level >= (self.num_resolutions - self.num_upsamples): + up.upsample = Upsample(block_in) + curr_res = curr_res * 2 + self.up.insert(0, up) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) + + def forward(self, z: torch.Tensor) -> torch.Tensor: + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level >= (self.num_resolutions - self.num_upsamples): + h = self.up[i_level].upsample(h) + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + h = self.unpatcher(h) + return h diff --git a/nemo/collections/common/video_tokenizers/modules/layers3d.py b/nemo/collections/common/video_tokenizers/modules/layers3d.py new file mode 100644 index 000000000000..13abe9804208 --- /dev/null +++ b/nemo/collections/common/video_tokenizers/modules/layers3d.py @@ -0,0 +1,966 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
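# ------------------------------------------------------------------------------
# [Editor's sketch, not part of the patch] A minimal round trip through the 2D
# Encoder/Decoder defined in layers2d.py above. All hyperparameters here are
# illustrative assumptions (attention disabled, default patch_size of 1); the real
# values come from the network configs.
import torch

from nemo.collections.common.video_tokenizers.modules.layers2d import Decoder, Encoder

common = dict(
    channels=32, channels_mult=[1, 2, 4], num_res_blocks=1, attn_resolutions=[],
    dropout=0.0, resolution=64, z_channels=8, spatial_compression=4,
)
encoder = Encoder(in_channels=3, **common)
decoder = Decoder(out_channels=3, **common)

image = torch.randn(1, 3, 64, 64)
latent = encoder(image)      # (1, 8, 16, 16): 4x spatial compression into z_channels
restored = decoder(latent)   # (1, 3, 64, 64): back to the input resolution
# ------------------------------------------------------------------------------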
+# +# ****************************************************************************** +"""The model definition for 3D layers + +Adapted from: https://github.com/lucidrains/magvit2-pytorch/blob/ +9f49074179c912736e617d61b32be367eb5f993a/magvit2_pytorch/magvit2_pytorch.py#L889 + +[MIT License Copyright (c) 2023 Phil Wang] +https://github.com/lucidrains/magvit2-pytorch/blob/ +9f49074179c912736e617d61b32be367eb5f993a/LICENSE +""" +import math +from typing import Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from nemo.collections.common.video_tokenizers.modules.patching import Patcher, Patcher3D, UnPatcher, UnPatcher3D +from nemo.collections.common.video_tokenizers.modules.utils import ( + CausalNormalize, + batch2space, + batch2time, + cast_tuple, + is_odd, + nonlinearity, + replication_pad, + space2batch, + time2batch, +) + +_LEGACY_NUM_GROUPS = 32 + + +class CausalConv3d(nn.Module): + def __init__( + self, + chan_in: int = 1, + chan_out: int = 1, + kernel_size: Union[int, Tuple[int, int, int]] = 3, + pad_mode: str = "constant", + **kwargs, + ): + super().__init__() + kernel_size = cast_tuple(kernel_size, 3) + + time_kernel_size, height_kernel_size, width_kernel_size = kernel_size + + assert is_odd(height_kernel_size) and is_odd(width_kernel_size) + + dilation = kwargs.pop("dilation", 1) + stride = kwargs.pop("stride", 1) + time_stride = kwargs.pop("time_stride", 1) + time_dilation = kwargs.pop("time_dilation", 1) + padding = kwargs.pop("padding", 1) + + self.pad_mode = pad_mode + time_pad = time_dilation * (time_kernel_size - 1) + (1 - time_stride) + self.time_pad = time_pad + + self.spatial_pad = (padding, padding, padding, padding) + + stride = (time_stride, stride, stride) + dilation = (time_dilation, dilation, dilation) + self.conv3d = nn.Conv3d( + chan_in, + chan_out, + kernel_size, + stride=stride, + dilation=dilation, + **kwargs, + ) + + def _replication_pad(self, x: torch.Tensor) -> torch.Tensor: + x_prev = x[:, :, :1, ...].repeat(1, 1, self.time_pad, 1, 1) + x = torch.cat([x_prev, x], dim=2) + padding = self.spatial_pad + (0, 0) + return F.pad(x, padding, mode=self.pad_mode, value=0.0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self._replication_pad(x) + return self.conv3d(x) + + +class CausalUpsample3d(nn.Module): + def __init__(self, in_channels: int) -> None: + super().__init__() + self.conv = CausalConv3d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x.repeat_interleave(2, dim=3).repeat_interleave(2, dim=4) + time_factor = 1.0 + 1.0 * (x.shape[2] > 1) + if isinstance(time_factor, torch.Tensor): + time_factor = time_factor.item() + x = x.repeat_interleave(int(time_factor), dim=2) + # TODO(freda): Check if this causes temporal inconsistency. + # Shoule reverse the order of the following two ops, + # better perf and better temporal smoothness. 
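        # [Editor's note] After the conv, x[..., time_factor - 1 :, :, :] drops the
        # leading repeated frame whenever the input has more than one frame, so a
        # causal clip of T frames becomes 2*T - 1 frames while a single image stays
        # a single frame.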
+ x = self.conv(x) + return x[..., int(time_factor - 1) :, :, :] + + +class CausalDownsample3d(nn.Module): + def __init__(self, in_channels: int) -> None: + super().__init__() + self.conv = CausalConv3d( + in_channels, + in_channels, + kernel_size=3, + stride=2, + time_stride=2, + padding=0, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + pad = (0, 1, 0, 1, 0, 0) + x = F.pad(x, pad, mode="constant", value=0) + x = replication_pad(x) + x = self.conv(x) + return x + + +class CausalHybridUpsample3d(nn.Module): + def __init__( + self, + in_channels: int, + spatial_up: bool = True, + temporal_up: bool = True, + **kwargs, + ) -> None: + super().__init__() + self.conv1 = CausalConv3d( + in_channels, + in_channels, + kernel_size=(3, 1, 1), + stride=1, + time_stride=1, + padding=0, + ) + self.conv2 = CausalConv3d( + in_channels, + in_channels, + kernel_size=(1, 3, 3), + stride=1, + time_stride=1, + padding=1, + ) + self.conv3 = CausalConv3d( + in_channels, + in_channels, + kernel_size=1, + stride=1, + time_stride=1, + padding=0, + ) + self.spatial_up = spatial_up + self.temporal_up = temporal_up + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if not self.spatial_up and not self.temporal_up: + return x + + # hybrid upsample temporally. + if self.temporal_up: + time_factor = 1.0 + 1.0 * (x.shape[2] > 1) + if isinstance(time_factor, torch.Tensor): + time_factor = time_factor.item() + x = x.repeat_interleave(int(time_factor), dim=2) + x = x[..., int(time_factor - 1) :, :, :] + x = self.conv1(x) + x + + # hybrid upsample spatially. + if self.spatial_up: + x = x.repeat_interleave(2, dim=3).repeat_interleave(2, dim=4) + x = self.conv2(x) + x + + # final 1x1x1 conv. + x = self.conv3(x) + return x + + +class CausalHybridDownsample3d(nn.Module): + def __init__( + self, + in_channels: int, + spatial_down: bool = True, + temporal_down: bool = True, + **kwargs, + ) -> None: + super().__init__() + self.conv1 = CausalConv3d( + in_channels, + in_channels, + kernel_size=(1, 3, 3), + stride=2, + time_stride=1, + padding=0, + ) + self.conv2 = CausalConv3d( + in_channels, + in_channels, + kernel_size=(3, 1, 1), + stride=1, + time_stride=2, + padding=0, + ) + self.conv3 = CausalConv3d( + in_channels, + in_channels, + kernel_size=1, + stride=1, + time_stride=1, + padding=0, + ) + self.spatial_down = spatial_down + self.temporal_down = temporal_down + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if not self.spatial_down and not self.temporal_down: + return x + + # hybrid downsample spatially. + if self.spatial_down: + pad = (0, 1, 0, 1, 0, 0) + x = F.pad(x, pad, mode="constant", value=0) + x1 = self.conv1(x) + x2 = F.avg_pool3d(x, kernel_size=(1, 2, 2), stride=(1, 2, 2)) + x = x1 + x2 + + # hybrid downsample temporally. + if self.temporal_down: + x = replication_pad(x) + x1 = self.conv2(x) + x2 = F.avg_pool3d(x, kernel_size=(2, 1, 1), stride=(2, 1, 1)) + x = x1 + x2 + + # final 1x1x1 conv. 
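        # [Editor's note] conv3 is a pointwise channel-mixing projection applied after
        # the strided-conv and average-pool branches have been summed above.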
+ x = self.conv3(x) + return x + + +class CausalResnetBlock3d(nn.Module): + def __init__( + self, + *, + in_channels: int, + out_channels: int = None, + dropout: float, + num_groups: int, + ) -> None: + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + + self.norm1 = CausalNormalize(in_channels, num_groups=num_groups) + self.conv1 = CausalConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.norm2 = CausalNormalize(out_channels, num_groups=num_groups) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = CausalConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.nin_shortcut = ( + CausalConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + if in_channels != out_channels + else nn.Identity() + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + x = self.nin_shortcut(x) + + return x + h + + +class CausalResnetBlockFactorized3d(nn.Module): + def __init__( + self, + *, + in_channels: int, + out_channels: int = None, + dropout: float, + num_groups: int, + ) -> None: + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + + self.norm1 = CausalNormalize(in_channels, num_groups=1) + self.conv1 = nn.Sequential( + CausalConv3d( + in_channels, + out_channels, + kernel_size=(1, 3, 3), + stride=1, + padding=1, + ), + CausalConv3d( + out_channels, + out_channels, + kernel_size=(3, 1, 1), + stride=1, + padding=0, + ), + ) + self.norm2 = CausalNormalize(out_channels, num_groups=num_groups) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = nn.Sequential( + CausalConv3d( + out_channels, + out_channels, + kernel_size=(1, 3, 3), + stride=1, + padding=1, + ), + CausalConv3d( + out_channels, + out_channels, + kernel_size=(3, 1, 1), + stride=1, + padding=0, + ), + ) + self.nin_shortcut = ( + CausalConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + if in_channels != out_channels + else nn.Identity() + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + x = self.nin_shortcut(x) + + return x + h + + +class CausalAttnBlock(nn.Module): + def __init__(self, in_channels: int, num_groups: int) -> None: + super().__init__() + + self.norm = CausalNormalize(in_channels, num_groups=num_groups) + self.q = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.k = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.v = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.proj_out = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + q, batch_size = time2batch(q) + k, batch_size = time2batch(k) + v, batch_size = time2batch(v) + + b, c, h, w = q.shape + q = q.reshape(b, c, h * w) + q = q.permute(0, 2, 1) + k = k.reshape(b, c, h * w) + w_ = torch.bmm(q, k) + w_ = w_ * (int(c) ** (-0.5)) + w_ = F.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b, c, h * w) + w_ = w_.permute(0, 2, 1) + h_ = 
torch.bmm(v, w_) + h_ = h_.reshape(b, c, h, w) + + h_ = batch2time(h_, batch_size) + h_ = self.proj_out(h_) + return x + h_ + + +class CausalTemporalAttnBlock(nn.Module): + def __init__(self, in_channels: int, num_groups: int) -> None: + super().__init__() + + self.norm = CausalNormalize(in_channels, num_groups=num_groups) + self.q = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.k = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.v = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + self.proj_out = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + q, batch_size, height = space2batch(q) + k, _, _ = space2batch(k) + v, _, _ = space2batch(v) + + bhw, c, t = q.shape + q = q.permute(0, 2, 1) # (bhw, t, c) + k = k.permute(0, 2, 1) # (bhw, t, c) + v = v.permute(0, 2, 1) # (bhw, t, c) + + w_ = torch.bmm(q, k.permute(0, 2, 1)) # (bhw, t, t) + w_ = w_ * (int(c) ** (-0.5)) + + # Apply causal mask + mask = torch.tril(torch.ones_like(w_)) + w_ = w_.masked_fill(mask == 0, float("-inf")) + w_ = F.softmax(w_, dim=2) + + # attend to values + h_ = torch.bmm(w_, v) # (bhw, t, c) + h_ = h_.permute(0, 2, 1).reshape(bhw, c, t) # (bhw, c, t) + + h_ = batch2space(h_, batch_size, height) + h_ = self.proj_out(h_) + return x + h_ + + +class EncoderBase(nn.Module): + def __init__( + self, + in_channels: int, + channels: int, + channels_mult: list[int], + num_res_blocks: int, + attn_resolutions: list[int], + dropout: float, + resolution: int, + z_channels: int, + **ignore_kwargs, + ) -> None: + super().__init__() + self.num_resolutions = len(channels_mult) + self.num_res_blocks = num_res_blocks + + # Patcher. + patch_size = ignore_kwargs.get("patch_size", 1) + self.patcher = Patcher(patch_size, ignore_kwargs.get("patch_method", "rearrange")) + in_channels = in_channels * patch_size * patch_size + + # downsampling + self.conv_in = CausalConv3d(in_channels, channels, kernel_size=3, stride=1, padding=1) + + # num of groups for GroupNorm, num_groups=1 for LayerNorm. 
+ num_groups = ignore_kwargs.get("num_groups", _LEGACY_NUM_GROUPS) + curr_res = resolution // patch_size + in_ch_mult = (1,) + tuple(channels_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = channels * in_ch_mult[i_level] + block_out = channels * channels_mult[i_level] + for _ in range(self.num_res_blocks): + block.append( + CausalResnetBlock3d( + in_channels=block_in, + out_channels=block_out, + dropout=dropout, + num_groups=num_groups, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(CausalAttnBlock(block_in, num_groups=num_groups)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = CausalDownsample3d(block_in) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = CausalResnetBlock3d( + in_channels=block_in, + out_channels=block_in, + dropout=dropout, + num_groups=num_groups, + ) + self.mid.attn_1 = CausalAttnBlock(block_in, num_groups=num_groups) + self.mid.block_2 = CausalResnetBlock3d( + in_channels=block_in, + out_channels=block_in, + dropout=dropout, + num_groups=num_groups, + ) + + # end + self.norm_out = CausalNormalize(block_in, num_groups=num_groups) + self.conv_out = CausalConv3d(block_in, z_channels, kernel_size=3, stride=1, padding=1) + + def patcher3d(self, x: torch.Tensor) -> torch.Tensor: + x, batch_size = time2batch(x) + x = self.patcher(x) + x = batch2time(x, batch_size) + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patcher3d(x) + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1]) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + else: + # temporal downsample (last level) + time_factor = 1 + 1 * (hs[-1].shape[2] > 1) + if isinstance(time_factor, torch.Tensor): + time_factor = time_factor.item() + hs[-1] = replication_pad(hs[-1]) + hs.append( + F.avg_pool3d( + hs[-1], + kernel_size=[time_factor, 1, 1], + stride=[2, 1, 1], + ) + ) + + # middle + h = hs[-1] + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class DecoderBase(nn.Module): + def __init__( + self, + out_channels: int, + channels: int, + channels_mult: list[int], + num_res_blocks: int, + attn_resolutions: list[int], + dropout: float, + resolution: int, + z_channels: int, + **ignore_kwargs, + ): + super().__init__() + self.num_resolutions = len(channels_mult) + self.num_res_blocks = num_res_blocks + + # UnPatcher. + patch_size = ignore_kwargs.get("patch_size", 1) + self.unpatcher = UnPatcher(patch_size, ignore_kwargs.get("patch_method", "rearrange")) + out_ch = out_channels * patch_size * patch_size + + block_in = channels * channels_mult[self.num_resolutions - 1] + curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + + # z to block_in + self.conv_in = CausalConv3d(z_channels, block_in, kernel_size=3, stride=1, padding=1) + + # num of groups for GroupNorm, num_groups=1 for LayerNorm. 
+ num_groups = ignore_kwargs.get("num_groups", _LEGACY_NUM_GROUPS) + + # middle + self.mid = nn.Module() + self.mid.block_1 = CausalResnetBlock3d( + in_channels=block_in, + out_channels=block_in, + dropout=dropout, + num_groups=num_groups, + ) + self.mid.attn_1 = CausalAttnBlock(block_in, num_groups=num_groups) + self.mid.block_2 = CausalResnetBlock3d( + in_channels=block_in, + out_channels=block_in, + dropout=dropout, + num_groups=num_groups, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = channels * channels_mult[i_level] + for _ in range(self.num_res_blocks + 1): + block.append( + CausalResnetBlock3d( + in_channels=block_in, + out_channels=block_out, + dropout=dropout, + num_groups=num_groups, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(CausalAttnBlock(block_in, num_groups=num_groups)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = CausalUpsample3d(block_in) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = CausalNormalize(block_in, num_groups=num_groups) + self.conv_out = CausalConv3d(block_in, out_ch, kernel_size=3, stride=1, padding=1) + + def unpatcher3d(self, x: torch.Tensor) -> torch.Tensor: + x, batch_size = time2batch(x) + x = self.unpatcher(x) + x = batch2time(x, batch_size) + + return x + + def forward(self, z): + h = self.conv_in(z) + + # middle block. + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + + # decoder blocks. + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + else: + # temporal upsample (last level) + time_factor = 1.0 + 1.0 * (h.shape[2] > 1) + if isinstance(time_factor, torch.Tensor): + time_factor = time_factor.item() + h = h.repeat_interleave(int(time_factor), dim=2) + h = h[..., int(time_factor - 1) :, :, :] + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + h = self.unpatcher3d(h) + return h + + +class EncoderFactorized(nn.Module): + def __init__( + self, + in_channels: int, + channels: int, + channels_mult: list[int], + num_res_blocks: int, + attn_resolutions: list[int], + dropout: float, + resolution: int, + z_channels: int, + spatial_compression: int = 16, + temporal_compression: int = 8, + **ignore_kwargs, + ) -> None: + super().__init__() + self.num_resolutions = len(channels_mult) + self.num_res_blocks = num_res_blocks + + # Patcher. 
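        # [Editor's note] Patcher3D folds each patch_size**3 block of voxels into the
        # channel dimension, which is why in_channels is scaled by patch_size**3 below
        # before the factorized (spatial then temporal) convolution stack.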
+ patch_size = ignore_kwargs.get("patch_size", 1) + self.patcher3d = Patcher3D(patch_size, ignore_kwargs.get("patch_method", "rearrange")) + in_channels = in_channels * patch_size * patch_size * patch_size + + # calculate the number of downsample operations + self.num_spatial_downs = int(math.log2(spatial_compression)) - int(math.log2(patch_size)) + assert ( + self.num_spatial_downs <= self.num_resolutions + ), f"Spatially downsample {self.num_resolutions} times at most" + + self.num_temporal_downs = int(math.log2(temporal_compression)) - int(math.log2(patch_size)) + assert ( + self.num_temporal_downs <= self.num_resolutions + ), f"Temporally downsample {self.num_resolutions} times at most" + + # downsampling + self.conv_in = nn.Sequential( + CausalConv3d( + in_channels, + channels, + kernel_size=(1, 3, 3), + stride=1, + padding=1, + ), + CausalConv3d(channels, channels, kernel_size=(3, 1, 1), stride=1, padding=0), + ) + + curr_res = resolution // patch_size + in_ch_mult = (1,) + tuple(channels_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = channels * in_ch_mult[i_level] + block_out = channels * channels_mult[i_level] + for _ in range(self.num_res_blocks): + block.append( + CausalResnetBlockFactorized3d( + in_channels=block_in, + out_channels=block_out, + dropout=dropout, + num_groups=1, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append( + nn.Sequential( + CausalAttnBlock(block_in, num_groups=1), + CausalTemporalAttnBlock(block_in, num_groups=1), + ) + ) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + spatial_down = i_level < self.num_spatial_downs + temporal_down = i_level < self.num_temporal_downs + down.downsample = CausalHybridDownsample3d( + block_in, + spatial_down=spatial_down, + temporal_down=temporal_down, + ) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = CausalResnetBlockFactorized3d( + in_channels=block_in, + out_channels=block_in, + dropout=dropout, + num_groups=1, + ) + self.mid.attn_1 = nn.Sequential( + CausalAttnBlock(block_in, num_groups=1), + CausalTemporalAttnBlock(block_in, num_groups=1), + ) + self.mid.block_2 = CausalResnetBlockFactorized3d( + in_channels=block_in, + out_channels=block_in, + dropout=dropout, + num_groups=1, + ) + + # end + self.norm_out = CausalNormalize(block_in, num_groups=1) + self.conv_out = nn.Sequential( + CausalConv3d(block_in, z_channels, kernel_size=(1, 3, 3), stride=1, padding=1), + CausalConv3d( + z_channels, + z_channels, + kernel_size=(3, 1, 1), + stride=1, + padding=0, + ), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patcher3d(x) + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1]) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class DecoderFactorized(nn.Module): + def __init__( + self, + out_channels: int, + channels: int, + channels_mult: list[int], + num_res_blocks: int, + 
attn_resolutions: list[int], + dropout: float, + resolution: int, + z_channels: int, + spatial_compression: int = 16, + temporal_compression: int = 8, + **ignore_kwargs, + ): + super().__init__() + self.num_resolutions = len(channels_mult) + self.num_res_blocks = num_res_blocks + + # UnPatcher. + patch_size = ignore_kwargs.get("patch_size", 1) + self.unpatcher3d = UnPatcher3D(patch_size, ignore_kwargs.get("patch_method", "rearrange")) + out_ch = out_channels * patch_size * patch_size * patch_size + + # calculate the number of upsample operations + self.num_spatial_ups = int(math.log2(spatial_compression)) - int(math.log2(patch_size)) + assert self.num_spatial_ups <= self.num_resolutions, f"Spatially upsample {self.num_resolutions} times at most" + self.num_temporal_ups = int(math.log2(temporal_compression)) - int(math.log2(patch_size)) + assert ( + self.num_temporal_ups <= self.num_resolutions + ), f"Temporally upsample {self.num_resolutions} times at most" + + block_in = channels * channels_mult[self.num_resolutions - 1] + curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + + # z to block_in + self.conv_in = nn.Sequential( + CausalConv3d(z_channels, block_in, kernel_size=(1, 3, 3), stride=1, padding=1), + CausalConv3d(block_in, block_in, kernel_size=(3, 1, 1), stride=1, padding=0), + ) + + # middle + self.mid = nn.Module() + self.mid.block_1 = CausalResnetBlockFactorized3d( + in_channels=block_in, + out_channels=block_in, + dropout=dropout, + num_groups=1, + ) + self.mid.attn_1 = nn.Sequential( + CausalAttnBlock(block_in, num_groups=1), + CausalTemporalAttnBlock(block_in, num_groups=1), + ) + self.mid.block_2 = CausalResnetBlockFactorized3d( + in_channels=block_in, + out_channels=block_in, + dropout=dropout, + num_groups=1, + ) + + legacy_mode = ignore_kwargs.get("legacy_mode", False) + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = channels * channels_mult[i_level] + for _ in range(self.num_res_blocks + 1): + block.append( + CausalResnetBlockFactorized3d( + in_channels=block_in, + out_channels=block_out, + dropout=dropout, + num_groups=1, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append( + nn.Sequential( + CausalAttnBlock(block_in, num_groups=1), + CausalTemporalAttnBlock(block_in, num_groups=1), + ) + ) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + # The layer index for temporal/spatial downsampling performed + # in the encoder should correspond to the layer index in + # reverse order where upsampling is performed in the decoder. + # If you've a pre-trained model, you can simply finetune. 
+                i_level_reverse = self.num_resolutions - i_level - 1
+                if legacy_mode:
+                    temporal_up = i_level_reverse < self.num_temporal_ups
+                else:
+                    temporal_up = 0 < i_level_reverse < self.num_temporal_ups + 1
+                spatial_up = temporal_up or (
+                    i_level_reverse < self.num_spatial_ups and self.num_spatial_ups > self.num_temporal_ups
+                )
+                up.upsample = CausalHybridUpsample3d(block_in, spatial_up=spatial_up, temporal_up=temporal_up)
+                curr_res = curr_res * 2
+            self.up.insert(0, up)  # prepend to get consistent order
+
+        # end
+        self.norm_out = CausalNormalize(block_in, num_groups=1)
+        self.conv_out = nn.Sequential(
+            CausalConv3d(block_in, out_ch, kernel_size=(1, 3, 3), stride=1, padding=1),
+            CausalConv3d(out_ch, out_ch, kernel_size=(3, 1, 1), stride=1, padding=0),
+        )
+
+    def forward(self, z):
+        h = self.conv_in(z)
+
+        # middle block.
+        h = self.mid.block_1(h)
+        h = self.mid.attn_1(h)
+        h = self.mid.block_2(h)
+
+        # decoder blocks.
+        for i_level in reversed(range(self.num_resolutions)):
+            for i_block in range(self.num_res_blocks + 1):
+                h = self.up[i_level].block[i_block](h)
+                if len(self.up[i_level].attn) > 0:
+                    h = self.up[i_level].attn[i_block](h)
+            if i_level != 0:
+                h = self.up[i_level].upsample(h)
+
+        h = self.norm_out(h)
+        h = nonlinearity(h)
+        h = self.conv_out(h)
+        h = self.unpatcher3d(h)
+        return h
diff --git a/nemo/collections/common/video_tokenizers/modules/patching.py b/nemo/collections/common/video_tokenizers/modules/patching.py
new file mode 100644
index 000000000000..d16f8961472e
--- /dev/null
+++ b/nemo/collections/common/video_tokenizers/modules/patching.py
@@ -0,0 +1,311 @@
+# ******************************************************************************
+# Copyright (C) 2024 NVIDIA CORPORATION
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ******************************************************************************
+"""The patcher and unpatcher implementation for 2D and 3D data.
+
+The idea of the Haar wavelet is to compute the LL, LH, HL, HH components as two 1D convolutions:
+one on the rows and one on the columns.
+For example, for a 1D signal [a, b], the low-freq component is [a + b] / 2 and the high-freq component is [a - b] / 2.
+We can use a 1D convolution with kernel [1, 1] and stride 2 to represent the L component.
+For the H component, we can use a 1D convolution with kernel [1, -1] and stride 2.
+In principle, the additional Haar wavelet is typically applied only to the LL component, but here we apply it to all
+ components, since we need to support downsampling by more than 2x.
+For example, 4x downsampling can be done by a 2x Haar transform followed by another 2x Haar transform,
+and the shapes would be:
+    [3, 256, 256] -> [12, 128, 128] -> [48, 64, 64]
+"""
+
+import torch
+import torch.nn.functional as F
+from einops import rearrange
+
+_WAVELETS = {
+    "haar": torch.tensor([0.7071067811865476, 0.7071067811865476]),
+    "rearrange": torch.tensor([1.0, 1.0]),
+}
+_PERSISTENT = False
+
+
+class Patcher(torch.nn.Module):
+    """A module to convert image tensors into patches using torch operations.
+ + The main difference from `class Patching` is that this module implements + all operations using torch, rather than python or numpy, for efficiency purpose. + + It's bit-wise identical to the Patching module outputs, with the added + benefit of being torch.jit scriptable. + """ + + def __init__(self, patch_size=1, patch_method="haar"): + super().__init__() + self.patch_size = patch_size + self.patch_method = patch_method + self.register_buffer("wavelets", _WAVELETS[patch_method], persistent=_PERSISTENT) + self.range = range(int(torch.log2(torch.tensor(self.patch_size)).item())) + self.register_buffer( + "_arange", + torch.arange(_WAVELETS[patch_method].shape[0]), + persistent=_PERSISTENT, + ) + for param in self.parameters(): + param.requires_grad = False + + def forward(self, x): + if self.patch_method == "haar": + return self._haar(x) + elif self.patch_method == "rearrange": + return self._arrange(x) + else: + raise ValueError("Unknown patch method: " + self.patch_method) + + def _dwt(self, x, mode="reflect", rescale=False): + dtype = x.dtype + h = self.wavelets + + n = h.shape[0] + g = int(x.shape[1]) + hl = h.flip(0).reshape(1, 1, -1).repeat(g, 1, 1) + hh = (h * ((-1) ** self._arange)).reshape(1, 1, -1).repeat(g, 1, 1) + hh = hh.to(dtype=dtype) + hl = hl.to(dtype=dtype) + + x = F.pad(x, pad=(n - 2, n - 1, n - 2, n - 1), mode=mode).to(dtype) + xl = F.conv2d(x, hl.unsqueeze(2), groups=g, stride=(1, 2)) + xh = F.conv2d(x, hh.unsqueeze(2), groups=g, stride=(1, 2)) + xll = F.conv2d(xl, hl.unsqueeze(3), groups=g, stride=(2, 1)) + xlh = F.conv2d(xl, hh.unsqueeze(3), groups=g, stride=(2, 1)) + xhl = F.conv2d(xh, hl.unsqueeze(3), groups=g, stride=(2, 1)) + xhh = F.conv2d(xh, hh.unsqueeze(3), groups=g, stride=(2, 1)) + + out = torch.cat([xll, xlh, xhl, xhh], dim=1) + if rescale: + out = out / 2 + return out + + def _haar(self, x): + for _ in self.range: + x = self._dwt(x, rescale=True) + return x + + def _arrange(self, x): + x = rearrange( + x, + "b c (h p1) (w p2) -> b (c p1 p2) h w", + p1=self.patch_size, + p2=self.patch_size, + ).contiguous() + return x + + +class Patcher3D(Patcher): + """A 3D discrete wavelet transform for video data, expects 5D tensor, i.e. a batch of videos.""" + + def __init__(self, patch_size=1, patch_method="haar"): + super().__init__(patch_method=patch_method, patch_size=patch_size) + self.register_buffer( + "patch_size_buffer", + patch_size * torch.ones([1], dtype=torch.int32), + persistent=_PERSISTENT, + ) + + def _dwt(self, x, wavelet, mode="reflect", rescale=False): + dtype = x.dtype + h = self.wavelets + + n = h.shape[0] + g = int(x.shape[1]) + hl = h.flip(0).reshape(1, 1, -1).repeat(g, 1, 1) + hh = (h * ((-1) ** self._arange)).reshape(1, 1, -1).repeat(g, 1, 1) + hh = hh.to(dtype=dtype) + hl = hl.to(dtype=dtype) + + # Handles temporal axis. + x = F.pad(x, pad=(max(0, n - 2), n - 1, n - 2, n - 1, n - 2, n - 1), mode=mode).to(dtype) + xl = F.conv3d(x, hl.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1)) + xh = F.conv3d(x, hh.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1)) + + # Handles spatial axes. 
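+        # Naming convention: each suffix letter marks the low (l) or high (h) frequency
+        # band along the axes processed so far (temporal first, then height, then width).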
+ xll = F.conv3d(xl, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)) + xlh = F.conv3d(xl, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)) + xhl = F.conv3d(xh, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)) + xhh = F.conv3d(xh, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1)) + + xlll = F.conv3d(xll, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xllh = F.conv3d(xll, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xlhl = F.conv3d(xlh, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xlhh = F.conv3d(xlh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xhll = F.conv3d(xhl, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xhlh = F.conv3d(xhl, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xhhl = F.conv3d(xhh, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + xhhh = F.conv3d(xhh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2)) + + out = torch.cat([xlll, xllh, xlhl, xlhh, xhll, xhlh, xhhl, xhhh], dim=1) + if rescale: + out = out / (2 * torch.sqrt(torch.tensor(2.0))) + return out + + def _haar(self, x): + xi, xv = torch.split(x, [1, x.shape[2] - 1], dim=2) + x = torch.cat([xi.repeat_interleave(self.patch_size, dim=2), xv], dim=2) + for _ in self.range: + x = self._dwt(x, "haar", rescale=True) + return x + + def _arrange(self, x): + xi, xv = torch.split(x, [1, x.shape[2] - 1], dim=2) + x = torch.cat([xi.repeat_interleave(self.patch_size, dim=2), xv], dim=2) + x = rearrange( + x, + "b c (t p1) (h p2) (w p3) -> b (c p1 p2 p3) t h w", + p1=self.patch_size, + p2=self.patch_size, + p3=self.patch_size, + ).contiguous() + return x + + +class UnPatcher(torch.nn.Module): + """A module to convert patches into image tensorsusing torch operations. + + The main difference from `class Unpatching` is that this module implements + all operations using torch, rather than python or numpy, for efficiency purpose. + + It's bit-wise identical to the Unpatching module outputs, with the added + benefit of being torch.jit scriptable. + """ + + def __init__(self, patch_size=1, patch_method="haar"): + super().__init__() + self.patch_size = patch_size + self.patch_method = patch_method + self.register_buffer("wavelets", _WAVELETS[patch_method], persistent=_PERSISTENT) + self.range = range(int(torch.log2(torch.tensor(self.patch_size)).item())) + self.register_buffer( + "_arange", + torch.arange(_WAVELETS[patch_method].shape[0]), + persistent=_PERSISTENT, + ) + for param in self.parameters(): + param.requires_grad = False + + def forward(self, x): + if self.patch_method == "haar": + return self._ihaar(x) + elif self.patch_method == "rearrange": + return self._iarrange(x) + else: + raise ValueError("Unknown patch method: " + self.patch_method) + + def _idwt(self, x, wavelet="haar", mode="reflect", rescale=False): + dtype = x.dtype + h = self.wavelets + n = h.shape[0] + + g = int(x.shape[1] // 4) + hl = h.flip([0]).reshape(1, 1, -1).repeat([g, 1, 1]) + hh = (h * ((-1) ** self._arange)).reshape(1, 1, -1).repeat(g, 1, 1) + hh = hh.to(dtype=dtype) + hl = hl.to(dtype=dtype) + + xll, xlh, xhl, xhh = torch.chunk(x.to(dtype), 4, dim=1) + + # Inverse transform. 
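+        # The same low/high-pass filters are applied as transposed convolutions, undoing the
+        # forward DWT in reverse order (rows first, then columns); the padding of (n - 2)
+        # trims the extra samples introduced by the reflection padding in Patcher._dwt.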
+        yl = torch.nn.functional.conv_transpose2d(xll, hl.unsqueeze(3), groups=g, stride=(2, 1), padding=(n - 2, 0))
+        yl += torch.nn.functional.conv_transpose2d(xlh, hh.unsqueeze(3), groups=g, stride=(2, 1), padding=(n - 2, 0))
+        yh = torch.nn.functional.conv_transpose2d(xhl, hl.unsqueeze(3), groups=g, stride=(2, 1), padding=(n - 2, 0))
+        yh += torch.nn.functional.conv_transpose2d(xhh, hh.unsqueeze(3), groups=g, stride=(2, 1), padding=(n - 2, 0))
+        y = torch.nn.functional.conv_transpose2d(yl, hl.unsqueeze(2), groups=g, stride=(1, 2), padding=(0, n - 2))
+        y += torch.nn.functional.conv_transpose2d(yh, hh.unsqueeze(2), groups=g, stride=(1, 2), padding=(0, n - 2))
+
+        if rescale:
+            y = y * 2
+        return y
+
+    def _ihaar(self, x):
+        for _ in self.range:
+            x = self._idwt(x, "haar", rescale=True)
+        return x
+
+    def _iarrange(self, x):
+        x = rearrange(
+            x,
+            "b (c p1 p2) h w -> b c (h p1) (w p2)",
+            p1=self.patch_size,
+            p2=self.patch_size,
+        )
+        return x
+
+
+class UnPatcher3D(UnPatcher):
+    """A 3D inverse discrete wavelet transform for video wavelet decompositions."""
+
+    def __init__(self, patch_size=1, patch_method="haar"):
+        super().__init__(patch_method=patch_method, patch_size=patch_size)
+
+    def _idwt(self, x, wavelet="haar", mode="reflect", rescale=False):
+        dtype = x.dtype
+        h = self.wavelets
+
+        g = int(x.shape[1] // 8)  # split into 8 spatio-temporal filtered tensors.
+        hl = h.flip([0]).reshape(1, 1, -1).repeat([g, 1, 1])
+        hh = (h * ((-1) ** self._arange)).reshape(1, 1, -1).repeat(g, 1, 1)
+        hl = hl.to(dtype=dtype)
+        hh = hh.to(dtype=dtype)
+
+        xlll, xllh, xlhl, xlhh, xhll, xhlh, xhhl, xhhh = torch.chunk(x, 8, dim=1)
+
+        # Handles width transposed convolutions.
+        xll = F.conv_transpose3d(xlll, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
+        xll += F.conv_transpose3d(xllh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
+
+        xlh = F.conv_transpose3d(xlhl, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
+        xlh += F.conv_transpose3d(xlhh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
+
+        xhl = F.conv_transpose3d(xhll, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
+        xhl += F.conv_transpose3d(xhlh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
+
+        xhh = F.conv_transpose3d(xhhl, hl.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
+        xhh += F.conv_transpose3d(xhhh, hh.unsqueeze(2).unsqueeze(3), groups=g, stride=(1, 1, 2))
+
+        # Handles height transposed convolutions.
+        xl = F.conv_transpose3d(xll, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
+        xl += F.conv_transpose3d(xlh, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
+        xh = F.conv_transpose3d(xhl, hl.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
+        xh += F.conv_transpose3d(xhh, hh.unsqueeze(2).unsqueeze(4), groups=g, stride=(1, 2, 1))
+
+        # Handles time axis transposed convolutions.
+        x = F.conv_transpose3d(xl, hl.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1))
+        x += F.conv_transpose3d(xh, hh.unsqueeze(3).unsqueeze(4), groups=g, stride=(2, 1, 1))
+
+        if rescale:
+            x = x * (2 * torch.sqrt(torch.tensor(2.0)))
+        return x
+
+    def _ihaar(self, x):
+        for _ in self.range:
+            x = self._idwt(x, "haar", rescale=True)
+        x = x[:, :, self.patch_size - 1 :, ...]
+        return x
+
+    def _iarrange(self, x):
+        x = rearrange(
+            x,
+            "b (c p1 p2 p3) t h w -> b c (t p1) (h p2) (w p3)",
+            p1=self.patch_size,
+            p2=self.patch_size,
+            p3=self.patch_size,
+        )
+        x = x[:, :, self.patch_size - 1 :, ...]
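+        # The slice above drops the (patch_size - 1) leading frames introduced by Patcher3D,
+        # which replicates the first frame before patching, restoring the original length.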
+ return x diff --git a/nemo/collections/common/video_tokenizers/modules/quantizers.py b/nemo/collections/common/video_tokenizers/modules/quantizers.py new file mode 100644 index 000000000000..35b1976164f5 --- /dev/null +++ b/nemo/collections/common/video_tokenizers/modules/quantizers.py @@ -0,0 +1,512 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ****************************************************************************** +"""Quantizers for discrete image and video tokenization.""" + +from typing import Optional + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import reduce + +from nemo.collections.common.video_tokenizers.modules.utils import ( + default, + entropy, + pack_one, + rearrange, + round_ste, + unpack_one, +) + + +class ResidualFSQuantizer(nn.Module): + """Residual Finite Scalar Quantization + + Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf + """ + + def __init__(self, levels: list[int], num_quantizers: int, **ignore_kwargs): + super().__init__() + self.dtype = ignore_kwargs.get("dtype", torch.float32) + self.layers = nn.ModuleList([FSQuantizer(levels=levels) for _ in range(num_quantizers)]) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + indices_stack = [] + residual = x + quantized_out = 0 + loss_out = 0 + for i, layer in enumerate(self.layers): + quant_indices, z, loss = layer(residual) + indices_stack.append(quant_indices) + residual = residual - z.detach() + quantized_out = quantized_out + z + loss_out = loss_out + loss + self.residual = residual + indices = torch.stack(indices_stack, dim=1) + return indices, quantized_out.to(self.dtype), loss_out.to(self.dtype) + + def indices_to_codes(self, indices_stack: torch.Tensor) -> torch.Tensor: + quantized_out = 0 + for layer, indices in zip(self.layers, indices_stack.transpose(0, 1)): + quantized_out += layer.indices_to_codes(indices) + return quantized_out + + +class FSQuantizer(nn.Module): + """Finite Scalar Quantization: VQ-VAE Made Simple - https://arxiv.org/abs/2309.15505 + + Code adapted from Jax version in Appendix A.1. 
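+    As a concrete example (matching the tokenizer configs later in this patch), levels=[8, 8, 8, 5, 5, 5]
+    yields codes with len(levels) = 6 dimensions and an implicit codebook of
+    8 * 8 * 8 * 5 * 5 * 5 = 64,000 entries.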
+ + Adapted from: https://github.com/lucidrains/vector-quantize-pytorch/blob/9502a1f447876d53fd37685b226bf28f250dc4a3/ + vector_quantize_pytorch/finite_scalar_quantization.py + [Copyright (c) 2020 Phil Wang] + https://github.com/lucidrains/vector-quantize-pytorch/blob/9502a1f447876d53fd37685b226bf28f250dc4a3/LICENSE + """ + + def __init__( + self, + levels: list[int], + dim: Optional[int] = None, + num_codebooks=1, + keep_num_codebooks_dim: Optional[bool] = None, + scale: Optional[float] = None, + **ignore_kwargs, + ): + super().__init__() + self.dtype = ignore_kwargs.get("dtype", torch.float32) + _levels = torch.tensor(levels, dtype=torch.int32) + self.register_buffer("_levels", _levels, persistent=False) + + _basis = torch.cumprod(torch.tensor([1] + levels[:-1]), dim=0, dtype=torch.int32) + self.register_buffer("_basis", _basis, persistent=False) + + self.scale = scale + + codebook_dim = len(levels) + self.codebook_dim = codebook_dim + + effective_codebook_dim = codebook_dim * num_codebooks + self.num_codebooks = num_codebooks + self.effective_codebook_dim = effective_codebook_dim + + keep_num_codebooks_dim = default(keep_num_codebooks_dim, num_codebooks > 1) + assert not (num_codebooks > 1 and not keep_num_codebooks_dim) + self.keep_num_codebooks_dim = keep_num_codebooks_dim + + self.dim = default(dim, len(_levels) * num_codebooks) + + has_projections = self.dim != effective_codebook_dim + self.project_in = nn.Linear(self.dim, effective_codebook_dim) if has_projections else nn.Identity() + self.project_out = nn.Linear(effective_codebook_dim, self.dim) if has_projections else nn.Identity() + self.has_projections = has_projections + + self.codebook_size = self._levels.prod().item() + + implicit_codebook = self.indices_to_codes(torch.arange(self.codebook_size), project_out=False) + self.register_buffer("implicit_codebook", implicit_codebook, persistent=False) + + def bound(self, z: torch.Tensor, eps: float = 1e-3) -> torch.Tensor: + """Bound `z`, an array of shape (..., d).""" + half_l = (self._levels - 1) * (1 + eps) / 2 + offset = torch.where(self._levels % 2 == 0, 0.5, 0.0) + shift = (offset / half_l).atanh() + return (z + shift).tanh() * half_l - offset + + def quantize(self, z: torch.Tensor) -> torch.Tensor: + """Quantizes z, returns quantized zhat, same shape as z.""" + quantized = round_ste(self.bound(z)) + half_width = self._levels // 2 # Renormalize to [-1, 1]. + return quantized / half_width + + def _scale_and_shift(self, zhat_normalized: torch.Tensor) -> torch.Tensor: + half_width = self._levels // 2 + return (zhat_normalized * half_width) + half_width + + def _scale_and_shift_inverse(self, zhat: torch.Tensor) -> torch.Tensor: + half_width = self._levels // 2 + return (zhat - half_width) / half_width + + def codes_to_indices(self, zhat: torch.Tensor) -> torch.Tensor: + """Converts a `code` to an index in the codebook.""" + assert zhat.shape[-1] == self.codebook_dim + zhat = self._scale_and_shift(zhat).float() + return (zhat * self._basis).sum(dim=-1).to(torch.int32) + + def indices_to_codes(self, indices: torch.Tensor, project_out=True) -> torch.Tensor: + """Inverse of `codes_to_indices`.""" + is_img_or_video = indices.ndim >= (3 + int(self.keep_num_codebooks_dim)) + indices = rearrange(indices, "... -> ... 1") + codes_non_centered = (indices // self._basis) % self._levels + codes = self._scale_and_shift_inverse(codes_non_centered) + + if self.keep_num_codebooks_dim: + codes = rearrange(codes, "... c d -> ... 
(c d)") + + if project_out: + codes = self.project_out(codes) + + if is_img_or_video: + codes = rearrange(codes, "b ... d -> b d ...") + + return codes.to(self.dtype) + + def forward(self, z: torch.Tensor) -> torch.Tensor: + """ + einstein notation + b - batch + n - sequence (or flattened spatial dimensions) + d - feature dimension, which is also log2(codebook size) + c - number of codebook dim + """ + is_img_or_video = z.ndim >= 4 + + # standardize image or video into (batch, seq, dimension) + + if is_img_or_video: + z = rearrange(z, "b d ... -> b ... d") + z, ps = pack_one(z, "b * d") + + assert z.shape[-1] == self.dim, f"expected dimension of {self.dim} but found dimension of {z.shape[-1]}" + + z = self.project_in(z) + + z = rearrange(z, "b n (c d) -> b n c d", c=self.num_codebooks) + + codes = self.quantize(z) + indices = self.codes_to_indices(codes) + + codes = rearrange(codes, "b n c d -> b n (c d)") + + out = self.project_out(codes) + + # reconstitute image or video dimensions + + if is_img_or_video: + out = unpack_one(out, ps, "b * d") + out = rearrange(out, "b ... d -> b d ...") + indices = unpack_one(indices, ps, "b * c") + dummy_loss = torch.zeros_like(out.mean(dim=[1, 2, 3], keepdim=True)) + else: + dummy_loss = torch.zeros_like(out.mean(dim=[1, 2], keepdim=True)).unsqueeze(1) + + if not self.keep_num_codebooks_dim: + indices = rearrange(indices, "... 1 -> ...") + + return (indices, out.to(self.dtype), dummy_loss) + + +class VectorQuantizer(nn.Module): + """Improved version over VectorQuantizer. Mostly + avoids costly matrix multiplications and allows for post-hoc remapping of indices. + + Adapted from: https://github.com/CompVis/taming-transformers/blob/3ba01b241669f5ade541ce990f7650a3b8f65318/ + taming/modules/vqvae/quantize.py + + [Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer] + https://github.com/CompVis/taming-transformers/blob/3ba01b241669f5ade541ce990f7650a3b8f65318/License.txt + """ + + def __init__( + self, + num_embeddings: int, + embedding_dim: int, + beta: float = 0.25, + remap: str = None, + unknown_index: str = "random", + sane_index_shape: bool = False, + legacy: bool = True, + use_norm=False, + **ignore_kwargs, + ): + super().__init__() + self.n_e = num_embeddings + self.e_dim = embedding_dim + self.beta = beta + self.legacy = legacy + self.norm = lambda x: F.normalize(x, dim=-1) if use_norm else x + + self.embedding = nn.Embedding(self.n_e, self.e_dim) + self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) + + self.remap = remap + if self.remap is not None: + self.register_buffer("used", torch.tensor(np.load(self.remap))) + self.re_embed = self.used.shape[0] + self.unknown_index = unknown_index + if self.unknown_index == "extra": + self.unknown_index = self.re_embed + self.re_embed = self.re_embed + 1 + print( + f"Remapping {self.n_e} indices to {self.re_embed} indices. " + f"Using {self.unknown_index} for unknown indices." 
+ ) + else: + self.re_embed = num_embeddings + + self.sane_index_shape = sane_index_shape + self.dtype = ignore_kwargs.get("dtype", torch.float32) + + def remap_to_used(self, inds): + ishape = inds.shape + assert len(ishape) > 1 + inds = inds.reshape(ishape[0], -1) + used = self.used.to(inds) + match = (inds[:, :, None] == used[None, None, ...]).long() + new = match.argmax(-1) + unknown = match.sum(2) < 1 + if self.unknown_index == "random": + new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device) + else: + new[unknown] = self.unknown_index + return new.reshape(ishape) + + def unmap_to_all(self, inds): + ishape = inds.shape + assert len(ishape) > 1 + inds = inds.reshape(ishape[0], -1) + used = self.used.to(inds) + if self.re_embed > self.used.shape[0]: # extra token + inds[inds >= self.used.shape[0]] = 0 # simply set to zero + back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds) + return back.reshape(ishape) + + def forward(self, z, temp=None, rescale_logits=False, return_logits=False): + assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel" + assert rescale_logits is False, "Only for interface compatible with Gumbel" + assert return_logits is False, "Only for interface compatible with Gumbel" + z = rearrange(z, "b c h w -> b h w c").contiguous() + z_flattened = z.view(-1, self.e_dim) + + d = ( + torch.sum(z_flattened**2, dim=1, keepdim=True) + + torch.sum(self.embedding.weight**2, dim=1) + - 2 + * torch.einsum( + "bd,dn->bn", + z_flattened, + rearrange(self.embedding.weight, "n d -> d n"), + ) + ) + + encoding_indices = torch.argmin(d, dim=1).unsqueeze(1) + encodings = torch.zeros(encoding_indices.shape[0], self.n_e, device=z.device) + encodings.scatter_(1, encoding_indices, 1) + z_q = torch.matmul(encodings, self.embedding.weight).view(z.shape) + min_encodings = None + + z_q, z = self.norm(z_q), self.norm(z) + + # compute loss for embedding + commit_loss = torch.mean((z_q - z.detach()) ** 2, dim=[1, 2, 3], keepdim=True) + emb_loss = torch.mean((z_q.detach() - z) ** 2, dim=[1, 2, 3], keepdim=True) + if not self.legacy: + loss = self.beta * emb_loss + commit_loss + else: + loss = emb_loss + self.beta * commit_loss + + # preserve gradients + z_q = z + (z_q - z).detach() + avg_probs = torch.mean(encodings, dim=0) + perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10))) + + # reshape back to match original input shape + z_q = rearrange(z_q, "b h w c -> b c h w").contiguous() + + if self.remap is not None: + min_encoding_indices = encoding_indices.squeeze(1).reshape(z.shape[0], -1) # add batch axis + min_encoding_indices = self.remap_to_used(encoding_indices.squeeze(1)) + min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten + + if self.sane_index_shape: + min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3]) + + # TODO: return (indices, z_q, loss) + return ( + z_q, + loss, + ( + encoding_indices.squeeze(1), + min_encodings, + commit_loss.mean().detach(), + self.beta * emb_loss.mean().detach(), + perplexity.mean().detach(), + ), + ) + + def get_codebook_entry(self, indices, shape): + # shape specifying (batch, height, width, channel) + if self.remap is not None: + indices = indices.reshape(shape[0], -1) # add batch axis + indices = self.unmap_to_all(indices) + indices = indices.reshape(-1) # flatten again + + # get quantized latent vectors + z_q = self.embedding(indices) + + if shape is not None: + z_q = z_q.view(shape) + # reshape back to 
match original input shape + z_q = z_q.permute(0, 3, 1, 2).contiguous() + + return z_q + + +class LFQuantizer(nn.Module): + """Lookup-Free Quantization + + Adapted from: https://github.com/lucidrains/vector-quantize-pytorch/blob/9502a1f447876d53fd37685b226bf28f250dc4a3/ + vector_quantize_pytorch/lookup_free_quantization.py + [Copyright (c) 2020 Phil Wang] + https://github.com/lucidrains/vector-quantize-pytorch/blob/9502a1f447876d53fd37685b226bf28f250dc4a3/LICENSE + """ + + def __init__( + self, + *, + codebook_size: int, + codebook_dim: int, + embed_dim: Optional[int] = None, # if None, use codebook_dim + entropy_loss_weight=0.1, + commitment_loss_weight=0.25, + default_temp: float = 0.01, + entropy_loss: bool = False, + **ignore_kwargs, + ): + """Lookup-Free Quantization + + Args: + codebook_size (int): The number of entries in the codebook. + codebook_dim (int): The number of bits in each code. + embed_dim (Optional[int], optional): The dimension of the input embedding. Defaults to None. + entropy_loss_weight (float, optional): Whether to use entropy loss. Defaults to 0.1. + commitment_loss_weight (float, optional): Weight for commitment loss. Defaults to 0.25. + default_temp (float, optional): The temprature to use. Defaults to 0.01. + entropy_loss (bool, optional): Flag for entropy loss. Defaults to False. + """ + super().__init__() + self.entropy_loss = entropy_loss + self.codebook_dim = codebook_dim + self.default_temp = default_temp + self.entrop_loss_weight = entropy_loss_weight + self.commitment_loss_weight = commitment_loss_weight + embed_dim = embed_dim or codebook_dim + + has_projections = embed_dim != codebook_dim + self.project_in = nn.Linear(embed_dim, codebook_dim) if has_projections else nn.Identity() + self.project_out = nn.Linear(codebook_dim, embed_dim) if has_projections else nn.Identity() + + self.dtype = ignore_kwargs.get("dtype", torch.float32) + + if entropy_loss: + assert 2**codebook_dim == codebook_size, "codebook size must be 2 ** codebook_dim" + self.codebook_size = codebook_size + + self.register_buffer( + "mask", + 2 ** torch.arange(codebook_dim - 1, -1, -1), + persistent=False, + ) + self.register_buffer("zero", torch.tensor(0.0), persistent=False) + + all_codes = torch.arange(codebook_size) + bits = ((all_codes[..., None].int() & self.mask) != 0).float() + codebook = 2 * bits - 1.0 + + self.register_buffer("codebook", codebook, persistent=False) # [codebook_size, codebook_dim] + + def forward(self, z: torch.Tensor, temp: float = None) -> torch.Tensor: + temp = temp or self.default_temp + + z = rearrange(z, "b d ... -> b ... d") + z, ps = pack_one(z, "b * d") + z = self.project_in(z) + + # split out number of codebooks + z = rearrange(z, "b n (c d) -> b n c d", c=self.num_codebooks) + + # quantization + original_input = z + + codebook_value = torch.ones_like(z) + z_q = torch.where(z > 0, codebook_value, -codebook_value) + + # preserve gradients + z_q = z + (z_q - z).detach() + + # commit loss + commit_loss = ((original_input - z_q.detach()) ** 2).mean(dim=[1, 2, 3]) + + z_q = rearrange(z_q, "b n c d -> b n (c d)") + z_q = self.project_out(z_q) + + # reshape + z_q = unpack_one(z_q, ps, "b * d") + z_q = rearrange(z_q, "b ... d -> b d ...") + + loss = self.commitment_loss_weight * commit_loss + + # entropy loss (eq-5) + if self.entropy_loss: + # indices + indices = reduce((z > 0).int() * self.mask.int(), "b n c d -> b n c", "sum") + indices = unpack_one(indices, ps, "b * c") + indices = rearrange(indices, "... 1 -> ...") + + distance = -2 * torch.einsum( + "... 
i d, j d -> ... i j", + original_input, + self.codebook.to(original_input.dtype), + ) + prob = (-distance / temp).softmax(dim=-1) + per_sample_entropy = entropy(prob).mean(dim=[1, 2]) + avg_prob = reduce(prob, "... c d -> c d", "mean") + codebook_entropy = entropy(avg_prob).mean() + entropy_aux_loss = per_sample_entropy - codebook_entropy + + loss += self.entrop_loss_weight * entropy_aux_loss + + # TODO: return (indices, z_q, loss) + return ( + z_q, + loss.unsqueeze(1).unsqueeze(1).unsqueeze(1), + ( + indices, + self.commitment_loss_weight * commit_loss.mean().detach(), + self.entrop_loss_weight * entropy_aux_loss.mean().detach(), + self.entrop_loss_weight * per_sample_entropy.mean().detach(), + self.entrop_loss_weight * codebook_entropy.mean().detach(), + ), + ) + else: + return ( + z_q, + loss.unsqueeze(1).unsqueeze(1).unsqueeze(1), + self.commitment_loss_weight * commit_loss.mean().detach(), + ) + + +class InvQuantizerJit(nn.Module): + """Use for decoder_jit to trace quantizer in discrete tokenizer""" + + def __init__(self, quantizer): + super().__init__() + self.quantizer = quantizer + + def forward(self, indices: torch.Tensor): + codes = self.quantizer.indices_to_codes(indices) + return codes.to(self.quantizer.dtype) diff --git a/nemo/collections/common/video_tokenizers/modules/utils.py b/nemo/collections/common/video_tokenizers/modules/utils.py new file mode 100644 index 000000000000..e9a22c276b24 --- /dev/null +++ b/nemo/collections/common/video_tokenizers/modules/utils.py @@ -0,0 +1,117 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ****************************************************************************** +"""Shared utilities for the networks module.""" + +from typing import Any + +import torch +from einops import pack, rearrange, unpack + + +def time2batch(x: torch.Tensor) -> tuple[torch.Tensor, int]: + batch_size = x.shape[0] + return rearrange(x, "b c t h w -> (b t) c h w"), batch_size + + +def batch2time(x: torch.Tensor, batch_size: int) -> torch.Tensor: + return rearrange(x, "(b t) c h w -> b c t h w", b=batch_size) + + +def space2batch(x: torch.Tensor) -> tuple[torch.Tensor, int]: + batch_size, height = x.shape[0], x.shape[-2] + return rearrange(x, "b c t h w -> (b h w) c t"), batch_size, height + + +def batch2space(x: torch.Tensor, batch_size: int, height: int) -> torch.Tensor: + return rearrange(x, "(b h w) c t -> b c t h w", b=batch_size, h=height) + + +def cast_tuple(t: Any, length: int = 1) -> Any: + return t if isinstance(t, tuple) else ((t,) * length) + + +def replication_pad(x): + return torch.cat([x[:, :, :1, ...], x], dim=2) + + +def divisible_by(num: int, den: int) -> bool: + return (num % den) == 0 + + +def is_odd(n: int) -> bool: + return not divisible_by(n, 2) + + +def nonlinearity(x): + return x * torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + + +class CausalNormalize(torch.nn.Module): + def __init__(self, in_channels, num_groups=1): + super().__init__() + self.norm = torch.nn.GroupNorm( + num_groups=num_groups, + num_channels=in_channels, + eps=1e-6, + affine=True, + ) + self.num_groups = num_groups + + def forward(self, x): + # if num_groups !=1, we apply a spatio-temporal groupnorm for backward compatibility purpose. + # All new models should use num_groups=1, otherwise causality is not guaranteed. + if self.num_groups == 1: + x, batch_size = time2batch(x) + return batch2time(self.norm(x), batch_size) + return self.norm(x) + + +def exists(v): + return v is not None + + +def default(*args): + for arg in args: + if exists(arg): + return arg + return None + + +def pack_one(t, pattern): + return pack([t], pattern) + + +def unpack_one(t, ps, pattern): + return unpack(t, ps, pattern)[0] + + +def round_ste(z: torch.Tensor) -> torch.Tensor: + """Round with straight through gradients.""" + zhat = z.round() + return z + (zhat - z).detach() + + +def log(t, eps=1e-5): + return t.clamp(min=eps).log() + + +def entropy(prob): + return (-prob * log(prob)).sum(dim=-1) diff --git a/nemo/collections/common/video_tokenizers/networks/__init__.py b/nemo/collections/common/video_tokenizers/networks/__init__.py new file mode 100644 index 000000000000..6c794d4e03b5 --- /dev/null +++ b/nemo/collections/common/video_tokenizers/networks/__init__.py @@ -0,0 +1,41 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ****************************************************************************** + +from enum import Enum + +from nemo.collections.common.video_tokenizers.networks.configs import continuous_image as continuous_image_dict +from nemo.collections.common.video_tokenizers.networks.configs import continuous_video as continuous_video_dict +from nemo.collections.common.video_tokenizers.networks.configs import discrete_image as discrete_image_dict +from nemo.collections.common.video_tokenizers.networks.configs import discrete_video as discrete_video_dict +from nemo.collections.common.video_tokenizers.networks.continuous_image import ContinuousImageTokenizer +from nemo.collections.common.video_tokenizers.networks.continuous_video import CausalContinuousVideoTokenizer +from nemo.collections.common.video_tokenizers.networks.discrete_image import DiscreteImageTokenizer +from nemo.collections.common.video_tokenizers.networks.discrete_video import CausalDiscreteVideoTokenizer + + +class TokenizerConfigs(Enum): + CI = continuous_image_dict + DI = discrete_image_dict + CV = continuous_video_dict + DV = discrete_video_dict + + +class TokenizerModels(Enum): + CI = ContinuousImageTokenizer + DI = DiscreteImageTokenizer + CausalCV = CausalContinuousVideoTokenizer + CausalDV = CausalDiscreteVideoTokenizer diff --git a/nemo/collections/common/video_tokenizers/networks/configs.py b/nemo/collections/common/video_tokenizers/networks/configs.py new file mode 100644 index 000000000000..29d7e1227bc3 --- /dev/null +++ b/nemo/collections/common/video_tokenizers/networks/configs.py @@ -0,0 +1,148 @@ +# ***************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ***************************************************************************** +"""The default image and video tokenizer configs.""" + +from nemo.collections.common.video_tokenizers.modules import ( + ContinuousFormulation, + Decoder3DType, + DecoderType, + DiscreteQuantizer, + Encoder3DType, + EncoderType, +) + +continuous_image = dict( + # The attention resolution for res blocks. + attn_resolutions=[32], + # The base number of channels. + channels=128, + # The channel multipler for each resolution. + channels_mult=[2, 4, 4], + dropout=0.0, + in_channels=3, + # The spatial compression ratio. + spatial_compression=16, + # The number of layers in each res block. + num_res_blocks=2, + out_channels=3, + resolution=1024, + patch_size=4, + patch_method="haar", + # The output latent dimension (channels). + latent_channels=16, + # The encoder output channels just before sampling. + # Which is also the decoder's input channels. + z_channels=16, + # A factor over the z_channels, to get the total channels the encoder should output. + # For a VAE for instance, we want to output the mean and variance, so we need 2 * z_channels. + z_factor=1, + name="CI", + # What formulation to use, either "AE" or "VAE". 
+ # Chose VAE here, since the pre-trained ckpt were of a VAE formulation. + formulation=ContinuousFormulation.AE.name, + # Specify type of encoder ["Default", "LiteVAE"] + encoder=EncoderType.Default.name, + # Specify type of decoder ["Default"] + decoder=DecoderType.Default.name, +) + +discrete_image = dict( + # The attention resolution for res blocks. + attn_resolutions=[32], + # The base number of channels. + channels=128, + # The channel multipler for each resolution. + channels_mult=[2, 4, 4], + dropout=0.0, + in_channels=3, + # The spatial compression ratio. + spatial_compression=16, + # The number of layers in each res block. + num_res_blocks=2, + out_channels=3, + resolution=1024, + patch_size=4, + patch_method="haar", + # The encoder output channels just before sampling. + z_channels=256, + # A factor over the z_channels, to get the total channels the encoder should output. + # for discrete tokenization, often we directly use the vector, so z_factor=1. + z_factor=1, + # The quantizer of choice, VQ, LFQ, FSQ, or ResFSQ. + quantizer=DiscreteQuantizer.FSQ.name, + # The embedding dimension post-quantization, which is also the input channels of the decoder. + # Which is also the output + embedding_dim=6, + # The number of levels to use for fine-scalar quantization. + levels=[8, 8, 8, 5, 5, 5], + # The number of quantizers to use for residual fine-scalar quantization. + num_quantizers=4, + name="DI", + # Specify type of encoder ["Default", "LiteVAE"] + encoder=EncoderType.Default.name, + # Specify type of decoder ["Default"] + decoder=DecoderType.Default.name, +) + +continuous_video = dict( + attn_resolutions=[32], + channels=128, + channels_mult=[2, 4, 4], + dropout=0.0, + in_channels=3, + num_res_blocks=2, + out_channels=3, + resolution=1024, + patch_size=4, + patch_method="haar", + latent_channels=16, + z_channels=16, + z_factor=1, + num_groups=1, + legacy_mode=False, + spatial_compression=8, + temporal_compression=8, + formulation=ContinuousFormulation.AE.name, + encoder=Encoder3DType.FACTORIZED.name, + decoder=Decoder3DType.FACTORIZED.name, + name="CausalCV", +) + +discrete_video = dict( + attn_resolutions=[32], + channels=128, + channels_mult=[2, 4, 4], + dropout=0.0, + in_channels=3, + num_res_blocks=2, + out_channels=3, + resolution=1024, + patch_size=4, + patch_method="haar", + z_channels=16, + z_factor=1, + num_groups=1, + legacy_mode=False, + spatial_compression=16, + temporal_compression=8, + quantizer=DiscreteQuantizer.FSQ.name, + embedding_dim=6, + levels=[8, 8, 8, 5, 5, 5], + encoder=Encoder3DType.FACTORIZED.name, + decoder=Decoder3DType.FACTORIZED.name, + name="CausalDV", +) diff --git a/nemo/collections/common/video_tokenizers/networks/continuous_image.py b/nemo/collections/common/video_tokenizers/networks/continuous_image.py new file mode 100644 index 000000000000..c9d9370517fd --- /dev/null +++ b/nemo/collections/common/video_tokenizers/networks/continuous_image.py @@ -0,0 +1,88 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# ****************************************************************************** +"""The continuous image tokenizer with VAE or AE formulation for 2D data.""" + +from collections import OrderedDict, namedtuple + +import torch +from torch import nn + +from nemo.collections.common.video_tokenizers.modules import ContinuousFormulation, DecoderType, EncoderType + +NetworkEval = namedtuple("NetworkEval", ["reconstructions", "posteriors", "latent"]) + + +class ContinuousImageTokenizer(nn.Module): + def __init__(self, z_channels: int, z_factor: int, latent_channels: int, **kwargs) -> None: + super().__init__() + self.name = kwargs.get("name", "ContinuousImageTokenizer") + self.latent_channels = latent_channels + + encoder_name = kwargs.get("encoder", EncoderType.Default.name) + self.encoder = EncoderType[encoder_name].value(z_channels=z_factor * z_channels, **kwargs) + + decoder_name = kwargs.get("decoder", DecoderType.Default.name) + self.decoder = DecoderType[decoder_name].value(z_channels=z_channels, **kwargs) + + self.quant_conv = torch.nn.Conv2d(z_factor * z_channels, z_factor * latent_channels, 1) + self.post_quant_conv = torch.nn.Conv2d(latent_channels, z_channels, 1) + + formulation_name = kwargs.get("formulation", ContinuousFormulation.AE.name) + self.distribution = ContinuousFormulation[formulation_name].value() + + num_parameters = sum(param.numel() for param in self.parameters()) + + def encoder_jit(self): + return nn.Sequential( + OrderedDict( + [ + ("encoder", self.encoder), + ("quant_conv", self.quant_conv), + ("distribution", self.distribution), + ] + ) + ) + + def decoder_jit(self): + return nn.Sequential( + OrderedDict( + [ + ("post_quant_conv", self.post_quant_conv), + ("decoder", self.decoder), + ] + ) + ) + + def last_decoder_layer(self): + return self.decoder.conv_out + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + return self.distribution(moments) + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input) -> dict[str, torch.Tensor] | NetworkEval: + latent, posteriors = self.encode(input) + dec = self.decode(latent) + if self.training: + return dict(reconstructions=dec, posteriors=posteriors, latent=latent) + return NetworkEval(reconstructions=dec, posteriors=posteriors, latent=latent) diff --git a/nemo/collections/common/video_tokenizers/networks/continuous_video.py b/nemo/collections/common/video_tokenizers/networks/continuous_video.py new file mode 100644 index 000000000000..ed0be35ef7ea --- /dev/null +++ b/nemo/collections/common/video_tokenizers/networks/continuous_video.py @@ -0,0 +1,97 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ****************************************************************************** +"""The causal continuous video tokenizer with VAE or AE formulation for 3D data..""" +from collections import OrderedDict, namedtuple + +from torch import nn + +from nemo.collections.common.video_tokenizers.modules import ContinuousFormulation, Decoder3DType, Encoder3DType +from nemo.collections.common.video_tokenizers.modules.layers3d import CausalConv3d + +NetworkEval = namedtuple("NetworkEval", ["reconstructions", "posteriors", "latent"]) + + +class CausalContinuousVideoTokenizer(nn.Module): + def __init__(self, z_channels: int, z_factor: int, latent_channels: int, **kwargs) -> None: + super().__init__() + self.name = kwargs.get("name", "CausalContinuousVideoTokenizer") + self.latent_channels = latent_channels + + encoder_name = kwargs.get("encoder", Encoder3DType.BASE.name) + self.encoder = Encoder3DType[encoder_name].value(z_channels=z_factor * z_channels, **kwargs) + + decoder_name = kwargs.get("decoder", Decoder3DType.BASE.name) + self.decoder = Decoder3DType[decoder_name].value(z_channels=z_channels, **kwargs) + + self.quant_conv = CausalConv3d( + z_factor * z_channels, + z_factor * latent_channels, + kernel_size=1, + padding=0, + ) + self.post_quant_conv = CausalConv3d(latent_channels, z_channels, kernel_size=1, padding=0) + + formulation_name = kwargs.get("formulation", ContinuousFormulation.AE.name) + self.distribution = ContinuousFormulation[formulation_name].value() + + def encoder_jit(self): + return nn.Sequential( + OrderedDict( + [ + ("encoder", self.encoder), + ("quant_conv", self.quant_conv), + ("distribution", self.distribution), + ] + ) + ) + + def decoder_jit(self): + return nn.Sequential( + OrderedDict( + [ + ("post_quant_conv", self.post_quant_conv), + ("decoder", self.decoder), + ] + ) + ) + + def last_decoder_layer(self): + return self.decoder.conv_out + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + return self.distribution(moments) + + def decode(self, z): + z = self.post_quant_conv(z) + return self.decoder(z) + + def forward(self, input): + latent, posteriors = self.encode(input) + reconstructions = self.decode(latent) + if self.training: + return dict( + reconstructions=reconstructions, + posteriors=posteriors, + latent=latent, + ) + return NetworkEval( + reconstructions=reconstructions, + posteriors=posteriors, + latent=latent, + ) diff --git a/nemo/collections/common/video_tokenizers/networks/discrete_image.py b/nemo/collections/common/video_tokenizers/networks/discrete_image.py new file mode 100644 index 000000000000..249aec78697a --- /dev/null +++ b/nemo/collections/common/video_tokenizers/networks/discrete_image.py @@ -0,0 +1,113 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ****************************************************************************** +"""The network definition for discrete image tokenization with VQ, LFQ, FSQ or ResidualFSQ.""" +from collections import OrderedDict, namedtuple + +import torch +from torch import nn + +from nemo.collections.common.video_tokenizers.modules import DecoderType, DiscreteQuantizer, EncoderType +from nemo.collections.common.video_tokenizers.modules.quantizers import InvQuantizerJit + +NetworkEval = namedtuple("NetworkEval", ["reconstructions", "quant_loss", "quant_info"]) + + +class DiscreteImageTokenizer(nn.Module): + def __init__(self, z_channels: int, embedding_dim: int, **kwargs) -> None: + super().__init__() + self.name = kwargs.get("name", "DiscreteImageTokenizer") + self.embedding_dim = embedding_dim + + encoder_name = kwargs.get("encoder", EncoderType.Default.name) + self.encoder = EncoderType[encoder_name].value(z_channels=z_channels, **kwargs) + + decoder_name = kwargs.get("decoder", DecoderType.Default.name) + self.decoder = DecoderType[decoder_name].value(z_channels=z_channels, **kwargs) + self.quant_conv = nn.Conv2d(z_channels, embedding_dim, 1) + self.post_quant_conv = nn.Conv2d(embedding_dim, z_channels, 1) + + quantizer_name = kwargs.get("quantizer", DiscreteQuantizer.RESFSQ.name) + if quantizer_name == DiscreteQuantizer.VQ.name: + assert "num_embeddings" in kwargs, f"`num_embeddings` must be provided for {quantizer_name}." + kwargs.update(dict(embedding_dim=embedding_dim)) + elif quantizer_name == DiscreteQuantizer.LFQ.name: + assert "codebook_size" in kwargs, f"`codebook_size` must be provided for {quantizer_name}." + assert "codebook_dim" in kwargs, f"`codebook_dim` must be provided for {quantizer_name}." + elif quantizer_name == DiscreteQuantizer.FSQ.name: + assert "levels" in kwargs, f"`levels` must be provided for {quantizer_name}." + elif quantizer_name == DiscreteQuantizer.RESFSQ.name: + assert "levels" in kwargs, f"`levels` must be provided for {quantizer_name}.name." + assert "num_quantizers" in kwargs, f"`num_quantizers` must be provided for {quantizer_name}." 
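+        # The enum value holds the quantizer class; the full kwargs dict is forwarded, and each
+        # quantizer consumes the fields it needs while ignoring the rest via **ignore_kwargs.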
+ self.quantizer = DiscreteQuantizer[quantizer_name].value(**kwargs) + + def to(self, *args, **kwargs): + setattr(self.quantizer, "dtype", kwargs.get("dtype", torch.bfloat16)) + return super(DiscreteImageTokenizer, self).to(*args, **kwargs) + + def encoder_jit(self): + return nn.Sequential( + OrderedDict( + [ + ("encoder", self.encoder), + ("quant_conv", self.quant_conv), + ("quantizer", self.quantizer), + ] + ) + ) + + def decoder_jit(self): + return nn.Sequential( + OrderedDict( + [ + ("inv_quant", InvQuantizerJit(self.quantizer)), + ("post_quant_conv", self.post_quant_conv), + ("decoder", self.decoder), + ] + ) + ) + + def last_decoder_layer(self): + return self.decoder.conv_out + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return self.quantizer(h) + + def decode(self, quant): + quant = self.post_quant_conv(quant) + return self.decoder(quant) + + def decode_code(self, code_b): + quant_b = self.quantizer.indices_to_codes(code_b) + quant_b = self.post_quant_conv(quant_b) + return self.decoder(quant_b) + + def forward(self, input): + quant_info, quant_codes, quant_loss = self.encode(input) + reconstructions = self.decode(quant_codes) + if self.training: + return dict( + reconstructions=reconstructions, + quant_loss=quant_loss, + quant_info=quant_info, + ) + return NetworkEval( + reconstructions=reconstructions, + quant_loss=quant_loss, + quant_info=quant_info, + ) diff --git a/nemo/collections/common/video_tokenizers/networks/discrete_video.py b/nemo/collections/common/video_tokenizers/networks/discrete_video.py new file mode 100644 index 000000000000..345b66ad7780 --- /dev/null +++ b/nemo/collections/common/video_tokenizers/networks/discrete_video.py @@ -0,0 +1,115 @@ +# ****************************************************************************** +# Copyright (C) 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ****************************************************************************** +"""The network definition for discrete video tokenizer with VQ, LFQ, FSQ or ResidualFSQ. 
""" +from collections import OrderedDict, namedtuple + +import torch +from torch import nn + +from nemo.collections.common.video_tokenizers.modules import Decoder3DType, DiscreteQuantizer, Encoder3DType +from nemo.collections.common.video_tokenizers.modules.layers3d import CausalConv3d +from nemo.collections.common.video_tokenizers.modules.quantizers import InvQuantizerJit + +NetworkEval = namedtuple("NetworkEval", ["reconstructions", "quant_loss", "quant_info"]) + + +class CausalDiscreteVideoTokenizer(nn.Module): + def __init__(self, z_channels: int, z_factor: int, embedding_dim: int, **kwargs) -> None: + super().__init__() + self.name = kwargs.get("name", "CausalDiscreteVideoTokenizer") + self.embedding_dim = embedding_dim + + encoder_name = kwargs.get("encoder", Encoder3DType.BASE.name) + self.encoder = Encoder3DType[encoder_name].value(z_channels=z_factor * z_channels, **kwargs) + + decoder_name = kwargs.get("decoder", Decoder3DType.BASE.name) + self.decoder = Decoder3DType[decoder_name].value(z_channels=z_channels, **kwargs) + + self.quant_conv = CausalConv3d(z_factor * z_channels, embedding_dim, kernel_size=1, padding=0) + self.post_quant_conv = CausalConv3d(embedding_dim, z_channels, kernel_size=1, padding=0) + + quantizer_name = kwargs.get("quantizer", DiscreteQuantizer.RESFSQ.name) + if quantizer_name == DiscreteQuantizer.VQ.name: + assert "num_embeddings" in kwargs, f"`num_embeddings` must be provided for {quantizer_name}." + kwargs.update(dict(embedding_dim=embedding_dim)) + elif quantizer_name == DiscreteQuantizer.LFQ.name: + assert "codebook_size" in kwargs, f"`codebook_size` must be provided for {quantizer_name}." + assert "codebook_dim" in kwargs, f"`codebook_dim` must be provided for {quantizer_name}." + elif quantizer_name == DiscreteQuantizer.FSQ.name: + assert "levels" in kwargs, f"`levels` must be provided for {quantizer_name}." + elif quantizer_name == DiscreteQuantizer.RESFSQ.name: + assert "levels" in kwargs, f"`levels` must be provided for {quantizer_name}." + assert "num_quantizers" in kwargs, f"`num_quantizers` must be provided for {quantizer_name}." 
+ self.quantizer = DiscreteQuantizer[quantizer_name].value(**kwargs) + + def to(self, *args, **kwargs): + setattr(self.quantizer, "dtype", kwargs.get("dtype", torch.bfloat16)) + return super(CausalDiscreteVideoTokenizer, self).to(*args, **kwargs) + + def encoder_jit(self): + return nn.Sequential( + OrderedDict( + [ + ("encoder", self.encoder), + ("quant_conv", self.quant_conv), + ("quantizer", self.quantizer), + ] + ) + ) + + def decoder_jit(self): + return nn.Sequential( + OrderedDict( + [ + ("inv_quant", InvQuantizerJit(self.quantizer)), + ("post_quant_conv", self.post_quant_conv), + ("decoder", self.decoder), + ] + ) + ) + + def last_decoder_layer(self): + return self.decoder.conv_out + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return self.quantizer(h) + + def decode(self, quant): + quant = self.post_quant_conv(quant) + return self.decoder(quant) + + def decode_code(self, code_b): + quant_b = self.quantizer.indices_to_codes(code_b) + quant_b = self.post_quant_conv(quant_b) + return self.decoder(quant_b) + + def forward(self, input): + quant_info, quant_codes, quant_loss = self.encode(input) + reconstructions = self.decode(quant_codes) + if self.training: + return dict( + reconstructions=reconstructions, + quant_loss=quant_loss, + quant_info=quant_info, + ) + return NetworkEval( + reconstructions=reconstructions, + quant_loss=quant_loss, + quant_info=quant_info, + ) diff --git a/nemo/collections/common/video_tokenizers/utils.py b/nemo/collections/common/video_tokenizers/utils.py index 255f0d2b5df4..78eaeba4e7d2 100644 --- a/nemo/collections/common/video_tokenizers/utils.py +++ b/nemo/collections/common/video_tokenizers/utils.py @@ -15,12 +15,15 @@ """Utility functions for the inference libraries.""" import os +import re from glob import glob import mediapy as media import numpy as np import torch +from nemo.collections.common.video_tokenizers.networks import TokenizerConfigs, TokenizerModels + _DTYPE, _DEVICE = torch.bfloat16, "cuda" _UINT8_MAX_F = float(torch.iinfo(torch.uint8).max) _SPATIAL_ALIGN = 16 @@ -317,3 +320,33 @@ def unpad_image_batch(batch: np.ndarray, crop_region: list[int]) -> np.ndarray: assert len(crop_region) == 4, "crop_region should be len of 4." 
     y1, x1, y2, x2 = crop_region
     return batch[..., y1:y2, x1:x2, :]
+
+
+def get_pytorch_model(jit_filepath: str = None, tokenizer_config: dict = None):
+    tokenizer_name = tokenizer_config["name"]
+    model = TokenizerModels[tokenizer_name].value(**tokenizer_config)
+    ckpts = torch.jit.load(jit_filepath)
+    return model, ckpts
+
+
+def load_pytorch_model(jit_filepath: str, tokenizer_config: dict, model_type: str, device):
+    """Loads a torch.nn.Module from a filepath."""
+    model, ckpts = get_pytorch_model(jit_filepath, tokenizer_config)
+    if model_type == "enc":
+        model = model.encoder_jit()
+    elif model_type == "dec":
+        model = model.decoder_jit()
+    model.load_state_dict(ckpts.state_dict(), strict=False)
+    return model.eval().to(tokenizer_config["dtype"]).to(device)
+
+
+def get_tokenizer_config(tokenizer_type) -> TokenizerConfigs:
+    """Return tokenizer config from tokenizer name."""
+    match = re.match(r"Cosmos-Tokenizer-(\D+)(\d+)x(\d+).*", tokenizer_type)
+    if match:
+        name, temporal, spatial = match.groups()
+        tokenizer_config = TokenizerConfigs[name].value
+        tokenizer_config.update(dict(spatial_compression=int(spatial)))
+        tokenizer_config.update(dict(temporal_compression=int(temporal)))
+        return tokenizer_config
+    return None

From 5298ce37c791347c2737e8c34d69356b30e626c8 Mon Sep 17 00:00:00 2001
From: Yu Yao <54727607+yaoyu-33@users.noreply.github.com>
Date: Mon, 16 Dec 2024 10:39:22 -0800
Subject: [PATCH 051/128] Neva updates to latest mcore and some fixes (#11565)

* api updates and fixes

Signed-off-by: yaoyu-33

* Apply isort and black reformatting

Signed-off-by: yaoyu-33

* fix

Signed-off-by: yaoyu-33

* fix arg

Signed-off-by: yaoyu-33

---------

Signed-off-by: yaoyu-33
Signed-off-by: yaoyu-33
Co-authored-by: yaoyu-33
---
 nemo/collections/llm/gpt/model/base.py  |   4 +-
 nemo/collections/vlm/neva/model/base.py | 148 ++++++++++++++----------
 nemo/lightning/megatron_parallel.py     |   4 +
 scripts/vlm/neva_finetune.py            |  11 +-
 4 files changed, 99 insertions(+), 68 deletions(-)

diff --git a/nemo/collections/llm/gpt/model/base.py b/nemo/collections/llm/gpt/model/base.py
index e411077aca31..563a2cde5854 100644
--- a/nemo/collections/llm/gpt/model/base.py
+++ b/nemo/collections/llm/gpt/model/base.py
@@ -171,7 +171,8 @@ class GPTConfig(TransformerConfig, io.IOMixin):
     masked_softmax_fusion: bool = True
     cross_entropy_loss_fusion: bool = True
     gradient_accumulation_fusion: bool = _grad_accum_fusion_available
-    deallocate_pipeline_outputs = True
+    deallocate_pipeline_outputs: bool = True
+    scatter_embedding_sequence_parallel: bool = True
     use_transformer_engine_full_layer_spec: bool = False
     transformer_layer_spec: Union[ModuleSpec, Callable[["GPTConfig"], ModuleSpec]] = default_layer_spec
@@ -216,6 +217,7 @@ def configure_model(self, tokenizer, pre_process=None, post_process=None) -> "MC
             seq_len_interpolation_factor=self.seq_len_interpolation_factor,
             pre_process=pre_process or parallel_state.is_pipeline_first_stage(),
             post_process=post_process or parallel_state.is_pipeline_last_stage(),
+            scatter_embedding_sequence_parallel=self.scatter_embedding_sequence_parallel,
         )
 
         # If using full TE layer, need to set TP, CP group since the module call
diff --git a/nemo/collections/vlm/neva/model/base.py b/nemo/collections/vlm/neva/model/base.py
index e7b40c1c0209..388078484a56 100644
--- a/nemo/collections/vlm/neva/model/base.py
+++ b/nemo/collections/vlm/neva/model/base.py
@@ -21,15 +21,15 @@
 import torch
 import torch.distributed
 import torch.nn.functional as F
-from megatron.core import dist_checkpointing
+from megatron.core
import InferenceParams, dist_checkpointing from megatron.core import parallel_state as ps from megatron.core import tensor_parallel from megatron.core.enums import ModelType -from megatron.core.inference_params import InferenceParams from megatron.core.models.multimodal.llava_model import LLaVAModel as MCoreLLaVAModel from megatron.core.models.vision.clip_vit_model import CLIPViTModel as MCoreCLIPViTModel from megatron.core.models.vision.multimodal_projector import MultimodalProjector as MCoreMultimodalProjector from megatron.core.optimizer import OptimizerConfig +from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.tensor_parallel import gather_from_sequence_parallel_region from megatron.core.transformer.custom_layers.transformer_engine import ( TEColumnParallelLinear, @@ -133,18 +133,17 @@ def neva_data_step(dataloader_iter) -> Dict[str, torch.Tensor]: def neva_forward_step(model, batch) -> torch.Tensor: forward_args = { - "media": batch["media"], + "images": batch["media"], "input_ids": batch["tokens"], "position_ids": batch["position_ids"], "attention_mask": batch.get("attention_mask", None), "loss_mask": batch.get("loss_mask", None), "labels": batch.get("labels", None), - "num_media_tiles": batch.get("num_media_tiles", None), + "num_image_tiles": batch.get("num_media_tiles", None), + "image_token_mask": batch.get("image_token_mask", None), + "packed_seq_params": batch.get("packed_seq_params", None), } - if 'cu_seqlens' in batch: - forward_args['packed_seq_params'] = get_packed_seq_params(batch) - return model(**forward_args) @@ -219,10 +218,22 @@ class HFCLIPVisionConfig(CLIPVisionConfig, io.IOMixin): """ hidden_size: int = 1024 + num_image_embeddings_per_tile: Optional[int] = None pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None def __post_init__(self, *args, **kwargs) -> None: CLIPVisionConfig.__init__(self, *args, **kwargs, hidden_size=self.hidden_size) + if self.pretrained_model_name_or_path is not None: + config = CLIPVisionConfig.from_pretrained(self.pretrained_model_name_or_path) + for key, value in config.to_dict().items(): + setattr(self, key, value) + self.num_image_embeddings_per_tile = get_image_sequence_length( + img_h=self.image_size, + img_w=self.image_size, + patch_dim=self.patch_size, + add_class_token=False, + class_token_len=1, + ) def configure_model(self) -> "CLIPVisionModel": # Monkey patch the method to the vision encoder @@ -232,9 +243,6 @@ def configure_model(self) -> "CLIPVisionModel": model = CLIPVisionModel(self) else: model = CLIPVisionModel.from_pretrained(self.pretrained_model_name_or_path) - # Extend all model.config fields to self - for key, value in model.config.to_dict().items(): - setattr(self, key, value) return model @@ -248,6 +256,7 @@ class CLIPViTConfig(TransformerConfig, io.IOMixin): img_h: int = 336 img_w: int = 336 vision_model_type: str = "clip" # ["clip", "siglip"] + num_image_embeddings_per_tile: Optional[int] = None transformer_layer_spec: ModuleSpec = transformer_engine_layer_spec num_layers: int = 1 # Placeholder, NOT used! 
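Note: both vision configs above now precompute `num_image_embeddings_per_tile` through `get_image_sequence_length`, which is what lets the later `_img_seq_len` branching in `MCoreNevaModel.__init__` collapse to a single attribute read. A minimal, self-contained sketch of the arithmetic being cached, assuming the helper simply counts ViT patches per tile plus an optional class token (this patch does not spell that out):

# Illustrative sketch only, not the megatron.core implementation.
def image_sequence_length(img_h: int, img_w: int, patch_dim: int, add_class_token: bool, class_token_len: int) -> int:
    num_patches = (img_h // patch_dim) * (img_w // patch_dim)  # non-overlapping patches per tile
    return num_patches + (class_token_len if add_class_token else 0)

# A ViT-L/14-style encoder at the 336x336 defaults above: 24 * 24 = 576 patches per tile.
assert image_sequence_length(336, 336, 14, add_class_token=False, class_token_len=1) == 576
assert image_sequence_length(336, 336, 14, add_class_token=True, class_token_len=1) == 577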
@@ -257,6 +266,13 @@ def __post_init__(self): if self.vision_model_type == "siglip": self.add_class_token = False self.class_token_len = 0 + self.num_image_embeddings_per_tile = get_image_sequence_length( + img_h=self.img_h, + img_w=self.img_w, + patch_dim=self.patch_dim, + add_class_token=self.add_class_token, + class_token_len=self.class_token_len, + ) def configure_model(self) -> "CLIPViTModel": transformer_layer_spec = self.transformer_layer_spec @@ -311,8 +327,6 @@ def __post_init__(self): setattr(self, attr, getattr(self.language_transformer_config, attr)) def configure_model(self, tokenizer) -> "MCoreNevaModel": - from megatron.core import parallel_state as ps - self.language_transformer_config.tensor_model_parallel_size = self.tensor_model_parallel_size self.language_transformer_config.sequence_parallel = self.sequence_parallel self.vision_transformer_config.tensor_model_parallel_size = self.tensor_model_parallel_size @@ -394,8 +408,11 @@ def __init__( self.context_parallel_lm = language_transformer_config.context_parallel_size self.tensor_model_parallel_size_lm = language_transformer_config.tensor_model_parallel_size + # This attribute is needed to check if an all-reduce is required + # on the word embeddings inside `finalize_model_grads._allreduce_word_embedding_grads`. self.share_embeddings_and_output_weights = False if self.add_decoder: + language_transformer_config.scatter_embedding_sequence_parallel = False self.language_model = language_transformer_config.configure_model( tokenizer=tokenizer, pre_process=pre_process, post_process=post_process ) @@ -436,23 +453,7 @@ def __init__( # on the word embeddings inside `finalize_model_grads._allreduce_word_embedding_grads`. self.vision_model_from_hf = hasattr(vision_transformer_config, "image_size") - if self.vision_model_from_hf: - # img_h, img_w, patch_dim, add_class_token, class_token_len - self._img_seq_len = get_image_sequence_length( - img_h=vision_transformer_config.image_size, - img_w=vision_transformer_config.image_size, - patch_dim=vision_transformer_config.patch_size, - add_class_token=not drop_vision_class_token, - class_token_len=0 if "siglip" in vision_transformer_config.model_type else 1, - ) - else: - self._img_seq_len = get_image_sequence_length( - img_h=vision_transformer_config.img_h, - img_w=vision_transformer_config.img_w, - patch_dim=vision_transformer_config.patch_dim, - add_class_token=not drop_vision_class_token, - class_token_len=vision_transformer_config.class_token_len, - ) + self._img_seq_len = vision_transformer_config.num_image_embeddings_per_tile def forward( self, @@ -460,12 +461,14 @@ def forward( position_ids: torch.Tensor, loss_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, - media: Optional[torch.Tensor] = None, + images: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, inference_params: Optional[InferenceParams] = None, - num_media_tiles: Optional[List[int]] = None, - media_token_index: Optional[int] = IMAGE_TOKEN_INDEX, + num_image_tiles: Optional[List[int]] = None, + image_token_index: Optional[int] = IMAGE_TOKEN_INDEX, runtime_gather_output: Optional[bool] = None, + image_token_mask: Optional[torch.Tensor] = None, + packed_seq_params: Optional[PackedSeqParams] = None, ) -> torch.Tensor: """Forward function of the LLaVA model. @@ -477,61 +480,69 @@ def forward( labels (torch.Tensor): Optional target text labels [batch, combined_seq_len]. loss_mask (torch.Tensor): Text loss mask [batch, text_seq_len]. 
inference_params (InferenceParams): Inference-time parameters including KV cache. - num_media_tiles (list of int): Number of tiles per image. Default None assumes 1 tile per image. - image_token_index (int): ID for input images. + num_image_tiles (list of int): Number of tiles per image. Default 1 tile per image. + image_token_index (int): ID for input images. Default None means `image_token_index` + arg in the constructor will be used. + runtime_gather_output (bool): Gather output at runtime. Default None means + `parallel_output` arg in the constructor will be used. + image_token_mask (torch.Tensor): Tensor indicating the location of + image token index in input_ids. + packed_seq_params (PackedSeqParams): Dict with padded token information. + Required for using SP/CP with padding mask type. Returns: - output (torch.Tensor): Loss of shape [b, s] if labels are provided, otherwise logits of shape [b, s, vocab_size]. + output (torch.Tensor): Loss of shape [b, s] if labels are provided, + otherwise logits of shape [b, s, vocab_size]. loss_mask (torch.Tensor): Loss mask expanded to combined sequence length. Shape [b, s]. """ use_inference_kv_cache = ( inference_params is not None and "image_tokens_count" in inference_params.key_value_memory_dict ) - has_images = media is not None and media.shape[0] > 0 + has_images = images is not None and images.shape[0] > 0 - # If running inference, we can skip media token computation if they were computed already earlier for this sample. + # If running inference, we can skip images token computation if they were computed already earlier for this sample. if use_inference_kv_cache: - media_embeddings = None + image_embeddings = None elif self.add_encoder and not has_images: vision_param = next(self.vision_model.parameters()) # If no images provided, use an empty image embeddings tensor. - media_embeddings = torch.tensor([], dtype=vision_param.dtype, device=vision_param.device).reshape(0, 0, 0) + image_embeddings = torch.tensor([], dtype=vision_param.dtype, device=vision_param.device).reshape(0, 0, 0) elif self.add_encoder and has_images: - # media is in shape of (num_images_in_mbs, c, h, w) + # images is in shape of (num_images_in_mbs, c, h, w) # note num_images_in_mbs is not mbs but total images in this mbs. 
- media = media.to(next(self.vision_model.parameters()).dtype) + images = images.to(next(self.vision_model.parameters()).dtype) if self.vision_model_from_hf: self.vision_model = self.vision_model.eval() - media_embeddings = self.vision_model(media, output_hidden_states=True) - media_embeddings = media_embeddings[-1][ + image_embeddings = self.vision_model(images, output_hidden_states=True) + image_embeddings = image_embeddings[-1][ self.config.vision_feature_layer ] # [num_images, img_seq_len, h_vision] else: # TODO(yuya): MCore Clip path not yet support taking a specific layer hidden states - media_embeddings = self.vision_model(media, num_unused_layers=-self.config.vision_feature_layer - 1) + image_embeddings = self.vision_model(images, num_unused_layers=-self.config.vision_feature_layer - 1) if self._drop_vision_class_token: class_token_len = getattr(self.vision_model, "class_token_len", 1) - media_embeddings = media_embeddings[:, class_token_len:, :] + image_embeddings = image_embeddings[:, class_token_len:, :] # contiguous() required as `permute` can sparsify the tensor and this breaks pipelining - media_embeddings = media_embeddings.permute(1, 0, 2).contiguous() # [img_seq_len, num_tiles, h_vision] + image_embeddings = image_embeddings.permute(1, 0, 2).contiguous() # [img_seq_len, num_tiles, h_vision] # map vision model output size to language model input size. - media_embeddings = self.vision_projection(media_embeddings) # [img_seq_len, num_tiles, h_language] + image_embeddings = self.vision_projection(image_embeddings) # [img_seq_len, num_tiles, h_language] # TODO: Support batched inference. # In inference, the language model KV cache will be updated for image token positions. # Store the image tokens sequence length to be used as an offset to the KV cache later. if inference_params is not None: - inference_params.key_value_memory_dict["media_tokens_count"] = ( - media_embeddings.shape[0] * media_embeddings.shape[1] + inference_params.key_value_memory_dict["image_tokens_count"] = ( + image_embeddings.shape[0] * image_embeddings.shape[1] ) else: - media_embeddings = self.encoder_hidden_state + image_embeddings = self.encoder_hidden_state if not self.add_decoder: - return media_embeddings + return image_embeddings language_embeddings = None if self.pre_process: @@ -569,32 +580,33 @@ def forward( language_embeddings = language_embeddings.transpose(1, 0).contiguous() # [b, text_seq_len, h_language] # Assume 1 tile per image if the number of tiles is not provided. - if num_media_tiles is None: - num_media_tiles = torch.ones(media.shape[0], dtype=torch.int, device=input_ids.device) - elif isinstance(num_media_tiles, list): - num_media_tiles = torch.tensor(num_media_tiles, dtype=torch.int, device=input_ids.device) + if num_image_tiles is None: + num_image_tiles = torch.ones(images.shape[0], dtype=torch.int, device=input_ids.device) + elif isinstance(num_image_tiles, list): + num_image_tiles = torch.tensor(num_image_tiles, dtype=torch.int, device=input_ids.device) # Preprocess input, labels and loss mask. 
combined_embeddings, final_labels, final_loss_mask, final_attention_mask = self._preprocess_data( - media_embeddings, + image_embeddings, language_embeddings, input_ids, loss_mask, labels, use_inference_kv_cache, - media_token_index, - num_media_tiles, + image_token_index, + num_image_tiles, attention_mask, ) # [combined_seq_len, b, h_language], [b, combined_seq_len], [b, combined_seq_len] output = self.language_model( input_ids=None, position_ids=None, - attention_mask=attention_mask, + attention_mask=final_attention_mask, decoder_input=combined_embeddings, labels=final_labels, inference_params=inference_params, runtime_gather_output=runtime_gather_output, + packed_seq_params=packed_seq_params, ) if labels is None or loss_mask is None: @@ -878,20 +890,28 @@ def forward( position_ids: torch.Tensor, loss_mask: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, - media: Optional[torch.Tensor] = None, + images: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, - inference_params: InferenceParams = None, - num_media_tiles: Optional[List[int]] = None, + inference_params: Optional[InferenceParams] = None, + num_image_tiles: Optional[List[int]] = None, + image_token_index: Optional[int] = IMAGE_TOKEN_INDEX, + runtime_gather_output: Optional[bool] = None, + image_token_mask: Optional[torch.Tensor] = None, + packed_seq_params: Optional[PackedSeqParams] = None, ) -> torch.Tensor: output_tensor = self.module( - media=media, + images=images, input_ids=input_ids, position_ids=position_ids, loss_mask=loss_mask, attention_mask=attention_mask, labels=labels, inference_params=inference_params, - num_media_tiles=num_media_tiles, + num_image_tiles=num_image_tiles, + image_token_index=image_token_index, + runtime_gather_output=runtime_gather_output, + image_token_mask=image_token_mask, + packed_seq_params=packed_seq_params, ) return output_tensor diff --git a/nemo/lightning/megatron_parallel.py b/nemo/lightning/megatron_parallel.py index 4a1251890547..1b1f5c790b61 100644 --- a/nemo/lightning/megatron_parallel.py +++ b/nemo/lightning/megatron_parallel.py @@ -1724,6 +1724,10 @@ def masked_token_loss_context_parallel(tensor: Tensor, mask: Tensor, num_valid_t losses = tensor.float() loss_mask = mask.view(-1).float() + if num_valid_tokens_in_ub is None: + num_valid_tokens_in_ub = loss_mask.sum() + if num_valid_tokens_in_ub < 0.5: # no valid tokens + num_valid_tokens_in_ub += 1.0 loss = torch.sum(losses.view(-1) * loss_mask) / num_valid_tokens_in_ub # sequence level nll torch.distributed.all_reduce(loss, group=parallel_state.get_context_parallel_group()) diff --git a/scripts/vlm/neva_finetune.py b/scripts/vlm/neva_finetune.py index 6fc4e2de13b5..4069fb2d9278 100644 --- a/scripts/vlm/neva_finetune.py +++ b/scripts/vlm/neva_finetune.py @@ -27,6 +27,7 @@ from nemo import lightning as nl from nemo.collections import llm, vlm from nemo.collections.vlm import ImageDataConfig +from nemo.lightning.pytorch.callbacks.megatron_comm_overlap import MegatronCommOverlapCallback from nemo.lightning.pytorch.optim import CosineAnnealingScheduler from nemo.lightning.pytorch.optim.megatron import MegatronOptimizerModule from nemo.utils.exp_manager import TimingCallback @@ -111,7 +112,7 @@ def main(args): ddp=DistributedDataParallelConfig( check_for_nan_in_grad=True, grad_reduce_in_fp32=True, - overlap_grad_reduce=False, + overlap_grad_reduce=True, overlap_param_gather=True, average_in_collective=True, ), @@ -134,7 +135,11 @@ def main(args): accelerator="gpu", strategy=strategy, 
plugins=nl.MegatronMixedPrecision(precision="bf16-mixed"), - callbacks=[checkpoint_callback, TimingCallback()], + callbacks=[ + checkpoint_callback, + TimingCallback(), + MegatronCommOverlapCallback(tp_comm_overlap=True), + ], val_check_interval=500, limit_val_batches=gbs, log_every_n_steps=1, @@ -223,7 +228,7 @@ def main(args): parser.add_argument("--name", type=str, required=False, default="neva_pretrain") parser.add_argument("--peft", type=str, default='none', help="none | lora") parser.add_argument("--wandb_project", type=str, required=False, default=None) - parser.add_argument("--gbs", type=int, required=False, default=64, help="Global batch size") + parser.add_argument("--gbs", type=int, required=False, default=128, help="Global batch size") parser.add_argument("--mbs", type=int, required=False, default=2, help="Micro batch size") parser.add_argument("--lr", type=float, required=False, default=2.0e-06, help="Learning rate") From 08bf53cf0459733c1f9f3597478e80b644bea6b7 Mon Sep 17 00:00:00 2001 From: Huiying Date: Mon, 16 Dec 2024 11:30:44 -0800 Subject: [PATCH 052/128] add nemo2-sft-peft to readme (#11613) Signed-off-by: Huiying Li --- tutorials/llm/llama-3/README.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tutorials/llm/llama-3/README.rst b/tutorials/llm/llama-3/README.rst index 1d12b8847c0d..38d4177942bf 100755 --- a/tutorials/llm/llama-3/README.rst +++ b/tutorials/llm/llama-3/README.rst @@ -20,3 +20,6 @@ This repository contains Jupyter Notebook tutorials using the NeMo Framework for * - `Llama 3.1 Pruning and Distillation with NeMo Framework <./pruning-distillation>`_ - `WikiText-103-v1 `_ - Perform pruning and distillation on Llama 3.1 8B using the WikiText-103-v1 dataset with NeMo Framework. + * - `Llama3 LoRA Fine-Tuning and Supervised Fine-Tuning using NeMo2 <./nemo2-sft-peft>`_ + - `SQuAD `_ for LoRA and `Databricks-dolly-15k `_ for SFT + - Perform LoRA PEFT and SFT on Llama 3 8B using NeMo 2.0 From 75bc0745cb357861ad01ee1bd44acd0b41f860c7 Mon Sep 17 00:00:00 2001 From: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> Date: Tue, 17 Dec 2024 03:03:54 +0530 Subject: [PATCH 053/128] Set Minitron width pruning batch size 1 (#11603) Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- .github/workflows/cicd-main.yml | 2 +- .../language_modeling/conf/megatron_gpt_prune.yaml | 10 +++++----- examples/nlp/language_modeling/megatron_gpt_prune.py | 11 ++++++----- .../pruning-distillation/03_b_width_pruning.ipynb | 6 ++---- 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 1ef4e5e7c034..7af191e98416 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -570,7 +570,7 @@ jobs: prune.ffn_hidden_size=192 \ prune.num_attention_heads=2 \ prune.num_query_groups=2 \ - prune.hidden_size=null \ + prune.hidden_size=128 \ export.save_path=examples/nlp/language_modeling/ci_prune_width.nemo AFTER_SCRIPT: | rm -rf examples/nlp/language_modeling/ci_prune_width.nemo diff --git a/examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml b/examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml index f174aafed0ee..85e46b6a6989 100644 --- a/examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml +++ b/examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml @@ -9,7 +9,7 @@ inference: repetition_penalty: 1.2 # The parameter for repetition penalty. 1.0 means no penalty. 
min_tokens_to_generate: 0 # The minimum length of the sequence to be generated. compute_logprob: false # a flag used to compute logprob of all the input text, a very special case of running inference, default False - batch_size: 64 # batch size for inference + batch_size: 1 # batch size for inference max_context_length: 512 # max length of the context, input sequence will be truncated if it is longer than this trainer: @@ -24,7 +24,7 @@ model: tensor_model_parallel_size: 1 # Pruning currently only supports tensor_model_parallel_size=1 pipeline_model_parallel_size: 1 sequence_parallel: false # Sequence parallelism is not supported with pipeline parallelism - restore_from_path: llama3.1-8b-instruct.nemo # Nemo file path + restore_from_path: ??? # Nemo file path ## Activation Checkpoint activations_checkpoint_granularity: null # 'selective' or 'full' @@ -34,11 +34,11 @@ prune: calib_dataset: wikitext # wikitext, cnn_dailymail, or a local dataset num_calib_size: 1024 # number of samples used for calibration # pruning constraints (null means no pruning) - ffn_hidden_size: 9216 # ffn_hidden_size in the pruned model + ffn_hidden_size: null # ffn_hidden_size in the pruned model num_attention_heads: null # num_attention_heads in the pruned model num_query_groups: null # num_query_groups in the pruned model - hidden_size: 3072 # hidden_size (embedding size) in the pruned model + hidden_size: null # hidden_size (embedding size) in the pruned model num_layers: null # num_layers (depth) in the pruned model export: - save_path: llama3.1-8b-instruct-pruned.nemo # Path where the pruned model will be saved + save_path: ??? # Path where the pruned model will be saved diff --git a/examples/nlp/language_modeling/megatron_gpt_prune.py b/examples/nlp/language_modeling/megatron_gpt_prune.py index 44992873f362..b89d3adbb081 100644 --- a/examples/nlp/language_modeling/megatron_gpt_prune.py +++ b/examples/nlp/language_modeling/megatron_gpt_prune.py @@ -13,7 +13,6 @@ # limitations under the License. 
import modelopt.torch.prune as mtp -import torch import torch.multiprocessing as mp from datasets import load_dataset from lightning.pytorch.trainer.trainer import Trainer @@ -36,7 +35,7 @@ Example usage: ``` python examples/nlp/language_modeling/megatron_gpt_prune.py \ - model.restore_from_path=llama3.1-8b-instruct.nemo \ + model.restore_from_path=llama3.1-8b.nemo \ model.tensor_model_parallel_size=1 \ model.pipeline_model_parallel_size=8 \ trainer.num_nodes=1 \ @@ -46,13 +45,14 @@ prune.num_attention_heads=null \ prune.num_query_groups=null \ prune.hidden_size=3072 \ - export.save_path=llama3.1-8b-instruct-pruned.nemo + export.save_path=llama3.1-8b-pruned.nemo ``` -where tensor_model_parallel_size must be 1 because of the current prune API limitation +where model.tensor_model_parallel_size and inference.batch_size must be 1 because of the current prune API limitation """ -def get_calib_data_iter(data="wikitext", batch_size=64, calib_size=512, max_sequence_length=512): +def get_calib_data_iter(data="wikitext", batch_size=1, calib_size=1024, max_sequence_length=512): + """Get a data iterator for calibration.""" if data == "wikitext": dataset = load_dataset("wikitext", "wikitext-103-v1", split="train") text_column = "text" @@ -73,6 +73,7 @@ def get_calib_data_iter(data="wikitext", batch_size=64, calib_size=512, max_sequ @hydra_runner(config_path="conf", config_name="megatron_gpt_prune") def main(cfg) -> None: + """Prune a model using modelopt.""" # Overwrite model config with the one from the model checkpoint and apply pruning modifications model_cfg = load_config(cfg.model.restore_from_path) model_cfg.update(cfg.model) diff --git a/tutorials/llm/llama-3/pruning-distillation/03_b_width_pruning.ipynb b/tutorials/llm/llama-3/pruning-distillation/03_b_width_pruning.ipynb index b4e323463078..c63cd9d2ef9e 100644 --- a/tutorials/llm/llama-3/pruning-distillation/03_b_width_pruning.ipynb +++ b/tutorials/llm/llama-3/pruning-distillation/03_b_width_pruning.ipynb @@ -28,9 +28,7 @@ "\n", "We use the above parameters to get a competitive model for this demonstration. You can use other strategies or parameters from the [blog](https://developer.nvidia.com/blog/how-to-prune-and-distill-llama-3-1-8b-to-an-nvidia-llama-3-1-minitron-4b-model/) or the [tech report](https://arxiv.org/pdf/2408.11796) for your experiments. \n", "\n", - "> `NOTE:` In the block of code below, pass the paths to your fine-tuned teacher .nemo model.\n", - "\n", - "> `TIP:` You can increase the ``batch_size`` (upto 1024) to speed up the width-pruning script execution." + "> `NOTE:` In the block of code below, pass the paths to your fine-tuned teacher .nemo model." 
    ]
   },
   {
@@ -48,7 +46,7 @@
     "    model.tensor_model_parallel_size=1 \\\n",
     "    model.pipeline_model_parallel_size=8 \\\n",
     "    +model.dist_ckpt_load_strictness=log_all \\\n",
-    "    inference.batch_size=64 \\\n",
+    "    inference.batch_size=1 \\\n",
     "    trainer.num_nodes=1 \\\n",
     "    trainer.precision=bf16 \\\n",
     "    trainer.devices=8 \\\n",

From 69d84cc4a87ab65f875c6fe60fac073b47c3c578 Mon Sep 17 00:00:00 2001
From: Ao Tang
Date: Mon, 16 Dec 2024 17:52:44 -0500
Subject: [PATCH 054/128] Disable CP for running Inference using megatron_gpt_eval (#11547)

* Disable CP for megatron_gpt_eval

* Apply isort and black reformatting

Signed-off-by: suiyoubi

* Update examples/nlp/language_modeling/megatron_gpt_eval.py

Co-authored-by: Chen Cui
Signed-off-by: Ao Tang

---------

Signed-off-by: suiyoubi
Signed-off-by: Ao Tang
Co-authored-by: suiyoubi
Co-authored-by: Chen Cui
---
 .../nlp/language_modeling/megatron_gpt_eval.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/examples/nlp/language_modeling/megatron_gpt_eval.py b/examples/nlp/language_modeling/megatron_gpt_eval.py
index 4dbbee78e898..3678cda32b47 100644
--- a/examples/nlp/language_modeling/megatron_gpt_eval.py
+++ b/examples/nlp/language_modeling/megatron_gpt_eval.py
@@ -193,12 +193,23 @@ def load_model_from_config(trainer, cfg):
     cfg.pipeline_model_parallel_size = model_config.get('pipeline_model_parallel_size', 1)
     cfg.pipeline_model_parallel_split_rank = model_config.get('pipeline_model_parallel_split_rank', 0)
 
+    if model_config.get('context_parallel_size', 1) > 1:
+        logging.warning(
+            f'Model config has context_parallel_size={model_config.context_parallel_size}. CP will be disabled for '
+            f'inference. Consider using tensor parallelism or pipeline parallelism to fit the model.'
+        )
+
     assert (
         cfg.trainer.devices * cfg.trainer.num_nodes
         == cfg.tensor_model_parallel_size
         * cfg.pipeline_model_parallel_size
         * max(1, cfg.get('expert_model_parallel_size', 1))
-    ), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
+    ), (
+        f"devices({cfg.trainer.devices}) * num_nodes({cfg.trainer.num_nodes}) should equal "
+        f"tensor_model_parallel_size({cfg.tensor_model_parallel_size}) * "
+        f"pipeline_model_parallel_size ({cfg.pipeline_model_parallel_size}) * "
+        f"expert_model_parallel_size ({max(1, cfg.get('expert_model_parallel_size', 1))})"
+    )
 
     if cfg.gpt_model_file:
         save_restore_connector = NLPSaveRestoreConnector()
@@ -223,6 +234,7 @@ def load_model_from_config(trainer, cfg):
         # with dist checkpointing we can use the model parallel config specified by the user
         pretrained_cfg.tensor_model_parallel_size = cfg.tensor_model_parallel_size
         pretrained_cfg.pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
+        pretrained_cfg.context_parallel_size = 1  # context parallel is disabled for inference
         pretrained_cfg.expert_model_parallel_size = cfg.get('expert_model_parallel_size', 1)
         pretrained_cfg.micro_batch_size = 1
         if trainer.precision == "16":
@@ -250,6 +262,7 @@ def load_model_from_config(trainer, cfg):
         )
         app_state.tensor_model_parallel_size = cfg.tensor_model_parallel_size
         app_state.pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
+        app_state.context_parallel_size = 1  # context parallel is disabled for inference
         app_state.expert_model_parallel_size = cfg.get('expert_model_parallel_size', 1)
         (
             app_state.tensor_model_parallel_rank,

From 0b14618df7dd6b02ba1b596549f3d211e3ccef08 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?oliver=20k=C3=B6nig?=
Date: Tue, 17 Dec 2024 00:00:00 +0100
Subject: [PATCH 055/128] ci: Add `no-fail-fast` mode (#11608) Signed-off-by: Oliver Koenig --- .github/workflows/_test_template.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/_test_template.yml b/.github/workflows/_test_template.yml index 1e184a8d4160..f198ffe6af1b 100644 --- a/.github/workflows/_test_template.yml +++ b/.github/workflows/_test_template.yml @@ -94,7 +94,7 @@ jobs: exit $EXIT_CODE - uses: "NVIDIA/NeMo/.github/actions/cancel-workflow@main" - if: failure() && inputs.IS_OPTIONAL == false + if: failure() && inputs.IS_OPTIONAL == false && !contains(github.event.pull_request.labels.*.name, 'no-fail-fast') - name: after_script if: always() && inputs.AFTER_SCRIPT != ':' run: | From b975aaa1a08bc884591e3233357680fe4572bcfc Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Mon, 16 Dec 2024 18:02:59 -0500 Subject: [PATCH 056/128] Chat dataset support (#11423) * chat dataset support Signed-off-by: Chen Cui * Apply isort and black reformatting Signed-off-by: cuichenx * add ci test Signed-off-by: Chen Cui * address comment Signed-off-by: Chen Cui * Apply isort and black reformatting Signed-off-by: cuichenx * address comment Signed-off-by: Chen Cui --------- Signed-off-by: Chen Cui Signed-off-by: cuichenx Co-authored-by: cuichenx --- .github/workflows/cicd-main.yml | 33 ++++++++++++++- nemo/collections/llm/__init__.py | 2 + nemo/collections/llm/gpt/data/__init__.py | 8 ++-- nemo/collections/llm/gpt/data/chat.py | 41 +++++++++++++++++++ nemo/collections/llm/gpt/data/core.py | 10 ++++- .../megatron/gpt_sft_chat_dataset.py | 1 - nemo/lightning/io/connector.py | 2 +- tests/collections/llm/gpt_finetuning.py | 30 ++++++++++---- 8 files changed, 112 insertions(+), 15 deletions(-) create mode 100644 nemo/collections/llm/gpt/data/chat.py diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 7af191e98416..686b066652c0 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -4551,6 +4551,36 @@ jobs: --pp_size 1 \ --mbs 1 --packed + L2_NeMo_2_GPT_LoRA_TP1PP1_MBS1_Chat: + needs: [cicd-test-container-setup] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_NeMo_2_GPT_LoRA_TP1PP1_MBS1_Chat') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + + python tests/collections/llm/gpt_finetuning.py \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --devices 2 \ + --max_steps 3 \ + --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ + --peft lora \ + --tp_size 1 \ + --pp_size 1 \ + --mbs 1 \ + --chat_dataset_path /home/TestData/nemo2_data/chat + + python tests/collections/llm/gpt_finetuning.py \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --devices 2 \ + --max_steps 6 \ + --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ + --peft lora \ + --tp_size 1 \ + --pp_size 1 \ + --mbs 1 \ + --chat_dataset_path /home/TestData/nemo2_data/chat + L2_NeMo_2_Mixtral_LoRA_EP2PP1_MBS2: needs: [cicd-test-container-setup] uses: ./.github/workflows/_test_template.yml @@ -4661,7 +4691,7 @@ jobs: AFTER_SCRIPT: | rm -rf /tmp/nemo2_ckpt rm -rf /tmp/nemo2_ptq_engine - + L2_NeMo_2_LLAVA_NEXT_MOCK_TRAINING: needs: [cicd-test-container-setup] uses: ./.github/workflows/_test_template.yml @@ -4820,6 +4850,7 @@ jobs: - L2_NeMo_2_GPT_LoRA_TP1PP1_MBS2 - L2_NeMo_2_GPT_LoRA_TP1PP2_MBS2 - L2_NeMo_2_GPT_LoRA_TP2PP1_MBS2 + - L2_NeMo_2_GPT_LoRA_TP1PP1_MBS1_Chat - 
L2_NeMo_2_GPT_LoRA_TP1PP1_MBS1_PACKED - L2_NeMo_2_GPT_DoRA_TP1PP1_MBS1_PACKED - L2_NeMo_2_GPT_CLoRA_TP1PP1_MBS1_PACKED diff --git a/nemo/collections/llm/__init__.py b/nemo/collections/llm/__init__.py index c7d64f5b5552..4665ebfc9d77 100644 --- a/nemo/collections/llm/__init__.py +++ b/nemo/collections/llm/__init__.py @@ -32,6 +32,7 @@ ) from nemo.collections.llm.gpt.data import ( AlpacaDataModule, + ChatDataModule, DollyDataModule, FineTuningDataModule, HFDatasetDataModule, @@ -220,6 +221,7 @@ "Qwen2Config72B", "PreTrainingDataModule", "FineTuningDataModule", + "ChatDataModule", "SquadDataModule", "T5PreTrainingDataModule", "T5FineTuningDataModule", diff --git a/nemo/collections/llm/gpt/data/__init__.py b/nemo/collections/llm/gpt/data/__init__.py index c8690fd0668f..89b5a3dc4b54 100644 --- a/nemo/collections/llm/gpt/data/__init__.py +++ b/nemo/collections/llm/gpt/data/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. from nemo.collections.llm.gpt.data.alpaca import AlpacaDataModule +from nemo.collections.llm.gpt.data.chat import ChatDataModule from nemo.collections.llm.gpt.data.dolly import DollyDataModule from nemo.collections.llm.gpt.data.fine_tuning import FineTuningDataModule from nemo.collections.llm.gpt.data.hf_dataset import HFDatasetDataModule @@ -21,12 +22,13 @@ from nemo.collections.llm.gpt.data.squad import SquadDataModule __all__ = [ - "FineTuningDataModule", "AlpacaDataModule", - "SquadDataModule", + "ChatDataModule", "DollyDataModule", + "FineTuningDataModule", + "HFDatasetDataModule", "MockDataModule", "PreTrainingDataModule", "build_pretraining_datamodule", - "HFDatasetDataModule", + "SquadDataModule", ] diff --git a/nemo/collections/llm/gpt/data/chat.py b/nemo/collections/llm/gpt/data/chat.py new file mode 100644 index 000000000000..1b51c4aa1524 --- /dev/null +++ b/nemo/collections/llm/gpt/data/chat.py @@ -0,0 +1,41 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from functools import lru_cache + +from nemo.collections.llm.gpt.data.core import create_sft_dataset +from nemo.collections.llm.gpt.data.fine_tuning import FineTuningDataModule + + +class ChatDataModule(FineTuningDataModule): + """ + Base class for fine-tuning an LLM on chat datasets. + This class calls `GPTSFTChatDataset` for chat template processing + + See base class `FineTuningDataModule` for more details. 
+ """ + + @lru_cache + def _create_dataset(self, path, is_test=False, **kwargs): + # pylint: disable=C0115,C0116 + return create_sft_dataset( + path, + tokenizer=self.tokenizer, + seq_length=(self.seq_length if is_test or self.packed_sequence_size <= 0 else self.packed_sequence_size), + memmap_workers=self.memmap_workers, + seed=self.seed, + chat=True, + is_test=is_test, + **kwargs, + ) diff --git a/nemo/collections/llm/gpt/data/core.py b/nemo/collections/llm/gpt/data/core.py index af9686167dbd..54eb9e31c53a 100644 --- a/nemo/collections/llm/gpt/data/core.py +++ b/nemo/collections/llm/gpt/data/core.py @@ -47,9 +47,17 @@ def create_sft_dataset( memmap_workers: int = 2, hf_dataset: bool = False, global_sample_mapping: bool = False, + chat: bool = False, **kwargs, ) -> "GPTSFTDataset": - if path.suffix == '.npy': + """ + Create the dataset class (GPTSFTDataset, GPTSFTChatDataset or GPTSFTPackedDataset) + """ + if chat: + from nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_chat_dataset import GPTSFTChatDataset + + dataset_cls = GPTSFTChatDataset + elif path.suffix == '.npy': from nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_dataset import GPTSFTPackedDataset dataset_cls = GPTSFTPackedDataset diff --git a/nemo/collections/nlp/data/language_modeling/megatron/gpt_sft_chat_dataset.py b/nemo/collections/nlp/data/language_modeling/megatron/gpt_sft_chat_dataset.py index ef09c7ff068e..6d71a9d8e014 100644 --- a/nemo/collections/nlp/data/language_modeling/megatron/gpt_sft_chat_dataset.py +++ b/nemo/collections/nlp/data/language_modeling/megatron/gpt_sft_chat_dataset.py @@ -310,7 +310,6 @@ def _maybe_validate_prompt_template(self): def _build_samples_mapping(self): super()._build_samples_mapping() - assert hasattr(self.tokenizer, "vocab"), "tokenizer should have vocab property, not supported" LABEL_START = self.special_tokens['label_start'] END_NAME_SIGNAL = self.special_tokens['end_of_name'] diff --git a/nemo/lightning/io/connector.py b/nemo/lightning/io/connector.py index a38be6ee8f0a..bf07956f2cd2 100644 --- a/nemo/lightning/io/connector.py +++ b/nemo/lightning/io/connector.py @@ -274,7 +274,7 @@ def on_import_ckpt(self, model: pl.LightningModule): def save_hf_tokenizer_assets(self, tokenizer_name_or_path, save_path="/tmp/nemo_tokenizer"): from transformers import AutoTokenizer - tok = AutoTokenizer.from_pretrained(tokenizer_name_or_path) + tok = AutoTokenizer.from_pretrained(tokenizer_name_or_path, trust_remote_code=True) # Save tokenizer assets to save_path. tok.save_pretrained(save_path) return save_path diff --git a/tests/collections/llm/gpt_finetuning.py b/tests/collections/llm/gpt_finetuning.py index d265ba8ce20a..be5331c32f3b 100644 --- a/tests/collections/llm/gpt_finetuning.py +++ b/tests/collections/llm/gpt_finetuning.py @@ -46,6 +46,9 @@ def get_args(): parser.add_argument('--tp_size', type=int, default=1, help="tensor parallel size") parser.add_argument('--pp_size', type=int, default=1, help="pipeline parallel size") parser.add_argument('--packed', action='store_true', help="use packed sequence dataset") + parser.add_argument( + '--chat_dataset_path', type=str, default="", help="path to chat dataset. Uses dolly if this is empty." 
+ ) return parser.parse_args() @@ -105,13 +108,24 @@ def get_args(): packed_sequence_specs = ( PackedSequenceSpecs(packed_sequence_size=2048, tokenizer_model_name="dummy_tokenizer") if args.packed else None ) - dolly = llm.DollyDataModule( - seq_length=2048, - micro_batch_size=args.mbs, - global_batch_size=4, - num_workers=0, - packed_sequence_specs=packed_sequence_specs, - ) + if args.chat_dataset_path: + assert not args.packed + data = llm.ChatDataModule( + dataset_root=args.chat_dataset_path, + seq_length=2048, + micro_batch_size=args.mbs, + global_batch_size=8, + num_workers=0, + packed_sequence_specs=packed_sequence_specs, + ) + else: + data = llm.DollyDataModule( + seq_length=2048, + micro_batch_size=args.mbs, + global_batch_size=8, + num_workers=0, + packed_sequence_specs=packed_sequence_specs, + ) tokenizer = get_nmt_tokenizer(tokenizer_model=os.path.join(args.restore_path, "dummy_tokenizer.model")) llama3_8b = llm.LlamaModel(Llama3ConfigCI(), tokenizer=tokenizer) @@ -123,7 +137,7 @@ def get_args(): llm.finetune( model=llama3_8b, - data=dolly, + data=data, trainer=trainer, peft=peft, log=logger, From aa7a4e1f4c79f7f6a01d028825caf9e500e96acf Mon Sep 17 00:00:00 2001 From: Taejin Park Date: Mon, 16 Dec 2024 15:39:33 -0800 Subject: [PATCH 057/128] Sortformer Diarizer 4spk v1 model PR Part 2: Unit-tests for Sortformer Diarizer. (#11336) * Adding the first pr files models and dataset Signed-off-by: taejinp * Tested all unit-test files Signed-off-by: taejinp * Name changes on yaml files and train example Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Reflecting comments and removing unnecessary parts for this PR Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Adding docstrings to reflect the PR comments Signed-off-by: taejinp * removed the unused find_first_nonzero Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Fixed all pylint issues Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Resolving pylint issues Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Removing unused varialbe in audio_to_diar_label.py Signed-off-by: taejinp * Fixed docstrings in training script Signed-off-by: taejinp * Line-too-long issue from Pylint fixed Signed-off-by: taejinp * Adding get_subsegments_scriptable to prevent jit.script error Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Addressed Code-QL issues Signed-off-by: taejinp * Resolved conflicts on bce_loss.py Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Adding all the diarization reltated unit-tests Signed-off-by: taejinp * Moving speaker task related unit test files to speaker_tasks folder Signed-off-by: taejinp * Fixed uninit variable issue in bce_loss.py spotted by codeQL Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Fixing code-QL issues Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Reflecting PR comments from weiqingw Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Line too long pylint issue resolved in e2e_diarize_speech.py Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Resovled unused variable issue in model test Signed-off-by: taejinp * Reflecting the comment on Nov 21st 2024. 
Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Unused variable import time Signed-off-by: taejinp * Adding docstrings to score_labels() function in der.py Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Reflecting comments on YAML files and model file variable changes. Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Added get_subsegments_scriptable for legacy get_subsegment functions Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Resolved line too long pylint issues Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Added training and inference CI-tests Signed-off-by: taejinp * Added the missing parse_func in preprocessing/collections.py Signed-off-by: taejinp * Adding the missing parse_func in preprocessing/collections.py Signed-off-by: taejinp * Fixed an indentation error Signed-off-by: taejinp * Resolved multi_bin_acc and bce_loss issues Signed-off-by: taejinp * Resolved line-too-long for msdd_models.py Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Code QL issues and fixed test errors Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * line too long in audio_to_diar_label.py Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * resolving CICD test issues Signed-off-by: taejinp * Fixing codeQL issues Signed-off-by: taejinp * Fixed pin memory False for inference Signed-off-by: taejinp --------- Signed-off-by: taejinp Signed-off-by: tango4j Co-authored-by: tango4j --- .github/workflows/cicd-main.yml | 29 + .../neural_diarizer/e2e_diarize_speech.py | 1 + .../asr/data/audio_to_diar_label.py | 12 +- .../asr/models/sortformer_diar_models.py | 3 +- .../speaker_tasks/test_diar_datasets.py | 110 ++++ .../test_diar_label_models.py | 42 +- .../test_diar_lhotse_datasets.py | 173 ++++++ .../test_diar_metrics.py | 0 .../test_diar_neural_inference.py | 10 +- .../test_diar_sortformer_models.py | 168 ++++++ .../test_speaker_label_models.py | 12 +- .../utils/test_data_simul_utils.py | 549 ++++++++++++++++++ .../utils}/test_diar_utils.py | 109 +++- .../utils/test_multispeaker_utils.py | 352 +++++++++++ .../speaker_tasks/utils/test_vad_utils.py | 126 ++++ 15 files changed, 1686 insertions(+), 10 deletions(-) create mode 100644 tests/collections/speaker_tasks/test_diar_datasets.py rename tests/collections/{asr => speaker_tasks}/test_diar_label_models.py (79%) create mode 100644 tests/collections/speaker_tasks/test_diar_lhotse_datasets.py rename tests/collections/{asr => speaker_tasks}/test_diar_metrics.py (100%) rename tests/collections/{asr => speaker_tasks}/test_diar_neural_inference.py (87%) create mode 100644 tests/collections/speaker_tasks/test_diar_sortformer_models.py rename tests/collections/{asr => speaker_tasks}/test_speaker_label_models.py (95%) create mode 100644 tests/collections/speaker_tasks/utils/test_data_simul_utils.py rename tests/collections/{asr => speaker_tasks/utils}/test_diar_utils.py (92%) create mode 100644 tests/collections/speaker_tasks/utils/test_multispeaker_utils.py create mode 100644 tests/collections/speaker_tasks/utils/test_vad_utils.py diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 686b066652c0..310d580e43f6 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -816,6 +816,33 @@ jobs: +trainer.fast_dev_run=True \ 
exp_manager.exp_dir=/tmp/speaker_diarization_results + L2_Speaker_dev_run_EndtoEnd_Speaker_Diarization_Sortformer: + needs: [cicd-test-container-setup] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_Speaker_dev_run_EndtoEnd_Speaker_Diarization_Sortformer') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure-gpus-1 + SCRIPT: | + python examples/speaker_tasks/diarization/neural_diarizer/sortformer_diar_train.py \ + trainer.devices="[0]" \ + batch_size=3 \ + model.train_ds.manifest_filepath=/home/TestData/an4_diarizer/simulated_train/eesd_train_tiny.json \ + model.validation_ds.manifest_filepath=/home/TestData/an4_diarizer/simulated_valid/eesd_valid_tiny.json \ + exp_manager.exp_dir=/tmp/speaker_diarization_results \ + +trainer.fast_dev_run=True + + L2_Speaker_dev_run_EndtoEnd_Diarizer_Inference: + needs: [cicd-test-container-setup] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_Speaker_dev_run_EndtoEnd_Diarizer_Inference') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + python examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py \ + model_path=/home/TestData/an4_diarizer/diar_sortformer_4spk-v1-tiny.nemo \ + dataset_manifest=/home/TestData/an4_diarizer/simulated_valid/eesd_valid_tiny.json \ + batch_size=1 + L2_Speaker_dev_run_Speech_to_Label: needs: [cicd-test-container-setup] uses: ./.github/workflows/_test_template.yml @@ -4753,6 +4780,8 @@ jobs: - L2_Speech_to_Text_EMA - L2_Speaker_dev_run_Speaker_Recognition - L2_Speaker_dev_run_Speaker_Diarization + - L2_Speaker_dev_run_EndtoEnd_Speaker_Diarization_Sortformer + - L2_Speaker_dev_run_EndtoEnd_Diarizer_Inference - L2_Speaker_dev_run_Speech_to_Label - L2_Speaker_dev_run_Speaker_Diarization_with_ASR_Inference - L2_Speaker_dev_run_Clustering_Diarizer_Inference diff --git a/examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py b/examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py index 1767a16cbe02..147d7a3aa002 100644 --- a/examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py +++ b/examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py @@ -386,6 +386,7 @@ def main(cfg: DiarizationConfig) -> Union[DiarizationConfig]: diar_model._cfg.test_ds.manifest_filepath = cfg.dataset_manifest infer_audio_rttm_dict = audio_rttm_map(cfg.dataset_manifest) diar_model._cfg.test_ds.batch_size = cfg.batch_size + diar_model._cfg.test_ds.pin_memory = False # Model setup for inference diar_model._cfg.test_ds.num_workers = cfg.num_workers diff --git a/nemo/collections/asr/data/audio_to_diar_label.py b/nemo/collections/asr/data/audio_to_diar_label.py index 0824c9c6ab51..3f4ae61e0d08 100644 --- a/nemo/collections/asr/data/audio_to_diar_label.py +++ b/nemo/collections/asr/data/audio_to_diar_label.py @@ -1065,6 +1065,7 @@ def __init__( round_digits: int = 2, soft_targets: bool = False, subsampling_factor: int = 8, + device: str = 'cpu', ): super().__init__() self.collection = EndtoEndDiarizationSpeechLabel( @@ -1084,6 +1085,7 @@ def __init__( self.soft_targets = soft_targets self.round_digits = 2 self.floor_decimal = 10**self.round_digits + self.device = device def __len__(self): return len(self.collection) @@ -1232,11 +1234,13 @@ def __getitem__(self, index): audio_signal = audio_signal[: 
round(self.featurizer.sample_rate * session_len_sec)] audio_signal_length = torch.tensor(audio_signal.shape[0]).long() - audio_signal, audio_signal_length = audio_signal.to('cpu'), audio_signal_length.to('cpu') - target_len = self.get_segment_timestamps(duration=session_len_sec, sample_rate=self.featurizer.sample_rate) + audio_signal, audio_signal_length = audio_signal.to(self.device), audio_signal_length.to(self.device) + target_len = self.get_segment_timestamps(duration=session_len_sec, sample_rate=self.featurizer.sample_rate).to( + self.device + ) targets = self.parse_rttm_for_targets_and_lens( rttm_file=sample.rttm_file, offset=offset, duration=session_len_sec, target_len=target_len - ) + ).to(self.device) return audio_signal, audio_signal_length, targets, target_len @@ -1355,6 +1359,7 @@ def __init__( window_stride, global_rank: int, soft_targets: bool, + device: str, ): super().__init__( manifest_filepath=manifest_filepath, @@ -1365,6 +1370,7 @@ def __init__( window_stride=window_stride, global_rank=global_rank, soft_targets=soft_targets, + device=device, ) def eesd_train_collate_fn(self, batch): diff --git a/nemo/collections/asr/models/sortformer_diar_models.py b/nemo/collections/asr/models/sortformer_diar_models.py index f6b0eab4c895..71de10cc2f79 100644 --- a/nemo/collections/asr/models/sortformer_diar_models.py +++ b/nemo/collections/asr/models/sortformer_diar_models.py @@ -175,6 +175,7 @@ def __setup_dataloader_from_config(self, config): window_stride=self._cfg.preprocessor.window_stride, global_rank=global_rank, soft_targets=config.soft_targets if 'soft_targets' in config else False, + device=self.device, ) self.data_collection = dataset.collection @@ -557,13 +558,13 @@ def test_batch( audio_signal=audio_signal, audio_signal_length=audio_signal_length, ) + self._get_aux_test_batch_evaluations(batch_idx, preds, targets, target_lens) preds = preds.detach().to('cpu') if preds.shape[0] == 1: # batch size = 1 self.preds_total_list.append(preds) else: self.preds_total_list.extend(torch.split(preds, [1] * preds.shape[0])) torch.cuda.empty_cache() - self._get_aux_test_batch_evaluations(batch_idx, preds, targets, target_lens) logging.info(f"Batch F1Acc. MEAN: {torch.mean(torch.tensor(self.batch_f1_accs_list))}") logging.info(f"Batch Precision MEAN: {torch.mean(torch.tensor(self.batch_precision_list))}") diff --git a/tests/collections/speaker_tasks/test_diar_datasets.py b/tests/collections/speaker_tasks/test_diar_datasets.py new file mode 100644 index 000000000000..28272d63bd43 --- /dev/null +++ b/tests/collections/speaker_tasks/test_diar_datasets.py @@ -0,0 +1,110 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os +import tempfile + +import pytest +import torch.cuda + +from nemo.collections.asr.data.audio_to_diar_label import AudioToSpeechE2ESpkDiarDataset +from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer +from nemo.collections.asr.parts.utils.speaker_utils import get_vad_out_from_rttm_line, read_rttm_lines + + +def is_rttm_length_too_long(rttm_file_path, wav_len_in_sec): + """ + Check if the maximum RTTM duration exceeds the length of the provided audio file. + + Args: + rttm_file_path (str): Path to the RTTM file. + wav_len_in_sec (float): Length of the audio file in seconds. + + Returns: + bool: True if the maximum RTTM duration is less than or equal to the length of the audio file, False otherwise. + """ + rttm_lines = read_rttm_lines(rttm_file_path) + max_rttm_sec = 0 + for line in rttm_lines: + start, dur = get_vad_out_from_rttm_line(line) + max_rttm_sec = max(max_rttm_sec, start + dur) + return max_rttm_sec <= wav_len_in_sec + + +class TestAudioToSpeechE2ESpkDiarDataset: + + @pytest.mark.unit + def test_e2e_speaker_diar_dataset(self, test_data_dir): + manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/diarizer/lsm_val.json')) + + batch_size = 4 + num_samples = 8 + device = 'cuda' if torch.cuda.is_available() else 'cpu' + data_dict_list = [] + with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as f: + with open(manifest_path, 'r', encoding='utf-8') as mfile: + for ix, line in enumerate(mfile): + if ix >= num_samples: + break + + line = line.replace("tests/data/", test_data_dir + "/").replace("\n", "") + f.write(f"{line}\n") + data_dict = json.loads(line) + data_dict_list.append(data_dict) + + f.seek(0) + featurizer = WaveformFeaturizer(sample_rate=16000, int_values=False, augmentor=None) + + dataset = AudioToSpeechE2ESpkDiarDataset( + manifest_filepath=f.name, + soft_label_thres=0.5, + session_len_sec=90, + num_spks=4, + featurizer=featurizer, + window_stride=0.01, + global_rank=0, + soft_targets=False, + device=device, + ) + dataloader_instance = torch.utils.data.DataLoader( + dataset=dataset, + batch_size=batch_size, + collate_fn=dataset.eesd_train_collate_fn, + drop_last=False, + shuffle=False, + num_workers=1, + pin_memory=False, + ) + assert len(dataloader_instance) == (num_samples / batch_size) # Check if the number of batches is correct + batch_counts = len(dataloader_instance) + + deviation_thres_rate = 0.01 # 1% deviation allowed + for batch_index, batch in enumerate(dataloader_instance): + if batch_index != batch_counts - 1: + assert len(batch) == batch_size, "Batch size does not match the expected value" + audio_signals, audio_signal_len, targets, target_lens = batch + for sample_index in range(audio_signals.shape[0]): + dataloader_audio_in_sec = audio_signal_len[sample_index].item() + data_dur_in_sec = abs( + data_dict_list[batch_size * batch_index + sample_index]['duration'] * featurizer.sample_rate + - dataloader_audio_in_sec + ) + assert ( + data_dur_in_sec <= deviation_thres_rate * dataloader_audio_in_sec + ), "Duration deviation exceeds 1%" + assert not torch.isnan(audio_signals).any(), "audio_signals tensor contains NaN values" + assert not torch.isnan(audio_signal_len).any(), "audio_signal_len tensor contains NaN values" + assert not torch.isnan(targets).any(), "targets tensor contains NaN values" + assert not torch.isnan(target_lens).any(), "target_lens tensor contains NaN values" diff --git a/tests/collections/asr/test_diar_label_models.py 
b/tests/collections/speaker_tasks/test_diar_label_models.py similarity index 79% rename from tests/collections/asr/test_diar_label_models.py rename to tests/collections/speaker_tasks/test_diar_label_models.py index 2ed6177d3cb2..f01a8add7aab 100644 --- a/tests/collections/asr/test_diar_label_models.py +++ b/tests/collections/speaker_tasks/test_diar_label_models.py @@ -16,6 +16,7 @@ import torch from omegaconf import DictConfig +from nemo.collections.asr.losses import BCELoss from nemo.collections.asr.models import EncDecDiarLabelModel @@ -24,7 +25,12 @@ def msdd_model(): preprocessor = { 'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', - 'params': {"features": 80, "window_size": 0.025, "window_stride": 0.01, "sample_rate": 16000,}, + 'params': { + "features": 80, + "window_size": 0.025, + "window_stride": 0.01, + "sample_rate": 16000, + }, } speaker_model_encoder = { @@ -165,3 +171,37 @@ def test_forward_infer(self, msdd_model): assert diff <= 1e-6 diff = torch.max(torch.abs(scale_weights_instance - scale_weights_batch)) assert diff <= 1e-6 + + +class TestBCELoss: + @pytest.mark.unit + @pytest.mark.parametrize( + "probs, labels, target_lens, reduction, expected_output", + [ + ( + torch.tensor([[[0.5, 0.5], [0.5, 0.5]]], dtype=torch.float32), + torch.tensor([[[1, 0], [0, 1]]], dtype=torch.float32), + torch.tensor([2]), + "mean", + torch.tensor(0.693147, dtype=torch.float32), + ), + ( + torch.tensor([[[0.5, 0.5], [0.0, 1.0]]], dtype=torch.float32), + torch.tensor([[[1, 0], [0, 1]]], dtype=torch.float32), + torch.tensor([1]), + "mean", + torch.tensor(0.693147, dtype=torch.float32), + ), + ( + torch.tensor([[[0, 1], [1, 0]]], dtype=torch.float32), + torch.tensor([[[1, 0], [0, 1]]], dtype=torch.float32), + torch.tensor([2]), + "mean", + torch.tensor(100, dtype=torch.float32), + ), + ], + ) + def test_loss(self, probs, labels, target_lens, reduction, expected_output): + loss = BCELoss(reduction=reduction) + result = loss(probs=probs, labels=labels, target_lens=target_lens) + assert torch.allclose(result, expected_output), f"Expected {expected_output}, but got {result}" diff --git a/tests/collections/speaker_tasks/test_diar_lhotse_datasets.py b/tests/collections/speaker_tasks/test_diar_lhotse_datasets.py new file mode 100644 index 000000000000..281742be9174 --- /dev/null +++ b/tests/collections/speaker_tasks/test_diar_lhotse_datasets.py @@ -0,0 +1,173 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
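A note on the TestBCELoss expectations above: with every probability fixed at 0.5, the per-element binary cross-entropy is -ln(0.5) = ln 2 ≈ 0.693147, which is the expected mean in the first case. In the second case target_lens = [1] restricts scoring to the first frame, so the perfectly predicted second frame is ignored and the mean stays at ln 2. The third case scores exact 0/1 probabilities against flipped labels; the expected value of 100 presumably comes from clamping of the log terms inside BCELoss (an assumption about the implementation, not verified here), which caps what would otherwise be an infinite loss.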
+ +import json +import os +import tempfile +from unittest import mock + +import pytest +import torch +import torch.cuda +from omegaconf import DictConfig + +from nemo.collections.asr.data.audio_to_diar_label_lhotse import LhotseAudioToSpeechE2ESpkDiarDataset +from nemo.collections.common.data.lhotse import get_lhotse_dataloader_from_config + + +def get_train_ds_config(manifest_filepath, batch_size, num_workers) -> DictConfig: + return DictConfig( + { + 'manifest_filepath': manifest_filepath, + 'sample_rate': 16000, + 'num_spks': 4, + 'session_len_sec': 90, + 'soft_label_thres': 0.5, + 'soft_targets': False, + 'labels': None, + 'batch_size': batch_size, + 'shuffle': True, + 'num_workers': num_workers, + 'validation_mode': False, + 'use_lhotse': True, + 'use_bucketing': True, + 'num_buckets': 10, + 'bucket_duration_bins': [10, 20, 30, 40, 50, 60, 70, 80, 90], + 'pin_memory': True, + 'min_duration': 80, + 'max_duration': 90, + 'batch_duration': 400, + 'quadratic_duration': 1200, + 'bucket_buffer_size': 20000, + 'shuffle_buffer_size': 10000, + 'window_stride': 0.01, + 'subsampling_factor': 8, + } + ) + + +def get_validation_ds_config(manifest_filepath, batch_size, num_workers) -> DictConfig: + return DictConfig( + { + 'manifest_filepath': manifest_filepath, + 'is_tarred': False, + 'tarred_audio_filepaths': None, + 'sample_rate': 16000, + 'num_spks': 4, + 'session_len_sec': 90, + 'soft_label_thres': 0.5, + 'soft_targets': False, + 'labels': None, + 'batch_size': batch_size, + 'shuffle': False, + 'seq_eval_mode': True, + 'num_workers': num_workers, + 'validation_mode': True, + 'use_lhotse': False, + 'use_bucketing': False, + 'drop_last': False, + 'pin_memory': True, + 'window_stride': 0.01, + 'subsampling_factor': 8, + } + ) + + +def get_test_ds_config(manifest_filepath, batch_size, num_workers) -> DictConfig: + return DictConfig( + { + 'manifest_filepath': manifest_filepath, + 'is_tarred': False, + 'tarred_audio_filepaths': None, + 'sample_rate': 16000, + 'num_spks': 4, + 'session_len_sec': 90, + 'soft_label_thres': 0.5, + 'soft_targets': False, + 'labels': None, + 'batch_size': batch_size, + 'shuffle': False, + 'seq_eval_mode': True, + 'num_workers': num_workers, + 'validation_mode': True, + 'use_lhotse': False, + 'use_bucketing': False, + 'drop_last': False, + 'pin_memory': True, + 'window_stride': 0.01, + 'subsampling_factor': 8, + } + ) + + +class TestLhotseAudioToSpeechE2ESpkDiarDataset: + + @pytest.mark.unit + @pytest.mark.parametrize( + "batch_size, num_workers, split", + [ + (4, 8, 'train'), # Example 1 + (4, 0, 'train'), # Example 2 + (2, 4, 'validation'), # Example 3 + (8, 2, 'test'), # Example 4 + ], + ) + def test_e2e_speaker_diar_lhotse_dataset(self, test_data_dir, batch_size, num_workers, split): + manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/diarizer/lsm_val.json')) + num_samples = 8 + device = 'gpu' if torch.cuda.is_available() else 'cpu' + data_dict_list = [] + with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as f: + with open(manifest_path, 'r', encoding='utf-8') as mfile: + for ix, line in enumerate(mfile): + if ix >= num_samples: + break + + line = line.replace("tests/data/", test_data_dir + "/").replace("\n", "") + f.write(f"{line}\n") + data_dict = json.loads(line) + data_dict_list.append(data_dict) + + f.seek(0) + config = None + if split == 'train': + config = get_train_ds_config(manifest_filepath=f.name, batch_size=batch_size, num_workers=num_workers) + elif split == 'validation': + config = get_train_ds_config(manifest_filepath=f.name, 
batch_size=batch_size, num_workers=num_workers) + elif split == 'test': + config = get_test_ds_config(manifest_filepath=f.name, batch_size=batch_size, num_workers=num_workers) + + dataloader_instance = get_lhotse_dataloader_from_config( + config, + global_rank=0, + world_size=1, + dataset=LhotseAudioToSpeechE2ESpkDiarDataset(cfg=config), + ) + + deviation_thres_rate = 0.01 # 1% deviation allowed + for batch_index, batch in enumerate(dataloader_instance): + audio_signals, audio_signal_len, targets, target_lens = batch + for sample_index in range(audio_signals.shape[0]): + dataloader_audio_in_sec = audio_signal_len[sample_index].item() + data_dur_in_sec = abs( + data_dict_list[batch_size * batch_index + sample_index]['duration'] * config.sample_rate + - dataloader_audio_in_sec + ) + assert ( + data_dur_in_sec <= deviation_thres_rate * dataloader_audio_in_sec + ), "Duration deviation exceeds 1%" + assert not torch.isnan(audio_signals).any(), "audio_signals tensor contains NaN values" + assert not torch.isnan(audio_signal_len).any(), "audio_signal_len tensor contains NaN values" + assert not torch.isnan(targets).any(), "targets tensor contains NaN values" + assert not torch.isnan(target_lens).any(), "target_lens tensor contains NaN values" diff --git a/tests/collections/asr/test_diar_metrics.py b/tests/collections/speaker_tasks/test_diar_metrics.py similarity index 100% rename from tests/collections/asr/test_diar_metrics.py rename to tests/collections/speaker_tasks/test_diar_metrics.py diff --git a/tests/collections/asr/test_diar_neural_inference.py b/tests/collections/speaker_tasks/test_diar_neural_inference.py similarity index 87% rename from tests/collections/asr/test_diar_neural_inference.py rename to tests/collections/speaker_tasks/test_diar_neural_inference.py index 076eac129293..64c1196cd9a6 100644 --- a/tests/collections/asr/test_diar_neural_inference.py +++ b/tests/collections/speaker_tasks/test_diar_neural_inference.py @@ -28,13 +28,16 @@ class TestNeuralDiarizerInference: torch.device("cpu"), pytest.param( torch.device("cuda"), - marks=pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA required for test.',), + marks=pytest.mark.skipif( + not torch.cuda.is_available(), + reason='CUDA required for test.', + ), ), ], ) @pytest.mark.parametrize("num_speakers", [None, 1]) @pytest.mark.parametrize("max_num_speakers", [4]) - def test_diar_inference(self, tmpdir, test_data_dir, device, num_speakers, max_num_speakers): + def test_msdd_diar_inference(self, tmpdir, test_data_dir, device, num_speakers, max_num_speakers): """ Test to ensure diarization inference works correctly. - Ensures multiple audio files can be diarized sequentially @@ -69,3 +72,6 @@ def test_diar_inference(self, tmpdir, test_data_dir, device, num_speakers, max_n # assert only 1 speaker & segment assert len(annotation.labels()) == 1 assert len(list(annotation.itersegments())) == 1 + + # class TestSortformerDiarizerInference: + # TODO: This test can only be implemented once SortformerDiarizer model is uploaded. diff --git a/tests/collections/speaker_tasks/test_diar_sortformer_models.py b/tests/collections/speaker_tasks/test_diar_sortformer_models.py new file mode 100644 index 000000000000..41bd1537f16a --- /dev/null +++ b/tests/collections/speaker_tasks/test_diar_sortformer_models.py @@ -0,0 +1,168 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import torch +from omegaconf import DictConfig + +from nemo.collections.asr.models import SortformerEncLabelModel + + +@pytest.fixture() +def sortformer_model(): + + model = { + 'sample_rate': 16000, + 'pil_weight': 0.5, + 'ats_weight': 0.5, + 'max_num_of_spks': 4, + } + model_defaults = { + 'fc_d_model': 512, + 'tf_d_model': 192, + } + preprocessor = { + '_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', + 'normalize': 'per_feature', + 'window_size': 0.025, + 'sample_rate': 16000, + 'window_stride': 0.01, + 'window': 'hann', + 'features': 80, + 'n_fft': 512, + 'frame_splicing': 1, + 'dither': 0.00001, + } + + sortformer_modules = { + '_target_': 'nemo.collections.asr.modules.sortformer_modules.SortformerModules', + 'num_spks': model['max_num_of_spks'], + 'dropout_rate': 0.5, + 'fc_d_model': model_defaults['fc_d_model'], + 'tf_d_model': model_defaults['tf_d_model'], + } + + encoder = { + '_target_': 'nemo.collections.asr.modules.ConformerEncoder', + 'feat_in': preprocessor['features'], + 'feat_out': -1, + 'n_layers': 18, + 'd_model': model_defaults['fc_d_model'], + 'subsampling': 'dw_striding', + 'subsampling_factor': 8, + 'subsampling_conv_channels': 256, + 'causal_downsampling': False, + 'ff_expansion_factor': 4, + 'self_attention_model': 'rel_pos', + 'n_heads': 8, + 'att_context_size': [-1, -1], + 'att_context_style': 'regular', + 'xscaling': True, + 'untie_biases': True, + 'pos_emb_max_len': 5000, + 'conv_kernel_size': 9, + 'conv_norm_type': 'batch_norm', + 'conv_context_size': None, + 'dropout': 0.1, + 'dropout_pre_encoder': 0.1, + 'dropout_emb': 0.0, + 'dropout_att': 0.1, + 'stochastic_depth_drop_prob': 0.0, + 'stochastic_depth_mode': 'linear', + 'stochastic_depth_start_layer': 1, + } + + transformer_encoder = { + '_target_': 'nemo.collections.asr.modules.transformer.transformer_encoders.TransformerEncoder', + 'num_layers': 18, + 'hidden_size': model_defaults['tf_d_model'], + 'inner_size': 768, + 'num_attention_heads': 8, + 'attn_score_dropout': 0.5, + 'attn_layer_dropout': 0.5, + 'ffn_dropout': 0.5, + 'hidden_act': 'relu', + 'pre_ln': False, + 'pre_ln_final_layer_norm': True, + } + + loss = { + '_target_': 'nemo.collections.asr.losses.bce_loss.BCELoss', + 'weight': None, + 'reduction': 'mean', + } + + modelConfig = DictConfig( + { + 'sample_rate': 16000, + 'pil_weight': 0.5, + 'ats_weight': 0.5, + 'max_num_of_spks': 4, + 'model_defaults': DictConfig(model_defaults), + 'encoder': DictConfig(encoder), + 'transformer_encoder': DictConfig(transformer_encoder), + 'sortformer_modules': DictConfig(sortformer_modules), + 'preprocessor': DictConfig(preprocessor), + 'loss': DictConfig(loss), + 'optim': { + 'optimizer': 'Adam', + 'lr': 0.001, + 'betas': (0.9, 0.98), + }, + } + ) + model = SortformerEncLabelModel(cfg=modelConfig) + return model + + +class TestSortformerEncLabelModel: + @pytest.mark.unit + def test_constructor(self, sortformer_model): + sortformer_diar_model = sortformer_model.train() + confdict = sortformer_diar_model.to_config_dict() + instance2 = SortformerEncLabelModel.from_config_dict(confdict) + assert 
isinstance(instance2, SortformerEncLabelModel) + + @pytest.mark.unit + @pytest.mark.parametrize( + "batch_size, frame_length, sample_len", + [ + (4, 0.08, 16), # Example 1 + (2, 0.02, 32), # Example 2 + (1, 0.1, 20), # Example 3 + ], + ) + def test_forward_infer(self, sortformer_model, batch_size, frame_length, sample_len, num_spks=4): + sortformer_diar_model = sortformer_model.eval() + confdict = sortformer_diar_model.to_config_dict() + sampling_rate = confdict['preprocessor']['sample_rate'] + input_signal = torch.randn(size=(batch_size, sample_len * sampling_rate)) + input_signal_length = (sample_len * sampling_rate) * torch.ones(batch_size, dtype=torch.int) + + with torch.no_grad(): + # batch size 1 + preds_list = [] + for i in range(input_signal.size(0)): + preds = sortformer_diar_model.forward(input_signal[i : i + 1], input_signal_length[i : i + 1]) + preds_list.append(preds) + preds_instance = torch.cat(preds_list, 0) + + # batch size 4 + preds_batch = sortformer_diar_model.forward(input_signal, input_signal_length) + assert preds_instance.shape == preds_batch.shape + + diff = torch.mean(torch.abs(preds_instance - preds_batch)) + assert diff <= 1e-6 + diff = torch.max(torch.abs(preds_instance - preds_batch)) + assert diff <= 1e-6 diff --git a/tests/collections/asr/test_speaker_label_models.py b/tests/collections/speaker_tasks/test_speaker_label_models.py similarity index 95% rename from tests/collections/asr/test_speaker_label_models.py rename to tests/collections/speaker_tasks/test_speaker_label_models.py index 29b5c9eea643..81a051e32e66 100644 --- a/tests/collections/asr/test_speaker_label_models.py +++ b/tests/collections/speaker_tasks/test_speaker_label_models.py @@ -96,7 +96,11 @@ def test_ecapa_enc_dec(self): } modelConfig = DictConfig( - {'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder),} + { + 'preprocessor': DictConfig(preprocessor), + 'encoder': DictConfig(encoder), + 'decoder': DictConfig(decoder), + } ) speaker_model = EncDecSpeakerLabelModel(cfg=modelConfig) speaker_model.train() @@ -142,7 +146,11 @@ def test_titanet_enc_dec(self): } modelConfig = DictConfig( - {'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder),} + { + 'preprocessor': DictConfig(preprocessor), + 'encoder': DictConfig(encoder), + 'decoder': DictConfig(decoder), + } ) speaker_model = EncDecSpeakerLabelModel(cfg=modelConfig) speaker_model.train() diff --git a/tests/collections/speaker_tasks/utils/test_data_simul_utils.py b/tests/collections/speaker_tasks/utils/test_data_simul_utils.py new file mode 100644 index 000000000000..9a27820cdfa1 --- /dev/null +++ b/tests/collections/speaker_tasks/utils/test_data_simul_utils.py @@ -0,0 +1,549 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import numpy as np +import pytest +import torch +from omegaconf import DictConfig + +from nemo.collections.asr.parts.utils.data_simulation_utils import ( + DataAnnotator, + SpeechSampler, + add_silence_to_alignments, + binary_search_alignments, + get_cleaned_base_path, + get_split_points_in_alignments, + normalize_audio, + read_noise_manifest, +) +from nemo.collections.asr.parts.utils.manifest_utils import get_ctm_line + + +@pytest.fixture() +def annotator(): + cfg = get_data_simulation_configs() + return DataAnnotator(cfg) + + +@pytest.fixture() +def sampler(): + cfg = get_data_simulation_configs() + sampler = SpeechSampler(cfg) + # Must get session-wise randomized silence/overlap mean + sampler.get_session_overlap_mean() + sampler.get_session_silence_mean() + return sampler + + +def get_data_simulation_configs(): + config_dict = { + 'data_simulator': { + 'manifest_filepath': '???', + 'sr': 16000, + 'random_seed': 42, + 'multiprocessing_chunksize': 10000, + 'session_config': {'num_speakers': 4, 'num_sessions': 60, 'session_length': 600}, + 'session_params': { + 'max_audio_read_sec': 20, + 'sentence_length_params': [0.4, 0.05], + 'dominance_var': 0.11, + 'min_dominance': 0.05, + 'turn_prob': 0.875, + 'min_turn_prob': 0.5, + 'mean_silence': 0.15, + 'mean_silence_var': 0.01, + 'per_silence_var': 900, + 'per_silence_min': 0.0, + 'per_silence_max': -1, + 'mean_overlap': 0.1, + 'mean_overlap_var': 0.01, + 'per_overlap_var': 900, + 'per_overlap_min': 0.0, + 'per_overlap_max': -1, + 'start_window': True, + 'window_type': 'hamming', + 'window_size': 0.05, + 'start_buffer': 0.1, + 'split_buffer': 0.1, + 'release_buffer': 0.1, + 'normalize': True, + 'normalization_type': 'equal', + 'normalization_var': 0.1, + 'min_volume': 0.75, + 'max_volume': 1.25, + 'end_buffer': 0.5, + }, + 'outputs': { + 'output_dir': '???', + 'output_filename': 'multispeaker_session', + 'overwrite_output': True, + 'output_precision': 3, + }, + 'background_noise': { + 'add_bg': False, + 'background_manifest': None, + 'num_noise_files': 10, + 'snr': 60, + 'snr_min': None, + }, + 'segment_augmentor': { + 'add_seg_aug': False, + 'augmentor': { + 'gain': {'prob': 0.5, 'min_gain_dbfs': -10.0, 'max_gain_dbfs': 10.0}, + }, + }, + 'session_augmentor': { + 'add_sess_aug': False, + 'augmentor': { + 'white_noise': {'prob': 1.0, 'min_level': -90, 'max_level': -46}, + }, + }, + 'speaker_enforcement': {'enforce_num_speakers': True, 'enforce_time': [0.25, 0.75]}, + 'segment_manifest': {'window': 0.5, 'shift': 0.25, 'step_count': 50, 'deci': 3}, + } + } + return DictConfig(config_dict) + + +def generate_words_and_alignments(sample_index): + if sample_index == 0: + words = ['', 'hello', 'world'] + alignments = [0.5, 1.0, 1.5] + elif sample_index == 1: + words = ["", "stephanos", "dedalos", ""] + alignments = [0.51, 1.31, 2.04, 2.215] + elif sample_index == 2: + words = ['', 'hello', 'world', '', 'welcome', 'to', 'nemo', ''] + alignments = [0.5, 1.0, 1.5, 1.7, 1.8, 2.2, 2.7, 2.8] + else: + raise ValueError(f"sample_index {sample_index} not supported") + speaker_id = 'speaker_0' + return words, alignments, speaker_id + + +class TestGetCtmLine: + @pytest.mark.unit + @pytest.mark.parametrize("conf", [0, 1]) + def test_wrong_type_conf_values(self, conf): + # Test with wrong integer confidence values + with pytest.raises(ValueError): + result = get_ctm_line( + source="test_source", + channel=1, + start_time=0.123, + duration=0.456, + token="word", + conf=conf, + type_of_token="lex", + speaker="speaker1", + ) + expected = f"test_source 
1 0.12 0.46 word {conf} lex speaker1\n" + assert result == expected, f"Failed on valid conf value {conf}" + + @pytest.mark.unit + @pytest.mark.parametrize("conf", [0.0, 0.5, 1.0, 0.01, 0.99]) + def test_valid_conf_values(self, conf): + # Test with valid confidence values + output_precision = 2 + result = get_ctm_line( + source="test_source", + channel=1, + start_time=0.123, + duration=0.456, + token="word", + conf=conf, + type_of_token="lex", + speaker="speaker1", + output_precision=output_precision, + ) + expected = "test_source 1 0.12 0.46 word" + f" {conf:.{output_precision}f} lex speaker1\n" + assert result == expected, f"Failed on valid conf value {conf}" + + @pytest.mark.unit + @pytest.mark.parametrize("conf", [-0.1, 1.1, 2, -1, 100, -100]) + def test_invalid_conf_ranges(self, conf): + # Test with invalid confidence values + with pytest.raises(ValueError): + get_ctm_line( + source="test_source", + channel=1, + start_time=0.123, + duration=0.456, + token="word", + conf=conf, + type_of_token="lex", + speaker="speaker1", + ) + + @pytest.mark.unit + @pytest.mark.parametrize( + "start_time, duration, output_precision", + [(0.123, 0.456, 2), (1.0, 2.0, 1), (0.0, 0.0, 2), (0.01, 0.99, 3), (1.23, 4.56, 2)], + ) + def test_valid_start_time_duration_with_precision(self, start_time, duration, output_precision): + # Test with valid beginning time, duration values and output precision + confidence = 0.5 + result = get_ctm_line( + source="test_source", + channel=1, + start_time=start_time, + duration=duration, + token="word", + conf=confidence, + type_of_token="lex", + speaker="speaker1", + output_precision=output_precision, + ) + expected_start_time = ( + f"{start_time:.{output_precision}f}" # Adjusted to match the output format with precision + ) + expected_duration = f"{duration:.{output_precision}f}" # Adjusted to match the output format with precision + expected_confidence = ( + f"{confidence:.{output_precision}f}" # Adjusted to match the output format with precision + ) + expected = f"test_source 1 {expected_start_time} {expected_duration} word {expected_confidence} lex speaker1\n" + assert ( + result == expected + ), f"Failed on valid start_time {start_time}, duration {duration} with precision {output_precision}" + + @pytest.mark.unit + def test_valid_input(self): + # Test with completely valid inputs + result = get_ctm_line( + source="test_source", + channel=1, + start_time=0.123, + duration=0.456, + token="word", + conf=0.789, + type_of_token="lex", + speaker="speaker1", + ) + expected = "test_source 1 0.12 0.46 word 0.79 lex speaker1\n" + assert result == expected, "Failed on valid input" + + @pytest.mark.unit + @pytest.mark.parametrize( + "start_time, duration", + [ + ("not a float", 1.0), + (1.0, "not a float"), + (1, 2.0), # Integers should be converted to float + (2.0, 3), # Same as above + ], + ) + def test_invalid_types_for_time_duration(self, start_time, duration): + # Test with invalid types for start_time and duration + with pytest.raises(ValueError): + get_ctm_line( + source="test_source", + channel=1, + start_time=start_time, + duration=duration, + token="word", + conf=0.5, + type_of_token="lex", + speaker="speaker1", + ) + + @pytest.mark.unit + @pytest.mark.parametrize("conf", [-0.1, 1.1, "not a float"]) + def test_invalid_conf_values(self, conf): + # Test with invalid values for conf + with pytest.raises(ValueError): + get_ctm_line( + source="test_source", + channel=1, + start_time=0.123, + duration=0.456, + token="word", + conf=conf, + type_of_token="lex", + 
speaker="speaker1", + ) + + @pytest.mark.unit + def test_default_values(self): + # Test with missing optional parameters + result = get_ctm_line( + source="test_source", + channel=None, + start_time=0.123, + duration=0.456, + token="word", + conf=None, + type_of_token=None, + speaker=None, + ) + expected = "test_source 1 0.12 0.46 word NA unknown NA\n" + assert result == expected, "Failed on default values" + + +class TestDataSimulatorUtils: + # TODO: add tests for all util functions + @pytest.mark.parametrize("max_audio_read_sec", [2.5, 3.5, 4.5]) + @pytest.mark.parametrize("min_alignment_count", [2, 3, 4]) + def test_binary_search_alignments(self, max_audio_read_sec, min_alignment_count): + inds = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + alignments = [0.5, 11.0, 11.5, 12.0, 13.0, 14.0, 14.5, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 30, 40.0] + offset_max = binary_search_alignments(inds, max_audio_read_sec, min_alignment_count, alignments) + assert max_audio_read_sec <= alignments[-1 * min_alignment_count] - alignments[inds[offset_max]] + + @pytest.mark.parametrize("sample_len", [100, 16000]) + @pytest.mark.parametrize("gain", [0.1, 0.5, 1.0, 2.0, 5.0]) + def test_normalize_audio(self, sample_len, gain): + array_raw = np.random.randn(sample_len) + array_input = torch.from_numpy(gain * array_raw / np.max(np.abs(array_raw))) + norm_array = normalize_audio(array_input) + assert torch.max(torch.abs(norm_array)) == 1.0 + assert torch.min(torch.abs(norm_array)) < 1.0 + + @pytest.mark.parametrize("output_dir", [os.path.join(os.getcwd(), "test_dir")]) + def test_get_cleaned_base_path(self, output_dir): + result_path = get_cleaned_base_path(output_dir, overwrite_output=True) + assert os.path.exists(result_path) and not os.path.isfile(result_path) + result_path = get_cleaned_base_path(output_dir, overwrite_output=False) + assert os.path.exists(result_path) and not os.path.isfile(result_path) + os.rmdir(result_path) + assert not os.path.exists(result_path) + + @pytest.mark.parametrize( + "words, alignments, answers", + [ + (['', 'hello', 'world'], [0.5, 1.0, 1.5], [[0, 16000.0]]), + ( + ['', 'hello', 'world', '', 'welcome', 'to', 'nemo', ''], + [0.27, 1.0, 1.7, 2.7, 2.8, 3.2, 3.7, 3.9], + [[0, (1.7 + 0.5) * 16000], [(2.7 - 0.5) * 16000, (3.9 - 0.27) * 16000]], + ), + ], + ) + @pytest.mark.parametrize("sr", [16000]) + @pytest.mark.parametrize("split_buffer", [0.5]) + @pytest.mark.parametrize("new_start", [0.0]) + def test_get_split_points_in_alignments(self, words, alignments, sr, new_start, split_buffer, answers): + sentence_audio_len = sr * (alignments[-1] - alignments[0]) + splits = get_split_points_in_alignments(words, alignments, split_buffer, sr, sentence_audio_len, new_start) + assert len(splits) == len(answers) + for k, interval in enumerate(splits): + assert abs(answers[k][0] - interval[0]) < 1e-4 + assert abs(answers[k][1] - interval[1]) < 1e-4 + + @pytest.mark.parametrize( + "alignments, words", [(['hello', 'world'], [1.0, 1.5]), (['', 'hello', 'world'], [0.0, 1.0, 1.5])] + ) + def test_add_silence_to_alignments(self, alignments, words): + """ + Test add_silence_to_alignments function. 
+ """ + audio_manifest = { + 'audio_filepath': 'test.wav', + 'alignments': alignments, + 'words': words, + } + audio_manifest = add_silence_to_alignments(audio_manifest) + if words[0] == '': + assert audio_manifest['alignments'] == [0.0] + alignments + assert audio_manifest['words'] == [''] + words + else: + assert audio_manifest['alignments'] == alignments + assert audio_manifest['words'] == words + + +class TestDataAnnotator: + def test_init(self, annotator): + assert isinstance(annotator, DataAnnotator) + + def test_create_new_rttm_entry(self, annotator): + words, alignments, speaker_id = generate_words_and_alignments(sample_index=0) + start, end = alignments[0], alignments[-1] + rttm_list = annotator.create_new_rttm_entry( + words=words, alignments=alignments, start=start, end=end, speaker_id=speaker_id + ) + assert rttm_list[0] == f"{start} {end} {speaker_id}" + + def test_create_new_json_entry(self, annotator): + words, alignments, speaker_id = generate_words_and_alignments(sample_index=0) + start, end = alignments[0], alignments[-1] + test_wav_filename = '/path/to/test_wav_filename.wav' + test_rttm_filename = '/path/to/test_rttm_filename.rttm' + test_ctm_filename = '/path/to/test_ctm_filename.ctm' + text = " ".join(words) + + one_line_json_dict = annotator.create_new_json_entry( + text=text, + wav_filename=test_wav_filename, + start=start, + length=end - start, + speaker_id=speaker_id, + rttm_filepath=test_rttm_filename, + ctm_filepath=test_ctm_filename, + ) + start = round(float(start), annotator._params.data_simulator.outputs.output_precision) + length = round(float(end - start), annotator._params.data_simulator.outputs.output_precision) + meta = { + "audio_filepath": test_wav_filename, + "offset": start, + "duration": length, + "label": speaker_id, + "text": text, + "num_speakers": annotator._params.data_simulator.session_config.num_speakers, + "rttm_filepath": test_rttm_filename, + "ctm_filepath": test_ctm_filename, + "uem_filepath": None, + } + assert one_line_json_dict == meta + + def test_create_new_ctm_entry(self, annotator): + words, alignments, speaker_id = generate_words_and_alignments(sample_index=0) + session_name = 'test_session' + ctm_list = annotator.create_new_ctm_entry( + words=words, alignments=alignments, session_name=session_name, speaker_id=speaker_id, start=alignments[0] + ) + assert ctm_list[0] == ( + alignments[1], + get_ctm_line( + source=session_name, + channel="1", + start_time=alignments[1], + duration=float(alignments[1] - alignments[0]), + token=words[1], + conf=None, + type_of_token='lex', + speaker=speaker_id, + ), + ) + assert ctm_list[1] == ( + alignments[2], + get_ctm_line( + source=session_name, + channel="1", + start_time=alignments[2], + duration=float(alignments[2] - alignments[1]), + token=words[2], + conf=None, + type_of_token='lex', + speaker=speaker_id, + ), + ) + + +class TestSpeechSampler: + def test_init(self, sampler): + assert isinstance(sampler, SpeechSampler) + + def test_init_overlap_params(self, sampler): + sampler._init_overlap_params() + assert sampler.per_silence_min_len is not None + assert sampler.per_silence_max_len is not None + assert type(sampler.per_silence_min_len) == int + assert type(sampler.per_silence_max_len) == int + + def test_init_silence_params(self, sampler): + sampler._init_overlap_params() + assert sampler.per_overlap_min_len is not None + assert sampler.per_overlap_max_len is not None + assert type(sampler.per_overlap_min_len) == int + assert type(sampler.per_overlap_max_len) == int + + 
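The session-mean tests below lean on the constraint spelled out in their docstrings: the sampled variance must satisfy var < mean * (1 - mean). Working the numbers: the passing silence/overlap cases use mean <= 0.3 with var <= 0.07, and 0.3 * 0.7 = 0.21 > 0.07, so sampling succeeds; the failing cases use mean = 0.4 or 0.5 with var >= 0.3, and 0.5 * 0.5 = 0.25 (or 0.4 * 0.6 = 0.24) is below those variances, hence the expected ValueError.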
@pytest.mark.parametrize("mean", [0.1, 0.2, 0.3]) + @pytest.mark.parametrize("var", [0.05, 0.07]) + def test_get_session_silence_mean_pass(self, sampler, mean, var): + sampler.mean_silence = mean + sampler.mean_silence_var = var + sampled_silence_mean = sampler.get_session_silence_mean() + assert 0 <= sampled_silence_mean <= 1 + + @pytest.mark.parametrize("mean", [0.5]) + @pytest.mark.parametrize("var", [0.5, 0.6]) + def test_get_session_silence_mean_fail(self, sampler, mean, var): + """ + This test should raise `ValueError` because `mean_silence_var` + should be less than `mean_silence * (1 - mean_silence)`. + """ + sampler.mean_silence = mean + sampler.mean_silence_var = var + with pytest.raises(ValueError) as execinfo: + sampler.get_session_silence_mean() + assert "ValueError" in str(execinfo) and "mean_silence_var" in str(execinfo) + + @pytest.mark.parametrize("mean", [0.1, 0.2, 0.3]) + @pytest.mark.parametrize("var", [0.05, 0.07]) + def test_get_session_overlap_mean_pass(self, sampler, mean, var): + sampler.mean_overlap = mean + sampler.mean_overlap_var = var + sampled_overlap_mean = sampler.get_session_overlap_mean() + assert 0 <= sampled_overlap_mean <= 1 + + @pytest.mark.parametrize("mean", [0.4, 0.5]) + @pytest.mark.parametrize("var", [0.3, 0.8]) + def test_get_session_overlap_mean_fail(self, sampler, mean, var): + """ + This test should raise `ValueError` because `mean_overlap_var` + should be less than `mean_overlap * (1 - mean_overlap)`. + """ + sampler.mean_overlap = mean + sampler.mean_overlap_var = var + sampler._params = DictConfig(sampler._params) + with pytest.raises(ValueError) as execinfo: + sampler.get_session_overlap_mean() + assert "ValueError" in str(execinfo) and "mean_overlap_var" in str(execinfo) + + @pytest.mark.parametrize("non_silence_len_samples", [16000, 32000]) + @pytest.mark.parametrize("running_overlap_len_samples", [8000, 12000]) + def test_sample_from_overlap_model(self, sampler, non_silence_len_samples, running_overlap_len_samples): + sampler.get_session_overlap_mean() + sampler.running_overlap_len_samples = running_overlap_len_samples + overlap_amount = sampler.sample_from_overlap_model(non_silence_len_samples=non_silence_len_samples) + assert type(overlap_amount) == int + assert 0 <= overlap_amount + + @pytest.mark.parametrize("running_len_samples", [8000, 16000]) + @pytest.mark.parametrize("running_overlap_len_samples", [8000, 12000]) + def test_sample_from_silence_model(self, sampler, running_len_samples, running_overlap_len_samples): + sampler.get_session_silence_mean() + self.running_overlap_len_samples = running_overlap_len_samples + silence_amount = sampler.sample_from_silence_model(running_len_samples=running_len_samples) + assert type(silence_amount) == int + assert 0 <= silence_amount + + @pytest.mark.with_downloads() + @pytest.mark.parametrize("num_noise_files", [1, 2, 4]) + def test_sample_noise_manifest(self, sampler, num_noise_files, test_data_dir): + sampler.num_noise_files = num_noise_files + manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/an4_val.json')) + noise_manifest = read_noise_manifest(add_bg=True, background_manifest=manifest_path) + sampled_noise_manifests = sampler.sample_noise_manifest(noise_manifest=noise_manifest) + assert len(sampled_noise_manifests) == num_noise_files + + @pytest.mark.parametrize("running_speech_len_samples", [32000, 64000]) + @pytest.mark.parametrize("running_overlap_len_samples", [16000, 32000]) + @pytest.mark.parametrize("running_len_samples", [64000, 96000]) + 
@pytest.mark.parametrize("non_silence_len_samples", [16000, 32000]) + def test_silence_vs_overlap_selector( + self, + sampler, + running_overlap_len_samples, + running_speech_len_samples, + running_len_samples, + non_silence_len_samples, + ): + sampler.running_overlap_len_samples = running_overlap_len_samples + sampler.running_speech_len_samples = running_speech_len_samples + add_overlap = sampler.silence_vs_overlap_selector( + running_len_samples=running_len_samples, non_silence_len_samples=non_silence_len_samples + ) + assert type(add_overlap) == bool diff --git a/tests/collections/asr/test_diar_utils.py b/tests/collections/speaker_tasks/utils/test_diar_utils.py similarity index 92% rename from tests/collections/asr/test_diar_utils.py rename to tests/collections/speaker_tasks/utils/test_diar_utils.py index cb364675fcf4..71ae2dc16d8e 100644 --- a/tests/collections/asr/test_diar_utils.py +++ b/tests/collections/speaker_tasks/utils/test_diar_utils.py @@ -13,7 +13,6 @@ # limitations under the License. import os - import numpy as np import pytest import torch @@ -48,6 +47,7 @@ get_online_subsegments_from_buffer, get_speech_labels_for_update, get_sub_range_list, + get_subsegments, get_subsegments_scriptable, get_target_sig, int2fl, @@ -115,6 +115,10 @@ def generate_toy_data( emb = emb_cent.tile((len(segments), 1)) + 0.1 * torch.rand(len(segments), emb_dim) seg_list.extend(segments) emb_list.append(emb) + if emb.shape[0] == 0: + import ipdb + + ipdb.set_trace() multiscale_segment_counts[scale_idx] += emb.shape[0] if scale_idx == len(multiscale_segment_counts) - 1: @@ -377,6 +381,109 @@ def test_online_speaker_clustering_instance_export(self): isinstance(offline_speaker_clustering, torch.jit._script.RecursiveScriptClass) +class TestGetSubsegments: + @pytest.mark.unit + @pytest.mark.parametrize( + "offset, window, shift, duration, min_subsegment_duration, decimals, use_asr_style_frame_count, sample_rate, feat_per_sec, expected", + [ + (12.05, 1.5, 0.75, 2.4, 0.01, 2, False, 16000, 100, [[12.05, 1.5], [12.8, 1.5], [13.55, 0.9]]), + (0, 1.0, 0.5, 0.4, 0.01, 2, False, 16000, 100, [[0, 0.4]]), + (0, 2.0, 1.0, 1.5, 0.5, 2, False, 16000, 100, [[0, 1.5]]), + ( + 10, + 1.5, + 0.75, + 4.5, + 0.5, + 2, + False, + 16000, + 100, + [[10, 1.5], [10.75, 1.5], [11.5, 1.5], [12.25, 1.5], [13.0, 1.5]], + ), + (0, 1.5, 0.5, 0.3, 0.01, 2, True, 16000, 100, [[0, 0.3]]), + ], + ) + def test_get_subsegments( + self, + offset, + window, + shift, + duration, + min_subsegment_duration, + decimals, + use_asr_style_frame_count, + sample_rate, + feat_per_sec, + expected, + ): + + for is_scriptable in [True, False]: + if is_scriptable: + result = get_subsegments_scriptable( + offset=offset, + window=window, + shift=shift, + duration=duration, + ) + else: + result = get_subsegments( + offset=offset, + window=window, + shift=shift, + duration=duration, + min_subsegment_duration=min_subsegment_duration, + decimals=decimals, + use_asr_style_frame_count=use_asr_style_frame_count, + sample_rate=sample_rate, + feat_per_sec=feat_per_sec, + ) + result_round = [] + for subsegment in result: + result_round.append([round(x, decimals) for x in subsegment]) + assert result_round == expected + + @pytest.mark.unit + def test_min_subsegment_duration_filtering(self): + result = get_subsegments( + offset=0, + window=1.5, + shift=0.5, + duration=3, + min_subsegment_duration=2.0, + decimals=2, + use_asr_style_frame_count=False, + ) + expected = [] # Only subsegments meeting the duration filter should remain + assert result == expected + + 
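For reference, the parametrized expectations in TestGetSubsegments above encode simple sliding-window arithmetic. A minimal sketch that reproduces those expected lists (a hypothetical helper, not the NeMo implementation; it ignores use_asr_style_frame_count, sample_rate, and feat_per_sec):

def naive_subsegments(offset, window, shift, duration, min_subsegment_duration=0.01, decimals=2):
    # Emit [start, duration] pairs every `shift` seconds, truncating the last window
    # at offset + duration and dropping pieces shorter than min_subsegment_duration.
    subsegments = []
    start, end = offset, offset + duration
    while start < end:
        seg_dur = min(window, end - start)
        if seg_dur >= min_subsegment_duration:
            subsegments.append([round(start, decimals), round(seg_dur, decimals)])
        if start + window >= end:
            break
        start += shift
    return subsegments

# naive_subsegments(12.05, 1.5, 0.75, 2.4) -> [[12.05, 1.5], [12.8, 1.5], [13.55, 0.9]]
# naive_subsegments(0, 1.5, 0.5, 3, min_subsegment_duration=2.0) -> []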
@pytest.mark.unit + def test_zero_duration(self): + result = get_subsegments( + offset=0, + window=1.0, + shift=0.5, + duration=0, + min_subsegment_duration=0.01, + decimals=2, + use_asr_style_frame_count=False, + ) + assert result == [] + + @pytest.mark.unit + def test_edge_case_short_slice(self): + result = get_subsegments( + offset=0, + window=0.5, + shift=0.25, # Shift larger than duration + duration=0.25, + min_subsegment_duration=0.01, + decimals=2, + use_asr_style_frame_count=False, + ) + assert result == [[0.0, 0.25]] + + class TestDiarizationSegmentationUtils: """ Test segmentation util functions diff --git a/tests/collections/speaker_tasks/utils/test_multispeaker_utils.py b/tests/collections/speaker_tasks/utils/test_multispeaker_utils.py new file mode 100644 index 000000000000..2e01cf4b94da --- /dev/null +++ b/tests/collections/speaker_tasks/utils/test_multispeaker_utils.py @@ -0,0 +1,352 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools +import pytest +import torch + +from nemo.collections.asr.parts.utils.asr_multispeaker_utils import ( + find_best_permutation, + find_first_nonzero, + get_ats_targets, + get_hidden_length_from_sample_length, + get_pil_targets, + reconstruct_labels, +) + + +def reconstruct_labels_forloop(labels: torch.Tensor, batch_perm_inds: torch.Tensor) -> torch.Tensor: + """ + This is a for-loop implementation of reconstruct_labels built for testing purposes. 
+ """ + # Expanding batch_perm_inds to align with labels dimensions + batch_size, num_frames, num_speakers = labels.shape + batch_perm_inds_exp = batch_perm_inds.unsqueeze(1).expand(-1, num_frames, -1) + + # Reconstructing the labels using advanced indexing + reconstructed_labels = torch.gather(labels, 2, batch_perm_inds_exp) + return reconstructed_labels + + +class TestSortingUtils: + @pytest.mark.unit + @pytest.mark.parametrize( + "mat, max_cap_val, thres, expected", + [ + # Test 1: Basic case with clear first nonzero values + (torch.tensor([[0.1, 0.6, 0.0], [0.0, 0.0, 0.9]]), -1, 0.5, torch.tensor([1, 2])), + # Test 2: All elements are below threshold + (torch.tensor([[0.1, 0.2], [0.3, 0.4]]), -1, 0.5, torch.tensor([-1, -1])), + # Test 3: No nonzero elements, should return max_cap_val (-1) + (torch.tensor([[0.0, 0.0], [0.0, 0.0]]), -1, 0.5, torch.tensor([-1, -1])), + # Test 4: Large matrix with mixed values, some rows with all values below threshold + (torch.tensor([[0.1, 0.7, 0.3], [0.0, 0.0, 0.9], [0.5, 0.6, 0.7]]), -1, 0.5, torch.tensor([1, 2, 0])), + # Test 5: Single row matrix + (torch.tensor([[0.0, 0.0, 0.6]]), -1, 0.5, torch.tensor([2])), + # Test 6: Single column matrix + (torch.tensor([[0.1], [0.6], [0.0]]), -1, 0.5, torch.tensor([-1, 0, -1])), + # Test 7: One element matrix + (torch.tensor([[0.501]]), -1, 0.5, torch.tensor([0], dtype=torch.long)), + # Test 8: All values are zero, should return max_cap_val + (torch.tensor([[0.0, 0.0], [0.0, 0.0]]), -1, 0.5, torch.tensor([-1, -1])), + # Test 9: All values are above threshold + (torch.tensor([[0.6, 0.7], [0.8, 0.9]]), -1, 0.5, torch.tensor([0, 0])), + # Test 10: Custom max_cap_val different from default + (torch.tensor([[0.0, 0.0], [0.0, 0.0]]), 99, 0.5, torch.tensor([99, 99])), + # Test 11: Matrix with 101 columns, first nonzero value is towards the end + (torch.cat([torch.zeros(1, 100), torch.ones(1, 1)], dim=1), -1, 0.5, torch.tensor([100])), + # Test 12: Matrix with 1000 columns, all below threshold except one near the middle + ( + torch.cat([torch.zeros(1, 499), torch.tensor([[0.6]]), torch.zeros(1, 500)], dim=1), + -1, + 0.5, + torch.tensor([499]), + ), + ], + ) + def test_find_first_nonzero(self, mat, max_cap_val, thres, expected): + result = find_first_nonzero(mat, max_cap_val, thres) + assert torch.equal(result, expected), f"Expected {expected} but got {result}" + + @pytest.mark.unit + @pytest.mark.parametrize( + "match_score, speaker_permutations, expected", + [ + # Test 1: Simple case with batch size 1, clear best match + ( + torch.tensor([[0.1, 0.9, 0.2]]), # match_score (batch_size=1, num_permutations=3) + torch.tensor([[0, 1], [1, 0], [0, 1]]), # speaker_permutations (num_permutations=3, num_speakers=2) + torch.tensor([[1, 0]]), # expected best permutation for the batch + ), + # Test 2: Batch size 2, different best matches for each batch + ( + torch.tensor([[0.5, 0.3, 0.7], [0.2, 0.6, 0.4]]), # match_score (batch_size=2, num_permutations=3) + torch.tensor([[0, 1], [1, 0], [0, 1]]), # speaker_permutations + torch.tensor([[0, 1], [1, 0]]), # expected best permutations + ), + # Test 3: Larger number of speakers and permutations + ( + torch.tensor( + [[0.1, 0.4, 0.9, 0.5], [0.6, 0.3, 0.7, 0.2]] + ), # match_score (batch_size=2, num_permutations=4) + torch.tensor( + [[0, 1, 2], [1, 0, 2], [2, 1, 0], [1, 2, 0]] + ), # speaker_permutations (num_permutations=4, num_speakers=3) + torch.tensor([[2, 1, 0], [2, 1, 0]]), # expected best permutations + ), + # Test 4: All match scores are the same, should pick the first 
permutation (argmax behavior) + ( + torch.tensor([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]), # equal match_score across permutations + torch.tensor([[0, 1], [1, 0], [0, 1]]), # speaker_permutations + torch.tensor([[0, 1], [0, 1]]), # first permutation is chosen as tie-breaker + ), + # Test 5: Single speaker case (num_speakers = 1) + ( + torch.tensor([[0.8, 0.2]]), # match_score (batch_size=1, num_permutations=2) + torch.tensor([[0], [0]]), # speaker_permutations (num_permutations=2, num_speakers=1) + torch.tensor([[0]]), # expected best permutation + ), + # Test 6: Batch size 3, varying permutations + ( + torch.tensor([[0.3, 0.6], [0.4, 0.1], [0.2, 0.7]]), # match_score (batch_size=3, num_permutations=2) + torch.tensor([[0, 1], [1, 0]]), # speaker_permutations + torch.tensor([[1, 0], [0, 1], [1, 0]]), # expected best permutations for each batch + ), + ], + ) + def test_find_best_permutation(self, match_score, speaker_permutations, expected): + result = find_best_permutation(match_score, speaker_permutations) + assert torch.equal(result, expected), f"Expected {expected} but got {result}" + + @pytest.mark.parametrize( + "batch_size, num_frames, num_speakers", + [ + (2, 4, 3), # Original test case + (3, 5, 2), # More frames and speakers + (1, 6, 4), # Single batch with more frames and speakers + (5, 3, 5), # More batch size with equal frames and speakers + ], + ) + def test_reconstruct_labels_with_forloop_ver(self, batch_size, num_frames, num_speakers): + # Generate random labels and batch_perm_inds tensor for testing + labels = torch.rand(batch_size, num_frames, num_speakers) + batch_perm_inds = torch.stack([torch.randperm(num_speakers) for _ in range(batch_size)]) + + # Call both functions + result_matrix = reconstruct_labels(labels, batch_perm_inds) + result_forloop = reconstruct_labels_forloop(labels, batch_perm_inds) + + # Assert that both methods return the same result + assert torch.allclose(result_matrix, result_forloop), "The results are not equal!" 
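The permutation convention tested here: batch_perm_inds[b][j] names the original speaker column that should land in output column j, so reconstruct_labels amounts to a per-batch torch.gather along the speaker dimension. A one-frame illustration (the values mirror the first expected example below):

import torch
labels = torch.tensor([[[0.1, 0.2, 0.3]]])   # (batch=1, frames=1, speakers=3)
perm = torch.tensor([[2, 0, 1]])             # output column j takes input column perm[0][j]
torch.gather(labels, 2, perm.unsqueeze(1))   # -> tensor([[[0.3, 0.1, 0.2]]])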
+ + @pytest.mark.parametrize( + "labels, batch_perm_inds, expected_output", + [ + # Example 1: Small batch size with a few frames and speakers + ( + torch.tensor( + [ + [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]], # First batch + [[0.9, 0.8, 0.7], [0.6, 0.5, 0.4], [0.3, 0.2, 0.1]], # Second batch + ] + ), + torch.tensor([[2, 0, 1], [1, 2, 0]]), + torch.tensor( + [ + [[0.3, 0.1, 0.2], [0.6, 0.4, 0.5], [0.9, 0.7, 0.8]], # First batch reconstructed + [[0.8, 0.7, 0.9], [0.5, 0.4, 0.6], [0.2, 0.1, 0.3]], # Second batch reconstructed + ] + ), + ), + # Example 2: batch_size = 1 with more frames and speakers + ( + torch.tensor( + [[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2], [1.3, 1.4, 1.5, 1.6]]] + ), + torch.tensor([[3, 0, 1, 2]]), + torch.tensor( + [[[0.4, 0.1, 0.2, 0.3], [0.8, 0.5, 0.6, 0.7], [1.2, 0.9, 1.0, 1.1], [1.6, 1.3, 1.4, 1.5]]] + ), + ), + # Example 3: Larger batch size with fewer frames and speakers + ( + torch.tensor( + [ + [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], # First batch + [[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]], # Second batch + [[1.3, 1.4], [1.5, 1.6], [1.7, 1.8]], # Third batch + [[1.9, 2.0], [2.1, 2.2], [2.3, 2.4]], # Fourth batch + ] + ), + torch.tensor([[1, 0], [0, 1], [1, 0], [0, 1]]), + torch.tensor( + [ + [[0.2, 0.1], [0.4, 0.3], [0.6, 0.5]], # First batch reconstructed + [[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]], # Second batch unchanged + [[1.4, 1.3], [1.6, 1.5], [1.8, 1.7]], # Third batch reconstructed + [[1.9, 2.0], [2.1, 2.2], [2.3, 2.4]], # Fourth batch unchanged + ] + ), + ), + ], + ) + def test_reconstruct_labels(self, labels, batch_perm_inds, expected_output): + # Call the reconstruct_labels function + result = reconstruct_labels(labels, batch_perm_inds) + # Assert that the result matches the expected output + assert torch.allclose(result, expected_output), f"Expected {expected_output}, but got {result}" + + +class TestTargetGenerators: + + @pytest.mark.parametrize( + "labels, preds, num_speakers, expected_output", + [ + # Test 1: Basic case with simple permutations + ( + torch.tensor( + [ + [[0.9, 0.1, 0.0], [0.1, 0.8, 0.0], [0.0, 0.1, 0.9]], # Batch 1 + [[0.0, 0.0, 0.9], [0.0, 0.9, 0.1], [0.9, 0.1, 0.0]], # Batch 2 + ] + ), + torch.tensor( + [ + [[0.8, 0.2, 0.0], [0.2, 0.7, 0.0], [0.0, 0.1, 0.9]], # Batch 1 + [[0.0, 0.0, 0.8], [0.0, 0.8, 0.2], [0.9, 0.1, 0.0]], # Batch 2 + ] + ), + 3, # Number of speakers + torch.tensor( + [ + [[0.9, 0.1, 0.0], [0.1, 0.8, 0.0], [0.0, 0.1, 0.9]], # Expected labels for Batch 1 + [[0.9, 0.0, 0.0], [0.1, 0.9, 0.0], [0.0, 0.1, 0.9]], # Expected labels for Batch 2 + ] + ), + ), + # Test 2: Ambiguous case + ( + torch.tensor([[[0.9, 0.8, 0.7], [0.2, 0.8, 0.7], [0.2, 0.3, 0.9]]]), # Labels + torch.tensor([[[0.6, 0.7, 0.2], [0.9, 0.4, 0.0], [0.1, 0.7, 0.1]]]), # Preds + 3, # Number of speakers + torch.tensor([[[0.8, 0.7, 0.9], [0.8, 0.7, 0.2], [0.3, 0.9, 0.2]]]), # Expected output + ), + # Test 3: Ambiguous case + ( + torch.tensor([[[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]]), # Labels + torch.tensor( + [[[0.6, 0.6, 0.1, 0.9], [0.7, 0.7, 0.2, 0.8], [0.4, 0.6, 0.2, 0.7], [0.1, 0.1, 0.1, 0.7]]] + ), # Preds + 4, # Number of speakers + torch.tensor([[[1, 1, 0, 0], [1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]]]), # Expected output + ), + ], + ) + def test_get_ats_targets(self, labels, preds, num_speakers, expected_output): + # Generate all permutations for the given number of speakers + speaker_inds = list(range(num_speakers)) + speaker_permutations = torch.tensor(list(itertools.permutations(speaker_inds))) 
+ + # Call the function under test + result = get_ats_targets(labels, preds, speaker_permutations) + # Assert that the result matches the expected output + assert torch.allclose(result, expected_output), f"Expected {expected_output}, but got {result}" + + @pytest.mark.unit + @pytest.mark.parametrize( + "labels, preds, num_speakers, expected_output", + [ + # Test 1: Basic case with simple permutations + ( + torch.tensor( + [[[1, 0], [0, 1]], [[1, 0], [0, 1]]] + ), # Labels (batch_size=2, num_speakers=2, num_classes=2) + torch.tensor( + [[[1, 0], [0, 1]], [[0, 1], [1, 0]]] + ), # Preds (batch_size=2, num_speakers=2, num_classes=2) + 2, # Number of speakers + torch.tensor([[[1, 0], [0, 1]], [[0, 1], [1, 0]]]), # expected max_score_permed_labels + ), + # Test 2: Batch size 1 with more complex permutations + ( + torch.tensor([[[0.8, 0.2], [0.3, 0.7]]]), # Labels + torch.tensor([[[0.9, 0.1], [0.2, 0.8]]]), # Preds + 2, # Number of speakers + torch.tensor( + [[[0.8, 0.2], [0.3, 0.7]]] + ), # expected output (labels remain the same as preds are close) + ), + # Test 3: Ambiguous case + ( + torch.tensor([[[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]]), # Labels + torch.tensor( + [[[0.61, 0.6, 0.1, 0.9], [0.7, 0.7, 0.2, 0.8], [0.4, 0.6, 0.2, 0.7], [0.1, 0.1, 0.1, 0.7]]] + ), # Preds + 4, # Number of speakers + torch.tensor([[[1, 0, 0, 1], [1, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0]]]), # Expected output + ), + ], + ) + def test_get_pil_targets(self, labels, preds, num_speakers, expected_output): + # Generate all permutations for the given number of speakers + speaker_inds = list(range(num_speakers)) + speaker_permutations = torch.tensor(list(itertools.permutations(speaker_inds))) + + result = get_pil_targets(labels, preds, speaker_permutations) + assert torch.equal(result, expected_output), f"Expected {expected_output} but got {result}" + + +class TestGetHiddenLengthFromSampleLength: + @pytest.mark.parametrize( + "num_samples, num_sample_per_mel_frame, num_mel_frame_per_asr_frame, expected_hidden_length", + [ + (160, 160, 8, 1), + (1280, 160, 8, 2), + (0, 160, 8, 1), + (159, 160, 8, 1), + (129, 100, 5, 1), + (300, 150, 3, 1), + ], + ) + def test_various_cases( + self, num_samples, num_sample_per_mel_frame, num_mel_frame_per_asr_frame, expected_hidden_length + ): + result = get_hidden_length_from_sample_length( + num_samples, num_sample_per_mel_frame, num_mel_frame_per_asr_frame + ) + assert result == expected_hidden_length + + def test_default_parameters(self): + assert get_hidden_length_from_sample_length(160) == 1 + assert get_hidden_length_from_sample_length(1280) == 2 + assert get_hidden_length_from_sample_length(0) == 1 + assert get_hidden_length_from_sample_length(159) == 1 + + def test_edge_cases(self): + assert get_hidden_length_from_sample_length(159, 160, 8) == 1 + assert get_hidden_length_from_sample_length(160, 160, 8) == 1 + assert get_hidden_length_from_sample_length(161, 160, 8) == 1 + assert get_hidden_length_from_sample_length(1279, 160, 8) == 1 + + def test_real_life_examples(self): + # The samples tried when this function was designed. 
+ assert get_hidden_length_from_sample_length(160000) == 126 + assert get_hidden_length_from_sample_length(159999) == 125 + assert get_hidden_length_from_sample_length(158720) == 125 + assert get_hidden_length_from_sample_length(158719) == 124 + + assert get_hidden_length_from_sample_length(158880) == 125 + assert get_hidden_length_from_sample_length(158879) == 125 + assert get_hidden_length_from_sample_length(1600) == 2 + assert get_hidden_length_from_sample_length(1599) == 2 diff --git a/tests/collections/speaker_tasks/utils/test_vad_utils.py b/tests/collections/speaker_tasks/utils/test_vad_utils.py new file mode 100644 index 000000000000..a7672e1aa43d --- /dev/null +++ b/tests/collections/speaker_tasks/utils/test_vad_utils.py @@ -0,0 +1,126 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import pytest +from pyannote.core import Annotation, Segment + +from nemo.collections.asr.parts.utils.vad_utils import ( + align_labels_to_frames, + convert_labels_to_speech_segments, + frame_vad_construct_pyannote_object_per_file, + get_frame_labels, + get_nonspeech_segments, + load_speech_overlap_segments_from_rttm, + load_speech_segments_from_rttm, + read_rttm_as_pyannote_object, +) + + +def get_simple_rttm_without_overlap(rttm_file="test1.rttm"): + line = "SPEAKER 1 0 2 speech \n" + speech_segments = [[0.0, 2.0]] + with open(rttm_file, "w") as f: + f.write(line) + return rttm_file, speech_segments + + +def get_simple_rttm_with_overlap(rttm_file="test2.rttm"): + speech_segments = [[0.0, 3.0]] + overlap_segments = [[1.0, 2.0]] + with open(rttm_file, "w") as f: + f.write("SPEAKER 1 0 2 speech \n") + f.write("SPEAKER 1 1 2 speech \n") + return rttm_file, speech_segments, overlap_segments + + +def get_simple_rttm_with_silence(rttm_file="test3.rttm"): + line = "SPEAKER 1 1 2 speech \n" + speech_segments = [[1.0, 2.0]] + silence_segments = [[0.0, 1.0]] + with open(rttm_file, "w") as f: + f.write(line) + return rttm_file, speech_segments, silence_segments + + +class TestVADUtils: + @pytest.mark.parametrize(["logits_len", "labels_len"], [(20, 10), (20, 11), (20, 9), (10, 21), (10, 19)]) + @pytest.mark.unit + def test_align_label_logits(self, logits_len, labels_len): + logits = np.arange(logits_len).tolist() + labels = np.arange(labels_len).tolist() + labels_new = align_labels_to_frames(probs=logits, labels=labels) + + assert len(labels_new) == len(logits) + + @pytest.mark.unit + def test_load_speech_segments_from_rttm(self, test_data_dir): + rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test1.rttm") + speech_segments_new = load_speech_segments_from_rttm(rttm_file) + assert speech_segments_new == speech_segments + + @pytest.mark.unit + def test_load_speech_overlap_segments_from_rttm(self, test_data_dir): + rttm_file, speech_segments, overlap_segments = get_simple_rttm_with_overlap(test_data_dir + "/test2.rttm") + speech_segments_new, overlap_segments_new = 
load_speech_overlap_segments_from_rttm(rttm_file) + assert speech_segments_new == speech_segments + assert overlap_segments_new == overlap_segments + + @pytest.mark.unit + def test_get_nonspeech_segments(self, test_data_dir): + rttm_file, speech_segments, silence_segments = get_simple_rttm_with_silence(test_data_dir + "/test3.rttm") + speech_segments_new = load_speech_segments_from_rttm(rttm_file) + silence_segments_new = get_nonspeech_segments(speech_segments_new) + assert silence_segments_new == silence_segments + + @pytest.mark.unit + def test_get_frame_labels(self, test_data_dir): + rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test4.rttm") + speech_segments_new = load_speech_segments_from_rttm(rttm_file) + frame_labels = get_frame_labels(speech_segments_new, 0.02, 0.0, 3.0, as_str=False) + assert frame_labels[0] == 1 + assert len(frame_labels) == 150 + + @pytest.mark.unit + def test_convert_labels_to_speech_segments(self, test_data_dir): + rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test5.rttm") + speech_segments_new = load_speech_segments_from_rttm(rttm_file) + frame_labels = get_frame_labels(speech_segments_new, 0.02, 0.0, 3.0, as_str=False) + speech_segments_new = convert_labels_to_speech_segments(frame_labels, 0.02) + assert speech_segments_new == speech_segments + + @pytest.mark.unit + def test_read_rttm_as_pyannote_object(self, test_data_dir): + rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test6.rttm") + pyannote_object = read_rttm_as_pyannote_object(rttm_file) + pyannote_object_gt = Annotation() + pyannote_object_gt[Segment(0.0, 2.0)] = 'speech' + assert pyannote_object == pyannote_object_gt + + @pytest.mark.unit + def test_frame_vad_construct_pyannote_object_per_file(self, test_data_dir): + rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test7.rttm") + # test for rttm input + ref, hyp = frame_vad_construct_pyannote_object_per_file(rttm_file, rttm_file) + pyannote_object_gt = Annotation() + pyannote_object_gt[Segment(0.0, 2.0)] = 'speech' + assert ref == hyp == pyannote_object_gt + + # test for list input + speech_segments = load_speech_segments_from_rttm(rttm_file) + frame_labels = get_frame_labels(speech_segments, 0.02, 0.0, 3.0, as_str=False) + speech_segments_new = convert_labels_to_speech_segments(frame_labels, 0.02) + assert speech_segments_new == speech_segments + ref, hyp = frame_vad_construct_pyannote_object_per_file(frame_labels, frame_labels, 0.02) + assert ref == hyp == pyannote_object_gt From 4256580da0c8e2656a016d88b617a0a297326e56 Mon Sep 17 00:00:00 2001 From: Vladimir Bataev Date: Tue, 17 Dec 2024 12:57:38 +0400 Subject: [PATCH 058/128] 2x more memory efficient Graph-based RNN-T (#11169) * Optimized Graph-Transducer implementation Signed-off-by: Vladimir Bataev --------- Signed-off-by: Vladimir Bataev Signed-off-by: artbataev Co-authored-by: artbataev --- .../asr/parts/k2/graph_transducer.py | 155 ++++++++--- .../collections/asr/parts/k2/rnnt_logprobs.py | 44 +++ .../asr/parts/k2/rnnt_logprobs_triton.py | 250 ++++++++++++++++++ nemo/core/utils/optional_libs.py | 34 +++ requirements/requirements.txt | 1 + .../asr/k2/test_graph_transducer.py | 91 ++++++- 6 files changed, 528 insertions(+), 47 deletions(-) create mode 100644 nemo/collections/asr/parts/k2/rnnt_logprobs.py create mode 100644 nemo/collections/asr/parts/k2/rnnt_logprobs_triton.py create mode 100644 nemo/core/utils/optional_libs.py diff --git 
a/nemo/collections/asr/parts/k2/graph_transducer.py b/nemo/collections/asr/parts/k2/graph_transducer.py index bcd49bcbd7a9..874e6e6fd2b4 100644 --- a/nemo/collections/asr/parts/k2/graph_transducer.py +++ b/nemo/collections/asr/parts/k2/graph_transducer.py @@ -15,11 +15,17 @@ import abc from contextlib import nullcontext from typing import ContextManager + import torch import torch.nn.functional as F from nemo.core.classes.loss import Loss from nemo.core.utils.k2_guard import k2 +from nemo.core.utils.optional_libs import TRITON_AVAILABLE +from nemo.utils import logging + +if TRITON_AVAILABLE: + from nemo.collections.asr.parts.k2.rnnt_logprobs_triton import rnnt_logprobs_triton def force_float32_context() -> ContextManager: @@ -129,13 +135,13 @@ def get_composed_lattice(self, units_tensor: torch.Tensor, num_frames: int, voca return composed def get_graphs_batched( - self, logits_lengths: torch.Tensor, targets: torch.Tensor, target_lengths: torch.Tensor, vocab_size: int + self, source_lengths: torch.Tensor, targets: torch.Tensor, target_lengths: torch.Tensor, vocab_size: int ) -> "k2.Fsa": """ Get batched lattice (grid or composed) for the batch of sequences. Args: - logits_lengths: tensor with lengths of logits + source_lengths: tensor with lengths of logits targets: tensor with target units target_lengths: tensor with lengths of targets vocab_size: vocab size (including blank) @@ -143,14 +149,16 @@ def get_graphs_batched( Returns: batched lattice - FsaVec (k2.Fsa) """ - batch_size = logits_lengths.shape[0] + batch_size = source_lengths.shape[0] with torch.no_grad(): if self.use_grid_implementation: + source_lengths_list = source_lengths.tolist() + target_lengths_list = target_lengths.tolist() return k2.create_fsa_vec( [ self.get_grid( - units_tensor=targets[i, : target_lengths[i].item()], - num_frames=logits_lengths[i].item(), + units_tensor=targets[i, : target_lengths_list[i]], + num_frames=source_lengths_list[i], vocab_size=vocab_size, ) for i in range(batch_size) @@ -167,7 +175,7 @@ def get_graphs_batched( ] temporal_fsas = [ self.get_temporal_schema( - num_frames=logits_lengths[i].item(), vocab_size=vocab_size, device=targets.device + num_frames=source_lengths[i].item(), vocab_size=vocab_size, device=targets.device ) for i in range(batch_size) ] @@ -175,22 +183,20 @@ def get_graphs_batched( k2.create_fsa_vec(text_fsas), k2.create_fsa_vec(temporal_fsas), treat_epsilons_specially=False ) if self.connect_composed: - k2.connect(target_fsas_vec) + target_fsas_vec = k2.connect(target_fsas_vec) return target_fsas_vec - def get_logits_indices(self, target_fsas_vec: k2.Fsa, logits_shape: torch.Size) -> torch.Tensor: + def get_batch_indices(self, target_fsas_vec: k2.Fsa) -> torch.Tensor: """ - Get indices of flatten logits for each arc in the lattices. + Get batch indices (for logits) for each arc in the lattices. 
Args: target_fsas_vec: batch of target FSAs with lattices - logits_shape: shape of the logits tensor Returns: 1d tensor with indices """ - # logits_shape: B x Time x Text+1 x Labels - batch_size = logits_shape[0] + batch_size = target_fsas_vec.shape[0] device = target_fsas_vec.device scores_to_batch_i = torch.repeat_interleave( torch.arange(batch_size, device=device, dtype=torch.int64), @@ -199,6 +205,21 @@ def get_logits_indices(self, target_fsas_vec: k2.Fsa, logits_shape: torch.Size) device=device, ), ) + return scores_to_batch_i + + def get_logits_indices(self, target_fsas_vec: k2.Fsa, logits_shape: torch.Size) -> torch.Tensor: + """ + Get indices of flatten logits for each arc in the lattices. + + Args: + target_fsas_vec: batch of target FSAs with lattices + logits_shape: shape of the logits tensor + + Returns: + 1d tensor with indices + """ + # logits_shape: B x Time x Text+1 x Labels + scores_to_batch_i = self.get_batch_indices(target_fsas_vec=target_fsas_vec) indices = ( scores_to_batch_i * logits_shape[1] * logits_shape[2] * logits_shape[3] # Batch + target_fsas_vec.aux_labels.to(torch.int64) * logits_shape[2] * logits_shape[3] # Time indices @@ -222,6 +243,8 @@ def __init__( connect_composed=False, double_scores=False, cast_to_float32=False, + return_graph=False, + use_triton=True, ): """ Init method @@ -232,8 +255,11 @@ def __init__( connect_composed: Connect graph after composing unit and temporal schemas (only for Compose-Transducer). `connect` operation is slow, it is useful for visualization, but not necessary for loss computation. double_scores: Use calculation of loss in double precision (float64) in the lattice. - Does not significantly affect memory usage since the lattice is ~V/2 times smaller than the joint tensor. + Does not significantly affect memory usage since the lattice is ~V/2 times smaller + than the joint tensor. cast_to_float32: Force cast joint tensor to float32 before log-softmax calculation. 
+ return_graph: Return graph (along with loss) from `forward` function + use_triton: use optimized log probs calculations with Triton (faster and more memory efficient) """ super().__init__( use_grid_implementation=use_grid_implementation, @@ -242,6 +268,10 @@ def __init__( cast_to_float32=cast_to_float32, ) self.blank = blank + self.return_graph = return_graph + self.use_triton = use_triton and TRITON_AVAILABLE + if not self.use_triton: + logging.warning("Triton is disabled, memory usage can be larger") def get_unit_schema(self, units_tensor: torch.Tensor, vocab_size: int) -> "k2.Fsa": """ @@ -370,13 +400,14 @@ def relabel_states(states: torch.Tensor, n: int, m: int) -> torch.Tensor: anti_diag = m + n - 1 - diag max_idx = n * m - 1 cur_diag_idx = i if m > n else m - j - 1 - states = ( + new_states = ( diag.lt(min_mn) * ((diag * (diag + 1) >> 1) + i) + torch.logical_and(diag.ge(min_mn), diag.lt(max_mn)) * ((min_mn * (min_mn + 1) >> 1) + (diag - min_mn) * min_mn + cur_diag_idx) + diag.ge(max_mn) * (max_idx - (anti_diag * (anti_diag + 1) >> 1) + m - j) ) - return states + torch.where(states >= n * m, states, new_states, out=new_states) + return new_states def get_grid(self, units_tensor: torch.Tensor, num_frames: int, vocab_size: int) -> "k2.Fsa": """ @@ -445,13 +476,76 @@ def get_grid(self, units_tensor: torch.Tensor, num_frames: int, vocab_size: int) rnnt_graph.unit_positions = unit_positions return rnnt_graph + def get_weighted_graphs( + self, + logits: torch.Tensor, + targets: torch.Tensor, + source_lengths: torch.Tensor, + target_lengths: torch.Tensor, + use_graph_weight=False, + ) -> "k2.Fsa": + """ + Get batch of graphs (FsaVec) for RNN-T loss calculation. + + Args: + logits: activations (joint tensor). NB: raw logits, not after log-softmax + targets: target labels + source_lengths: lengths of source sequences + target_lengths: length of target sequences + use_graph_weight: uses weight from graphs (if `get_graphs_batched` returns graphs with weights) + + Returns: + FsaVec containing RNN-T graphs for all utterances. 
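+
+        Illustrative usage sketch (assumes k2 is installed and that `logits`, `targets` and the
+        length tensors already exist on the same device; `logits` is the raw joint tensor of
+        shape [B, T, U+1, V]):
+            loss_fn = GraphRnntLoss(blank=0)
+            graphs = loss_fn.get_weighted_graphs(logits, targets, source_lengths, target_lengths)
+            scores = -1 * graphs.get_tot_scores(use_double_scores=False, log_semiring=True)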
+ """ + vocab_size = logits.shape[-1] + target_fsas_vec = self.get_graphs_batched(source_lengths, targets, target_lengths, vocab_size) + + with torch.no_grad(): + # last transitions in the graph are labeled with -1 label + last_transition_mask = target_fsas_vec.labels == -1 + batch_indices = self.get_batch_indices(target_fsas_vec=target_fsas_vec) + time_indices = target_fsas_vec.aux_labels.clone().to(torch.int64) + unit_indices = target_fsas_vec.unit_positions.clone().to(torch.int64) + text_units = target_fsas_vec.labels.clone().to(torch.int64) + # fill in the indices outside the logits with 0, replace later + text_units.masked_fill_(last_transition_mask, 0) + + cast_context = force_float32_context() if self.cast_to_float32 else nullcontext() + with cast_context: + # NB: do not assign scores -> modify, k2 will not update all scores correctly (modify -> assign) + if self.use_triton and logits.device.type == "cuda": + unit_scores, blank_scores = rnnt_logprobs_triton( + logits=logits, + targets=targets, + blank_id=self.blank, + source_lengths=source_lengths, + target_lengths=target_lengths, + ) + text_units_blank_mask = text_units == self.blank + scores = torch.where( + text_units_blank_mask, + blank_scores[batch_indices, time_indices, unit_indices], + unit_scores[batch_indices, time_indices, unit_indices], + ).to(torch.float32) + scores[last_transition_mask] = 0.0 # fix weights for the arcs to the last state + else: + log_probs = F.log_softmax(logits, dim=-1) + scores = log_probs[batch_indices, time_indices, unit_indices, text_units].to(torch.float32) + scores[last_transition_mask] = 0.0 + + if use_graph_weight: + target_fsas_vec.scores = target_fsas_vec.scores + scores + else: + target_fsas_vec.scores = scores + return target_fsas_vec + def forward( self, acts: torch.Tensor, labels: torch.Tensor, act_lens: torch.Tensor, label_lens: torch.Tensor, - ) -> torch.Tensor: + ) -> torch.Tensor | tuple[torch.Tensor, "k2.Fsa"]: """ Compute forward method for RNN-T. 
@@ -466,26 +560,11 @@ def forward( """ # argument names are consistent with NeMo, see RNNTLoss.forward: # self._loss(acts=log_probs, labels=targets, act_lens=input_lengths, label_lens=target_lengths) - logits, targets, logits_lengths, target_lengths = acts, labels, act_lens, label_lens - - # logits: B x Time x Text+1 x C - vocab_size = logits.shape[-1] - target_fsas_vec = self.get_graphs_batched(logits_lengths, targets, target_lengths, vocab_size) - - cast_context = force_float32_context() if self.cast_to_float32 else nullcontext() - with cast_context: - log_probs = F.log_softmax(logits, dim=-1) - with torch.no_grad(): - indices = self.get_logits_indices(target_fsas_vec, logits.shape) - # transition to the last state - # use 0 index (for valid index_select) and manually assign score after index_select for this case - indices[target_fsas_vec.labels == -1] = 0 - - # NB: do not assign scores -> modify, k2 will not update all scores correctly (modify -> assign) - scores = log_probs.flatten().index_select(-1, indices) - # fix weights for the arcs to the last state - scores[target_fsas_vec.labels == -1] = 0 + target_fsas_vec = self.get_weighted_graphs( + logits=acts, targets=labels, source_lengths=act_lens, target_lengths=label_lens, use_graph_weight=False + ) - target_fsas_vec.scores = scores - scores = -1 * target_fsas_vec.get_tot_scores(use_double_scores=self.double_scores, log_semiring=True) - return scores + scores = -1 * target_fsas_vec.get_tot_scores(use_double_scores=self.double_scores, log_semiring=True) + if self.return_graph: + return scores, target_fsas_vec + return scores diff --git a/nemo/collections/asr/parts/k2/rnnt_logprobs.py b/nemo/collections/asr/parts/k2/rnnt_logprobs.py new file mode 100644 index 000000000000..c41615f83bf9 --- /dev/null +++ b/nemo/collections/asr/parts/k2/rnnt_logprobs.py @@ -0,0 +1,44 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn.functional as F + + +def rnnt_logprobs_torch( + logits: torch.Tensor, targets: torch.Tensor, blank_id: int +) -> tuple[torch.Tensor, torch.Tensor]: + """ + Given logits, calculate log probabilities for blank and target labels needed for transducer loss calculation. + Naive implementation in PyTorch, for testing and prototyping purposes. + + Args: + logits: Joint tensor of size [B, T, U+1, D] + targets: Targets of size [B, U] + blank_id: id of the blank output + + Returns: + Tuple of tensors with log probabilities for targets and blank labels, both of size [B, T, U+1]. + For the last non-existent target (U+1) output is zero. 
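+
+        Illustrative example (shapes chosen arbitrarily):
+            logits = torch.randn(2, 8, 5, 16)       # [B, T, U+1, V], raw logits
+            targets = torch.randint(1, 16, (2, 4))  # [B, U]
+            target_scores, blank_scores = rnnt_logprobs_torch(logits, targets, blank_id=0)
+            # both outputs have shape [2, 8, 5], i.e. [B, T, U+1]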
+ """ + device = logits.device + batch_size = logits.shape[0] + log_probs = F.log_softmax(logits, dim=-1) + blank_scores = log_probs[..., blank_id] + targets = torch.cat((targets, torch.zeros([batch_size], dtype=targets.dtype, device=device).unsqueeze(1)), dim=-1) + target_scores = torch.gather( + log_probs, dim=-1, index=targets.unsqueeze(1).expand(log_probs.shape[:-1]).unsqueeze(-1) + ).squeeze(-1) + target_scores[:, :, -1] = 0.0 + return target_scores, blank_scores diff --git a/nemo/collections/asr/parts/k2/rnnt_logprobs_triton.py b/nemo/collections/asr/parts/k2/rnnt_logprobs_triton.py new file mode 100644 index 000000000000..64bc8abbdbeb --- /dev/null +++ b/nemo/collections/asr/parts/k2/rnnt_logprobs_triton.py @@ -0,0 +1,250 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import triton +import triton.language as tl + + +@triton.jit +def _rnnt_logprobs_fwd_kernel( + logits_ptr, + targets_ptr, + source_lengths_ptr, + target_lengths_ptr, + max_source_len: int, + max_target_len_plus_1: int, + num_labels: int, # vocab size (with blank) + blank_id: int, + target_scores_ptr, + blank_scores_ptr, + BLOCK_SIZE: tl.constexpr, +): + """ + Forward kernel for RNN-T log probs. Stores result in `target_scores_ptr` and `blank_scores_ptr`. + Calculations are performed in float32 (but original tensors can use any precision). 
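+    Expected to be launched on a (batch, max_source_len, max_target_len_plus_1) grid with
+    BLOCK_SIZE >= num_labels (see the launch in `RnntLogProbs.forward` below).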
+ """ + batch_i = tl.program_id(axis=0).to(tl.int64) + source_i = tl.program_id(axis=1).to(tl.int64) + target_i = tl.program_id(axis=2).to(tl.int64) + + # load lengths for source/target + source_len = tl.load(source_lengths_ptr + batch_i) + target_len = tl.load(target_lengths_ptr + batch_i) + + if source_i >= source_len or target_i > target_len: + # no calculations required + return + + # calculate offset in [B, T, U+1, V] tensor for the current vector with target logits + flat_index = ((batch_i * max_source_len + source_i) * max_target_len_plus_1 + target_i) * num_labels + logits_ptr += flat_index + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < num_labels + logits = tl.load(logits_ptr + col_offsets, mask=mask, other=-float("inf")).to(tl.float32) + # stable log softmax calculation + logits_max = tl.max(logits, axis=0) + logits_minus_max = logits - logits_max + denominator = tl.log(tl.sum(tl.exp(logits_minus_max), axis=0)) + blank_logit = tl.load(logits_ptr + blank_id).to(tl.float32) + flat_index_output = (batch_i * max_source_len + source_i) * max_target_len_plus_1 + target_i + tl.store(blank_scores_ptr + flat_index_output, blank_logit - logits_max - denominator) + + # calculate log prob for target if needed + if target_i < target_len: + target_id = tl.load(targets_ptr + batch_i * (max_target_len_plus_1 - 1) + target_i) + target_logit = tl.load(logits_ptr + target_id).to(tl.float32) + tl.store(target_scores_ptr + flat_index_output, target_logit - logits_max - denominator) + + +@triton.jit +def _rnnt_logprobs_bwd_kernel( + logits_ptr, + grad_logits_ptr, + targets_ptr, + source_lengths_ptr, + target_lengths_ptr, + max_source_len: int, + max_target_len_plus_1: int, + num_labels: int, + blank_id: int, + grad_target_scores_ptr, + grad_blank_scores_ptr, + BLOCK_SIZE: tl.constexpr, +): + """ + Backward kernel for RNN-T log probs. Stores result in `grad_target_scores_ptr` and `grad_blank_scores_ptr`. + We recalculate part of the forward here to avoid using extra memory in forward. + Calculations are performed in float32 (but original tensors can use any precision). 
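+    Expected to be launched with the same grid and BLOCK_SIZE as the forward kernel
+    (see the launch in `RnntLogProbs.backward` below).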
+ """ + batch_i = tl.program_id(axis=0).to(tl.int64) + source_i = tl.program_id(axis=1).to(tl.int64) + target_i = tl.program_id(axis=2).to(tl.int64) + + # load lengths for source/target + source_len = tl.load(source_lengths_ptr + batch_i) + target_len = tl.load(target_lengths_ptr + batch_i) + if source_i >= source_len or target_i > target_len: + # no calculations required + return + + # calculate offset in [B, T, U+1, V] tensor for the current vector with target logits/grad_logits + flat_index = ((batch_i * max_source_len + source_i) * max_target_len_plus_1 + target_i) * num_labels + logits_ptr += flat_index + grad_logits_ptr += flat_index + + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < num_labels + logits = tl.load(logits_ptr + col_offsets, mask=mask, other=-float("inf")).to(tl.float32) + # stable log softmax calculation + logits_max = tl.max(logits, axis=0) + logits_minus_max = logits - logits_max + denominator = tl.log(tl.sum(tl.exp(logits_minus_max), axis=0)) + log_softmax = logits_minus_max - denominator + # softmax for gradient + softmax = tl.exp(log_softmax) + + flat_index_grad = (batch_i * max_source_len + source_i) * max_target_len_plus_1 + target_i + blank_grad = tl.load(grad_blank_scores_ptr + flat_index_grad).to(tl.float32) + target_i_valid = target_i < target_len + target_grad = tl.load(grad_target_scores_ptr + flat_index_grad, mask=target_i_valid, other=0.0).to(tl.float32) + target_id = tl.load(targets_ptr + batch_i * (max_target_len_plus_1 - 1) + target_i, mask=target_i_valid, other=-1) + + grad_not_in_targets = (-softmax) * (blank_grad + target_grad) + grad = tl.where(col_offsets == blank_id, blank_grad + grad_not_in_targets, grad_not_in_targets) + grad = tl.where(col_offsets == target_id, target_grad + grad_not_in_targets, grad) + tl.store(grad_logits_ptr + col_offsets, grad, mask=mask) + + +class RnntLogProbs(torch.autograd.Function): + """ + Function to calculate log probabilities for target and blank labels for RNN-T, supporting torch.autograd. 
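+    Intended to be used via the `rnnt_logprobs_triton` helper below rather than being called directly.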
+ """ + + @staticmethod + def forward( + ctx, + logits: torch.Tensor, + targets: torch.Tensor, + blank_id: int, + source_lengths: torch.Tensor | None, + target_lengths: torch.Tensor | None, + ): + """ + + Args: + ctx: ctx object for storing the context + logits: Joint tensor of size [B, T, U+1, D] + targets: Targets of size [B, U] + blank_id: id of the blank output + source_lengths: optional tensor with lengths for source utterances + target_lengths: optional tensor with lengths for targets + + Returns: + + """ + assert logits.is_contiguous() # logits are huge, so here we just check if logits are contiguous + targets = targets.contiguous() + device = logits.device + float_dtype = torch.float32 + + target_scores = torch.zeros(logits.shape[:-1], dtype=float_dtype, device=device) + blank_scores = torch.zeros_like(target_scores) + if source_lengths is None: + source_lengths = torch.full([logits.shape[0]], fill_value=logits.shape[1], dtype=torch.int, device=device) + else: + source_lengths = source_lengths.contiguous() + if target_lengths is None: + target_lengths = torch.full( + [logits.shape[0]], fill_value=logits.shape[2] - 1, dtype=torch.int, device=device + ) + else: + target_lengths = target_lengths.contiguous() + + # run Triton kernel + _rnnt_logprobs_fwd_kernel[(logits.shape[0], logits.shape[1], logits.shape[2])]( + logits_ptr=logits, + targets_ptr=targets, + source_lengths_ptr=source_lengths, + target_lengths_ptr=target_lengths, + max_source_len=logits.shape[1], + max_target_len_plus_1=logits.shape[2], + num_labels=logits.shape[3], + blank_id=blank_id, + target_scores_ptr=target_scores, + blank_scores_ptr=blank_scores, + BLOCK_SIZE=triton.next_power_of_2(logits.shape[-1]), + ) + + # saving for backward + ctx.save_for_backward(logits, targets, source_lengths, target_lengths) + ctx.blank_id = blank_id + return target_scores, blank_scores + + @staticmethod + def backward(ctx, grad_target_scores, grad_blank_scores): + """ + Backward calculation for RNN-T log-probs. + + Args: + ctx: ctx object for storing the context + grad_target_scores: upstream gradient for targets + grad_blank_scores: upstream gradient for blank scores + + Returns: + gradient for logits, None for all other arguments for `forward` + """ + (logits, targets, source_lengths, target_lengths) = ctx.saved_tensors + blank_id = ctx.blank_id + grad_logits = torch.zeros_like(logits) + _rnnt_logprobs_bwd_kernel[(logits.shape[0], logits.shape[1], logits.shape[2])]( + logits_ptr=logits, + grad_logits_ptr=grad_logits, + source_lengths_ptr=source_lengths, + target_lengths_ptr=target_lengths, + targets_ptr=targets, + max_source_len=logits.shape[1], + max_target_len_plus_1=logits.shape[2], + num_labels=logits.shape[3], + blank_id=blank_id, + grad_target_scores_ptr=grad_target_scores, + grad_blank_scores_ptr=grad_blank_scores, + BLOCK_SIZE=triton.next_power_of_2(logits.shape[-1]), + ) + return grad_logits, None, None, None, None + + +def rnnt_logprobs_triton( + logits: torch.Tensor, + targets: torch.Tensor, + blank_id: int, + source_lengths: torch.Tensor | None = None, + target_lengths: torch.Tensor | None = None, +) -> tuple[torch.Tensor, torch.Tensor]: + """ + Given logits, calculate log probabilities for blank and target labels needed for transducer loss calculation. + Optimized implementation in Triton. 
+ + Args: + logits: Joint tensor of size [B, T, U+1, D] + targets: Targets of size [B, U] + blank_id: id of the blank output + source_lengths: optional tensor with lengths for source utterances + target_lengths: optional tensor with lengths for targets + + Returns: + Tuple of tensors with log probabilities for targets and blank labels, both of size [B, T, U+1]. + For the non-existent targets (U+1 or beyond target_lengths) output is zero. + """ + return RnntLogProbs.apply(logits, targets, blank_id, source_lengths, target_lengths) diff --git a/nemo/core/utils/optional_libs.py b/nemo/core/utils/optional_libs.py new file mode 100644 index 000000000000..9aa39260963c --- /dev/null +++ b/nemo/core/utils/optional_libs.py @@ -0,0 +1,34 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib.util + + +def is_lib_available(name: str) -> bool: + """ + Checks if the library/package with `name` is available in the system + NB: try/catch with importlib.import_module(name) requires importing the library, which can be slow. + So, `find_spec` should be preferred + """ + return importlib.util.find_spec(name) is not None + + +TRITON_AVAILABLE = is_lib_available("triton") + +try: + from nemo.core.utils.k2_guard import k2 as _ + + K2_AVAILABLE = True +except (ImportError, ModuleNotFoundError): + K2_AVAILABLE = False diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 7fd5e88eebe3..1b9fc88000b9 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -11,5 +11,6 @@ tensorboard text-unidecode torch tqdm>=4.41.0 +triton>=3.1.0; sys_platform == 'linux' wget wrapt diff --git a/tests/collections/asr/k2/test_graph_transducer.py b/tests/collections/asr/k2/test_graph_transducer.py index 5879226e782d..592772767484 100644 --- a/tests/collections/asr/k2/test_graph_transducer.py +++ b/tests/collections/asr/k2/test_graph_transducer.py @@ -12,29 +12,36 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import random from typing import List import numpy as np import pytest import torch +from nemo.collections.asr.parts.k2.rnnt_logprobs import rnnt_logprobs_torch from nemo.collections.asr.parts.numba.rnnt_loss.rnnt_numpy import RNNTLoss as RNNTLoss_Numpy +from nemo.core.utils.optional_libs import K2_AVAILABLE, TRITON_AVAILABLE + +if K2_AVAILABLE: + import k2 -try: from nemo.collections.asr.parts.k2.graph_transducer import GraphRnntLoss - from nemo.core.utils.k2_guard import k2 -except (ImportError, ModuleNotFoundError): - pytest.skip("k2 is not installed, skipping Graph-RNNT tests.", allow_module_level=True) + +if TRITON_AVAILABLE: + from nemo.collections.asr.parts.k2.rnnt_logprobs_triton import rnnt_logprobs_triton + EPS_SM_INPUT = 1e-6 EPS_L_INPUT = 1e-4 DEVICES = ['cpu'] -if torch.cuda.is_available() and k2.with_cuda: +if K2_AVAILABLE and torch.cuda.is_available() and k2.with_cuda: DEVICES.append('cuda') +@pytest.mark.skipif(not K2_AVAILABLE, reason="k2 is not installed, skipping Graph-RNNT tests.") class TestGraphRnnt: @pytest.mark.unit @pytest.mark.parametrize("device", DEVICES) @@ -214,9 +221,12 @@ def test_small_grid_transducer(self, device, rnnt_test_helper, rnn_loss_sample_d @pytest.mark.unit @pytest.mark.parametrize("device", DEVICES) - def test_medium_grid_transducer(self, device, rnnt_test_helper, rnn_loss_sample_data): + @pytest.mark.parametrize("use_triton", [True, False]) + def test_medium_grid_transducer(self, device, use_triton: bool, rnnt_test_helper, rnn_loss_sample_data): + if use_triton and device == "cpu": + pytest.skip("Triton does not support CPU yet") sample_data = rnn_loss_sample_data.get_sample_medium() - graph_rnnt = GraphRnntLoss(blank=0, use_grid_implementation=True) + graph_rnnt = GraphRnntLoss(blank=0, use_grid_implementation=True, use_triton=use_triton) graph_cost, graph_grads = rnnt_test_helper.wrap_and_call( graph_rnnt, sample_data.logits, sample_data.targets, device ) @@ -225,9 +235,12 @@ def test_medium_grid_transducer(self, device, rnnt_test_helper, rnn_loss_sample_ @pytest.mark.unit @pytest.mark.parametrize("device", DEVICES) - def test_medium_random_var_size(self, device, rnnt_test_helper, rnn_loss_sample_data): + @pytest.mark.parametrize("use_triton", [True, False]) + def test_medium_random_var_size(self, device, use_triton: bool, rnnt_test_helper, rnn_loss_sample_data): + if use_triton and device == "cpu": + pytest.skip("Triton does not support CPU yet") sample_data = rnn_loss_sample_data.get_sample_medium_random_var_size(blank_first=True) - graph_rnnt = GraphRnntLoss(blank=0, use_grid_implementation=True) + graph_rnnt = GraphRnntLoss(blank=0, use_grid_implementation=True, use_triton=use_triton) graph_cost, graph_grads = rnnt_test_helper.wrap_and_call( graph_rnnt, sample_data.logits.detach(), @@ -261,3 +274,63 @@ def test_small_random_grid_compose_equivalent(self, device: torch.device, blank_ assert k2.is_rand_equivalent( graph_grid, graph_composed, log_semiring=True, treat_epsilons_specially=False ), "Grid and composed graphs are not equivalent." 
+ + +@pytest.mark.skipif(not TRITON_AVAILABLE, reason="Triton is not installed, skipping RNNT Log Probs tests") +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is unavailable") +class TestRnntLogProbs: + @pytest.mark.parametrize( + "batch_size,num_frames,num_text_units,vocab_size", + [ + (1, 4, 2, 4), + (2, 3, 2, 5), + (2, 16, 31, 17), + (16, 129, 65, 2048), + ], + ) + @pytest.mark.parametrize( + "float_dtype", + [torch.float32] + ([torch.bfloat16] if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else []), + ) + def test_rnnt_logprobs_random( + self, batch_size: int, num_frames: int, num_text_units: int, vocab_size: int, float_dtype: torch.dtype + ): + """ + Test Triton-based implementation using etalon Torch-based implementation for RNN-T log-probs. + """ + device = torch.device("cuda") + torch.manual_seed(777) + + targets = torch.tensor( + [[random.randrange(0, vocab_size - 1) for i in range(num_text_units)] for j in range(batch_size)], + device=device, + dtype=torch.long, + ) + + logits = torch.rand( + [batch_size, num_frames, num_text_units + 1, vocab_size + 1], + dtype=float_dtype, + device=device, + requires_grad=True, + ) + + # Triton-based implementation works in float32 precision for accuracy purposes, should compare with float32 + target_scores_etalon, blank_scores_etalon = rnnt_logprobs_torch( + logits=logits.to(torch.float32), targets=targets, blank_id=vocab_size + ) + logits2 = logits.clone().detach() + logits2.requires_grad_(True) + target_scores, blank_scores = rnnt_logprobs_triton(logits=logits2, targets=targets, blank_id=vocab_size) + target_scores[..., -1:] = 0.0 + target_scores_etalon[..., -1:] = 0.0 + assert torch.allclose(blank_scores, blank_scores_etalon, atol=1e-5) + assert torch.allclose(target_scores, target_scores_etalon, atol=1e-5) + + # test backward + target_scales = torch.rand_like(target_scores, requires_grad=False) + blank_scales = torch.rand_like(blank_scores, requires_grad=False) + loss_etalon = (target_scales * target_scores_etalon + blank_scales * blank_scores_etalon).sum() + loss = (target_scales * target_scores + blank_scales * blank_scores).sum() + loss_etalon.backward() + loss.backward() + assert torch.allclose(logits.grad, logits2.grad, atol=1e-5) From 2b6100d42138d7d85ccab1f0542c3374ff761555 Mon Sep 17 00:00:00 2001 From: Hemil Desai Date: Tue, 17 Dec 2024 08:41:01 -0800 Subject: [PATCH 059/128] Use explicit subpaths in io for exporting a checkpoint (#11352) * Fix llm.export_ckpt Signed-off-by: Hemil Desai * fix Signed-off-by: Hemil Desai --------- Signed-off-by: Hemil Desai --- nemo/collections/llm/api.py | 2 +- nemo/collections/llm/gpt/model/llama.py | 9 ++++++--- nemo/lightning/io/connector.py | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/nemo/collections/llm/api.py b/nemo/collections/llm/api.py index d030eb88863c..3e63bcea9447 100644 --- a/nemo/collections/llm/api.py +++ b/nemo/collections/llm/api.py @@ -605,7 +605,7 @@ def import_ckpt( def load_connector_from_trainer_ckpt(path: Path, target: str) -> io.ModelConnector: - return io.load_context(path).model.exporter(target, path) + return io.load_context(path, subpath="model").exporter(target, path) @run.cli.entrypoint(name="export", namespace="llm") diff --git a/nemo/collections/llm/gpt/model/llama.py b/nemo/collections/llm/gpt/model/llama.py index a7e995addb83..04540294d82a 100644 --- a/nemo/collections/llm/gpt/model/llama.py +++ b/nemo/collections/llm/gpt/model/llama.py @@ -344,7 +344,10 @@ def apply(self, output_path: Path) -> Path: 
target = target.cpu() target.save_pretrained(output_path) - self.tokenizer.save_pretrained(output_path) + try: + self.tokenizer.save_pretrained(output_path) + except Exception: + logging.warning("Failed to save tokenizer") return output_path @@ -366,11 +369,11 @@ def convert_state(self, source, target): @property def tokenizer(self): - return io.load_context(str(self)).model.tokenizer.tokenizer + return io.load_context(str(self), subpath="model").tokenizer.tokenizer @property def config(self) -> "HFLlamaConfig": - source: LlamaConfig = io.load_context(str(self)).model.config + source: LlamaConfig = io.load_context(str(self), subpath="model.config") from transformers import LlamaConfig as HFLlamaConfig diff --git a/nemo/lightning/io/connector.py b/nemo/lightning/io/connector.py index bf07956f2cd2..258d2848a63a 100644 --- a/nemo/lightning/io/connector.py +++ b/nemo/lightning/io/connector.py @@ -226,7 +226,7 @@ def nemo_load( from nemo.lightning import MegatronStrategy, Trainer, _strategy_lib from nemo.lightning.io.api import load_context - model = load_context(path).model + model = load_context(path, subpath="model") _trainer = trainer or Trainer( devices=1, accelerator="cpu" if cpu else "gpu", From 1da96328847dd5a04caa212a4313f18838d295b6 Mon Sep 17 00:00:00 2001 From: Dong Hyuk Chang Date: Tue, 17 Dec 2024 12:59:29 -0500 Subject: [PATCH 060/128] Remove triton requirement (#11627) * Specify pytorch-triton instead of triton Signed-off-by: Dong Hyuk Chang * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove triton Signed-off-by: Dong Hyuk Chang --------- Signed-off-by: Dong Hyuk Chang Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- requirements/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 1b9fc88000b9..7fd5e88eebe3 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -11,6 +11,5 @@ tensorboard text-unidecode torch tqdm>=4.41.0 -triton>=3.1.0; sys_platform == 'linux' wget wrapt From 2eb897b77dab69723d18aaf1d0d6632e41cd631a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Tue, 17 Dec 2024 20:08:43 +0100 Subject: [PATCH 061/128] ci: Remove comment if no changes required anymore (#11624) Signed-off-by: Oliver Koenig --- .github/workflows/code-formatting.yml | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/.github/workflows/code-formatting.yml b/.github/workflows/code-formatting.yml index b08e9676aabd..0eaad048b3a5 100644 --- a/.github/workflows/code-formatting.yml +++ b/.github/workflows/code-formatting.yml @@ -118,7 +118,7 @@ jobs: FILTERED=() for file in $CHANGED_FILES; do - DATE=$(git log --format=%ad --date=unix $file | tail -1) + DATE=$(git log --format=%ad --date=unix "$file" | tail -1) if [[ "$STRICT_MODE" == "true" ]]; then if [[ "$DATE" -gt "$THRESHOLD" ]]; then @@ -139,16 +139,18 @@ jobs: echo "Will run on these files: ${FILTERED[@]}" - set +e + set +xe LOG=$(pylint ${FILTERED[@]}) EXIT_CODE=$? set -e - echo "$LOG" echo "OUTPUT<> $GITHUB_ENV echo "$LOG" >> $GITHUB_ENV echo "EOF" >> $GITHUB_ENV echo "log=$LOG" + set -x + + echo "exit-code=$EXIT_CODE" | tee -a "$GITHUB_OUTPUT" if [[ "${{ matrix.strict-mode }}" == "true" ]]; then HEADER="🚨 The following files must be fixed before merge!" 
@@ -160,7 +162,7 @@ jobs: exit $([[ "$EXIT_CODE" -ne 0 && "$STRICT_MODE" == "true" ]] && echo $EXIT_CODE || echo 0) - name: Find Comment - if: ${{ always() && env.OUTPUT != '' }} + if: ${{ always() }} uses: peter-evans/find-comment@v3 id: fc with: @@ -168,7 +170,7 @@ jobs: body-includes: - name: Delete comment - if: ${{ always() && env.OUTPUT != '' && steps.fc.outputs.comment-id != '' }} + if: ${{ always() && steps.fc.outputs.comment-id != '' }} env: GH_TOKEN: ${{ secrets.github_token }} REPOSITORY: ${{ github.repository }} @@ -182,7 +184,7 @@ jobs: https://api.github.com/repos/$REPOSITORY/issues/comments/$COMMENT_ID - name: Add PR comment for PyLint - if: ${{ always() && env.OUTPUT != '' }} + if: ${{ always() && steps.pylint.outputs.exit-code != '0' }} uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ github.event.number }} @@ -200,5 +202,13 @@ jobs: ``` --- + + Mitigation guide: + + * Add sensible and useful docstrings to functions and methods + * For trivial methods like getter/setters, consider adding `# pylint: disable=C0116` inside the function itself + * To disable multiple functions/methods at once, put a `# pylint: disable=C0116` before the first and a `# pylint: enable=C0116` after the last. + + By applying these rules, we reduce the occurance of this message in future. Thank you for improving NeMo's documentation! From e4afd2d8e4afa23c8fcc3fb539665b418a7a172e Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Date: Tue, 17 Dec 2024 13:26:28 -0800 Subject: [PATCH 062/128] Jit with peft (#11586) * move jitransform at the end Signed-off-by: Alexandros Koumparoulis * add docstring & post-init Signed-off-by: Alexandros Koumparoulis * Add remove_extra_batch_keys and remove align_labels Signed-off-by: Alexandros Koumparoulis * Run JitTransform on_train_epoch_start Signed-off-by: Alexandros Koumparoulis * add --use-torch-jit option Signed-off-by: Alexandros Koumparoulis * add docstrings Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * pep8 Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa --------- Signed-off-by: Alexandros Koumparoulis Signed-off-by: akoumpa Co-authored-by: akoumpa --- examples/llm/peft/hf.py | 17 ++++- examples/llm/sft/hf.py | 35 ++++++++-- nemo/collections/llm/api.py | 11 ++- .../gpt/model/hf_auto_model_for_causal_lm.py | 53 ++++++++++----- .../pytorch/callbacks/jit_transform.py | 68 ++++++++++++++++++- 5 files changed, 155 insertions(+), 29 deletions(-) diff --git a/examples/llm/peft/hf.py b/examples/llm/peft/hf.py index 3a0930732e87..45675398a421 100644 --- a/examples/llm/peft/hf.py +++ b/examples/llm/peft/hf.py @@ -16,6 +16,7 @@ from lightning.pytorch.loggers import WandbLogger from nemo import lightning as nl from nemo.collections import llm +from nemo.lightning.pytorch.callbacks import JitConfig, JitTransform def make_squad_hf_dataset(tokenizer): @@ -53,7 +54,7 @@ def formatting_prompts_func(examples): return datamodule -if __name__ == '__main__': +def main(): import argparse parser = argparse.ArgumentParser() @@ -63,6 +64,7 @@ def formatting_prompts_func(examples): parser.add_argument('--accelerator', default='gpu', choices=['gpu']) parser.add_argument('--max-steps', type=int, default=100) parser.add_argument('--wandb-project', type=str, default=None) + parser.add_argument('--use-torch-jit', action='store_true') args = parser.parse_args() wandb = None @@ -74,11 +76,17 @@ def formatting_prompts_func(examples): 
) grad_clip = 0.5 if args.strategy == 'fsdp': - # See: https://github.com/Lightning-AI/pytorch-lightning/blob/8ad3e29816a63d8ce5c00ac104b14729a4176f4f/src/lightning/pytorch/plugins/precision/fsdp.py#L81 + # See: + # https://github.com/Lightning-AI/pytorch-lightning/blob/8ad3e29816a63d8ce5c00ac104b14729a4176f4f/src/lightning/pytorch/plugins/precision/fsdp.py#L81 grad_clip = None use_dist_samp = False tokenizer = llm.HFAutoModelForCausalLM.configure_tokenizer(args.model) + callbacks = [] + if args.use_torch_jit: + jit_config = JitConfig(use_torch=True, torch_kwargs={'dynamic': True}, use_thunder=False) + callbacks = [JitTransform(jit_config)] + llm.api.finetune( model=llm.HFAutoModelForCausalLM(args.model), data=make_squad_hf_dataset(tokenizer.tokenizer), @@ -94,6 +102,7 @@ def formatting_prompts_func(examples): gradient_clip_val=grad_clip, use_distributed_sampler=use_dist_samp, logger=wandb, + callbacks=callbacks, ), optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), log=None, @@ -102,3 +111,7 @@ def formatting_prompts_func(examples): dim=32, ), ) + + +if __name__ == '__main__': + main() diff --git a/examples/llm/sft/hf.py b/examples/llm/sft/hf.py index ce79e136a1c2..ff85180cf86b 100755 --- a/examples/llm/sft/hf.py +++ b/examples/llm/sft/hf.py @@ -20,10 +20,12 @@ from nemo import lightning as nl from nemo.collections import llm from nemo.lightning.pytorch.accelerate.transformer_engine import is_te_accelerated -from nemo.lightning.pytorch.callbacks import ModelCallback +from nemo.lightning.pytorch.callbacks import JitConfig, JitTransform class SquadDataModuleWithPthDataloader(llm.SquadDataModule): + """Creates a squad dataset with a PT dataloader""" + def _create_dataloader(self, dataset, mode, **kwargs) -> DataLoader: return DataLoader( dataset, @@ -37,17 +39,30 @@ def _create_dataloader(self, dataset, mode, **kwargs) -> DataLoader: def squad(tokenizer) -> pl.LightningDataModule: + """Instantiates a SquadDataModuleWithPthDataloader and return it + + Args: + tokenizer (AutoTokenizer): the tokenizer to use + + Returns: + pl.LightningDataModule: the dataset to train with. 
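+
+    Usage sketch (the model name is illustrative only):
+        model = llm.HFAutoModelForCausalLM(model_name="meta-llama/Llama-3.2-1B")
+        datamodule = squad(model.tokenizer)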
+ """ return SquadDataModuleWithPthDataloader( tokenizer=tokenizer, - seq_length=2048, + seq_length=512, micro_batch_size=2, global_batch_size=128, # assert gbs == mbs * accumulate_grad_batches num_workers=0, - dataset_kwargs={"sanity_check_dist_workers": False}, + dataset_kwargs={ + "sanity_check_dist_workers": False, + "pad_to_max_length": True, + "get_attention_mask_from_fusion": True, + }, ) -if __name__ == '__main__': +def main(): + """Example script to run SFT with a HF transformers-instantiated model on squad.""" import argparse parser = argparse.ArgumentParser() @@ -60,6 +75,7 @@ def squad(tokenizer) -> pl.LightningDataModule: parser.add_argument("--fp8-autocast", default=False, action='store_true') parser.add_argument('--wandb-project', type=str, default=None) parser.add_argument('--model-save-path', type=str, default=None) + parser.add_argument('--use-torch-jit', action='store_true') args = parser.parse_args() wandb = None @@ -87,6 +103,11 @@ def squad(tokenizer) -> pl.LightningDataModule: model = llm.HFAutoModelForCausalLM(model_name=args.model, model_accelerator=model_accelerator) tokenizer = model.tokenizer + callbacks = [] + if args.use_torch_jit: + jit_config = JitConfig(use_torch=True, torch_kwargs={'dynamic': False}, use_thunder=False) + callbacks = [JitTransform(jit_config)] + llm.api.finetune( model=model, data=squad(tokenizer), @@ -101,8 +122,8 @@ def squad(tokenizer) -> pl.LightningDataModule: accumulate_grad_batches=10, gradient_clip_val=grad_clip, use_distributed_sampler=use_dist_samp, - callbacks=[], logger=wandb, + callbacks=callbacks, ), optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), log=None, @@ -116,3 +137,7 @@ def squad(tokenizer) -> pl.LightningDataModule: if args.model_save_path is not None: model.save_pretrained(args.model_save_path) + + +if __name__ == '__main__': + main() diff --git a/nemo/collections/llm/api.py b/nemo/collections/llm/api.py index 3e63bcea9447..7d7762edef3c 100644 --- a/nemo/collections/llm/api.py +++ b/nemo/collections/llm/api.py @@ -37,7 +37,7 @@ io, ) from nemo.lightning.base import NEMO_MODELS_CACHE -from nemo.lightning.pytorch.callbacks import PEFT, ModelTransform +from nemo.lightning.pytorch.callbacks import PEFT, JitTransform, ModelTransform from nemo.utils import logging from nemo.utils.get_rank import is_global_rank_zero @@ -875,7 +875,14 @@ def _setup( trainer.callbacks.append(model_transform) else: trainer.callbacks.append(ModelTransform()) - + # Move jit callback at the end ensure it's applied on top of any model transformations (peft) + jit_cb = None + for i, cb in enumerate(trainer.callbacks): + if isinstance(cb, JitTransform): + assert jit_cb is None + jit_cb = trainer.callbacks.pop(i) + if jit_cb is not None: + trainer.callbacks.append(jit_cb) return app_state diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index a51bbffdd6b6..2d8b32964767 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -26,24 +26,11 @@ def masked_cross_entropy(logits, targets, mask=None): if mask is not None: loss = F.cross_entropy(logits, targets, reduction='none') - return torch.mean(loss[mask == 1]) + return torch.mean(loss * mask.view(-1)) else: return F.cross_entropy(logits, targets) -def align_labels(logits, labels): - logits = logits.float() - n_cls = logits.shape[-1] - if logits.shape[-2] == labels.shape[-1]: - logits = logits[..., :-1, 
:].contiguous() - labels = labels[..., 1:].contiguous() - elif logits.shape[-2] == labels.shape[-1] + 1: - logits = logits[..., :-1, :].contiguous() - else: - raise ValueError("Mismatched labels and logits shapes (" + str(labels.shape) + " " + str(logits.shape)) - return logits.view(-1, n_cls), labels.view(-1) - - class HFAutoModelForCausalLM(pl.LightningModule, io.IOMixin, fn.FNMixin): def __init__( self, @@ -111,14 +98,21 @@ def training_step(self, batch): labels = batch.pop('labels').to(self.model.device) loss_mask = batch.pop('loss_mask', None) + # GPTSFTDataset emits `tokens` instead of `input_ids` + if not 'input_ids' in batch and 'tokens' in batch: + batch['input_ids'] = batch['tokens'] + batch = self._remove_extra_batch_keys(batch) + outputs = self.forward(batch) # Prepare for loss calculation - logits, labels = align_labels(outputs.logits.float(), labels) + logits = outputs.logits.float() + n_cls = logits.shape[-1] + logits, labels = logits.view(-1, n_cls), labels.view(-1) assert logits.shape[-2] == labels.shape[-1] loss = self.loss_fn(logits, labels, loss_mask) - self.log('train_log', loss, on_step=True, on_epoch=True, prog_bar=True) + self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True) return loss @torch.no_grad @@ -126,12 +120,20 @@ def validation_step(self, batch, batch_idx): labels = batch.pop('labels').to(self.model.device) loss_mask = batch.pop('loss_mask', None) + # GPTSFTDataset emits `tokens` instead of `input_ids` + if not 'input_ids' in batch and 'tokens' in batch: + batch['input_ids'] = batch['tokens'] + batch = self._remove_extra_batch_keys(batch) + outputs = self.forward(**batch) - logits, labels = align_labels(outputs.logits.float(), labels) + # Prepare for loss calculation + logits = outputs.logits.float() + n_cls = logits.shape[-1] + logits, labels = logits.view(-1, n_cls), labels.view(-1) assert logits.shape[-2] == labels.shape[-1] - loss = self.loss_fn(logits, labels, loss_mask) + loss = self.loss_fn(logits, labels, loss_mask) self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True) def save_pretrained(self, path): @@ -141,3 +143,18 @@ def save_pretrained(self, path): self._tokenizer.save_pretrained(path) else: logging.warning("A tokenizer wasn't created before to save.") + + def _remove_extra_batch_keys(self, batch, reserved_keys=['labels', 'loss_mask']): + """Remove extra keys from batch that are not kwargs in model's forward + + Args: + batch (dict): dictionary of tensors. + + Returns: + dict: dictionary of tensors; keys that are not in model's forward are removed. + """ + import inspect + + fwd_signature = inspect.signature(self.model.forward) + allowed_keys = list(fwd_signature.parameters.keys()) + reserved_keys + return {k: batch[k] for k in allowed_keys if k in batch} diff --git a/nemo/lightning/pytorch/callbacks/jit_transform.py b/nemo/lightning/pytorch/callbacks/jit_transform.py index cbfca8a25d88..33e76555f65d 100644 --- a/nemo/lightning/pytorch/callbacks/jit_transform.py +++ b/nemo/lightning/pytorch/callbacks/jit_transform.py @@ -22,6 +22,17 @@ def extract_module_attr_name(pl_module: "pl.LightningModule") -> str: + """Extracts the held nn.Module from a pl.LightningModule, will try "module", "model", or fail. + + Args: + pl_module (pl.LightningModule): the LightningModule used in training. 
+ + Raises: + ValueError: if the pl_module has neither a .mdoel or .module + + Returns: + str: the attr-name of the nn.Module + """ if hasattr(pl_module, 'module'): return 'module' elif hasattr(pl_module, 'model'): @@ -31,12 +42,34 @@ def extract_module_attr_name(pl_module: "pl.LightningModule") -> str: def listify(x): + """Wraps input in a list, if not already a list. + + Args: + x (Anything): the input, can be anything. + + Returns: + Anything | list(Anything): Anything (if it's already a list) o/w list(Anything) + """ if not isinstance(x, list): return [x] return x def get_modules_from_selector(model, module_selector): + """Iterator over model's modules whose FQN match the module_selector. + + Args: + model (nn.Module): the model to iterate over. + module_selector (str): module selector, if empty or '*' will return the whole model. If + there's an asterisk in the name will match it as a regexp. + + Raises: + AttributeError: if the user provides an invalid selector. + AttributeError: if user's selector selects a non-nn.Module attribute. + + Yields: + Iterator(nn.Module): iterator over modules whose FQN matches module_selector + """ if module_selector is None or module_selector == '' or module_selector == '*': yield model return @@ -50,7 +83,7 @@ def get_modules_from_selector(model, module_selector): # handle wildcard selector # TODO(@akoumparouli): support more complex selectors e.g. net_b.*.net_c.*.conv for name, module in tmp.named_children(): - if re.match(item, name): + if re.match(item.replace('*', '.*'), name): yield module return @@ -65,6 +98,15 @@ def get_modules_from_selector(model, module_selector): def compile_module(config, module): + """Jit-compiles an nn.Module + + Args: + config (JitConfig): jit config + module (nn.Module): the module to be compiled + + Returns: + nn.Module: the (potentially) compiled module + """ if config.use_torch: module.compile(**config.torch_kwargs) return True @@ -88,12 +130,26 @@ def compile_module(config, module): @dataclass class JitConfig: + """Config POD for Jit transforms (e.g. torch.compile or thunder) + Options: + - module_selector (str): reg-exp to match modules to apply JitTransform to, useful for multi-trunk + models where you want to apply it on one of them only. If empty will apply transform to root + module. + - use_torch (bool): whether to use torch.compile or not. + - torch_kwargs (dict): kwargs to pass to torch.compile. + - use_thunder (bool): whether to use thunder or not. + - profile_thunder (bool): toggle for thunder's profiler. + """ + module_selector: str = '' use_torch: bool = False torch_kwargs: dict = field(default_factory=dict) use_thunder: bool = False profile_thunder: bool = False + def __post_init__(self): + assert not (self.use_torch and self.use_thunder), "use_torch cannot be used at the same time with use_thunder" + class JitTransform(Callback, IOMixin): """ @@ -112,7 +168,15 @@ def __init__(self, config: JitConfig): self.config = config assert not (self.config.use_torch and self.config.use_thunder) - def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: + def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: + """Jit-compiles the model at the start of the epoch. + While other events such as on_train_start are more suitable, we use on_train_epoch_start + since that is what is used in peft (we want to jit after adding the adapters). 
+ + Args: + trainer (pl.Trainer): PTL trainer + pl_module (pl.LightningModule): PTL module + """ if self.config is None: return if not self.config.use_thunder and not self.config.use_torch: From 44689b13446883e4391b5b3dbe236c8872c97e93 Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Date: Tue, 17 Dec 2024 14:32:46 -0800 Subject: [PATCH 063/128] NeMo-UX: add Hf's AutoModelForImageTextToText (#11321) * init commit Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * wip Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * fix Signed-off-by: Alexandros Koumparoulis * peft examp;le Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * move peft example to multimodal_llm Signed-off-by: Alexandros Koumparoulis * surface HFAutoModelForImageTextToText Signed-off-by: Alexandros Koumparoulis * add hf vlm dataset Signed-off-by: Alexandros Koumparoulis * move processor Signed-off-by: Alexandros Koumparoulis * train_log -> train_loss Signed-off-by: Alexandros Koumparoulis * vlm.HFDatasetDataModule pass collate_fn as argument Signed-off-by: Alexandros Koumparoulis * Update peft example Signed-off-by: Alexandros Koumparoulis * typo Signed-off-by: Alexandros Koumparoulis * remove unused var Signed-off-by: Alexandros Koumparoulis * Move example Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * remove unused Signed-off-by: Alexandros Koumparoulis * Small change Signed-off-by: Alexandros Koumparoulis * Fix loss calculation Signed-off-by: Alexandros Koumparoulis * Add extract_skipped_token_ids Signed-off-by: Alexandros Koumparoulis * Use vlm.HFAutoModelForImageTextToText.extract_skipped_token_ids Signed-off-by: Alexandros Koumparoulis * add test Signed-off-by: Alexandros Koumparoulis * Update logits/labels handling Signed-off-by: Alexandros Koumparoulis * add trust_remote_code to configure_processor Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * mini refactor Signed-off-by: Alexandros Koumparoulis * add LLAMA_TOKENS Signed-off-by: Alexandros Koumparoulis * update hf_dataset Signed-off-by: Alexandros Koumparoulis * Add lora_dtype for models with non-FP weights Signed-off-by: Alexandros Koumparoulis * Add load_in_4bit option Signed-off-by: Alexandros Koumparoulis * add default_dtype Signed-off-by: Alexandros Koumparoulis * add load_in_4bit to llm collection Signed-off-by: Alexandros Koumparoulis * rm import Signed-off-by: Alexandros Koumparoulis * fix asset path Signed-off-by: Alexandros Koumparoulis * move vlm test Signed-off-by: Alexandros Koumparoulis * move data offline Signed-off-by: Alexandros Koumparoulis * use signel gpu Signed-off-by: Alexandros Koumparoulis * pylint fix Signed-off-by: Alexandros Koumparoulis * pylint Signed-off-by: Alexandros Koumparoulis * pylint Signed-off-by: Alexandros Koumparoulis * drop align_labels Signed-off-by: Alexandros Koumparoulis * remove align_labels from llm too Signed-off-by: Alexandros Koumparoulis * use loss * mask instead of loss[mask == 1] Signed-off-by: Alexandros Koumparoulis * fix path Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa --------- 
Signed-off-by: Alexandros Koumparoulis Signed-off-by: akoumpa Signed-off-by: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Co-authored-by: akoumpa --- .github/workflows/cicd-main.yml | 11 + examples/llm/peft/hf.py | 6 +- examples/vlm/hf/peft.py | 127 ++++++++ .../gpt/model/hf_auto_model_for_causal_lm.py | 18 +- nemo/collections/llm/peft/lora.py | 21 +- nemo/collections/vlm/__init__.py | 4 + nemo/collections/vlm/hf/data/hf_dataset.py | 281 ++++++++++++++++++ .../hf_auto_model_for_image_text_to_text.py | 191 ++++++++++++ tests/collections/vlm/hf/peft.py | 128 ++++++++ 9 files changed, 777 insertions(+), 10 deletions(-) create mode 100644 examples/vlm/hf/peft.py create mode 100644 nemo/collections/vlm/hf/data/hf_dataset.py create mode 100644 nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py create mode 100644 tests/collections/vlm/hf/peft.py diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 310d580e43f6..fce4ef2acfbd 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -3600,6 +3600,16 @@ jobs: inference.repetition_penalty=1.0 \ inference.outfile_path=/tmp/nlp_mcore_t5_lora_tuning_tp2/out.jsonl + L2_VLM_HF_Transformer_PEFT: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_VLM_HF_Transformer_PEFT') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure-gpus-1 + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/vlm/hf/peft.py --model /home/TestData/vlm/qwen2-2b/ --max-steps 3 --disable-ckpt + AFTER_SCRIPT: | + rm -rf nemo_experiments L2_HF_Transformer_PEFT: needs: [ cicd-test-container-setup ] @@ -4863,6 +4873,7 @@ jobs: - L2_HF_Transformer_SFT - L2_HF_Transformer_SFT_nemorun - L2_HF_Transformer_SFT_2gpu + - L2_VLM_HF_Transformer_PEFT - L2_HF_Transformer_SFT_2gpu_nemorun - L2_HF_Transformer_SFT_TE_Acceleration - L2_NeMo_2_SSM_Pretraining diff --git a/examples/llm/peft/hf.py b/examples/llm/peft/hf.py index 45675398a421..3137a542ae01 100644 --- a/examples/llm/peft/hf.py +++ b/examples/llm/peft/hf.py @@ -40,7 +40,11 @@ def formatting_prompts_func(examples): output = output[0] text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN ans = tokenizer(text) - ans['labels'] = ans['input_ids'] + # 'input_ids' is a list, we want to remove EOS_TOKEN from input_ids and the first token from + # labels to align the two: + ans['labels'] = list(ans['input_ids'][1:]) + ans['input_ids'] = ans['input_ids'][:-1] + ans['attention_mask'] = ans['attention_mask'][:-1] return ans tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) diff --git a/examples/vlm/hf/peft.py b/examples/vlm/hf/peft.py new file mode 100644 index 000000000000..d51984677a74 --- /dev/null +++ b/examples/vlm/hf/peft.py @@ -0,0 +1,127 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import fiddle as fdl +import torch +from lightning.pytorch.loggers import WandbLogger + +from nemo import lightning as nl +from nemo.collections import llm, vlm + + +def mk_hf_vlm_dataset(processor, mbs, gbs): + """Creates vlm dataset""" + skipped_tokens = vlm.HFAutoModelForImageTextToText.extract_skipped_token_ids(processor) + + def collate_fn(examples, processor): + def fmt(sample): + instruction = "Describe accurately the given image." + conversation = [ + { + "role": "user", + "content": [{"type": "text", "text": instruction}, {"type": "image", "image": sample["image"]}], + }, + {"role": "assistant", "content": [{"type": "text", "text": sample["text"]}]}, + ] + return {"conversation": conversation, "images": [sample['image']]} + + text = [] + images = [] + for example in map(fmt, examples): + text.append( + processor.apply_chat_template( + example["conversation"], + tokenize=False, + add_generation_prompt=False, + ) + ) + images += example['images'] + + # Tokenize the text and process the images + batch = processor( + text=text, + images=images, + padding=True, + return_tensors="pt", + ) + + assert batch["input_ids"].ndim == 2, 'Expected input_ids to be 2D' + batch["pixel_values"] = batch["pixel_values"].to(torch.bfloat16) + labels = batch["input_ids"].clone() + labels[torch.isin(labels, skipped_tokens)] = -100 + batch["labels"] = labels[:, 1:] + batch["input_ids"] = batch["input_ids"][:, :-1] + return batch + + return vlm.HFDatasetDataModule( + "quintend/rdr-items", + split="train", + micro_batch_size=mbs, + global_batch_size=gbs, + collate_fn=lambda x: collate_fn(x, processor=processor), + ) + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='Qwen/Qwen2-VL-2B-Instruct') + parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) + parser.add_argument('--devices', default=1) + parser.add_argument('--mbs', default=1) + parser.add_argument('--gbs', default=1) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--max-steps', type=int, default=100) + parser.add_argument('--wandb-project', type=str, default=None) + args = parser.parse_args() + + wandb = None + if args.wandb_project is not None: + model = '_'.join(args.model.split('/')[-2:]) + wandb = WandbLogger( + project=args.wandb_project, + name=f'{model}_dev{args.devices}_strat_{args.strategy}', + ) + grad_clip = 0.5 + if args.strategy == 'fsdp': + # See: + # https://github.com/Lightning-AI/pytorch-lightning/blob/8ad3e29816a63d8ce5c00ac104b14729a4176f4f/src/lightning/pytorch/plugins/precision/fsdp.py#L81 + grad_clip = None + use_dist_samp = False + processor = vlm.HFAutoModelForImageTextToText.configure_processor(args.model) + + llm.api.finetune( + model=vlm.HFAutoModelForImageTextToText(args.model), + data=mk_hf_vlm_dataset(processor, args.mbs, args.gbs), + trainer=nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator=args.accelerator, + strategy=args.strategy, + log_every_n_steps=1, + limit_val_batches=0.0, + num_sanity_val_steps=0, + accumulate_grad_batches=10, + gradient_clip_val=grad_clip, + use_distributed_sampler=use_dist_samp, + logger=wandb, + ), + optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), + log=None, + peft=llm.peft.LoRA( + target_modules=['*_proj'], + dim=16, + ), + ) diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 
2d8b32964767..cea7264543ff 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -42,6 +42,7 @@ def __init__( model_accelerator=None, trust_remote_code=False, default_dtype=torch.bfloat16, + load_in_4bit=False, ): super().__init__() self.save_hyperparameters() @@ -55,6 +56,7 @@ def __init__( self.model_accelerator = model_accelerator self.trust_remote_code = trust_remote_code self.default_dtype = default_dtype + self.load_in_4bit = load_in_4bit @property def tokenizer(self): @@ -75,7 +77,10 @@ def configure_model(self): # create all your layers here if self.load_pretrained_weights: self.model = AutoModelForCausalLM.from_pretrained( - self.model_name, torch_dtype='auto', trust_remote_code=self.trust_remote_code + self.model_name, + torch_dtype='auto', + trust_remote_code=self.trust_remote_code, + load_in_4bit=self.load_in_4bit, ) else: from transformers import AutoConfig @@ -108,9 +113,10 @@ def training_step(self, batch): # Prepare for loss calculation logits = outputs.logits.float() n_cls = logits.shape[-1] - logits, labels = logits.view(-1, n_cls), labels.view(-1) - assert logits.shape[-2] == labels.shape[-1] + logits = logits.view(-1, n_cls) + labels = labels.view(-1) + assert logits.shape[-2] == labels.shape[-1], "Expected logits & labels to have the same length" loss = self.loss_fn(logits, labels, loss_mask) self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True) return loss @@ -127,12 +133,12 @@ def validation_step(self, batch, batch_idx): outputs = self.forward(**batch) - # Prepare for loss calculation logits = outputs.logits.float() n_cls = logits.shape[-1] - logits, labels = logits.view(-1, n_cls), labels.view(-1) - assert logits.shape[-2] == labels.shape[-1] + logits = logits.view(-1, n_cls) + labels = labels.view(-1) + assert logits.shape[-2] == labels.shape[-1], "Expected logits & labels to have the same length" loss = self.loss_fn(logits, labels, loss_mask) self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True) diff --git a/nemo/collections/llm/peft/lora.py b/nemo/collections/llm/peft/lora.py index 766b8993bf35..0ce6138d1c6b 100644 --- a/nemo/collections/llm/peft/lora.py +++ b/nemo/collections/llm/peft/lora.py @@ -45,7 +45,14 @@ class LinearAdapter(nn.Module): """ def __init__( - self, orig_linear, dim=8, alpha=32, dropout=0.1, dropout_position='post', lora_A_init_method='xavier' + self, + orig_linear, + dim=8, + alpha=32, + dropout=0.1, + dropout_position='post', + lora_A_init_method='xavier', + lora_dtype=None, ): super(LinearAdapter, self).__init__() assert isinstance(orig_linear, nn.Linear) @@ -62,7 +69,8 @@ def __init__( in_features = self.orig_linear.in_features out_features = self.orig_linear.out_features - dtype = self.orig_linear.weight.dtype + dtype = lora_dtype or self.orig_linear.weight.dtype + self.lora_a = nn.Parameter(torch.zeros((in_features, dim), dtype=dtype, device=device)) self.lora_b = nn.Parameter(torch.zeros((dim, out_features), dtype=dtype, device=device)) if lora_A_init_method == 'xavier': @@ -112,6 +120,7 @@ class LoRA(PEFT): dropout_position (Literal['pre', 'post'], optional): Position for applying dropout. Can be 'pre' (before the low-rank projection) or 'post' (after). Defaults to 'pre'. a2a_experimental (bool): Enables the experimental All-to-All (A2A) communication strategy. Defaults to False. + lora_drype (torch.dtype): Parameter data type for LoRA weights. Default None (will use model's dtype). 
Example: -------- @@ -140,6 +149,7 @@ class LoRA(PEFT): lora_A_init_method: str = "xavier" lora_B_init_method: str = "zero" a2a_experimental: bool = False + lora_dtype: torch.dtype = None def transform(self, m: nn.Module, name=None, prefix=None): """ @@ -159,7 +169,12 @@ def transform(self, m: nn.Module, name=None, prefix=None): if name in self.target_modules or any(wildcard_match(pattern, full_name) for pattern in self.target_modules): if isinstance(m, nn.Linear): return LinearAdapter( - m, dim=self.dim, alpha=self.alpha, dropout=self.dropout, lora_A_init_method=self.lora_A_init_method + m, + dim=self.dim, + alpha=self.alpha, + dropout=self.dropout, + lora_A_init_method=self.lora_A_init_method, + lora_dtype=self.lora_dtype, ) input_is_parallel, in_features, out_features = get_adapter_attributes_from_linear(m) diff --git a/nemo/collections/vlm/__init__.py b/nemo/collections/vlm/__init__.py index b5e693830fa5..3e9eebe47cbe 100644 --- a/nemo/collections/vlm/__init__.py +++ b/nemo/collections/vlm/__init__.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from nemo.collections.vlm.hf.data.hf_dataset import HFDatasetDataModule +from nemo.collections.vlm.hf.model.hf_auto_model_for_image_text_to_text import HFAutoModelForImageTextToText from nemo.collections.vlm.llava_next.data import LlavaNextMockDataModule, LlavaNextTaskEncoder from nemo.collections.vlm.llava_next.model.base import LlavaNextConfig from nemo.collections.vlm.llava_next.model.llava_next import LlavaNextConfig7B, LlavaNextConfig13B, LlavaNextModel @@ -51,6 +53,8 @@ from nemo.collections.vlm.recipes import * __all__ = [ + "HFDatasetDataModule", + "HFAutoModelForImageTextToText", "NevaMockDataModule", "NevaLazyDataModule", "MLlamaMockDataModule", diff --git a/nemo/collections/vlm/hf/data/hf_dataset.py b/nemo/collections/vlm/hf/data/hf_dataset.py new file mode 100644 index 000000000000..a73e6d3e3504 --- /dev/null +++ b/nemo/collections/vlm/hf/data/hf_dataset.py @@ -0,0 +1,281 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import lightning.pytorch as pl +import torch +from datasets import Dataset, DatasetDict, load_dataset +from torch.utils.data import DataLoader + +from nemo.lightning.pytorch.plugins import MegatronDataSampler +from nemo.utils import logging + + +def clean_split(name): + """name="train[:100]" returns "train" """ + if '[' in name: + return name.split('[')[0] + return name + + +def make_dataset_splits(dataset, split, split_aliases): + """ + Given a dataset (e.g. from datasets.load_dataset or datasets.Dataset.from_dict) it + returns a dictionary containing the corresponding dataset splits. 
+ + For example: + + $ ds = load_dataset("dataset-id") + $ ans = make_dataset_splits(ds) + + # `ds` contains the following + $ print(ds) + > DatasetDict({ + > train: Dataset({ + > features: ['id', 'title', 'context', 'question', 'answers'], + > num_rows: 87599 + > }) + > validation: Dataset({ + > features: ['id', 'title', 'context', 'question', 'answers'], + > num_rows: 10570 + > }) + > }) + + # In this case the value of `ans` (returned value) will be: + $ print(ans) + > { + > "train": Dataset .. (with 87599 rows), + > "val": Dataset .. (with 10570 rows), + > } + """ + valid_split_names = ['train', 'test', 'val'] + dataset_splits = {_split: None for _split in valid_split_names} + + alias_to_split = {} + for split_name, _split_aliases in split_aliases.items(): + assert split_name in valid_split_names + for alias in _split_aliases: + alias_to_split[alias] = split_name + + if isinstance(dataset, Dataset): + assert isinstance(split, str), "Expected split to be a string, but got " + str(type(split)) + split = clean_split(split) + dataset_splits[split] = dataset + elif isinstance(dataset, DatasetDict): + dataset_split_names = dataset.keys() + logging.info(f"HF dataset has the following splits: {dataset_split_names}") + for alias_split_name, split in dataset.items(): + split_name = alias_to_split[alias_split_name] + assert dataset_splits[split_name] is None + dataset_splits[split_name] = split + elif isinstance(split, list): + logging.info(f"Loaded HF dataset will use " + str(split) + " splits.") + assert isinstance(dataset, list) + for i, alias_split_name in enumerate(map(clean_split, split)): + split_name = alias_to_split[alias_split_name] + assert dataset_splits[split_name] is None + dataset_splits[split_name] = dataset[i] + elif isinstance(split, str): + logging.info(f"Loaded HF dataset has a single split.") + assert not isinstance(dataset, list) + alias_split_name = split + if '+' in alias_split_name: + raise ValueError("Split concatenation not supported") + elif '[' in alias_split_name: + alias_split_name = alias_split_name.split('[')[0] + split_name = alias_to_split[alias_split_name] + assert dataset_splits[split_name] is None + dataset_splits[split_name] = dataset + else: + raise ValueError("Expected split name to be None, str or a list") + + assert set(valid_split_names) == set(dataset_splits.keys()), dataset_splits.keys() + num_init_splits = sum(map(lambda x: x is not None, dataset_splits.values())) + assert num_init_splits > 0, f"Expected at least one split to have been initialized {num_init_splits}" + return dataset_splits + + +class HFDatasetDataModule(pl.LightningDataModule): + """HFDatasetDataModule wraps HF's load_dataset (datasets library) + so that it can be used within NeMo. + Users can select whether to use an mcore-sampler via use_mcore_sampler arg. 
+ + Usage examples: + + - loading a single split (train) from a dataset + llm.HFDatasetDataModule("rajpurkar/squad", split="train") + + - loading multiple splits (train, validation) from a dataset + llm.HFDatasetDataModule("rajpurkar/squad", split=["train", "validation"]) + """ + + def __init__( + self, + path_or_dataset, + split=None, + collate_fn=None, + num_workers=2, + pin_memory=True, + persistent_workers=True, + seq_length=1024, + micro_batch_size=2, + global_batch_size=2, + pad_token_id=0, + use_mcore_sampler=False, + mcore_dataloader_type='cyclic', + train_aliases=["train", "training"], + test_aliases=["test", "testing"], + val_aliases=["val", "validation", "valid", "eval"], + **kwargs, + ) -> None: + super().__init__() + assert pad_token_id is not None + # A dataset usually will have several splits (e.g. train, val, test, etc). + # We map synonym names to canonical names (train, test, val). + # A synonym can be a prefix/suffixed word e.g. train <> training. + split_aliases = {'train': train_aliases, 'test': test_aliases, 'val': val_aliases} + + # self.dataset_splits will hold the actual dataset for each split. + if isinstance(path_or_dataset, str): + logging.info(f"Loading HF dataset from {path_or_dataset}") + dataset = load_dataset(path_or_dataset, split=split, **kwargs) + elif isinstance(path_or_dataset, Dataset) or isinstance(path_or_dataset, DatasetDict): + logging.info(f"Using passed HF dataset {str(path_or_dataset)}") + dataset = path_or_dataset + else: + raise ValueError( + "Expected `path_or_dataset` to be str, Dataset, DatasetDict, but got " + str(type(path_or_dataset)) + ) + + self.dataset_splits = make_dataset_splits(dataset, split, split_aliases) + + if collate_fn is None: + self._collate_fn = lambda x: HFDatasetDataModule.collate_fn(x, pad_token_id=self.pad_token_id) + else: + self._collate_fn = collate_fn + + self.num_workers = num_workers + self.pin_memory = pin_memory + self.persistent_workers = persistent_workers + self.seq_length = seq_length + self.micro_batch_size = micro_batch_size + self.global_batch_size = global_batch_size + self.pad_token_id = pad_token_id + + self.use_mcore_sampler = use_mcore_sampler + self.mcore_dataloader_type = mcore_dataloader_type + + @staticmethod + def from_dict(dataset_dict, split, **kwargs): + """Creates a Dataset from a dictionary""" + dataset = Dataset.from_dict(dataset_dict) + return HFDatasetDataModule(path_or_dataset=dataset, split=split, **kwargs) + + @staticmethod + def collate_fn(batch, pad_token_id=0): + """Collate for VLM data""" + + def batchify(tensor): + if tensor.ndim == 1: + return tensor.unsqueeze_(0) + return tensor + + def extract_key_from_dicts(batch, key): + return list(map(lambda x: x[key], batch)) + + def pad_within_micro(batch, pad_token_id): + max_len = max(map(len, batch)) + return [item + [pad_token_id] * (max_len - len(item)) for item in batch] + + return { + key: batchify( + torch.LongTensor( + pad_within_micro( + extract_key_from_dicts(batch, key), + pad_token_id, + ) + ) + ) + for key in batch[0].keys() + } + + def setup(self, stage: str): + """PTL hook""" + if not self.use_mcore_sampler: + return + self.data_sampler = MegatronDataSampler( + seq_len=self.seq_length, + micro_batch_size=self.micro_batch_size, + global_batch_size=self.global_batch_size, + dataloader_type=self.mcore_dataloader_type, + ) + + def _make_dataloader(self, dataset, collate_fn=None): + """Creates a dataloader""" + assert dataset is not None + + if collate_fn is None: + + def collate_fn(x): + return 
HFDatasetDataModule.collate_fn(x, pad_token_id=self.pad_token_id) + + return DataLoader( + dataset, + num_workers=self.num_workers, + pin_memory=self.pin_memory, + persistent_workers=self.persistent_workers, + collate_fn=collate_fn, + batch_size=self.micro_batch_size, + ) + + @property + def train(self): + """Train data split""" + return self.dataset_splits['train'] + + @property + def val(self): + """Validation data split""" + return self.dataset_splits['val'] + + @property + def test(self): + """Testing data split""" + return self.dataset_splits['test'] + + def train_dataloader(self): + """Creates a dataloader for the train split""" + return self._make_dataloader(self.train, self._collate_fn) + + def val_dataloader(self): + """Creates a dataloader for the validation split""" + return self._make_dataloader(self.val, self._collate_fn) + + def test_dataloader(self): + """Creates a dataloader for the test split""" + return self._make_dataloader(self.test, self._collate_fn) + + def map(self, function=None, split_names=None, **kwargs): + """Maps a function to all/selected splits + Additional arguments can be passed down to dataset's map via kwargs""" + if isinstance(split_names, str): + dataset_splits = {split_names: self.dataset_splits[split_names]} + elif isinstance(split_names, list): + dataset_splits = {k: self.dataset_splits[k] for k in split_names} + else: + dataset_splits = self.dataset_splits + + for split_name, subset in dataset_splits.items(): + if subset is None: + continue + dataset_splits[split_name] = subset.map(function, **kwargs) diff --git a/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py b/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py new file mode 100644 index 000000000000..33ad04970d35 --- /dev/null +++ b/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py @@ -0,0 +1,191 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
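# --- Editorial note (not part of the patch) -----------------------------------
# A small usage sketch for the HFDatasetDataModule defined above, assuming the
# package surface introduced in this patch (nemo.collections.vlm). The in-memory
# dict is illustrative data, not a real dataset.
from nemo.collections import vlm

datamodule = vlm.HFDatasetDataModule.from_dict(
    {"input_ids": [[1, 2, 3], [4, 5]], "attention_mask": [[1, 1, 1], [1, 1]]},
    split="train",
    micro_batch_size=2,
    global_batch_size=2,
    pad_token_id=0,
)
# The default collate_fn pads every key within the micro-batch to the longest
# item, so the two samples above are padded to length 3.
batch = next(iter(datamodule.train_dataloader()))
assert batch["input_ids"].shape == (2, 3)
# -------------------------------------------------------------------------------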
+ +import lightning.pytorch as pl +import torch +import torch.nn.functional as F +from transformers import AutoConfig, AutoModelForImageTextToText, AutoProcessor + +from nemo.collections.llm import fn +from nemo.lightning import io +from nemo.utils import logging + + +def masked_cross_entropy(logits, targets, mask=None): + """Cross entropy with optional mask""" + if mask is not None: + loss = F.cross_entropy(logits, targets, reduction='none') + return torch.mean(loss * mask) + else: + return F.cross_entropy(logits, targets) + + +class HFAutoModelForImageTextToText(pl.LightningModule, io.IOMixin, fn.FNMixin): + """Wrap's HF's AutoModelForImageTextToText in a pl.LightningModule + for use within NeMo""" + + def __init__( + self, + model_name='gpt2', + load_pretrained_weights=True, + processor=None, + loss_fn=masked_cross_entropy, + model_transform=None, + trust_remote_code=False, + default_dtype=torch.bfloat16, + load_in_4bit=False, + ): + super().__init__() + self.save_hyperparameters() + self.model_name = model_name + self._processor = processor + self.tokenizer = None + self.model = None + self.loss_fn = loss_fn + self.load_pretrained_weights = load_pretrained_weights + self.is_hf_model = True + self.model_transform = model_transform + self.trust_remote_code = trust_remote_code + self.load_in_4bit = load_in_4bit + + @property + def processor(self): + """Return's module processor""" + if self._processor is None: + self._processor = HFAutoModelForImageTextToText.configure_processor( + self.model_name, trust_remote_code=self.trust_remote_code + ) + return self._processor + + @processor.setter + def processor(self, value): + """Set's module's processor""" + assert self._processor is None + self._processor = value + + @staticmethod + def configure_processor(model_name, trust_remote_code=False): + """Initializes an AutoProcessor and returns the instance""" + return AutoProcessor.from_pretrained(model_name, trust_remote_code=trust_remote_code) + + def configure_model(self): + """Instantiates the model""" + # create all your layers here + if self.load_pretrained_weights: + self.model = AutoModelForImageTextToText.from_pretrained( + self.model_name, + torch_dtype='auto', + trust_remote_code=self.trust_remote_code, + load_in_4bit=self.load_in_4bit, + ) + else: + config = AutoConfig.from_pretrained(self.model_name, trust_remote_code=self.trust_remote_code) + dtype = getattr(config, 'torch_dtype', self.default_dtype) + self.model = AutoModelForImageTextToText.from_config( + config, torch_dtype=dtype, trust_remote_code=self.trust_remote_code + ) + self.model.train() + + def forward(self, batch): + """Runs forward with the model""" + return self.model(**batch) + + def training_step(self, batch): + """Run one training step""" + labels = batch.pop('labels').to(self.model.device) + loss_mask = batch.pop('loss_mask', None) + + outputs = self.forward(batch) + + logits = outputs.logits.float() + n_cls = logits.shape[-1] + logits = logits.view(-1, n_cls) + labels = labels.view(-1) + + assert logits.shape[-2] == labels.shape[-1], "Expected logits & labels to have the same length" + loss = self.loss_fn(logits, labels, loss_mask) + self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True) + return loss + + @torch.no_grad + def validation_step(self, batch, batch_idx): + """Run one validation step""" + labels = batch.pop('labels').to(self.model.device) + loss_mask = batch.pop('loss_mask', None) + + outputs = self.forward(**batch) + + logits = outputs.logits.float() + n_cls = logits.shape[-1] + logits 
= logits.view(-1, n_cls) + labels = labels.view(-1) + + assert logits.shape[-2] == labels.shape[-1], "Expected logits & labels to have the same length" + loss = self.loss_fn(logits, labels, loss_mask) + + self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True) + + def save_pretrained(self, path): + """Saves checkpoint using HF""" + assert self.model is not None, "Model has to be created first." + self.model.save_pretrained(path) + if self._processor is not None: + self._processor.save_pretrained(path) + else: + logging.warning("A processor wasn't created before to save.") + + @staticmethod + def extract_skipped_token_ids(tokenizer): + """Returns list of tokens to mask in labels""" + # qweb2-2b + QWEN_TOKENS = [ + '<|im_start|>', + '<|im_end|>', + '<|vision_start|>', + '<|vision_end|>', + '<|vision_pad|>', + '<|image_pad|>', + '<|video_pad|>', + '<|im_start|>', + '<|im_end|>', + '<|vision_start|>', + '<|vision_end|>', + '<|vision_pad|>', + '<|image_pad|>', + '<|video_pad|>', + ] + # llava-1.5-7b-hf, llava-v1.6-mistral-7b-hf + LLAVA_TOKENS = [ + "", + "", + ] + LLAMA_TOKENS = [ + '<|begin_of_text|>', + '<|end_of_text|>', + '<|finetune_right_pad_id|>', + '<|step_id|>', + '<|start_header_id|>', + '<|end_header_id|>', + '<|eom_id|>', + '<|eot_id|>', + '<|python_tag|>', + '<|image|>', + ] + PAD_TOKENS = set(QWEN_TOKENS + LLAVA_TOKENS + LLAMA_TOKENS) + tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) + skipped_token_ids = [] + for key, val in tokenizer.added_tokens_decoder.items(): + if str(val) in PAD_TOKENS: + skipped_token_ids.append(key) + return torch.IntTensor(list(set(skipped_token_ids))) diff --git a/tests/collections/vlm/hf/peft.py b/tests/collections/vlm/hf/peft.py new file mode 100644 index 000000000000..109bccfcfa1f --- /dev/null +++ b/tests/collections/vlm/hf/peft.py @@ -0,0 +1,128 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fiddle as fdl +import torch +from lightning.pytorch.loggers import WandbLogger + +from nemo import lightning as nl +from nemo.collections import llm, vlm + +DATA_PATH = "/home/TestData/vlm/rdr-items" + + +def mk_hf_vlm_dataset(processor, mbs, gbs): + skipped_tokens = vlm.HFAutoModelForImageTextToText.extract_skipped_token_ids(processor) + + def collate_fn(examples, processor): + def fmt(sample): + instruction = "Describe accurately the given image." 
+ conversation = [ + { + "role": "user", + "content": [{"type": "text", "text": instruction}, {"type": "image", "image": sample["image"]}], + }, + {"role": "assistant", "content": [{"type": "text", "text": sample["text"]}]}, + ] + return {"conversation": conversation, "images": [sample['image']]} + + text = [] + images = [] + for example in map(fmt, examples): + text.append( + processor.apply_chat_template( + example["conversation"], + tokenize=False, + add_generation_prompt=False, + ) + ) + images += example['images'] + + # Tokenize the text and process the images + batch = processor( + text=text, + images=images, + padding=True, + return_tensors="pt", + ) + + batch["pixel_values"] = batch["pixel_values"].to(torch.bfloat16) + + labels = batch["input_ids"].clone() + labels[torch.isin(labels, skipped_tokens)] = -100 + batch["labels"] = labels + return batch + + return vlm.HFDatasetDataModule( + DATA_PATH, + split="train[:10]", + micro_batch_size=mbs, + global_batch_size=gbs, + collate_fn=lambda x: collate_fn(x, processor=processor), + ) + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='Qwen/Qwen2-VL-2B-Instruct') + parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) + parser.add_argument('--devices', default=1) + parser.add_argument('--mbs', default=1) + parser.add_argument('--gbs', default=1) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--max-steps', type=int, default=100) + parser.add_argument('--wandb-project', type=str, default=None) + parser.add_argument('--disable-ckpt', action='store_false') + args = parser.parse_args() + + wandb = None + if args.wandb_project is not None: + model = '_'.join(args.model.split('/')[-2:]) + wandb = WandbLogger( + project=args.wandb_project, + name=f'{model}_dev{args.devices}_strat_{args.strategy}', + ) + grad_clip = 0.5 + if args.strategy == 'fsdp': + # See: https://github.com/Lightning-AI/pytorch-lightning/blob/8ad3e29816a63d8ce5c00ac104b14729a4176f4f/src/lightning/pytorch/plugins/precision/fsdp.py#L81 + grad_clip = None + use_dist_samp = False + processor = vlm.HFAutoModelForImageTextToText.configure_processor(args.model) + + llm.api.finetune( + model=vlm.HFAutoModelForImageTextToText(args.model), + data=mk_hf_vlm_dataset(processor, args.mbs, args.gbs), + trainer=nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator=args.accelerator, + strategy=args.strategy, + log_every_n_steps=1, + limit_val_batches=0.0, + num_sanity_val_steps=0, + accumulate_grad_batches=10, + gradient_clip_val=grad_clip, + use_distributed_sampler=use_dist_samp, + logger=wandb, + enable_checkpointing=args.disable_ckpt, + ), + optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), + log=None, + peft=llm.peft.LoRA( + target_modules=['*_proj'], + dim=16, + ), + ) From ff568cd1f7bbfa31d0dd562d97611aba6542c442 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Tue, 17 Dec 2024 23:47:18 +0100 Subject: [PATCH 064/128] ci: Bump release workflow (#11635) Signed-off-by: Oliver Koenig --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dcaac34901cd..2ddad31e159e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -28,7 +28,7 @@ on: jobs: release: - uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_release_library.yml@v0.15.1 + 
uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_release_library.yml@v0.17.3 with: release-ref: ${{ inputs.release-ref }} image-name: nemo_container From b9457dbe9ef92352671cd9aa58dddc105e2ab607 Mon Sep 17 00:00:00 2001 From: Somshubra Majumdar Date: Tue, 17 Dec 2024 17:43:34 -0800 Subject: [PATCH 065/128] Add fix docstring for speech commands (#11638) Signed-off-by: smajumdar --- tutorials/asr/Speech_Commands.ipynb | 1 + 1 file changed, 1 insertion(+) diff --git a/tutorials/asr/Speech_Commands.ipynb b/tutorials/asr/Speech_Commands.ipynb index c8a54e5135b2..927c0a15b76c 100644 --- a/tutorials/asr/Speech_Commands.ipynb +++ b/tutorials/asr/Speech_Commands.ipynb @@ -65,6 +65,7 @@ "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", + "## NOTE: TorchAudio installation may not work in all environments, please use Google Colab for best experience\n", "!pip install torchaudio>=0.13.0 -f https://download.pytorch.org/whl/torch_stable.html\n", "\n", "## Grab the config we'll use in this example\n", From 53b8eb4224e5b87d3bda1651d192ff402c561ad6 Mon Sep 17 00:00:00 2001 From: Weiqing Wang <164252040+weiqingw4ng@users.noreply.github.com> Date: Tue, 17 Dec 2024 23:42:04 -0800 Subject: [PATCH 066/128] Fixing Multi_Task_Adapters.ipynb by replacing canary2 with canary_custom (#11641) Signed-off-by: Weiqing Wang --- tutorials/asr/asr_adapters/Multi_Task_Adapters.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tutorials/asr/asr_adapters/Multi_Task_Adapters.ipynb b/tutorials/asr/asr_adapters/Multi_Task_Adapters.ipynb index 0d35feb11a9a..978793ae4a06 100644 --- a/tutorials/asr/asr_adapters/Multi_Task_Adapters.ipynb +++ b/tutorials/asr/asr_adapters/Multi_Task_Adapters.ipynb @@ -433,7 +433,7 @@ "outputs": [], "source": [ "@registered_prompt_format_fn\n", - "def canary2(cuts, tokenizer, inference: bool):\n", + "def canary_custom(cuts, tokenizer, inference: bool):\n", " \"\"\" Users can implement this as needed \"\"\"\n", " raise NotImplementedError()\n", "\n", @@ -449,7 +449,7 @@ }, "outputs": [], "source": [ - "temp = get_prompt_format_fn('canary2')\n", + "temp = get_prompt_format_fn('canary_custom')\n", "temp.__name__" ] }, @@ -549,7 +549,7 @@ "class CanaryPromptFormatterV2(model.prompt.__class__):\n", "\n", " # make sure to provide a new name\n", - " NAME: str = \"canary2\"\n", + " NAME: str = \"canary_custom\"\n", "\n", " # Make any changes as necessary.\n", " # For this demonstration, we will not change anything other than the name" @@ -565,7 +565,7 @@ "outputs": [], "source": [ "# Next, lets update the model's prompt formatter\n", - "model.change_prompt(\"canary2\")" + "model.change_prompt(\"canary_custom\")" ] }, { @@ -577,9 +577,9 @@ "source": [ "---\n", "\n", - "We have now successfully changed the prompt format to `canary2`.\n", + "We have now successfully changed the prompt format to `canary_custom`.\n", "\n", - "**Note**: It is important to know that when changing the prompt format, the name of the new prompt format class (`canary2` in this case) **has to match** the name of the prompt function registered with `@registered_prompt_format_fn`!" + "**Note**: It is important to know that when changing the prompt format, the name of the new prompt format class (`canary_custom` in this case) **has to match** the name of the prompt function registered with `@registered_prompt_format_fn`!" 
] }, { From 88291068c7e3d36932a982c76eb0be48310b0906 Mon Sep 17 00:00:00 2001 From: nasretdinovr Date: Wed, 18 Dec 2024 17:52:15 +0400 Subject: [PATCH 067/128] fixed config name in online augmentation tutorial (#11628) Signed-off-by: Rauf --- .../Speech_Enhancement_with_Online_Augmentation.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorials/audio/speech_enhancement/Speech_Enhancement_with_Online_Augmentation.ipynb b/tutorials/audio/speech_enhancement/Speech_Enhancement_with_Online_Augmentation.ipynb index e8b734537a41..41936e79675a 100644 --- a/tutorials/audio/speech_enhancement/Speech_Enhancement_with_Online_Augmentation.ipynb +++ b/tutorials/audio/speech_enhancement/Speech_Enhancement_with_Online_Augmentation.ipynb @@ -540,10 +540,10 @@ "config_dir = root_dir / 'conf'\n", "config_dir.mkdir(exist_ok=True)\n", "\n", - "config_path = config_dir / 'masking_online_aug.yaml'\n", + "config_path = config_dir / 'masking_with_online_augmentation.yaml'\n", "\n", "if not config_path.is_file():\n", - " !wget https://raw.githubusercontent.com/{GIT_USER}/NeMo/{BRANCH}/examples/audio/conf/masking_online_aug.yaml -P {config_dir.as_posix()}\n", + " !wget https://raw.githubusercontent.com/{GIT_USER}/NeMo/{BRANCH}/examples/audio/conf/masking_with_online_augmentation.yaml -P {config_dir.as_posix()}\n", "\n", "config = OmegaConf.load(config_path)\n", "config = OmegaConf.to_container(config, resolve=True)\n", From faa04edcd3d632f7a5c29f6855d537c601a1aa06 Mon Sep 17 00:00:00 2001 From: Ao Tang Date: Wed, 18 Dec 2024 10:47:27 -0500 Subject: [PATCH 068/128] fix default nodes (#11632) --- nemo/collections/llm/recipes/gemma2_27b.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/llm/recipes/gemma2_27b.py b/nemo/collections/llm/recipes/gemma2_27b.py index d6b41c0a221c..c03ae6c21aa3 100644 --- a/nemo/collections/llm/recipes/gemma2_27b.py +++ b/nemo/collections/llm/recipes/gemma2_27b.py @@ -62,7 +62,7 @@ def pretrain_recipe( virtual_pipeline_parallelism: Optional[int] = None, context_parallelism: int = 1, sequence_parallelism: bool = False, - num_nodes: int = 1, + num_nodes: int = 2, num_gpus_per_node: int = 8, max_steps: int = 1168251, precision: str = "bf16-mixed", From 276c0750ccf85f09dc7cd89f848f03a921355f9b Mon Sep 17 00:00:00 2001 From: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Date: Wed, 18 Dec 2024 19:44:08 +0200 Subject: [PATCH 069/128] add renormalize_blend_weights param (#11647) Signed-off-by: dimapihtar --- examples/nlp/language_modeling/conf/megatron_gpt_config.yaml | 1 + .../nlp/models/language_modeling/megatron_gpt_model.py | 1 + 2 files changed, 2 insertions(+) diff --git a/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml b/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml index da160390b431..e70f3ca418c4 100755 --- a/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml +++ b/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml @@ -293,6 +293,7 @@ model: shuffle_documents: True # Set to False to disable documents shuffling. Sample index will still be shuffled exchange_indices_distributed: False # Set to True to exchange indices via torch.distributed instead of filesystem data_cache_generation_only: False # Set to True to generate only the data cache and stop the training script + renormalize_blend_weights: False # Renormalize the blend weights to account for mid-level dataset oversampling done to ensure fulfillmenet of the of the requested number of samples. 
# Nsys profiling options nsys_profile: diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index a8ed1ee7d28f..c2c3431070a6 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -1660,6 +1660,7 @@ def build_train_valid_test_datasets(self): "mmap_bin_files": self.cfg.data.get("mmap_bin_files", True), "drop_last_partial_validation_sequence": self.cfg.data.get("validation_drop_last", True), "num_dataset_builder_threads": self.cfg.data.get("num_dataset_builder_threads", 1), + "renormalize_blend_weights": self.cfg.data.get("renormalize_blend_weights", False), "add_extra_token_to_sequence": add_extra_token, } From 368ed62474993fe3aa8f806f679182dc303fdc5c Mon Sep 17 00:00:00 2001 From: Taejin Park Date: Wed, 18 Dec 2024 09:46:48 -0800 Subject: [PATCH 070/128] Sortformer Diarizer 4spk v1 model PR Part 3: Speaker Diarization Mixin (#11511) * Adding diarization mixin for one click inference Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Resolving CodeQL and Pylint Signed-off-by: taejinp * Resolving CodeQL and Pylint - unsaved files resolved Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Unused package manifest_utils Signed-off-by: taejinp * Resolved diarization mixin test issues Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Removed commented lines Signed-off-by: taejinp * updating mixins code Signed-off-by: ipmedenn <65592416+ipmedenn@users.noreply.github.com> * Apply isort and black reformatting Signed-off-by: ipmedenn * fixing test_diarizartion.py Signed-off-by: ipmedenn <65592416+ipmedenn@users.noreply.github.com> * moving diarization postprocessing-related stuff to vad_utils.py Signed-off-by: ipmedenn <65592416+ipmedenn@users.noreply.github.com> * Apply isort and black reformatting Signed-off-by: ipmedenn * Resolving PyLint Signed-off-by: ipmedenn <65592416+ipmedenn@users.noreply.github.com> * Apply isort and black reformatting Signed-off-by: ipmedenn * fixing batch_idx issue in sortformer_diar_models.py Signed-off-by: ipmedenn <65592416+ipmedenn@users.noreply.github.com> * adding sync_dist=True for sortformer validation Signed-off-by: ipmedenn <65592416+ipmedenn@users.noreply.github.com> * Apply isort and black reformatting Signed-off-by: tango4j * Reflecting the comments from PR Signed-off-by: taejinp * Reflecting the comments from PR 2nd Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j * Resolved a codeQL unused variable Signed-off-by: taejinp * Now moved existance check after Signed-off-by: taejinp * Apply isort and black reformatting Signed-off-by: tango4j --------- Signed-off-by: taejinp Signed-off-by: tango4j Signed-off-by: ipmedenn <65592416+ipmedenn@users.noreply.github.com> Signed-off-by: ipmedenn Co-authored-by: tango4j Co-authored-by: ipmedenn <65592416+ipmedenn@users.noreply.github.com> Co-authored-by: ipmedenn --- .../neural_diarizer/e2e_diarize_speech.py | 105 +--- .../asr/data/audio_to_diar_label.py | 5 + .../asr/models/sortformer_diar_models.py | 172 +++++- .../asr/parts/mixins/diarization.py | 493 ++++++++++++++++++ nemo/collections/asr/parts/utils/vad_utils.py | 108 ++++ .../common/parts/preprocessing/collections.py | 61 ++- .../speaker_tasks/mixins/test_diarization.py | 271 ++++++++++ 7 files changed, 1108 insertions(+), 107 
deletions(-) create mode 100644 nemo/collections/asr/parts/mixins/diarization.py create mode 100644 tests/collections/speaker_tasks/mixins/test_diarization.py diff --git a/examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py b/examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py index 147d7a3aa002..60600b59db59 100644 --- a/examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py +++ b/examples/speaker_tasks/diarization/neural_diarizer/e2e_diarize_speech.py @@ -24,7 +24,7 @@ Usage for diarization inference: -The end-to-end speaker diarization model can be specified by either "model_path" or "pretrained_name". +The end-to-end speaker diarization model can be specified by "model_path". Data for diarization is fed through the "dataset_manifest". By default, post-processing is bypassed, and only binarization is performed. If you want to reproduce DER scores reported on NeMo model cards, you need to apply post-processing steps. @@ -45,45 +45,32 @@ import lightning.pytorch as pl import optuna import torch -import yaml from omegaconf import OmegaConf from pytorch_lightning import seed_everything -from tqdm import tqdm from nemo.collections.asr.metrics.der import score_labels from nemo.collections.asr.models import SortformerEncLabelModel -from nemo.collections.asr.parts.utils.speaker_utils import audio_rttm_map, timestamps_to_pyannote_object -from nemo.collections.asr.parts.utils.vad_utils import ts_vad_post_processing +from nemo.collections.asr.parts.utils.speaker_utils import ( + audio_rttm_map, + get_uniqname_from_filepath, + timestamps_to_pyannote_object, +) +from nemo.collections.asr.parts.utils.vad_utils import ( + PostProcessingParams, + load_postprocessing_from_yaml, + predlist_to_timestamps, +) from nemo.core.config import hydra_runner seed_everything(42) torch.backends.cudnn.deterministic = True -@dataclass -class PostProcessingParams: - """ - Postprocessing parameters for end-to-end speaker diarization models. - These parameters can significantly affect DER performance depending on the evaluation style and the dataset. - It is recommended to tune these parameters based on the evaluation style and the dataset - to achieve the desired DER performance. - """ - - onset: float = 0.5 # Onset threshold for detecting the beginning and end of a speech - offset: float = 0.5 # Offset threshold for detecting the end of a speech - pad_onset: float = 0.0 # Adding durations before each speech segment - pad_offset: float = 0.0 # Adding durations after each speech segment - min_duration_on: float = 0.0 # Threshold for small non-speech deletion - min_duration_off: float = 0.0 # Threshold for short speech segment deletion - - @dataclass class DiarizationConfig: """Diarization configuration parameters for inference.""" model_path: Optional[str] = None # Path to a .nemo file - pretrained_name: Optional[str] = None # Name of a pretrained model - audio_dir: Optional[str] = None # Path to a directory which contains audio files dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest postprocessing_yaml: Optional[str] = None # Path to a yaml file for postprocessing configurations @@ -114,36 +101,6 @@ class DiarizationConfig: optuna_n_trials: int = 100000 -def load_postprocessing_from_yaml(postprocessing_yaml: PostProcessingParams = None) -> PostProcessingParams: - """ - Load postprocessing parameters from a YAML file. - - Args: - postprocessing_yaml (str): - Path to a YAML file for postprocessing configurations. 
- - Returns: - postprocessing_params (dataclass): - Postprocessing parameters loaded from the YAML file. - """ - # Add PostProcessingParams as a field - postprocessing_params = OmegaConf.structured(PostProcessingParams()) - if postprocessing_yaml is None: - logging.info( - f"No postprocessing YAML file has been provided. Default postprocessing configurations will be applied." - ) - else: - # Load postprocessing params from the provided YAML file - with open(postprocessing_yaml, 'r') as file: - yaml_params = yaml.safe_load(file)['parameters'] - # Update the postprocessing_params with the loaded values - logging.info(f"Postprocessing YAML file '{postprocessing_yaml}' has been loaded.") - for key, value in yaml_params.items(): - if hasattr(postprocessing_params, key): - setattr(postprocessing_params, key, value) - return postprocessing_params - - def optuna_suggest_params(postprocessing_cfg: PostProcessingParams, trial: optuna.Trial) -> PostProcessingParams: """ Suggests hyperparameters for postprocessing using Optuna. @@ -303,26 +260,19 @@ def convert_pred_mat_to_segments( """ batch_pred_ts_segs, all_hypothesis, all_reference, all_uems = [], [], [], [] cfg_vad_params = OmegaConf.structured(postprocessing_cfg) - pp_message = "Bypass PP, Running Binarization" if bypass_postprocessing else "Running post-processing" - for sample_idx, (uniq_id, audio_rttm_values) in tqdm( - enumerate(audio_rttm_map_dict.items()), total=len(audio_rttm_map_dict), desc=pp_message - ): - spk_ts = [] - offset, duration = audio_rttm_values['offset'], audio_rttm_values['duration'] - speaker_assign_mat = batch_preds_list[sample_idx].squeeze(dim=0) - speaker_timestamps = [[] for _ in range(speaker_assign_mat.shape[-1])] - for spk_id in range(speaker_assign_mat.shape[-1]): - ts_mat = ts_vad_post_processing( - speaker_assign_mat[:, spk_id], - cfg_vad_params=cfg_vad_params, - unit_10ms_frame_count=unit_10ms_frame_count, - bypass_postprocessing=bypass_postprocessing, - ) - ts_mat = ts_mat + offset - ts_mat = torch.clamp(ts_mat, min=offset, max=(offset + duration)) - ts_seg_list = ts_mat.tolist() - speaker_timestamps[spk_id].extend(ts_seg_list) - spk_ts.append(ts_seg_list) + total_speaker_timestamps = predlist_to_timestamps( + batch_preds_list=batch_preds_list, + audio_rttm_map_dict=audio_rttm_map_dict, + cfg_vad_params=cfg_vad_params, + unit_10ms_frame_count=unit_10ms_frame_count, + bypass_postprocessing=bypass_postprocessing, + ) + for sample_idx, (uniq_id, audio_rttm_values) in enumerate(audio_rttm_map_dict.items()): + speaker_timestamps = total_speaker_timestamps[sample_idx] + if audio_rttm_values.get("uniq_id", None) is not None: + uniq_id = audio_rttm_values["uniq_id"] + else: + uniq_id = get_uniqname_from_filepath(audio_rttm_values["audio_filepath"]) all_hypothesis, all_reference, all_uems = timestamps_to_pyannote_object( speaker_timestamps, uniq_id, @@ -332,7 +282,6 @@ def convert_pred_mat_to_segments( all_uems, out_rttm_dir, ) - batch_pred_ts_segs.append(spk_ts) return all_hypothesis, all_reference, all_uems @@ -348,10 +297,8 @@ def main(cfg: DiarizationConfig) -> Union[DiarizationConfig]: if cfg.random_seed: pl.seed_everything(cfg.random_seed) - if cfg.model_path is None and cfg.pretrained_name is None: - raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!") - if cfg.audio_dir is None and cfg.dataset_manifest is None: - raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!") + if cfg.model_path is None: + raise ValueError("cfg.model_path cannot be None. 
Please specify the path to the model.") # setup GPU torch.set_float32_matmul_precision(cfg.matmul_precision) diff --git a/nemo/collections/asr/data/audio_to_diar_label.py b/nemo/collections/asr/data/audio_to_diar_label.py index 3f4ae61e0d08..1dbe68589c0a 100644 --- a/nemo/collections/asr/data/audio_to_diar_label.py +++ b/nemo/collections/asr/data/audio_to_diar_label.py @@ -1120,6 +1120,11 @@ def parse_rttm_for_targets_and_lens(self, rttm_file, offset, duration, target_le Example of seg_target: [[0., 1.], [0., 1.], [1., 1.], [1., 0.], [1., 0.], ..., [0., 1.]] """ + if rttm_file in [None, '']: + num_seg = torch.max(target_len) + targets = torch.zeros(num_seg, self.max_spks) + return targets + with open(rttm_file, 'r') as f: rttm_lines = f.readlines() diff --git a/nemo/collections/asr/models/sortformer_diar_models.py b/nemo/collections/asr/models/sortformer_diar_models.py index 71de10cc2f79..483ff5328ad0 100644 --- a/nemo/collections/asr/models/sortformer_diar_models.py +++ b/nemo/collections/asr/models/sortformer_diar_models.py @@ -13,23 +13,29 @@ # limitations under the License. import itertools +import os import random from collections import OrderedDict -from typing import Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Tuple, Union +import numpy as np import torch from hydra.utils import instantiate from omegaconf import DictConfig from pytorch_lightning import Trainer +from torch.utils.data import DataLoader from tqdm import tqdm from nemo.collections.asr.data.audio_to_diar_label import AudioToSpeechE2ESpkDiarDataset from nemo.collections.asr.data.audio_to_diar_label_lhotse import LhotseAudioToSpeechE2ESpkDiarDataset from nemo.collections.asr.metrics.multi_binary_acc import MultiBinaryAccuracy from nemo.collections.asr.models.asr_model import ExportableEncDecModel +from nemo.collections.asr.parts.mixins.diarization import DiarizeConfig, SpkDiarizationMixin from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations from nemo.collections.asr.parts.utils.asr_multispeaker_utils import get_ats_targets, get_pil_targets +from nemo.collections.asr.parts.utils.speaker_utils import generate_diarization_output_lines +from nemo.collections.asr.parts.utils.vad_utils import ts_vad_post_processing from nemo.collections.common.data.lhotse import get_lhotse_dataloader_from_config from nemo.core.classes import ModelPT from nemo.core.classes.common import PretrainedModelInfo @@ -40,7 +46,7 @@ __all__ = ['SortformerEncLabelModel'] -class SortformerEncLabelModel(ModelPT, ExportableEncDecModel): +class SortformerEncLabelModel(ModelPT, ExportableEncDecModel, SpkDiarizationMixin): """ Encoder class for Sortformer diarization model. Model class creates training, validation methods for setting up data performing model forward pass. 
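# --- Editorial note (not part of the patch) -----------------------------------
# Illustrative sketch of the one-click `diarize()` API that this patch adds to
# SortformerEncLabelModel via the SpkDiarizationMixin. The checkpoint path and
# audio paths are placeholders; with postprocessing_yaml=None only the default
# binarization-style post-processing is applied.
from nemo.collections.asr.models import SortformerEncLabelModel

diar_model = SortformerEncLabelModel.restore_from("/path/to/sortformer_4spk.nemo")
diar_model.eval()

rttm_lines, speaker_probs = diar_model.diarize(
    audio=["/data/session0.wav", "/data/session1.wav"],  # paths or a manifest file
    batch_size=2,
    include_tensor_outputs=True,   # also return raw per-frame speaker probabilities
    postprocessing_yaml=None,
)
# rttm_lines[i] holds the speech segments with speaker indices for file i, and
# speaker_probs[i] is a (1, frames, num_speakers) tensor of sigmoid outputs.
# -------------------------------------------------------------------------------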
@@ -108,7 +114,6 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None): self.streaming_mode = self._cfg.get("streaming_mode", False) self.save_hyperparameters("cfg") self._init_eval_metrics() - speaker_inds = list(range(self._cfg.max_num_of_spks)) self.speaker_permutations = torch.tensor(list(itertools.permutations(speaker_inds))) # Get all permutations @@ -119,7 +124,6 @@ def _init_loss_weights(self): raise ValueError(f"weights for PIL {pil_weight} and ATS {ats_weight} cannot sum to 0") self.pil_weight = pil_weight / (pil_weight + ats_weight) self.ats_weight = ats_weight / (pil_weight + ats_weight) - logging.info(f"Normalized weights for PIL {self.pil_weight} and ATS {self.ats_weight}") def _init_eval_metrics(self): """ @@ -269,6 +273,113 @@ def forward_infer(self, emb_seq): preds = self.sortformer_modules.forward_speaker_sigmoids(trans_emb_seq) return preds + def _diarize_forward(self, batch: Any): + """ + A counterpart of `_transcribe_forward` function in ASR. + This function is a wrapper for forward pass functions for compataibility + with the existing classes. + + Args: + batch (Any): The input batch containing audio signal and audio signal length. + + Returns: + preds (torch.Tensor): Sorted tensor containing Sigmoid values for predicted speaker labels. + Shape: (batch_size, diar_frame_count, num_speakers) + """ + with torch.no_grad(): + preds = self.forward(audio_signal=batch[0], audio_signal_length=batch[1]) + preds = preds.to('cpu') + torch.cuda.empty_cache() + return preds + + def _diarize_output_processing( + self, outputs, uniq_ids, diarcfg: DiarizeConfig + ) -> Union[List[List[str]], Tuple[List[List[str]], List[torch.Tensor]]]: + """ + Processes the diarization outputs and generates RTTM (Real-time Text Markup) files. + TODO: Currently, this function is not included in mixin test because of + `ts_vad_post_processing` function. + (1) Implement a test-compatible function + (2) `vad_utils.py` has `predlist_to_timestamps` function that is close to this function. + Needs to consolute differences and implement the test-compatible function. + + Args: + outputs (torch.Tensor): Sorted tensor containing Sigmoid values for predicted speaker labels. + Shape: (batch_size, diar_frame_count, num_speakers) + uniq_ids (List[str]): List of unique identifiers for each audio file. + diarcfg (DiarizeConfig): Configuration object for diarization. + + Returns: + diar_output_lines_list (List[List[str]]): A list of lists, where each inner list contains + the RTTM lines for a single audio file. + preds_list (List[torch.Tensor]): A list of tensors containing the diarization outputs + for each audio file. 
+ + """ + preds_list, diar_output_lines_list = [], [] + if outputs.shape[0] == 1: # batch size = 1 + preds_list.append(outputs) + else: + preds_list.extend(torch.split(outputs, [1] * outputs.shape[0])) + + for sample_idx, uniq_id in enumerate(uniq_ids): + offset = self._diarize_audio_rttm_map[uniq_id]['offset'] + speaker_assign_mat = preds_list[sample_idx].squeeze(dim=0) + speaker_timestamps = [[] for _ in range(speaker_assign_mat.shape[-1])] + for spk_id in range(speaker_assign_mat.shape[-1]): + ts_mat = ts_vad_post_processing( + speaker_assign_mat[:, spk_id], + cfg_vad_params=diarcfg.postprocessing_params, + unit_10ms_frame_count=int(self._cfg.encoder.subsampling_factor), + bypass_postprocessing=False, + ) + ts_mat = ts_mat + offset + ts_seg_raw_list = ts_mat.tolist() + ts_seg_list = [[round(stt, 2), round(end, 2)] for (stt, end) in ts_seg_raw_list] + speaker_timestamps[spk_id].extend(ts_seg_list) + + diar_output_lines = generate_diarization_output_lines( + speaker_timestamps=speaker_timestamps, model_spk_num=len(speaker_timestamps) + ) + diar_output_lines_list.append(diar_output_lines) + if diarcfg.include_tensor_outputs: + return (diar_output_lines_list, preds_list) + else: + return diar_output_lines_list + + def _setup_diarize_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader': + """ + Setup function for a temporary data loader which wraps the provided audio file. + + Args: + config: A python dictionary which contains the following keys: + - manifest_filepath: Path to the manifest file containing audio file paths + and corresponding speaker labels. + + Returns: + A pytorch DataLoader for the given audio file(s). + """ + if 'manifest_filepath' in config: + manifest_filepath = config['manifest_filepath'] + batch_size = config['batch_size'] + else: + manifest_filepath = os.path.join(config['temp_dir'], 'manifest.json') + batch_size = min(config['batch_size'], len(config['paths2audio_files'])) + + dl_config = { + 'manifest_filepath': manifest_filepath, + 'sample_rate': self.preprocessor._sample_rate, + 'num_spks': config.get('num_spks', self._cfg.max_num_of_spks), + 'batch_size': batch_size, + 'shuffle': False, + 'soft_label_thres': 0.5, + 'session_len_sec': config['session_len_sec'], + 'num_workers': config.get('num_workers', min(batch_size, os.cpu_count() - 1)), + 'pin_memory': True, + } + temporary_datalayer = self.__setup_dataloader_from_config(config=DictConfig(dl_config)) + return temporary_datalayer + def process_signal(self, audio_signal, audio_signal_length): """ Extract audio features from time-series signal for further processing in the model. @@ -291,7 +402,7 @@ def process_signal(self, audio_signal, audio_signal_length): - processed_signal_length (torch.Tensor): The length of each processed signal. Shape: (batch_size,) """ - audio_signal = audio_signal.to(self.device) + audio_signal, audio_signal_length = audio_signal.to(self.device), audio_signal_length.to(self.device) audio_signal = (1 / (audio_signal.max() + self.eps)) * audio_signal processed_signal, processed_signal_length = self.preprocessor( input_signal=audio_signal, length=audio_signal_length @@ -372,7 +483,7 @@ def _get_aux_train_evaluations(self, preds, targets, target_lens) -> dict: } return train_metrics - def training_step(self, batch: list) -> dict: + def training_step(self, batch: list, batch_idx: int) -> dict: """ Performs a single training step. @@ -382,6 +493,7 @@ def training_step(self, batch: list) -> dict: - audio_signal_length (torch.Tensor): The length of each audio signal in the batch. 
- targets (torch.Tensor): The target labels for the batch. - target_lens (torch.Tensor): The length of each target sequence in the batch. + batch_idx (int): The index of the current batch. Returns: (dict): A dictionary containing the 'loss' key with the calculated loss value. @@ -439,7 +551,7 @@ def _get_aux_validation_evaluations(self, preds, targets, target_lens) -> dict: } return val_metrics - def validation_step(self, batch: list, dataloader_idx: int = 0): + def validation_step(self, batch: list, batch_idx: int, dataloader_idx: int = 0): """ Performs a single validation step. @@ -571,10 +683,46 @@ def test_batch( logging.info(f"Batch Recall MEAN: {torch.mean(torch.tensor(self.batch_recall_list))}") logging.info(f"Batch ATS F1Acc. MEAN: {torch.mean(torch.tensor(self.batch_f1_accs_ats_list))}") + def on_validation_epoch_end(self) -> Optional[dict[str, dict[str, torch.Tensor]]]: + """Run validation with sync_dist=True.""" + return super().on_validation_epoch_end(sync_metrics=True) + + @torch.no_grad() def diarize( self, - ): - """One-clieck runner function for diarization.""" - # TODO: A direct one-click runner function that generates - # speaker labels from audio file path lists. - raise NotImplementedError + audio: Union[str, List[str], np.ndarray, DataLoader], + batch_size: int = 1, + include_tensor_outputs: bool = False, + postprocessing_yaml: Optional[str] = None, + num_workers: int = 0, + verbose: bool = True, + override_config: Optional[DiarizeConfig] = None, + ) -> Union[List[List[str]], Tuple[List[List[str]], List[torch.Tensor]]]: + """One-click runner function for diarization. + + Args: + audio: (a single or list) of paths to audio files or path to a manifest file. + batch_size: (int) Batch size to use during inference. + Bigger will result in better throughput performance but would use more memory. + include_tensor_outputs: (bool) Include raw speaker activity probabilities to the output. + See Returns: for more details. + postprocessing_yaml: Optional(str) Path to .yaml file with postprocessing parameters. + num_workers: (int) Number of workers for DataLoader. + verbose: (bool) Whether to display tqdm progress bar. + override_config: (Optional[DiarizeConfig]) A config to override the default config. + + Returns: + *if include_tensor_outputs is False: A list of lists of speech segments with a corresponding speaker index, + in format "[begin_seconds, end_seconds, speaker_index]". + *if include_tensor_outputs is True: A tuple of the above list + and list of tensors of raw speaker activity probabilities. + """ + return super().diarize( + audio=audio, + batch_size=batch_size, + include_tensor_outputs=include_tensor_outputs, + postprocessing_yaml=postprocessing_yaml, + num_workers=num_workers, + verbose=verbose, + override_config=override_config, + ) diff --git a/nemo/collections/asr/parts/mixins/diarization.py b/nemo/collections/asr/parts/mixins/diarization.py new file mode 100644 index 000000000000..fe8f6bbecb21 --- /dev/null +++ b/nemo/collections/asr/parts/mixins/diarization.py @@ -0,0 +1,493 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import tempfile +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from torch.utils.data import DataLoader +from tqdm import tqdm + +from nemo.collections.asr.parts.utils.speaker_utils import audio_rttm_map, get_uniqname_from_filepath +from nemo.collections.asr.parts.utils.vad_utils import PostProcessingParams, load_postprocessing_from_yaml +from nemo.collections.common.data.utils import move_data_to_device +from nemo.utils import logging + +GenericDiarizationType = Union[List[Any], List[List[Any]], Tuple[Any], Tuple[List[Any]]] + + +@dataclass +class InternalDiarizeConfig: + """Internal diarization configuration parameters for diarization inference.""" + + # Internal values + device: Optional[torch.device] = None + dtype: Optional[torch.dtype] = None + training_mode: bool = False + logging_level: Optional[Any] = None + + # Preprocessor values + dither_value: float = 0.0 + pad_to_value: int = 0 + + # Scratch space + temp_dir: Optional[str] = None + manifest_filepath: Optional[str] = None + + +@dataclass +class DiarizeConfig: + """Configuration parameters for diarization inference.""" + + session_len_sec: float = -1 # End-to-end diarization session length limit in seconds + batch_size: int = 1 + num_workers: int = 1 + postprocessing_yaml: Optional[str] = None # Path to a yaml file for postprocessing configurations + verbose: bool = True + include_tensor_outputs: bool = False + postprocessing_params: PostProcessingParams = None + + # Utility + _internal: Optional[InternalDiarizeConfig] = None + + +def get_value_from_diarization_config(diarcfg, key, default): + """ + Utility function to get a value from the diarization config. + If the value is not present in the diarization config, the default value is returned. + + Args: + diarcfg: A dataclass that represents the diarization config. + key: The name of the arg to retrieve. + default: The default value to return if the key is not present in the diarization config. + + Returns: + The value of the key in the diarization config or the default value. + """ + if hasattr(diarcfg, key): + return getattr(diarcfg, key) + else: + logging.debug( + f"Using default value of {default} for {key} because it is not present \ + in the diarization config {diarcfg}." + ) + return default + + +class SpkDiarizationMixin(ABC): + """ + An abstract class for diarize-able models. + + Creates a template function `diarize()` that provides an interface to perform transcription of audio tensors or + filepaths. + + The following abstract classes must be implemented by the subclass: + + - `_setup_diarize_dataloader()`: + Setup the dataloader for diarization. Receives the output from + `_diarize_input_manifest_processing()`. + + - `_diarize_forward()`: + Implements the model's custom forward pass to return outputs that are processed by + `_diarize_output_processing()`. + + - `_diarize_output_processing()`: + Implements the post processing of the model's outputs to return the results to + the user. 
The result can be a list of objects, list of list of objects, tuple of objects, tuple of list of + objects, or a dict of list of objects. + + """ + + def __init__(self): + self._diarize_audio_rttm_map = {} + + @torch.inference_mode() + def diarize( + self, + audio: Union[str, List[str], np.ndarray, DataLoader], + batch_size: int = 1, + include_tensor_outputs: bool = False, + postprocessing_yaml: Optional[str] = None, + num_workers: int = 1, + verbose: bool = False, + override_config: Optional[DiarizeConfig] = None, + **config_kwargs, + ) -> GenericDiarizationType: + """ + Takes paths to audio files and returns speaker labels + """ + + if override_config is None: + postprocessing_params = load_postprocessing_from_yaml(postprocessing_yaml) + diarize_cfg = DiarizeConfig( + batch_size=batch_size, + num_workers=num_workers, + verbose=verbose, + include_tensor_outputs=include_tensor_outputs, + postprocessing_yaml=postprocessing_yaml, + postprocessing_params=postprocessing_params, + **config_kwargs, + ) + else: + if not hasattr(override_config, '_internal'): + raise ValueError( + "`diarize_cfg must have an `_internal` argument, which must be of an object of type " + "InternalDiarizeConfig or its subclass." + ) + + if override_config._internal is None: + override_config._internal = InternalDiarizeConfig() + + diarize_cfg = override_config + + # Add new internal config + if diarize_cfg._internal is None: + diarize_cfg._internal = InternalDiarizeConfig() + else: + # Check if internal config is valid + if not isinstance(diarize_cfg._internal, InternalDiarizeConfig): + raise ValueError( + "`diarize_cfg._internal` must be of an object of type InternalDiarizeConfig or " "its subclass" + ) + + # Hold the results here + results = None + + try: + generator = self.diarize_generator(audio, override_config=diarize_cfg) + + for processed_outputs in generator: + # Store results + if isinstance(processed_outputs, list): + # Create a results of the same type as each element in processed_outputs + if results is None: + results = [] + + results.extend(processed_outputs) + + elif isinstance(processed_outputs, tuple): + # Create a results of the same type as each element in processed_outputs + if results is None: + results = tuple([[] for _ in processed_outputs]) + + # If nested list structure + if isinstance(processed_outputs[0], list): + for i, processed_output in enumerate(processed_outputs): + results[i].extend(processed_output) + else: + # If flat list structure + if len(processed_outputs) != len(results): + raise RuntimeError( + f"The number of elements in the result ({len(results)}) does not " + f"match the results of the current batch ({len(processed_outputs)})." + ) + + for i, processed_output in enumerate(processed_outputs): + results[i].append(processed_output) + + except StopIteration: + pass + + return results + + def diarize_generator(self, audio, override_config: Optional[DiarizeConfig]): + """ + A generator version of `diarize` function. + """ + if override_config is None: + override_config = DiarizeConfig() + + if not hasattr(override_config, '_internal'): + raise ValueError( + "`diarize_cfg must have an `_internal` argument, which must be of an object of type " + "InternalDiarizeConfig or its subclass." 
+ ) + + # Add new internal config + if override_config._internal is None: + override_config._internal = InternalDiarizeConfig() + else: + # Check if internal config is valid + if not isinstance(override_config._internal, InternalDiarizeConfig): + raise ValueError( + "`diarize_cfg._internal` must be of an object of type InternalDiarizeConfig or " "its subclass" + ) + + diarize_cfg = override_config + + try: + # Initialize and assert the diarization environment + self._diarize_on_begin(audio, diarize_cfg) + + # Work in tmp directory - will store manifest file there + with tempfile.TemporaryDirectory() as tmpdir: + diarize_cfg._internal.temp_dir = tmpdir + + # Create a DataLoader if not already present + if not isinstance(audio, DataLoader): + dataloader = self._diarize_input_processing(audio, diarize_cfg) + else: + dataloader = audio + + if hasattr(diarize_cfg, 'verbose'): + verbose = diarize_cfg.verbose + else: + verbose = True + + for batch_idx, test_batch in enumerate(tqdm(dataloader, desc="Diarizing", disable=not verbose)): + # Move batch to device + test_batch = move_data_to_device(test_batch, diarize_cfg._internal.device) + uniq_ids = list(self._diarize_audio_rttm_map.keys())[ + batch_idx * diarize_cfg.batch_size : (batch_idx + 1) * diarize_cfg.batch_size + ] + + # Run forward pass + pred_outputs = self._diarize_forward(test_batch) + processed_outputs = self._diarize_output_processing(pred_outputs, uniq_ids, diarize_cfg) + + # Yield results if generator + yield processed_outputs + + # clear up memory + del test_batch, pred_outputs, processed_outputs + torch.cuda.empty_cache() + + finally: + # set mode back to its original value + self._diarize_on_end(diarize_cfg) + + def _input_audio_to_rttm_processing(self, audio_files: List[str]) -> List[Dict[str, Union[str, float]]]: + """ + Generate manifest style dict if `audio` is a list of paths to audio files. + + Args: + audio_files: A list of paths to audio files. + + Returns: + audio_rttm_map_dict A list of manifest style dicts. + """ + audio_rttm_map_dict = {} + for audio_file in audio_files: + uniq_id = get_uniqname_from_filepath(audio_file) + entry = { + 'uniq_id': uniq_id, + 'audio_filepath': audio_file, + 'offset': 0.0, + 'duration': None, + 'text': '-', + 'label': 'infer', + } + audio_rttm_map_dict[uniq_id] = entry + return audio_rttm_map_dict + + def _diarize_on_begin(self, audio: Union[str, List[str]], diarcfg: DiarizeConfig): + """ + Internal function to setup the model for diarization. Perform all setup and pre-checks here. + + Args: + audio (Union[str, List[str]]): Of type `GenericDiarizationType` + diarcfg (DiarizeConfig): An instance of `DiarizeConfig`. 
+ """ + if audio is None: + return {} + + if isinstance(audio, str): + audio = [audio] + + if isinstance(audio, list) and len(audio) == 0: + return {} + + # Set num_workers + num_workers = get_value_from_diarization_config(diarcfg, 'num_workers', default=1) + + if num_workers is None: + _batch_size = get_value_from_diarization_config(diarcfg, 'batch_size', default=1) + num_workers = min(_batch_size, os.cpu_count() - 1) + + # Assign num_workers if available as key in diarcfg + if hasattr(diarcfg, 'num_workers'): + diarcfg.num_workers = num_workers + + # Model's mode and device + diarcfg._internal.training_mode = self.training + + # Switch model to evaluation mode + if hasattr(self, 'preprocessor'): + if hasattr(self.preprocessor, 'featurizer') and hasattr(self.preprocessor.featurizer, 'dither'): + diarcfg._internal.dither_value = self.preprocessor.featurizer.dither + self.preprocessor.featurizer.dither = 0.0 + + if hasattr(self.preprocessor, 'featurizer') and hasattr(self.preprocessor.featurizer, 'pad_to'): + diarcfg._internal.pad_to_value = self.preprocessor.featurizer.pad_to + self.preprocessor.featurizer.pad_to = 0 + + # Switch model to evaluation mode + self.eval() + + # Disable logging + diarcfg._internal.logging_level = logging.get_verbosity() + logging.set_verbosity(logging.WARNING) + + def _diarize_input_processing(self, audio, diarcfg: DiarizeConfig): + """ + Internal function to process the input audio data and return a DataLoader. This function is called by + `diarize()` and `diarize_generator()` to setup the input data for diarization. + + Args: + audio: Of type `GenericDiarizationType` + diarcfg: The diarization config dataclass. Subclasses can change this to a different dataclass if needed. + + Returns: + A DataLoader object that is used to iterate over the input audio data. + """ + if isinstance(audio, (list, tuple)): + if len(audio) == 0: + raise ValueError("Input `audio` is empty") + else: + # Assume it is a single variable, so wrap it in a list + audio = [audio] + + # Check if audio is a list of strings (filepaths or manifests) + if isinstance(audio[0], str): + if len(audio) == 1 and audio[0].endswith('.json') or audio[0].endswith('.jsonl'): + # Assume it is a path to a manifest file + diarcfg._internal.manifest_filepath = audio[0] + self._diarize_audio_rttm_map = audio_rttm_map(audio[0]) + audio_files = [] + for uniq_id, meta_dict in self._diarize_audio_rttm_map.items(): + audio_files.append(meta_dict['audio_filepath']) + else: + # Make `audio_files` a list of audio file paths + audio_files = list(audio) + self._diarize_audio_rttm_map = self._input_audio_to_rttm_processing(audio_files=audio_files) + + tmp_dir = diarcfg._internal.temp_dir + ds_config = self._diarize_input_manifest_processing(audio_files, tmp_dir, diarcfg) + + temp_dataloader = self._setup_diarize_dataloader(ds_config) + return temp_dataloader + + else: + raise ValueError( + f"Input `audio` is of type {type(audio[0])}. " "Only `str` (path to audio file) is supported as input." + ) + + def _diarize_input_manifest_processing( + self, audio_files: List[str], temp_dir: str, diarcfg: DiarizeConfig + ) -> Dict[str, Any]: + """ + Internal function to process the input audio filepaths and return a config dict for the dataloader. + + Args: + audio_files: A list of string filepaths for audio files. + temp_dir: A temporary directory to store intermediate files. + diarcfg: The diarization config dataclass. Subclasses can change this to a different dataclass if needed. 
+ + Returns: + A config dict that is used to setup the dataloader for diarization. + """ + with open(os.path.join(temp_dir, 'manifest.json'), 'w', encoding='utf-8') as fp: + for audio_file in audio_files: + if isinstance(audio_file, str): + entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': ''} + fp.write(json.dumps(entry) + '\n') + elif isinstance(audio_file, dict): + fp.write(json.dumps(audio_file) + '\n') + else: + raise ValueError( + f"Input `audio` is of type {type(audio_file)}. " + "Only `str` (path to audio file) or `dict` are supported as input." + ) + + ds_config = { + 'paths2audio_files': audio_files, + 'batch_size': get_value_from_diarization_config(diarcfg, 'batch_size', 1), + 'temp_dir': temp_dir, + 'session_len_sec': get_value_from_diarization_config(diarcfg, 'session_len_sec', diarcfg.session_len_sec), + 'num_workers': get_value_from_diarization_config(diarcfg, 'num_workers', 1), + } + + return ds_config + + @abstractmethod + def _setup_diarize_dataloader(self, config: Dict) -> DataLoader: + """ + Internal function to setup the dataloader for diarization. This function is called by + `diarize()` and `diarize_generator()` to setup the input data for diarization. + + Args: + config: A config dict that is used to setup the dataloader for diarization. + It can be generated by `_diarize_input_manifest_processing()`. + + Returns: + A DataLoader object that is used to iterate over the input audio data. + """ + pass + + @abstractmethod + def _diarize_forward(self, batch: Any): + """ + Internal function to perform the model's custom forward pass to return outputs that are processed by + `_diarize_output_processing()`. + This function is called by `diarize()` and `diarize_generator()` to perform the model's forward pass. + + Args: + batch: A batch of input data from the data loader that is used to perform the model's forward pass. + + Returns: + The model's outputs that are processed by `_diarize_output_processing()`. + """ + pass + + @abstractmethod + def _diarize_output_processing(self, outputs, uniq_ids, diarcfg: DiarizeConfig) -> GenericDiarizationType: + """ + Internal function to process the model's outputs to return the results to the user. This function is called by + `diarize()` and `diarize_generator()` to process the model's outputs. + + Args: + outputs: The model's outputs that are processed by `_diarize_forward()`. + uniq_ids: List of unique recording identificators in batch + diarcfg: The diarization config dataclass. Subclasses can change this to a different dataclass if needed. + + Returns: + The output can be a list of + objects, list of list of objects, tuple of objects, tuple of list of objects. + Its type is defined in `GenericDiarizationType`. + """ + pass + + def _diarize_on_end(self, diarcfg: DiarizeConfig): + """ + Internal function to teardown the model after transcription. Perform all teardown and post-checks here. + + Args: + diarcfg: The diarization config dataclass. Subclasses can change this to a different dataclass if needed. 
+ """ + # set mode back to its original value + self.train(mode=diarcfg._internal.training_mode) + + if hasattr(self, 'preprocessor'): + if hasattr(self.preprocessor, 'featurizer') and hasattr(self.preprocessor.featurizer, 'dither'): + self.preprocessor.featurizer.dither = diarcfg._internal.dither_value + + if hasattr(self.preprocessor, 'featurizer') and hasattr(self.preprocessor.featurizer, 'pad_to'): + self.preprocessor.featurizer.pad_to = diarcfg._internal.pad_to_value + + if diarcfg._internal.logging_level is not None: + logging.set_verbosity(diarcfg._internal.logging_level) diff --git a/nemo/collections/asr/parts/utils/vad_utils.py b/nemo/collections/asr/parts/utils/vad_utils.py index 83a811ee4adb..fc29129295c0 100644 --- a/nemo/collections/asr/parts/utils/vad_utils.py +++ b/nemo/collections/asr/parts/utils/vad_utils.py @@ -18,6 +18,7 @@ import multiprocessing import os import shutil +from dataclasses import dataclass from itertools import repeat from math import ceil, floor from pathlib import Path @@ -29,6 +30,7 @@ import numpy as np import pandas as pd import torch +import yaml from omegaconf import DictConfig, OmegaConf from pyannote.core import Annotation, Segment from pyannote.metrics import detection @@ -44,6 +46,53 @@ """ +@dataclass +class PostProcessingParams: + """ + Postprocessing parameters for end-to-end speaker diarization models. + These parameters can significantly affect DER performance depending on the evaluation style and the dataset. + It is recommended to tune these parameters based on the evaluation style and the dataset + to achieve the desired DER performance. + """ + + onset: float = 0.5 # Onset threshold for detecting the beginning and end of a speech + offset: float = 0.5 # Offset threshold for detecting the end of a speech + pad_onset: float = 0.0 # Adding durations before each speech segment + pad_offset: float = 0.0 # Adding durations after each speech segment + min_duration_on: float = 0.0 # Threshold for small non-speech deletion + min_duration_off: float = 0.0 # Threshold for short speech segment deletion + + +def load_postprocessing_from_yaml(postprocessing_yaml: str = None) -> PostProcessingParams: + """ + Load postprocessing parameters from a YAML file. + + Args: + postprocessing_yaml (str): + Path to a YAML file for postprocessing configurations. + + Returns: + postprocessing_params (dataclass): + Postprocessing parameters loaded from the YAML file. + """ + # Add PostProcessingParams as a field + postprocessing_params = OmegaConf.structured(PostProcessingParams()) + if postprocessing_yaml is None: + logging.info( + f"No postprocessing YAML file has been provided. Default postprocessing configurations will be applied." + ) + else: + # Load postprocessing params from the provided YAML file + with open(postprocessing_yaml, 'r') as file: + yaml_params = yaml.safe_load(file)['parameters'] + # Update the postprocessing_params with the loaded values + logging.info(f"Postprocessing YAML file '{postprocessing_yaml}' has been loaded.") + for key, value in yaml_params.items(): + if hasattr(postprocessing_params, key): + setattr(postprocessing_params, key, value) + return postprocessing_params + + def prepare_manifest(config: dict) -> str: """ Perform VAD on long audio snippet might cause CUDA out of memory issue. 
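For reference, a minimal usage sketch of the post-processing helpers added above, assuming a YAML file with a top-level `parameters` key and an 80 ms frame unit (both values are illustrative and not taken from this patch):

    import torch
    from nemo.collections.asr.parts.utils.vad_utils import (
        load_postprocessing_from_yaml,
        ts_vad_post_processing,
    )

    # Passing None falls back to the default PostProcessingParams values.
    post_params = load_postprocessing_from_yaml("postprocessing.yaml")

    spk_probs = torch.rand(1000)  # per-frame speech probability for one speaker
    segments = ts_vad_post_processing(
        spk_probs,
        cfg_vad_params=post_params,
        unit_10ms_frame_count=8,  # 8 x 10 ms = 80 ms per model frame
        bypass_postprocessing=False,
    )
    # `segments` holds [start_sec, end_sec] rows for the detected speech regions.
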
@@ -1785,3 +1834,62 @@ def ts_vad_post_processing( cfg_vad_params.pad_offset = 0.0 speech_segments = binarization(ts_vad_binary_frames, cfg_vad_params) return speech_segments + + +def predlist_to_timestamps( + batch_preds_list: List[torch.Tensor], + audio_rttm_map_dict: Dict[str, Dict[str, Union[float, int]]], + cfg_vad_params: OmegaConf, + unit_10ms_frame_count: int, + bypass_postprocessing: bool = False, + precision: int = 2, +) -> List[List[float]]: + """ + Converts floating point number tensor diarization results to timestamps using VAD style + post-processing methods. + + Args: + batch_preds_list (List[Tensor]): + Tensor diarization results for each sample. + Dimension: [(num_frames, num_speakers), ...] + audio_rttm_map_dict (Dict[str, Dict[str, Union[float, int]]]): + Dictionary mapping unique audio file names to their rttm file entries. + cfg_vad_params (OmegaConf): + Configuration (omega config) of VAD parameters. + unit_10ms_frame_count (int): + an integer indicating the number of 10ms frames in a unit. + For example, if unit_10ms_frame_count is 8, then each frame is 0.08 seconds. + bypass_postprocessing (bool, optional): + If True, diarization post-processing will be bypassed. + precision (int, optional): + The number of decimal places to round the timestamps. Defaults to 2. + + Returns: + total_speaker_timestamps (List[List[List[float]]]): + A list of lists of timestamp tensors for each session (utterance) + Levels: + - Session-level (uniq_id) [session1_list, session2_list,...] + - Segment-level: [[start1, end1], [start2, end2],...]] + - List of start and end timestamp [start, end] + """ + total_speaker_timestamps = [] + pp_message = "Binarization" if bypass_postprocessing else "Post-processing" + for sample_idx, (uniq_id, audio_rttm_values) in tqdm( + enumerate(audio_rttm_map_dict.items()), total=len(audio_rttm_map_dict), desc=pp_message + ): + offset = audio_rttm_values['offset'] + speaker_assign_mat = batch_preds_list[sample_idx].squeeze(dim=0) + speaker_timestamps = [[] for _ in range(speaker_assign_mat.shape[-1])] + for spk_id in range(speaker_assign_mat.shape[-1]): + ts_mat = ts_vad_post_processing( + speaker_assign_mat[:, spk_id], + cfg_vad_params=cfg_vad_params, + unit_10ms_frame_count=unit_10ms_frame_count, + bypass_postprocessing=bypass_postprocessing, + ) + ts_mat = ts_mat + offset + ts_seg_raw_list = ts_mat.tolist() + ts_seg_list = [[round(stt, precision), round(end, precision)] for (stt, end) in ts_seg_raw_list] + speaker_timestamps[spk_id].extend(ts_seg_list) + total_speaker_timestamps.append(speaker_timestamps) + return total_speaker_timestamps diff --git a/nemo/collections/common/parts/preprocessing/collections.py b/nemo/collections/common/parts/preprocessing/collections.py index 12f5a9b3ecff..afd35e01c993 100644 --- a/nemo/collections/common/parts/preprocessing/collections.py +++ b/nemo/collections/common/parts/preprocessing/collections.py @@ -22,6 +22,7 @@ import pandas as pd from nemo.collections.common.parts.preprocessing import manifest, parsers +from nemo.collections.common.parts.preprocessing.manifest import get_full_path from nemo.utils import logging, logging_mode @@ -1513,14 +1514,6 @@ def __init__( for item in manifest.item_iter(manifests_files, parse_func=self.__parse_item_rttm): # Training mode - rttm_labels = [] - with open(item['rttm_file'], 'r') as f: - for index, rttm_line in enumerate(f.readlines()): - rttm = rttm_line.strip().split() - start = round(float(rttm[3]), round_digits) - end = round(float(rttm[4]), round_digits) + 
round(float(rttm[3]), round_digits) - speaker = rttm[7] - rttm_labels.append('{} {} {}'.format(start, end, speaker)) audio_files.append(item['audio_file']) uniq_ids.append(item['uniq_id']) durations.append(item['duration']) @@ -1540,6 +1533,13 @@ def __init__( def __parse_item_rttm(self, line: str, manifest_file: str) -> Dict[str, Any]: """Parse each rttm file and save it to in Dict format""" item = json.loads(line) + + if 'offset' not in item or item['offset'] is None: + item['offset'] = 0 + + # If the name `audio_file` is not present in the manifest file, replace it. + if 'audio_file' in item: + pass if 'audio_filename' in item: item['audio_file'] = item.pop('audio_filename') elif 'audio_filepath' in item: @@ -1548,25 +1548,54 @@ def __parse_item_rttm(self, line: str, manifest_file: str) -> Dict[str, Any]: raise ValueError( f"Manifest file has invalid json line " f"structure: {line} without proper audio file key." ) + + # Audio file handling depending on the types if isinstance(item['audio_file'], list): - item['audio_file'] = [os.path.expanduser(audio_file_path) for audio_file_path in item['audio_file']] + for single_audio_file in item['audio_file']: + audio_file_list.append(get_full_path(audio_file=single_audio_file, manifest_file=manifest_file)) + item['audio_file'] = audio_file_list + elif isinstance(item['audio_file'], str): + item['audio_file'] = get_full_path(audio_file=item['audio_file'], manifest_file=manifest_file) + if not os.path.exists(item['audio_file']): + raise FileNotFoundError(f"Audio file not found: {item['audio_file']}") + else: + raise ValueError( + f"Manifest file has invalid json line " + f"structure: {line} without proper audio file value: {item['audio_file']}." + ) + + # If the name `rttm_file` is not present in the manifest file, replace it or assign None. 
+ if 'rttm_file' in item: + pass + elif 'rttm_filename' in item: + item['rttm_file'] = item.pop('rttm_filename') + elif 'rttm_filepath' in item: + item['rttm_file'] = item.pop('rttm_filepath') else: - item['audio_file'] = os.path.expanduser(item['audio_file']) + item['rttm_file'] = None + + # If item['rttm_file'] is not None and the RTTM file exists, get the full path + if item['rttm_file'] is not None: + item['rttm_file'] = get_full_path(audio_file=item['rttm_file'], manifest_file=manifest_file) + if not os.path.exists(item['rttm_file']): + raise FileNotFoundError(f"RTTM file not found: {item['rttm_file']}") + + # Handling `uniq_id` string + if 'uniq_id' not in item: + item['uniq_id'] = os.path.splitext(os.path.basename(item['audio_file']))[0] - if not isinstance(item['audio_file'], list): - if 'uniq_id' not in item: - item['uniq_id'] = os.path.splitext(os.path.basename(item['audio_file']))[0] - elif 'uniq_id' not in item: + if not isinstance(item['uniq_id'], str): raise ValueError(f"Manifest file has invalid json line " f"structure: {line} without proper uniq_id key.") if 'duration' not in item: raise ValueError(f"Manifest file has invalid json line " f"structure: {line} without proper duration key.") + item = dict( audio_file=item['audio_file'], uniq_id=item['uniq_id'], duration=item['duration'], - rttm_file=item['rttm_filepath'], - offset=item.get('offset', None), + rttm_file=item['rttm_file'], + offset=item.get('offset', 0), ) return item diff --git a/tests/collections/speaker_tasks/mixins/test_diarization.py b/tests/collections/speaker_tasks/mixins/test_diarization.py new file mode 100644 index 000000000000..84ec29d84437 --- /dev/null +++ b/tests/collections/speaker_tasks/mixins/test_diarization.py @@ -0,0 +1,271 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import os +from dataclasses import dataclass +from typing import Any, Dict, List + +import pytest +import torch +from torch.utils.data import DataLoader, Dataset + +from nemo.collections.asr.parts.mixins.diarization import DiarizeConfig, SpkDiarizationMixin + + +class DummyModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.encoder = torch.nn.Linear(1, 1) + + self.execution_count = 0 + self.flag_begin = False + + def forward(self, x): + # Input: [1, 1] Output = [1, 1 + out = self.encoder(x) + return out + + +@pytest.mark.with_downloads() +@pytest.fixture() +def audio_files(test_data_dir): + """ + Returns a list of audio files for testing. 
+ """ + import soundfile as sf + + audio_file1 = os.path.join(test_data_dir, "an4_speaker", "an4", "wav", "an4_clstk", "fash", "an251-fash-b.wav") + audio_file2 = os.path.join(test_data_dir, "an4_speaker", "an4", "wav", "an4_clstk", "ffmm", "cen1-ffmm-b.wav") + + audio1, _ = sf.read(audio_file1, dtype='float32') + audio2, _ = sf.read(audio_file2, dtype='float32') + + return audio1, audio2 + + +class DiarizableDummy(DummyModel, SpkDiarizationMixin): + def _diarize_on_begin(self, audio, diarcfg: DiarizeConfig): + super()._diarize_on_begin(audio, diarcfg) + self.flag_begin = True + + def _diarize_input_manifest_processing(self, audio_files: List[str], temp_dir: str, diarcfg: DiarizeConfig): + # Create a dummy manifest + manifest_path = os.path.join(temp_dir, 'dummy_manifest.json') + with open(manifest_path, 'w', encoding='utf-8') as fp: + for audio_file in audio_files: + entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': ''} + fp.write(json.dumps(entry) + '\n') + + ds_config = { + 'paths2audio_files': audio_files, + 'batch_size': diarcfg.batch_size, + 'temp_dir': temp_dir, + 'session_len_sec': diarcfg.session_len_sec, + 'num_workers': diarcfg.num_workers, + } + return ds_config + + def _setup_diarize_dataloader(self, config: Dict) -> DataLoader: + class DummyDataset(Dataset): + def __init__(self, audio_files: List[str], config: Dict): + self.audio_files = audio_files + self.config = config + + def __getitem__(self, index): + data = self.audio_files[index] + data = torch.tensor([float(data)]).view(1) + return data + + def __len__(self): + return len(self.audio_files) + + dataset = DummyDataset(config['paths2audio_files'], config) + + return DataLoader( + dataset=dataset, + batch_size=config['batch_size'], + num_workers=config['num_workers'], + pin_memory=False, + drop_last=False, + ) + + def _diarize_forward(self, batch: Any): + output = self(batch) + return output + + def _diarize_output_processing(self, outputs, uniq_ids, diarcfg: DiarizeConfig): + self.execution_count += 1 + + result = [] + for output in outputs: + result.append(float(output.item())) + + if hasattr(diarcfg, 'output_type') and diarcfg.output_type == 'dict': + results = {'output': result} + return results + + if hasattr(diarcfg, 'output_type') and diarcfg.output_type == 'dict2': + results = [{'output': res} for res in result] + return results + + if hasattr(diarcfg, 'output_type') and diarcfg.output_type == 'tuple': + result = tuple(result) + return result + + # Pass list of results by default + return result + + +class DummyDataset(Dataset): + def __init__(self, audio_tensors: List[str], config: Dict = None): + self.audio_tensors = audio_tensors + self.config = config + + def __getitem__(self, index): + data = self.audio_tensors[index] + samples = torch.tensor(data) + # Calculate seq length + seq_len = torch.tensor(samples.shape[0], dtype=torch.long) + + # Dummy text tokens + targets = torch.tensor([0], dtype=torch.long) + targets_len = torch.tensor(1, dtype=torch.long) + return (samples, seq_len, targets, targets_len) + + def __len__(self): + return len(self.audio_tensors) + + +@pytest.fixture() +def dummy_model(): + return DiarizableDummy() + + +class TestSpkDiarizationMixin: + @pytest.mark.unit + def test_constructor_non_instance(self): + model = DummyModel() + assert not isinstance(model, SpkDiarizationMixin) + assert not hasattr(model, 'diarize') + + @pytest.mark.unit + def test_diarize(self, dummy_model): + dummy_model = dummy_model.eval() + dummy_model.encoder.weight.data.fill_(1.0) + 
dummy_model.encoder.bias.data.fill_(0.0) + + audio = ['1.0', '2.0', '3.0'] + outputs = dummy_model.diarize(audio, batch_size=1) + assert len(outputs) == 3 + assert outputs[0] == 1.0 + assert outputs[1] == 2.0 + assert outputs[2] == 3.0 + + @pytest.mark.unit + def test_diarize_generator(self, dummy_model): + dummy_model = dummy_model.eval() + dummy_model.encoder.weight.data.fill_(1.0) + dummy_model.encoder.bias.data.fill_(0.0) + + audio = ['1.0', '2.0', '3.0'] + + diarize_config = DiarizeConfig(batch_size=1) + generator = dummy_model.diarize_generator(audio, override_config=diarize_config) + + outputs = [] + index = 1 + for result in generator: + outputs.extend(result) + assert len(result) == 1 + assert len(outputs) == index + index += 1 + + assert len(outputs) == 3 + assert outputs[0] == 1.0 + assert outputs[1] == 2.0 + assert outputs[2] == 3.0 + + @pytest.mark.unit + def test_diarize_generator_explicit_stop_check(self, dummy_model): + dummy_model = dummy_model.eval() + dummy_model.encoder.weight.data.fill_(1.0) + dummy_model.encoder.bias.data.fill_(0.0) + + audio = ['1.0', '2.0', '3.0'] + + diarize_config = DiarizeConfig(batch_size=1) + generator = dummy_model.diarize_generator(audio, override_config=diarize_config) + + outputs = [] + index = 1 + while True: + try: + result = next(generator) + except StopIteration: + break + outputs.extend(result) + assert len(result) == 1 + assert len(outputs) == index + index += 1 + + assert len(outputs) == 3 + assert outputs[0] == 1.0 + assert outputs[1] == 2.0 + assert outputs[2] == 3.0 + + @pytest.mark.unit + def test_diarize_check_flags(self, dummy_model): + dummy_model = dummy_model.eval() + + audio = ['1.0', '2.0', '3.0'] + dummy_model.diarize(audio, batch_size=1) + assert dummy_model.flag_begin + + @pytest.mark.unit + def test_transribe_override_config_incorrect(self, dummy_model): + # Not subclassing DiarizeConfig + @dataclass + class OverrideConfig: + batch_size: int = 1 + output_type: str = 'dict' + + dummy_model = dummy_model.eval() + + audio = [1.0, 2.0, 3.0] + override_cfg = OverrideConfig(batch_size=1, output_type='dict') + with pytest.raises(ValueError): + _ = dummy_model.diarize(audio, override_config=override_cfg) + + @pytest.mark.unit + def test_transribe_override_config_correct(self, dummy_model): + @dataclass + class OverrideConfig(DiarizeConfig): + output_type: str = 'dict' + verbose: bool = False + + dummy_model = dummy_model.eval() + dummy_model.encoder.weight.data.fill_(1.0) + dummy_model.encoder.bias.data.fill_(0.0) + + audio = ['1.0', '2.0', '3.0'] + override_cfg = OverrideConfig(batch_size=1, output_type='list') + outputs = dummy_model.diarize(audio, override_config=override_cfg) + + assert isinstance(outputs, list) + assert len(outputs) == 3 + assert outputs[0] == 1.0 + assert outputs[1] == 2.0 + assert outputs[2] == 3.0 From 2b3b158a05d8b3c2a8bbc514a6212ec8a7da6988 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Wed, 18 Dec 2024 16:47:44 -0500 Subject: [PATCH 071/128] Fix peft inference (#11568) * fix peft inference (trainer not attached) Signed-off-by: Chen Cui * enable greedy generation Signed-off-by: Chen Cui * add ci test for PEFT inference Signed-off-by: Chen Cui * Apply isort and black reformatting Signed-off-by: cuichenx * typo Signed-off-by: Chen Cui * fix test Signed-off-by: Chen Cui * handle remove_special_tokens Signed-off-by: Chen Cui * move llama3configci to common file Signed-off-by: Chen Cui * Apply isort and black reformatting Signed-off-by: cuichenx * incoming commit Signed-off-by: Chen Cui * address comment 
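For context, the greedy-generation change listed above comes down to forwarding top_k (together with top_p=0.0) into Megatron's CommonInferenceParams; a minimal sketch with illustrative values, not taken from this patch:

    from megatron.core.inference.common_inference_params import CommonInferenceParams

    # top_k=1 with top_p=0.0 makes sampling deterministic (greedy decoding).
    greedy_params = CommonInferenceParams(
        temperature=1.0,
        top_p=0.0,
        top_k=1,
        num_tokens_to_generate=3,
    )
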
Signed-off-by: Chen Cui --------- Signed-off-by: Chen Cui Signed-off-by: cuichenx Co-authored-by: cuichenx --- .github/workflows/cicd-main.yml | 72 +++++++++++++++--------- nemo/collections/llm/inference/base.py | 7 ++- nemo/lightning/pytorch/callbacks/peft.py | 5 +- nemo/lightning/pytorch/utils.py | 12 ++++ scripts/llm/generate.py | 11 +++- tests/collections/llm/common.py | 10 ++++ tests/collections/llm/gpt_finetuning.py | 11 +--- tests/collections/llm/peft/lora_merge.py | 11 ---- 8 files changed, 87 insertions(+), 52 deletions(-) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index fce4ef2acfbd..25e0c5252100 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -4254,7 +4254,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4264,7 +4264,7 @@ jobs: --mbs 1 python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4283,7 +4283,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4293,7 +4293,7 @@ jobs: --mbs 2 python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4312,7 +4312,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4322,7 +4322,7 @@ jobs: --mbs 2 python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4341,7 +4341,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4351,7 +4351,7 @@ jobs: --mbs 2 python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4370,7 +4370,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4380,7 +4380,7 @@ jobs: --mbs 1 --packed python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path 
/home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4399,7 +4399,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4409,7 +4409,7 @@ jobs: --mbs 1 python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4428,7 +4428,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4438,7 +4438,7 @@ jobs: --mbs 2 python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4457,7 +4457,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4467,7 +4467,7 @@ jobs: --mbs 2 python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4486,7 +4486,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4496,7 +4496,7 @@ jobs: --mbs 2 python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4514,7 +4514,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4524,7 +4524,7 @@ jobs: --mbs 1 --packed python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4542,7 +4542,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4552,7 +4552,7 @@ jobs: --mbs 1 --packed python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path 
/home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4569,7 +4569,7 @@ jobs: RUNNER: self-hosted-azure SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4579,7 +4579,7 @@ jobs: --mbs 1 --packed python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4597,7 +4597,7 @@ jobs: SCRIPT: | python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 3 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4608,7 +4608,7 @@ jobs: --chat_dataset_path /home/TestData/nemo2_data/chat python tests/collections/llm/gpt_finetuning.py \ - --restore_path /home/TestData/nemo2_ckpt/llama_68M \ + --restore_path /home/TestData/nemo2_ckpt/llama_68M_v2 \ --devices 2 \ --max_steps 6 \ --experiment_dir /tmp/nemo2_gpt_finetune/${{ github.run_id }} \ @@ -4702,9 +4702,26 @@ jobs: SCRIPT: | python tests/collections/llm/peft/lora_merge.py \ - --lora_checkpoint_path=/home/TestData/nemo2_ckpt/llama_lora_ci_checkpoint/ \ + --lora_checkpoint_path=/home/TestData/nemo2_ckpt/llama_lora_ci_checkpoint_v2/ \ --output_path=/tmp/nemo2_lora_merge/${{ github.run_id }} + L2_NEMO_2_LoRA_Inference: + needs: [cicd-test-container-setup] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_NEMO_2_LoRA_Inference') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure-gpus-1 + SCRIPT: | + + python scripts/llm/generate.py \ + --model_path /home/TestData/nemo2_ckpt/llama_lora_ci_checkpoint_v2/ \ + --tp 1 \ + --pp 1 \ + --devices 1 \ + --top_p 0.0 \ + --top_k 1 \ + --num_tokens_to_generate 3 + L2_NeMo_2_NeMo_Mcore_Mixtral_bitexact: needs: [cicd-test-container-setup] uses: ./.github/workflows/_test_template.yml @@ -4900,6 +4917,7 @@ jobs: - L2_NeMo_2_Mistral_LoRA_TP1PP1_MBS1 - L2_NeMo_2_Mistral_LoRA_TP2PP1_MBS1 - L2_NEMO_2_LoRA_MERGE + - L2_NEMO_2_LoRA_Inference - L2_NeMo_2_Mixtral_Pretraining - L2_PTQ_Llama2_FP8 - L2_Community_LLM_Checkpoints_tests_Llama3 diff --git a/nemo/collections/llm/inference/base.py b/nemo/collections/llm/inference/base.py index 6c89a1b42b15..dd53d97b21ad 100644 --- a/nemo/collections/llm/inference/base.py +++ b/nemo/collections/llm/inference/base.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import inspect import json from pathlib import Path from typing import Optional, Union @@ -61,7 +61,10 @@ def detokenize(self, tokens, remove_special_tokens=False): Returns: str: The detokenized string. 
""" - return self.tokenizer.ids_to_text(tokens, remove_special_tokens) + if 'remove_special_tokens' in inspect.signature(self.tokenizer.ids_to_text).parameters: + return self.tokenizer.ids_to_text(tokens, remove_special_tokens) + else: + return self.tokenizer.ids_to_text(tokens) def tokenize(self, prompt): """ diff --git a/nemo/lightning/pytorch/callbacks/peft.py b/nemo/lightning/pytorch/callbacks/peft.py index 09a0885ead17..d138117e4599 100644 --- a/nemo/lightning/pytorch/callbacks/peft.py +++ b/nemo/lightning/pytorch/callbacks/peft.py @@ -32,6 +32,7 @@ from nemo.lightning.megatron_parallel import MegatronParallel from nemo.lightning.pytorch.callbacks.model_transform import ModelTransform from nemo.lightning.pytorch.optim.megatron import MegatronOptimizerModule +from nemo.lightning.pytorch.utils import is_trainer_attached from nemo.utils import logging from nemo.utils.callbacks.dist_ckpt_io import AsyncCompatibleCheckpointIO @@ -105,7 +106,7 @@ def __call__(self, model: nn.Module) -> nn.Module: else: model.walk(self.transform) - if hasattr(model, "trainer") and model.trainer.state.fn != TrainerFn.FITTING: + if is_trainer_attached(model) and model.trainer.state.fn != TrainerFn.FITTING: self.freeze_model(model) return model @@ -128,7 +129,7 @@ def freeze_model(self, model: nn.Module) -> None: model.module.freeze() else: model.freeze() - if hasattr(model, "trainer") and model.trainer.state.fn == TrainerFn.FITTING: + if is_trainer_attached(model) and model.trainer.state.fn == TrainerFn.FITTING: model.train(mode=True) def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: str) -> None: diff --git a/nemo/lightning/pytorch/utils.py b/nemo/lightning/pytorch/utils.py index 045cf79b5777..77fd702da410 100644 --- a/nemo/lightning/pytorch/utils.py +++ b/nemo/lightning/pytorch/utils.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import lightning.pytorch as pl import torch @@ -55,3 +56,14 @@ def dtype_from_hf(config): return dtype_from_str(torch_dtype) else: raise ValueError("torch_dtype is not of type str/torch.dtype") + + +def is_trainer_attached(model: pl.LightningModule): + """ + Returns true if trainer is attached to a model + """ + try: + trainer = model.trainer + return True + except (AttributeError, RuntimeError): + return False diff --git a/scripts/llm/generate.py b/scripts/llm/generate.py index 56653aa3bbb5..f01c384604a2 100644 --- a/scripts/llm/generate.py +++ b/scripts/llm/generate.py @@ -72,6 +72,12 @@ def get_args(): default=0.95, help="""top_p to be used in megatron.core.inference.common_inference_params.CommonInferenceParams""", ) + parser.add_argument( + "--top_k", + type=float, + default=0, + help="""top_k to be used in megatron.core.inference.common_inference_params.CommonInferenceParams""", + ) parser.add_argument( "--num_tokens_to_generate", type=int, @@ -118,7 +124,10 @@ def get_args(): prompts=prompts, trainer=trainer, inference_params=CommonInferenceParams( - temperature=args.temperature, top_p=args.top_p, num_tokens_to_generate=args.num_tokens_to_generate + temperature=args.temperature, + top_p=args.top_p, + top_k=args.top_k, + num_tokens_to_generate=args.num_tokens_to_generate, ), text_only=True, ) diff --git a/tests/collections/llm/common.py b/tests/collections/llm/common.py index 8e93c9c84c9e..f8015950aa93 100644 --- a/tests/collections/llm/common.py +++ b/tests/collections/llm/common.py @@ -13,6 +13,7 @@ # limitations under the License. 
import os +from dataclasses import dataclass import lightning.pytorch as pl import nemo_run as run @@ -191,3 +192,12 @@ def verify_precision(tensor: torch.Tensor) -> None: assert tensor.dtype == precision return verify_precision + + +@dataclass +class Llama3ConfigCI(llm.Llama3Config8B): + seq_length: int = 2048 + num_layers: int = 2 + hidden_size: int = 768 + ffn_hidden_size: int = 3072 + num_attention_heads: int = 8 diff --git a/tests/collections/llm/gpt_finetuning.py b/tests/collections/llm/gpt_finetuning.py index be5331c32f3b..384faa383435 100644 --- a/tests/collections/llm/gpt_finetuning.py +++ b/tests/collections/llm/gpt_finetuning.py @@ -22,17 +22,10 @@ from nemo.collections import llm from nemo.collections.llm.gpt.data.packed_sequence import PackedSequenceSpecs from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer - -## NOTE: This script is present for github-actions testing only. +from tests.collections.llm.common import Llama3ConfigCI -@dataclass -class Llama3ConfigCI(llm.Llama3Config8B): - seq_length: int = 2048 - num_layers: int = 2 - hidden_size: int = 768 - ffn_hidden_size: int = 3072 - num_attention_heads: int = 8 +## NOTE: This script is present for github-actions testing only. def get_args(): diff --git a/tests/collections/llm/peft/lora_merge.py b/tests/collections/llm/peft/lora_merge.py index 2ca7390ea7e6..0e0c9361c4f5 100644 --- a/tests/collections/llm/peft/lora_merge.py +++ b/tests/collections/llm/peft/lora_merge.py @@ -12,20 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. import argparse -from dataclasses import dataclass - from nemo.collections import llm -@dataclass -class Llama3ConfigCI(llm.Llama3Config8B): - seq_length: int = 2048 - num_layers: int = 2 - hidden_size: int = 768 - ffn_hidden_size: int = 3072 - num_attention_heads: int = 8 - - def get_args(): parser = argparse.ArgumentParser(description='Merge LoRA weights with base LLM') parser.add_argument('--lora_checkpoint_path', type=str, help="Path to finetuned LORA checkpoint") From 45f2a4c131639dea093de4cff5543becf0174730 Mon Sep 17 00:00:00 2001 From: Somshubra Majumdar Date: Wed, 18 Dec 2024 18:53:06 -0800 Subject: [PATCH 072/128] Add fix docstring for speech commands (#11659) Signed-off-by: smajumdar --- tutorials/asr/Voice_Activity_Detection.ipynb | 1 + 1 file changed, 1 insertion(+) diff --git a/tutorials/asr/Voice_Activity_Detection.ipynb b/tutorials/asr/Voice_Activity_Detection.ipynb index fb3cef1b44ea..aa81b79ebd94 100644 --- a/tutorials/asr/Voice_Activity_Detection.ipynb +++ b/tutorials/asr/Voice_Activity_Detection.ipynb @@ -34,6 +34,7 @@ "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", + "## NOTE: TorchAudio installation may not work in all environments, please use Google Colab for best experience\n", "!pip install torchaudio>=0.13.0 -f https://download.pytorch.org/whl/torch_stable.html\n", "\n", "## Grab the config we'll use in this example\n", From 90f6fb75ece9e5cc67e7e767e5004499aa6adfa6 Mon Sep 17 00:00:00 2001 From: Huiying Date: Wed, 18 Dec 2024 19:21:10 -0800 Subject: [PATCH 073/128] update nemo container version for notebooks (#11651) Signed-off-by: Huiying Li --- tutorials/llm/llama-3/nemo2-sft-peft/README.rst | 4 ++-- tutorials/llm/llama-3/nemo2-sft-peft/nemo2-peft.ipynb | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorials/llm/llama-3/nemo2-sft-peft/README.rst 
b/tutorials/llm/llama-3/nemo2-sft-peft/README.rst index 7adf2777db2c..d1bd7b87759c 100644 --- a/tutorials/llm/llama-3/nemo2-sft-peft/README.rst +++ b/tutorials/llm/llama-3/nemo2-sft-peft/README.rst @@ -20,7 +20,7 @@ Requirements * Software Requirements * Use the latest [NeMo Framework Container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/nemo/tags) . Note that you must be logged in to the container registry to view this page. - * This notebook uses the container: `nvcr.io/nvidia/nemo:dev`. + * This notebook is tested on the container: `nvcr.io/nvidia/nemo:24.12-rc0`. * Get your Hugging Face [access token](https://huggingface.co/docs/hub/en/security-tokens), which will be used to obtain the tokenizer required during training. * NeMo 2.0 and NeMo-Run @@ -42,7 +42,7 @@ Start the NeMo Framework Container --rm -it \ -v ${PWD}:/workspace \ -w /workspace \ - nvcr.io/nvidia/nemo:dev bash + nvcr.io/nvidia/nemo:24.12-rc0 bash Once you are inside the container, you can run `nvidia-smi` to verify that the GPUs are accessible. diff --git a/tutorials/llm/llama-3/nemo2-sft-peft/nemo2-peft.ipynb b/tutorials/llm/llama-3/nemo2-sft-peft/nemo2-peft.ipynb index aa463e2b84be..b3393d133a45 100644 --- a/tutorials/llm/llama-3/nemo2-sft-peft/nemo2-peft.ipynb +++ b/tutorials/llm/llama-3/nemo2-sft-peft/nemo2-peft.ipynb @@ -533,7 +533,7 @@ "\n", "2. [NeMo-Run GitHub repo](https://github.com/NVIDIA/NeMo-Run/)\n", "\n", - "3. NeMo Framework Container: `nvcr.io/nvidia/nemo:dev`\n", + "3. NeMo Framework Container: `nvcr.io/nvidia/nemo:24.12-rc0`\n", "\n", "\n", "\n", From 18448b9dce36b1804883994d7a3bc8ba0c0a59a0 Mon Sep 17 00:00:00 2001 From: Ao Tang Date: Wed, 18 Dec 2024 23:28:02 -0500 Subject: [PATCH 074/128] Fix Optimizer & LR scheduler & Consume Samples when Resuming in PEFT (#11631) * Fix Optimizer & LR scheduler Resume * fix unit test Signed-off-by: Chen Cui * Apply isort and black reformatting Signed-off-by: cuichenx * typo Signed-off-by: Chen Cui * Fix consume samples * Fix unit tests * Apply isort and black reformatting Signed-off-by: suiyoubi --------- Signed-off-by: Chen Cui Signed-off-by: cuichenx Signed-off-by: suiyoubi Co-authored-by: Chen Cui Co-authored-by: cuichenx Co-authored-by: suiyoubi --- nemo/collections/llm/gpt/data/fine_tuning.py | 7 ++++--- nemo/lightning/pytorch/callbacks/peft.py | 7 ++++++- tests/lightning/test_data.py | 19 ++++++++++++++++--- 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/nemo/collections/llm/gpt/data/fine_tuning.py b/nemo/collections/llm/gpt/data/fine_tuning.py index 0d866bb600fe..a22ed72f4656 100644 --- a/nemo/collections/llm/gpt/data/fine_tuning.py +++ b/nemo/collections/llm/gpt/data/fine_tuning.py @@ -93,6 +93,7 @@ def __init__( self.packed_sequence_size = -1 if not packed_sequence_specs else packed_sequence_specs.packed_sequence_size self.validate_batch_size_for_packed_sequence() self.dataset_kwargs = dataset_kwargs or {} + self.init_global_step = 0 def validate_batch_size_for_packed_sequence(self): """ @@ -163,9 +164,7 @@ def state_dict(self) -> Dict[str, Any]: A dictionary containing datamodule state. 
""" - consumed_samples = self.data_sampler.compute_consumed_samples( - self.trainer.global_step - self.data_sampler.init_global_step - ) + consumed_samples = self.data_sampler.compute_consumed_samples(self.trainer.global_step - self.init_global_step) return {"consumed_samples": consumed_samples} def load_state_dict(self, state_dict: Dict[str, Any]) -> None: @@ -240,6 +239,8 @@ def _create_dataset(self, path, is_test=False, **kwargs): def _create_dataloader(self, dataset, mode, **kwargs) -> DataLoader: # pylint: disable=C0115,C0116 + self.init_global_step = self.trainer.global_step + self.data_sampler.init_global_step = self.init_global_step return WrappedDataLoader( mode=mode, dataset=dataset, diff --git a/nemo/lightning/pytorch/callbacks/peft.py b/nemo/lightning/pytorch/callbacks/peft.py index d138117e4599..d2e93fe9ab42 100644 --- a/nemo/lightning/pytorch/callbacks/peft.py +++ b/nemo/lightning/pytorch/callbacks/peft.py @@ -204,7 +204,12 @@ def apply_transform(self, trainer): ) trainer.strategy.load_model_state_dict(adapter_state, strict=False) if trainer.state.fn == TrainerFn.FITTING: - trainer.strategy.load_optimizer_state_dict(adapter_state, selective_restore=True) + # Load optimizer + trainer.strategy.load_optimizer_state_dict(adapter_state, selective_restore=False) + # Load lr scheduler + if (lr_schedulers := adapter_state.get('lr_schedulers', None)) is not None: + for config, lrs_state in zip(trainer.lr_scheduler_configs, lr_schedulers): + config.scheduler.load_state_dict(lrs_state) for cb in trainer.callbacks[::-1]: if isinstance(cb, MegatronOptimizerModule): diff --git a/tests/lightning/test_data.py b/tests/lightning/test_data.py index 2519616766f4..b848bec3dae9 100644 --- a/tests/lightning/test_data.py +++ b/tests/lightning/test_data.py @@ -15,11 +15,18 @@ from pathlib import Path from unittest.mock import MagicMock, patch +import pytest + + +@pytest.fixture +def trainer(): + return MagicMock() + @patch( 'nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_dataset.GPTSFTDataset.__init__', return_value=None ) -def test_finetuning_module(mock_gpt_sft_dataset) -> None: +def test_finetuning_module(mock_gpt_sft_dataset, trainer) -> None: from nemo.collections.llm.gpt.data import FineTuningDataModule dataset_root = 'random_root' @@ -30,6 +37,8 @@ def test_finetuning_module(mock_gpt_sft_dataset) -> None: global_batch_size=8, seed=1234, ) + datamodule.trainer = trainer + datamodule.setup(stage='train') datamodule.train_dataloader() mock_gpt_sft_dataset.assert_called_once() @@ -38,7 +47,7 @@ def test_finetuning_module(mock_gpt_sft_dataset) -> None: @patch( 'nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_dataset.GPTSFTDataset.__init__', return_value=None ) -def test_dolly_module(mock_gpt_sft_dataset) -> None: +def test_dolly_module(mock_gpt_sft_dataset, trainer) -> None: from nemo.collections.llm.gpt.data import DollyDataModule datamodule = DollyDataModule( @@ -47,6 +56,8 @@ def test_dolly_module(mock_gpt_sft_dataset) -> None: global_batch_size=8, seed=1234, ) + datamodule.trainer = trainer + datamodule.setup(stage='train') datamodule.train_dataloader() mock_gpt_sft_dataset.assert_called_once() @@ -55,7 +66,7 @@ def test_dolly_module(mock_gpt_sft_dataset) -> None: @patch( 'nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_dataset.GPTSFTDataset.__init__', return_value=None ) -def test_squad_module(mock_gpt_sft_dataset) -> None: +def test_squad_module(mock_gpt_sft_dataset, trainer) -> None: from nemo.collections.llm.gpt.data import SquadDataModule datamodule 
= SquadDataModule( @@ -64,6 +75,8 @@ def test_squad_module(mock_gpt_sft_dataset) -> None: global_batch_size=8, seed=1234, ) + datamodule.trainer = trainer + datamodule.setup(stage='train') datamodule.train_dataloader() mock_gpt_sft_dataset.assert_called_once() From ccb3dc1a92fbf409503f287e5fcd12277f243dd6 Mon Sep 17 00:00:00 2001 From: meatybobby Date: Wed, 18 Dec 2024 22:08:14 -0800 Subject: [PATCH 075/128] Add vlm generation function (#11063) * Add vlm inference * Add init * Apply isort and black reformatting Signed-off-by: meatybobby * Add KV cache and xattn cache in inference * Fix position id for KV cache * Apply isort and black reformatting Signed-off-by: meatybobby * Add doc string * pylint fix * Remove max_output_len in inference controller * Modify generate script * Apply isort and black reformatting Signed-off-by: meatybobby * Rename wrapped model * Rename var --------- Signed-off-by: meatybobby Co-authored-by: meatybobby --- nemo/collections/vlm/inference/__init__.py | 15 ++ nemo/collections/vlm/inference/base.py | 129 ++++++++++++++++++ nemo/collections/vlm/inference/vlm_engine.py | 52 +++++++ .../vlm/inference/vlm_inference_controller.py | 79 +++++++++++ .../vlm/inference/vlm_inference_wrapper.py | 119 ++++++++++++++++ nemo/collections/vlm/mllama/model/base.py | 16 ++- nemo/collections/vlm/mllama/model/language.py | 2 +- scripts/vlm/mllama_generate.py | 54 +++----- 8 files changed, 426 insertions(+), 40 deletions(-) create mode 100644 nemo/collections/vlm/inference/__init__.py create mode 100644 nemo/collections/vlm/inference/base.py create mode 100644 nemo/collections/vlm/inference/vlm_engine.py create mode 100644 nemo/collections/vlm/inference/vlm_inference_controller.py create mode 100644 nemo/collections/vlm/inference/vlm_inference_wrapper.py diff --git a/nemo/collections/vlm/inference/__init__.py b/nemo/collections/vlm/inference/__init__.py new file mode 100644 index 000000000000..6c338b383c73 --- /dev/null +++ b/nemo/collections/vlm/inference/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nemo.collections.vlm.inference.base import generate, setup_inference_wrapper, setup_model_and_tokenizer diff --git a/nemo/collections/vlm/inference/base.py b/nemo/collections/vlm/inference/base.py new file mode 100644 index 000000000000..bbc85a8ee4a8 --- /dev/null +++ b/nemo/collections/vlm/inference/base.py @@ -0,0 +1,129 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +import pytorch_lightning as pl +import torch +import torch.distributed +from megatron.core.inference.common_inference_params import CommonInferenceParams +from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import InferenceWrapperConfig +from transformers import AutoProcessor + +import nemo.lightning as nl +from nemo.collections import vlm +from nemo.collections.vlm.inference.vlm_engine import VLMEngine +from nemo.collections.vlm.inference.vlm_inference_controller import VLMTextGenerationController +from nemo.collections.vlm.inference.vlm_inference_wrapper import VLMInferenceWrapper + + +def _setup_trainer_and_restore_model(path: str, trainer: nl.Trainer, model: pl.LightningModule): + """Setup trainer and restore model from path""" + fabric = trainer.to_fabric() + model = fabric.load_model(path, model) + return model + + +def setup_inference_wrapper( + model, + tokenizer, + params_dtype: torch.dtype = torch.bfloat16, + inference_batch_times_seqlen_threshold: int = 1000, +): + """Set up inference wrapper for the model""" + config = model.config + + mcore_model = model.module.cuda() + mcore_model = mcore_model.to(params_dtype) + + inference_wrapped_model = VLMInferenceWrapper( + mcore_model, + InferenceWrapperConfig( + hidden_size=config.language_model_config.hidden_size, + params_dtype=params_dtype, + inference_batch_times_seqlen_threshold=inference_batch_times_seqlen_threshold, + padded_vocab_size=tokenizer.vocab_size, + ), + ) + + return inference_wrapped_model + + +def setup_model_and_tokenizer( + path: str, + trainer: Optional[nl.Trainer] = None, + params_dtype: torch.dtype = torch.bfloat16, + inference_batch_times_seqlen_threshold: int = 1000, +): + """Set up model and tokenizer""" + model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct" + + processor = AutoProcessor.from_pretrained(model_id) + tokenizer = processor.tokenizer + config = vlm.MLlamaConfig11BInstruct() + model = vlm.MLlamaModel(config, tokenizer=tokenizer) + _setup_trainer_and_restore_model(path=path, trainer=trainer, model=model) + + inference_wrapped_model = setup_inference_wrapper( + model, tokenizer, params_dtype, inference_batch_times_seqlen_threshold + ) + + return inference_wrapped_model, processor + + +def generate( + wrapped_model: VLMInferenceWrapper, + tokenizer, + image_processor, + prompts: list[str], + images: list, + max_batch_size: int = 4, + random_seed: Optional[int] = None, + inference_params: Optional[CommonInferenceParams] = None, +) -> dict: + """ + Generates text using a NeMo VLM model. + Args: + wrapped_model (VLMInferenceWrapper): The model inference wrapper. + tokenizer: tokenizer for the input text, + image_processor: image processor for the input image, + prompts (list[str]): The list of prompts to generate text for. + images (list): The list of images to generate text for. + max_batch_size (int, optional): The maximum batch size. Defaults to 4. + random_seed (Optional[int], optional): The random seed. Defaults to None. + inference_params (Optional["CommonInferenceParams"], optional): The inference parameters defined in + Mcore's CommonInferenceParams. Defaults to None. + + Returns: + list[Union["InferenceRequest", str]]: A list of generated text, + either as a string or as an InferenceRequest object. 
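+
+    Example (illustrative sketch; assumes ``ckpt_path`` points to a compatible NeMo
+        checkpoint, ``trainer`` is an ``nl.Trainer``, and ``img`` is a PIL image):
+
+        wrapped_model, processor = setup_model_and_tokenizer(path=ckpt_path, trainer=trainer)
+        results = generate(
+            wrapped_model,
+            processor.tokenizer,
+            processor.image_processor,
+            prompts=["Describe this image."],
+            images=[img],
+        )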
+ """ + text_generation_controller = VLMTextGenerationController( + inference_wrapped_model=wrapped_model, + tokenizer=tokenizer, + image_processor=image_processor, + ) + mcore_engine = VLMEngine( + text_generation_controller=text_generation_controller, max_batch_size=max_batch_size, random_seed=random_seed + ) + + common_inference_params = inference_params or CommonInferenceParams(num_tokens_to_generate=50) + + results = mcore_engine.generate( + prompts=prompts, + images=images, + common_inference_params=common_inference_params, + ) + + return results diff --git a/nemo/collections/vlm/inference/vlm_engine.py b/nemo/collections/vlm/inference/vlm_engine.py new file mode 100644 index 000000000000..bce373e7a2f5 --- /dev/null +++ b/nemo/collections/vlm/inference/vlm_engine.py @@ -0,0 +1,52 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List + +import torch +from megatron.core.inference.common_inference_params import CommonInferenceParams +from megatron.core.inference.engines.mcore_engine import MCoreEngine +from megatron.core.inference.inference_request import InferenceRequest +from PIL.Image import Image + + +class VLMEngine(MCoreEngine): + # pylint: disable=C0115,C0116 + def generate( + self, + prompts: List[str], + images: List[Image] = None, + common_inference_params: CommonInferenceParams = None, + ) -> dict: + # pylint: disable=C0115,C0116 + if self.random_seed: + torch.random.manual_seed(self.random_seed) + + for i in range(len(prompts)): + prompt = prompts[i] + image = images[i] if images is not None else None + prompt_tokens, image_dict = self.text_generation_controller.tokenize_prompt(prompt, image) + + # Reuse encoder_prompt from scheduler to pass image + self.scheduler.add_request( + prompt=prompt, + prompt_tokens=prompt_tokens, + encoder_prompt=image_dict, + inference_parameters=common_inference_params, + ) + + self.run_engine() + + result: List[InferenceRequest] = self.scheduler.completed_request_pool.values() + return result diff --git a/nemo/collections/vlm/inference/vlm_inference_controller.py b/nemo/collections/vlm/inference/vlm_inference_controller.py new file mode 100644 index 000000000000..9db1ce24031d --- /dev/null +++ b/nemo/collections/vlm/inference/vlm_inference_controller.py @@ -0,0 +1,79 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import OrderedDict + +import torch + +from megatron.core.inference.inference_request import InferenceRequest +from megatron.core.inference.text_generation_controllers.simple_text_generation_controller import ( + SimpleTextGenerationController, +) + + +class TokenizerWrapper: + # pylint: disable=C0115,C0116 + def __init__(self, tokenizer): + self.eod = tokenizer.eos_token_id + self.vocab_size = None + self._tokenizer = tokenizer + + def detokenize(self, tokens): + # pylint: disable=C0115,C0116 + return self._tokenizer.decode(tokens, skip_special_tokens=True) + + def tokenize(self, prompt): + # pylint: disable=C0115,C0116 + return self._tokenizer.encode(prompt, add_special_tokens=False) + + +class VLMTextGenerationController(SimpleTextGenerationController): + # pylint: disable=C0115,C0116 + def __init__(self, inference_wrapped_model, tokenizer, image_processor): + super().__init__(inference_wrapped_model, TokenizerWrapper(tokenizer)) + self.image_processor = image_processor + + def tokenize_prompt(self, prompt: str, image): + # pylint: disable=C0115,C0116 + tokens = self.tokenizer.tokenize(prompt) + if image is None: + image_dict = dict( + pixel_values=torch.zeros( + 1, 4, 3, self.image_processor.size['height'], self.image_processor.size['width'] + ), + aspect_ratio_ids=torch.tensor([0], dtype=torch.long), + num_tiles=[0], + ) + else: + image_dict = self.image_processor.preprocess(image, return_tensors='pt') + image_dict = { + k: v[0] for k, v in image_dict.items() if k in ["pixel_values", "aspect_ratio_ids", "num_tiles"] + } + return tokens, image_dict + + def prep_model_for_inference( + self, prompts_tokens: torch.Tensor, active_requests: OrderedDict[int, InferenceRequest] + ): + """Preparing batch for inference, using respective wrapper's prep_model_for_inference method + + Args: + prompts_tokens (torch.Tensor): A tensor of shape [batch_size, max_sequence_length] + active_requests (OrderedDict[int, InferenceRequest]): The input active requests + """ + images = list(map(lambda request: request.encoder_prompt, active_requests.values())) + + self.inference_wrapped_model.prep_model_for_inference( + prompts_tokens=prompts_tokens, + image_dict=images, + ) diff --git a/nemo/collections/vlm/inference/vlm_inference_wrapper.py b/nemo/collections/vlm/inference/vlm_inference_wrapper.py new file mode 100644 index 000000000000..29d7d83a9d54 --- /dev/null +++ b/nemo/collections/vlm/inference/vlm_inference_wrapper.py @@ -0,0 +1,119 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
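+
+# This module wraps a Mllama-style vision-language model for Megatron Core inference:
+# prep_model_for_inference pads and collates the per-request image inputs, builds position
+# ids, and creates the InferenceParams that carry the cross-attention caches, while
+# forward_pass_without_pipeline_parallel runs the forward pass (tensor parallel or no model
+# parallelism) and gathers the output logits across tensor-parallel ranks.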
+ +from argparse import Namespace +from typing import Dict, List + +import torch +import torch.nn.functional as F +from megatron.core import tensor_parallel +from megatron.core.inference.model_inference_wrappers.abstract_model_inference_wrapper import ( + AbstractModelInferenceWrapper, +) +from megatron.core.inference_params import InferenceParams +from torch.utils.data import default_collate + +from nemo.collections.vlm.mllama.model.utils import create_vision_mask_tensor + + +class VLMInferenceWrapper(AbstractModelInferenceWrapper): + """Constructor for the model inference wrapper + + The wrapper prepares the model for inference, provides the required input + data, and runs the forward pass + + Args: + model (MllamaModel): The Mllama model + args (Namespace): The command line arguments that were passed + """ + + def __init__(self, model, args: Namespace): + super().__init__(model, args) + + def prep_model_for_inference( + self, + prompts_tokens: torch.Tensor, + image_dict: List[Dict] = None, + ): + # pylint: disable=C0115,C0116 + super().prep_model_for_inference(prompts_tokens=prompts_tokens) + max_num_concurrent_media = max(instance['pixel_values'].shape[0] for instance in image_dict) + for instance in image_dict: + pad_num_images = max_num_concurrent_media - instance['pixel_values'].shape[0] + instance['pixel_values'] = F.pad( + instance['pixel_values'], (0, 0, 0, 0, 0, 0, 0, 0, 0, pad_num_images), 'constant', 0 + ) + instance['aspect_ratio_ids'] = F.pad( + instance['aspect_ratio_ids'], (0, max(pad_num_images - 1, 0)), 'constant', 0 + ) + instance['num_tiles'] = F.pad( + torch.tensor(instance['num_tiles']), (0, max(pad_num_images - 1, 0)), 'constant', 0 + ) + batch = default_collate(image_dict) + + batch_size = prompts_tokens.size(0) + seq_length = prompts_tokens.size(1) + self.position_ids = ( + torch.arange(seq_length, dtype=torch.long, device=prompts_tokens.device) + .unsqueeze(0) + .expand_as(prompts_tokens) + ) + self.pixel_values = batch['pixel_values'].cuda(non_blocking=True) + self.num_tiles = batch['num_tiles'] + self.aspect_ratio_ids = batch['aspect_ratio_ids'].cuda(non_blocking=True) + + self.inference_params = InferenceParams(batch_size, seq_length) + self.inference_params.xattn_caches = None + self.inference_params.cross_attention_masks = None + self.inference_params.full_text_row_masked_out_mask = None + + def get_batch_for_context_window(self, context_start_position: int, context_end_position: int) -> List: + # pylint: disable=C0115,C0116 + tokens2use = self.prompts_tokens[:, context_start_position:context_end_position] + positions2use = self.position_ids[:, context_start_position:context_end_position] + data_at_step_idx = [tokens2use, positions2use] + + return data_at_step_idx + + def forward_pass_without_pipeline_parallel(self, inference_input: List) -> torch.Tensor: + """Utility to carry out simple forward pass for TP or no model parallel models + + Runs a very simple forward pass for model. Used in the case of models without + any parallelism or only tensor parallelism. 
+ + Args: + inference_input (List): A list containg the inputs for the vlm + model [tokens, position ids] + + Returns: + torch.Tensor: The output logits of shape [batch_size, seq_len, padded_vocab_size] + """ + tokens2use, positions2use = inference_input + batch_masks = [create_vision_mask_tensor(tokens2use[0], 128256)] * tokens2use.size(0) + logits = self.model( + batch_images=self.pixel_values, + batch_masks=batch_masks, + num_chunks=self.num_tiles, + aspect_ratio_ids=self.aspect_ratio_ids, + tokens=tokens2use, + position_ids=positions2use, + xattn_caches=self.inference_params.xattn_caches, + cross_attention_masks=self.inference_params.cross_attention_masks, + full_text_row_masked_out_mask=self.inference_params.full_text_row_masked_out_mask, + inference_params=self.inference_params, + ) + logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) + self.inference_params.sequence_len_offset += tokens2use.size(1) + + return logits diff --git a/nemo/collections/vlm/mllama/model/base.py b/nemo/collections/vlm/mllama/model/base.py index 9279936e23d7..1e8bb8d5adcf 100644 --- a/nemo/collections/vlm/mllama/model/base.py +++ b/nemo/collections/vlm/mllama/model/base.py @@ -22,6 +22,7 @@ import torch.distributed from einops import rearrange from megatron.core.enums import ModelType +from megatron.core.inference_params import InferenceParams from megatron.core.models.vision.multimodal_projector import MultimodalProjector from megatron.core.optimizer import OptimizerConfig from megatron.core.tensor_parallel.layers import ColumnParallelLinear @@ -425,6 +426,7 @@ def forward( cross_attention_masks: Optional[torch.Tensor] = None, full_text_row_masked_out_mask: Optional[torch.Tensor] = None, xattn_caches: Optional[List] = None, + inference_params: InferenceParams = None, ) -> torch.Tensor: """Forward.""" if xattn_caches is None: @@ -467,6 +469,15 @@ def forward( total_len=position_ids.shape[1], ) + xattn_mask_index = position_ids[0] + + if inference_params is not None: + inference_params.xattn_caches = xattn_caches + inference_params.cross_attention_masks = cross_attention_masks + inference_params.full_text_row_masked_out_mask = full_text_row_masked_out_mask + else: + xattn_mask_index = [cross_attention_masks.shape[2] - 1] + assert self.add_decoder, "Language model required for forward pass." 
language_embeddings = None if self.pre_process: @@ -474,7 +485,7 @@ def forward( language_embeddings = language_embeddings.transpose(1, 0).contiguous() # [text_seq_len, b, h_language] full_text_row_masked_out_mask = ( - full_text_row_masked_out_mask[:, :, position_ids[0]].permute(2, 0, 1, 3).squeeze(2) + full_text_row_masked_out_mask[:, :, xattn_mask_index].permute(2, 0, 1, 3).squeeze(2) if cross_attention_masks is not None else None ) @@ -485,10 +496,11 @@ def forward( decoder_input=language_embeddings, attention_mask=None, cross_attention_masks=( - cross_attention_masks[:, :, position_ids[0]] if cross_attention_masks is not None else None + cross_attention_masks[:, :, xattn_mask_index] if cross_attention_masks is not None else None ), full_text_row_masked_out_mask=full_text_row_masked_out_mask, xattn_caches=xattn_caches, + inference_params=inference_params, ) return output diff --git a/nemo/collections/vlm/mllama/model/language.py b/nemo/collections/vlm/mllama/model/language.py index 5d4cc2e09f21..bec3ec526f6e 100644 --- a/nemo/collections/vlm/mllama/model/language.py +++ b/nemo/collections/vlm/mllama/model/language.py @@ -346,7 +346,7 @@ def forward( full_text_row_masked_out_mask=full_text_row_masked_out_mask, rotary_pos_emb=rotary_pos_emb, cross_attention_bias=cross_attention_bias, - inference_params=inference_params, + inference_params=None, # Skip inference_params for xattn packed_seq_params=packed_seq_params, ) hidden_states, context = layer( diff --git a/scripts/vlm/mllama_generate.py b/scripts/vlm/mllama_generate.py index c97a0a81d5b9..10dc197f63a0 100644 --- a/scripts/vlm/mllama_generate.py +++ b/scripts/vlm/mllama_generate.py @@ -21,12 +21,14 @@ import requests import torch +from megatron.core.inference.common_inference_params import CommonInferenceParams from PIL import Image from transformers import AutoProcessor from nemo import lightning as nl from nemo.collections import vlm -from nemo.collections.vlm.mllama.model.utils import create_vision_mask_tensor +from nemo.collections.vlm.inference import generate as vlm_generate +from nemo.collections.vlm.inference import setup_inference_wrapper model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct" @@ -54,44 +56,22 @@ def generate(model, processor, image, text): } ] input_text = processor.apply_chat_template(messages, add_generation_prompt=True) - batch = processor(image, input_text, add_special_tokens=False, return_tensors="pt") - input_ids = batch["input_ids"].cuda(non_blocking=True) - position_ids = ( - torch.arange(input_ids.size(1), dtype=torch.long, device=input_ids.device).unsqueeze(0).expand_as(input_ids) + model = setup_inference_wrapper(model, processor.tokenizer) + + prompts = [input_text] + images = [image] + params = CommonInferenceParams(top_k=1, top_p=0, num_tokens_to_generate=100) + result = vlm_generate( + model, + processor.tokenizer, + processor.image_processor, + prompts, + images, + inference_params=params, ) - num_tiles = processor.image_processor.preprocess(image, return_tensors='pt')["num_tiles"] - - min_prompt_len = position_ids.shape[-1] - - input_ids = input_ids[:, :min_prompt_len] - generated_ids = input_ids.clone() - - from tqdm import tqdm - - for cur_pos in tqdm(range(min_prompt_len, min_prompt_len + 100)): - with torch.no_grad(): - position_ids = torch.arange(0, cur_pos, dtype=torch.long, device="cuda").reshape(1, -1) - batch_masks = create_vision_mask_tensor(generated_ids[0]) - - output = model( - batch_images=batch["pixel_values"].cuda(non_blocking=True), - batch_masks=[batch_masks], - 
num_chunks=torch.tensor(num_tiles), - aspect_ratio_ids=batch["aspect_ratio_ids"].cuda(non_blocking=True), - tokens=generated_ids, - position_ids=position_ids, - ) - - next_token_ids = torch.argmax(output[:, -1], dim=-1, keepdim=True) - # Broadcast the tensor from rank 0 to all other ranks - torch.distributed.broadcast(next_token_ids, src=0) - generated_ids = torch.cat([generated_ids, next_token_ids], dim=-1) - if (next_token_ids == tokenizer.eos_token_id).all(): - break - - generated_ids = generated_ids.tolist() - generated_texts = tokenizer.decode(generated_ids[0][min_prompt_len:]) + + generated_texts = list(result)[0].generated_text if torch.distributed.get_rank() == 0: print("======== GENERATED TEXT OUTPUT ========") From db0a2d0985771a5402d76f3ac07a7b3c89e63ccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Thu, 19 Dec 2024 14:05:31 +0100 Subject: [PATCH 076/128] ci: Small pylint fix (#11667) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: oliver könig --- .github/workflows/code-formatting.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/code-formatting.yml b/.github/workflows/code-formatting.yml index 0eaad048b3a5..3730e0bcf955 100644 --- a/.github/workflows/code-formatting.yml +++ b/.github/workflows/code-formatting.yml @@ -139,11 +139,12 @@ jobs: echo "Will run on these files: ${FILTERED[@]}" - set +xe + set +e LOG=$(pylint ${FILTERED[@]}) EXIT_CODE=$? set -e + set +x echo "OUTPUT<> $GITHUB_ENV echo "$LOG" >> $GITHUB_ENV echo "EOF" >> $GITHUB_ENV From 53c64ed31eb6aa0254d396bf4f717eaff23b59cb Mon Sep 17 00:00:00 2001 From: Hemil Desai Date: Thu, 19 Dec 2024 11:39:58 -0800 Subject: [PATCH 077/128] Add slimpajama example (#10671) * Add slimpajama example Signed-off-by: Hemil Desai * Apply isort and black reformatting Signed-off-by: hemildesai * Fix Signed-off-by: Hemil Desai * Fixes Signed-off-by: Hemil Desai * Fixes Signed-off-by: Hemil Desai * Fixes Signed-off-by: Hemil Desai * Fix Signed-off-by: Hemil Desai * Add notebook Signed-off-by: Hemil Desai * Fix Signed-off-by: Hemil Desai * Add basic pretraining notebook for slimpajama Signed-off-by: Hemil Desai * Add docs for pretraining notebook Signed-off-by: Hemil Desai * PR feedback Signed-off-by: Hemil Desai * PR feedback Signed-off-by: Hemil Desai * Pylint fixes Signed-off-by: Hemil Desai * Apply isort and black reformatting Signed-off-by: hemildesai * PR feedback Signed-off-by: Hemil Desai * Update README Signed-off-by: Hemil Desai * PR feedback Signed-off-by: Hemil Desai --------- Signed-off-by: Hemil Desai Signed-off-by: hemildesai Co-authored-by: hemildesai --- tutorials/llm/llama-3/slimpajama/README.md | 48 +++ .../llm/llama-3/slimpajama/data/concat.sh | 70 ++++ .../llm/llama-3/slimpajama/data/download.py | 61 ++++ .../llm/llama-3/slimpajama/data/extract.py | 168 +++++++++ .../llm/llama-3/slimpajama/data/preprocess.py | 113 ++++++ .../llama-3/slimpajama/data_pipeline.ipynb | 338 ++++++++++++++++++ .../llm/llama-3/slimpajama/data_pipeline.py | 124 +++++++ .../llm/llama-3/slimpajama/pretraining.ipynb | 164 +++++++++ 8 files changed, 1086 insertions(+) create mode 100644 tutorials/llm/llama-3/slimpajama/README.md create mode 100644 tutorials/llm/llama-3/slimpajama/data/concat.sh create mode 100644 tutorials/llm/llama-3/slimpajama/data/download.py create mode 100644 tutorials/llm/llama-3/slimpajama/data/extract.py create mode 100644 tutorials/llm/llama-3/slimpajama/data/preprocess.py create mode 100644 
tutorials/llm/llama-3/slimpajama/data_pipeline.ipynb
 create mode 100644 tutorials/llm/llama-3/slimpajama/data_pipeline.py
 create mode 100644 tutorials/llm/llama-3/slimpajama/pretraining.ipynb

diff --git a/tutorials/llm/llama-3/slimpajama/README.md b/tutorials/llm/llama-3/slimpajama/README.md
new file mode 100644
index 000000000000..a35be5dc1bd8
--- /dev/null
+++ b/tutorials/llm/llama-3/slimpajama/README.md
@@ -0,0 +1,48 @@
+**Introduction**
+
+Welcome to the NeMo SlimPajama Data Pipeline and Pretraining tutorial! This tutorial provides a step-by-step guide to preprocessing the SlimPajama dataset and pretraining a Llama-based model using the NeMo 2.0 library.
+
+The tutorial includes two Jupyter notebooks: `data_pipeline.ipynb` and `pretraining.ipynb`. The `data_pipeline.ipynb` notebook provides a data pipeline to preprocess the SlimPajama dataset, including downloading, extracting, concatenating, and tokenizing the data. The `pretraining.ipynb` notebook provides a pretraining recipe to train a language model using the preprocessed data.
+
+This repository is designed to be used with NeMo 2.0 and NeMo-Run.
+
+**Pre-requisites / Requirements**
+
+- System Configuration
+    - For Preprocessing: access to any CPU node should be sufficient. Please reach out to us if you run into errors.
+    - For Pretraining: access to at least one NVIDIA GPU with a cumulative memory of at least 48 GB.
+    - A Docker-enabled environment, with NVIDIA Container Runtime installed, which will make the container GPU-aware.
+- Software Requirements
+    - Use the latest [NeMo Framework Container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/nemo/tags). Note that you must be logged in to the container registry to view this page.
+    - This notebook uses the container: nvcr.io/nvidia/nemo:dev.
+    - Get your Hugging Face [access token](https://huggingface.co/docs/hub/en/security-tokens), which will be used to download assets from Hugging Face.
+    - Install JupyterLab or Jupyter Notebook in your environment if they are not already installed.
+- NeMo 2.0 and NeMo-Run
+    - We will use NeMo 2.0 and NeMo-Run for this tutorial. Both are already available in the NeMo Framework Container.
+
+**Getting started**
+
+Assuming you have all the pre-requisites installed, you can get started by following these steps:
+1. Start and enter the dev container by running:
+    ```bash
+    docker run \
+      --gpus device=all \
+      --shm-size=2g \
+      --net=host \
+      --ulimit memlock=-1 \
+      --rm -it \
+      -v ${PWD}:/workspace \
+      -w /workspace \
+      nvcr.io/nvidia/nemo:dev bash
+    ```
+2. Log in through huggingface-cli using your Hugging Face token.
+    ```huggingface-cli login```
+3. From within the container, start JupyterLab:
+    ```jupyter lab --ip 0.0.0.0 --port=8888 --allow-root```
+4. Follow the directions in the data_pipeline.ipynb and pretraining.ipynb notebooks to preprocess the SlimPajama dataset and pretrain a model.
+
+**Note**
+
+* Make sure to replace placeholder paths with the actual paths on your machine, and update the docker volume mounts to persist data (see the example below).
+* The `data_pipeline.ipynb` notebook assumes that the SlimPajama dataset is stored in the `/data/slimpajama` directory.
+* The `pretraining.ipynb` notebook assumes that the preprocessed data is stored in the `/data/slimpajama_megatron` directory.
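+
+For example, if the raw and preprocessed data should live under a host directory such as `/mnt/data` (a placeholder path; adjust it for your machine), you could extend the `docker run` command above with one more volume mount so that `/data/slimpajama` and `/data/slimpajama_megatron` persist outside the container:
+    ```bash
+    docker run \
+      --gpus device=all \
+      --shm-size=2g \
+      --net=host \
+      --ulimit memlock=-1 \
+      --rm -it \
+      -v ${PWD}:/workspace \
+      -v /mnt/data:/data \
+      -w /workspace \
+      nvcr.io/nvidia/nemo:dev bash
+    ```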
\ No newline at end of file diff --git a/tutorials/llm/llama-3/slimpajama/data/concat.sh b/tutorials/llm/llama-3/slimpajama/data/concat.sh new file mode 100644 index 000000000000..78c3b5ca51a4 --- /dev/null +++ b/tutorials/llm/llama-3/slimpajama/data/concat.sh @@ -0,0 +1,70 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/bin/bash + +concatenate_chunk() { + local data_folder=$1 + local chunk_number=$2 + local chunk_folder="$data_folder/chunk$chunk_number" + local output_file="$data_folder/concatenated_chunk$chunk_number.jsonl" + + echo "Combining files for $data_folder/chunk$chunk_number to $output_file." + + if [ ! -d "$chunk_folder" ]; then + echo "Chunk folder $chunk_folder does not exist" + return 1 + fi + + # Check if the concatenated file already exists + if [ -f "$output_file" ]; then + echo "Concatenated file for chunk$chunk_number already exists. Skipping." + return 0 + fi + + # Use find to get all files in the chunk folder and sort them + files=$(find $chunk_folder -maxdepth 1 -type f -name "*.jsonl" | sort) + + # Concatenate all files in the chunk folder + cat $files > "$output_file" + + if [ $? -eq 0 ]; then + echo "Successfully concatenated files for chunk$chunk_number" + else + echo "Failed to concatenate files for chunk$chunk_number" + fi +} + +# Check if enough arguments are provided +if [ $# -lt 2 ]; then + echo "Usage: $0 [ ...]" + exit 1 +fi + +# Get the train folder from the first argument +data_folder=$1 +shift + +# Check if the train folder exists +if [ ! -d "$data_folder" ]; then + echo "Error: Data folder '$data_folder' does not exist" + exit 1 +fi + +# Process each provided chunk number +for chunk_number in "$@"; do + if [[ -n "$chunk_number" ]]; then + concatenate_chunk "$data_folder" "$chunk_number" + fi +done diff --git a/tutorials/llm/llama-3/slimpajama/data/download.py b/tutorials/llm/llama-3/slimpajama/data/download.py new file mode 100644 index 000000000000..7354d9ec0be9 --- /dev/null +++ b/tutorials/llm/llama-3/slimpajama/data/download.py @@ -0,0 +1,61 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import nemo_run as run + + +def download_slimpajama(include_pattern: str = "", exclude_pattern: str = ""): + """ + Configure run.Script to download SlimPajama dataset from HuggingFace. + + Args: + include_pattern: Include pattern for HuggingFace CLI. + exclude_pattern: Exclude pattern for HuggingFace CLI. 
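+
+    Example (illustrative; this is the same include pattern used in the accompanying
+        data_pipeline notebook):
+
+        download_task = download_slimpajama(include_pattern='--include "train/chunk1/*_100*zst"')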
+ """ + hf_cli_cmd = "huggingface-cli download cerebras/SlimPajama-627B {include_pattern} {exclude_pattern} --quiet --repo-type dataset --local-dir /data/slimpajama --cache-dir /data/slimpajama" # pylint: disable=line-too-long + hf_cli_cmd = hf_cli_cmd.format(include_pattern=include_pattern, exclude_pattern=exclude_pattern) + + download_script = """ +pip install "huggingface_hub[cli,hf_transfer]" + +retry_command() { + local max_retries=$1 + local sleep_time=$2 + local retry_count=0 + local command=${@:3} + + echo "Running $command" + while [ $retry_count -lt $max_retries ]; do + eval $command + if [ $? -eq 0 ]; then + echo "Command succeeded" + return 0 + else + echo "Command failed. Attempt: $((retry_count + 1))" + retry_count=$((retry_count + 1)) + sleep $sleep_time + fi + done + + echo "Command failed after $max_retries retries" + return 1 +} + +export HF_HUB_DOWNLOAD_TIMEOUT=30 +export HF_ENABLE_HF_TRANSFER=True +""" + + download_script += f"retry_command 5 5 {hf_cli_cmd}\n" + download_task = run.Script(inline=download_script) + return download_task diff --git a/tutorials/llm/llama-3/slimpajama/data/extract.py b/tutorials/llm/llama-3/slimpajama/data/extract.py new file mode 100644 index 000000000000..5f64d7b670ec --- /dev/null +++ b/tutorials/llm/llama-3/slimpajama/data/extract.py @@ -0,0 +1,168 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import logging +import multiprocessing +import os +from pathlib import Path +from typing import Optional + +import tqdm +import zstandard + +SOURCES_LIST = [ + "RedPajamaCommonCrawl", + "RedPajamaC4", + "RedPajamaGithub", + "RedPajamaBook", + "RedPajamaArXiv", + "RedPajamaWikipedia", + "RedPajamaStackExchange", +] + +DEFAULT_APPROVED_SOURCES = [ + "RedPajamaCommonCrawl", + "RedPajamaC4", + "RedPajamaGithub", + "RedPajamaArXiv", + "RedPajamaWikipedia", + "RedPajamaStackExchange", +] + + +def approve_source(filename: str, source_list: list): + """ + Function to remove data from non approved sources. 
+ Books data is removed by default due to copyright issues + + Arguments: + filename: path to jsonl file with the data + source_list: list of sources that are allowed to be included in the dataset + """ + + with open(filename, "r") as i: + with open(filename + ".tmp", "w") as o: + for line in i.read().splitlines(): + j = json.loads(line) + if j["meta"]["redpajama_set_name"] in source_list: + json.dump(j, o) + o.write("\n") + os.rename(filename + ".tmp", filename) + return + + +def _split_shards(dataset: list[str], w_size: int) -> list: + shards = [] + for shard in range(w_size): + idx_start = (shard * len(dataset)) // w_size + idx_end = ((shard + 1) * len(dataset)) // w_size + shards.append(dataset[idx_start:idx_end]) + return shards + + +def _get_shard_list(data_dir: str, w_size: int, extension: str = "*zst") -> list: + files = Path(data_dir).rglob(extension) + files = sorted([str(f) for f in files]) + return _split_shards(files, w_size) + + +def _extract_single_zst_file(input_path: str, save_dir: str, file_name: str, rm_input: bool = False): + os.makedirs(save_dir, exist_ok=True) + save_path = os.path.join(save_dir, file_name) + if os.path.exists(save_path): + print(f"File {save_path} already exists, skipping extraction.") + return save_path + + total_length = os.stat(input_path).st_size + with tqdm.tqdm( + total=total_length, + unit="B", + unit_scale=True, + desc=file_name, + ) as pbar: + dctx = zstandard.ZstdDecompressor() + read_size = 131075 + write_size = int(read_size * 4) + save_path = os.path.join(save_dir, file_name) + update_len = 0 + with open(input_path, "rb") as in_f, open(save_path, "wb") as out_f: + for chunk in dctx.read_to_iter(in_f, read_size=read_size, write_size=write_size): + out_f.write(chunk) + update_len += read_size + if update_len >= 3000000: + pbar.update(update_len) + update_len = 0 + if rm_input: + os.remove(input_path) + + +def _extract_single_shard(shard_tuple: tuple): + data_dir, shard, source_list, rm_downloaded = shard_tuple + file_path = os.path.join(data_dir, shard) + _extract_single_zst_file(file_path, data_dir, shard[:-4], rm_downloaded) + shard_path = os.path.join(data_dir, shard[:-4]) + approve_source(shard_path, source_list) + + +def _run_extraction_on_shard( + data_dir: str, + shards_to_extract: list, + shard_index: int, + approved_sources: list, + rm_downloaded: bool = False, +) -> int: + source_list = [] + if not approved_sources: + approved_sources = DEFAULT_APPROVED_SOURCES + + for source in approved_sources: + if source in SOURCES_LIST: + source_list.append(source) + else: + logging.warning(f"Source: {source} is not recognized, should be one of {SOURCES_LIST}") + + print(f"Task :{shard_index} is extracting shards {shards_to_extract[shard_index]}") + + shards_to_process = [(data_dir, shard, source_list, rm_downloaded) for shard in shards_to_extract[shard_index]] + with multiprocessing.Pool(multiprocessing.cpu_count()) as pool: + pool.map(_extract_single_shard, shards_to_process) + + +def run_extraction( + data_dir: str, + rm_downloaded: bool = False, + approved_sources: Optional[list] = None, + num_tasks: Optional[int] = None, + task_id: Optional[int] = None, +): + """ + Function to download the pile dataset files on Slurm. + + Arguments: + cfg: main config file. 
+ conf variables being used: + data_dir + """ + if not num_tasks: + if "SLURM_ARRAY_TASK_COUNT" in os.environ: + num_tasks = int(os.environ["SLURM_ARRAY_TASK_COUNT"]) + task_id = int(os.environ["SLURM_ARRAY_TASK_ID"]) + else: + num_tasks = 1 + task_id = 0 + + shards_to_extract = _get_shard_list(data_dir, num_tasks) + _run_extraction_on_shard(data_dir, shards_to_extract, task_id, approved_sources, rm_downloaded) + print(f"Extracted {len(shards_to_extract[task_id])} files") diff --git a/tutorials/llm/llama-3/slimpajama/data/preprocess.py b/tutorials/llm/llama-3/slimpajama/data/preprocess.py new file mode 100644 index 000000000000..19c3a915e330 --- /dev/null +++ b/tutorials/llm/llama-3/slimpajama/data/preprocess.py @@ -0,0 +1,113 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import multiprocessing +import os +import subprocess +import time +from typing import Optional + +from data.extract import _get_shard_list + + +def _execute_cmd(cmd_tuple: tuple): + cmd, task_id = cmd_tuple + start_time = time.time() + print(f" ****** Task ID {task_id:02d} starts to preprocess {os.path.basename(cmd[2])}...") + + subprocess.check_call(cmd) + print(f" ****** Task ID {task_id:02d} finished preprocessing {os.path.basename(cmd[2])}...") + print(f" ****** Task ID {task_id:02d} time elapsed {(time.time() - start_time) / 60:.2f} min.") + + +def preprocess_data( + data_dir: str, + output_dir: str, + dataset_impl: str = "", + tokenizer_type: str = "", + tokenizer_library: str = "sentencepiece", + tokenizer_model: str = "", + vocab_file_path: Optional[str] = None, + merges_file_path: Optional[str] = None, + num_tasks: Optional[int] = None, + task_id: Optional[int] = None, + extra_args: Optional[list[str]] = None, +): + """ + Preprocess data for Megatron Core using scripts/nlp_language_modeling/preprocess_data_for_megatron.py + + Args: + data_dir: Path to the directory containing the data to preprocess. + output_dir: Path to the directory where the preprocessed data will be saved. + dataset_impl: Dataset implementation to use. + tokenizer_type: Tokenizer type to use. + tokenizer_library: Tokenizer library to use. + tokenizer_model: Tokenizer model to use. + vocab_file_path: Path to the vocabulary file. + merges_file_path: Path to the merges file. + num_tasks: Number of tasks to split the data into. + task_id: Task ID of run. + extra_args: Extra arguments to pass to the preprocess_data_for_megatron.py script. 
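+
+    Example (illustrative; mirrors the configuration used in the accompanying
+        data_pipeline notebook):
+
+        preprocess_data(
+            data_dir="/data/slimpajama",
+            output_dir="/data/slimpajama_megatron",
+            tokenizer_model="/data/tokenizer/tokenizer.model",
+            tokenizer_library="sentencepiece",
+        )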
+ """ + if not num_tasks: + if "SLURM_ARRAY_TASK_COUNT" in os.environ: + num_tasks = int(os.environ["SLURM_ARRAY_TASK_COUNT"]) + task_id = int(os.environ["SLURM_ARRAY_TASK_ID"]) + else: + num_tasks = 1 + task_id = 0 + shards_to_extract = _get_shard_list(data_dir, num_tasks, extension="concatenated*.jsonl") + shard_files = shards_to_extract[task_id] + cmd = [ + "python", + "/opt/NeMo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py", + ] + + os.makedirs(output_dir, exist_ok=True) + final_cmds = [] + for split in shard_files: + if not split: # Remove empty split + continue + + output_arg = os.path.join(output_dir, os.path.basename(split)) + + flags = [ + f"--input={split}", + f"--output-prefix={output_arg}", + f"--tokenizer-library={tokenizer_library}", + f"--tokenizer-type={tokenizer_type}" if tokenizer_type else f"--tokenizer-model={tokenizer_model}", + f"--workers={multiprocessing.cpu_count()}", + "--log-interval=100000", + "--apply-ftfy", + ] + + if dataset_impl: + flags += [f"--dataset-impl={dataset_impl}"] + + if vocab_file_path: + flags += [ + f"--vocab-file={vocab_file_path}", + "--append-eod", + ] + + if merges_file_path: + flags += [f"--merges-file={merges_file_path}"] + + final_cmd = cmd + flags + if extra_args: + final_cmd += extra_args + final_cmds.append((final_cmd, task_id)) + + for cmd in final_cmds: + _execute_cmd(cmd) diff --git a/tutorials/llm/llama-3/slimpajama/data_pipeline.ipynb b/tutorials/llm/llama-3/slimpajama/data_pipeline.ipynb new file mode 100644 index 000000000000..8d081e5d27eb --- /dev/null +++ b/tutorials/llm/llama-3/slimpajama/data_pipeline.ipynb @@ -0,0 +1,338 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Data Processing for NeMo 2.0 LLMs with the SlimPajama Dataset\n", + "\n", + "This tutorial will guide you through the process of transforming a raw pretraining dataset into a configured data module for pretraining with a NeMo 2.0 recipe. We will use the [SlimPajama-627B](https://huggingface.co/datasets/cerebras/SlimPajama-627B>) dataset as our reference. Additionally, we will demonstrate how to exclude specific sources from the dataset, such as excluding all data from the `RedPajamaBook` set by default.\n", + "\n", + "This tutorial involves four steps:\n", + "\n", + "1. Download data\n", + "2. Extract data\n", + "3. Concatenate data\n", + "4. Preprocess data for NeMo 2.0/Megatron\n", + "\n", + "First, we'll define each step. Next, we will see how we can use NeMo-Run to execute the steps sequentially on your local workstation using Docker or on Slurm.\n", + "\n", + "### Prerequisites\n", + "This notebook assumes familiarity with [NeMo-Run](https://github.com/NVIDIA/NeMo-Run). Additionally, the Docker execution and Slurm execution steps require access to Docker on your host and a remote Slurm cluster, respectively.\n", + "Additionally, you will have to complete the following steps:\n", + "\n", + "1. Set HOST_DATA_PATH in the first cell to a parent folder on your workstation where you want to save the data.\n", + "1. Create directories `HOST_DATA_PATH/tokenizer` and `HOST_DATA_PATH/slimpajama`.\n", + "1. Download the Llama `tokenizer.model` file either from [Hugging Face](https://huggingface.co/meta-llama/Llama-2-7b/blob/main/tokenizer.model) or https://www.llama.com/llama-downloads/ and place it at `{HOST_DATA_PATH}/tokenizer/tokenizer.model`.\n", + " For HF, you can do it by running \n", + " ```bash\n", + " HF_TOKEN=... 
huggingface-cli download meta-llama/Llama-2-7B tokenizer.model --local-dir {HOST_DATA_PATH}/tokenizer/\n", + " ```\n", + "\n", + "> [!NOTE]\n", + "> All code for this tutorial can be found at https://github.com/NVIDIA/NeMo/tree/main/examples/llm/slimpajama." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import nemo_run as run\n", + "\n", + "from data.download import download_slimpajama\n", + "from data.extract import run_extraction\n", + "from data.preprocess import preprocess_data\n", + "\n", + "HOST_DATA_PATH = \"/data\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Download Data\n", + "\n", + "First, we will configure the task to download data from Hugging Face. We will use the Hugging Face CLI for this. The function that configures the download script can be found [here](./data/download.py)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "download_task = download_slimpajama(\n", + " include_pattern='--include \"train/chunk1/*_100*zst\"',\n", + ")\n", + "\n", + "# The configured script looks like below\n", + "print(download_task.inline)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extract Data\n", + "\n", + "The downloaded data is in compressed ZST format. We need to extract it into JSONL files. For that, we will configure the `extract_data` function defined [here](./data/extract.py). This function also allows excluding certain sources. By default, we exclude all data from the `RedPajamaBook` set, but this setting is configurable." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_extraction??" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "extract_task = run.Partial(run_extraction, data_dir=\"/data/slimpajama\")\n", + "extract_task" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Concatenate Data\n", + "\n", + "This optional step concatenates small JSONL files into a single large JSONL file. The example script is [here](./data/concat.sh), but feel free to change it based on your needs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "concat_task = run.Script(\"/nemo_run/code/data/concat.sh\", args=[\"/data/slimpajama/train\", \"1\"])\n", + "concat_task" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Preprocess Data\n", + "\n", + "This final step preprocesses the JSONL files to the BIN and IDX files required by NeMo and Megatron Core. It uses the `preprocess_data` function defined [here](./data/preprocess.py)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preprocess_data??" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preprocess_task = run.Partial(\n", + " preprocess_data,\n", + " data_dir=\"/data/slimpajama\",\n", + " output_dir=\"/data/slimpajama_megatron\",\n", + " tokenizer_model=\"/data/tokenizer/tokenizer.model\",\n", + " tokenizer_library=\"sentencepiece\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "preprocess_task" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Put it all together\n", + "\n", + "Now that all the tasks are configured, lets define an executor to run them on and an experiment to run them sequeuntially. \n", + "\n", + "> [!NOTE]\n", + "> Each task can be run individually or in any combination. The notebook runs all tasks sequentially. To remove a task, just remove the corresponding `exp.add(...)` for that corresponding task.\n", + "> This customization is handy if you already have JSONL files processed, for example, from NeMo-Curator." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "# Let's define a local executor to run the experiment locally.\n", + "def docker_executor(host_data_path: str):\n", + " packager = run.GitArchivePackager(subpath=\"examples/llm/slimpajama\") # This will package all code inside the folder. NOTE: only committed changes are packaged, so if you make a change, make sure to commit it.\n", + " executor = run.DockerExecutor(\n", + " packager=packager,\n", + " ipc_mode=\"host\",\n", + " shm_size=\"30g\",\n", + " env_vars={\"PYTHONUNBUFFERED\": \"1\"},\n", + " volumes=[f\"{host_data_path}:/data\"],\n", + " container_image=\"python:3.11\",\n", + " ulimits=[\"memlock:-1\", \"stack:67108864\"],\n", + " )\n", + " return executor" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Replace the host_data_path with the path on your host to save the data to.\n", + "executor = docker_executor(host_data_path=\"/data\")\n", + "\n", + "with run.Experiment(\"slimpajama-data-pipeline\") as exp:\n", + " exp.add(download_task, name=\"download_slimpajama\", executor=executor)\n", + "\n", + " # Use NeMo image for the remaining tasks\n", + " executor.container_image = \"nvcr.io/nvidia/nemo:dev\"\n", + " exp.add(extract_task, name=\"extract_slimpajama\", executor=executor)\n", + "\n", + " # examples/llm/slimpajama is automatically mounted to /nemo_run/code\n", + " exp.add(concat_task, name=\"concat_slimpajama\", executor=executor)\n", + " exp.add(preprocess_task, name=\"preprocess_slimpajama\", executor=executor)\n", + "\n", + " exp.run(sequential=True, tail_logs=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the experiment runs successfully, you will see the BIN and IDX files as shown below. These files can directly be used in NeMo and Megatron Data Loaders." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "concatenated_chunk1.jsonl_text_document.bin\n", + "concatenated_chunk1.jsonl_text_document.idx\n" + ] + } + ], + "source": [ + "!ls {HOST_DATA_PATH}/slimpajama_megatron" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Appendix\n", + "\n", + "### Running on Slurm\n", + "\n", + "You can also run the same experiment on a remote cluster like Slurm by replacing the Docker executor with a Slurm executor. A sample definition of a Slurm executor looks like:\n", + "\n", + "```python\n", + "def slurm_executor(\n", + " user: str,\n", + " host: str,\n", + " remote_job_dir: str,\n", + " account: str,\n", + " partition: str,\n", + " nodes: int,\n", + " tasks_per_node: int,\n", + " time: str = \"04:00:00\",\n", + " custom_mounts: Optional[list[str]] = None,\n", + " custom_env_vars: Optional[dict[str, str]] = None,\n", + " container_image: str = \"nvcr.io/nvidia/nemo:dev\",\n", + " retries: int = 0,\n", + ") -> run.SlurmExecutor:\n", + " if not (user and host and remote_job_dir and account and partition and nodes and tasks_per_node):\n", + " raise RuntimeError(\n", + " \"Please set user, host, remote_job_dir, account, partition, nodes and devices args for using this function.\"\n", + " )\n", + "\n", + " mounts = []\n", + " if custom_mounts:\n", + " mounts.extend(custom_mounts)\n", + "\n", + " env_vars = {\n", + " \"NVIDIA_VISIBLE_DEVICES\": \"void\", # Might be needed for CPU only nodes with NeMo docker image\n", + " }\n", + " if custom_env_vars:\n", + " env_vars |= custom_env_vars\n", + "\n", + " executor = run.SlurmExecutor(\n", + " account=account,\n", + " partition=partition,\n", + " tunnel=run.SSHTunnel(\n", + " user=user,\n", + " host=host,\n", + " job_dir=remote_job_dir,\n", + " identity=\"/path/to/identity/file/for/ssh/to/cluster\", # OPTIONAL: Provide path to the private key that can be used to establish the SSH connection without entering your password\n", + " ),\n", + " nodes=nodes,\n", + " ntasks_per_node=tasks_per_node,\n", + " mem=\"0\",\n", + " exclusive=True,\n", + " packager=run.GitArchivePackager(subpath=\"examples/llm/slimpajama\"),\n", + " )\n", + "\n", + " executor.container_image = container_image\n", + " executor.container_mounts = mounts\n", + " executor.env_vars = env_vars\n", + " executor.retries = retries\n", + " executor.time = time\n", + "\n", + " return executor\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorials/llm/llama-3/slimpajama/data_pipeline.py b/tutorials/llm/llama-3/slimpajama/data_pipeline.py new file mode 100644 index 000000000000..e06991d1f343 --- /dev/null +++ b/tutorials/llm/llama-3/slimpajama/data_pipeline.py @@ -0,0 +1,124 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+import nemo_run as run
+from data.download import download_slimpajama
+from data.extract import run_extraction
+from data.preprocess import preprocess_data
+
+
+def slurm_executor(  # pylint: disable=C0116
+    user: str,
+    host: str,
+    remote_job_dir: str,
+    account: str,
+    partition: str,
+    nodes: int,
+    tasks_per_node: int,
+    time: str = "01:00:00",
+    custom_mounts: Optional[list[str]] = None,
+    custom_env_vars: Optional[dict[str, str]] = None,
+    container_image: str = "nvcr.io/nvidia/nemo:dev",
+    retries: int = 0,
+    ssh_key_file_path: Optional[str] = None,
+) -> run.SlurmExecutor:
+    if not (user and host and remote_job_dir and account and partition and nodes and tasks_per_node):
+        raise RuntimeError(
+            "Please set user, host, remote_job_dir, account, partition, nodes and tasks_per_node args for using this function."  # pylint: disable=line-too-long
+        )
+
+    mounts = []
+    if custom_mounts:
+        mounts.extend(custom_mounts)
+
+    # Required to run on CPU nodes
+    env_vars = {"NVIDIA_VISIBLE_DEVICES": "void"}
+    if custom_env_vars:
+        env_vars |= custom_env_vars
+
+    executor = run.SlurmExecutor(
+        account=account,
+        partition=partition,
+        tunnel=run.SSHTunnel(
+            user=user,
+            host=host,
+            job_dir=remote_job_dir,
+            identity=ssh_key_file_path,
+        ),
+        nodes=nodes,
+        ntasks_per_node=tasks_per_node,
+        mem="0",
+        exclusive=True,
+        packager=run.GitArchivePackager(subpath="examples/llm/slimpajama"),
+    )
+
+    executor.container_image = container_image
+    executor.container_mounts = mounts
+    executor.env_vars = env_vars
+    executor.retries = retries
+    executor.time = time
+
+    return executor
+
+
+def docker_executor():  # pylint: disable=C0116
+    packager = run.GitArchivePackager(subpath="examples/llm/slimpajama")
+    executor = run.DockerExecutor(
+        packager=packager,
+        ipc_mode="host",
+        shm_size="30g",
+        env_vars={"PYTHONUNBUFFERED": "1"},
+        volumes=["/path/to/save/data:/data"],
+        container_image="python:3.11",
+        ulimits=["memlock:-1", "stack:67108864"],
+    )
+    return executor
+
+
+def run_data_pipeline():  # pylint: disable=C0116
+    executor = docker_executor()
+    with run.Experiment("slimpajama-data-pipeline") as exp:
+        exp.add(
+            download_slimpajama(
+                include_pattern='--include "train/chunk1/*_1*zst"',
+            ),
+            name="download_slimpajama",
+            executor=executor,
+        )
+
+        # Use NeMo image for the remaining tasks
+        executor.container_image = "nvcr.io/nvidia/nemo:nightly"
+        exp.add(run.Partial(run_extraction, data_dir="/data/slimpajama"), executor=executor)
+
+        # examples/llm/slimpajama is automatically mounted to /nemo_run/code
+        exp.add(run.Script("/nemo_run/code/data/concat.sh", args=["/data/slimpajama/train", "1"]), executor=executor)
+        exp.add(
+            run.Partial(
+                preprocess_data,
+                data_dir="/data/slimpajama",
+                output_dir="/data/slimpajama_megatron",
+                tokenizer_model="/data/tokenizer/tokenizer.model",
+                tokenizer_library="sentencepiece",
+                vocab_file_path="/data/tokenizer/tokenizer.vocab",
+            ),
+            executor=executor,
+        )
+
+        exp.run(sequential=True, tail_logs=True, detach=True)
+
+
+if __name__ == "__main__":
+    run_data_pipeline()
diff --git 
a/tutorials/llm/llama-3/slimpajama/pretraining.ipynb b/tutorials/llm/llama-3/slimpajama/pretraining.ipynb new file mode 100644 index 000000000000..420aafab24f3 --- /dev/null +++ b/tutorials/llm/llama-3/slimpajama/pretraining.ipynb @@ -0,0 +1,164 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Pretraining using SlimPajama\n", + "\n", + "Let's see how we can use the data generated from the [data pipeline notebook](./data_pipeline.ipynb) to pretrain a model. All we need to do is define the data module based on the generated data and replace it with the mock data module provided by default in the [NeMo LLM recipes](../../../nemo/collections/llm/recipes/__init__.py)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import nemo_run as run\n", + "from typing import Optional\n", + "import pytorch_lightning as pl\n", + "from nemo.collections import llm\n", + "from nemo.collections.common.tokenizers import SentencePieceTokenizer" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define the data module\n", + "To define the data module, we can use `llm.PreTrainingDataModule` and pass in the data paths and tokenizer. In case you don't have either of the two, please refer to the [data pipeline notebook](./data_pipeline.ipynb). You can also look at the definition of the data module for the other parameters supported like `split`, `num_workers`, `index_mapping_dir`, etc." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def slimpajama(\n", + " gbs: int = 256,\n", + " mbs: int = 4,\n", + " seq_length: int = 8192,\n", + ") -> run.Config[pl.LightningDataModule]:\n", + "\n", + " return run.Config(\n", + " llm.PreTrainingDataModule,\n", + " paths=[\"/data/slimpajama_megatron/concatenated_chunk1.jsonl_text_document\"],\n", + " seq_length=seq_length,\n", + " global_batch_size=gbs,\n", + " micro_batch_size=mbs,\n", + " tokenizer=run.Config(SentencePieceTokenizer, model_path=\"/data/tokenizer/tokenizer.model\"),\n", + " split=\"99,8,2\",\n", + " num_workers=2,\n", + " index_mapping_dir=\"/data/index_mapping\",\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configure the recipe and launch pretraining\n", + "Once the data module is defined, you can use an existing recipe and replace the data module as shown below.\n", + "To learn more about the recipes, refer to the [quickstart](https://docs.nvidia.com/nemo-framework/user-guide/latest/nemo-2.0/quickstart.html)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def configure_recipe(nodes: int = 1, gpus_per_node: int = 1):\n", + " recipe = llm.llama3_8b.pretrain_recipe(\n", + " dir=\"/checkpoints/llama-new\", # Path to store checkpoints\n", + " name=\"llama_pretraining\",\n", + " num_nodes=nodes,\n", + " num_gpus_per_node=gpus_per_node,\n", + " )\n", + "\n", + " recipe.model.config.num_layers = 1\n", + " recipe.model.config.hidden_size = 128\n", + " recipe.trainer.max_steps = 30\n", + " recipe.data = slimpajama(\n", + " gbs=32,\n", + " mbs=1,\n", + " )\n", + " recipe.trainer.val_check_interval = 20\n", + " recipe.trainer.strategy.context_parallel_size = 1\n", + " recipe.log.ckpt.save_optim_on_train_end = True\n", + " return recipe" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def local_executor_torchrun(nodes: int = 1, devices: int = 1) -> run.LocalExecutor:\n", + " # Env vars for jobs are configured here\n", + " env_vars = {\n", + " \"TORCH_NCCL_AVOID_RECORD_STREAMS\": \"1\",\n", + " \"NEMO_ENV_VARNAME_TESTING\": \"1\",\n", + " \"CUDA_VISIBLE_DEVICES\": \"0\"\n", + " }\n", + "\n", + " executor = run.LocalExecutor(ntasks_per_node=devices, launcher=\"torchrun\", env_vars=env_vars)\n", + " return executor\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def run_pretraining():\n", + " recipe = configure_recipe()\n", + " executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices)\n", + "\n", + " run.run(recipe, executor=executor)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run Pretraining\n", + "Now, you can just call the `run_pretraining` function to start pretraining on your local machine using torchrun." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run_pretraining()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 5cebc855fc55a23420d74b6e5bd57bcf6c65011c Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Thu, 19 Dec 2024 15:54:00 -0500 Subject: [PATCH 078/128] Remove NeMo 1 docs (#11670) * remove nemo1 docs Signed-off-by: Chen Cui * fix link Signed-off-by: Chen Cui --------- Signed-off-by: Chen Cui --- docs/source/checkpoints/dev_guide.rst | 234 ------------------ docs/source/checkpoints/user_guide.rst | 94 ------- docs/source/features/optimizations/index.rst | 1 - .../optimizations/sequence_packing.rst | 136 ---------- nemo/collections/llm/gpt/data/fine_tuning.py | 3 +- 5 files changed, 1 insertion(+), 467 deletions(-) delete mode 100644 docs/source/checkpoints/dev_guide.rst delete mode 100644 docs/source/checkpoints/user_guide.rst delete mode 100644 docs/source/features/optimizations/sequence_packing.rst diff --git a/docs/source/checkpoints/dev_guide.rst b/docs/source/checkpoints/dev_guide.rst deleted file mode 100644 index 601e69749b64..000000000000 --- a/docs/source/checkpoints/dev_guide.rst +++ /dev/null @@ -1,234 +0,0 @@ -Community Model Converter Development Guide -=========================================== - -Guideline Steps for Checkpoint Conversion ------------------------------------------ - -1. **Understand Both Frameworks**: Familiarize yourself with the architectures and naming conventions of both HuggingFace and NeMo models. - -2. **Load Community Checkpoint**: For example, use HuggingFace’s ``AutoModel`` to load the pre-trained model. - -3. **Inspect Model and Config**: Understand the layer names, parameter shapes, and essential configs. - -4. **Adjust NeMo Model Configuration**: Modify the NeMo model configuration to match the HuggingFace model’s specifications. - -5. **Initialize NeMo Model**: Create an instance of the corresponding NeMo model. - -6. **Create Key Mapping**: Define a function to map HuggingFace layer names to NeMo layer names. Adjust for any structural differences. - -7. **Rename and Reshape Parameters**: Implement a function to rename keys in the HuggingFace state dictionary and reshape tensors if necessary. For example, QKV weights usually need some special handling from HF to NeMo. - -8. **Load Converted Weights into NeMo Model**: Apply the transformed state dictionary to the NeMo model. - -9. **Save NeMo Checkpoint**: Save the updated NeMo model as a new checkpoint. - -10. **Verification**: Verify the performance of the NeMo model to ensure successful conversion. - -11. **Add Docstrings and Comments**: Please kindly comment the expected shapes in the parameter reshaping part. - -12. **Add Jenkins Tests**: Please use `Llama Huggingface to NeMo converter test `_ as an example for development. - -Script Placement and Naming Conventions ---------------------------------------- - -- **Script Location**: Place scripts in the ``NeMo/scripts/checkpoint_converters`` directory. 
- -- **Script Naming**: Name your script following the format ``convert_{model}_{source}_to_{target}.py``, such as ``convert_llama_hf_to_nemo.py``. - -- **Unified Arguments (APIs)**: User only needs to define input and output files. Configs should be automatically updated. - - - ``--input_name_or_path``: Specify the name or path of the model. Give one example default value. - - - ``--output_path``: Set the path for saving the output .nemo file. This argument is required. - - - ``--hparams_file``: Define the path for the configuration file needed for restoration. Set default path to an existing and working yaml file e.g. ``f"{os.path.dirname(__file__)}/../../examples/nlp/language_modeling/conf/megatron_bert_config.yaml"``. A regular user should not change it, but for advanced/internal users, this can be modified. - - - ``--precision``: Choose the precision for saved checkpoint weights. Options: "bf16", "16", "32". Default: "32". - -Code Template -------------- - -Below template tries to address the 11 steps in the guideline part. Please also use `Gemma Huggingface to NeMo converter `__ as an full example for development. - -.. code-block:: python - - import os - import torch - from omegaconf import OmegaConf - from transformers import AutoTokenizer, AutoModel - from nemo.collections.nlp.models.language_modeling.megatron_bert_model import MegatronBertModel - from nemo.utils import logging - from nemo.collections.nlp.parts.megatron_trainer_builder import MegatronTrainerBuilder - - # Add additional imports and custom functions as required - - def create_rename_keys(num_hidden_layers): - # Your implementation of create_rename_keys function - ... - - def adjust_tensor_shapes(model, nemo_state_dict): - # Your implementation of adjust_tensor_shapes function - ... - - def adjust_nemo_config(model_config, ref_config): - # Your implementation of adjust_nemo_config function - ... - - def rename_model_keys(model_state_dict, rename_keys): - """ - Rename keys in the model's state dictionary based on the provided mappings. - - Parameters: - model_state_dict (dict): The state dictionary of the model. - rename_keys (list): A list of tuples with the mapping (old_key, new_key). - - Returns: - dict: A new state dictionary with updated key names. - """ - - # Create a new state dictionary with updated key names - new_state_dict = {} - - # Track keys from the original state dict to ensure all are processed - remaining_keys = set(model_state_dict.keys()) - - # Iterate over the rename mappings - for old_key, new_key in rename_keys: - if old_key in model_state_dict: - # Rename the key and remove it from the tracking set - new_state_dict[new_key] = model_state_dict[old_key] - remaining_keys.remove(old_key) - else: - print(f"Warning: Key '{old_key}' not found in the model state dictionary.") - - # Check if any keys were not converted from old to new - for old_key in remaining_keys: - print(f"Warning: Key '{old_key}' was not converted.") - - def get_args(): - # Arg names subject to change, feel free to suggest. - parser = ArgumentParser() - parser.add_argument("--input_name_or_path", type=str, default="intfloat/e5-large-unsupervised") - parser.add_argument( - "--hparams_file", - type=str, - default=f"{os.path.dirname(__file__)}/../../examples/nlp/language_modeling/conf/megatron_bert_config.yaml", - required=False, - help="Path config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. 
Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml", - ) - parser.add_argument("--output_path", type=str, default=None, required=True, help="Path to output .nemo file.") - parser.add_argument( - "--precision", type=str, default="32", choices=["bf16", "32"], help="Precision for checkpoint weights saved" - ) - - args = parser.parse_args() - return args - - def convert(args): - logging.info(f"Loading checkpoint from HF: `{args.name_or_path}`") - hf_model = AutoModel.from_pretrained(args.name_or_path) - - nemo_config = OmegaConf.load(args.hparams_file) - nemo_config.model = adjust_nemo_config(nemo_config.model, hf_model.config.to_dict()) - - nemo_config.trainer["precision"] = args.precision - trainer = MegatronTrainerBuilder(nemo_config).create_trainer() - model = MegatronBertModel(nemo_config.model, trainer) - - old_state_dict = hf_model.state_dict() - rename_keys = create_rename_keys(nemo_config.model.num_layers) - new_state_dict = rename_model_keys(model_state_dict=old_state_dict, rename_keys=rename_keys) - nemo_state_dict = adjust_tensor_shapes(model, new_state_dict) - model.load_state_dict(nemo_state_dict, strict=True) - - # Additional verification and processing steps - ... - - model.save_to(args.save_path) - logging.info(f'NeMo model saved to: {args.save_path}') - - if __name__ == '__main__': - args = get_args() - convert(args) - - - -*Notes:* This template abstracts some functions (create_rename_keys, adjust_tensor_shapes, adjust_nemo_config) which are crucial for the conversion process. These functions need to be adapted based on specific model architectures and requirements. Ensure that the NeMo model’s configuration is properly aligned with the HuggingFace model’s configuration. It is important to thoroughly test the converted model to validate the conversion process. - - -Development Tips ----------------- - -A Simple Guide for Model Mapping and Conversion -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. **Mapping between community model and NeMo model**: - - - Match the configurations between the community model and the NeMo model. - - Create two text files, ``state_src.txt`` and ``state_tgt.txt``, containing the state dict weights and their shapes for easier reference and debugging. - - Example code to generate ``state_src.txt``: - - .. code-block:: python - - file_path = "state_src.txt" - state = model.state_dict() - with open(file_path, 'w') as file: - for k, v in state.items(): - file.write(f"{k} {v.shape}\n") - - - Utilize language models (LMs) to assist in completing the key mapping through the ``create_rename_keys`` function. Here's an example prompt for Gemma: - - .. code-block:: text - - Map the following key names and tensor shapes from Model A to their equivalents in Model B. Here is an example mapping: Model A's 'model.layer.weight' corresponds to Model B's 'module.block.weight'. - ============================================================ - embedder.weight torch.Size([256128, 2048]) - ... - ============================================================ - - Based on the results, update the following code accordingly: - - .. code-block:: python - - def create_rename_keys(num_hidden_layers): - rename_keys = [] - for i in range(num_hidden_layers): - # encoder layers: output projection, 2 feedforward neural networks, and 2 layernorms - # @chatgpt to fill in layer-dependent keys above - - # @chatgpt fill in non-layer-dependent keys above - rename_keys.extend( - [ - # ... 
- ] - ) - - return rename_keys - - **Note**: Also list all the keys not included in the conversion above. - -2. **Common issues when converting: results not matching between Community model and NeMo model**: - - a. Megatron Core uses a special QKV layout, which needs careful handling and reshaping from community models, especially when GQA or MQA is used. Refer to the `Gemma Huggingface to NeMo converter `__ for guidance. - - b. GLU Variants weights could also be a common source of error. In Megatron Core, the regular feedforward projection weights and gated forward weights are fused together, requiring careful attention to the order of these two. Refer to the `Gemma Huggingface to NeMo converter `_ for more details. - -3. The ``create_hf_model`` function can be used to create a model programmatically. For reproducibility, see the example provided at `GitHub `_. This function creates a randomly initialized HuggingFace model for testing purposes. The model can be specified by name or path for creating its config and tokenizer using HuggingFace transformers AutoConfig and AutoTokenizer functions. - -Example usage: - -.. code-block:: python - - create_hf_model( - model_name_or_path="/home/TestData/nlp/meta-llama/Llama-2-7b-hf", - output_dir=os.path.join(args.save_dir, "megatron_llama/llama-ci-hf"), - config_updates={ - "hidden_size": 256, - "num_attention_heads": 4, - "num_hidden_layers": 2, - "num_key_value_heads": 4 - }, - overwrite=args.overwrite, - ) - diff --git a/docs/source/checkpoints/user_guide.rst b/docs/source/checkpoints/user_guide.rst deleted file mode 100644 index 451679a7e3ae..000000000000 --- a/docs/source/checkpoints/user_guide.rst +++ /dev/null @@ -1,94 +0,0 @@ -Community Model Converter User Guide -==================================== - -This guide provides instructions on how to use the conversion scripts to convert models between Community model and NVIDIA's NeMo format. 
- -Support Matrix --------------- - -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Conversion | From | To | Github Link | -+======================+==================+=====================+=====================================================================================================================+ -| Baichuan | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Baichuan | NeMo | Hugging Face | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| BERT | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| BERT | NeMo | Hugging Face | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Falcon | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Falcon | NeMo | Hugging Face | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Gemma | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Gemma | JAX | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Gemma | PyTorch | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| GPT/LLaMA | NeMo (Legacy) | NeMo (Megatron-Core)| `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| LLaMA | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| LLaMA | NeMo | Hugging Face | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Mistral 7B | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Mistral 7B | NeMo | Hugging Face | `Link `__ | 
-+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Mixtral | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Mixtral | NeMo | Hugging Face | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| MPT | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ -| Starcoder | Hugging Face | NeMo | `Link `__ | -+----------------------+------------------+---------------------+---------------------------------------------------------------------------------------------------------------------+ - - -Convert Hugging Face LLaMA Checkpoints to NeMo ----------------------------------------------- - -To convert a Hugging Face LLaMA checkpoint into a NeMo checkpoint, use the following command: - -.. code-block:: bash - - python convert_llama_hf_to_nemo.py \ - --input_name_or_path \ - --output_path - -Convert NeMo Checkpoint to Hugging Face LLaMA ---------------------------------------------- - -To convert a NeMo checkpoint into a Hugging Face LLaMA checkpoint, you have two options: - -1. Generate only the Hugging Face weights: - -.. code-block:: bash - - python convert__nemo_to_hf.py \ - --input_name_or_path /path/to/file.nemo or /path/to/extracted_folder \ - --output_path /path/to/pytorch_model.bin - -2. Generate the full Hugging Face model folder: - -.. code-block:: bash - - python convert__nemo_to_hf.py \ - --input_name_or_path /path/to/file.nemo or /path/to/extracted_folder \ - --output_path /path/to/model_folder \ - --hf_input_path /path/to/input_hf_folder \ - --hf_output_path /path/to/output_hf_folder - -Replace `` with the specific model you are converting. - -Use the ``--cpu-only`` flag if the model cannot fit in the GPU, such as for Llama2 70b models. Note that using this option will significantly slow down the conversion process. - -Command-Line Arguments ----------------------- - -- ``--input_name_or_path``: Path to the input .nemo file or the Hugging Face model folder. -- ``--output_path``: Path to the output file or folder, depending on the conversion direction. -- ``--hf_input_path``: (Optional) Path to the input Hugging Face model folder. -- ``--hf_output_path``: (Optional) Path to the output Hugging Face model folder. 
diff --git a/docs/source/features/optimizations/index.rst b/docs/source/features/optimizations/index.rst index 60f4428f9299..c9492967b8a0 100644 --- a/docs/source/features/optimizations/index.rst +++ b/docs/source/features/optimizations/index.rst @@ -5,7 +5,6 @@ Optimizations :maxdepth: 1 ./attention_optimizations - ./sequence_packing ./activation_recomputation ./communication_overlap ./cpu_offloading diff --git a/docs/source/features/optimizations/sequence_packing.rst b/docs/source/features/optimizations/sequence_packing.rst deleted file mode 100644 index 40c04ce65350..000000000000 --- a/docs/source/features/optimizations/sequence_packing.rst +++ /dev/null @@ -1,136 +0,0 @@ -Sequence Packing -================ - -This section explains how to use the sequence packing training technique with Supervised Fine-Tuning (SFT) and Parameter-Efficient Fine-Tuning (PEFT). - -Sequence Packing for SFT/PEFT ------------------------------ - -Overview -######## - -When fine-tuning a large language model, whether using SFT or PEFT methods, GPU underutilization often occurs due to an inefficient data pipeline. This inefficiency arises because most fine-tuning datasets have a skewed distribution of sequence lengths, with many short sequences and a few long ones, following Zipf’s Law. Since transformer models require fixed-length inputs, shorter sequences must be padded with unused tokens, leading to two main inefficiencies: - -- Computation performed on the pad values is eventually ignored for model output, resulting in wasted FLOPs. -- Micro batch size is often limited by the batch which contains longer sequences, so that most other micro batches have - underutilized GPU memory. - -Sequence packing is a training technique where multiple training sequences (examples) are concatenated into one long sequence (pack). This method eliminates the need for padding, allowing more tokens to be processed in each micro batch. As a result, it maximizes both GPU compute and GPU memory utilization. - -While sequences for pretraining can be concatenated naively, this is not the case for SFT and instruction fine-tuning -where each input sequence should be treated individually. The conventional solution is to build an extended attention -mask to mark the sequence id each token belongs to, and mask out attention values between sequences. However, this -increases the complexity of attention from :math:`\sum_i {s_i}^2` to :math:`\Big({\sum_i {s_i}}\Big)^2`, where :math:`s_i` is the -length of the ith subsequence. In practice, the conventional solution puts a limit on the length of packing. -Instead, NeMo provides a highly optimized version of sequence packing which makes use of variable-length attention -kernels in FlashAttention and TransformerEngine. With this approach, attention values between sequences are never calculated, -so the complexity of attention remains at :math:`\sum_i {s_i}^2`. This allows packing sequences to arbitrary lengths so -that GPU memory can be fully utilized. - -All things considered, NeMo’s implementation of sequence packing provides [#f1]_ - -- Up to 10X performance improvement in terms of FLOPs -- Up to 6X performance improvement in terms of training time -- No impact on model convergence - - - -How to run SFT/PEFT with packed sequence -######################################## - -Prepare Dataset -^^^^^^^^^^^^^^^ - -We provide a convenient script to pack your SFT or PEFT dataset. -This script assumes that you already have a prepared dataset file for SFT/PEFT training in NeMo. 
If you do not, please -follow `this `_ to -download and prepare the Dolly dataset as an example. -You will get a file named training.jsonl. The rest of this tutorial also assumes you already have a recipe for -training with the unpacked dataset. - -Two main steps are run in this script: - -1. The online processing code in GPTSFTDataset is run. This includes tasks such as prompt template manipulation, sequence length truncation, and tokenization. The result is an array of tokenized sequences, represented by indices. -2. The tokenized sequences are grouped by length and a packing algorithm is run. - -You can read more about packing algorithms `here `_. -Currently, two variants of ``first_fit`` are supported. -- ``first_fit_decreasing`` sorts the sequences in decreasing order before applying the first-fit algorithm. It generates a -more optimal packing, but it tends to keep all short sequences together, which may have an impact for convergence. -- ``first_fit_shuffle`` runs first-fit in a random order. Packing is less optimal but it keeps the dataset order random. -The recommendation is to run ``first_fit_shuffle`` and check the packed sequence lengths. If they are similar to the -target length (i.e. efficient packing), then use shuffle. Otherwise try ``first_fit_decreasing``. - - .. code-block:: bash - - python scripts/nlp_language_modeling/prepare_packed_ft_dataset.py \ - model.data.train_ds.file_names=[/path/to/training.jsonl] \ - model.data.train_ds.max_seq_length=2048 \ - +tokenizer_path=/path/to/tokenizer.model \ - +output_dir=/path/to/output_folder \ - +pack_sizes=[2048,4096,8192] \ - [ +packing_algorithm=first_fit_shuffle \ ] - [ +seed=0 ] - -.. note:: - - 1. If your model or dataset requires non-default configs for conventional SFT/PEFT training in NeMo, you will need to pass in the same configs to ``model.data.train_ds`` as you would for training with an unpacked dataset. - - 2. ``model.data.train_ds.max_seq_length`` is the length to which each sequence is truncated before packing multiple sequences to the size of packed sequence (``pack_size``). ``max_seq_length`` should be set to the same value as unpacked data and can be determined by examining the distribution of sequence lengths in the dataset. - - 3. ``pack_sizes`` is a list of packed sequence lengths. In this example, there will be three output files, one for each pack size. The output files are named ``/packed_{pack_size}_seed{seed}.npy``. - This argument is a list because you will likely want to experiment with a few ``pack_sizes`` to find out which length - can fill the GPU memory without exceeding it. Adjusting ``pack_size`` is analogous to adjusting the micro batch size in - the unpacked case. - - -Adjust Training Config -^^^^^^^^^^^^^^^^^^^^^^ - -To train with packed sequences, you need to change four items in the SFT/PEFT config file. - -1. Turn on the packed_sequence flag: - - .. code-block:: bash - - ++model.data.train_ds.packed_sequence=True - -2. Use the new dataset file instead of the original jsonl file: - - .. code-block:: bash - - model.data.train_ds.file_names=output_folder/packed_{pack_size}_seed{seed}.npy - -3. Specify the packed sequence length. This should be one of the ``pack_sizes`` you specified during data preparation. - - .. code-block:: bash - - model.data.train_ds.max_seq_length={pack_size} - -4. Adjust the batch sizes. - - - Micro batch size has to be set to 1 as a nominal constraint. This is because batches are now concatenated in the - preprocessing step. 
You can increase the ``pack_size`` to achieve the same purpose of increasing micro batch size. - - Global batch size has to be adjusted so that the training recipe is maintained. Because each pack contains - multiple sequences now, global batch size needs to be reduced by the average number of sequences per pack ``n``, - where ``n = num_sequences_in_dataset / num_packs``. This ensures that each gradient iteration sees (on - average) the same number of tokens. The value of ``n`` is printed out when the script is run. - - .. code-block:: bash - - model.micro_batch_size=1 - model.global_batch_size= - -Now, you are all set to fine-tune your model with a much improved throughput! - -Sequence Packing for NeVA -------------------------- - -Sequence packing with NeVA for multimodal large language models differs from the LLM SFT/PEFT approach. For details, please refer to the documentation below. - -:doc:`../../multimodal/mllm/sequence_packing` - -.. rubric:: Footnotes - -.. [#f1] Experiments were performed on Llama 7B with Dolly dataset. Actual performance improvement depends on dataset - and model. \ No newline at end of file diff --git a/nemo/collections/llm/gpt/data/fine_tuning.py b/nemo/collections/llm/gpt/data/fine_tuning.py index a22ed72f4656..3879cd834913 100644 --- a/nemo/collections/llm/gpt/data/fine_tuning.py +++ b/nemo/collections/llm/gpt/data/fine_tuning.py @@ -110,8 +110,7 @@ def validate_batch_size_for_packed_sequence(self): f"Set packed sequence length to {self.packed_sequence_size*self.micro_batch_size} " f"(currently {self.packed_sequence_size}) \n" f"For details please visit " - f"https://docs.nvidia.com/nemo-framework/user-guide/latest/nemotoolkit/features/optimizations/" - f"sequence_packing.html" + f"https://docs.nvidia.com/nemo-framework/user-guide/latest/sft_peft/packed_sequence.html" ) def prepare_data(self) -> None: From c87273965724c6a4b5d72cbf3e707935d58cbbae Mon Sep 17 00:00:00 2001 From: Huiying Date: Thu, 19 Dec 2024 14:02:46 -0800 Subject: [PATCH 079/128] Change peft merged model to bf16 (#11663) * Change peft merge model to bf16 Signed-off-by: HuiyingLi * Apply isort and black reformatting Signed-off-by: HuiyingLi --------- Signed-off-by: HuiyingLi Signed-off-by: HuiyingLi Co-authored-by: HuiyingLi --- nemo/collections/llm/peft/api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nemo/collections/llm/peft/api.py b/nemo/collections/llm/peft/api.py index a089a6d17515..6c12062bd935 100644 --- a/nemo/collections/llm/peft/api.py +++ b/nemo/collections/llm/peft/api.py @@ -17,6 +17,7 @@ from typing import Tuple, Union import pytorch_lightning as pl +import torch from megatron.core import dist_checkpointing from pytorch_lightning.trainer.states import TrainerFn @@ -78,7 +79,8 @@ def merge_lora( def _load_base_model_and_lora(lora_checkpoint_path: Path) -> Tuple[pl.LightningModule, LoRA]: model = io.load_context(ckpt_to_context_subdir(lora_checkpoint_path), "model") model.model_transform, model.__io__.model_transform = None, None - model.config.bf16 = False + model.config.bf16 = True + model.config.params_dtype = torch.bfloat16 lora: Union[io.TrainerContext, LoRA] = io.load_context( ckpt_to_context_subdir(lora_checkpoint_path), "model.model_transform" ) From 72634f3f171efa23d27ae2160db1b481d7d276fa Mon Sep 17 00:00:00 2001 From: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> Date: Fri, 20 Dec 2024 04:46:20 +0530 Subject: [PATCH 080/128] Add Minitron depth pruning (layer dropping) to megatron_gpt_prune.py (#11609) * Add depth pruning 
(layer dropping) to megatron_gpt_prune.py Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> * Apply isort and black reformatting Signed-off-by: kevalmorabia97 --------- Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> Signed-off-by: kevalmorabia97 Co-authored-by: kevalmorabia97 --- .github/workflows/cicd-main.yml | 22 +++- .../conf/megatron_gpt_prune.yaml | 13 +- .../megatron_gpt_drop_layers.py | 4 + .../language_modeling/megatron_gpt_prune.py | 123 +++++++++++++----- .../03_a_depth_pruning.ipynb | 21 +-- 5 files changed, 131 insertions(+), 52 deletions(-) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 25e0c5252100..e258391e04e9 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -571,9 +571,24 @@ jobs: prune.num_attention_heads=2 \ prune.num_query_groups=2 \ prune.hidden_size=128 \ - export.save_path=examples/nlp/language_modeling/ci_prune_width.nemo - AFTER_SCRIPT: | - rm -rf examples/nlp/language_modeling/ci_prune_width.nemo + export.save_path=/tmp/ci_prune_width.nemo + + L2_Prune_Depth_Llama2: + needs: [cicd-test-container-setup] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_Prune_Depth_Llama2') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + python examples/nlp/language_modeling/megatron_gpt_prune.py \ + trainer.devices=2 \ + trainer.num_nodes=1 \ + trainer.precision=bf16 \ + model.restore_from_path=/home/TestData/nlp/megatron_llama/llama_ci.nemo \ + model.tensor_model_parallel_size=2 \ + model.pipeline_model_parallel_size=1 \ + 'prune.drop_layers=[1]' \ + export.save_path=/tmp/ci_prune_depth.nemo # L2: ASR dev run ASR_dev_run_Speech_to_Text: @@ -4923,6 +4938,7 @@ jobs: - L2_Community_LLM_Checkpoints_tests_Llama3 - L2_Distill_Llama2 - L2_Prune_Width_Llama2 + - L2_Prune_Depth_Llama2 - L2_Speech_to_Text_AED - L2_Speech_Estimate_Duration_Bins - L2_Speech_Batch_Size_OOMptimizer diff --git a/examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml b/examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml index 85e46b6a6989..16a1a89c0d2f 100644 --- a/examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml +++ b/examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml @@ -1,14 +1,14 @@ inference: greedy: false # Whether or not to use sampling ; use greedy decoding otherwise - top_k: 0 # The number of highest probability vocabulary tokens to keep for top-k-filtering. + top_k: 0 # The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p: 0.9 # If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation. temperature: 1.0 # sampling temperature add_BOS: true # add the bos token at the begining of the prompt tokens_to_generate: 30 # The minimum length of the sequence to be generated. - all_probs: false # whether return the log prob for all the tokens in vocab - repetition_penalty: 1.2 # The parameter for repetition penalty. 1.0 means no penalty. - min_tokens_to_generate: 0 # The minimum length of the sequence to be generated. - compute_logprob: false # a flag used to compute logprob of all the input text, a very special case of running inference, default False + all_probs: false # whether return the log prob for all the tokens in vocab + repetition_penalty: 1.2 # The parameter for repetition penalty. 
1.0 means no penalty. + min_tokens_to_generate: 0 # The minimum length of the sequence to be generated. + compute_logprob: false # a flag used to compute logprob of all the input text, a very special case of running inference, default False batch_size: 1 # batch size for inference max_context_length: 512 # max length of the context, input sequence will be truncated if it is longer than this @@ -38,7 +38,8 @@ prune: num_attention_heads: null # num_attention_heads in the pruned model num_query_groups: null # num_query_groups in the pruned model hidden_size: null # hidden_size (embedding size) in the pruned model - num_layers: null # num_layers (depth) in the pruned model + num_layers: null # num_layers (depth) in the pruned model using on cosine-similarity based importance + drop_layers: [] # drop specified layer numbers (comma separated, 1-indexed). Cannot be used with other constraints export: save_path: ??? # Path where the pruned model will be saved diff --git a/examples/nlp/language_modeling/megatron_gpt_drop_layers.py b/examples/nlp/language_modeling/megatron_gpt_drop_layers.py index 4cd3fb6a8ef6..e14a75efdb42 100644 --- a/examples/nlp/language_modeling/megatron_gpt_drop_layers.py +++ b/examples/nlp/language_modeling/megatron_gpt_drop_layers.py @@ -13,6 +13,8 @@ # limitations under the License. r""" +NOTE: This script will be deprecated soon in favor of `megatron_gpt_prune.py`. Please use the new script for trimming layers. + Script to trim model layers. Example to run the script with checkpoint: python -m torch.distributed.launch --nproc_per_node= * \ @@ -112,6 +114,8 @@ def trim_layers(model, layers_to_trim): def main(local_rank, rank, world_size, args): + logging.warning("This script will be deprecated soon in favor of `megatron_gpt_prune.py`.") + app_state = AppState() app_state.data_parallel_rank = 0 num_nodes = world_size // args.gpus_per_node diff --git a/examples/nlp/language_modeling/megatron_gpt_prune.py b/examples/nlp/language_modeling/megatron_gpt_prune.py index b89d3adbb081..100f86f59aef 100644 --- a/examples/nlp/language_modeling/megatron_gpt_prune.py +++ b/examples/nlp/language_modeling/megatron_gpt_prune.py @@ -32,7 +32,7 @@ Please consult examples/nlp/language_modeling/conf/megatron_gpt_prune.yaml config on available pruning arguments, models supported as well as how to set up data and inference for calibration (with defaults recommended). 
-Example usage: +Example usage to prune width automatically: ``` python examples/nlp/language_modeling/megatron_gpt_prune.py \ model.restore_from_path=llama3.1-8b.nemo \ @@ -45,9 +45,54 @@ prune.num_attention_heads=null \ prune.num_query_groups=null \ prune.hidden_size=3072 \ + export.save_path=llama3.1-8b-width-pruned.nemo +``` + +Example usage to prune depth automatically using cosine-similarity based importance metric: +``` +python examples/nlp/language_modeling/megatron_gpt_prune.py \ + model.restore_from_path=llama3.1-8b.nemo \ + model.tensor_model_parallel_size=1 \ + model.pipeline_model_parallel_size=8 \ + trainer.num_nodes=1 \ + trainer.precision=bf16 \ + trainer.devices=8 \ + prune.num_layers=16 \ + export.save_path=llama3.1-8b-depth-pruned.nemo +``` + +Example usage to prune width and depth automatically: +``` +python examples/nlp/language_modeling/megatron_gpt_prune.py \ + model.restore_from_path=llama3.1-8b.nemo \ + model.tensor_model_parallel_size=1 \ + model.pipeline_model_parallel_size=8 \ + trainer.num_nodes=1 \ + trainer.precision=bf16 \ + trainer.devices=8 \ + prune.ffn_hidden_size=9216 \ + prune.num_attention_heads=null \ + prune.num_query_groups=null \ + prune.hidden_size=3072 \ + prune.num_layers=16 \ + export.save_path=llama3.1-8b-width-and-depth-pruned.nemo +``` + +NOTE: for above usages, `model.tensor_model_parallel_size` and `inference.batch_size` must be 1 +because of the current prune API limitation + +Example usage to prune depth by dropping specific model layers (1-indexed): +``` +python examples/nlp/language_modeling/megatron_gpt_prune.py \ + model.restore_from_path=llama3.1-8b.nemo \ + model.tensor_model_parallel_size=8 \ + model.pipeline_model_parallel_size=1 \ + trainer.num_nodes=1 \ + trainer.precision=bf16 \ + trainer.devices=8 \ + 'prune.drop_layers=[16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]' \ export.save_path=llama3.1-8b-pruned.nemo ``` -where model.tensor_model_parallel_size and inference.batch_size must be 1 because of the current prune API limitation """ @@ -79,51 +124,61 @@ def main(cfg) -> None: model_cfg.update(cfg.model) model_cfg.name = "modelopt" # Use modelopt transformer spec for pruning - assert cfg.model.tensor_model_parallel_size == 1, "Pruning currently only supports tensor_model_parallel_size=1" - trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer) model = MegatronGPTModel.restore_from( restore_path=cfg.model.restore_from_path, override_config_path=model_cfg, trainer=trainer ) - data_iter = get_calib_data_iter( - cfg.prune.calib_dataset, - cfg.inference.batch_size, - cfg.prune.num_calib_size, - cfg.inference.max_context_length, - ) - dataloader = [data for data in data_iter] - def forward_loop(model): + data_iter = get_calib_data_iter( + cfg.prune.calib_dataset, + cfg.inference.batch_size, + cfg.prune.num_calib_size, + cfg.inference.max_context_length, + ) + dataloader = [data for data in data_iter] + # NOTE: Alternatively you can also use `model.forward_bwd_step(data_iter, forward_only=True)` # if your model is setup for training. 
model.set_inference_config(OmegaConf.to_container(cfg.inference)) for i, batch in enumerate(tqdm(dataloader, desc="Calibrating")): model.predict_step(batch, i) - model_pruned, _ = mtp.prune( - model, - mode="mcore_gpt_minitron", - constraints={ - "export_config": { - k: cfg.prune.get(k) - for k in [ - "ffn_hidden_size", - "num_attention_heads", - "num_query_groups", - "hidden_size", - "num_layers", - ] - if cfg.prune.get(k) is not None - }, - }, - dummy_input=None, # Not used - config={"forward_loop": forward_loop}, - ) - - model_pruned.save_to(cfg.export.save_path) + export_config = { + k: cfg.prune.get(k) + for k in [ + "ffn_hidden_size", + "num_attention_heads", + "num_query_groups", + "hidden_size", + "num_layers", + ] + if cfg.prune.get(k) is not None + } + + drop_layers = OmegaConf.to_object(cfg.prune.drop_layers) # convert to native python list + if drop_layers: + assert ( + not export_config + ), f"Cannot specify `prune.drop_layers` with other prune constraints. Recieved: {cfg.prune}" + mtp.plugins.megatron.drop_mcore_gpt_layers(model.model, layers_to_drop=drop_layers) + setattr(model.cfg, "num_layers", model.model.config.num_layers) + else: + assert ( + cfg.model.tensor_model_parallel_size == 1 + ), "Pruning currently only supports tensor_model_parallel_size=1" + + mtp.prune( + model, + mode="mcore_gpt_minitron", + constraints={"export_config": export_config}, + dummy_input=None, # Not used + config={"forward_loop": forward_loop}, + ) + + model.save_to(cfg.export.save_path) print(f"Pruned model saved to {cfg.export.save_path}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tutorials/llm/llama-3/pruning-distillation/03_a_depth_pruning.ipynb b/tutorials/llm/llama-3/pruning-distillation/03_a_depth_pruning.ipynb index d64f8c15bd00..20be1b054605 100644 --- a/tutorials/llm/llama-3/pruning-distillation/03_a_depth_pruning.ipynb +++ b/tutorials/llm/llama-3/pruning-distillation/03_a_depth_pruning.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "source": [ "#### Step 3.a.: Using depth-pruning \n", - "To depth-prune, we will trim the last 16 layers in the finetined teacher model. For depth-pruning, we would be using the [megatron_gpt_drop_layers](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/language_modeling/megatron_gpt_drop_layers.py) script. \n", + "To depth-prune, we will trim the layers 16-31 (leaving 1-15 and 32) in the finetined teacher model. For depth-pruning, we would be using the [megatron_gpt_prune](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/language_modeling/megatron_gpt_prune.py) script. \n", "\n", "Per the [blog](https://developer.nvidia.com/blog/how-to-prune-and-distill-llama-3-1-8b-to-an-nvidia-llama-3-1-minitron-4b-model/) and [tech report](https://arxiv.org/pdf/2408.11796), removing contiguous layers from the second last block (layers 16 to 31 continuously) yields the best overall results. 
\n", "\n", @@ -34,14 +34,17 @@ }, "outputs": [], "source": [ - "!python -m torch.distributed.launch --nproc_per_node=8 \\\n", - " /opt/NeMo/examples/nlp/language_modeling/megatron_gpt_drop_layers.py \\\n", - " --path_to_nemo \"./distill_trainings/megatron_llama_ft/checkpoints/megatron_llama_ft.nemo\" \\\n", - " --path_to_save \"/workspace/4b_depth_pruned_model.nemo\" \\\n", - " --tensor_model_parallel_size 8 \\\n", - " --pipeline_model_parallel_size 1 \\\n", - " --gpus_per_node 8 \\\n", - " --drop_layers 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31" + "!torchrun --nproc_per_node=8 \\\n", + " /opt/NeMo/examples/nlp/language_modeling/megatron_gpt_prune.py \\\n", + " model.restore_from_path=\"./distill_trainings/megatron_llama_ft/checkpoints/megatron_llama_ft.nemo\" \\\n", + " model.tensor_model_parallel_size=8 \\\n", + " model.pipeline_model_parallel_size=1 \\\n", + " +model.dist_ckpt_load_strictness=log_all \\\n", + " trainer.num_nodes=1 \\\n", + " trainer.precision=bf16 \\\n", + " trainer.devices=8 \\\n", + " \"prune.drop_layers=[16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]\" \\\n", + " export.save_path=\"/workspace/4b_depth_pruned_model.nemo\"" ] }, { From 6dced1d36abf1030dfe435af60a4de94b05176bf Mon Sep 17 00:00:00 2001 From: Dmytro Pykhtar <37850217+dimapihtar@users.noreply.github.com> Date: Fri, 20 Dec 2024 03:27:49 +0200 Subject: [PATCH 081/128] add documentation for checkpoint averaging (#11594) * add torch dist support Signed-off-by: dimapihtar * Apply isort and black reformatting Signed-off-by: dimapihtar * add changes Signed-off-by: dimapihtar * Apply isort and black reformatting Signed-off-by: dimapihtar * revert changes Signed-off-by: Dmytro Pykhtar * revert changes Signed-off-by: Dmytro Pykhtar * add deprecation notes Signed-off-by: Dmytro Pykhtar * Apply isort and black reformatting Signed-off-by: dimapihtar * add readme Signed-off-by: Dmytro Pykhtar * update readme Signed-off-by: Dmytro Pykhtar * update readme Signed-off-by: Dmytro Pykhtar * update readme Signed-off-by: Dmytro Pykhtar * rename script Signed-off-by: Dmytro Pykhtar * Apply isort and black reformatting Signed-off-by: dimapihtar * update readme Signed-off-by: Dmytro Pykhtar * fix style Signed-off-by: dimapihtar * fix styling Signed-off-by: dimapihtar * fix styling Signed-off-by: dimapihtar * Apply isort and black reformatting Signed-off-by: dimapihtar * remove unused import Signed-off-by: dimapihtar --------- Signed-off-by: dimapihtar Signed-off-by: dimapihtar Signed-off-by: Dmytro Pykhtar Co-authored-by: dimapihtar Co-authored-by: Dmytro Pykhtar --- scripts/checkpoint_averaging/README.md | 25 +++++++++++++++++++ .../{ => legacy}/average_model_checkpoints.py | 9 +++++++ .../{ => legacy}/checkpoint_averaging.py | 19 +++++++++++--- .../checkpoint_averaging_model_parallel.py | 12 +++++++-- .../megatron_checkpoint_averaging.py | 13 +++++++--- ... 
zarr_distributed_checkpoint_averaging.py} | 17 ++++++++-----
 6 files changed, 80 insertions(+), 15 deletions(-)
 create mode 100644 scripts/checkpoint_averaging/README.md
 rename scripts/checkpoint_averaging/{ => legacy}/average_model_checkpoints.py (97%)
 rename scripts/checkpoint_averaging/{ => legacy}/checkpoint_averaging.py (91%)
 rename scripts/checkpoint_averaging/{ => legacy}/checkpoint_averaging_model_parallel.py (92%)
 rename scripts/checkpoint_averaging/{ => legacy}/megatron_checkpoint_averaging.py (95%)
 rename scripts/checkpoint_averaging/{distributed_checkpoint_averaging.py => zarr_distributed_checkpoint_averaging.py} (91%)

diff --git a/scripts/checkpoint_averaging/README.md b/scripts/checkpoint_averaging/README.md
new file mode 100644
index 000000000000..614b4b697e0e
--- /dev/null
+++ b/scripts/checkpoint_averaging/README.md
@@ -0,0 +1,25 @@
+Checkpoint Averaging
+====================
+
+Overview
+--------
+The checkpoint averaging script is used to compute the average of multiple distributed checkpoints. This can be useful for improving model performance by combining multiple training states.
+
+When executed, the script processes checkpoints stored in a specified directory, averages their weights, and generates a new checkpoint containing the averaged weights.
+
+Average Zarr Distributed Checkpoints
+------------------------------------
+Use the following command to run the checkpoint averaging script for zarr distributed checkpoints:
+
+```shell
+python scripts/checkpoint_averaging/zarr_distributed_checkpoint_averaging.py \
+    --name_prefix <name prefix for the averaged checkpoint> \
+    --checkpoint_dir <folder containing zarr distributed checkpoints> \
+    --steps <comma-separated list of checkpoint steps>
+```
+**Arguments**:
+- `--name_prefix`: Specifies the prefix for the generated averaged checkpoint.
+- `--checkpoint_dir`: Specifies the folder containing zarr distributed checkpoints.
+- `--steps`: (Optional) A comma-separated list of checkpoint steps to average (e.g., 1000, 2000, 3000). If not provided, the script will average all the checkpoints in the directory.
+
+After execution, the script generates an averaged checkpoint in `<checkpoint_dir>` named `<name_prefix>-averaged`.
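For readers who want to see what averaging distributed checkpoints amounts to, the sketch below illustrates the core idea on plain zarr groups: accumulate every array across the input checkpoints and divide by their count. It is a simplified illustration only; the flat group layout, the helper name `average_zarr_checkpoints`, and the example paths are assumptions for this sketch, not the API of the script added in this patch.

```python
# Simplified sketch of checkpoint averaging over zarr groups (illustrative only).
# Assumes each checkpoint directory is a flat zarr group with identical array keys.
from typing import List

import numpy as np
import zarr


def average_zarr_checkpoints(checkpoint_dirs: List[str], output_dir: str) -> None:
    groups = [zarr.open_group(path, mode="r") for path in checkpoint_dirs]
    averaged = zarr.open_group(output_dir, mode="w")

    for key in groups[0].array_keys():
        # Accumulate in float64 to limit rounding error, then cast back to the stored dtype.
        total = np.zeros(groups[0][key].shape, dtype=np.float64)
        for group in groups:
            total += np.asarray(group[key][:], dtype=np.float64)
        averaged.create_dataset(
            key,
            data=(total / len(groups)).astype(groups[0][key].dtype),
            chunks=groups[0][key].chunks,
        )


if __name__ == "__main__":
    # Hypothetical checkpoint folders; the real script reads them from --checkpoint_dir.
    average_zarr_checkpoints(["ckpt_step_1000", "ckpt_step_2000"], "ckpt-averaged")
```

The actual script adds argument parsing and the optional `--steps` filtering described above on top of this basic averaging loop.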
diff --git a/scripts/checkpoint_averaging/average_model_checkpoints.py b/scripts/checkpoint_averaging/legacy/average_model_checkpoints.py similarity index 97% rename from scripts/checkpoint_averaging/average_model_checkpoints.py rename to scripts/checkpoint_averaging/legacy/average_model_checkpoints.py index ce88bba9716b..a9eca6d06875 100644 --- a/scripts/checkpoint_averaging/average_model_checkpoints.py +++ b/scripts/checkpoint_averaging/legacy/average_model_checkpoints.py @@ -71,6 +71,9 @@ def process_config(cfg: OmegaConf): + """ + Process config + """ if 'name' not in cfg or cfg.name is None: raise ValueError("`cfg.name` must be provided to save a model checkpoint") @@ -107,6 +110,12 @@ def process_config(cfg: OmegaConf): @hydra_runner(config_path=None, config_name=None) def main(cfg): + """ + Main function + """ + + logging.info("This script is deprecated and will be removed in the 25.01 release.") + name_prefix, checkpoint_paths, save_ckpt_only = process_config(cfg) if not save_ckpt_only: diff --git a/scripts/checkpoint_averaging/checkpoint_averaging.py b/scripts/checkpoint_averaging/legacy/checkpoint_averaging.py similarity index 91% rename from scripts/checkpoint_averaging/checkpoint_averaging.py rename to scripts/checkpoint_averaging/legacy/checkpoint_averaging.py index 0988479a17e3..846777fe70b5 100755 --- a/scripts/checkpoint_averaging/checkpoint_averaging.py +++ b/scripts/checkpoint_averaging/legacy/checkpoint_averaging.py @@ -42,6 +42,12 @@ def main(): + """ + Main function + """ + + logging.info("This script is deprecated and will be removed in the 25.01 release.") + parser = argparse.ArgumentParser() parser.add_argument( 'model_fname_list', @@ -56,15 +62,20 @@ def main(): type=str, nargs='+', default=[], - help='A list of Python file names to "from FILE import *" (Needed when some classes were defined in __main__ of a script)', + help='A list of Python file names to "from FILE import *"', ) parser.add_argument( - '--class_path', type=str, default='', help='A path to class "module.submodule.class" (if given)', + '--class_path', + type=str, + default='', + help='A path to class "module.submodule.class" (if given)', ) args = parser.parse_args() logging.info( - f"\n\nIMPORTANT:\nIf you get the following error:\n\t(AttributeError: Can't get attribute '???' on )\nuse:\n\t--import_fname_list\nfor all files that contain missing classes.\n\n" + f"\n\nIMPORTANT:\nIf you get the following error:\n\t" + "(AttributeError: Can't get attribute '???' 
on )\nuse:\n\t" + "--import_fname_list\nfor all files that contain missing classes.\n\n" ) for fn in args.import_fname_list: @@ -77,7 +88,7 @@ def main(): # loop over all folders with .nemo files (or .nemo files) for model_fname_i, model_fname in enumerate(args.model_fname_list): if not model_fname.endswith(".nemo"): - # assume model_fname is a folder which contains a .nemo file (filter .nemo files which matches with "*-averaged.nemo") + # assume model_fname is a folder which contains a .nemo file nemo_files = list( filter(lambda fn: not fn.endswith("-averaged.nemo"), glob.glob(os.path.join(model_fname, "*.nemo"))) ) diff --git a/scripts/checkpoint_averaging/checkpoint_averaging_model_parallel.py b/scripts/checkpoint_averaging/legacy/checkpoint_averaging_model_parallel.py similarity index 92% rename from scripts/checkpoint_averaging/checkpoint_averaging_model_parallel.py rename to scripts/checkpoint_averaging/legacy/checkpoint_averaging_model_parallel.py index bf5b49a9e4e9..df03458a22d8 100644 --- a/scripts/checkpoint_averaging/checkpoint_averaging_model_parallel.py +++ b/scripts/checkpoint_averaging/legacy/checkpoint_averaging_model_parallel.py @@ -46,12 +46,20 @@ def main(): + """ + Main function + """ + + logging.info("This script is deprecated and will be removed in the 25.01 release.") + parser = argparse.ArgumentParser() parser.add_argument( - '--name_prefix', help='Name of the final checkpoint. Will append -averaged.ckpt automatically.', + '--name_prefix', + help='Name of the final checkpoint. Will append -averaged.ckpt automatically.', ) parser.add_argument( - '--checkpoint_dir', help='Folder containing all mp_rank_X subfolders.', + '--checkpoint_dir', + help='Folder containing all mp_rank_X subfolders.', ) args = parser.parse_args() diff --git a/scripts/checkpoint_averaging/megatron_checkpoint_averaging.py b/scripts/checkpoint_averaging/legacy/megatron_checkpoint_averaging.py similarity index 95% rename from scripts/checkpoint_averaging/megatron_checkpoint_averaging.py rename to scripts/checkpoint_averaging/legacy/megatron_checkpoint_averaging.py index 7b964fd7bade..5bf921e74518 100755 --- a/scripts/checkpoint_averaging/megatron_checkpoint_averaging.py +++ b/scripts/checkpoint_averaging/legacy/megatron_checkpoint_averaging.py @@ -44,6 +44,12 @@ def main(): + """ + Main function + """ + + logging.info("This script is deprecated and will be removed in the 25.01 release.") + parser = argparse.ArgumentParser() parser.add_argument( 'model_fname_list', @@ -57,7 +63,7 @@ def main(): type=str, nargs='+', default=[], - help='A list of Python file names to "from FILE import *" (Needed when some classes were defined in __main__ of a script)', + help='A list of Python file names to "from FILE import *"', ) parser.add_argument( '--class_path', @@ -68,7 +74,8 @@ def main(): args = parser.parse_args() logging.info( - f"\n\nIMPORTANT: Use --import_fname_list for all files that contain missing classes (AttributeError: Can't get attribute '???' on )\n\n" + f"\n\nIMPORTANT: Use --import_fname_list for all files that contain missing classes:\n\t" + "(AttributeError: Can't get attribute '???' 
on )\n\n" ) for fn in args.import_fname_list: @@ -82,7 +89,7 @@ def main(): # loop over all folders with .nemo files (or .nemo files) for model_fname_i, model_fname in enumerate(args.model_fname_list): if not model_fname.endswith(".nemo"): - # assume model_fname is a folder which contains a .nemo file (filter .nemo files which matches with "*-averaged.nemo") + # assume model_fname is a folder which contains a .nemo file nemo_files = list( filter(lambda fn: not fn.endswith("-averaged.nemo"), glob.glob(os.path.join(model_fname, "*.nemo"))) ) diff --git a/scripts/checkpoint_averaging/distributed_checkpoint_averaging.py b/scripts/checkpoint_averaging/zarr_distributed_checkpoint_averaging.py similarity index 91% rename from scripts/checkpoint_averaging/distributed_checkpoint_averaging.py rename to scripts/checkpoint_averaging/zarr_distributed_checkpoint_averaging.py index 89b1430198b3..9d146c1e8501 100644 --- a/scripts/checkpoint_averaging/distributed_checkpoint_averaging.py +++ b/scripts/checkpoint_averaging/zarr_distributed_checkpoint_averaging.py @@ -27,10 +27,10 @@ # limitations under the License. """ -Example: python scripts/checkpoint_averaging/distributed_checkpoint_averaging.py \ +Example: python scripts/checkpoint_averaging/zarr_distributed_checkpoint_averaging.py \ --name_prefix= \ - --checkpoint_dir= - --steps + --checkpoint_dir= \ + --steps will generate a new directory in each of the distributed checkpoint subfolders named -averaged """ @@ -40,19 +40,24 @@ import os import shutil import numpy as np -import tensorstore # need to import it for bf16 support import zarr logging.basicConfig(level=logging.INFO) def main(): + """ + Main function + """ + parser = argparse.ArgumentParser() parser.add_argument( - '--name_prefix', help='Name of the final checkpoint. Will append -averaged automatically.', + '--name_prefix', + help='Name of the final checkpoint. 
Will append -averaged automatically.', ) parser.add_argument( - '--checkpoint_dir', help='Folder containing all the distributed checkpoints.', + '--checkpoint_dir', + help='Folder containing all the distributed checkpoints.', ) # list of checkpoint steps to average parser.add_argument( From 8078cd4a4a2c01fb9a6dbe080a9d833ff987176a Mon Sep 17 00:00:00 2001 From: Weiqing Wang <164252040+weiqingw4ng@users.noreply.github.com> Date: Thu, 19 Dec 2024 17:32:11 -0800 Subject: [PATCH 082/128] Downgrading the 'datasets' package from 3.0.0 to 2.21.0 for Multilang_ASR.ipynb and ASR_CTC_Language_Finetuning.ipynb (#11675) * Downgrading the 'datasets' package from 3.0.0 to 2.21.0 for Multilang_ASR.ipynb Signed-off-by: Weiqing Wang * Downgrading the 'datasets' package from 3.0.0 to 2.21.0 for ASR_CTC_Language_Finetuning.ipynb Signed-off-by: Weiqing Wang --------- Signed-off-by: Weiqing Wang --- tutorials/asr/ASR_CTC_Language_Finetuning.ipynb | 1 + tutorials/asr/Multilang_ASR.ipynb | 1 + 2 files changed, 2 insertions(+) diff --git a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb index 6ad3307da496..b0cbdf2375b7 100644 --- a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb +++ b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb @@ -37,6 +37,7 @@ "!apt-get install sox libsndfile1 ffmpeg libsox-fmt-mp3\n", "!pip install text-unidecode\n", "!pip install matplotlib>=3.3.2\n", + "!pip install datasets==2.21.0 # downgrading to 2.21.0 because latest version (3.0.0) has some issues\n", "\n", "## Install NeMo\n", "BRANCH = 'main'\n", diff --git a/tutorials/asr/Multilang_ASR.ipynb b/tutorials/asr/Multilang_ASR.ipynb index 800f8a2d2ded..8557ab849cf5 100644 --- a/tutorials/asr/Multilang_ASR.ipynb +++ b/tutorials/asr/Multilang_ASR.ipynb @@ -98,6 +98,7 @@ "!pip install matplotlib>=3.3.2\n", "# this is needed for RNNT loss\n", "!pip install --upgrade numba\n", + "!pip install datasets==2.21.0 # downgrading to 2.21.0 because latest version (3.0.0) has some issues\n", "\n", "# this is needed to pre-process MCV Spanish dataset, which contains mp3 files\n", "!apt-get install -y sox libsox-fmt-mp3\n", From 054bd464a0461279a9d84ce0f1297a8e80da43e5 Mon Sep 17 00:00:00 2001 From: Jan Lasek Date: Fri, 20 Dec 2024 02:54:59 +0100 Subject: [PATCH 083/128] Utilities to detect and drop deprecated arguments from NeMo 2.0 checkpoint context io.json (#11648) * Utils to detect and drop deprecated arguments in io.json Signed-off-by: Jan Lasek * Unit tests for drop_unexpected_params Signed-off-by: Jan Lasek * Apply isort and black reformatting Signed-off-by: janekl * Add copyright header Signed-off-by: Jan Lasek --------- Signed-off-by: Jan Lasek Signed-off-by: janekl Co-authored-by: janekl --- nemo/lightning/io/__init__.py | 3 +- nemo/lightning/io/mixin.py | 39 ++++++++ scripts/llm/update_io_context.py | 94 +++++++++++++++++++ .../llm/io/test_drop_unexpected_params.py | 84 +++++++++++++++++ 4 files changed, 219 insertions(+), 1 deletion(-) create mode 100644 scripts/llm/update_io_context.py create mode 100644 tests/collections/llm/io/test_drop_unexpected_params.py diff --git a/nemo/lightning/io/__init__.py b/nemo/lightning/io/__init__.py index d53fa1e5f57e..388156ecf4a7 100644 --- a/nemo/lightning/io/__init__.py +++ b/nemo/lightning/io/__init__.py @@ -2,7 +2,7 @@ from nemo.lightning.io.api import export_ckpt, import_ckpt, load, load_context, model_exporter, model_importer from nemo.lightning.io.capture import reinit from nemo.lightning.io.connector import Connector, ModelConnector -from 
nemo.lightning.io.mixin import ConnectorMixin, IOMixin, track_io +from nemo.lightning.io.mixin import ConnectorMixin, IOMixin, drop_unexpected_params, track_io from nemo.lightning.io.pl import TrainerContext, is_distributed_ckpt from nemo.lightning.io.state import TransformCTX, apply_transforms, state_transform @@ -10,6 +10,7 @@ "apply_transforms", "Connector", "ConnectorMixin", + "drop_unexpected_params", "IOMixin", "track_io", "import_ckpt", diff --git a/nemo/lightning/io/mixin.py b/nemo/lightning/io/mixin.py index e356caf92162..cd58d09cb3e1 100644 --- a/nemo/lightning/io/mixin.py +++ b/nemo/lightning/io/mixin.py @@ -687,6 +687,45 @@ def _artifact_transform_load(cfg: fdl.Config, path: Path): pass +def drop_unexpected_params(config: fdl.Config) -> bool: + """ + Analyzes config to detect unexpected keyword arguments -- for example, deprecated parameters -- and + updates the config by dropping them. Returns True if the config gets updated and False otherwise. + + Args: + config (fdl.Config): The configuration object to analyze. + """ + + updated = False + + def analyze(config: fdl.Config, prefix: str): + + if isinstance(config, fdl.Config): + signature = inspect.signature(config.__fn_or_cls__) + + accept_kwargs = any(param.kind is inspect.Parameter.VAR_KEYWORD for param in signature.parameters.values()) + + if not accept_kwargs: + to_drop = [param for param in config.__arguments__ if param not in signature.parameters] + + if to_drop: + nonlocal updated + updated = True + logging.warning(f"Deprecated parameters to drop from {prefix}: {to_drop}") + for param in to_drop: + del config.__arguments__[param] + else: + logging.info(f"Skip analyzing {prefix} as it accepts arbitrary keyword arguments.") + + # Proceed recursively for all arguments + for key, value in config.__arguments__.items(): + analyze(value, prefix + "." + key) + + analyze(config, "") + + return updated + + def load(path: Path, output_type: Type[CkptType] = Any, subpath: Optional[str] = None, build: bool = True) -> CkptType: """ Loads a configuration from a pickle file and constructs an object of the specified type. diff --git a/scripts/llm/update_io_context.py b/scripts/llm/update_io_context.py new file mode 100644 index 000000000000..24ec6c816c1c --- /dev/null +++ b/scripts/llm/update_io_context.py @@ -0,0 +1,94 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import sys +from datetime import datetime +from pathlib import Path + +import fiddle as fdl +from fiddle._src.experimental import serialization + +from nemo.lightning.ckpt_utils import ckpt_to_context_subdir +from nemo.lightning.io import drop_unexpected_params, load +from nemo.utils import logging + +IO_FILE = "io.json" + +""" +Script to update NeMo 2.0 model context (stored in io.json) for unexpected +keword arguments for compatibility with the currently running environment. + +It accepts path to a NeMo 2.0 checkpoint and optional flag for building +the updated configuration. 
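For reference, a minimal invocation sketch (the checkpoint path below is a placeholder, not a real path):

    python scripts/llm/update_io_context.py --model_path /path/to/nemo2_checkpoint --build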
It performs the following steps: + +1. Loads config from the model context directory. +2. Checks the config for unexpected (e.g. deprecated) arguments and drops them. +3. Attempts to build the updated configuration if --build flag is on. +4. Backs up the existing context file and saves the updated configuration. +""" + + +def get_args(): + """Parses command line arguments.""" + parser = argparse.ArgumentParser( + description="Script to drop unexpected arguments from NeMo 2.0 io.json model context." + ) + parser.add_argument("--model_path", type=str, required=True, help="Path to a NeMo 2.0 checkpoint.") + parser.add_argument("--build", action="store_true", help="Whether to test building the updated config.") + return parser.parse_args() + + +def save_io(config: fdl.Config, path: str): + """ + Saves the given configuration object to a specified file path in JSON format. + + Args: + config (fdl.Config): The configuration object to be saved. + path (str): The file path where the configuration will be saved. + """ + config_json = serialization.dump_json(config) + with open(path, "w") as f: + f.write(config_json) + + +if __name__ == "__main__": + args = get_args() + + model_path = Path(args.model_path) + context_path = ckpt_to_context_subdir(model_path) + logging.info(f"Path to model context: {context_path}.") + + config = load(context_path, build=False) + updated = drop_unexpected_params(config) + + if not updated: + logging.info("Config does not need any updates.") + sys.exit(0) + + if args.build: + try: + fdl.build(config) + except Exception as e: + logging.error("Build for the updated config failed.") + raise + else: + logging.info("Build for the updated config successful.") + + # Backup the existing context file and save the updated config + io_path = context_path / IO_FILE + io_path_backup = context_path / f"BACKUP_{datetime.now().strftime('%Y_%m_%d_%H_%M_%S')}_{IO_FILE}" + io_path.rename(io_path_backup) + save_io(config, io_path) + logging.info(f"Config saved to {io_path}.") diff --git a/tests/collections/llm/io/test_drop_unexpected_params.py b/tests/collections/llm/io/test_drop_unexpected_params.py new file mode 100644 index 000000000000..b60c7236ba82 --- /dev/null +++ b/tests/collections/llm/io/test_drop_unexpected_params.py @@ -0,0 +1,84 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fiddle as fdl + +from nemo.lightning.io import drop_unexpected_params + + +class TestDropUnexpectedParams: + + def setup_method(self): + """ + Setup common test resources. + """ + + class MockClassOld: + def __init__(self, x, y, deprecated): + pass + + class MockClassNew: + def __init__(self, x, y): + pass + + class OuterClass: + def __init__(self, z, t): + pass + + self.MockClassOld = MockClassOld + self.MockClassNew = MockClassNew + self.OuterClass = OuterClass + + def test_valid_config_stays_same(self): + """ + Test that a valid config remains unchanged. 
+ """ + + config = fdl.Config(self.MockClassNew, x=1, y=2) + updated = drop_unexpected_params(config) + + assert not updated, "Expected the config to remain unchanged." + assert config.x == 1 + assert config.y == 2 + + def test_config_updates(self): + """ + Test that a config with unexpected parameters gets updated. + """ + config = fdl.Config(self.MockClassOld, x=1, y=2, deprecated=3) + + # Simulate deprecation issue by overriding target class + config.__dict__['__fn_or_cls__'] = self.MockClassNew + + updated = drop_unexpected_params(config) + assert updated, "Expected the config to be updated." + assert config.x == 1 + assert config.y == 2 + assert not hasattr(config, "deprecated"), "Expected 'deprecated' to be removed from the config." + + def test_nested_config_updates(self): + """ + Test that a nested config with unexpected parameters gets updated. + """ + config = fdl.Config(self.OuterClass, z=4, t=fdl.Config(self.MockClassOld, x=1, y=2, deprecated=3)) + + # Simulate deprecation issue by overriding target class + config.t.__dict__["__fn_or_cls__"] = self.MockClassNew + + updated = drop_unexpected_params(config) + assert updated, "Expected the nested config to be updated." + assert config.z == 4 + assert config.t.x == 1 + assert config.t.y == 2 + assert not hasattr(config.t, "deprecated"), "Expected 'deprecated' to be removed from the inner config." From fc54ceee488181f594427067f8b13eef49c85173 Mon Sep 17 00:00:00 2001 From: Jan Lasek Date: Fri, 20 Dec 2024 04:26:47 +0100 Subject: [PATCH 084/128] NIM supporting changes for nemo.export for NeMo 2.0 (part II) (#11669) * Remove trt_compile from __init__ as it triggers imports from nemo.utils Signed-off-by: Jan Lasek * Get tokenizer for NeMo 2 from model.yaml using local SP or HF classes Signed-off-by: Jan Lasek * Apply isort and black reformatting Signed-off-by: janekl --------- Signed-off-by: Jan Lasek Signed-off-by: janekl Co-authored-by: janekl --- nemo/export/__init__.py | 2 - .../trt_llm/nemo_ckpt_loader/nemo_file.py | 57 +++++++++++++++++-- 2 files changed, 52 insertions(+), 7 deletions(-) diff --git a/nemo/export/__init__.py b/nemo/export/__init__.py index 6b1f8c90aa8f..d9155f923f18 100644 --- a/nemo/export/__init__.py +++ b/nemo/export/__init__.py @@ -11,5 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from nemo.export.tensorrt_lazy_compiler import trt_compile diff --git a/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py b/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py index 827bbf929796..b721a8ea60b3 100644 --- a/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py +++ b/nemo/export/trt_llm/nemo_ckpt_loader/nemo_file.py @@ -38,6 +38,13 @@ from nemo.export.tarutils import TarPath, ZarrPathStore from nemo.export.tiktoken_tokenizer import TiktokenTokenizer +try: + from nemo.lightning import io + + HAVE_NEMO2 = True +except (ImportError, ModuleNotFoundError): + HAVE_NEMO2 = False + LOGGER = logging.getLogger("NeMo") @@ -289,14 +296,54 @@ def copy_tokenizer_files(config, out_dir): return config +def get_tokenizer_from_nemo2_context(model_context_dir: Path): + """ + Retrieve tokenizer configuration from NeMo 2.0 context and instantiate the tokenizer. + + Args: + model_context_dir (Path): Path to the model context directory. + + Returns: + The instantiated tokenizer (various classes possible). 
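    Illustrative sketch (paths are placeholders): when a NeMo 2.0 export directory contains a
    "nemo_context" subfolder, get_tokenizer() dispatches to this helper automatically:

        tokenizer = get_tokenizer("/tmp/nemo_export_dir")  # /tmp/nemo_export_dir/nemo_context exists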
+ """ + + if HAVE_NEMO2: + # Use NeMo tokenizer loaded from the NeMo 2.0 model context + tokenizer_spec = io.load_context(model_context_dir, subpath="model.tokenizer") + return build_tokenizer(tokenizer_spec) + else: + # Use local nemo.export SentencePieceTokenizer implementation + # or directly a HuggingFace tokenizer based on the model config + with (model_context_dir / "model.yaml").open("r") as stream: + model_config = yaml.safe_load(stream) + + tokenizer_config = model_config["tokenizer"] + target_class = tokenizer_config["_target_"] + tokenizer_module = "nemo.collections.common.tokenizers." + assert target_class.startswith(tokenizer_module) + target_class = target_class.removeprefix(tokenizer_module) + + if target_class == "sentencepiece_tokenizer.SentencePieceTokenizer": + tokenizer = SentencePieceTokenizer( + model_path=str(model_context_dir / tokenizer_config["model_path"]), + special_tokens=tokenizer_config.get("special_tokens", None), + legacy=tokenizer_config.get("legacy", False), + ) + elif target_class == "huggingface.auto_tokenizer.AutoTokenizer": + tokenizer = AutoTokenizer.from_pretrained( + str(model_context_dir / tokenizer_config["pretrained_model_name"]) + ) + else: + raise ValueError(f"Unsupported tokenizer type: {tokenizer_module}{target_class}.") + + return tokenizer + + def get_tokenizer(tokenizer_dir_or_path: Union[str, Path]) -> PreTrainedTokenizer: """Loads the tokenizer from the decoded NeMo weights dir.""" tokenizer_dir_or_path = Path(tokenizer_dir_or_path) if (tokenizer_dir_or_path / "nemo_context").exists(): - from nemo.lightning import io - - tokenizer_spec = io.load_context((tokenizer_dir_or_path / "nemo_context"), subpath="model.tokenizer") - return build_tokenizer(tokenizer_spec) + return get_tokenizer_from_nemo2_context(tokenizer_dir_or_path / "nemo_context") elif os.path.exists(os.path.join(tokenizer_dir_or_path, "vocab.json")): vocab_path = tokenizer_dir_or_path / "vocab.json" if tokenizer_dir_or_path.is_dir() else tokenizer_dir_or_path tokenizer_config = {"library": "tiktoken", "vocab_file": str(vocab_path)} @@ -476,7 +523,7 @@ def load_nemo_model(nemo_ckpt: Union[str, Path], nemo_export_dir: Union[str, Pat elif k == "activation_func": nemo_model_config["activation"] = v["_target_"].rsplit('.', 1)[-1] else: - from nemo.lightning import io + assert HAVE_NEMO2, "nemo_toolkit>=2.0.0 is required to load the model context." 
config = io.load_context(io_folder, subpath="model.config") From 3c9c3f6ca1f393c4fae46b41122ed3201e30ce20 Mon Sep 17 00:00:00 2001 From: Abhishree Thittenamane <47577437+athitten@users.noreply.github.com> Date: Thu, 19 Dec 2024 21:07:47 -0800 Subject: [PATCH 085/128] Add check for symlink in _safe_extract (#11611) Signed-off-by: Abhishree --- nemo/core/connectors/save_restore_connector.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nemo/core/connectors/save_restore_connector.py b/nemo/core/connectors/save_restore_connector.py index 2c4c826d1daf..cbbb30772036 100644 --- a/nemo/core/connectors/save_restore_connector.py +++ b/nemo/core/connectors/save_restore_connector.py @@ -601,7 +601,12 @@ def _is_safe_path(member, extract_to): # Construct the full path where the member would be extracted full_path = os.path.join(extract_to, member_path) # Ensure the member would be extracted within the intended directory - return os.path.commonprefix([full_path, extract_to]) == extract_to + if os.path.commonprefix([full_path, extract_to]) != extract_to: + return False + # Check if the member is a symbolic link + if member.issym() or member.islnk(): + return False + return True @staticmethod def _safe_extract(tar, out_folder: str, members=None): From 86c0f1a546470056410e1badb6d6d0cf1b4e9e62 Mon Sep 17 00:00:00 2001 From: Chen Cui Date: Fri, 20 Dec 2024 01:23:13 -0500 Subject: [PATCH 086/128] Fix baichuan export (#11640) * Fix baichuan export Signed-off-by: Chen Cui * update import Signed-off-by: Chen Cui --------- Signed-off-by: Chen Cui --- nemo/collections/llm/gpt/model/baichuan.py | 34 ++++++++-------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/nemo/collections/llm/gpt/model/baichuan.py b/nemo/collections/llm/gpt/model/baichuan.py index c283b802a118..df3263559338 100644 --- a/nemo/collections/llm/gpt/model/baichuan.py +++ b/nemo/collections/llm/gpt/model/baichuan.py @@ -20,13 +20,13 @@ import torch.nn.functional as F from torch import nn -from nemo.collections.llm.gpt.model.base import GPTConfig, GPTModel +from nemo.collections.llm.gpt.model.base import GPTConfig, GPTModel, torch_dtype_from_mcore_config from nemo.collections.llm.utils import Config from nemo.lightning import OptimizerModule, io, teardown from nemo.lightning.pytorch.utils import dtype_from_hf if TYPE_CHECKING: - from transformers import AutoConfig, AutoModelForCausalLM + from transformers import AutoModelForCausalLM from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec @@ -142,16 +142,23 @@ def make_vocab_size_divisible_by(vocab_size): @io.model_exporter(Baichuan2Model, "hf") class HFBaichuan2Exporter(io.ModelConnector[Baichuan2Model, "AutoModelForCausalLM"]): - def init(self) -> "AutoModelForCausalLM": + def init(self, dtype=torch.bfloat16, model_name="baichuan-inc/Baichuan2-7B-Base") -> "AutoModelForCausalLM": from transformers import AutoModelForCausalLM from transformers.modeling_utils import no_init_weights with no_init_weights(True): - return AutoModelForCausalLM.from_config(self.config, trust_remote_code=True) + # Since Baichuan is not importable from transformers, we can only initialize the HF model + # from a known checkpoint. If more than 1 Baichuan model is supported in NeMo in the future, + # the model_name will need to be passed in. 
+ return AutoModelForCausalLM.from_pretrained( + model_name, + trust_remote_code=True, + torch_dtype=dtype, + ) def apply(self, output_path: Path) -> Path: - target = self.init() source, _ = self.nemo_load(str(self)) + target = self.init(torch_dtype_from_mcore_config(source.config)) target = self.convert_state(source, target) target = target.cpu() @@ -177,23 +184,6 @@ def convert_state(self, source, target): def tokenizer(self): return io.load_context(str(self)).model.tokenizer.tokenizer - @property - def config(self) -> "AutoConfig": - source: Baichuan2Config = io.load_context(str(self)).model.config - - return AutoConfig( - num_hidden_layers=source.num_layers, - hidden_size=source.hidden_size, - intermediate_size=source.ffn_hidden_size, - num_attention_heads=source.num_attention_heads, - max_position_embeddings=source.seq_length, - initializer_range=source.init_method_std, - rms_norm_eps=source.layernorm_epsilon, - num_key_value_heads=source.num_query_groups, - rope_theta=source.rotary_base, - vocab_size=self.tokenizer.vocab_size, - ) - @io.state_transform( source_key="model.layers.*.self_attn.W_pack.weight", From 3a8e75dfbdf2314d33706dfb342575adbbae661d Mon Sep 17 00:00:00 2001 From: Yashaswi Karnati <144376261+yashaswikarnati@users.noreply.github.com> Date: Thu, 19 Dec 2024 23:20:13 -0800 Subject: [PATCH 087/128] Rename multimodal data module - EnergonMultiModalDataModule (#11654) * rename multimodal data module * Apply isort and black reformatting Signed-off-by: yashaswikarnati * fix long lengths * fix lint issues * fix long lint issues --------- Signed-off-by: yashaswikarnati Co-authored-by: yashaswikarnati --- .../data/diffusion_energon_datamodule.py | 6 ++--- nemo/collections/diffusion/train.py | 6 +++-- nemo/collections/multimodal/data/__init__.py | 6 ++--- .../multimodal/data/energon/__init__.py | 4 +-- .../multimodal/data/energon/base.py | 27 ++++++++++++------- scripts/vlm/llava_next_finetune.py | 4 +-- scripts/vlm/llava_next_pretrain.py | 4 +-- .../data/energon/test_data_module.py | 6 ++--- 8 files changed, 36 insertions(+), 27 deletions(-) diff --git a/nemo/collections/diffusion/data/diffusion_energon_datamodule.py b/nemo/collections/diffusion/data/diffusion_energon_datamodule.py index 07747528363a..5ad15c654555 100644 --- a/nemo/collections/diffusion/data/diffusion_energon_datamodule.py +++ b/nemo/collections/diffusion/data/diffusion_energon_datamodule.py @@ -19,10 +19,10 @@ from megatron.core import parallel_state from megatron.energon import DefaultTaskEncoder, WorkerConfig, get_savable_loader, get_train_dataset -from nemo.collections.multimodal.data.energon.base import SimpleMultiModalDataModule +from nemo.collections.multimodal.data.energon.base import EnergonMultiModalDataModule -class DiffusionDataModule(SimpleMultiModalDataModule): +class DiffusionDataModule(EnergonMultiModalDataModule): """ A PyTorch Lightning DataModule for handling multimodal datasets with images and text. @@ -62,7 +62,7 @@ def __init__( max_samples_per_sequence: int | None = None, ) -> None: """ - Initialize the SimpleMultiModalDataModule. + Initialize the EnergonMultiModalDataModule. Parameters: path (str): Path to the dataset. 
diff --git a/nemo/collections/diffusion/train.py b/nemo/collections/diffusion/train.py index 404602084b85..0db2e8fd2326 100644 --- a/nemo/collections/diffusion/train.py +++ b/nemo/collections/diffusion/train.py @@ -38,7 +38,7 @@ DiTXLConfig, ECDiTLlama1BConfig, ) -from nemo.collections.multimodal.data.energon.base import SimpleMultiModalDataModule +from nemo.collections.multimodal.data.energon.base import EnergonMultiModalDataModule from nemo.lightning.pytorch.callbacks import ModelCheckpoint, PreemptionCallback from nemo.lightning.pytorch.callbacks.megatron_comm_overlap import MegatronCommOverlapCallback from nemo.lightning.pytorch.callbacks.model_transform import ModelTransform @@ -64,7 +64,7 @@ def multimodal_datamodule() -> pl.LightningDataModule: @run.autoconvert def simple_datamodule() -> pl.LightningDataModule: """Simple Datamodule Initialization""" - data_module = SimpleMultiModalDataModule( + data_module = EnergonMultiModalDataModule( seq_length=2048, micro_batch_size=1, global_batch_size=32, @@ -221,6 +221,7 @@ def train_mock() -> run.Partial: @run.cli.factory(target=llm.train) def mock_ditllama5b_8k() -> run.Partial: + """DiT-5B mock Recipe""" recipe = pretrain() recipe.model.config = run.Config(DiTLlama5BConfig, max_frames=1) recipe.data = multimodal_fake_datamodule() @@ -256,6 +257,7 @@ def mock_ditllama5b_8k() -> run.Partial: @run.cli.factory(target=llm.train) def mock_dit7b_8k() -> run.Partial: + """DiT-7B mock Recipe""" recipe = mock_ditllama5b_8k() recipe.model.config = run.Config(DiT7BConfig, max_frames=1) recipe.data.model_config = recipe.model.config diff --git a/nemo/collections/multimodal/data/__init__.py b/nemo/collections/multimodal/data/__init__.py index 7e6ac24828f5..9a78712f026d 100644 --- a/nemo/collections/multimodal/data/__init__.py +++ b/nemo/collections/multimodal/data/__init__.py @@ -14,7 +14,7 @@ from nemo.utils.import_utils import safe_import_from -SimpleMultiModalDataModule, _ = safe_import_from( - "nemo.collections.multimodal.data.energon", "SimpleMultiModalDataModule" +EnergonMultiModalDataModule, _ = safe_import_from( + "nemo.collections.multimodal.data.energon", "EnergonMultiModalDataModule" ) -__all__ = ["SimpleMultiModalDataModule"] +__all__ = ["EnergonMultiModalDataModule"] diff --git a/nemo/collections/multimodal/data/energon/__init__.py b/nemo/collections/multimodal/data/energon/__init__.py index 04926758cbac..8c7465880b39 100644 --- a/nemo/collections/multimodal/data/energon/__init__.py +++ b/nemo/collections/multimodal/data/energon/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. -from nemo.collections.multimodal.data.energon.base import SimpleMultiModalDataModule +from nemo.collections.multimodal.data.energon.base import EnergonMultiModalDataModule from nemo.collections.multimodal.data.energon.config import ( ImageTextSample, ImageToken, @@ -28,7 +28,7 @@ ) __all__ = [ - "SimpleMultiModalDataModule", + "EnergonMultiModalDataModule", "ImageToken", "ImageTextSample", "MultiModalSampleConfig", diff --git a/nemo/collections/multimodal/data/energon/base.py b/nemo/collections/multimodal/data/energon/base.py index 8c7819c3d7dd..3dfd495edd82 100644 --- a/nemo/collections/multimodal/data/energon/base.py +++ b/nemo/collections/multimodal/data/energon/base.py @@ -30,7 +30,7 @@ from nemo.utils import logging -class SimpleMultiModalDataModule(pl.LightningDataModule, IOMixin): +class EnergonMultiModalDataModule(pl.LightningDataModule, IOMixin): """ A PyTorch Lightning DataModule for handling multimodal datasets with images and text. 
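For downstream code the rename is a drop-in change; a minimal before/after sketch (dataset path,
tokenizer and image processor are placeholders):

    # before this patch
    from nemo.collections.multimodal.data.energon import SimpleMultiModalDataModule
    data = SimpleMultiModalDataModule(path="/data/energon_ds", tokenizer=tokenizer, image_processor=image_processor)

    # after this patch
    from nemo.collections.multimodal.data.energon import EnergonMultiModalDataModule
    data = EnergonMultiModalDataModule(path="/data/energon_ds", tokenizer=tokenizer, image_processor=image_processor)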
@@ -70,7 +70,7 @@ def __init__( decoder_seq_length: Optional[int] = None, ) -> None: """ - Initialize the SimpleMultiModalDataModule. + Initialize the EnergonMultiModalDataModule. Parameters: path (str): Path to the dataset. @@ -80,8 +80,10 @@ def __init__( micro_batch_size (int, optional): The batch size for training and validation. Defaults to 1. num_workers (int, optional): Number of workers for data loading. Defaults to 1. pin_memory (bool, optional): Whether to pin memory in the DataLoader. Defaults to True. - multimodal_sample_config (MultiModalSampleConfig, optional): Configuration object for multimodal samples. Defaults to MultiModalSampleConfig(). - task_encoder (MultiModalTaskEncoder, optional): Encoder responsible for encoding and batching samples. If not provided, a default (MultimodalTaskEncoder) encoder will be created. Defaults to None. + multimodal_sample_config (MultiModalSampleConfig, optional): Configuration object for multimodal samples. + Defaults to MultiModalSampleConfig(). + task_encoder (MultiModalTaskEncoder, optional): Encoder responsible for encoding and batching samples. + If not provided, a default (MultimodalTaskEncoder) encoder will be created. Defaults to None. """ super().__init__() @@ -113,7 +115,7 @@ def __init__( self.val_dataloader_object = None def io_init(self, **kwargs) -> fdl.Config[Self]: - # (pleasefixme) image_processor and task_encoder are problematic with Fiddle so we skip serializing them for now + cfg_kwargs = {k: deepcopy(v) for k, v in kwargs.items() if k not in ['image_processor', 'task_encoder']} for val in cfg_kwargs.values(): @@ -168,7 +170,8 @@ def train_dataloader(self) -> TRAIN_DATALOADERS: return self.train_dataloader_object if not parallel_state.is_initialized(): logging.info( - f"Muiltimodal data loader parallel state is not initialized, using default worker config with no_workers {self.num_workers}" + f"Muiltimodal data loader parallel state is not initialized," + f"using default worker config with no_workers {self.num_workers}" ) worker_config = WorkerConfig.default_worker_config(self.num_workers) else: @@ -176,7 +179,8 @@ def train_dataloader(self) -> TRAIN_DATALOADERS: world_size = parallel_state.get_data_parallel_world_size() data_parallel_group = parallel_state.get_data_parallel_group() logging.info( - f" Multimodal train dataloader initializing with rank {rank} world_size {world_size} data_parallel_group {data_parallel_group} ****** " + f" Multimodal train dataloader initializing with" + f"rank {rank} world_size {world_size} data_parallel_group {data_parallel_group} ****** " ) worker_config = WorkerConfig( rank=rank, @@ -206,7 +210,8 @@ def val_dataloader(self) -> EVAL_DATALOADERS: if not parallel_state.is_initialized(): logging.info( - f"Muiltimodal val data loader parallel state is not initialized, using default worker config with no_workers {self.num_workers}" + f"Muiltimodal val data loader parallel state is not initialized," + "using default worker config with no_workers {self.num_workers}" ) worker_config = WorkerConfig.default_worker_config(self.num_workers) else: @@ -276,7 +281,8 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: """ if not 'dataloader_state' in state_dict: logging.warning( - f"Data loader state cannot be resumed from state_dict, it does not have the required key dataloader_state. It has {state_dict.keys()}" + f"Data loader state cannot be resumed from state_dict," + f"it does not have the required key dataloader_state. 
It has {state_dict.keys()}" ) return @@ -288,7 +294,8 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: else: logging.error(f"Cannot restore state from state_dict {state_dict}") raise ValueError( - f"Cannot restore state from state_dict: Is the trainer object is initialized and attached to datamodule???" + f"Cannot restore state from state_dict: " + f"Is the trainer object is initialized and attached to datamodule???" ) except Exception as e: raise RuntimeError(f"Failed to dataloader restore state due to: {e}") diff --git a/scripts/vlm/llava_next_finetune.py b/scripts/vlm/llava_next_finetune.py index 334b360d7c70..91df8a39452d 100644 --- a/scripts/vlm/llava_next_finetune.py +++ b/scripts/vlm/llava_next_finetune.py @@ -49,7 +49,7 @@ def main(args): from transformers import AutoProcessor from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer - from nemo.collections.multimodal.data.energon import SimpleMultiModalDataModule + from nemo.collections.multimodal.data.energon import EnergonMultiModalDataModule from nemo.collections.multimodal.data.energon.config import MultiModalSampleConfig from nemo.collections.vlm import LlavaNextTaskEncoder @@ -65,7 +65,7 @@ def main(args): image_processor=processor.image_processor, multimodal_sample_config=multimodal_sample_config, ) - data = SimpleMultiModalDataModule( + data = EnergonMultiModalDataModule( path=data_path, tokenizer=tokenizer, image_processor=processor.image_processor, diff --git a/scripts/vlm/llava_next_pretrain.py b/scripts/vlm/llava_next_pretrain.py index bb84e3dae1e5..0beb9b5b08d0 100644 --- a/scripts/vlm/llava_next_pretrain.py +++ b/scripts/vlm/llava_next_pretrain.py @@ -49,7 +49,7 @@ def main(args): from transformers import AutoProcessor from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer - from nemo.collections.multimodal.data.energon import SimpleMultiModalDataModule + from nemo.collections.multimodal.data.energon import EnergonMultiModalDataModule from nemo.collections.multimodal.data.energon.config import MultiModalSampleConfig from nemo.collections.vlm import LlavaNextTaskEncoder @@ -67,7 +67,7 @@ def main(args): image_processor=processor.image_processor, multimodal_sample_config=multimodal_sample_config, ) - data = SimpleMultiModalDataModule( + data = EnergonMultiModalDataModule( path=data_path, tokenizer=tokenizer, image_processor=processor.image_processor, diff --git a/tests/collections/multimodal/data/energon/test_data_module.py b/tests/collections/multimodal/data/energon/test_data_module.py index 179d3f09f2df..c499ecfe9ca4 100644 --- a/tests/collections/multimodal/data/energon/test_data_module.py +++ b/tests/collections/multimodal/data/energon/test_data_module.py @@ -25,10 +25,10 @@ from PIL import Image from transformers import AutoProcessor -from nemo.collections.multimodal.data.energon import ImageToken, MultiModalSampleConfig, SimpleMultiModalDataModule +from nemo.collections.multimodal.data.energon import EnergonMultiModalDataModule, ImageToken, MultiModalSampleConfig -class TestSimpleMultiModalDataModuleWithDummyData(unittest.TestCase): +class TestEnergonMultiModalDataModuleWithDummyData(unittest.TestCase): @classmethod def setUpClass(cls): @@ -47,7 +47,7 @@ def setUp(self): self.create_vqa_test_dataset(self.dataset_path, 10) - self.data_module = SimpleMultiModalDataModule( + self.data_module = EnergonMultiModalDataModule( path=str(self.dataset_path), tokenizer=self.tokenizer, image_processor=self.image_processor, From 
e0b14e7c217990c3cceeb55d25b1aaffb2eec12c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?oliver=20k=C3=B6nig?= Date: Fri, 20 Dec 2024 10:09:20 +0100 Subject: [PATCH 088/128] ci: Bump release workflow (#11686) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: oliver könig --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2ddad31e159e..8010b43847c7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -28,7 +28,7 @@ on: jobs: release: - uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_release_library.yml@v0.17.3 + uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_release_library.yml@v0.17.4 with: release-ref: ${{ inputs.release-ref }} image-name: nemo_container From 6e3cccff25dd776e81adad1a39622097005a303a Mon Sep 17 00:00:00 2001 From: Taejin Park Date: Fri, 20 Dec 2024 04:27:11 -0800 Subject: [PATCH 089/128] Fixing the device assignment issues during inference (test_batch) in Sortformer model (#11671) * Fixing the device assignment issues during inference (test_batch) Signed-off-by: taejinp * Removing the commented code lines Signed-off-by: taejinp --------- Signed-off-by: taejinp --- nemo/collections/asr/data/audio_to_diar_label.py | 8 ++------ nemo/collections/asr/models/sortformer_diar_models.py | 1 + 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/nemo/collections/asr/data/audio_to_diar_label.py b/nemo/collections/asr/data/audio_to_diar_label.py index 1dbe68589c0a..817938b758ae 100644 --- a/nemo/collections/asr/data/audio_to_diar_label.py +++ b/nemo/collections/asr/data/audio_to_diar_label.py @@ -1237,15 +1237,11 @@ def __getitem__(self, index): np.floor(audio_signal.shape[0] / self.featurizer.sample_rate * self.floor_decimal) / self.floor_decimal ) audio_signal = audio_signal[: round(self.featurizer.sample_rate * session_len_sec)] - audio_signal_length = torch.tensor(audio_signal.shape[0]).long() - audio_signal, audio_signal_length = audio_signal.to(self.device), audio_signal_length.to(self.device) - target_len = self.get_segment_timestamps(duration=session_len_sec, sample_rate=self.featurizer.sample_rate).to( - self.device - ) + target_len = self.get_segment_timestamps(duration=session_len_sec, sample_rate=self.featurizer.sample_rate) targets = self.parse_rttm_for_targets_and_lens( rttm_file=sample.rttm_file, offset=offset, duration=session_len_sec, target_len=target_len - ).to(self.device) + ) return audio_signal, audio_signal_length, targets, target_len diff --git a/nemo/collections/asr/models/sortformer_diar_models.py b/nemo/collections/asr/models/sortformer_diar_models.py index 483ff5328ad0..e2ac0b09c81b 100644 --- a/nemo/collections/asr/models/sortformer_diar_models.py +++ b/nemo/collections/asr/models/sortformer_diar_models.py @@ -666,6 +666,7 @@ def test_batch( audio_signal, audio_signal_length, targets, target_lens = batch audio_signal = audio_signal.to(self.device) audio_signal_length = audio_signal_length.to(self.device) + targets = targets.to(self.device) preds = self.forward( audio_signal=audio_signal, audio_signal_length=audio_signal_length, From b4aecafb58ff55af1edaec5163dc1174824818ca Mon Sep 17 00:00:00 2001 From: kevinhu-nv Date: Fri, 20 Dec 2024 13:05:00 -0500 Subject: [PATCH 090/128] add timestamp support (#11591) * add timestamp support Signed-off-by: kevinhu * Add unit test, fix a branch, and refactor. 
Signed-off-by: kevinhu * Apply isort and black reformatting Signed-off-by: kevinhu-nv Signed-off-by: kevinhu * Apply isort and black reformatting Signed-off-by: kevinhu-nv --------- Signed-off-by: kevinhu Signed-off-by: kevinhu-nv Co-authored-by: kevinhu-nv --- .../common/tokenizers/canary_tokenizer.py | 54 +++++++++++++++++- .../asr/test_asr_multitask_model_bpe.py | 56 +++++++++++++++---- 2 files changed, 98 insertions(+), 12 deletions(-) diff --git a/nemo/collections/common/tokenizers/canary_tokenizer.py b/nemo/collections/common/tokenizers/canary_tokenizer.py index cb83fe7ddf3d..04dc6e3a68a9 100644 --- a/nemo/collections/common/tokenizers/canary_tokenizer.py +++ b/nemo/collections/common/tokenizers/canary_tokenizer.py @@ -68,13 +68,63 @@ def nospeech_id(self) -> int: def pad_id(self) -> int: return self.special_tokens[CANARY_PAD] + def _text_with_timestamps_to_ids(self, text_without_timestamps, time_text, lang_id) -> list[int]: + trans_words = text_without_timestamps.split() + + # Get timestamp ids + time_ids = self._tokenize_special_prompt(time_text) + + # Tokenize text word by wordd + word_ids = [] + result_ids = [] + time_index = 0 + + timestamp_every_n_words = 1 # Add timestmap for every N words + word_index = 0 + # Both start and end time + for word in trans_words: + # Insert the first time_id once + if word_index == 0 and time_index < len(time_ids): + result_ids.append(time_ids[time_index]) + time_index += 1 + # Tokenize the word + word_ids += super().text_to_ids(word, lang_id) + result_ids += super().text_to_ids(word, lang_id) + word_index += 1 + # Insert time ids every N words after the first one + if word_index % timestamp_every_n_words == 0 and word_index != 0 and time_index < len(time_ids): + result_ids.append(time_ids[time_index]) + time_index += 1 + if time_index < len(time_ids): + result_ids.append(time_ids[time_index]) + time_index += 1 + else: + time_index += 2 + # Ensure the last time_id is appended at the end + if time_index < len(time_ids): + result_ids.append(time_ids[-1]) + # Make sure the last time_id is appended only once + if time_index < len(time_ids) and result_ids[-1] != (time_ids[-1]): + result_ids.append(time_ids[-1]) + return result_ids + + def _text_to_ids_maybe_with_timestamps(self, text_no_eos, lang_id) -> list[int]: + time_pattern = re.compile(r"<\|\d+\|>") + time_text = "".join(time_pattern.findall(text_no_eos)) + has_timestamp = bool(time_text) + if not has_timestamp: + return super().text_to_ids(text_no_eos, lang_id) + else: + text_without_timestamps = time_pattern.sub("", text_no_eos).strip() + return self._text_with_timestamps_to_ids(text_without_timestamps, time_text, lang_id) + def text_to_ids(self, text, lang_id) -> list[int]: if lang_id == CANARY_SPECIAL_TOKENIZER: return self._tokenize_special_prompt(text) lang_id = _map_canary1_to_canary2_lang(lang_id, self.langs) if text.endswith(CANARY_EOS): - return super().text_to_ids(text[: -len(CANARY_EOS)], lang_id) + [self.eos_id] - return super().text_to_ids(text, lang_id) + return self._text_to_ids_maybe_with_timestamps(text[: -len(CANARY_EOS)], lang_id) + [self.eos_id] + return self._text_to_ids_maybe_with_timestamps(text, lang_id) def _tokenize_special_prompt(self, text: str) -> list[int]: """ diff --git a/tests/collections/asr/test_asr_multitask_model_bpe.py b/tests/collections/asr/test_asr_multitask_model_bpe.py index 5ee2d8279cf2..63185f687fea 100644 --- a/tests/collections/asr/test_asr_multitask_model_bpe.py +++ b/tests/collections/asr/test_asr_multitask_model_bpe.py @@ -643,7 +643,9 @@ def 
canary2_tokenizer(asr_model, tmp_path): "<|notimestamp|>", "<|emo:undefined|>", "<|emo:happy|>", - ], + ] + # Timestamp frame special tokens + + [f"<|{i}|>" for i in range(900)], tmp_path, force_rebuild=False, ), @@ -659,7 +661,7 @@ def test_prompted_dataset_canary2(canary2_tokenizer): tokenizer=canary2_tokenizer, prompt=Canary2PromptFormatter(canary2_tokenizer) ) - cuts = DummyManifest(CutSet, begin_id=0, end_id=3, with_data=True) + cuts = DummyManifest(CutSet, begin_id=0, end_id=4, with_data=True) # backward compatibility c = cuts[0] @@ -693,11 +695,24 @@ def test_prompted_dataset_canary2(canary2_tokenizer): c.emotion = "<|emo:happy|>" c.decodercontext = "some decoder context" + # transcript with timestamps + c = cuts[3] + c.supervisions[0].language = "en" + c.supervisions[0].text = "<|0|> hello <|3|> <|4|> world <|5|>" + c.source_lang = "en" + c.target_lang = "en" + c.pnc = "<|pnc|>" + c.itn = "<|noitn|>" + c.diarize = "<|diarize|>" + c.timestamp = "<|timestamp|>" + c.emotion = "<|emo:happy|>" + c.decodercontext = "some decoder context" + batch = dataset[cuts] assert isinstance(batch, PromptedAudioToTextMiniBatch) - assert batch.audio.shape == (3, 16000) - assert batch.audio_lens.tolist() == [16000, 16000, 16000] + assert batch.audio.shape == (4, 16000) + assert batch.audio_lens.tolist() == [16000, 16000, 16000, 16000] # Test example 0 i = 0 @@ -706,11 +721,11 @@ def test_prompted_dataset_canary2(canary2_tokenizer): == '<|startofcontext|><|startoftranscript|><|emo:undefined|><|en|><|en|><|pnc|><|noitn|><|notimestamp|><|nodiarize|>' ) assert batch.prompt_lens[i] == 9 - assert canary2_tokenizer.ids_to_text(batch.transcript[i]) == 'i##r##r##el##e##v##a##nt' + assert canary2_tokenizer.ids_to_text(batch.transcript[i]) == 'i##r##r##el##e##v##a##nt' assert batch.transcript_lens[i] == 8 assert ( canary2_tokenizer.ids_to_text(batch.prompted_transcript[i]) - == '<|startofcontext|><|startoftranscript|><|emo:undefined|><|en|><|en|><|pnc|><|noitn|><|notimestamp|><|nodiarize|>i##r##r##el##e##v##a##nt<|endoftext|>' + == '<|startofcontext|><|startoftranscript|><|emo:undefined|><|en|><|en|><|pnc|><|noitn|><|notimestamp|><|nodiarize|>i##r##r##el##e##v##a##nt<|endoftext|>' ) assert batch.prompted_transcript_lens[i] == 18 @@ -721,11 +736,14 @@ def test_prompted_dataset_canary2(canary2_tokenizer): == '<|startofcontext|><|startoftranscript|><|emo:happy|><|en|><|en|><|pnc|><|itn|><|timestamp|><|diarize|>' ) assert batch.prompt_lens[i] == 9 - assert canary2_tokenizer.ids_to_text(batch.transcript[i]) == 'a##s##d' + assert ( + canary2_tokenizer.ids_to_text(batch.transcript[i]) + == 'a##s##d' + ) assert batch.transcript_lens[i] == 3 assert ( canary2_tokenizer.ids_to_text(batch.prompted_transcript[i]) - == '<|startofcontext|><|startoftranscript|><|emo:happy|><|en|><|en|><|pnc|><|itn|><|timestamp|><|diarize|>a##s##d<|endoftext|>' + == '<|startofcontext|><|startoftranscript|><|emo:happy|><|en|><|en|><|pnc|><|itn|><|timestamp|><|diarize|>a##s##d<|endoftext|>' ) assert batch.prompted_transcript_lens[i] == 13 @@ -736,10 +754,28 @@ def test_prompted_dataset_canary2(canary2_tokenizer): == '<|startofcontext|>s##o##m##ed##e##c##o##d##erc##o##nt##e##x##t<|startoftranscript|><|emo:happy|><|en|><|en|><|pnc|><|noitn|><|timestamp|><|diarize|>' ) assert batch.prompt_lens[i] == 25 - assert canary2_tokenizer.ids_to_text(batch.transcript[i]) == 'a##s##d' + assert ( + canary2_tokenizer.ids_to_text(batch.transcript[i]) + == 'a##s##d' + ) assert batch.transcript_lens[i] == 3 assert ( 
canary2_tokenizer.ids_to_text(batch.prompted_transcript[i]) - == '<|startofcontext|>s##o##m##ed##e##c##o##d##erc##o##nt##e##x##t<|startoftranscript|><|emo:happy|><|en|><|en|><|pnc|><|noitn|><|timestamp|><|diarize|>a##s##d<|endoftext|>' + == '<|startofcontext|>s##o##m##ed##e##c##o##d##erc##o##nt##e##x##t<|startoftranscript|><|emo:happy|><|en|><|en|><|pnc|><|noitn|><|timestamp|><|diarize|>a##s##d<|endoftext|>' ) assert batch.prompted_transcript_lens[i] == 29 + + # Test example 3 + i = 3 + assert ( + canary2_tokenizer.ids_to_text(batch.prompt[i]) + == '<|startofcontext|>s##o##m##ed##e##c##o##d##erc##o##nt##e##x##t<|startoftranscript|><|emo:happy|><|en|><|en|><|pnc|><|noitn|><|timestamp|><|diarize|>' + ) + assert batch.prompt_lens[i] == 25 + assert canary2_tokenizer.ids_to_text(batch.transcript[i]) == '<|0|>h##el##l##o<|3|><|4|>w##o##r##l##d<|5|>' + assert batch.transcript_lens[i] == 13 + assert ( + canary2_tokenizer.ids_to_text(batch.prompted_transcript[i]) + == '<|startofcontext|>s##o##m##ed##e##c##o##d##erc##o##nt##e##x##t<|startoftranscript|><|emo:happy|><|en|><|en|><|pnc|><|noitn|><|timestamp|><|diarize|><|0|>h##el##l##o<|3|><|4|>w##o##r##l##d<|5|><|endoftext|>' + ) + assert batch.prompted_transcript_lens[i] == 39 From 099bc805849e36946f86ad323d140e43c929795d Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis <153118171+akoumpa@users.noreply.github.com> Date: Fri, 20 Dec 2024 13:36:55 -0800 Subject: [PATCH 091/128] Make LinearAdapter a nn.Linear child to maintain ckpt structure (#11642) * Make LinearAdapter a nn.Linear child to maintain ckpt structure Signed-off-by: Alexandros Koumparoulis * add _is_fsdp_v1 attribute Signed-off-by: Alexandros Koumparoulis * lora+fsdp Signed-off-by: Alexandros Koumparoulis * set precision=bf16 in nl.Trainer Signed-off-by: Alexandros Koumparoulis * add test Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * fix Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * add unit tests Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa * Update docs Signed-off-by: Alexandros Koumparoulis * Apply isort and black reformatting Signed-off-by: akoumpa --------- Signed-off-by: Alexandros Koumparoulis Signed-off-by: akoumpa Co-authored-by: akoumpa --- .github/workflows/cicd-main.yml | 12 ++ examples/llm/peft/hf.py | 1 + examples/llm/sft/hf.py | 1 + examples/vlm/hf/peft.py | 1 + nemo/collections/llm/peft/lora.py | 159 ++++++++++++++---- nemo/lightning/pytorch/callbacks/peft.py | 1 + tests/collections/llm/hf/peft.py | 1 + tests/collections/vlm/hf/peft.py | 1 + .../lightning/pytorch/callbacks/test_peft.py | 47 ++++++ 9 files changed, 195 insertions(+), 29 deletions(-) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index e258391e04e9..8a939de34394 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -3626,6 +3626,17 @@ jobs: AFTER_SCRIPT: | rm -rf nemo_experiments + L2_VLM_HF_Transformer_PEFT_FSDP: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_VLM_HF_Transformer_PEFT_FSDP') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/vlm/hf/peft.py --model /home/TestData/vlm/qwen2-2b/ --max-steps 3 --disable-ckpt --strategy 
fsdp --devices 2 + AFTER_SCRIPT: | + rm -rf nemo_experiments + L2_HF_Transformer_PEFT: needs: [ cicd-test-container-setup ] uses: ./.github/workflows/_test_template.yml @@ -4906,6 +4917,7 @@ jobs: - L2_HF_Transformer_SFT_nemorun - L2_HF_Transformer_SFT_2gpu - L2_VLM_HF_Transformer_PEFT + - L2_VLM_HF_Transformer_PEFT_FSDP - L2_HF_Transformer_SFT_2gpu_nemorun - L2_HF_Transformer_SFT_TE_Acceleration - L2_NeMo_2_SSM_Pretraining diff --git a/examples/llm/peft/hf.py b/examples/llm/peft/hf.py index 3137a542ae01..c0562663c2cc 100644 --- a/examples/llm/peft/hf.py +++ b/examples/llm/peft/hf.py @@ -107,6 +107,7 @@ def main(): use_distributed_sampler=use_dist_samp, logger=wandb, callbacks=callbacks, + precision="bf16", ), optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), log=None, diff --git a/examples/llm/sft/hf.py b/examples/llm/sft/hf.py index ff85180cf86b..ad22c8a733f4 100755 --- a/examples/llm/sft/hf.py +++ b/examples/llm/sft/hf.py @@ -124,6 +124,7 @@ def main(): use_distributed_sampler=use_dist_samp, logger=wandb, callbacks=callbacks, + precision="bf16", ), optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), log=None, diff --git a/examples/vlm/hf/peft.py b/examples/vlm/hf/peft.py index d51984677a74..2400c333f398 100644 --- a/examples/vlm/hf/peft.py +++ b/examples/vlm/hf/peft.py @@ -116,6 +116,7 @@ def fmt(sample): accumulate_grad_batches=10, gradient_clip_val=grad_clip, use_distributed_sampler=use_dist_samp, + precision="bf16", logger=wandb, ), optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), diff --git a/nemo/collections/llm/peft/lora.py b/nemo/collections/llm/peft/lora.py index 0ce6138d1c6b..a0318c587e57 100644 --- a/nemo/collections/llm/peft/lora.py +++ b/nemo/collections/llm/peft/lora.py @@ -17,6 +17,7 @@ from typing import List, Literal import torch +import torch.nn.functional as F from torch import nn from nemo.collections.llm.peft.utils import get_adapter_attributes_from_linear, is_expert_linear, wildcard_match @@ -39,9 +40,23 @@ def forward(self, x): return linear_output + adapter_output, bias -class LinearAdapter(nn.Module): +class LinearAdapter(nn.Linear): """ - A simple LoRA linear module for non-megatron models. + Linear + LoRA, maintains ckpts structrue (i.e. Linear's weight/bias remain at the same FQN) + + The _init_wrapper and _forward methods provide the LoRA functionality. We want to be able to + use those inside LinearAdapter but also for monkey-patching modules, without repeating the + same code -> therefore those are decorated with @staticmethod. + + Args: + orig_linear (nn.Module): the linear module to augment. + dim (int): lora's dim in_features -> dim -> out_features. + alpha (int): lora's scaling alpha. + dropout (float): dropout prob (default: 0.1). + dropout_position (str): where to apply dropout rel. to lora (choices= ['pre', 'post'], default=post) + lora_A_init_method (str): init method for lora_A (choices= ['xavier', 'uniform']) + lora_dtype (torch.dtype): weight's dtype, by default will use orig_linear's but if they + are quantized weights (e.g. 4bit) needs to be specified explicitly. 
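    A minimal usage sketch of the two paths this patch introduces (layer sizes are arbitrary):

        import torch.nn as nn
        from nemo.collections.llm.peft.lora import LinearAdapter, patch_linear_module

        base = nn.Linear(16, 16)
        adapted = LinearAdapter(base, dim=8, alpha=32)    # new module; keeps Linear's weight/bias keys, adds lora_a/lora_b
        patched = patch_linear_module(nn.Linear(16, 16))  # in-place monkey-patch, used on the FSDP v1 path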
""" def __init__( @@ -54,46 +69,127 @@ def __init__( lora_A_init_method='xavier', lora_dtype=None, ): - super(LinearAdapter, self).__init__() assert isinstance(orig_linear, nn.Linear) + super(LinearAdapter, self).__init__( + in_features=orig_linear.in_features, + out_features=orig_linear.out_features, + bias=orig_linear.bias is not None, + device=orig_linear.weight.device, + dtype=orig_linear.weight.dtype, + ) + # copy weights + self.weight.data.copy_(orig_linear.weight.data) + if orig_linear.bias is not None: + self.bias.data.copy_(orig_linear.bias.data) + # initialize the adapte + LinearAdapter._init_adapter(self) + + @staticmethod + def _init_adapter( + obj, + dim=8, + alpha=32, + dropout=0.1, + dropout_position='post', + lora_A_init_method='xavier', + lora_dtype=None, + ): + """Adds LoRA weights to obj. The obj is either a LinearAdapter or an nn.Module (when + monkey-patching). - self.orig_linear = orig_linear - self.dim = dim - self.scale = alpha / dim + Args: + obj (LinearAdapter | nn.Module): input module to adapt. + dim (int): lora's dim in_features -> dim -> out_features. + alpha (int): lora's scaling alpha. + dropout (float): dropout prob (default: 0.1). + dropout_position (str): where to apply dropout rel. to lora (choices= ['pre', 'post'], default=post) + lora_A_init_method (str): init method for lora_A (choices= ['xavier', 'uniform']) + lora_dtype (torch.dtype): weight's dtype, by default will use orig_linear's but if they + are quantized weights (e.g. 4bit) needs to be specified explicitly. + """ + obj.dim = dim + obj.scale = alpha / dim # Freezer - device = self.orig_linear.weight.device - self.orig_linear.weight.requires_grad = False - if self.orig_linear.bias is not None: - self.orig_linear.bias.requires_grad = False + device = obj.weight.device + obj.weight.requires_grad = False + if obj.bias is not None: + obj.bias.requires_grad = False - in_features = self.orig_linear.in_features - out_features = self.orig_linear.out_features - dtype = lora_dtype or self.orig_linear.weight.dtype + in_features = obj.in_features + out_features = obj.out_features + dtype = lora_dtype or obj.weight.dtype - self.lora_a = nn.Parameter(torch.zeros((in_features, dim), dtype=dtype, device=device)) - self.lora_b = nn.Parameter(torch.zeros((dim, out_features), dtype=dtype, device=device)) + obj.lora_a = nn.Parameter(torch.zeros((in_features, dim), dtype=dtype, device=device)) + obj.lora_b = nn.Parameter(torch.zeros((dim, out_features), dtype=dtype, device=device)) if lora_A_init_method == 'xavier': - torch.nn.init.uniform_(self.lora_a) + torch.nn.init.uniform_(obj.lora_a) else: - nn.init.kaiming_uniform_(self.lora_a, a=math.sqrt(5)) + nn.init.kaiming_uniform_(obj.lora_a, a=math.sqrt(5)) - self.dropout = nn.Dropout(p=dropout) + obj.dropout = nn.Dropout(p=dropout) assert dropout_position in ['pre', 'post'], dropout_position - self.dropout_position = dropout_position + obj.dropout_position = dropout_position - def forward(self, x): + @staticmethod + def _forward(obj, x): # pylint: disable=C0115,C0116 - res = self.orig_linear(x) - if self.dropout_position == 'pre': - x = self.dropout(x) - lora_res = x @ self.lora_a - lora_res = lora_res @ self.lora_b - lora_res = lora_res * self.scale - if self.dropout_position == 'post': - lora_res = self.dropout(lora_res) + res = F.linear(x, obj.weight, obj.bias) + if obj.dropout_position == 'pre': + x = obj.dropout(x) + lora_res = x @ obj.lora_a + lora_res = lora_res @ obj.lora_b + lora_res = lora_res * obj.scale + if obj.dropout_position == 'post': + lora_res = 
obj.dropout(lora_res) return res + lora_res + def forward(self, x): + return LinearAdapter._forward(self, x) + + +def patch_linear_module( + orig_linear, + dim=8, + alpha=32, + dropout=0.1, + dropout_position='post', + lora_A_init_method='xavier', + lora_dtype=None, +): + """Monkey-patches a nn.Linear (orig_linear param) to be a LinearAdapter, for all purposes + think of this function as replacing a nn.Linear with a LinearAdapter defined above. + + The orig_linear might not contain valid weights, for example, the given orig_linear was + initialized within a context-manager that uses a "meta" device. Therefore, we cannot copy + the weight/bias from the orig_linear to the LinearAdapter, since those have not been allocated, + + To circumvent this scenario, LinearAdapter's additional functionality (_init_adapter, _forward) + is based on static functions, so that we can use them for patching or when allocating a + new LinearAdapter object. + + Args: + orig_linear (nn.Linear): the module we add adapter to. + dim (int, optional): Lora dim. Defaults to 8. + alpha (int, optional): Lora alpha scale. Defaults to 32. + dropout (float, optional): dropout prob. Defaults to 0.1. + dropout_position (str, optional): location to apply dropout wrt lora. + Defaults to 'post' (choices: 'pre', 'post'). + lora_A_init_method (str, optional): lora_a init method. Defaults to 'xavier'. + lora_dtype (_type_, optional): Lora weights' dtype. By default will use orig_linear's dtype + but orig_linear might use non-trainable dtype (e.g. 4bit), in which case the user must + specify the dtype manually. Defaults to None. + + Returns: + (nn.Module): the monkey-patched (nn.Linear + LoRA) nn.Module + """ + + assert isinstance(orig_linear, nn.Linear) + + LinearAdapter._init_adapter(orig_linear, dim, alpha, dropout, dropout_position, lora_A_init_method, lora_dtype) + orig_linear.forward = lambda x: LinearAdapter._forward(orig_linear, x) + return orig_linear + @dataclass class LoRA(PEFT): @@ -168,7 +264,12 @@ def transform(self, m: nn.Module, name=None, prefix=None): full_name = f"{prefix}.{name}" if prefix else name if name in self.target_modules or any(wildcard_match(pattern, full_name) for pattern in self.target_modules): if isinstance(m, nn.Linear): - return LinearAdapter( + if self._is_fsdp_v1: + lora_cls = patch_linear_module + else: + lora_cls = LinearAdapter + + return lora_cls( m, dim=self.dim, alpha=self.alpha, diff --git a/nemo/lightning/pytorch/callbacks/peft.py b/nemo/lightning/pytorch/callbacks/peft.py index d2e93fe9ab42..bc40a7f10d15 100644 --- a/nemo/lightning/pytorch/callbacks/peft.py +++ b/nemo/lightning/pytorch/callbacks/peft.py @@ -138,6 +138,7 @@ def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: str) super().setup(trainer, pl_module, stage=stage) + self._is_fsdp_v1 = type(trainer.strategy).__name__ == 'FSDPStrategy' trainer.strategy.trainer = trainer wrapped_io = partial(WrappedAdapterIO, peft=self) diff --git a/tests/collections/llm/hf/peft.py b/tests/collections/llm/hf/peft.py index 018774280946..3be0443d69fe 100644 --- a/tests/collections/llm/hf/peft.py +++ b/tests/collections/llm/hf/peft.py @@ -100,6 +100,7 @@ def formatting_prompts_func(examples): use_distributed_sampler=use_dist_samp, logger=wandb, enable_checkpointing=args.disable_ckpt, + precision='bf16', ), optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), log=None, diff --git a/tests/collections/vlm/hf/peft.py b/tests/collections/vlm/hf/peft.py index 109bccfcfa1f..96caebb5c243 100644 --- 
a/tests/collections/vlm/hf/peft.py +++ b/tests/collections/vlm/hf/peft.py @@ -118,6 +118,7 @@ def fmt(sample): use_distributed_sampler=use_dist_samp, logger=wandb, enable_checkpointing=args.disable_ckpt, + precision='bf16', ), optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), log=None, diff --git a/tests/lightning/pytorch/callbacks/test_peft.py b/tests/lightning/pytorch/callbacks/test_peft.py index fb6728acee8f..2b295324a7ab 100644 --- a/tests/lightning/pytorch/callbacks/test_peft.py +++ b/tests/lightning/pytorch/callbacks/test_peft.py @@ -14,8 +14,10 @@ from unittest.mock import MagicMock, call, patch +import torch import torch.nn as nn from lightning.pytorch.trainer.states import TrainerFn + from nemo.collections.llm import fn from nemo.lightning.pytorch.callbacks.peft import PEFT, WrappedAdapterIO from nemo.utils.callbacks.dist_ckpt_io import AsyncFinalizableCheckpointIO @@ -49,6 +51,51 @@ def test_peft_call(self): assert transformed_model.linear.weight.requires_grad == False assert transformed_model.conv.weight.requires_grad == False + def test_linear_adapter(self): + from nemo.collections.llm.peft.lora import LinearAdapter + + for has_bias in [True, False]: + linear = nn.Linear(10, 10, bias=has_bias) + linear_adapter = LinearAdapter(linear) + bias_in_state_dict = 'bias' in linear.state_dict() + if has_bias: + assert bias_in_state_dict + else: + assert not bias_in_state_dict + + # Check if the state-dict keys changed + for key, val in linear.state_dict().items(): + assert key in linear_adapter.state_dict(), f"Key {key} not found in LinearAdapter" + assert torch.equal(val, linear_adapter.state_dict()[key]), f"Key {key} diff. val in LinearAdapter" + # Make sure the additional keys are in the allow list + for key, val in linear_adapter.state_dict().items(): + if key in linear.state_dict(): + continue + assert key in ['lora_a', 'lora_b'] + + def test_linear_adapter_monkey_patch(self): + from copy import deepcopy + + from nemo.collections.llm.peft.lora import patch_linear_module + + linear = nn.Linear(10, 10) + state_init = deepcopy(linear.state_dict()) + linear_adapter = patch_linear_module(linear) + # Check if the state-dict keys changed + for key, val in state_init.items(): + assert key in linear_adapter.state_dict(), f"Key {key} not found in LinearAdapter" + assert torch.equal(val, linear_adapter.state_dict()[key]), f"Key {key} diff. 
val in LinearAdapter" + # Make sure the additional keys are in the allow list + for key, val in linear_adapter.state_dict().items(): + if key in state_init: + continue + assert key in ['lora_a', 'lora_b'] + + for key in ['lora_a', 'lora_b']: + assert hasattr(linear_adapter, key), f"Expected {key} to be in module" + assert key in linear_adapter.state_dict(), f"Expected {key} to be in state dict" + assert getattr(linear_adapter, key).requires_grad == True, "Expected {key} to require_grad" + def test_peft_setup(self): peft = self.DummyPEFT() trainer = MagicMock() From da10109a4245915ec1fe73b23fb52a53a4ddd1ae Mon Sep 17 00:00:00 2001 From: meatybobby Date: Fri, 20 Dec 2024 15:25:31 -0800 Subject: [PATCH 092/128] Add multi images support for mllama generate (#11672) * Add multi images support for mllama generate * Apply isort and black reformatting Signed-off-by: meatybobby --------- Signed-off-by: meatybobby Co-authored-by: meatybobby --- nemo/collections/vlm/inference/base.py | 7 ++--- nemo/collections/vlm/inference/vlm_engine.py | 4 +-- scripts/vlm/mllama_generate.py | 27 ++++++++++++-------- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/nemo/collections/vlm/inference/base.py b/nemo/collections/vlm/inference/base.py index bbc85a8ee4a8..77918bae26b9 100644 --- a/nemo/collections/vlm/inference/base.py +++ b/nemo/collections/vlm/inference/base.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional +from typing import List, Optional, Union import pytorch_lightning as pl import torch import torch.distributed from megatron.core.inference.common_inference_params import CommonInferenceParams from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import InferenceWrapperConfig +from PIL.Image import Image from transformers import AutoProcessor import nemo.lightning as nl @@ -86,8 +87,8 @@ def generate( wrapped_model: VLMInferenceWrapper, tokenizer, image_processor, - prompts: list[str], - images: list, + prompts: List[str], + images: List[Union[Image, List[Image]]], max_batch_size: int = 4, random_seed: Optional[int] = None, inference_params: Optional[CommonInferenceParams] = None, diff --git a/nemo/collections/vlm/inference/vlm_engine.py b/nemo/collections/vlm/inference/vlm_engine.py index bce373e7a2f5..6e5fd7fa11ec 100644 --- a/nemo/collections/vlm/inference/vlm_engine.py +++ b/nemo/collections/vlm/inference/vlm_engine.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List +from typing import List, Union import torch from megatron.core.inference.common_inference_params import CommonInferenceParams @@ -26,7 +26,7 @@ class VLMEngine(MCoreEngine): def generate( self, prompts: List[str], - images: List[Image] = None, + images: List[Union[Image, List[Image]]] = None, common_inference_params: CommonInferenceParams = None, ) -> dict: # pylint: disable=C0115,C0116 diff --git a/scripts/vlm/mllama_generate.py b/scripts/vlm/mllama_generate.py index 10dc197f63a0..afa6ee05a221 100644 --- a/scripts/vlm/mllama_generate.py +++ b/scripts/vlm/mllama_generate.py @@ -45,10 +45,8 @@ def load_image(image_url: str) -> Image.Image: return None -def generate(model, processor, image, text): +def generate(model, processor, images, text): # pylint: disable=C0115,C0116 - tokenizer = processor.tokenizer - messages = [ { "role": "user", @@ -60,8 +58,8 @@ def generate(model, processor, image, text): model = setup_inference_wrapper(model, processor.tokenizer) prompts = [input_text] - images = [image] - params = CommonInferenceParams(top_k=1, top_p=0, num_tokens_to_generate=100) + images = [images] + params = CommonInferenceParams(top_k=1, top_p=0, num_tokens_to_generate=50) result = vlm_generate( model, processor.tokenizer, @@ -113,11 +111,11 @@ def main(args) -> None: model = model.to(torch.bfloat16) # Load the image - raw_image = load_image(args.image_url) - if raw_image is None: + raw_images = [load_image(url) for url in args.image_url] + if not raw_images: return # Exit if the image can't be loaded - generate(model, processor, image=raw_image, text="<|image|>\nDescribe the image.") + generate(model, processor, images=raw_images, text=args.prompt) if __name__ == "__main__": @@ -133,12 +131,21 @@ def main(args) -> None: default=None, help="Local path to the model if not loading from Hugging Face.", ) + parser.add_argument( + "--prompt", + type=str, + default="<|image|>\nDescribe the image.", + help="Input prompt", + ) parser.add_argument( "--image_url", + nargs='+', type=str, # pylint: disable=line-too-long - default="https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", - help="URL of the image to use for inference.", + default=[ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg" + ], + help="List of the image urls to use for inference.", ) parser.add_argument("--devices", type=int, required=False, default=1) parser.add_argument("--tp_size", type=int, required=False, default=1) From 7682cf24676139de8b1d23580530363237b4e13c Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 23 Dec 2024 13:26:31 -0800 Subject: [PATCH 093/128] Fix merge Signed-off-by: Boxiang Wang --- .github/workflows/cicd-main.yml | 3 --- .github/workflows/release.yml | 4 ---- 2 files changed, 7 deletions(-) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 1bc3011b33bd..e9fcc7276d6c 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -4731,8 +4731,6 @@ jobs: --lora_checkpoint_path=/home/TestData/nemo2_ckpt/llama_lora_ci_checkpoint_v2/ \ --output_path=/tmp/nemo2_lora_merge/${{ github.run_id }} -<<<<<<< HEAD -======= L2_NEMO_2_LoRA_Export: needs: [cicd-test-container-setup] uses: ./.github/workflows/_test_template.yml @@ -4745,7 +4743,6 @@ jobs: --lora_checkpoint_path=/home/TestData/nemo2_ckpt/llama_lora_ci_checkpoint_v2/ \ 
--output_path=/tmp/nemo2_lora_merge/${{ github.run_id }} ->>>>>>> main L2_NEMO_2_LoRA_Inference: needs: [cicd-test-container-setup] uses: ./.github/workflows/_test_template.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index be41898beee4..ab0fbd58ce0f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -32,11 +32,7 @@ on: jobs: release: -<<<<<<< HEAD - uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_release_library.yml@v0.17.4 -======= uses: NVIDIA/NeMo-FW-CI-templates/.github/workflows/_release_library.yml@v0.18.2 ->>>>>>> main with: release-ref: ${{ inputs.release-ref }} image-name: nemo_container From 0e694f76f79b3632f964a2e2e1caa5f3cf3444ed Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 23 Dec 2024 16:02:58 -0800 Subject: [PATCH 094/128] Fix non-mcore fsdp2 Signed-off-by: Boxiang Wang --- nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py | 2 +- tests/collections/llm/hf/peft_nemorun.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index cea7264543ff..786c232ff748 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -99,7 +99,7 @@ def configure_model(self): def forward(self, batch): return self.model(**batch) - def training_step(self, batch): + def training_step(self, batch, batch_idx=None): labels = batch.pop('labels').to(self.model.device) loss_mask = batch.pop('loss_mask', None) diff --git a/tests/collections/llm/hf/peft_nemorun.py b/tests/collections/llm/hf/peft_nemorun.py index ef34d4d39a11..b36802cfba3f 100644 --- a/tests/collections/llm/hf/peft_nemorun.py +++ b/tests/collections/llm/hf/peft_nemorun.py @@ -41,7 +41,6 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut parser = argparse.ArgumentParser() parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') - parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) parser.add_argument('--devices', default=1) parser.add_argument('--accelerator', default='gpu', choices=['gpu']) parser.add_argument('--max-steps', type=int, default=100) From a2a67ef38a3a581b5703d9a754278207ee5735dd Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 30 Dec 2024 13:08:16 -0800 Subject: [PATCH 095/128] Add run code --- tests/collections/llm/hf/sft_nemorun.py | 105 ++++++++++++++++++++++++ 1 file changed, 105 insertions(+) diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index a3daa66ca774..4bcf4a1fb788 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -21,6 +21,108 @@ DATA_PATH = '/home/TestData/lite/hf_cache/squad/' +import torch +from torch.distributed._composable.fsdp import MixedPrecisionPolicy +from torch.distributed._composable.fsdp.fully_shard import fully_shard +from torch.distributed._tensor import Replicate, Shard +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper +from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + PrepareModuleInput, + RowwiseParallel, + SequenceParallel, + parallelize_module, +) + +def parallelize(model, device_mesh: DeviceMesh): + """Apply parallelisms and activation checkpointing to the model. 
+ + NOTE: The passed-in model preferably should be on meta device. Otherwise, + the model must fit on GPU or CPU memory. + + """ + + dp_mesh = device_mesh["data_parallel"] + tp_mesh = device_mesh["tensor_parallel"] + + if tp_mesh.size() > 1: + # 1. Parallelize the first embedding and the last linear proj layer + # 2. Parallelize the root norm layer over the sequence dim + # 3. Shard the first transformer block's inputs + + # Parallelize the first embedding and the last linear out projection + plan = { + "tok_embeddings": RowwiseParallel(input_layouts=Replicate()), + "output": ColwiseParallel( + input_layouts=Shard(1), + # Optional: Shard the output along the class dimension to compute the loss in parallel. + # See `loss_parallel` in `train.py` + output_layouts=Shard(-1), + use_local_output=False, + ), + "norm": SequenceParallel(), + "layers.0": PrepareModuleInput( + input_layouts=(Replicate(), None), + desired_input_layouts=(Shard(1), None), + use_local_output=True, + ), + } + model = parallelize_module(model, tp_mesh, plan) + + # Parallelize each transformer block + for transformer_block in model.layers.values(): + plan = { + "attention": PrepareModuleInput( + input_layouts=(Shard(1), None), + desired_input_layouts=(Replicate(), None), + ), + "attention.wq": ColwiseParallel(), + "attention.wk": ColwiseParallel(), + "attention.wv": ColwiseParallel(), + "attention.wo": RowwiseParallel(output_layouts=Shard(1)), + "attention_norm": SequenceParallel(), + "feed_forward": PrepareModuleInput( + input_layouts=(Shard(1),), + desired_input_layouts=(Replicate(),), + ), + "feed_forward.w1": ColwiseParallel(), + "feed_forward.w2": RowwiseParallel(output_layouts=Shard(1)), + "feed_forward.w3": ColwiseParallel(), + "ffn_norm": SequenceParallel(), + } + + # Adjust attention module to use the local number of heads + attn_layer = transformer_block.attention + attn_layer.n_heads = attn_layer.n_heads // tp_mesh.size() + attn_layer.n_kv_heads = attn_layer.n_kv_heads // tp_mesh.size() + + # Apply the plan for the current transformer block + parallelize_module(transformer_block, tp_mesh, plan) + + if dp_mesh.size() > 1: + assert dp_mesh.ndim == 1 # Hybrid-sharding not supported + + # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here + # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. 
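+ # With this policy, parameters (and the activations computed from them) are cast to
+ # bfloat16 for compute, while gradients are reduced across data-parallel ranks in float32
+ # for numerical stability.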
+ mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) + + fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} + for layer_id, transformer_block in model.layers.items(): + # Apply activation checkpointing + transformer_block = checkpoint_wrapper(transformer_block) + # As an optimization, do not reshard after forward for the last + # transformer block since FSDP would prefetch it immediately + reshard_after_forward = int(layer_id) < len(model.layers) - 1 + fully_shard( + transformer_block, + **fsdp_config, + reshard_after_forward=reshard_after_forward, + ) + model.layers[layer_id] = transformer_block + model = fully_shard(model, **fsdp_config) + + return model def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: # Env vars for jobs are configured here @@ -66,5 +168,8 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut pad_token_id=tokenizer.tokenizer.eos_token_id, tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), ) + + recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=8, tensor_parallel_size=1) + recipe.trainer.plugins=None executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) run.run(recipe, executor=executor) From 446c8885194e69b40f0accb0b1cd27825e8dfe92 Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Mon, 30 Dec 2024 21:15:44 +0000 Subject: [PATCH 096/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- tests/collections/llm/hf/sft_nemorun.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index 4bcf4a1fb788..bfb0db8eb1b7 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -35,6 +35,7 @@ parallelize_module, ) + def parallelize(model, device_mesh: DeviceMesh): """Apply parallelisms and activation checkpointing to the model. 
@@ -124,6 +125,7 @@ def parallelize(model, device_mesh: DeviceMesh): return model + def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: # Env vars for jobs are configured here env_vars = { @@ -170,6 +172,6 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut ) recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=8, tensor_parallel_size=1) - recipe.trainer.plugins=None + recipe.trainer.plugins = None executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) run.run(recipe, executor=executor) From 4782c7ea2f070a7b23789832f1c619d6ace2624a Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Tue, 31 Dec 2024 13:19:53 -0800 Subject: [PATCH 097/128] Add hooks --- .../gpt/model/hf_auto_model_for_causal_lm.py | 108 ++++++++++++++++++ .../hf_auto_model_for_speech_seq2seq.py | 99 ++++++++++++++++ .../hf_auto_model_for_image_text_to_text.py | 100 ++++++++++++++++ tests/collections/llm/hf/sft_nemorun.py | 11 +- 4 files changed, 313 insertions(+), 5 deletions(-) diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 786c232ff748..5f1ec7904c3e 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -15,6 +15,18 @@ import lightning.pytorch as pl import torch import torch.nn.functional as F +from torch.distributed._composable.fsdp import MixedPrecisionPolicy +from torch.distributed._composable.fsdp.fully_shard import fully_shard +from torch.distributed._tensor import Replicate, Shard +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper +from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + PrepareModuleInput, + RowwiseParallel, + SequenceParallel, + parallelize_module, +) from transformers import AutoModelForCausalLM from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer @@ -90,9 +102,14 @@ def configure_model(self): self.model = AutoModelForCausalLM.from_config( config, torch_dtype=dtype, trust_remote_code=self.trust_remote_code ) + + # Apply FSDP2 and TP to the model + parallelize(self.model, device_mesh=self.device_mesh) if self.model_accelerator is not None: self.model_accelerator(self.model) + + print(self.model) self.model.train() @@ -164,3 +181,94 @@ def _remove_extra_batch_keys(self, batch, reserved_keys=['labels', 'loss_mask']) fwd_signature = inspect.signature(self.model.forward) allowed_keys = list(fwd_signature.parameters.keys()) + reserved_keys return {k: batch[k] for k in allowed_keys if k in batch} + + +# Taken and modified from torchtitan +# https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py +def parallelize(model, device_mesh: DeviceMesh): + """Apply parallelisms and activation checkpointing to the model. + NOTE: The passed-in model preferably should be on meta device. Otherwise, + the model must fit on GPU or CPU memory. + """ + + dp_mesh = device_mesh["data_parallel"] + tp_mesh = device_mesh["tensor_parallel"] + + print(dp_mesh) + print(tp_mesh) + + if tp_mesh.size() > 1: + # 1. Parallelize the first embedding and the last linear proj layer + # 2. Parallelize the root norm layer over the sequence dim + # 3. 
Shard the first transformer block's inputs + + # Parallelize the first embedding and the last linear out projection + plan = { + "tok_embeddings": RowwiseParallel(input_layouts=Replicate()), + "output": ColwiseParallel(input_layouts=Shard(1), output_layouts=Replicate()), + "norm": SequenceParallel(), + "layers.0": PrepareModuleInput( + input_layouts=(Replicate(), None), + desired_input_layouts=(Shard(1), None), + use_local_output=True, + ), + } + model = parallelize_module(model, tp_mesh, plan) + + print(model.model.layers) + + # Parallelize each transformer block + for transformer_block in model.model.layers: + plan = { + "attention": PrepareModuleInput( + input_layouts=(Shard(1), None), + desired_input_layouts=(Replicate(), None), + ), + "attention.wq": ColwiseParallel(), + "attention.wk": ColwiseParallel(), + "attention.wv": ColwiseParallel(), + "attention.wo": RowwiseParallel(output_layouts=Shard(1)), + "attention_norm": SequenceParallel(), + "feed_forward": PrepareModuleInput( + input_layouts=(Shard(1),), + desired_input_layouts=(Replicate(),), + ), + "feed_forward.w1": ColwiseParallel(), + "feed_forward.w2": RowwiseParallel(output_layouts=Shard(1)), + "feed_forward.w3": ColwiseParallel(), + "ffn_norm": SequenceParallel(), + } + + # Adjust attention module to use the local number of heads + attn_layer = transformer_block.self_attn + attn_layer.num_heads = attn_layer.num_heads // tp_mesh.size() + attn_layer.num_key_value_heads = attn_layer.num_key_value_heads // tp_mesh.size() + + # Apply the plan for the current transformer block + parallelize_module(transformer_block, tp_mesh, plan) + + if dp_mesh.size() > 1: + assert dp_mesh.ndim == 1 # Hybrid-sharding not supported + + # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here + # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. 
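+ # fully_shard (FSDP2) shards each wrapped module's parameters, gradients, and optimizer
+ # state over the data-parallel mesh as DTensors; each transformer block below becomes its
+ # own sharding unit, and the root module is wrapped last.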
+ mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) + + fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} + for layer_id, transformer_block in enumerate(model.model.layers): + # Apply activation checkpointing + transformer_block = checkpoint_wrapper(transformer_block) + # As an optimization, do not reshard after forward for the last + # transformer block since FSDP would prefetch it immediately + reshard_after_forward = int(layer_id) < len(model.model.layers) - 1 + fully_shard( + transformer_block, + **fsdp_config, + reshard_after_forward=reshard_after_forward, + ) + model.model.layers[layer_id] = transformer_block + model = fully_shard(model, **fsdp_config) + + print("here") + + return model diff --git a/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py b/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py index a039edc66a39..35940ed72a18 100644 --- a/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py +++ b/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py @@ -15,6 +15,18 @@ import lightning.pytorch as pl import torch import torch.nn.functional as F +from torch.distributed._composable.fsdp import MixedPrecisionPolicy +from torch.distributed._composable.fsdp.fully_shard import fully_shard +from torch.distributed._tensor import Replicate, Shard +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper +from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + PrepareModuleInput, + RowwiseParallel, + SequenceParallel, + parallelize_module, +) from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer @@ -93,6 +105,9 @@ def configure_model(self, train=True): config = AutoConfig.from_pretrained(self.model_name, trust_remote_code=self.trust_remote_code) self.model = AutoModelForSpeechSeq2Seq.from_config(config, trust_remote_code=self.trust_remote_code) + + # Apply FSDP2 and TP to the model + parallelize(self.model, device_mesh=self.device_mesh) if train: self.model.train() @@ -133,3 +148,87 @@ def save_pretrained(self, path): self._processor.save_pretrained(path) else: logging.warning("A processor wasn't created before to save.") + + +# Taken and modified from torchtitan +# https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py +def parallelize(model, device_mesh: DeviceMesh): + """Apply parallelisms and activation checkpointing to the model. + NOTE: The passed-in model preferably should be on meta device. Otherwise, + the model must fit on GPU or CPU memory. + """ + + dp_mesh = device_mesh["data_parallel"] + tp_mesh = device_mesh["tensor_parallel"] + + if tp_mesh.size() > 1: + # 1. Parallelize the first embedding and the last linear proj layer + # 2. Parallelize the root norm layer over the sequence dim + # 3. 
Shard the first transformer block's inputs + + # Parallelize the first embedding and the last linear out projection + plan = { + "tok_embeddings": RowwiseParallel(input_layouts=Replicate()), + "output": ColwiseParallel(input_layouts=Shard(1), output_layouts=Replicate()), + "norm": SequenceParallel(), + "layers.0": PrepareModuleInput( + input_layouts=(Replicate(), None), + desired_input_layouts=(Shard(1), None), + use_local_output=True, + ), + } + model = parallelize_module(model, tp_mesh, plan) + + # Parallelize each transformer block + for transformer_block in model.layers.values(): + plan = { + "attention": PrepareModuleInput( + input_layouts=(Shard(1), None), + desired_input_layouts=(Replicate(), None), + ), + "attention.wq": ColwiseParallel(), + "attention.wk": ColwiseParallel(), + "attention.wv": ColwiseParallel(), + "attention.wo": RowwiseParallel(output_layouts=Shard(1)), + "attention_norm": SequenceParallel(), + "feed_forward": PrepareModuleInput( + input_layouts=(Shard(1),), + desired_input_layouts=(Replicate(),), + ), + "feed_forward.w1": ColwiseParallel(), + "feed_forward.w2": RowwiseParallel(output_layouts=Shard(1)), + "feed_forward.w3": ColwiseParallel(), + "ffn_norm": SequenceParallel(), + } + + # Adjust attention module to use the local number of heads + attn_layer = transformer_block.attention + attn_layer.n_heads = attn_layer.n_heads // tp_mesh.size() + attn_layer.n_kv_heads = attn_layer.n_kv_heads // tp_mesh.size() + + # Apply the plan for the current transformer block + parallelize_module(transformer_block, tp_mesh, plan) + + if dp_mesh.size() > 1: + assert dp_mesh.ndim == 1 # Hybrid-sharding not supported + + # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here + # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. 
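+ # checkpoint_wrapper below drops each block's intermediate activations and recomputes
+ # them during the backward pass, trading extra compute for lower activation memory.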
+ mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) + + fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} + for layer_id, transformer_block in model.layers.items(): + # Apply activation checkpointing + transformer_block = checkpoint_wrapper(transformer_block) + # As an optimization, do not reshard after forward for the last + # transformer block since FSDP would prefetch it immediately + reshard_after_forward = int(layer_id) < len(model.layers) - 1 + fully_shard( + transformer_block, + **fsdp_config, + reshard_after_forward=reshard_after_forward, + ) + model.layers[layer_id] = transformer_block + model = fully_shard(model, **fsdp_config) + + return model diff --git a/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py b/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py index 33ad04970d35..900354258ab8 100644 --- a/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py +++ b/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py @@ -15,6 +15,18 @@ import lightning.pytorch as pl import torch import torch.nn.functional as F +from torch.distributed._composable.fsdp import MixedPrecisionPolicy +from torch.distributed._composable.fsdp.fully_shard import fully_shard +from torch.distributed._tensor import Replicate, Shard +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper +from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + PrepareModuleInput, + RowwiseParallel, + SequenceParallel, + parallelize_module, +) from transformers import AutoConfig, AutoModelForImageTextToText, AutoProcessor from nemo.collections.llm import fn @@ -95,6 +107,10 @@ def configure_model(self): self.model = AutoModelForImageTextToText.from_config( config, torch_dtype=dtype, trust_remote_code=self.trust_remote_code ) + + # Apply FSDP2 and TP to the model + parallelize(self.model, device_mesh=self.device_mesh) + self.model.train() def forward(self, batch): @@ -189,3 +205,87 @@ def extract_skipped_token_ids(tokenizer): if str(val) in PAD_TOKENS: skipped_token_ids.append(key) return torch.IntTensor(list(set(skipped_token_ids))) + + +# Taken and modified from torchtitan +# https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py +def parallelize(model, device_mesh: DeviceMesh): + """Apply parallelisms and activation checkpointing to the model. + NOTE: The passed-in model preferably should be on meta device. Otherwise, + the model must fit on GPU or CPU memory. + """ + + dp_mesh = device_mesh["data_parallel"] + tp_mesh = device_mesh["tensor_parallel"] + + if tp_mesh.size() > 1: + # 1. Parallelize the first embedding and the last linear proj layer + # 2. Parallelize the root norm layer over the sequence dim + # 3. 
Shard the first transformer block's inputs + + # Parallelize the first embedding and the last linear out projection + plan = { + "tok_embeddings": RowwiseParallel(input_layouts=Replicate()), + "output": ColwiseParallel(input_layouts=Shard(1), output_layouts=Replicate()), + "norm": SequenceParallel(), + "layers.0": PrepareModuleInput( + input_layouts=(Replicate(), None), + desired_input_layouts=(Shard(1), None), + use_local_output=True, + ), + } + model = parallelize_module(model, tp_mesh, plan) + + # Parallelize each transformer block + for transformer_block in model.layers.values(): + plan = { + "attention": PrepareModuleInput( + input_layouts=(Shard(1), None), + desired_input_layouts=(Replicate(), None), + ), + "attention.wq": ColwiseParallel(), + "attention.wk": ColwiseParallel(), + "attention.wv": ColwiseParallel(), + "attention.wo": RowwiseParallel(output_layouts=Shard(1)), + "attention_norm": SequenceParallel(), + "feed_forward": PrepareModuleInput( + input_layouts=(Shard(1),), + desired_input_layouts=(Replicate(),), + ), + "feed_forward.w1": ColwiseParallel(), + "feed_forward.w2": RowwiseParallel(output_layouts=Shard(1)), + "feed_forward.w3": ColwiseParallel(), + "ffn_norm": SequenceParallel(), + } + + # Adjust attention module to use the local number of heads + attn_layer = transformer_block.attention + attn_layer.n_heads = attn_layer.n_heads // tp_mesh.size() + attn_layer.n_kv_heads = attn_layer.n_kv_heads // tp_mesh.size() + + # Apply the plan for the current transformer block + parallelize_module(transformer_block, tp_mesh, plan) + + if dp_mesh.size() > 1: + assert dp_mesh.ndim == 1 # Hybrid-sharding not supported + + # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here + # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. 
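+ # (The 1-D mesh assert above rules out hybrid sharding, which shards within a replica
+ # group and replicates across groups and would require a 2-D device mesh.)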
+ mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) + + fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} + for layer_id, transformer_block in model.layers.items(): + # Apply activation checkpointing + transformer_block = checkpoint_wrapper(transformer_block) + # As an optimization, do not reshard after forward for the last + # transformer block since FSDP would prefetch it immediately + reshard_after_forward = int(layer_id) < len(model.layers) - 1 + fully_shard( + transformer_block, + **fsdp_config, + reshard_after_forward=reshard_after_forward, + ) + model.layers[layer_id] = transformer_block + model = fully_shard(model, **fsdp_config) + + return model diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index bfb0db8eb1b7..2c0b01a6d244 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -14,12 +14,13 @@ import nemo_run as run +import nemo.lightning as nl from nemo.collections import llm from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule -DATA_PATH = '/home/TestData/lite/hf_cache/squad/' +DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' import torch from torch.distributed._composable.fsdp import MixedPrecisionPolicy @@ -147,9 +148,9 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut parser = argparse.ArgumentParser() parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) - parser.add_argument('--devices', default=1) + parser.add_argument('--devices', default=8) parser.add_argument('--accelerator', default='gpu', choices=['gpu']) - parser.add_argument('--max-steps', type=int, default=100) + parser.add_argument('--max-steps', type=int, default=1000) args = parser.parse_args() recipe = llm.hf_auto_model_for_causal_lm.finetune_recipe( @@ -171,7 +172,7 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), ) - recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=8, tensor_parallel_size=1) - recipe.trainer.plugins = None + recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=4, tensor_parallel_size=2) + recipe.trainer.plugins=None executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) run.run(recipe, executor=executor) From d53ab1506568c2b07c17198cb22014de46aea4d4 Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Tue, 31 Dec 2024 21:20:53 +0000 Subject: [PATCH 098/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- .../llm/gpt/model/hf_auto_model_for_causal_lm.py | 6 +++--- .../speechlm/models/hf_auto_model_for_speech_seq2seq.py | 2 +- tests/collections/llm/hf/sft_nemorun.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 5f1ec7904c3e..4152c0bad863 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -102,13 +102,13 @@ def configure_model(self): self.model = AutoModelForCausalLM.from_config( config, torch_dtype=dtype, trust_remote_code=self.trust_remote_code ) - + # Apply 
FSDP2 and TP to the model parallelize(self.model, device_mesh=self.device_mesh) if self.model_accelerator is not None: self.model_accelerator(self.model) - + print(self.model) self.model.train() @@ -268,7 +268,7 @@ def parallelize(model, device_mesh: DeviceMesh): ) model.model.layers[layer_id] = transformer_block model = fully_shard(model, **fsdp_config) - + print("here") return model diff --git a/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py b/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py index 35940ed72a18..5c43c93bf54c 100644 --- a/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py +++ b/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py @@ -105,7 +105,7 @@ def configure_model(self, train=True): config = AutoConfig.from_pretrained(self.model_name, trust_remote_code=self.trust_remote_code) self.model = AutoModelForSpeechSeq2Seq.from_config(config, trust_remote_code=self.trust_remote_code) - + # Apply FSDP2 and TP to the model parallelize(self.model, device_mesh=self.device_mesh) diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index 2c0b01a6d244..cb33d52871af 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -173,6 +173,6 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut ) recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=4, tensor_parallel_size=2) - recipe.trainer.plugins=None + recipe.trainer.plugins = None executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) run.run(recipe, executor=executor) From a1f08a2e2634447feccbd1e4ea7ee8f50604609a Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Thu, 2 Jan 2025 14:51:08 -0800 Subject: [PATCH 099/128] Add test --- .../gpt/model/hf_auto_model_for_causal_lm.py | 62 ++++++------ tests/collections/llm/hf/sft_nemorun.py | 94 +------------------ .../pytorch/strategies/test_fsdp2_strategy.py | 42 +++++++++ 3 files changed, 75 insertions(+), 123 deletions(-) create mode 100644 tests/lightning/pytorch/strategies/test_fsdp2_strategy.py diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 4152c0bad863..cdf22c048904 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -108,9 +108,7 @@ def configure_model(self): if self.model_accelerator is not None: self.model_accelerator(self.model) - - print(self.model) - + self.model.train() def forward(self, batch): @@ -194,8 +192,7 @@ def parallelize(model, device_mesh: DeviceMesh): dp_mesh = device_mesh["data_parallel"] tp_mesh = device_mesh["tensor_parallel"] - print(dp_mesh) - print(tp_mesh) + print(model) if tp_mesh.size() > 1: # 1. 
Parallelize the first embedding and the last linear proj layer @@ -204,39 +201,43 @@ def parallelize(model, device_mesh: DeviceMesh): # Parallelize the first embedding and the last linear out projection plan = { - "tok_embeddings": RowwiseParallel(input_layouts=Replicate()), - "output": ColwiseParallel(input_layouts=Shard(1), output_layouts=Replicate()), - "norm": SequenceParallel(), - "layers.0": PrepareModuleInput( - input_layouts=(Replicate(), None), - desired_input_layouts=(Shard(1), None), + "model.embed_tokens": RowwiseParallel(input_layouts=Replicate()), + "lm_head": ColwiseParallel( + input_layouts=Shard(1), + # Optional: Shard the output along the class dimension to compute the loss in parallel. + # See `loss_parallel` in `train.py` + output_layouts=Shard(-1), + use_local_output=False, + ), + "model.norm": SequenceParallel(), + "model.layers.0": PrepareModuleInput( + input_layouts=(Replicate()), + desired_input_layouts=(Shard(1)), use_local_output=True, ), } model = parallelize_module(model, tp_mesh, plan) - print(model.model.layers) - # Parallelize each transformer block for transformer_block in model.model.layers: plan = { - "attention": PrepareModuleInput( - input_layouts=(Shard(1), None), - desired_input_layouts=(Replicate(), None), + "self_attn": PrepareModuleInput( + input_layouts=(Shard(1)), + desired_input_layouts=(Replicate()), ), - "attention.wq": ColwiseParallel(), - "attention.wk": ColwiseParallel(), - "attention.wv": ColwiseParallel(), - "attention.wo": RowwiseParallel(output_layouts=Shard(1)), - "attention_norm": SequenceParallel(), - "feed_forward": PrepareModuleInput( - input_layouts=(Shard(1),), - desired_input_layouts=(Replicate(),), + "self_attn.q_proj": ColwiseParallel(), + "self_attn.k_proj": ColwiseParallel(), + "self_attn.v_proj": ColwiseParallel(), + "self_attn.o_proj": RowwiseParallel(output_layouts=Shard(1)), + "mlp": PrepareModuleInput( + input_layouts=Shard(1), + desired_input_layouts=Replicate(), ), - "feed_forward.w1": ColwiseParallel(), - "feed_forward.w2": RowwiseParallel(output_layouts=Shard(1)), - "feed_forward.w3": ColwiseParallel(), - "ffn_norm": SequenceParallel(), + "mlp.gate_proj": ColwiseParallel(), + "mlp.up_proj": ColwiseParallel(), + "mlp.down_proj": RowwiseParallel(output_layouts=Shard(1)), + "input_layernorm": SequenceParallel(), + "post_attention_layernorm": SequenceParallel(), } # Adjust attention module to use the local number of heads @@ -257,7 +258,7 @@ def parallelize(model, device_mesh: DeviceMesh): fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} for layer_id, transformer_block in enumerate(model.model.layers): # Apply activation checkpointing - transformer_block = checkpoint_wrapper(transformer_block) + # transformer_block = checkpoint_wrapper(transformer_block) # As an optimization, do not reshard after forward for the last # transformer block since FSDP would prefetch it immediately reshard_after_forward = int(layer_id) < len(model.model.layers) - 1 @@ -268,7 +269,6 @@ def parallelize(model, device_mesh: DeviceMesh): ) model.model.layers[layer_id] = transformer_block model = fully_shard(model, **fsdp_config) - - print("here") + return model diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index cb33d52871af..ec3118a74480 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -37,96 +37,6 @@ ) -def parallelize(model, device_mesh: DeviceMesh): - """Apply parallelisms and activation checkpointing to the model. 
- - NOTE: The passed-in model preferably should be on meta device. Otherwise, - the model must fit on GPU or CPU memory. - - """ - - dp_mesh = device_mesh["data_parallel"] - tp_mesh = device_mesh["tensor_parallel"] - - if tp_mesh.size() > 1: - # 1. Parallelize the first embedding and the last linear proj layer - # 2. Parallelize the root norm layer over the sequence dim - # 3. Shard the first transformer block's inputs - - # Parallelize the first embedding and the last linear out projection - plan = { - "tok_embeddings": RowwiseParallel(input_layouts=Replicate()), - "output": ColwiseParallel( - input_layouts=Shard(1), - # Optional: Shard the output along the class dimension to compute the loss in parallel. - # See `loss_parallel` in `train.py` - output_layouts=Shard(-1), - use_local_output=False, - ), - "norm": SequenceParallel(), - "layers.0": PrepareModuleInput( - input_layouts=(Replicate(), None), - desired_input_layouts=(Shard(1), None), - use_local_output=True, - ), - } - model = parallelize_module(model, tp_mesh, plan) - - # Parallelize each transformer block - for transformer_block in model.layers.values(): - plan = { - "attention": PrepareModuleInput( - input_layouts=(Shard(1), None), - desired_input_layouts=(Replicate(), None), - ), - "attention.wq": ColwiseParallel(), - "attention.wk": ColwiseParallel(), - "attention.wv": ColwiseParallel(), - "attention.wo": RowwiseParallel(output_layouts=Shard(1)), - "attention_norm": SequenceParallel(), - "feed_forward": PrepareModuleInput( - input_layouts=(Shard(1),), - desired_input_layouts=(Replicate(),), - ), - "feed_forward.w1": ColwiseParallel(), - "feed_forward.w2": RowwiseParallel(output_layouts=Shard(1)), - "feed_forward.w3": ColwiseParallel(), - "ffn_norm": SequenceParallel(), - } - - # Adjust attention module to use the local number of heads - attn_layer = transformer_block.attention - attn_layer.n_heads = attn_layer.n_heads // tp_mesh.size() - attn_layer.n_kv_heads = attn_layer.n_kv_heads // tp_mesh.size() - - # Apply the plan for the current transformer block - parallelize_module(transformer_block, tp_mesh, plan) - - if dp_mesh.size() > 1: - assert dp_mesh.ndim == 1 # Hybrid-sharding not supported - - # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here - # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. 
- mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) - - fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} - for layer_id, transformer_block in model.layers.items(): - # Apply activation checkpointing - transformer_block = checkpoint_wrapper(transformer_block) - # As an optimization, do not reshard after forward for the last - # transformer block since FSDP would prefetch it immediately - reshard_after_forward = int(layer_id) < len(model.layers) - 1 - fully_shard( - transformer_block, - **fsdp_config, - reshard_after_forward=reshard_after_forward, - ) - model.layers[layer_id] = transformer_block - model = fully_shard(model, **fsdp_config) - - return model - - def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: # Env vars for jobs are configured here env_vars = { @@ -146,9 +56,9 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut import argparse parser = argparse.ArgumentParser() - parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--model', default='meta-llama/Llama-3.1-8B') parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) - parser.add_argument('--devices', default=8) + parser.add_argument('--devices', default=2) parser.add_argument('--accelerator', default='gpu', choices=['gpu']) parser.add_argument('--max-steps', type=int, default=1000) args = parser.parse_args() diff --git a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py new file mode 100644 index 000000000000..e8f1c9e54314 --- /dev/null +++ b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py @@ -0,0 +1,42 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest.mock import patch + +from nemo.lightning.pytorch.strategies import FSDP2Strategy + + +class TestMegatronStrategy: + @patch('nemo.lightning.pytorch.strategies.megatron_strategy.create_checkpoint_io') + def test_checkpoint_io(self, mock_create_checkpoint_io): + class Dummy: ... 
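+ # Dummy stands in for the object returned by create_checkpoint_io, so the test can check
+ # that the strategy creates it once, caches it, and allows it to be overridden.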
+ + mock_create_checkpoint_io.side_effect = lambda *args, **kwargs: Dummy() + strategy = FSDP2Strategy() + + first_io = strategy.checkpoint_io + mock_create_checkpoint_io.assert_called_once() + + assert first_io == strategy.checkpoint_io + + new_io = object() + strategy.checkpoint_io = new_io + assert new_io == strategy.checkpoint_io + + strategy2 = FSDP2Strategy() + second_io = strategy2.checkpoint_io + mock_create_checkpoint_io.assert_called() + + assert first_io != second_io + assert second_io == strategy2.checkpoint_io From ae1104e5718d53afa59e7a67f77c2e957c0bf788 Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Thu, 2 Jan 2025 22:52:28 +0000 Subject: [PATCH 100/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index cdf22c048904..7d30d31a3bee 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -108,7 +108,7 @@ def configure_model(self): if self.model_accelerator is not None: self.model_accelerator(self.model) - + self.model.train() def forward(self, batch): @@ -269,6 +269,5 @@ def parallelize(model, device_mesh: DeviceMesh): ) model.model.layers[layer_id] = transformer_block model = fully_shard(model, **fsdp_config) - return model From a31f36757101f13b251f9f075812af1543a9e968 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Thu, 2 Jan 2025 16:12:00 -0800 Subject: [PATCH 101/128] Add fsdp2 support on hf_auto_model_for_causal_lm --- .../gpt/model/hf_auto_model_for_causal_lm.py | 56 +------------------ tests/collections/llm/hf/sft_nemorun.py | 7 +-- 2 files changed, 4 insertions(+), 59 deletions(-) diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 7d30d31a3bee..ef2a0683415b 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -192,61 +192,7 @@ def parallelize(model, device_mesh: DeviceMesh): dp_mesh = device_mesh["data_parallel"] tp_mesh = device_mesh["tensor_parallel"] - print(model) - - if tp_mesh.size() > 1: - # 1. Parallelize the first embedding and the last linear proj layer - # 2. Parallelize the root norm layer over the sequence dim - # 3. Shard the first transformer block's inputs - - # Parallelize the first embedding and the last linear out projection - plan = { - "model.embed_tokens": RowwiseParallel(input_layouts=Replicate()), - "lm_head": ColwiseParallel( - input_layouts=Shard(1), - # Optional: Shard the output along the class dimension to compute the loss in parallel. 
- # See `loss_parallel` in `train.py` - output_layouts=Shard(-1), - use_local_output=False, - ), - "model.norm": SequenceParallel(), - "model.layers.0": PrepareModuleInput( - input_layouts=(Replicate()), - desired_input_layouts=(Shard(1)), - use_local_output=True, - ), - } - model = parallelize_module(model, tp_mesh, plan) - - # Parallelize each transformer block - for transformer_block in model.model.layers: - plan = { - "self_attn": PrepareModuleInput( - input_layouts=(Shard(1)), - desired_input_layouts=(Replicate()), - ), - "self_attn.q_proj": ColwiseParallel(), - "self_attn.k_proj": ColwiseParallel(), - "self_attn.v_proj": ColwiseParallel(), - "self_attn.o_proj": RowwiseParallel(output_layouts=Shard(1)), - "mlp": PrepareModuleInput( - input_layouts=Shard(1), - desired_input_layouts=Replicate(), - ), - "mlp.gate_proj": ColwiseParallel(), - "mlp.up_proj": ColwiseParallel(), - "mlp.down_proj": RowwiseParallel(output_layouts=Shard(1)), - "input_layernorm": SequenceParallel(), - "post_attention_layernorm": SequenceParallel(), - } - - # Adjust attention module to use the local number of heads - attn_layer = transformer_block.self_attn - attn_layer.num_heads = attn_layer.num_heads // tp_mesh.size() - attn_layer.num_key_value_heads = attn_layer.num_key_value_heads // tp_mesh.size() - - # Apply the plan for the current transformer block - parallelize_module(transformer_block, tp_mesh, plan) + assert tp_mesh.size() == 1, "Tensor parallelism is not supported yet in this model." if dp_mesh.size() > 1: assert dp_mesh.ndim == 1 # Hybrid-sharding not supported diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index ec3118a74480..168a162b4db8 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -56,9 +56,8 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut import argparse parser = argparse.ArgumentParser() - parser.add_argument('--model', default='meta-llama/Llama-3.1-8B') - parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) - parser.add_argument('--devices', default=2) + parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--devices', default=8) parser.add_argument('--accelerator', default='gpu', choices=['gpu']) parser.add_argument('--max-steps', type=int, default=1000) args = parser.parse_args() @@ -82,7 +81,7 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), ) - recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=4, tensor_parallel_size=2) + recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=8, tensor_parallel_size=1) recipe.trainer.plugins = None executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) run.run(recipe, executor=executor) From b5014b8793fd722cd50899149749dd4100b7ad69 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Thu, 2 Jan 2025 17:49:25 -0800 Subject: [PATCH 102/128] REvert model hooks for fsdp2 sharding --- .../hf_auto_model_for_speech_seq2seq.py | 99 ----------------- .../hf_auto_model_for_image_text_to_text.py | 100 ------------------ 2 files changed, 199 deletions(-) diff --git a/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py b/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py index 5c43c93bf54c..a039edc66a39 100644 --- 
a/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py +++ b/nemo/collections/speechlm/models/hf_auto_model_for_speech_seq2seq.py @@ -15,18 +15,6 @@ import lightning.pytorch as pl import torch import torch.nn.functional as F -from torch.distributed._composable.fsdp import MixedPrecisionPolicy -from torch.distributed._composable.fsdp.fully_shard import fully_shard -from torch.distributed._tensor import Replicate, Shard -from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper -from torch.distributed.device_mesh import DeviceMesh -from torch.distributed.tensor.parallel import ( - ColwiseParallel, - PrepareModuleInput, - RowwiseParallel, - SequenceParallel, - parallelize_module, -) from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer @@ -106,9 +94,6 @@ def configure_model(self, train=True): config = AutoConfig.from_pretrained(self.model_name, trust_remote_code=self.trust_remote_code) self.model = AutoModelForSpeechSeq2Seq.from_config(config, trust_remote_code=self.trust_remote_code) - # Apply FSDP2 and TP to the model - parallelize(self.model, device_mesh=self.device_mesh) - if train: self.model.train() @@ -148,87 +133,3 @@ def save_pretrained(self, path): self._processor.save_pretrained(path) else: logging.warning("A processor wasn't created before to save.") - - -# Taken and modified from torchtitan -# https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py -def parallelize(model, device_mesh: DeviceMesh): - """Apply parallelisms and activation checkpointing to the model. - NOTE: The passed-in model preferably should be on meta device. Otherwise, - the model must fit on GPU or CPU memory. - """ - - dp_mesh = device_mesh["data_parallel"] - tp_mesh = device_mesh["tensor_parallel"] - - if tp_mesh.size() > 1: - # 1. Parallelize the first embedding and the last linear proj layer - # 2. Parallelize the root norm layer over the sequence dim - # 3. 
Shard the first transformer block's inputs - - # Parallelize the first embedding and the last linear out projection - plan = { - "tok_embeddings": RowwiseParallel(input_layouts=Replicate()), - "output": ColwiseParallel(input_layouts=Shard(1), output_layouts=Replicate()), - "norm": SequenceParallel(), - "layers.0": PrepareModuleInput( - input_layouts=(Replicate(), None), - desired_input_layouts=(Shard(1), None), - use_local_output=True, - ), - } - model = parallelize_module(model, tp_mesh, plan) - - # Parallelize each transformer block - for transformer_block in model.layers.values(): - plan = { - "attention": PrepareModuleInput( - input_layouts=(Shard(1), None), - desired_input_layouts=(Replicate(), None), - ), - "attention.wq": ColwiseParallel(), - "attention.wk": ColwiseParallel(), - "attention.wv": ColwiseParallel(), - "attention.wo": RowwiseParallel(output_layouts=Shard(1)), - "attention_norm": SequenceParallel(), - "feed_forward": PrepareModuleInput( - input_layouts=(Shard(1),), - desired_input_layouts=(Replicate(),), - ), - "feed_forward.w1": ColwiseParallel(), - "feed_forward.w2": RowwiseParallel(output_layouts=Shard(1)), - "feed_forward.w3": ColwiseParallel(), - "ffn_norm": SequenceParallel(), - } - - # Adjust attention module to use the local number of heads - attn_layer = transformer_block.attention - attn_layer.n_heads = attn_layer.n_heads // tp_mesh.size() - attn_layer.n_kv_heads = attn_layer.n_kv_heads // tp_mesh.size() - - # Apply the plan for the current transformer block - parallelize_module(transformer_block, tp_mesh, plan) - - if dp_mesh.size() > 1: - assert dp_mesh.ndim == 1 # Hybrid-sharding not supported - - # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here - # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. 
- mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) - - fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} - for layer_id, transformer_block in model.layers.items(): - # Apply activation checkpointing - transformer_block = checkpoint_wrapper(transformer_block) - # As an optimization, do not reshard after forward for the last - # transformer block since FSDP would prefetch it immediately - reshard_after_forward = int(layer_id) < len(model.layers) - 1 - fully_shard( - transformer_block, - **fsdp_config, - reshard_after_forward=reshard_after_forward, - ) - model.layers[layer_id] = transformer_block - model = fully_shard(model, **fsdp_config) - - return model diff --git a/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py b/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py index 900354258ab8..33ad04970d35 100644 --- a/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py +++ b/nemo/collections/vlm/hf/model/hf_auto_model_for_image_text_to_text.py @@ -15,18 +15,6 @@ import lightning.pytorch as pl import torch import torch.nn.functional as F -from torch.distributed._composable.fsdp import MixedPrecisionPolicy -from torch.distributed._composable.fsdp.fully_shard import fully_shard -from torch.distributed._tensor import Replicate, Shard -from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper -from torch.distributed.device_mesh import DeviceMesh -from torch.distributed.tensor.parallel import ( - ColwiseParallel, - PrepareModuleInput, - RowwiseParallel, - SequenceParallel, - parallelize_module, -) from transformers import AutoConfig, AutoModelForImageTextToText, AutoProcessor from nemo.collections.llm import fn @@ -107,10 +95,6 @@ def configure_model(self): self.model = AutoModelForImageTextToText.from_config( config, torch_dtype=dtype, trust_remote_code=self.trust_remote_code ) - - # Apply FSDP2 and TP to the model - parallelize(self.model, device_mesh=self.device_mesh) - self.model.train() def forward(self, batch): @@ -205,87 +189,3 @@ def extract_skipped_token_ids(tokenizer): if str(val) in PAD_TOKENS: skipped_token_ids.append(key) return torch.IntTensor(list(set(skipped_token_ids))) - - -# Taken and modified from torchtitan -# https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py -def parallelize(model, device_mesh: DeviceMesh): - """Apply parallelisms and activation checkpointing to the model. - NOTE: The passed-in model preferably should be on meta device. Otherwise, - the model must fit on GPU or CPU memory. - """ - - dp_mesh = device_mesh["data_parallel"] - tp_mesh = device_mesh["tensor_parallel"] - - if tp_mesh.size() > 1: - # 1. Parallelize the first embedding and the last linear proj layer - # 2. Parallelize the root norm layer over the sequence dim - # 3. 
Shard the first transformer block's inputs - - # Parallelize the first embedding and the last linear out projection - plan = { - "tok_embeddings": RowwiseParallel(input_layouts=Replicate()), - "output": ColwiseParallel(input_layouts=Shard(1), output_layouts=Replicate()), - "norm": SequenceParallel(), - "layers.0": PrepareModuleInput( - input_layouts=(Replicate(), None), - desired_input_layouts=(Shard(1), None), - use_local_output=True, - ), - } - model = parallelize_module(model, tp_mesh, plan) - - # Parallelize each transformer block - for transformer_block in model.layers.values(): - plan = { - "attention": PrepareModuleInput( - input_layouts=(Shard(1), None), - desired_input_layouts=(Replicate(), None), - ), - "attention.wq": ColwiseParallel(), - "attention.wk": ColwiseParallel(), - "attention.wv": ColwiseParallel(), - "attention.wo": RowwiseParallel(output_layouts=Shard(1)), - "attention_norm": SequenceParallel(), - "feed_forward": PrepareModuleInput( - input_layouts=(Shard(1),), - desired_input_layouts=(Replicate(),), - ), - "feed_forward.w1": ColwiseParallel(), - "feed_forward.w2": RowwiseParallel(output_layouts=Shard(1)), - "feed_forward.w3": ColwiseParallel(), - "ffn_norm": SequenceParallel(), - } - - # Adjust attention module to use the local number of heads - attn_layer = transformer_block.attention - attn_layer.n_heads = attn_layer.n_heads // tp_mesh.size() - attn_layer.n_kv_heads = attn_layer.n_kv_heads // tp_mesh.size() - - # Apply the plan for the current transformer block - parallelize_module(transformer_block, tp_mesh, plan) - - if dp_mesh.size() > 1: - assert dp_mesh.ndim == 1 # Hybrid-sharding not supported - - # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here - # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. 
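
The data-parallel branch of the helper removed here (and the FSDP2Strategy that replaces it) builds on the torch 2.4+ composable fully_shard API with an explicit MixedPrecisionPolicy. A minimal sketch of that pattern, using a placeholder linear layer and a hypothetical 2-GPU mesh rather than anything from this patch:

import torch
from torch.distributed._composable.fsdp import MixedPrecisionPolicy
from torch.distributed._composable.fsdp.fully_shard import fully_shard
from torch.distributed.device_mesh import init_device_mesh

# Assumes a torchrun-launched job with 2 GPUs.
dp_mesh = init_device_mesh("cuda", (2,), mesh_dim_names=("data_parallel",))
mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)
layer = torch.nn.Linear(16, 16, device="cuda")
# Parameters become DTensors sharded over dp_mesh; compute runs in bf16, gradient reduction in fp32.
fully_shard(layer, mesh=dp_mesh, mp_policy=mp_policy)
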
- mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) - - fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} - for layer_id, transformer_block in model.layers.items(): - # Apply activation checkpointing - transformer_block = checkpoint_wrapper(transformer_block) - # As an optimization, do not reshard after forward for the last - # transformer block since FSDP would prefetch it immediately - reshard_after_forward = int(layer_id) < len(model.layers) - 1 - fully_shard( - transformer_block, - **fsdp_config, - reshard_after_forward=reshard_after_forward, - ) - model.layers[layer_id] = transformer_block - model = fully_shard(model, **fsdp_config) - - return model From 1d9257a7d985655cf50c401ddd03a458973d68e8 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Thu, 2 Jan 2025 17:51:44 -0800 Subject: [PATCH 103/128] REvert some test changes --- .../gpt/model/hf_auto_model_for_causal_lm.py | 9 -- .../pytorch/strategies/fsdp2_strategy.py | 1 - tests/collections/llm/hf/sft_nemorun.py | 16 +--- tests/collections/llm/hf/sft_nemorun_fsdp2.py | 87 +++++++++++++++++++ 4 files changed, 88 insertions(+), 25 deletions(-) create mode 100644 tests/collections/llm/hf/sft_nemorun_fsdp2.py diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index ef2a0683415b..961e10a866d3 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -17,16 +17,7 @@ import torch.nn.functional as F from torch.distributed._composable.fsdp import MixedPrecisionPolicy from torch.distributed._composable.fsdp.fully_shard import fully_shard -from torch.distributed._tensor import Replicate, Shard -from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper from torch.distributed.device_mesh import DeviceMesh -from torch.distributed.tensor.parallel import ( - ColwiseParallel, - PrepareModuleInput, - RowwiseParallel, - SequenceParallel, - parallelize_module, -) from transformers import AutoModelForCausalLM from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer diff --git a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py index 7e33ffa37849..6fd4fa258563 100644 --- a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py +++ b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py @@ -25,7 +25,6 @@ from lightning.pytorch.strategies.model_parallel import ModelParallelStrategy as PLModelParallelStrategy from lightning.pytorch.trainer.states import TrainerFn from lightning.pytorch.utilities.types import STEP_OUTPUT -from megatron.core.transformer.transformer_layer import TransformerLayer from torch.distributed.checkpoint.state_dict import ( # get_state_dict, StateDictOptions, get_optimizer_state_dict, diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index 168a162b4db8..a1f60b5b70a0 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -20,21 +20,7 @@ from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule -DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' - -import torch -from torch.distributed._composable.fsdp import MixedPrecisionPolicy -from torch.distributed._composable.fsdp.fully_shard import fully_shard -from torch.distributed._tensor import Replicate, Shard -from 
torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper -from torch.distributed.device_mesh import DeviceMesh -from torch.distributed.tensor.parallel import ( - ColwiseParallel, - PrepareModuleInput, - RowwiseParallel, - SequenceParallel, - parallelize_module, -) +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py new file mode 100644 index 000000000000..6fb42e6f9001 --- /dev/null +++ b/tests/collections/llm/hf/sft_nemorun_fsdp2.py @@ -0,0 +1,87 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import nemo_run as run + +import nemo.lightning as nl +from nemo.collections import llm +from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer +from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule + + +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' + +import torch +from torch.distributed._composable.fsdp import MixedPrecisionPolicy +from torch.distributed._composable.fsdp.fully_shard import fully_shard +from torch.distributed._tensor import Replicate, Shard +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper +from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + PrepareModuleInput, + RowwiseParallel, + SequenceParallel, + parallelize_module, +) + + +def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: + # Env vars for jobs are configured here + env_vars = { + "TORCH_NCCL_AVOID_RECORD_STREAMS": "1", + "NCCL_NVLS_ENABLE": "0", + "NVTE_DP_AMAX_REDUCE_INTERVAL": "0", + "NVTE_ASYNC_AMAX_REDUCTION": "1", + "NVTE_FUSED_ATTN": "0", + } + + executor = run.LocalExecutor(ntasks_per_node=devices, launcher="torchrun", env_vars=env_vars) + + return executor + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--devices', default=2) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--max-steps', type=int, default=1000) + args = parser.parse_args() + + recipe = llm.hf_auto_model_for_causal_lm.finetune_recipe( + model_name=args.model, + name="sft", + num_nodes=1, + num_gpus_per_node=args.devices, + peft_scheme='none', + max_steps=args.max_steps, + ) + recipe.trainer.val_check_interval = 50 + + tokenizer = llm.HFAutoModelForCausalLM.configure_tokenizer(args.model) + recipe.data = run.Config( + SquadHFDataModule, + path_or_dataset=DATA_PATH, + split="train[:100]", + pad_token_id=tokenizer.tokenizer.eos_token_id, + tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), + ) + + recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=1, 
tensor_parallel_size=2) + recipe.trainer.plugins = None + executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) + run.run(recipe, executor=executor) From 27ffb82644f538429da1e40b852128146b9792fb Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Thu, 2 Jan 2025 17:52:44 -0800 Subject: [PATCH 104/128] REvert tesst --- tests/collections/llm/hf/sft_nemorun.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index a1f60b5b70a0..406505db9107 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -14,7 +14,6 @@ import nemo_run as run -import nemo.lightning as nl from nemo.collections import llm from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule @@ -43,9 +42,9 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut parser = argparse.ArgumentParser() parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') - parser.add_argument('--devices', default=8) - parser.add_argument('--accelerator', default='gpu', choices=['gpu']) - parser.add_argument('--max-steps', type=int, default=1000) + parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) + parser.add_argument('--devices', default=1) + parser.add_argument('--max-steps', type=int, default=100) args = parser.parse_args() recipe = llm.hf_auto_model_for_causal_lm.finetune_recipe( @@ -66,8 +65,5 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut pad_token_id=tokenizer.tokenizer.eos_token_id, tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), ) - - recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=8, tensor_parallel_size=1) - recipe.trainer.plugins = None executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) - run.run(recipe, executor=executor) + run.run(recipe, executor=executor) \ No newline at end of file From 346554d36d290a32d2d3c64cad0878d76baa1b6f Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Thu, 2 Jan 2025 17:53:16 -0800 Subject: [PATCH 105/128] Add line --- tests/collections/llm/hf/sft_nemorun.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index 406505db9107..a0cf24341b00 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -66,4 +66,4 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), ) executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) - run.run(recipe, executor=executor) \ No newline at end of file + run.run(recipe, executor=executor) From 7cd8186eb4ea66955698d069ec576f5909ab5397 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Thu, 2 Jan 2025 22:14:29 -0800 Subject: [PATCH 106/128] Fix test --- tests/lightning/pytorch/strategies/test_fsdp2_strategy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py index e8f1c9e54314..9468f0761756 100644 --- a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py 
+++ b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py @@ -17,8 +17,8 @@ from nemo.lightning.pytorch.strategies import FSDP2Strategy -class TestMegatronStrategy: - @patch('nemo.lightning.pytorch.strategies.megatron_strategy.create_checkpoint_io') +class TestFSDP2Strategy: + @patch('nemo.lightning.pytorch.strategies.fsdp2_strategy.create_checkpoint_io') def test_checkpoint_io(self, mock_create_checkpoint_io): class Dummy: ... From 0a10868e316f329ecc368a39b240089089773d88 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Thu, 2 Jan 2025 22:28:16 -0800 Subject: [PATCH 107/128] fix test --- tests/collections/llm/hf/sft_nemorun_fsdp2.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py index 6fb42e6f9001..578e7b31219b 100644 --- a/tests/collections/llm/hf/sft_nemorun_fsdp2.py +++ b/tests/collections/llm/hf/sft_nemorun_fsdp2.py @@ -22,20 +22,6 @@ DATA_PATH = '/home/TestData/lite/hf_cache/squad/' -import torch -from torch.distributed._composable.fsdp import MixedPrecisionPolicy -from torch.distributed._composable.fsdp.fully_shard import fully_shard -from torch.distributed._tensor import Replicate, Shard -from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper -from torch.distributed.device_mesh import DeviceMesh -from torch.distributed.tensor.parallel import ( - ColwiseParallel, - PrepareModuleInput, - RowwiseParallel, - SequenceParallel, - parallelize_module, -) - def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: # Env vars for jobs are configured here From 03110a96130fa304db1a7e33c229da10607803c9 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Fri, 3 Jan 2025 14:19:53 -0800 Subject: [PATCH 108/128] Fix test --- .../pytorch/strategies/test_fsdp2_strategy.py | 50 ++++++++++++------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py index 9468f0761756..11eda2a09ca7 100644 --- a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py +++ b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py @@ -16,27 +16,43 @@ from nemo.lightning.pytorch.strategies import FSDP2Strategy +def get_torch_version(): + """Get pytorch version from __version__; if not available use pip's. Use caching.""" -class TestFSDP2Strategy: - @patch('nemo.lightning.pytorch.strategies.fsdp2_strategy.create_checkpoint_io') - def test_checkpoint_io(self, mock_create_checkpoint_io): - class Dummy: ... + def get_torch_version_str(): + import torch - mock_create_checkpoint_io.side_effect = lambda *args, **kwargs: Dummy() - strategy = FSDP2Strategy() + if hasattr(torch, '__version__'): + return str(torch.__version__) + else: + return version("torch") - first_io = strategy.checkpoint_io - mock_create_checkpoint_io.assert_called_once() + global _torch_version + if _torch_version is None: + _torch_version = PkgVersion(get_torch_version_str()) + return _torch_version - assert first_io == strategy.checkpoint_io +if get_torch_version() >= PkgVersion("2.4"): + class TestFSDP2Strategy: + @patch('nemo.lightning.pytorch.strategies.fsdp2_strategy.create_checkpoint_io') + def test_checkpoint_io(self, mock_create_checkpoint_io): + class Dummy: ... 
- new_io = object() - strategy.checkpoint_io = new_io - assert new_io == strategy.checkpoint_io + mock_create_checkpoint_io.side_effect = lambda *args, **kwargs: Dummy() + strategy = FSDP2Strategy() - strategy2 = FSDP2Strategy() - second_io = strategy2.checkpoint_io - mock_create_checkpoint_io.assert_called() + first_io = strategy.checkpoint_io + mock_create_checkpoint_io.assert_called_once() - assert first_io != second_io - assert second_io == strategy2.checkpoint_io + assert first_io == strategy.checkpoint_io + + new_io = object() + strategy.checkpoint_io = new_io + assert new_io == strategy.checkpoint_io + + strategy2 = FSDP2Strategy() + second_io = strategy2.checkpoint_io + mock_create_checkpoint_io.assert_called() + + assert first_io != second_io + assert second_io == strategy2.checkpoint_io From bb52f1742fa0b166e08835c83e7318bbf839759b Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Fri, 3 Jan 2025 22:20:50 +0000 Subject: [PATCH 109/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- tests/lightning/pytorch/strategies/test_fsdp2_strategy.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py index 11eda2a09ca7..6eaa77d10403 100644 --- a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py +++ b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py @@ -16,6 +16,7 @@ from nemo.lightning.pytorch.strategies import FSDP2Strategy + def get_torch_version(): """Get pytorch version from __version__; if not available use pip's. Use caching.""" @@ -32,7 +33,9 @@ def get_torch_version_str(): _torch_version = PkgVersion(get_torch_version_str()) return _torch_version + if get_torch_version() >= PkgVersion("2.4"): + class TestFSDP2Strategy: @patch('nemo.lightning.pytorch.strategies.fsdp2_strategy.create_checkpoint_io') def test_checkpoint_io(self, mock_create_checkpoint_io): From 648d8304598e1bf5c328d85fcfd26d233b332a83 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Fri, 3 Jan 2025 14:22:51 -0800 Subject: [PATCH 110/128] Add CI test --- .github/workflows/cicd-main.yml | 11 +++++++++++ tests/collections/llm/hf/sft_nemorun_fsdp2.py | 6 +++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index e9fcc7276d6c..356385b9d2ea 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -3702,6 +3702,17 @@ jobs: TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft_nemorun.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --devices 2 --strategy ddp AFTER_SCRIPT: | rm -rf nemo_experiments + + L2_HF_Transformer_SFT_2gpu_nemorun_fsdp2: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_SFT_2gpu_nemorun_fsdp2') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft_nemorun_fsdp2.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --devices 2 + AFTER_SCRIPT: | + rm -rf nemo_experiments L2_HF_Transformer_SFT: needs: [ cicd-test-container-setup ] diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py index 578e7b31219b..962a8370b878 100644 --- a/tests/collections/llm/hf/sft_nemorun_fsdp2.py +++ b/tests/collections/llm/hf/sft_nemorun_fsdp2.py 
@@ -20,7 +20,7 @@ from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule -DATA_PATH = '/home/TestData/lite/hf_cache/squad/' +DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: @@ -45,7 +45,7 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') parser.add_argument('--devices', default=2) parser.add_argument('--accelerator', default='gpu', choices=['gpu']) - parser.add_argument('--max-steps', type=int, default=1000) + parser.add_argument('--max-steps', type=int, default=100) args = parser.parse_args() recipe = llm.hf_auto_model_for_causal_lm.finetune_recipe( @@ -67,7 +67,7 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), ) - recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=1, tensor_parallel_size=2) + recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=2, tensor_parallel_size=1) recipe.trainer.plugins = None executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) run.run(recipe, executor=executor) From 752ff1afbb18143078a39c76ee3e65b1934ce986 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Fri, 3 Jan 2025 14:35:26 -0800 Subject: [PATCH 111/128] Revert test change --- tests/collections/llm/hf/peft_nemorun.py | 1 + tests/collections/llm/hf/sft_nemorun.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/collections/llm/hf/peft_nemorun.py b/tests/collections/llm/hf/peft_nemorun.py index b36802cfba3f..ef34d4d39a11 100644 --- a/tests/collections/llm/hf/peft_nemorun.py +++ b/tests/collections/llm/hf/peft_nemorun.py @@ -41,6 +41,7 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut parser = argparse.ArgumentParser() parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) parser.add_argument('--devices', default=1) parser.add_argument('--accelerator', default='gpu', choices=['gpu']) parser.add_argument('--max-steps', type=int, default=100) diff --git a/tests/collections/llm/hf/sft_nemorun.py b/tests/collections/llm/hf/sft_nemorun.py index a0cf24341b00..a3daa66ca774 100644 --- a/tests/collections/llm/hf/sft_nemorun.py +++ b/tests/collections/llm/hf/sft_nemorun.py @@ -44,6 +44,7 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') parser.add_argument('--strategy', type=str, default='auto', choices=['auto', 'ddp', 'fsdp']) parser.add_argument('--devices', default=1) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) parser.add_argument('--max-steps', type=int, default=100) args = parser.parse_args() From 6650a1fdc75e69234e7547c2427ac18040aeb1ef Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Fri, 3 Jan 2025 15:35:24 -0800 Subject: [PATCH 112/128] Fix test --- .../pytorch/strategies/test_fsdp2_strategy.py | 23 ++++++------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py index 6eaa77d10403..1cc56a26c7c5 100644 --- a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py +++ 
b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py @@ -16,25 +16,16 @@ from nemo.lightning.pytorch.strategies import FSDP2Strategy +def get_torch_version_str(): + import torch -def get_torch_version(): - """Get pytorch version from __version__; if not available use pip's. Use caching.""" + if hasattr(torch, '__version__'): + return str(torch.__version__) + else: + return version("torch") - def get_torch_version_str(): - import torch - if hasattr(torch, '__version__'): - return str(torch.__version__) - else: - return version("torch") - - global _torch_version - if _torch_version is None: - _torch_version = PkgVersion(get_torch_version_str()) - return _torch_version - - -if get_torch_version() >= PkgVersion("2.4"): +if get_torch_version_str() >= PkgVersion("2.4"): class TestFSDP2Strategy: @patch('nemo.lightning.pytorch.strategies.fsdp2_strategy.create_checkpoint_io') From 6edd7fc23afb4705f868937f0a832a8435014e1d Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Fri, 3 Jan 2025 15:38:38 -0800 Subject: [PATCH 113/128] Fix test --- tests/lightning/pytorch/strategies/test_fsdp2_strategy.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py index 1cc56a26c7c5..62a26a7c3352 100644 --- a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py +++ b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from importlib.metadata import version +from packaging.version import Version as PkgVersion from unittest.mock import patch from nemo.lightning.pytorch.strategies import FSDP2Strategy @@ -25,7 +27,7 @@ def get_torch_version_str(): return version("torch") -if get_torch_version_str() >= PkgVersion("2.4"): +if PkgVersion(get_torch_version_str()) >= PkgVersion("2.4"): class TestFSDP2Strategy: @patch('nemo.lightning.pytorch.strategies.fsdp2_strategy.create_checkpoint_io') From 6f4cb649d9c85455c0d6f37efde9852b3108e1ae Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Fri, 3 Jan 2025 23:39:31 +0000 Subject: [PATCH 114/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- tests/lightning/pytorch/strategies/test_fsdp2_strategy.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py index 62a26a7c3352..5432e0df0420 100644 --- a/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py +++ b/tests/lightning/pytorch/strategies/test_fsdp2_strategy.py @@ -13,11 +13,13 @@ # limitations under the License. 
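
The churn in the last few commits is only about how the test detects the installed torch version: the first attempt cached the result in a module-level _torch_version that appears never to be initialized (so the first call would raise a NameError), and the follow-up commits drop the cache and parse the version directly with packaging's Version. As a hedged aside, the same gate could also be expressed as a pytest skip marker instead of wrapping the class in an if; a small sketch, not what the patch actually does:

import pytest
import torch
from packaging.version import Version as PkgVersion

# FSDP2Strategy relies on the composable fully_shard / DTensor APIs from torch >= 2.4.
requires_torch_2_4 = pytest.mark.skipif(
    PkgVersion(torch.__version__) < PkgVersion("2.4"),
    reason="FSDP2 requires torch >= 2.4",
)

@requires_torch_2_4
def test_fsdp2_strategy_importable():
    from nemo.lightning.pytorch.strategies import FSDP2Strategy
    assert FSDP2Strategy is not None
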
from importlib.metadata import version -from packaging.version import Version as PkgVersion from unittest.mock import patch +from packaging.version import Version as PkgVersion + from nemo.lightning.pytorch.strategies import FSDP2Strategy + def get_torch_version_str(): import torch From 7e18cfef562e975aff27b4d0af817120c07c0e15 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Fri, 3 Jan 2025 16:46:06 -0800 Subject: [PATCH 115/128] Add check for parallel --- nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 961e10a866d3..6d390346af82 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -95,7 +95,8 @@ def configure_model(self): ) # Apply FSDP2 and TP to the model - parallelize(self.model, device_mesh=self.device_mesh) + if hasattr(self, 'device_mesh'): + parallelize(self.model, device_mesh=self.device_mesh) if self.model_accelerator is not None: self.model_accelerator(self.model) From 70b9f7c426377dcb15480dda037ad415505970ee Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 6 Jan 2025 11:51:06 -0800 Subject: [PATCH 116/128] Move function into nl.strategy --- .../gpt/model/hf_auto_model_for_causal_lm.py | 44 +------------------ nemo/lightning/pytorch/strategies/utils.py | 39 ++++++++++++++++ 2 files changed, 41 insertions(+), 42 deletions(-) diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 6d390346af82..77abc94484e9 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -15,14 +15,12 @@ import lightning.pytorch as pl import torch import torch.nn.functional as F -from torch.distributed._composable.fsdp import MixedPrecisionPolicy -from torch.distributed._composable.fsdp.fully_shard import fully_shard -from torch.distributed.device_mesh import DeviceMesh from transformers import AutoModelForCausalLM from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm import fn from nemo.lightning import io +from nemo.lightning.pytorch.strategy.utils import fsdp2_strategy_parallelize from nemo.utils import logging @@ -96,7 +94,7 @@ def configure_model(self): # Apply FSDP2 and TP to the model if hasattr(self, 'device_mesh'): - parallelize(self.model, device_mesh=self.device_mesh) + fsdp2_strategy_parallelize(self.model, device_mesh=self.device_mesh) if self.model_accelerator is not None: self.model_accelerator(self.model) @@ -171,41 +169,3 @@ def _remove_extra_batch_keys(self, batch, reserved_keys=['labels', 'loss_mask']) fwd_signature = inspect.signature(self.model.forward) allowed_keys = list(fwd_signature.parameters.keys()) + reserved_keys return {k: batch[k] for k in allowed_keys if k in batch} - - -# Taken and modified from torchtitan -# https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py -def parallelize(model, device_mesh: DeviceMesh): - """Apply parallelisms and activation checkpointing to the model. - NOTE: The passed-in model preferably should be on meta device. Otherwise, - the model must fit on GPU or CPU memory. 
- """ - - dp_mesh = device_mesh["data_parallel"] - tp_mesh = device_mesh["tensor_parallel"] - - assert tp_mesh.size() == 1, "Tensor parallelism is not supported yet in this model." - - if dp_mesh.size() > 1: - assert dp_mesh.ndim == 1 # Hybrid-sharding not supported - - # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here - # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. - mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) - - fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} - for layer_id, transformer_block in enumerate(model.model.layers): - # Apply activation checkpointing - # transformer_block = checkpoint_wrapper(transformer_block) - # As an optimization, do not reshard after forward for the last - # transformer block since FSDP would prefetch it immediately - reshard_after_forward = int(layer_id) < len(model.model.layers) - 1 - fully_shard( - transformer_block, - **fsdp_config, - reshard_after_forward=reshard_after_forward, - ) - model.model.layers[layer_id] = transformer_block - model = fully_shard(model, **fsdp_config) - - return model diff --git a/nemo/lightning/pytorch/strategies/utils.py b/nemo/lightning/pytorch/strategies/utils.py index 4f5a78419d6d..37ae9f319904 100644 --- a/nemo/lightning/pytorch/strategies/utils.py +++ b/nemo/lightning/pytorch/strategies/utils.py @@ -25,6 +25,8 @@ from megatron.core.dist_checkpointing.mapping import ShardedBase, ShardedObject, ShardedTensor from megatron.core.dist_checkpointing.strategies.torch import sharded_tensor_to_torch_sharded_tensor from megatron.core.transformer.utils import _get_extra_state_offsets +from torch.distributed._composable.fsdp import MixedPrecisionPolicy +from torch.distributed._composable.fsdp.fully_shard import fully_shard from torch.distributed._sharded_tensor import ShardedTensor as TorchShardedTensor from torch.distributed._tensor import DTensor, Replicate, Shard from torch.distributed.device_mesh import DeviceMesh @@ -328,3 +330,40 @@ def _convert(state_dict, k, sh_key, v, prepend_offsets, prefix="", allow_shape_m _convert(state_dict, k, sh_key, v, prepend_offsets, prefix, allow_shape_mismatch, device_mesh) return state_dict + +# Taken and modified from torchtitan +# https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py +def fsdp2_strategy_parallelize(model, device_mesh: DeviceMesh = None): + """Apply parallelisms and activation checkpointing to the model. + NOTE: The passed-in model preferably should be on meta device. Otherwise, + the model must fit on GPU or CPU memory. + """ + + dp_mesh = device_mesh["data_parallel"] + tp_mesh = device_mesh["tensor_parallel"] + + assert tp_mesh.size() == 1, "Tensor parallelism is not supported yet in this model." + + if dp_mesh.size() > 1: + assert dp_mesh.ndim == 1 # Hybrid-sharding not supported + + # NOTE: Currently, the user is required to manually handle precision settings such as the `mp_policy` here + # because the model parallel strategy does not respect all settings of `Fabric(precision=...)` at the moment. 
+ mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32) + + fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy} + for layer_id, transformer_block in enumerate(model.model.layers): + # Apply activation checkpointing + # transformer_block = checkpoint_wrapper(transformer_block) + # As an optimization, do not reshard after forward for the last + # transformer block since FSDP would prefetch it immediately + reshard_after_forward = int(layer_id) < len(model.model.layers) - 1 + fully_shard( + transformer_block, + **fsdp_config, + reshard_after_forward=reshard_after_forward, + ) + model.model.layers[layer_id] = transformer_block + model = fully_shard(model, **fsdp_config) + + return model From 94d1f901954bc6782b3242bbabf4e82c1fe8c675 Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Mon, 6 Jan 2025 19:52:23 +0000 Subject: [PATCH 117/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- nemo/lightning/pytorch/strategies/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nemo/lightning/pytorch/strategies/utils.py b/nemo/lightning/pytorch/strategies/utils.py index 37ae9f319904..51e4a7dbfa19 100644 --- a/nemo/lightning/pytorch/strategies/utils.py +++ b/nemo/lightning/pytorch/strategies/utils.py @@ -331,6 +331,7 @@ def _convert(state_dict, k, sh_key, v, prepend_offsets, prefix="", allow_shape_m return state_dict + # Taken and modified from torchtitan # https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py def fsdp2_strategy_parallelize(model, device_mesh: DeviceMesh = None): From c175ca0c5d84afb2e0b60208533843c1d1ed1485 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 6 Jan 2025 14:34:49 -0800 Subject: [PATCH 118/128] Add tests --- .../gpt/model/hf_auto_model_for_causal_lm.py | 2 +- tests/collections/llm/hf/sft_fsdp2.py | 129 ++++++++++++++++++ tests/collections/llm/hf/sft_nemorun_fsdp2.py | 2 + 3 files changed, 132 insertions(+), 1 deletion(-) create mode 100755 tests/collections/llm/hf/sft_fsdp2.py diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 77abc94484e9..36105ac47ae9 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -20,7 +20,7 @@ from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm import fn from nemo.lightning import io -from nemo.lightning.pytorch.strategy.utils import fsdp2_strategy_parallelize +from nemo.lightning.pytorch.strategies.utils import fsdp2_strategy_parallelize from nemo.utils import logging diff --git a/tests/collections/llm/hf/sft_fsdp2.py b/tests/collections/llm/hf/sft_fsdp2.py new file mode 100755 index 000000000000..4b7dab9a3627 --- /dev/null +++ b/tests/collections/llm/hf/sft_fsdp2.py @@ -0,0 +1,129 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import fiddle as fdl +from lightning.pytorch.loggers import WandbLogger + +from nemo import lightning as nl +from nemo.collections import llm +from nemo.lightning.pytorch.accelerate.transformer_engine import is_te_accelerated + + +DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' + + +def make_squad_hf_dataset(data_path, tokenizer): + EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN + + def formatting_prompts_func(examples): + alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. + + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""" + instruction = examples["context"] + input = examples["question"] + output = examples["answers"]['text'] + if isinstance(output, list): + output = output[0] + text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN + ans = tokenizer(text) + ans['labels'] = ans['input_ids'] + return ans + + tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) + datamodule = llm.HFDatasetDataModule(data_path, split="train[:100]", pad_token_id=tokenizer.eos_token_id) + + datamodule.map( + formatting_prompts_func, + batched=False, + batch_size=2, + remove_columns=["id", "title", "context", "question", 'answers'], + ) + + return datamodule + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Meta-Llama-3-8B-Instruct') + parser.add_argument('--devices', default=2) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--model-accelerator', default=None, choices=['te']) + parser.add_argument('--max-steps', type=int, default=100) + parser.add_argument("--fp8-autocast", default=False, action='store_true') + parser.add_argument('--wandb-project', type=str, default=None) + parser.add_argument('--model-save-path', type=str, default=None) + args = parser.parse_args() + + wandb = None + if args.wandb_project is not None: + model = '_'.join(args.model.split('/')[-2:]) + wandb = WandbLogger( + project=args.wandb_project, + name=f'{model}_dev{args.devices}_strat_{args.strategy}', + ) + grad_clip = None + use_dist_samp = False + + model_accelerator = None + if args.model_accelerator == "te": + from functools import partial + from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate + + model_accelerator = partial(te_accelerate, fp8_autocast=args.fp8_autocast) + + from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate + + model = llm.HFAutoModelForCausalLM(model_name=args.model, model_accelerator=model_accelerator) + tokenizer = model.tokenizer + + llm.api.finetune( + model=model, + data=make_squad_hf_dataset(DATA_PATH, tokenizer), + trainer=nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator=args.accelerator, + strategy=nl.FSDP2Strategy(data_parallel_size=2, tensor_parallel_size=1), + log_every_n_steps=1, + limit_val_batches=0.0, + num_sanity_val_steps=0, + accumulate_grad_batches=10, + gradient_clip_val=grad_clip, + use_distributed_sampler=use_dist_samp, + callbacks=[], + logger=wandb, + ), + optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), + log=None, + ) + + print(torch.cuda.max_memory_allocated(device=None)) + + if args.model_accelerator: + if args.model_accelerator == "te": + te_acc = is_te_accelerated(model.model) + assert te_acc, "Transformer Engine acceleration was unsuccessful" + print("TE Accelerated: ", 
te_acc) + + if args.model_save_path is not None: + model.save_pretrained(args.model_save_path) diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py index 962a8370b878..01768c6567cf 100644 --- a/tests/collections/llm/hf/sft_nemorun_fsdp2.py +++ b/tests/collections/llm/hf/sft_nemorun_fsdp2.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import torch + import nemo_run as run import nemo.lightning as nl From adeeffd9c0e42eaabe89a639731960648e4d258d Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 6 Jan 2025 15:08:43 -0800 Subject: [PATCH 119/128] Remove test --- tests/collections/llm/hf/sft_fsdp2.py | 129 -------------------------- 1 file changed, 129 deletions(-) delete mode 100755 tests/collections/llm/hf/sft_fsdp2.py diff --git a/tests/collections/llm/hf/sft_fsdp2.py b/tests/collections/llm/hf/sft_fsdp2.py deleted file mode 100755 index 4b7dab9a3627..000000000000 --- a/tests/collections/llm/hf/sft_fsdp2.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import fiddle as fdl -from lightning.pytorch.loggers import WandbLogger - -from nemo import lightning as nl -from nemo.collections import llm -from nemo.lightning.pytorch.accelerate.transformer_engine import is_te_accelerated - - -DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' - - -def make_squad_hf_dataset(data_path, tokenizer): - EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN - - def formatting_prompts_func(examples): - alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. 
- - ### Instruction: - {} - - ### Input: - {} - - ### Response: - {}""" - instruction = examples["context"] - input = examples["question"] - output = examples["answers"]['text'] - if isinstance(output, list): - output = output[0] - text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN - ans = tokenizer(text) - ans['labels'] = ans['input_ids'] - return ans - - tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) - datamodule = llm.HFDatasetDataModule(data_path, split="train[:100]", pad_token_id=tokenizer.eos_token_id) - - datamodule.map( - formatting_prompts_func, - batched=False, - batch_size=2, - remove_columns=["id", "title", "context", "question", 'answers'], - ) - - return datamodule - - -if __name__ == '__main__': - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--model', default='meta-llama/Meta-Llama-3-8B-Instruct') - parser.add_argument('--devices', default=2) - parser.add_argument('--accelerator', default='gpu', choices=['gpu']) - parser.add_argument('--model-accelerator', default=None, choices=['te']) - parser.add_argument('--max-steps', type=int, default=100) - parser.add_argument("--fp8-autocast", default=False, action='store_true') - parser.add_argument('--wandb-project', type=str, default=None) - parser.add_argument('--model-save-path', type=str, default=None) - args = parser.parse_args() - - wandb = None - if args.wandb_project is not None: - model = '_'.join(args.model.split('/')[-2:]) - wandb = WandbLogger( - project=args.wandb_project, - name=f'{model}_dev{args.devices}_strat_{args.strategy}', - ) - grad_clip = None - use_dist_samp = False - - model_accelerator = None - if args.model_accelerator == "te": - from functools import partial - from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate - - model_accelerator = partial(te_accelerate, fp8_autocast=args.fp8_autocast) - - from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate - - model = llm.HFAutoModelForCausalLM(model_name=args.model, model_accelerator=model_accelerator) - tokenizer = model.tokenizer - - llm.api.finetune( - model=model, - data=make_squad_hf_dataset(DATA_PATH, tokenizer), - trainer=nl.Trainer( - devices=args.devices, - max_steps=args.max_steps, - accelerator=args.accelerator, - strategy=nl.FSDP2Strategy(data_parallel_size=2, tensor_parallel_size=1), - log_every_n_steps=1, - limit_val_batches=0.0, - num_sanity_val_steps=0, - accumulate_grad_batches=10, - gradient_clip_val=grad_clip, - use_distributed_sampler=use_dist_samp, - callbacks=[], - logger=wandb, - ), - optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), - log=None, - ) - - print(torch.cuda.max_memory_allocated(device=None)) - - if args.model_accelerator: - if args.model_accelerator == "te": - te_acc = is_te_accelerated(model.model) - assert te_acc, "Transformer Engine acceleration was unsuccessful" - print("TE Accelerated: ", te_acc) - - if args.model_save_path is not None: - model.save_pretrained(args.model_save_path) From 6dc57c4c9e5f6a2c09695b6c57b344e3786ae710 Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Mon, 6 Jan 2025 23:09:43 +0000 Subject: [PATCH 120/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- tests/collections/llm/hf/sft_nemorun_fsdp2.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py index 01768c6567cf..d9fe3f4df711 100644 --- a/tests/collections/llm/hf/sft_nemorun_fsdp2.py +++ 
b/tests/collections/llm/hf/sft_nemorun_fsdp2.py @@ -12,16 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import torch - import nemo_run as run +import torch import nemo.lightning as nl from nemo.collections import llm from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule - DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' From 9c9b972b14c65247034f0910dec39dc25d37c355 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 6 Jan 2025 16:30:07 -0800 Subject: [PATCH 121/128] Add test --- .../gpt/model/hf_auto_model_for_causal_lm.py | 2 +- .../pytorch/strategies/fsdp2_strategy.py | 8 +- tests/collections/llm/hf/sft_fsdp2.py | 133 ++++++++++++++++++ tests/collections/llm/hf/sft_nemorun_fsdp2.py | 3 +- 4 files changed, 140 insertions(+), 6 deletions(-) create mode 100755 tests/collections/llm/hf/sft_fsdp2.py diff --git a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py index 36105ac47ae9..abe966229ffe 100644 --- a/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py +++ b/nemo/collections/llm/gpt/model/hf_auto_model_for_causal_lm.py @@ -93,7 +93,7 @@ def configure_model(self): ) # Apply FSDP2 and TP to the model - if hasattr(self, 'device_mesh'): + if self.device_mesh is not None: fsdp2_strategy_parallelize(self.model, device_mesh=self.device_mesh) if self.model_accelerator is not None: diff --git a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py index 6fd4fa258563..6db6414724c7 100644 --- a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py +++ b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py @@ -209,7 +209,7 @@ def save_checkpoint( self, checkpoint: Dict[str, Any], filepath: Union[str, Path], storage_options: Optional[Any] = None ) -> None: """Converts PyT checkpoints to MCore format and save using MCore dist ckpt library.""" - checkpoint["sharded_state_dict"] = pyt_to_mcore_state_dict(checkpoint.pop("state_dict")) + checkpoint["sharded_state_dict"] = pyt_to_mcore_state_dict(checkpoint.pop("state_dict"), device_mesh=self.device_mesh) checkpoint["state_dict"] = OrderedDict([]) if "optimizer_states" in checkpoint and self.trainer.state.fn == TrainerFn.FITTING: @@ -222,7 +222,7 @@ def save_checkpoint( ## the checkpoint will contain only model weights. Optimizer states will be omitted. 
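
The one-line change above (and the matching optimizer-state change below) threads self.device_mesh into pyt_to_mcore_state_dict. With FSDP2 the source values are DTensors, whose mesh and placements describe how each local shard maps onto ranks; presumably that is why the mesh is now passed along to the Megatron-Core conversion. A small sketch of the DTensor metadata involved, assuming torch >= 2.4 and a hypothetical 2-GPU mesh:

import torch
from torch.distributed._composable.fsdp.fully_shard import fully_shard
from torch.distributed.device_mesh import init_device_mesh

mesh = init_device_mesh("cuda", (2,))
module = torch.nn.Linear(8, 8, device="cuda")
fully_shard(module, mesh=mesh)
for name, value in module.state_dict().items():
    # Each value is a DTensor; device_mesh and placements carry the sharding layout
    # that a converter needs in order to compute global shapes and per-rank offsets.
    print(name, value.device_mesh, value.placements, value.to_local().shape)
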
if self.ckpt_save_optimizer: checkpoint['optimizer'] = get_optimizer_state_dict(self.model, self.optimizers) - pyt_to_mcore_state_dict(checkpoint['optimizer']['state'], prefix="optimizer.state.") + pyt_to_mcore_state_dict(checkpoint['optimizer']['state'], prefix="optimizer.state.", device_mesh=self.device_mesh) self.checkpoint_io.save_checkpoint(checkpoint, filepath, storage_options=storage_options) @@ -248,12 +248,12 @@ def load_checkpoint(self, checkpoint_path: str | Path) -> Dict[str, Any]: sharded_state_dict = {} with _get_sharded_state_dict_context(self.model): msd = self.model.state_dict() - pyt_to_mcore_state_dict(msd) + pyt_to_mcore_state_dict(msd, device_mesh=self.device_mesh) sharded_state_dict["sharded_state_dict"] = msd if self.ckpt_load_optimizer and self.trainer.state.fn == TrainerFn.FITTING: osd = get_optimizer_state_dict(self.model, self.optimizers, options=StateDictOptions(cpu_offload=True)) - pyt_to_mcore_state_dict(osd['state'], prefix="optimizer.state.") + pyt_to_mcore_state_dict(osd['state'], prefix="optimizer.state.", device_mesh=self.device_mesh) sharded_state_dict["optimizer"] = osd checkpoint = self.checkpoint_io.load_checkpoint(path, sharded_state_dict=sharded_state_dict) diff --git a/tests/collections/llm/hf/sft_fsdp2.py b/tests/collections/llm/hf/sft_fsdp2.py new file mode 100755 index 000000000000..d82b78cd4839 --- /dev/null +++ b/tests/collections/llm/hf/sft_fsdp2.py @@ -0,0 +1,133 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fiddle as fdl +from lightning.pytorch.loggers import WandbLogger + +import torch + +from nemo import lightning as nl +from nemo.collections import llm +from nemo.lightning.pytorch.accelerate.transformer_engine import is_te_accelerated + + +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' + + +def make_squad_hf_dataset(data_path, tokenizer): + EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN + + def formatting_prompts_func(examples): + alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. 
+ + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""" + instruction = examples["context"] + input = examples["question"] + output = examples["answers"]['text'] + if isinstance(output, list): + output = output[0] + text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN + ans = tokenizer(text) + ans['labels'] = ans['input_ids'] + return ans + + tokenizer = getattr(tokenizer, 'tokenizer', tokenizer) + datamodule = llm.HFDatasetDataModule(data_path, split="train[:100]", pad_token_id=tokenizer.eos_token_id) + + datamodule.map( + formatting_prompts_func, + batched=False, + batch_size=2, + remove_columns=["id", "title", "context", "question", 'answers'], + ) + + return datamodule + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--devices', default=2) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--model-accelerator', default=None, choices=['te']) + parser.add_argument('--max-steps', type=int, default=5) + parser.add_argument("--fp8-autocast", default=False, action='store_true') + parser.add_argument('--wandb-project', type=str, default=None) + parser.add_argument('--model-save-path', type=str, default=None) + args = parser.parse_args() + + wandb = None + if args.wandb_project is not None: + model = '_'.join(args.model.split('/')[-2:]) + wandb = WandbLogger( + project=args.wandb_project, + name=f'{model}_dev{args.devices}_strat_{args.strategy}', + ) + grad_clip = None + use_dist_samp = False + + model_accelerator = None + if args.model_accelerator == "te": + from functools import partial + from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate + + model_accelerator = partial(te_accelerate, fp8_autocast=args.fp8_autocast) + + from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate + + model = llm.HFAutoModelForCausalLM(model_name=args.model, model_accelerator=model_accelerator) + tokenizer = model.tokenizer + + llm.api.finetune( + model=model, + data=make_squad_hf_dataset(DATA_PATH, tokenizer), + trainer=nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator=args.accelerator, + strategy=nl.FSDP2Strategy(data_parallel_size=2, tensor_parallel_size=1), + log_every_n_steps=1, + limit_val_batches=0.0, + num_sanity_val_steps=0, + accumulate_grad_batches=10, + gradient_clip_val=grad_clip, + use_distributed_sampler=use_dist_samp, + callbacks=[], + logger=wandb, + ), + optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), + log=None, + ) + + # Check memory usage compared to non-parallelized version + assert torch.cuda.max_memory_allocated(device=None)/1024/1024 < 29326, \ + f"using {torch.cuda.max_memory_allocated(device=None)/1024/1024} MB, larger than 29326 MB when not using parallelization." 
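
The assertion just above doubles as a regression guard in CI: it fails the run if peak GPU memory exceeds a fixed baseline observed without sharding (the 29326 MB figure is specific to this test's model and hardware, not a general threshold). A standalone sketch of the same measurement pattern:

import torch

torch.cuda.reset_peak_memory_stats()
# ... run the training/fine-tuning steps under test ...
peak_mb = torch.cuda.max_memory_allocated() / (1024 * 1024)
assert peak_mb < 29326, f"peak GPU memory {peak_mb:.0f} MB exceeded the expected FSDP2 budget"
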
+ + if args.model_accelerator: + if args.model_accelerator == "te": + te_acc = is_te_accelerated(model.model) + assert te_acc, "Transformer Engine acceleration was unsuccessful" + print("TE Accelerated: ", te_acc) + + if args.model_save_path is not None: + model.save_pretrained(args.model_save_path) diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py index d9fe3f4df711..ea7a8100f497 100644 --- a/tests/collections/llm/hf/sft_nemorun_fsdp2.py +++ b/tests/collections/llm/hf/sft_nemorun_fsdp2.py @@ -20,7 +20,8 @@ from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule -DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' + +DATA_PATH = '/home/TestData/lite/hf_cache/squad/' def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: From ff2c54cefa76ca7278e2f20af59f398c20864865 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 6 Jan 2025 16:31:15 -0800 Subject: [PATCH 122/128] Add fsdp2 ci test with memory check --- .github/workflows/cicd-main.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 356385b9d2ea..364492e6bcb3 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -3691,6 +3691,17 @@ jobs: TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --devices 2 --strategy ddp AFTER_SCRIPT: | rm -rf nemo_experiments + + L2_HF_Transformer_SFT_FSDP2_2gpu: + needs: [ cicd-test-container-setup ] + uses: ./.github/workflows/_test_template.yml + if: contains(fromJSON(needs.cicd-test-container-setup.outputs.test_to_run), 'L2_HF_Transformer_SFT_FSDP2_2gpu') || needs.cicd-test-container-setup.outputs.all == 'true' + with: + RUNNER: self-hosted-azure + SCRIPT: | + TRANSFORMERS_OFFLINE=1 python tests/collections/llm/hf/sft_fsdp2.py --model /home/TestData/nlp/hf_gemma/hf_gemma_2b --max-steps 10 --devices 2 + AFTER_SCRIPT: | + rm -rf nemo_experiments L2_HF_Transformer_SFT_2gpu_nemorun: needs: [ cicd-test-container-setup ] From db85277bf2016364458a2fc3f16284cc1e7c334a Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Tue, 7 Jan 2025 00:32:19 +0000 Subject: [PATCH 123/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- nemo/lightning/pytorch/strategies/fsdp2_strategy.py | 8 ++++++-- tests/collections/llm/hf/sft_fsdp2.py | 10 +++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py index 6db6414724c7..d59dca7be3aa 100644 --- a/nemo/lightning/pytorch/strategies/fsdp2_strategy.py +++ b/nemo/lightning/pytorch/strategies/fsdp2_strategy.py @@ -209,7 +209,9 @@ def save_checkpoint( self, checkpoint: Dict[str, Any], filepath: Union[str, Path], storage_options: Optional[Any] = None ) -> None: """Converts PyT checkpoints to MCore format and save using MCore dist ckpt library.""" - checkpoint["sharded_state_dict"] = pyt_to_mcore_state_dict(checkpoint.pop("state_dict"), device_mesh=self.device_mesh) + checkpoint["sharded_state_dict"] = pyt_to_mcore_state_dict( + checkpoint.pop("state_dict"), device_mesh=self.device_mesh + ) checkpoint["state_dict"] = OrderedDict([]) if "optimizer_states" in checkpoint and self.trainer.state.fn == TrainerFn.FITTING: @@ -222,7 +224,9 @@ def save_checkpoint( ## the checkpoint will 
contain only model weights. Optimizer states will be omitted. if self.ckpt_save_optimizer: checkpoint['optimizer'] = get_optimizer_state_dict(self.model, self.optimizers) - pyt_to_mcore_state_dict(checkpoint['optimizer']['state'], prefix="optimizer.state.", device_mesh=self.device_mesh) + pyt_to_mcore_state_dict( + checkpoint['optimizer']['state'], prefix="optimizer.state.", device_mesh=self.device_mesh + ) self.checkpoint_io.save_checkpoint(checkpoint, filepath, storage_options=storage_options) diff --git a/tests/collections/llm/hf/sft_fsdp2.py b/tests/collections/llm/hf/sft_fsdp2.py index d82b78cd4839..25ea8e3c55f9 100755 --- a/tests/collections/llm/hf/sft_fsdp2.py +++ b/tests/collections/llm/hf/sft_fsdp2.py @@ -13,15 +13,13 @@ # limitations under the License. import fiddle as fdl -from lightning.pytorch.loggers import WandbLogger - import torch +from lightning.pytorch.loggers import WandbLogger from nemo import lightning as nl from nemo.collections import llm from nemo.lightning.pytorch.accelerate.transformer_engine import is_te_accelerated - DATA_PATH = '/home/TestData/lite/hf_cache/squad/' @@ -89,6 +87,7 @@ def formatting_prompts_func(examples): model_accelerator = None if args.model_accelerator == "te": from functools import partial + from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate model_accelerator = partial(te_accelerate, fp8_autocast=args.fp8_autocast) @@ -120,8 +119,9 @@ def formatting_prompts_func(examples): ) # Check memory usage compared to non-parallelized version - assert torch.cuda.max_memory_allocated(device=None)/1024/1024 < 29326, \ - f"using {torch.cuda.max_memory_allocated(device=None)/1024/1024} MB, larger than 29326 MB when not using parallelization." + assert ( + torch.cuda.max_memory_allocated(device=None) / 1024 / 1024 < 29326 + ), f"using {torch.cuda.max_memory_allocated(device=None)/1024/1024} MB, larger than 29326 MB when not using parallelization." if args.model_accelerator: if args.model_accelerator == "te": From ca5ffe495c05c906169aa5d2bb0ea716e6c2a303 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Mon, 6 Jan 2025 16:32:36 -0800 Subject: [PATCH 124/128] Fix import --- tests/collections/llm/hf/sft_nemorun_fsdp2.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py index ea7a8100f497..3d24335f7251 100644 --- a/tests/collections/llm/hf/sft_nemorun_fsdp2.py +++ b/tests/collections/llm/hf/sft_nemorun_fsdp2.py @@ -13,7 +13,6 @@ # limitations under the License. 
import nemo_run as run -import torch import nemo.lightning as nl from nemo.collections import llm From b78205a9ef5dd8525927b2e4a4fc7b4255f22171 Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Tue, 7 Jan 2025 14:36:20 -0800 Subject: [PATCH 125/128] fix test --- tests/collections/llm/hf/sft_fsdp2.py | 130 +++++++++--------- tests/collections/llm/hf/sft_nemorun_fsdp2.py | 62 +++++---- tests/collections/llm/hf/utils.py | 11 ++ 3 files changed, 111 insertions(+), 92 deletions(-) create mode 100644 tests/collections/llm/hf/utils.py diff --git a/tests/collections/llm/hf/sft_fsdp2.py b/tests/collections/llm/hf/sft_fsdp2.py index 25ea8e3c55f9..caa3282f4b08 100755 --- a/tests/collections/llm/hf/sft_fsdp2.py +++ b/tests/collections/llm/hf/sft_fsdp2.py @@ -19,6 +19,9 @@ from nemo import lightning as nl from nemo.collections import llm from nemo.lightning.pytorch.accelerate.transformer_engine import is_te_accelerated +from utils import get_torch_version_str + +from packaging.version import Version as PkgVersion DATA_PATH = '/home/TestData/lite/hf_cache/squad/' @@ -61,73 +64,74 @@ def formatting_prompts_func(examples): if __name__ == '__main__': - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') - parser.add_argument('--devices', default=2) - parser.add_argument('--accelerator', default='gpu', choices=['gpu']) - parser.add_argument('--model-accelerator', default=None, choices=['te']) - parser.add_argument('--max-steps', type=int, default=5) - parser.add_argument("--fp8-autocast", default=False, action='store_true') - parser.add_argument('--wandb-project', type=str, default=None) - parser.add_argument('--model-save-path', type=str, default=None) - args = parser.parse_args() - - wandb = None - if args.wandb_project is not None: - model = '_'.join(args.model.split('/')[-2:]) - wandb = WandbLogger( - project=args.wandb_project, - name=f'{model}_dev{args.devices}_strat_{args.strategy}', - ) - grad_clip = None - use_dist_samp = False + if PkgVersion(get_torch_version_str()) >= PkgVersion("2.4"): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') + parser.add_argument('--devices', default=2) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--model-accelerator', default=None, choices=['te']) + parser.add_argument('--max-steps', type=int, default=5) + parser.add_argument("--fp8-autocast", default=False, action='store_true') + parser.add_argument('--wandb-project', type=str, default=None) + parser.add_argument('--model-save-path', type=str, default=None) + args = parser.parse_args() + + wandb = None + if args.wandb_project is not None: + model = '_'.join(args.model.split('/')[-2:]) + wandb = WandbLogger( + project=args.wandb_project, + name=f'{model}_dev{args.devices}_strat_{args.strategy}', + ) + grad_clip = None + use_dist_samp = False + + model_accelerator = None + if args.model_accelerator == "te": + from functools import partial + + from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate - model_accelerator = None - if args.model_accelerator == "te": - from functools import partial + model_accelerator = partial(te_accelerate, fp8_autocast=args.fp8_autocast) from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate - model_accelerator = partial(te_accelerate, fp8_autocast=args.fp8_autocast) - - from nemo.lightning.pytorch.accelerate.transformer_engine import te_accelerate - 
- model = llm.HFAutoModelForCausalLM(model_name=args.model, model_accelerator=model_accelerator) - tokenizer = model.tokenizer - - llm.api.finetune( - model=model, - data=make_squad_hf_dataset(DATA_PATH, tokenizer), - trainer=nl.Trainer( - devices=args.devices, - max_steps=args.max_steps, - accelerator=args.accelerator, - strategy=nl.FSDP2Strategy(data_parallel_size=2, tensor_parallel_size=1), - log_every_n_steps=1, - limit_val_batches=0.0, - num_sanity_val_steps=0, - accumulate_grad_batches=10, - gradient_clip_val=grad_clip, - use_distributed_sampler=use_dist_samp, - callbacks=[], - logger=wandb, - ), - optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), - log=None, - ) + model = llm.HFAutoModelForCausalLM(model_name=args.model, model_accelerator=model_accelerator) + tokenizer = model.tokenizer + + llm.api.finetune( + model=model, + data=make_squad_hf_dataset(DATA_PATH, tokenizer), + trainer=nl.Trainer( + devices=args.devices, + max_steps=args.max_steps, + accelerator=args.accelerator, + strategy=nl.FSDP2Strategy(data_parallel_size=2, tensor_parallel_size=1), + log_every_n_steps=1, + limit_val_batches=0.0, + num_sanity_val_steps=0, + accumulate_grad_batches=10, + gradient_clip_val=grad_clip, + use_distributed_sampler=use_dist_samp, + callbacks=[], + logger=wandb, + ), + optim=fdl.build(llm.adam.pytorch_adam_with_flat_lr(lr=1e-5)), + log=None, + ) - # Check memory usage compared to non-parallelized version - assert ( - torch.cuda.max_memory_allocated(device=None) / 1024 / 1024 < 29326 - ), f"using {torch.cuda.max_memory_allocated(device=None)/1024/1024} MB, larger than 29326 MB when not using parallelization." + # Check memory usage compared to non-parallelized version + assert ( + torch.cuda.max_memory_allocated(device=None) / 1024 / 1024 < 29326 + ), f"using {torch.cuda.max_memory_allocated(device=None)/1024/1024} MB, larger than 29326 MB when not using parallelization." 
- if args.model_accelerator: - if args.model_accelerator == "te": - te_acc = is_te_accelerated(model.model) - assert te_acc, "Transformer Engine acceleration was unsuccessful" - print("TE Accelerated: ", te_acc) + if args.model_accelerator: + if args.model_accelerator == "te": + te_acc = is_te_accelerated(model.model) + assert te_acc, "Transformer Engine acceleration was unsuccessful" + print("TE Accelerated: ", te_acc) - if args.model_save_path is not None: - model.save_pretrained(args.model_save_path) + if args.model_save_path is not None: + model.save_pretrained(args.model_save_path) diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py index 3d24335f7251..b2fa456ff3ec 100644 --- a/tests/collections/llm/hf/sft_nemorun_fsdp2.py +++ b/tests/collections/llm/hf/sft_nemorun_fsdp2.py @@ -19,8 +19,11 @@ from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule +from utils import get_torch_version_str -DATA_PATH = '/home/TestData/lite/hf_cache/squad/' +from packaging.version import Version as PkgVersion + +DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecutor: @@ -39,35 +42,36 @@ def local_executor_torchrun(nodes: int = 1, devices: int = 2) -> run.LocalExecut if __name__ == '__main__': - import argparse + if PkgVersion(get_torch_version_str()) >= PkgVersion("2.4"): + import argparse - parser = argparse.ArgumentParser() - parser.add_argument('--model', default='meta-llama/Llama-3.2-1B') - parser.add_argument('--devices', default=2) - parser.add_argument('--accelerator', default='gpu', choices=['gpu']) - parser.add_argument('--max-steps', type=int, default=100) - args = parser.parse_args() + parser = argparse.ArgumentParser() + parser.add_argument('--model', default='meta-llama/Meta-Llama-3-8B-Instruct') + parser.add_argument('--devices', default=2) + parser.add_argument('--accelerator', default='gpu', choices=['gpu']) + parser.add_argument('--max-steps', type=int, default=100) + args = parser.parse_args() - recipe = llm.hf_auto_model_for_causal_lm.finetune_recipe( - model_name=args.model, - name="sft", - num_nodes=1, - num_gpus_per_node=args.devices, - peft_scheme='none', - max_steps=args.max_steps, - ) - recipe.trainer.val_check_interval = 50 + recipe = llm.hf_auto_model_for_causal_lm.finetune_recipe( + model_name=args.model, + name="sft", + num_nodes=1, + num_gpus_per_node=args.devices, + peft_scheme='none', + max_steps=args.max_steps, + ) + recipe.trainer.val_check_interval = 50 - tokenizer = llm.HFAutoModelForCausalLM.configure_tokenizer(args.model) - recipe.data = run.Config( - SquadHFDataModule, - path_or_dataset=DATA_PATH, - split="train[:100]", - pad_token_id=tokenizer.tokenizer.eos_token_id, - tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), - ) + tokenizer = llm.HFAutoModelForCausalLM.configure_tokenizer(args.model) + recipe.data = run.Config( + SquadHFDataModule, + path_or_dataset=DATA_PATH, + split="train[:100]", + pad_token_id=tokenizer.tokenizer.eos_token_id, + tokenizer=run.Config(AutoTokenizer, pretrained_model_name=args.model), + ) - recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=2, tensor_parallel_size=1) - recipe.trainer.plugins = None - executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) - run.run(recipe, executor=executor) + 
recipe.trainer.strategy = run.Config(nl.FSDP2Strategy, data_parallel_size=2, tensor_parallel_size=1) + recipe.trainer.plugins = None + executor = local_executor_torchrun(nodes=recipe.trainer.num_nodes, devices=recipe.trainer.devices) + run.run(recipe, executor=executor) diff --git a/tests/collections/llm/hf/utils.py b/tests/collections/llm/hf/utils.py new file mode 100644 index 000000000000..f45046bc4680 --- /dev/null +++ b/tests/collections/llm/hf/utils.py @@ -0,0 +1,11 @@ +from importlib.metadata import version +from packaging.version import Version as PkgVersion + + +def get_torch_version_str(): + import torch + + if hasattr(torch, '__version__'): + return str(torch.__version__) + else: + return version("torch") \ No newline at end of file From 1e069c3b1fa26b18b806f0aa3805b0f479ce8964 Mon Sep 17 00:00:00 2001 From: BoxiangW Date: Tue, 7 Jan 2025 22:53:02 +0000 Subject: [PATCH 126/128] Apply isort and black reformatting Signed-off-by: BoxiangW --- tests/collections/llm/hf/sft_fsdp2.py | 5 ++--- tests/collections/llm/hf/sft_nemorun_fsdp2.py | 6 ++---- tests/collections/llm/hf/utils.py | 2 +- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/collections/llm/hf/sft_fsdp2.py b/tests/collections/llm/hf/sft_fsdp2.py index caa3282f4b08..300b4a08c596 100755 --- a/tests/collections/llm/hf/sft_fsdp2.py +++ b/tests/collections/llm/hf/sft_fsdp2.py @@ -15,13 +15,12 @@ import fiddle as fdl import torch from lightning.pytorch.loggers import WandbLogger +from packaging.version import Version as PkgVersion +from utils import get_torch_version_str from nemo import lightning as nl from nemo.collections import llm from nemo.lightning.pytorch.accelerate.transformer_engine import is_te_accelerated -from utils import get_torch_version_str - -from packaging.version import Version as PkgVersion DATA_PATH = '/home/TestData/lite/hf_cache/squad/' diff --git a/tests/collections/llm/hf/sft_nemorun_fsdp2.py b/tests/collections/llm/hf/sft_nemorun_fsdp2.py index b2fa456ff3ec..53dd863cb185 100644 --- a/tests/collections/llm/hf/sft_nemorun_fsdp2.py +++ b/tests/collections/llm/hf/sft_nemorun_fsdp2.py @@ -13,16 +13,14 @@ # limitations under the License. 
import nemo_run as run +from packaging.version import Version as PkgVersion +from utils import get_torch_version_str import nemo.lightning as nl from nemo.collections import llm from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer from nemo.collections.llm.gpt.data.hf_dataset import SquadHFDataModule -from utils import get_torch_version_str - -from packaging.version import Version as PkgVersion - DATA_PATH = '/lustre/fsw/coreai_dlalgo_llm/boxiangw/squad' diff --git a/tests/collections/llm/hf/utils.py b/tests/collections/llm/hf/utils.py index f45046bc4680..dc2715ba32d3 100644 --- a/tests/collections/llm/hf/utils.py +++ b/tests/collections/llm/hf/utils.py @@ -8,4 +8,4 @@ def get_torch_version_str(): if hasattr(torch, '__version__'): return str(torch.__version__) else: - return version("torch") \ No newline at end of file + return version("torch") From 81aeb50a57c22e69eb97d4c0e570a687f09f75bb Mon Sep 17 00:00:00 2001 From: Boxiang Wang Date: Tue, 7 Jan 2025 15:49:36 -0800 Subject: [PATCH 127/128] Add copyright --- tests/collections/llm/hf/utils.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/collections/llm/hf/utils.py b/tests/collections/llm/hf/utils.py index dc2715ba32d3..2f1730a5fa32 100644 --- a/tests/collections/llm/hf/utils.py +++ b/tests/collections/llm/hf/utils.py @@ -1,3 +1,17 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from importlib.metadata import version from packaging.version import Version as PkgVersion From d8e7247b7aa16cc9ab9168782260ef90bde2e97a Mon Sep 17 00:00:00 2001 From: Alexandros Koumparoulis Date: Wed, 8 Jan 2025 10:29:14 -0800 Subject: [PATCH 128/128] include test list Signed-off-by: Alexandros Koumparoulis --- .github/workflows/cicd-main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index fad2eac9522c..6b2470791a86 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -5069,6 +5069,8 @@ jobs: - L2_NeMo_2_PTQ_Llama2_FP8 - L2_NeMo_2_jit_callback - L2_NeMo_2_LLAVA_NEXT_MOCK_TRAINING + - L2_HF_Transformer_SFT_FSDP2_2gpu + - L2_HF_Transformer_SFT_2gpu_nemorun_fsdp2 if: always() runs-on: ubuntu-latest steps:

z4kv%&ZbKissq z8v91z$%g;pYkD>so`;Ll`{S>V_?ONf{@{nS@$)S=-!$xh!2Wu=VGF$e=kM|K#(Qvs zVwHwCy~0e?i?8#fzx zQJH&C+;a2H!(Zbs??3dR59{U$H+bfg4Lk3=GxGb^@DBXt{^gfnG3>g_u6R=8<+#xO zcv~%bvhbOOKRyZYYFwZ`8-MA)9P#t7{Ga`tqlbrMAB;;czZ4sle-N)V`W$YUEZ0Ts zm*KDAc_I6YxH-aC6>*$@4i~Ra96dq7*?a@W>y%SIgI6KlFgyycPksLLo;&Qd$6mut zxXAtC4}WO*%%@M$_&*jG!JmEfvxi4u?9aq2r~c+|PSg#SEwG{Zlkua8PXlm+?9bvt z_d9Vxe>vvqe*4}J#!yX6UAKvqxzZ(Ai zD_>JyM;&?i@Zbmkyq? z%RMW84o^`0A)cUk5N?oPj+-pJLGfMOWZGoIjfc14ujL>4$VcFh!tH5@f5J^S&L5xB z;QD<5_KJ7_p8PurH@*Ju10NVJy67UUg{NS{`2F!nKKIZ#<&;kipT{d|Z@KlhVF%ox zI09>);~A$U&V?OUsP&<;YIxI|-!gpWQ=i88Uq8G6H$i?KH>G%!fO_Bk?su?x^@*C} z9dYyM*Kw1L`$O=l8a^5GRy=XUb^EACJ_=8`{ITZa#P^>#{1slQ$R{P9jy+lUga$WV z|0Hhay$@^kqKhxa&7`e{Lw@s6y_Sr}-KS6f#P9{&=(+_PyKleU4tTHqA=*#nD%=41 z=tn*}ya%rq+X5TCKN(}r-)Mcwj7plu>-DTR)>eOf5(2Y+o>PZ~=Qp!g*DK>p`4msN zcIY*}C{`@`QfX&VzT)YGckl)UZXEIk#eoNDota{-?HHDMWhdQyk!Ai+bTYN!q0e?lj>CrpPtqelJKH8b?<=lNJ%vE#F8F`ZtYsW$C$DWIzP_bJm; z-Ap>YX-2^rsd;xiTY{Y#yy~vf4K=u9MAHaq)35yIP7<@Ln~~p-?*Y$H`EfHL>-G>frc#g_eTS3 zlxeXxi_uthiq@&DN;f12^J|GyoyKWYj9TM}YF&r-b(uY6IMInnAkg@?oVvmD6Kj~) z)k+@A9CqkaUg^Hmq~p((rq@|JyYWP|E&){he7L-*wc%5=fFtf)@ng5zN==Nyb<%R7MxX!KL9y&&nT7yO zt-qNV1yYe>Ej3=K#1j-};x`7rPref0v*Pa@ns#`v9HVDWVWI3!U5}Zkvd=6;i_gDo zXcY-G>_+*}LQNO0sS6AHo@36b+o--5{AinISHUWkD5u(w+bx)jH=${qOoI?N*f!p;dOGJY2R zcUQp>?tZixj{EfWF3g!2GR z@Rl*p@RR$IE3h#XX01;iLdmOQ z(eOcPF;kJzwsEIF8x1}P$a%A3aq*qKw@V))I&mG2GofrA?T#uHfF?oshrZ7d{d3Ol z#N$tR(TKn-V|t`MYs(%aibcRkRfx;EM2?bp{t8OgGy!6)3#cob7z>K&1lBs9g=-#} zzvA2nbzUcWlS=UsmMEtjF$!lv!D^;_0|xdMR#D96cQM%p-9@aeE!Mls1I$VjR^e zEWDA;cofYe^;tO5F%DuBf9ON)yz|e)#=jrHQxgBAuQoekL*D!06*`-4z8U^n{v!OP z`#IXD;g@j{_fP)hwYpL9r7wOF`$=$<>??-*?y-mVtDv8)a8uwa+|;=Ky6c7w@sz`3 zaIyU8qmLdQ@d(=tnolUa^PPXLn*@AS&DOXuzRRw=YZG5?m-(_t^)p&3YD|V0a2{Xk2AaQ2gb) z^)%OkPdRXSIX1@TIZ97ZU=NC~V-E_>@4Mc`Jt%%bxm|YI6~ilD{<7ipm1p4*#C@N&eqA)fTOKc1-IO$9@wA6qk5;3ml{U-^gl>#Vc!%Bx!t-?n&N(Y=T3 zFb~}Kf#bI8E;|j+!dyHJ7xg#7>$-R&f=?gty|`E4<`mCS+hAR8xy5F~#TQ;YY=Bq& zY_a)P*hu>S!#qA37tA&szIVx`cum!7aC74f+_1TAxYv%`4?E(9&iB86)o{%*j(>+<7xkbA{~YXJr9XV)9u%*_jV8VtYGbUcH(~#W$3B*?)Ur>bQ%?Ex@XkNS z(>b_t$o)pRY4pDM+`0_w>^*<=p5e3q`=4?1u!$JKHink#v8&GK|OP>8GE8S3L0vibL%N1=hw# zKKjw&y|_WK*=C#KhTc<$SL5lSJg0bK@fUm$aO!kcAwBN(oY0K(nhZwv#xoq&oSSmK z3W&;c6BDI^xY4V!@ZQU?xjbI+bON5BI19(+amVrriUV<);P=na1Z0v)uT4~=JF3zS^$sV9|h8|do0tlnQFGAqD*55tk8l+nkM1q5kuoQDm(-y zaE%|?g*3EV{vkojFVUW7U3JZdzYX}E)k;{_UU&}~Tb8h~9_z8DSgY}GXl*{qkmcY7 zu6c?$tXFxacM&$pJ{(U!E#GMQ@S+#KXn4jmo&lv8HR@M{M#pzGL#%p5rwKFvkkUJo zU3mS|csy!h9l$^-Yt!_5oyeqejc(un(oZTS0Y{Y%wD51OjYXj5w@C@fWf^BeT>kR8 zYkq%l9N79@{IvP$#aHkeang6h^Xk(f>TxFGyBN<&@znYeX=RflA zk%%zmG+fG}G>62}^#^A)gyvM^-u zhit}wqE8}k45Ok?3ub~`NU-FU95DP##hicMp!mWUzcAd4_tDx|07EwhY+O`_8;S_0 z|KLr0R^ahRhLOVtxJIUn#K5r&ZdkTB6Y;tzPXxL_VTAj1!x#$!@xm)wN+Jec8^mvH z27p7%7mXDBOOZZ&;Dn2dYFcDG83O}94A@%1vR6D5P?P@{G<-7P@ah*M<$_!%WFjnQ zKp;v2+M_TB`lpPz%h3m(E-LE+ame7C?pf%77`zc6-{etf_~2%EWb32~CQsw~Qb;Cm z+(-yzaAkc5m6^X?2(=5$;K3h9if%1l8*{@TEeHY$(>C>SwrnB>MnFDn1O_4V(El8N zUaZE?9K@mb>iWfH#>Y8g;faku+La(#7L+g`QmdO@z9wV?zEbwFCQn7dForNU+*R<8 z2muELPnB`X6}WaI0}32}&Y5RL9L6ADir4a`h6(_h1pP{lPypWs=jSSIKw22+#`ZQTq}iPpU0zJ@J(+Ax18(Z9x%1~?}D_@f>9RN=6Q zK*doP7lIgKv*#^Ej-(4{!}&#*#~;t~${&Zgtv@hy{$T)6WTS$3X@vP#%;2GyF(`k= zMjrmfSg;otoRzbwD757qAB+=I)B2?`Pmr2_S7cU*Q#YI7TXTf`Ln;$g`m|y|DDuPT zJV2d)ar2Pl*v4iUcEu}J-hfxa@LlPZ&v~=rfb^DgF_yV%X{ZsFB^uLkD3iBN`d4O< zia`@cO3)ypHP~ABk{k_F>4fMoFZ|E((>YUGR6>!=4b)V=6i|75?0RvF?AhALZZSb@ z@WHjh_3wwfD>_3xAbOl2Tuvj9a-T$Km#9{sKcR^wC{=_BOm7tqX%;tN8)}Ux6m?hc zUoegn?t3c;6(n7c#$_gk42kZS;9v6qi-QlwCfRr={!!0*7B)ZTCfVDM@+59VO4^G% 
z9>>1e{Kveu^&hS*=Wg2NlKTx@K78U6pBO&<=}+NeGcIWRfBXZFt#GmQsd&}R@BQBI z;3DSc2n?^s!3~Nxy#9^4LBWf@zxr#xI{X{I^=jk46>e7i)}hbPQw4mbjTJ^R4FB|*&ti|858^dG z=MHz=aT|te87{JKJUsLv55eZ$hYx%*fPZm%^;JK>=FcbNFT&r0i`v*E`$r1{>;WkArdZiBA%2y6L7+w=rhAb^M8QE^hR^>Xom?#^GlVd+oL7 z@LRui*zn!&dwkEi zPISEJe=odl=n(wT(Si6&^n2a=UdsRW+i%BH0{=OD5OepnuYFBVq1g8oj^9SZeQ|@~ z`M>l0VZUG95BTta|8KnEhT(%B!qYK$B7je3@bytlk>^t00Dtn64;X&^Hx9uQB717M zdEGhOeCV zRgBT!4gZCwJ@{k<^X2C4_uqG);knOw?(oa-%kjAlPj39}2mS{8V7yPS&04YIpyBub z$M0)T8^(PFZgPC+gC80G<@5iFS9e{FV_*y9_SoTxPuzd_*0;Wa4Zc69*L)p{Cn$dV z`M)##0LRNoAO9F$E%rCVX4v?g??&dv^ePA<&J~Quz9GN~5;Hz%mPj;c24ZUVokdC# zqH>$AYv`#4A~_xdWzEvhhEj3`3CTe5@AWk-6Ue|w)Tf<_S3KdVnsd%QTYFGE?V#x< z*>MZ?xj>I!p={he|IpCslUj^{N+dB` zXfrG^X%KOms&&Jb^7$ec6cJwE<)-H!^E;D&!Y7J3Nq!RZ&dqIPht~3^va*r6V`!o% z80SxI>ah42q}wD+JX2SVoP97gA1Ne(GFgAF&`n4f(;xxZ(YfdF!;iqdvJLQxr8&$nV@1Jn)Qo-oE8Qf%CiA59^pR5E8B3jd`buXFv z4@l+a!Tk66{oxbQCt1^v8FSw1^XCxINDV3*aXMhJo`F`L2jGnWiltk@e)axoHlF|9 z3QiOx!^wpxY{fTQ}XKr%fP zH7yPkM?n2ZD*OnY9miU9{HX>X_W4MPg_FicG<^kIR9)LPNO!}~-Q6hzNOwv~BOqN0 z2n^jYv~+i;gwoyJDBVa%NDMRI+|PS_KVZk&>%7)paYnk>44ao@*DqiFdj&|t&_AbN zgqO*^E`W*wvU{~_0s?Y(DVVQZ>i}YC} z;?;=34)_^ItUX7Xuq~b+yTO9PweRE+%sGx?ar58fK?Kaoi?Kc@ZAM74OoP%WwQd&q zd5I0%5!qZ?V$^M<94Ys;aB@^~K&HcUOW?1{g_iu9_UKgIg+P^&5zDP1HGuSgBth{Fo-{0e#vCeVr26ZL(r`ZlC`x1@;lhl zG5;tRTZ=lnZN$EZBooB+Yh3a};i-O1@X=7ALt-fkC)12+ya@<9=Cl04ru0ovyT5*JQ8;*!pc=DoS5k@?W?8DyLcMSL>qNs1J$2{1k?t&_^+QN~pkoya31?cXd$ z%dwefy>DQ5q z2KeM!Ggar$A5EU+zoQKGDs}!nhf*_A=tGMoSmYl#j3$|%<4jG-a}Jl`ICQ)rQ-4}r zoQP9$REPV}DJ zp8sZb_szZLAdlp>{TedaTXGekx9=s%@jazG{qvijZZ8rSUQNR0IPJXV%&qK@>wBfZ zaCgjCQ+Jy^>z{D*7DKa;Ud(NLE%OUZprm!Gy_x&!4VLg$d@oLea2EjxVK9(JWaKwe z&Tg9)2YRlw+n^qnI_R9ceMOpVini*k;mK`q*y=aBWzQB8qY?(|sU)AYA$&FIe)ErK zM4D1`zxW!rgZ_tPzQCFgum7l#3BL=JqU6$k4mRteB0O(dofa|Cp@i#{coV`EIlQV;^KdvIdNI^XBz9nX3^i=;6 zHw!aF`|!70Et1y!OLulbO)8HofPa}lN$AtlXI58zv#>zT8Q?#uKPpZ952I^1 zx9!LIUr*(7Me2{F`Y?WN#YnjhOL^$IT#kDjyL`dk+;vQN`>>{`^>Ii8wO--(&@K_u z&&FQoCq)3Tb{Xe}8^P~pJk8YYusv4xYuUB1-Dmup5rm7zQPg9f{RENzTsMXnw+E@;PZB!Um2_+y+O9H*oGq0;~9*S&@~TUEpNY18jEEy!x$P$NB>& z*ssjoXL!0&yz&_rDQwgAhl17*v*&KL8(W-w&Hhx1f4>6pe#tCW`dV&LL6ye7MTnYr zh<_au-=n++^g*sqsay-Ysho~Hr4nv}eo|cQuN?%Ty_LPezJT3qcEq%O>Ns7Y-9=rm zd*=dQZn;v8#+ZxWyi}alrPBynWP?9E0ch;69gNLvy4&pDJzWO z#nS(ruy*n1B|S?VEzhp&<_lyKyjruG_;uRDxA~;X@;2+iBg8=cTG`fDgfw3Ah3<}; z1kM19-M6-_{kuTclVCW^kF&(zIF4?bXvBF+Tl;v>UNU#D)jcxPInLx>Iz;<8VzR$MN}51Na4$_#+f z=_6<*Oeb@OXA6F9Y0~6iEplA`Vn0p#w8ivd{fA&EXNWflU2H@m+I! 
zFq(3l)fKikOxo8{JP$Jr3PASLxLAX!g(Dm>98q^x=lHr+Pl78Zuh472-WbO9wZl1s zzt2V0x>8~O(coP=!khsM|cMn z69O8(ILD3&kI-I2Zfe%5cwtAIf7^*_ta)xHD_fDdWE3kvpTn@;uG&RsdnLm6rtuOm zUrxrc^Oe<*Dg4~kPX+(l&*lqQs+p=9Z}H%v4soKGPZa_(0JK)k7S`HQ+I!62KiDu( z_;Nt`BwV;3zL$m^PofK7c6lJ0{Wa)ohrJa_thGvVThmDA{|DjK1U% z-Im>i=O+5vr0@6T>KLDWB)Sc8LK6}7&yJwD4x{>W??T}*TL7iEJA8U6Mj~26<=jA} z^cT?fB#lCOb1y)vU7}^(^gj7Rk}&8wD&ta92Sb4%#1P6`T%$+FMgr=zyI(hH{=qbZ)PdCijYgqH%Ei-Wog0^Y76{#K{eC&2i z3k=Z)_l-@etGY08yRx4G&x1#lq_VpAOXx)9FBe(58douBlfPe%kG zR6Obbq&R77`1;HJFnxRUJSWJ?cYV@4vR4{Jr-}4nJatRmyXX@}42?-+<;#unNKRwt z9vqg9%Z<&%$~-14`KtV6LV4D>r?~#vYLi?5-Grm(Gql(`nl;%e(6g77im{*OHg=A6Z;ih(j9Lo_eucm zdi9tXM+iageHf(*B28mI&Woxg_Di1y1CsqfM$ zXL}!=ccgKqcp+o>_-9x$fTP&`;kMA#eTSJ-Co`@nNj?zAl6`$LRu<|c+j26?3Tp56 z$qV&CmldJNiPF5D*i;lh8d2TIz>COL(!2hV`0T>F5BaL!k7h$g$u6#GQWrpVShR^ZtFt0zxZ&rx>xO4w|XKRzE>nLFDYUK?( z*(JXRTr9XFto1dUZxnEzddKl~m<=AOi!?OCd<5{k_6<|+)U6Mtv^>t047lI!mBb&itIBQjK2s+h^jdXk{B+0o|)w4l2Jn&e2 z+}G-(Axy{m&)O%}0xL-yhwsg*}Hg^`1tY%%P|WpWp+ zekKC}{H}8)$8k5Y6@z!k!_B+Qy#A-|CxX7Ygx|UxthZ8#TLz2R)WlF2$Bqi5hW0#& zs!StB3d{Pdz(y%o3^NL2rN$(cVbTFpPok2G1xpz37~Ch0EyggWJgNrqcFb&CITHtX zoBM(_jh}?MOiE-Zyhym$o0UkmVF|Jrm3vWvmHb1OYuwNTE*;`5DvaPKqDm&U0s_4a zreu%s^|yQfq6QXi4}KaC)qOPK*6?P+q+Ap%M_ooEYeii*6_i8MLrxuk;>}E=w62o< z-f4}dian-}ej4Fak`LB}V{t&3)6`9&vOeIuUM<9gD6v;rUd25_hh_*HEE)ZrRqTHWQV-wT`YdrtCi z;%DyV)?587v(U$1;0aX3;iLMtjpQij@kEjY5AhQI$+m61(|f!B1_@ zxdQE_P+#T^By&;)puuTT-tTDn*l!!^&2&(QmzV`h=R=VerY8NyhYv9@>AzR6OMaZ) zDPuBcEWQu>aK9ErdWM=PivCdsFre?*$T2&z&FSoDC)g4|Zdkovz-m|-gx`Xz12mqNof zg;U8-O7n7FSchQ5M%pO(UA%=V&_b-X=ab%F_@!q9RqnpIjxQ@`hlUAWbAxOsY?(pd`~)pJFR z)mP{Nx>EX5*4wwI6iu^0aogLDm|n4+$9Fi+^b`FYT$(T3v&zinWzpAeib>B9H5y9_ zk#|dc8TYI*{`(uH+uky--=PW%SaYDDBR{ zM9yFNgVh2L(JA~Xw5kW$Ad4Adza07-X<#f-gg4s5&n<0=HlOG$zMEJ&h5-0PRexXT zl-xnbo8r^s@5M%f=il+@%UDT&pMY%1bdHc9_sVp@#9+)6HaiMbl^QF(&14eQfCv%j z5ZWf@1*7f^&wes?A0^MG5j}ch;sW}CSyrWx;?$T_d_wPg!)&Jh>z%H<`MU00JU-#i z-Y;@JrGPC(?yc>TkqSmhJg8*-WR)`IR(mF;cOHpWx7{RBRN;Oy0 zTHU4V`5tJZnBO-B>{OUv34gL^g0GmazwHEs?ioLhYBaaVlof1}3fh;vKr|Q$1<$v0 zvsXefzR-osZVeBS|N8Q?_}#Rg7MxlPeASbZy?NawqrHjVg{zmc%+FaGmCGM#vnGvW z*S%m&lale=r*$9H|!$ex{hke|HT{;9>{{2>)3c>l*AOf2~cK;bJDx3%o zAar(g5n(IfCD&_);`64Rq07z`C9(V5p#}L+$=qMJ(F~8c_94J6UG$UXC$C|b<<<$| zmwUrGdfvCAl})B6>?iWbS}KGyHBMpy#MF4sr%=g?NQO0n2hpAzsl%f&!1sVx`i}AU$r#F1zqr#@&Ec92 zm=gYrbHkOq?AX87{n=#)lsYO9b(nYtC-Zy7^UvPc7)5|P5rILV+z3hww}8R($aMSK zrBspt>SddzFJs{A=SI*=J7}?ox5ho{((vDl@RpGu|HI?v4wK8!d4jU)HHV{5Sh?RT`{n>DjnDl$q6d%ss? 
z_7TVx>gBSa?=XS?G5Zb`>BOmUYHHe+TX^HW!Z8FINR!knK83oZ5JtXGwoa`mG)bn--Uis2?o z32pi*I+wX%qa6?%-xUP;=cU_Lw9vKNt+r~-$EU|FZLLgqX{)cb#pLX&gqxG5Xx>+Q z-tdC$wL_pzp}Epanuf+By9xmx`fr7oRV%n{G+tcmy#Hku#i2m6o9DbL$2a#&D5>fy>QY;ou;+Hb5#00ba+3BYPO50=z;?RpA4==U^2F&&ek zW)2-jz)$QQJz&W4vCH#!9S})Pcs6EBW?wQ->v>IO(ERJ4gfAqtR;aoNEGvd|hLkoB zDcFk6sZAdpq34gOv^tsQ7NYTuqPVRo?s1=we8`C(dueH{+~hV|e?FsgG|ua3J~pIe z7*bQ_sS>=qD(F6HE9Mj6JZNqeTUYzVG=?Lj+m|s#Y}BIuiuOKkP>9q0?O&yZY8IXU zHIDQa_~)L{s;kZwK+x+^m@Mxg>THS`P5tG}m#s0p`xnkqU1)fwKYxyxp}t?2%cYq( zlf&*3G#jO~pnVbK@4_BlhPa$RpP+YC&94NV&&!3WLGwV8x9Wvd{0v;?R)6ekV&G4* zRX{fF{i$N7BY%DpEew(xW8i41&}fN{M#pvydYCVFh-sDHPq>IEt)Es@E@oD>P}e;S zgmyDQGABPyu5FMPt|ODUU@Q-!$A9dV_&~18?u=7yM>TKDlD8RlUH`~g-$9roPFrRu zy9|&ymcRmBj&?fN=?N)i4jr81(}p7;tqa(H*GV& z!*t)!Jp%uT>DL<*(K8hJ){wM1&}qZp($qo`Nv^3kt%-hmm4sELhMzWm9vQF9l9Z2j zSq(h`q8PNDTA?`H7f3C|f|wa|ojY%OWkY(`j%sqD_%CKBs3_jqFl(9pG#SQQ*7nhQ z54893@9t0x`6z)xaX*$Acgekvfh~z!Cb}b)!f3I{Ap1On zTD&TAcxqPB&NfPpIm6{-`mH3PA|Y9xY_=>3>xY|D>LJ2vIk5Ss2KkTKZ6es0AO0rp zYXN2?{^jlpkM?{Nd+Z|t5YdTf14y(41WorpF6Eg!=vV#B>GRnf(^-+%w8N&C%^3wv z!L}aTo3PcM>eCDMtEK9OE||sXK9?aYFM|+7DN6|XiB%~nV$A-M{XA8z+s!6?hHQhu zR4f>dD#2Ro$Pll_2aI49*VYE!fEZa{u*Gc;GNz zvAG~~CvtdvNO$yJOg`p$5*LMgNaZo6}N`}4*lAt6s?oewVVD!&TL;h%|oXo!cr=P}WLla#7AY4Q1Qp8TKr8m>5n)N`N zy7`;q9Ztji2>5EEKb5!wPb(m$d(ZKVb^?c3bE9Je1D|V1kCCwg)%DVuTANT>*%Tvz zg$NA|yJ8zV1)Y*lsxg4I(6}@72R*pVJ8)`9Gu75VlrT+btXvaY(Kxf-Bn>0ajBz}6 z+*1cTTz0rmH@bM7n{6f|J`(G3l}{<2pF1&~9x;xKRQT-nfB=Q~wTGdu>>iC7wSw*K!Zy1zB#M{jO@6Um;7t!}oB@(jiY8Fm(+&v?dz}H_|P+^9&GQpW?Y-2CzK9!T8627IWyEeY8#^~So z*!l|2J=l`z%ZF3CBojwsuwgO*zY)oT^NbDAj-B+@h-90WOG zVcD5Y`P{73v@%fFe_4_${Y)kF}0GagcmcV#53-N=VQYVap zw%WG_LfRdIf(>(m4j5^d6RP1ccb|9kgkLUNz#(KwjkTktX-qPQ9>U$0;z|jG??tlP zI2fjNGt@u4b9#PZw4&OS(W#+T2M_+}&kJZLKb-a5%XJK9-a+OE3)%4`C(15E$Wz%% z*)d4)CG7pNXcV?`_6RRTZ}MQz^)%DHN7*%PosT>}E(IWvF!+S*L>3s;O%-5)2f)u) zNO+D1VzMFoa6g!cKOJb{9`7y}nA*@Lzv78m`i{M$`J^Q?S5t6t0(4jQ-hQnzTxCxG zmBF;%gH8Dx^a6yvqwKNpnl{)_5Biq(g*pgo1#Hk*`LbrEbnOfzG^wdQzXE5}@|KMa zB(g}CO+Z37(f`q=qRX1EsoJf^usmtXTX1v4&z^xmJ0w~bPZQUld-gE2@~K|34gZsU z7G9f*OtHETHnbns6njXC`Zx@x6xj46QghOe%J(NlllYxqzYM4qMIDuFo@oN~9}53{a65amo_48%Jup0IWGjVv1JVIQSmg=#!w&RpC_U zX!t*}RXLK!36#!_nsB3KN#iKcFFL^J^yui0>9GMM_*gS&(z9*(X2wSOrfWYyF#?T` z37D=&yb10bdze(JRGb5!h%UZ=QtTX`IglC^BpXN?Bs^9>&&ofMQ_XrE4+tj=2#udz zh%byeRn{Es1#*>Yx3aN7#|vXIqjSNHEh35YJ1hIs7n?U0;CYdH$Muok4i}_jf4^Wq z3yqIg0FsvVLsn6v&+6f7tLz_>KaZ%8!wyq_bmM$YbLxcG9d&TXrCcwtvWg`4JjIfY zg-_zyx|k@@j8)%8dJ5f4<2!e8?mw}QNF!QU9!3q2%-Kzjjl63cyX*>| zFz2e)%_ja^KofbG&Nx4Kptx$<5iBM{o+Qu;v zIR`1CO5ioJKMRL^4vio>&N+KzwgSe)eTj!b&bj4tt#0}7*xIt}whj7_L)pgAMMj&7 z7u5(O_Gw|Zrk3pR^CT_3qvbANTCs}b>NoZDz<9fYSYa@~vI6@#a9Jk$!ODZJ_+wG7 zf`FB+7Q~&NN}(8Fh-~^0DOSbp)ck=nm%h3&2aEgO4s&{b zql_DiFzELYSuQujVa_qO@pz7`|5$c z=tG`?jQDoHfP%}J#Gh|L<(8J%AU5-;@pzo(7Ev~?#DQn8z=$6=##Dn|G{|bg{(6^| zp)PVPxq6D@C8B%`YdM;ldtD{fk`@!|sY2>coAzRM#T` z@V64mr2S(}jyV{^F^u2yPn;`B2&=*^S{}d^%LHk!6w8Hf(Y*0QA?FS2*fG#Fr*J{G zK_w4Ch0JFvtlXNTR3-DF@VArGVuBJtpU7!|_jxILZTzDwl@(zf#ttkur@Y(iY^MMxtD$0!kChu-&Xzy|ujap#4SkD5YfO|{I?hq7Y#R*1 zNhsyaQk8Jmj5g_^-#UvJ4xg}7i|Y5Uoci9g3B?&{s@buvSw6z)kLL|Pa4E4H78IA@ z$s_4JI}=2IRK)7DqL^={Wn-C}Z_?^xrk(Z4h?NQHr-qU>O)C@UxUww&y@pZXkKzpi zq5xWG+Kbv0!WnwS+ZFWMnKg~;fAa?0R?SOrlD_S~xe+mSOX!{1W@>?hXOSL5t8JWu z#I@Mq41bi^ltuw0)So6_L`js>bEX|U*RkZPzotkj;vp5IrOh%=72Xx2qh|wmc-hfu zvXY?5DoJ_1i?GhFbgX?Nfi`OFU_GAnZd&rHul!~!=~+qCa;A|N*;!;{k=WF=$?lR} z@s~%|Z3Ej5OkxTs+IckHSl9j=ld{7t|A^E+jzk z*&jO?NO|KBFyBJCpn84(<1=ics=^8{mZ*_6{z}$kZn2g8d9C$9p{BV-{vpQfwo>t^ zjZH=@U$W75Z*Wh$IX>2tbUf|d!_?Z30UZU!w1H~~dsSvXPqtNUMgX*>;U3G;kA}#@ 
zDx6WNf(g@$GUi>Wi1%g#HN{Q=Xg3vJ4y2WvYU3v;RBQ1Pxuyv@GP}urSz~^>tG)wP z3RVr4?pTi%!jlnXW@1zx2fMxop8`V%Y>z^`#Yr{N_6yx4iU4(^oXZLj!7ZLU^od*L zj#x5iH;L3?(@2EQO9an%{gNDqIJPW}6}Gdc(Uga5(J$$0KO5X&)L~O|Io^yVLK7N)8cApavVjCfrJcdTBx3};0Jd^mgUS#lMV|(|w(7vIa@Eg%(!vg$`2bu_nTF4d7tNx*C}Pom884dP@T=iPP^7Aw&O#p$wP z2Qwi6H$^*oS!!xZ+E3q`@b!-FzZsp?GJh^`odG91eTKaeCG zJ-SOj^8-$coYcBjRWGVFkGwNANLWYs6SE##(mG_vKt1gI9jP~XT;fG5Oj{m>ac~Jj zvX{)rQ~RiPLt`!2|6#F-O(I|)Sy+(tSBDO`jEStAC6gunFOxdaFUe%G-? ^t8hW zpC>^hF*y-F>hbqAe8O*R=V_ZYs-7DFB%7_n>>P@yj2rTa+LVk{bHhds^CJ*FdfR#` z+QYVe7a9H|jV6bumlbOj&9ybctJ|sc1?n5sXh$Q>@Z1jd^z=1Vb4^f?g3{c8TUG?w^ln-&$LQHM0TRCcYUE!TW;#TH#(BQ7nOMAllrGC@XaGx zt=xpc>c7NnP0Q*THz(`r!GNR0UiH>YF*Dri7&9vY?U*D+A+^4r|E_$Cs5-EIcuXf# zci3oxn1uRHufW#qFz;QbAJJP)qQs49Z8mQ`3}o$6YG#1@Pi`!NDw&O)9*5GBIjiwq z2#I4V>NeuUe|<{?4%QoNxMc064!x~qb~_BA9uPh0>97K=tZ`py!sZh(UU8LDPr}zL zur?PzGACDaK{%dVlgF<%*ek)z94TC}d1Fa!cwilDokfP}FtNwEDzQ(Y+QhO+L);+? z=UycGk%)|=Uf%sv!r?e^$O&Vw7*7e+eJnUoEqk-DU6^F0!h_bw6`S|O5pSH3RAZ;G zE#@Nu96~MkNcO_viMT5O8Az;<8lgHZBx`yvH=Aq$(MF`N%Oo(z{My*JXQvG64yX2r za+b4SM2VKnfrpfA!aH z(Sa_S!-8a$jOLynTqiU0+Ge+l!s*RQ0^Ure&rN2AgPc~Udx!Hcr|9Wlc|9-mRqBRE zVcuKpuV|OD9=+X>gHN%n+46|yIG{^pK=mhX=1wMYV~|IHvcrTFRGg-%c6A+y7|crM zVclddDGjK#T{3-yFjDQHrOFWTBnPsDDvH^SAB~QU7^$G+|Y>51iRn5^Z9d1%m9t&5FMKf`DJORzUHH zj3xumsS-6^eQUI2doKtS>wx0onx*;q%yx*debGDTIewURLl~%ko_U`UP%{I&oBVLE z73VX)eW4Qt1HjshKonSriPa2z&3u&vq>g2K~p@YoiwyZ92g zu&d<_xkqz`k7<`=;R*DHFJ`e7toiVU33 zJV-qnKcZ?1CODscwmh(mwsBq(APByJ->tc%CWFM-}}+~%J1bnz6`qy{#)+7@Ge%U4SARKE)mr81rb zOPUJ7lE3FVLEa@n69_K6wE-d-5wd0Q*ASit>ZVWK4>M}OO15crJ-Ua;c=T`(=e3J- z6yshhdOnEpdp#ZR)yQ@!YwT2T+Av9?x7EJC0zEupQPBfpn^7!^iUiWj**Ln^jn!bg zDK3d9G6c@GWO0&Ix&+K!=R2UAeZ0P)AOXYOxveOW=}20gG)%A8bHjb$#&>v8{1|(* z`$%{&#P={;gzAejT9XIcC1JOw<=Fh^0^kguhn!jiEnJLJOL!6dM@LZk)TTso4 z!Wf_*TfYGx=6DQOc}*wW8y7&&wemX^H3RNa)EAcpwZk9~o#FxI={7agezuoDWW!ZG z6Ie1HoGHt0g_Z$FVjCO+3h!${l5ca%G@3inhS%YGV%YVBVeiB**#elClr8UV4@Uqa z)?lvjg>}o>D`iC~+K3q3$&Vgk-F~@6wIQkM=XZI_BA1YKBBi}Z20(=R4b{BW3_Ecx z`Bc7ve}owV3BOzn+QXUmJDNJA=E(sc$Kqn`vE@B1yJU{WV(#i7Ts>r#hZ0*wIG<5f zQ$38prRmbR%y6uj25GseS?{Q%PEZ+%b3@31q1joXUKT1ShT^Fj8Puv!VxNwlr1pi7 zTf|8&-vbqHsm7WA@O#otyH#N&3;cTQSW4zC^1+_JI}?vf;6qZ@{2${owC0?_nbS(W zDMiBbNU4qDr54rM@ey_}y^Gao>?DUcomq}b8_s%z>8 zh*PEUw+8$k>152L;?i$8H$oTx`)&$>8D@2#;4X{W{_nAgkC5KLX3SexQ?Ob0N!|Me z4i-2MHvC*}n?k>8@BFtv<$E;mI}GOyc!)j~gww=3sL8#Gk)fneQNw~rj%%#Dj%@oB zO}U0ug9z=cF&oynW4u`xS&?HY5lJl~%u*W252^P5uS3+w5JH_*ZV8O$S$0egX!(ib zms57z<2DoVHl9VLwTsZ3^Drx$U70Pr6L?+0n`@hCDs_8B3HSd)vI z`<(fSqn!EfVS8&fQ5A3hIIcw|>2M{1?%%s}n0m+17YckM|B-L7!st%=`Z`akIdB3f zqI4m6;5x5*s0K>M?&>P5pKm~*V>hOG4bm~@Ik!3?0sil*zGdX6!p{#`;+-~$Iag1T z6ikKuSE_dh;8`;){{hH0r2R7zqEdtrX!twikLg^?SAT50V&e_6EljhQu(=TF^X zk^GIV>=+7sSnSLV96uHel)On$saK$Bfz6hG~IZd9|3u^zkVX*UbUC(uuAtzf zFf+oBzqRRurrUqN?@kFC;-bO5#SE!!8hlm349$j+hN{$vdrxk%$2r{hFk}T~M!zet zjk++QjTIePyrsfs7K+iA(8}z3zQGiEh}dFCs=vyCTkHt?C*-1FW}C`vmQNkO!HU(D z15Ac$e6f`s4hx}Ly8P;;2#9W%#gpGVDxLSqtWs1;9o^TO z58O@Lm#0D&s_{oOCfRP{_>}*|Dg=MFcq=bd+lshMv2fF#784@adKb$#PPg%`eFtuv z$D$Qxa^@S2hf>!bnrllK+=S=rIdexz{Pm)H)txxM(9$vc?12i?cQi^40~NDQSEooB#k0fe$s?G& zDmWoOos_I<{N@CrR6t=hS2;otb_#{-eT)q>>=TVIl&ad-^ky-HZVZdqL-QQ+YS0A| zi#~Owl4r96V|<3O1eOnrAY{tdwmEhKH*%7$@j0Ent;#QAal!8SalH zgvgtYXxj>+t8!OsbCX79-u7QAQ_wAoo+6Y-FH}O5be~j)2L-T6zq`i@H2@YO}`BNIR@=O%i1(2DtEWm@V5_%nz zsHYpdHQ4`nzBS2vjDvY08}i=@XvX43h#aoes|HyR?TYSM?)0}XjoioU+wcrYOo zE;3D#b?u4CrzQsKH9+Wa`EiUUy53vJpm2{fP4T7~0`pu2zd4055+WskWgHERrQl_2 zt7VILnsUf)T$3mnxR<~%*)LM{bQH+2 zoQ-FVUuuO+!mYtM-GiCM@Xw$m#Atgb7E4!ia0oglq-44Y~n4efGm 
zr5n|Xo6zRCdZDw?(A=JIh20dAvJtxFIi;GXU~BQn*rh=*IYyrcC27JeoWKJN&9UXB zG!JEE!`&q!j(=CJ032uXtfh?l*aROq6b&?v{{tL*XL*YvlL5W_Dm)xh*Hrie-yk91 zQ|A-k$r59KXs2dA4tZyU@M%KX5-oXjndhlOAhv#vr=R#B{vQb)uJ zD0=R_n~=`%zJ%@UP5ORVsAa@1@4~ ziVkshS+4YU(0J9;J*KNIE|WTO7|-D~D4A2yd^Ox2!d`YD+nauJV^lbuLc^Q8ns}PE z6wK_b)I+zvhcccyI8s;6zaPr4r>Kb(e4j6#d)EZU`TA%0zX?&qOGPqm*q6h{cIDH; z0-V&M9xO1>|1{t;Ete0Mx*k9GJW%HoIoL4Wr|F>9AYpkHBu~v<7nb;);lX>kvenT; z@zK%h7ATmPC%cJ9wdelGTj820dat4Bka#+GdE2m|VZ#m` zMQ^h>jqEwPy#iJv5>5Kz@+Z~RQKaM6{g8n5C47Ibz2mFhfxCXndMqW+QYDVy#JYHd z6|bHMRyG%w;J7V*uRjt017G;8UM@NA>KCNrrY}E6jc=Ku>9&T5xYQ_?y=!@l5(|h( zb1_o*f#0wpz(0g z+0!ZwaTK+*b8EoUH3OW{nG;1-?Id!)_!j>K{=K~&rVnU8{Z8x>$lXV=-i~DHgm=d0 zS4XP^loi+;IAkveSWPpG`d4h!)t(_OQb3ubozh8g9o2|F9M>lr zw-u#{x<(3Wesl9Fq88O7RB!Js0*M4`i$6pgzDDuPC9Bb)wy&Cr`l+iFL`_WK)0#pJ)5YknnaI;f*vY*Uc ztR$=Wg$`&eg^MqvBL>@s7`^HcJf?oO0(`4f;eJ}xXX1>(c*y2kppY+!R9h-t0pY=f zx0bQxszZ$ep-Uh8Q|cmn!Zp6Pb`8}21~&aFrue=1YG1q9q(Tr~AP=qBhi89p6~bMg zx+59E)nB!JZ9fT2!1DKj)DH)?AL&*CAw=9ahDxQwAND{#zsvs?RKmuxWD9Fw9w}#Z z&k7u%Q{=Ea6uJ?HurD#$OP@%`RT;Ry$(0JD&p&aGGJnb?bw5niu~WNo?sCMjeucc4 z%*{OpBUASx+x$H5`=w9-LZkj^-SKyVGtz!f@m>e+^_l;D^LK@49z8`xYI?ZF3E_Dw zYEuwY>2|hhsqla;IRsR4W$~q+!nNln`%{(AX0otk^b1EvbJcL&;Rp0 z?~01&_4mKX?yE1oa94)F%j>=upyisSxE7{_T2bCjL9^dC;pZmB2^lUwD8>jaL-+35 zaPe!|Q-#Q+1H>@3cYSJA?N$rBy)shmuAm%8qw$sRJy7t!_PxuQ_0G>(p<7vp{ftxS zthsdqj_6?-X|i(0D7g*+k)buzm9($_6~Rj%w63!U}`#1 zC9Dt;N9|+8CW!QL=9>#p!T-xndW7Udx{y(;%DzdRmQq0Vtm-$ZtNME~KPAaPgG4wS zNEvDZP)+PG15^x5!XBHzpadxEMZvKjPl*)lv!Rt&n#mCS**K%!tFtuG1&24s0`*TX z1|Kbc8=Np6C2-d6u4ye-7n}h-4q5Ylq1OHgoiM5uLYq{e1t4;TA6`sr!v8|OLChTWfFLIO0R!=h%C_Cx^hy>_#5ft<0?x9VI zkc&??28vwDZRew)%X-HrhY9BQF@S-CkekIu==(hqE!vmI(a7$9TVKWYl)nz^9T`Oz ztU$PjRX#J{q6h`49)UtGR*r!bA>)|F|Bt4xjB2Cn!Yzg36n85Hio0t{aVYK@q_{(I z3GP~~xVyWS;7*~%-J!U9NN(Qm-uo|W*2>J}%*mcDj{sl*!b9-gNgl09)r-kme)}o+ z8oaJcdVHw9jxL*FL>YR%110FebhaweIY3V=1AL2yVxk9s637Ce1=vF>;jY8z>Sz6% zFh{s5%l4TQX!UY=8p`A7u_i7GcjqS8Hc>jQe|=BLhMkcYegVjsv<>(+gmZxVX80Mh z+IDaU2A}6{0#`zI9`P@chL+)&Xo;neV9*9Q;I#$-9e+1Z{cz=p3)kxfQF{;8(->b3 zDm8kCn7!UbJlm*?a|LU5T^>3Gv`*q6Bz@#u070u^kSKCy!1*rd^>A9{@Tv{ZNz?np zz&m7Ta$6qd)CsPMB7d0c3LPUSNS?J8dwb2g92pgWpL#1}BO|cFjfvlV2VE7|a{Hmf7+c|C(s7{kIBPlEqRQb7{8!#Mo5FX8m7wI0GD z!g5^UuH@+$Wd?}MN&i2YRb?rS?z@uO~n>vempmEeeH2xKL0L<8HJ8S?q!W^yyJv8gI64$iTlC&+b;{LE%AEyt7F4n=G zA$nkOvvujNfL9wNGr>Y5*ek}sb(8z6li%xrqiVNa1_XZHPSr}?|Jwhp5tx; z1h$9>#AUr>51-%3l8dMzz;ptbMYxjUNQ!E!2PI3e$^FrIYAWr>6^A}X@AKY^;~TsS z@_G4e_g1YYD?inJ@hNY(X8~%3X*cQM%u}-5krkALkRIyKG+ZuWg9r;2z526N_DH+0 z))pq{_Ca&K%N9Q0FL9Zi&zNAgi-Qc95K_e)i*vG|I?JyA0LU3^oc~&+d@1`YXXJqo z!KLlAf@5yr+s)V~xX?FqBFXlXIn1rchUhW)r)li|DI8b)plf$QO*M|))c|@Xa*|Qi zi+RZtslp*G3I5upWKk@@KSU)VC)pJcM-zrSk7PMW;-0=mUA4_F1U;#2EGL}(YLeRW z_$YhNwdNo`9HPZFV0#9crU`?6mJv}WcQ0xG31Ys~HPD9!x7hAE&!zr0gYF6fbztzN z$1A}46Pw?-Fb{gH`bqiUrML4$(@2O&wRB5(|GIt5 z_Kz42vfAf9EicLbO4C(`R;tI!{we-dR&%)Ja8|@bZM%K@-2q(nv2D$DDd6QSqN=vj zvid^vpMK!`ET^6h(RH}ZT`$*nMfE`@L%FraoZg*_o=<>wDG3?YB&p%*wCxbneb}MJjk5@T2U%ja=$ERRI_X_9H6G|OI;qIqY_6#RNm-U|2R+un%G+P@cZBoA=j*IEMZ zHEu^lB8qflWg(iE<(!9+cs3|`7K{{tU(*aC0B|lz#5C;WFk+SS0Cd?;Y+}AaIIv*g zAYBf-KcuGb6TNiuJ7sT$erfY3&K=Rk0sZ$hmop#hPMqHFT8)FTKTj&{{r2GI|L-|z zTv;N0yL#Mx$nElkY!^ZGc-L}0Rx02^S)tp8rxFE9jo6~9hcscKw(Jjj%Zznu>7V#5 zd4k9Fji3eOHG%k|zOzqyg`j{Y^LtMx=t0pzV<&`mz?OC+lIxg1l~lrlZ634|A8`2z z@K%Op1HOs+0dPcSxQ^%%8oVlZ8KsHXvm&LbiZhHQDRR`vz~x;?*nLL)LdsTokXIJu z{_={i8@R0#S}R9{F0HO-cMJ|tMym`C{C_8(dZ{BLdJN@(Z^4s)n|=#E!2tM2G905O z(i~8fp8MfVU2a@0e}Au?pupB}XFPwJBFcQH5e#+ix+}exm8Nl{eN|J&$9%SB3Uul` z99I$gxyC)XM0erp@#a=h(4jhdLRF*d92&%_uS3X_z3va)HifU|l<2gm`Tk{V+b;(} 
ze0ZdBfxSkU*P}?soQ$|Y!`l8tH4JB}s4nGe;R@Akmd^8PP3O3;53{Nze5zbgnk>UC zAJO|Y^aFRY{^J4PRi2=HEP@^Quj5QZp}E6C%wC>jGHR^JTtJD~MDF}Mi;r_BJK(GY zQY)V77D`yGz+u*rCQ9nL`kgXwP$)KP;{rX5{?X}A7^v3JPjL`A(nuj1O}HrJZ=n<8 ztoXNG!{K9Y?gQBLhHs#ew&qd)Jesv@;)m$B$cc7j(jzS9WN7b&qCcbbCgtO4(uD{n z_`pPNND?^@NXPVgvuVddGaQ3?<*Z#D%{(_Bj4HIxeoa64uLpv>pux5JEel;=YASy{ z2w8!Ab>=8@guE_ppWucA16cfUP3d(V(fK07GI5d7W|_s}5nXYsB8XjNBKy>1R}=;6 z*z3um+Z*9skj$%R50W*y*mr#N;xkF3^re!(p<`Ap7UPU&B zrLzX_soLx(H4pvQ<2t_zyU*RTj(aG`z6rskm>x@D02jpu2L zWF_P9ZZ>vov1{k2`?em909aSmL`uc%B>@sYD?D_$&Zp`W#{FIk(TeA8O z48}7_PEwaXr-MxW=e2ZF;ZnvOtZi$zjeW=`>BOe6$VD*pZl7{=AcSP(;!<<44$io~ zUL<#9eAtA&3E0Zk)He1`?avabdy3F8H>_h}sqOw@q^IySECAN~FFn~gnBFwm1&)jR zUTFLW`81vL4(4G)vl$w8EiF_7kdNd=aJR?z-~2oTTfN-KN^zM%T%dOia?s4*Ft?iv zx3g`Ztl@mtIoe^^={`XGiR}->x7&L_9XKJpW|>HfVVU64kf`r|D0^_4iQn!J=sF8l$O&1zNLYn zZSSasyzos*)pM&AWRc!$3U#SYU&OC_g^L-A0|}kb%~0|~-%+gS|6=V{<4NsO$d{aE zX8s5=vSX_cFjKpV1?u5tO518c3#%lx6-0O`zM$5p5tpm%3Plu-8fmBW5zXkA7KyNE zjeyJ+rVN95mJf70Uu*5(D5sY3!Jcj$Tj(u)8^Tx!WhrT?A0AH_)n*JK`)TtS_FTq51C-?*=0@XvtR6zQUs`l~l2E&9H zV}Ex*dh)sIBq9x41&n{+C}c8^Xi`V1a@flMkDBxXE9r9gOY2>!$wFZyv!7;CB$pTA zAMSJ}e^ErJSOxxdp6pP0+8}tB@7nNWi3!DX^2!R_>V6|J zy?0VM^r=LF9qPi1A8|M<(WU_{G6}$9&ay@EyH5dhlDU-oaybxS2sr_CxDo2HnY0Qr1v= zE}(x~0<#;AB9FixMgc`sXF%BVuuM4hFt2jfEHC?y@E_VzQ<)m@duEF+hbr%;GrBT@ ztY9{K3hV_iGU0>I0Y@dIzy307O^M?#ZdyX!8=?VMAN#DOxMP_cGV1<}lbvK!T1&nv)4M=VoG3>=bTmEg-JayK z?!K5tc=>JEZP5sY8oM;b_oAn2&HqBo!a)@4s&@T>5gyyz^emqnN1uwX3kKc~U` zC##4V-+L|Q?mvJY{k?HiMAl$ilKZ%#L0k^UDUI{m2C0WNZN^XY93U;nVPKAUI0FHd z&!xoLqi%zJ!w}K_D++aJ8NN2V=ca#K_xOI+<}*>Q^-pz=UJVTJ&{VWVx`7Oy=ftMt z&@5R(>b%SBKQWZ z#_IxJv#!H90t%dOy7;c7hC038B2r&p5sVpm3GZ6Q-gH=iJz;jsW>ubYJ?)u}gIJjf z0k=i21C|3=0;~}MTZz}hu#ODnEu;hR6NJeNSm0hWwqh9kl%OTPm8X4jXpBH7OSzeX z({K9~HmI7u=%(&rnHuk^+teO0?jRJc(85?qHVbM^>W)l4W|C)BPOWH*jyZX&r~X+~ z5t{YXcDh$ir>d<87l)%|P`$aJhICJ`?tdK;^pB+>4A;iAjC#!?CXLh-#?;jph6O;i z49I>iJUx#vFzykmEN-fbyF$=hCADC9IP6OuiR%I`*9GKsH>xl)?$uhV`u zix{wLPn#Z4dw?U{16KBc_FtTaEFvV1ce#C-(({M#>zfKlo>eSZBz2*c%wLT>+r35$e-oQY! 
z4+-t1qo2PU(&_m%3(M4KaR+}Qw}V{3UI%t0c4h65>JSvgl6I#PxR}4%LzWS2kQ$`m z)Ml?AG>=KOyEX|P`?wA!^lnOWhY;C#yZ3S69FTN0=|8sD$0Mzlp^wZ@N^Wlt0mowg zFe;(latmt+RoNl<1ZduHQ->Me8de^jgU4AJ^pxjzmVI5J5>RO zVMds|{-hJ>Gb`-2=Fl9<*_sP#;*^2jp`rJYl9cW>(5_HSmhCY94l^WDvk`u6b{|Am zt!160`s|RPCBVEeCXkSbIF=BpOEW`~dz(s1m-Egf#fk@s>#Rx%=O`T#UC!^-S#re5 z@j)6{-s{a0X_sn(v}R`fwVRj=j7(#JQd)pLOL^Ye&OU$DeM!GXGhNP^1 z*uRZlT#ho8`5ZhUseIK)(l^k(9j0LMV=F%vzu+g%j}*Ji4LUk>P=O2@P^kZ-R|e>I zK4>Y54N7y?;W;jFj9fe@&S1ByHK`bkIU*V@YcpmcOsztR9E_ zqP+CUUv1dFo+RZNpVZ?Sx2>u*mZUMUPnVLq()dIw$`*Z|A0P&mqzE~29?#l@SY~wPL2nQDMK)hN^xBv@)ZqVL*6+J6iSQzxHtBQE65h`Qf^54`iPs9k3-I(z2jm z)%}#Wp=k^o1`YcB45mJUsrR@XmAE2q3)$7Gi9@dv(igiAS}3%iE={S~)3;Fu8S=cU zMM1yrCWgtKH2GJocU^aZF#X#0<8fDx+-F^U6|w5(9p)XkqBvkQIlVwCyyNiIc)$6N z40$Scz$H9swKIj)x*p3jq}PFN4CLl_8n2mIQU!h;)$GT?nlo6tEN^agl=SY~k|oTGpN-?=EEDayemi5=+qBp$I>rGO z8N%+%u6wu<={0S`2i;!RW`$ul2SeBfxdPS_2XkQyRsz)+pVcJz-*|L?uVKnFB!WNO zl)RH%-LK3#)3EUPH%<4P>r3{+(Zs|bq`&2Ga`;;y%eTBKk87|!ZWzTK9JDJN`;~|O zO7R<>X*%mC0JgiOmxK7__8D=}wL=4_ zF7P~lVPGR9Mi`B9eJF^VWs^gEW;o2is4UuG2=7|@?;*is09L#tShmzoyjK&qUAJ z!xnYsSJBj&I{mUqhd{cxfXzjmGI#`(ikVj>Ct{JY)%@!dIMYok(6-q!(zUnZ`>zir z-N<8=?KJ5_?*EN&>-aG|6}LAn2%(4}^4U|o%@tpJLC+AHa8sF2fd{t`$q4RvUg+@h zXR94JR@0s0proq%179|R$^h$e112?$`KHq>4qv(J5IEtleVZ>m@-Et3Wl#)-P_~l6 z1J>?1W`+D4g)M{QgioeZ^j_selA9V=B%weZ=CaO8fYbuMlrX$66Av)`_ZauJ7I`e{MP&qRaQr0D7x`!uR}(Rk zHlf|)T10vBEWxVylfj%3hJfScPGlUHP}&d`|MT>ifrpOi;9x25i2&Xjjf-aE-)y9a zyLg8ejksh9GSmI3G=Im@kM>BMy`(uHq5j4nLR1EbL8~vU$y$R0(fA5vQ7LH{s9vr{ zy?;;{v3gUSUCWnUkVgcje+-ApYQy{?p|E;NQ^bI}q<~Tz)1fcUMsEZGW(E0jOyT9GrnE=AoBT(-NC3-e*z! zL>z#0>3I#;eM$fZ@04#-i(Y*idpGlaP%!lh{3(~Ma}xZoK#|MK8)7C?5Db?b5QH+l zx$&AW?yWlnA_j8387hZ;|8mIqy?+N9girK}VPJP6$jj*sA-L8p=DMmw>`i#(WCwH4 zGQM8rNmVs>Z9N`Q!}AWSHN0&8J&i0vC3Gsuzgd{p+rZx_}reRGkk2gsYswJ_n?@MAjjuyEN{>XQH)*yfHaC5=fWc52` zQMBe$ofo%3p<VhM^kIfA@L1sLRzRVl&j;fTXphU~iWqo1S!CEu zjIt!ei$J~fWFvMXUh;q*r1uV9-S&`1rExV`N~Su;=haO|<~&G($`0XIo$vBFLRt#A);Z=w|z$I)Ij`691BKk)Gl%TGU)`=FQV1>C7p;j6C3gpdK0k2ro>S=0W znHADA;NieR(<4GBIm&ZP?;N0{Pz)vZ+DZIf8Lw0GMG%}zyQ>dD*vQ{v$yK$E;`=BS zqL1pKjxah>w{``YjWj5gzf~IITkpJ=Ma5@)sViET&Vw`?Dc*n1wAg|2y_*kL{Ymxo z8-MHywOflrU&m-V=#s5XPod{NZj~1ejXPnpWl7oZwGTDk#lVK_7!^kI-HZYj`qXGc zyF;QY3ad%M`?ynGP1#t4%3=f}_ag&o)uiZ`F+FTvp~#zaRLLNrkAq?JYMPb(O}KyI zZJ0eK<->vRjm$L0y}%z&b#}p5e_j$nWDk$&bKtsR|F{H|MY2m7+eP1HPm#J@}-?hoE=>G9uS;Z&o)dm zvd4H}!WJhp43y!>gM+P#Q}{~_E<6^$Fx~Y7=WhC{%jPCUNh3O7N)0uKWB}&1JK{ue zT@~8-&qn}-cIiA|t@V#pL`l}0MzZ|;VzR#4Kq)4En?QSZGJ=Lk5Z)1vifxJ)sTuYa z!9`5%M?Y5!Auf`%%R0e|s%W$7+EWF-Ko{XSA`#XQC=JD9c+s=tkO3c0(hAQ<-O{Wc z^TuR%nwR^R>?kkySP&`~i~XaDcxs|agXH9g@hbPcGYTid)%(T(VN5hL(n-((elbzR zned{)PK2V&WJkEKs$r-Y;{u286kI*BRnQ_HdAgQrsQyfC?UZNsmSK7H1{=K7NyQkq zECcWEj^J)EZ~EF>?GnO!yVkh;3>JWJvfaZUgJC!>YPI-C(LjZur2Q|XgcjF>=yCzx zdy&oMtq<8{WT!4m!D&~?+D2n&>woZ|48Q=Ma4W|YHUL{Mu}zB%b^ciljW2})(%T%4fT-4wUid_Etw^}GJCYzEqRPHaWXI_%#f}F8>T!xXa53i*BD1u=CxFe2 zV@;J2fQPXG|3beo&)t>nqKHHvgtd00k!;dI9_5vg2cH7@&Jtzah8OFan3KKnX%_RW z@#FB&xy2~~=DUC)uD3a$$3mBmj?ka<%TyuKYExR|Gk@+>h^%YX4&K6Ss=Xog9G*KyqIsxT9}?g@~v`zKb>tPq@894nCA`4(~5OBMq&W)xwIVOq*GZuUB*Vx~_i5 zGCzd*K)7k$Fc3`P2u94!Z$E40c01T;L>W4+GD|+?daXKkUiXs6F|sh_hlohs6)S&o z?ATv$CCWwKviqssZ@GBkHKM&ANgE&fV`aB=L6*{8vjZ|d;qu_3F0^xro$k=OseP{| z_{_MG->$}ACH`K3Wg~u(`f9{0E$cD_rUN|zj|+hBrW%=tyWpWX6%pI7PMK+eUtk%! 
zJPk6A7k}1;aO#fxaCn(^Yz5!Yv&m9si?YQjO^TWO5*hRS-h;nyyb(J4S zWyd{xUX6qtCNue(PMfC1mx_H$t_AFm{f9HGwp*f!O{Qb{F2~D_Ccr(1V(k(xooWfJ^^&QpB>$;epd$L~K?a7}PV29By4Pi2G=FTys@f>N_#UkY^ac zD8RVD(&Tbu5pm|3*x)j7e&Z~82*2VFZ$EM4i)O6c&?S-=nS&n5kE+uo%kWZS43R3C%r}exTW^-2p z`E@HfD`H-&(d6tRtoYY;Gu3%Esqw(7c$-F3G}zJUU!D!?%8hn+ka&CAZTH(R9JOPh z>w<67k`_9ZD7GXHnE%BAu7Fg9^*4mW!Zd}4e{q@F(^~uL2R<%RBPUg3Ea$(|7fQwJ z3$3Jofw~CYXFQ!+!GT3sf@KuI*5Rs6LC>UGF2DHb_$CPq*OVv7Y&?lp#vCmsW>d~d z4f4W0vKLbQV4$X?Hl{g7ProkqvgVPs{QUX9U6ZXcs(^4+E9-3?!$m~;s)TKD(4Fv@ zBqdP>qp;@5XXaWa-NGx4zKMJR+)g@Sy8Z-(634*c_o-P56gbNJuKTc7jAx>L+Jc8|Dk7t(5$Ri^jnT*d`+ndKnU!AflS&?rrT`{#0i@g@VAedsq8EV8 z0)(Rf?L5}0qzaBoPsLmRuw)u_<3whJs7YdHk$|JUggS1uG`OATf05YFXBrw~>al}N zDuu6zk3hqU_>|WUYWoR!M;iJpvF^NN$^~L7DJsVaKJKlsDQP&K z>}!?qhzBGnb7H9~Z6k+S3p&mOK`mI=KXT?1yY_wyL?jX+VlMooMHI42@qvxGOKv{- zW8hy?J8C*Z0eu0y*bjzZE->{Ehs?Q1q*)uRo5@>H5Dk49Fi|~mw$jSYVEHTKMLI#l zcvg7$7c^tQcH}C=ct;#WOA+()kB0LGNky~_bCq_p_$vcad_O8gFH%)7>aho0TipR2Yvh+2aeW(%L z(zhtT*WAy$YJ)KhBqZMNRORM>8kjN{Mxvgeiazb&LYX-8hk7?2pHLJaT^TkemFfd7rkfX<)->`LV^pMBX680H84E)-3n@ab8!xYdwz{WUoPM! z1!TgF<@WH?vcj7$Kb2W{ zk*Wn0vWhVs$74CkT2&KIPVnnXnaTb2=G%98)x6r3(KFu75r*73A8fQ0?B0;8z|B_S z3QO5;6Pb23Om*o$!?NCr!`AY)0npve3Ho$HX8wElQ1vMZ#{Z8p4E(n9XqdcV>^5`X zOZ4?}81^L7t_lMU4==%$esr9Rw#zKm-SZ;i01v$Zc)r${k^4;cfv;VgupWvxzN{>@ z^?pxR383KFi*FtBmrX#(m-{|J%sSr{pA%5u!R&Kp@ooZ44n8La)7SM1Uix1QX)rZO z!94aJy4|KZ%JS~#8_671Pb0I?E_^p%oQn_y7~$9as|co1xux(PRI|uBYaK2b#10;E zm4tg>G+Z&>uLzFbFMC~XqZD|eJE}rZRaCc-hfbFpVaPdRQA?l@ozFS0$4vW90x%`x z!)E6{U%c@`$vwd&bsj#5#wbI6)xUnP+#i$VVE0@f2gk6pdak?ZEV~WoTBz)W39I`R z9?vm>vNJWt3pFOo2pI$-+7)WfCqXPXCwXi+y~uxh-O4`ty;?9?qsYbBIy(eUOME^! z;kae~!&SX*Ys@qy%|H~bD;23M5MD9~Q0fG!U(-@#s3V)rY*DDL=E7{}zs@f?v>J85 zvs{X;8pL1j1C#H}7OOOihg)pPeI(l7#gwn)=f{w}z9PyQ4_=aDQ=9IQ@i8kk8^rjO z2k65FwMa!0cfWZJS?HwTs|e$&NWv|2+mAZna#dRpXo`T(4aKL=^s-qd z&Ig>5xW3JJx$8bc!kVT>I}kbJ9pcIlV)~w# zcQj+SRix2Wl#X1#&QTUPFl+G$UG05diu*(QzCz!Y=@cP9Z#_h#pjh~{G$ss$U(2xO zX~XT9PwjI_W3jjnL4KDgPR%(>Y1A^CG0R11YhAz+5<-4MEB>W?NzFZs9@d9hC;%PK zWgx!fhhd@Tpvy6*CI5cLSd@_h+a+7l*vG?oiTcUcR|?2-PO`s$FHeto85J#4T38`J zT4*U&`t#u{@HLhE10Hl={%6JYQvNU)`$k0$jVpNYCzt6D;iiI)58e17Ijg>YL2p<0 zz$cF19i>0Ul={A&4bGsIMbX9q>cF)Mvu(=zaq((08bM<%_;|A}(-nHvgc`2~h_1@{ zlVVWdY5B#CrkIJeKZsHTvChj?%sP^Z;}1;0yuv5 z!+Z3H{9)U5lMn|N)qfUCg2gy*1g=95W4H_5LRLIvFg2){7^}@Z4bc8c&&cGHwrqKC z%1xs+mprOsYh*~?5FuhCTgzkO%EfQq&U}N#@vQ_pE_RuBPicpg&P*`Ec;kP(c15F` zASIH>($WD3qp}1)&>{-vKl5Y8&<~TpAE0?l8?0TKX`B2zq8Kk*cHwQT9rRBuU%)gt zi<|A}eVl@e+UHexydmIA0ZH;&SLwf?3=-%2XpQ2G3d%2hf$I3ACRI?+cs$ zS&p}H1VoXV{hdzwM$@aBPT0n81wq>&xRCS@gSO(~F26luh{CQbkfa4B-Gxw$i$59x zv7Wj*$=pIq;4fZ!PL>EU%}bCP)P1Cgwbau6J_@5%p5kZC0v+foH5~w)x6E<6fMvwLTSnsTNK%ExbCuO%$xN-3wN_2Ko!_!iU8KE0mLBGKx-e}M- z5aRUL!s_;G1q;~gd&bq>I*lq=r94cOe?Nle>V$~#&&Pk-L(+Vzt|SI^|CSYp(Y?dy zLihtLYu`7LDU6PPkN(0j@1;O^14lx&?NbqW!qF-jJPjyAw%rn@X{6czJ|SuyGd|$i z_LFP_LM+~mlDu@#pKHTmeGAK0>23{9{VQ-|?OZR|opF2AjzwQlLK+CR3G4FP&DzA- z?A~rZr1DK$bp+Q?_|qB3@m|O*d+z)*k7v%>k@cn?%7Z?RT<_QUb-)F*!Hm;xFw(VpU$CSUw7;2c4JyxazgGTUW4$U_fTNJAJdA!bh9J5hcdDxz0X z9p@`2;{rXA7@aewDfW5G{nbya z4(k~WgP8W6FZ8NB+6QfZi)o&0sXV^O7M0wOU31%0sG|mw-@Qr#V6!-N@q97@tE60< zOvG}PT{TF&1BtlSKS61|cEoQ{!}RD2-pBsUkjx$G-6b2RbZ?_r;r($4Zjd48{LHeP zZr*(-x7O};FOj4-;EAelP5$Xc<;`eNkYlW5Xi+zRrtJowOOCw*j-7`y>AwJ2JLGx- zz9?1`cZ={B6$3>=S!rt%c~lKHT=G7`%EM?0B~tkjMVVKhHd_8kfIdOLQ?iI#nnVft zDPz3wRP7eKJilsYG7v#R!1H|*OZk#VEE+lu@2F>wfm^ZsUhZb(Y2~#g7%u zC_a|OCk+42?Hzigya-PQ5$`V;&2(25uY_sJJ?Z!pn;BO2f4z+X!8nhm1lB_VcyPW& 
z=~s5@-WGGK?j5ER`ip>n&3T_V(D9Gyw+N%p$2Fb8Qm0V=H;#s(^XcwIxFnT7^@dSWXErABh~vj2M9>bJ_Y3YHResd7KXTuc?7M1?8Py1U0qNtxY*`-s?xLqBig>2`ASOmy4|Gap9q^Kt$fw#O7u4i4>s3oKa_e`z4i3feEB7 zj1hWNI^}^&Q5ZI7IdxbF-TyP63)}~a%tzX~epS$l`dCsrIwAK4srT-1lVA$ey(7PL zz8+WPp0@RBcKG!*B~jBT-=XfXDd~0&k#bp17}1sddm;6TyU7nPVjo3J=oLqnP=k8N zwqsv0u4X-MfFbGG=T<}JPD`VnE|YaBQ8S$>Z3^>-80y%$6Z=J#>ko951kOMDo9|G$ zJg3kWG7|r`$>P{!u`@}2ab`QVr$J+C1Sro}+8MBd0sFx)+aPmgY-bX%yLz*wPL2F>3|G-6 zrq?TSrpSITh?~rBQik$drRgf;03rFvF z6b6{!7~hj}VZP{)b0YAFIvsSRe?T+wTR}ApC&Q%m`yz%NJUPDvJ?fQo5I?5#bqul# zk+!?xP%4ZFO@o{dBhh@qi92#0lb;yx^5Q#~*;jYae!#sID6_sz_p`)UrfKQuvDGN@ zvG#O|$CA2y>xc*%gWEv`r}fFH8Wzwyb=&Cax!&A|S5l68=6CZGju&e!-M*WkFeA*j9@GO<5fhhl{o4`LQCLL`sV$cTeTxl_ zy4DYeOC#HAkQopifAT7NUA(?cN$NKw>Ldon9;37Y=%)*Rj@*OZDZJl%MoYW9r*6}1 zW#;Qwsq!Lc&4{_b1?$(`E`GroQ;OgK`3!z{(&Z+zP5=8;tiB_=?S5&mzdOJ7k2ha? zSO=4F*I;H@HErw<>u%KdtMDipa68aQ#gDR?1?MJJb^84}Vwoel4x*3UzjEEhg2U(` zYu8kEUZoM@tdA^c(^5-Rj3NKuDpeo;Pvs>5}h#b)yJW--FHmF~<_6 z*`Mv>Y)mN@KcSH3i7T23&%P3a2+zxJb@BzbgI;(6D!QoOwPuJGo}BxPl3+q)*E}ml zd>A39GDD{*J3<*rZX)`=&-H^QDrEx3S?}FiN#RM;_E#sS$>#5)WQQ*i|LBH{?J1ki zk(g`ev+J_anEZW_awACGwuFP=P(FG#-Ox@Q^VUT*#qv>ttxmGI8dYvRq~GshV!tk^ zc^8S6q#)uKoooL_%&Zrvh2Khj{=hN7a-#f0jIox3A=}yT3688_(|D{EVzM2W#bNvA z3#LC9Y|K;NQz$l6O)$AEl|^1Jw5JTrF~@U1UOS`dtc1OGp<0X2io$PL2XyC zDNm&@$c)x+G1Sq&c%!jfEWbCwFdp>Rw)F=YuaFKuF+p4ZB|AJ0Sgj=t^h&A>@qXrw z*PwU!QMs+ar2ggyLza@=psHr27T{AUAjt-zO2an1K>96`Qq-9I&=lSZ`0IR}Vlm!_ z0tV*glL!KBP;E!2ysNmm>~`r!ZV(B6eDsz_7X}25Wd?qz!9t72XiZ1t+p=8Zey9}Y z+~oYAYe1qfTUK77`IDF6D{-YumeWVWE#^PLqXVP5fABhry43GypEVjn@kb*i9NOuf zqLOKTL}q8CU@4MObu|k22jaS$?r*9Dg^McXS9vS?s#An3`@vNR+X&npREszJrhZg| zwo6^=FQjkwj!3RKc0)}d7t}7d{p~L(V-Rl&ZzToPLABW3&g5wv1E~L<=hHHwMdz2) zXi<5=^5UuZP=wZ6_vw&FlM=HNGNL$4n~EOLXT8a#{K+8wQbtNQS_lUDtTvdqicN%$ zeqmILuUcYNjxI8BxdVyYqTpUiaJV`g_e<|?wl`Wi&dTi1&jYzXF>zX!wgIoXf3rZ* zy8W{1zv--x`N5Rl+#fW+=y-e;bIrL7Bw5BfeoO2vp>m$*EsH5CpktovhzSGx9wFna z@AkcNGpu$mbQSSqU+j}2gh>=sc}a3SE0^x3Mot?PPI6T7F^uOK7%2{GXp!~9FcG@& z4AgU%>ZK_n3UC)=cph_=xJvH7Orh?7()&2?mJQ?lW3ajWpc0`VM;rEhn-E0QTYXn=y2gF(qcQvs|uYdo_XeKpG!!0HTrvx zTc?8pOTkn-B}PD^f*hMfJKn1PTdL~uZ7Kpdw@E?|6oNH@2~T+Oh@K?f0`6;7j~**3 zt{t$nkms}h{=`8OOtR+guhPp{>TWAlhYp$E=`;Po0X11pvAbRQRONWAkY1eSYURd& zb1f*`{bqLhi@0Mr2+s;?b#q|vRkIc7Kk2*gM^uEOVohVmj<_t5MUhxr{TuOX+q^3c z86OVh1O?4Ykj@R0eQBFqn9eXVJzIpdPcv&MF39cS-K73OJ-VcsZ62_U7; z@{!;%6yL@BPKvKla`$SFd1?r@L_m<4_)vDrPa)rDR@=-z6hiz-uuh zhVB~Ud22Gs=^oQp-Rly~5iG0>Yk_kI>Ch);Zt(i>0?_@(n@nGBzv5#Fp>#vPD2OTq z4r+$;Tv5hZa{F`e*ZHfv0*T~jW$*AfN1s9Kg^!U6W;z(A^Jq>?zncYB!xwr?q-HeZ zub=8wevEy<`I&ImU^U>-wy9@1wW8>#KFX4-*|BnRKZMTKWX+ZsT2)MqvBithdE~Lo zYc<{mr+K>l7V655(h*h45{2_kwCWgPi(pv9TJ?v$^RDyW;f}~2 zZ%Rig=jjOA%i@7?(?!Im-8SZy> zE^;xPOm4N4`qe7MA}vRI-*mQMVIdmU z+pi<<|FXI{tQ1$#E^%etu`LOu^!l>D<~Geo9mDvqhYgg~hjfcFB{p*V_8_sO29g&`(I};M z71qNooA@EGvso~0y3?9HPppex#*oTxUHK#Lc*w&#@cm~9T!hY`yC-^gRJyd>6S6#r zAQvsoqg**IXNRqhGu4>?h0%&~rCgd`@|v}wg`t`~&A;<6YTm*_Laj~1YP^wTAcuQ8 z&XM6ZTx0Si&>=3XRJeami<=9gPg?!q`Kb0|=iiUr%&L|5lm`x}fWD-mGCSG80A|9s*+%hWQ73}I2rc;i z%F-yB*LO1Iko?en7~v+so@6CftdyT6?xV~}r$@ZmTff(Y+=a?_?(32v(FgKhD~;Ne zE7At#1;@YagpO?cH#y_iO8Q0P60`iLBP)8=h!MhRihSL!kG)MP0`?u^gn5P?SI<@a>d6;O~``g`t7yGV|@m5wm zD{sY?uT4&v7K?VeP0erV(e;Ny+a;R$sqaM4^C{vb4S%hoe0CQOo~!xR&yOM;H$WPQ zu1PA zT<7(CTQ+Xky<-#O38(fhfjPbki^FZ1%r!X2PZYCvy}xV*f0qv>!E}$_>>zPqk9TgJ z$|OO;u3`8Kn)Zd7S3Yq&C~gnKfX zftu8o_3ll6oEmf$)edQg&d`>?adkig)0}2TwpJI|-CU=Bm6Gg;P8I~x)Na0UJ)W;! 
zhIDa5s41o#lZhB3#RQgw3_FI`0=ew8> zfGJD~Z~P&<;pDI%`BVEKE_kL999gCdfOJTYXv4f;yXICSl~EUinh68@mZ$T^Scj$HM?(Q6E1pnzJ@(|?7 zGcBXZDkZ}go;-RFCD)cWyscNj^~2k-hpR=v zhwe!?SOU#Z;?t-4i&Our?|mBkr*6NQVeTt7?f-|Vw+?HoiQ2tOp|}-ycXx;2?(XjH z4#A2`ad&qw(&Fy!R-EGQkdx#nP`8Q>?vsx*Gau^_RM$~hIj0^s35?J&Kk z6U|T0ziJZ*>hdl!KXmQy38=CEwT2H?XO*~4x{QnlVkqr#%EI)33Yr-6^tF+)m6{2F z?=Ij64q?g#?@S<2NRz()d6ep~VPr_jkZt zn|SqO1-(ZH0|Y&bninOZTv)r&kb1kX+PANeD0hG5SjlaTO|>;(Y|~G9imU=F3IyUp zKF$Gy7#!qv$AoRPKf_enrZc}zob5cEKBuye1Mx|Vqwd0~N5vwggF{q6n{%K~kpzaL z1?$FhEKE0PwHYcyeX*<6ffXngj6$` zlh%%`&I1j!es4t?AHLMR40?@KA5`7v3BcO{R4L(e5yB^9#`vn{<)LS|rCKx4K_dN1 z0G^zigiEBSwl^B?D?l+lq`?=c@tlulSs%$Eb*BAtU?$fWH5_CRPveQx^#9%~d}81; zbMS;BgGdj^WaptJA`J*J>B!)|vh;D-T^v&bD?poO6wFRnlS>?@lStQ|Sq5?vL!$=R4acdI0I`|Z7|0coNhb?M` zoYz~6Jwo~sN^L(2vFy?r;k!*0Bq4Qq?O5$ew|wYcKo3V?(g|#l^c{CcUczsMt#F~q zZ{4t*AWMW}mkUF%Ii$5vwCL8aL8~k-DlO0 z5Xa#lUOl|0uk@Jysq;BEQ4=@i6~oM_xzyB&QI;R2@Ae9&GCry(hF1KM`1ma3Qq%6rJrBE5;orV?DPIt0-Oz9d zvwj~eekQqtQ=)|A+|myBaPH*@8!;ha|DG*p1xhht3G7~aHbbqV-I{EEj>AmKDEymY zrGS84ox|{R2M6#yC<4soJOvxQW|kWNn$Xhy`#_ty9M!MWFehs9 zgKZ@2IGs3wb~L?{FVyvlBvbMkL?Z-u%mUaMf#ucD0$(9Bj1rjq(qXUjARQgYwD-Wm z6xsL$Bl$x3@NvvA-6U}Y5<3f$75sSNWk6y>-$@(+@B?n1?#AYG%+|D_nC;ehw-fo9 z6O^Jrx8jTveK#{=q;(4izgb{?-YQf}ok1mN*&~S=t4un_|H+{EdVjVxy5D1_+DT~G zh10H=&G4G`#eT|o&(f5A&h90;oY0G|Ss37DuvsAdZ>F-r&O&Iw12wFGJSwWW_!O5a z9uTYd$v0c8isb%;5Wz;6qU*KK(Cnl+8$>E*h1nncE@uBTLn#=g-pFfYxt!^)s{WMy zj)Q?mJV}ZOc-ziWTXrVJWrZAy3H9w{R2Z-9)&}J&=qFMZBWUp_2U0GGoYVt~HZ}7c zvU4oa`KMaT=ZBJJ(GM9R$V6>K`mPe=pVycH$RU(x5=80;twg|ewE%C5V7Axi>b#Jz ziSxzKC7vQA$%uS735KBadIy1+G+@Yx;Ac>w2i6c>ZR?AI4|-t3#XAh|e$PD`xatSJ zJp!^`Ls3$`hVexj!#tr_T?eaUE_@8$Hi4KcuX~n3{~gz^e9R4k1lPc^jkTFs0uoJo)G(*0@?1bC{9w{U<``C~Qt zJztIrR6i>M-yGW3kfNIpIc3iTo;p_3_popm*u0DWe!>uiW#wrg^S`sa6y^a|@5Z7A z0RB9NHyrf>#N5Qs!EYY*8yJ~MsWdl&uv>S`e^9xRx^;W=%- zB%N2V3d4=h^T~hJ-kjsL&3o9R1n8>iI$F1Q^16(VtF=n~4L;g{;a7l(A#6ka>+o`L zCzKOS&*ua4gzNS1lgc4)!G&U$R-89AdHM*_aK>Njs*a)pF9*r(J}$DnSMLbd6eGp1 zfqI^677h91$QH}n&5b>aK8J`kH$Qlu7}*_g{~VpZSWuo9yKE5NeV-ugRibtBJJWM% zK3r{re>B}i^LDfzv=G^WM#M6M@f3*!Y_BJ-(#{+;P$gv0SbXhiy z`}On@BI^X*pHWb%cD1-Z@4N(e_$AfIFXgezjYkLCP5aEy)qB6fk#890?+N-~cNAu6 zyJi4PPRoFdt-|ZDl~j2X8`C^JsECSmi%}LaELv#Y|s;OP9K?uVT{`9p)cjL@b{+)mAm@)9ACN5 z;n!UgYAHWH`?QvGz4ykzo%g!D<>2Eh*DaL9gEDQV#6*up5uEU_60)m8N1`ih^|8@- zvqJFFxOgyXzn`bNZ?{c!R^7nEh)I>gLnvAPrL(+%O39O)iu= z>x{$^;yU^mm*gxW5B;*)RZlahRJJQiA(2L@%>Q7Syg1x6=E?ReV)VN)5;P~;?VlMO zd6{PK90;EB=@}=ZQU6lrUbyp3W-(3F`a=k00wueF(lw6xwJ0U@v!8?DlhKPEL#ZO! 
zP;J?2`_=*M#&Esb_ZRN(E(d;4a_3bHy~GW2qUKTv)P^*mMcel)r0k{d%{yl`l$%oZ&Qq zGWQRmX5ts2O$aLHoI@6MrT+{-qot%=*9uQ=*Dw?Lyfw~M zh=YBvKus5VdxOgfdGl$GO83y?^(ct>m9n=4tm~UJG{(lkl?fKYu&;bulNjG~WV`>) zexMgO;T4~@w$>Yoycv`6W1jtTS0)td{0tC$!(KE#hkP|&LZCYI`A!>11Dy}JiD?%^ zsS4(M>4By$K>+U+NZeqpxc3mc99sG2QVlGP#ukqL&d7z!pvd1KaC>FuAT5ro&MU8S z_fpVHN+mFsIM(+T2026LS;d;kN%^k_PIE)!N#}Fl#D_0ER`I?LC3nWxF-%H^*rxa$ zX8GX~cVisK*0C1$DJ%DpNRfL842E}af>=5DZz){Vr4*{FVi@G%#PI82;|NUND`Aok z9A@7OC<~3kfcK)~Vd!D}qWH-dg0?Wk(5Wkl>GC#1R|doA3Zhk#3d~a1x0LKh0r6Kl zj`IVard5Z7C9TwyD(})i#GS^@RX=8;DQ6O3zJ;@+JbVtTIrxW1d$0(83pg%AvdbHb>s`29)%b z?xWVU1-yGP6d`S<-|z761;D_UJEPAKYM8vA|BeGunf=y=o3!Tm8*CmHZ5BlTnR)Z= zmIGcY&o7+6aP@C{X4g*8Ro9SZz7CE6;;D(c6BNYd)~<{iO`Z(>U_o{KR}9z&&L+GF zO8zw+uUQrmxh~`o^<|w;d4A*nEGg);k(ldm~dG zD*<`-#?xK{b!19y)->GP-e$ZAJ}-SJ-9|+HNNE=3%vzq?E+e-P0v0YH$ft23TxzQ z|H{blW;}gB=XEZ3jPUri@UCv6J$OCmQ7b-vWN7}ik78~A)uqxj!dC01bOWK)s>tFX z?_^%43$-8_RdAm!wf)O}yCCvzxsftQws_bfBKJ>Tj6TKt@(KMWTG8W~W`mRL{ z5%T!>-1H)+ahVOX|GR2C6s&qNs%_;6!}aM|g*^d=-URVP69C3WhL@)Vyim2v6}@## zT)KqBKaMvKa`%Tk)VW|FAXsq*rpDxL*bm@iyzqqm%GKObe2yRuyd3-dw*N)zqy?in zQm2oL08^K64Bij1v29l( z0!of3(zUa-+IriL0FP!CnMLBtQ|~+eg4_DT(gpf-KTW{)egrA}Xo>qEOOwzpr{)s7 zgTB)77&?X;SQJwTF5okHYZPK<(ti1d0LY7Q=JTbK7`c#+yljBp+e>8BC<*$zej@<2 zfNez2Qu?}I-%hj(SS{3mOZTaIfx(e`{Rmy=`NS*Is|}RaaLji#;02*EzVVty+P!oO#sL^B?%G`RMmD+B=~0%G;iRF+-!R8bZW<`{oMNBkprmZi9Wbr#ryHSU zHvdPgt5n*#mCb#lopD!EDr%cPFNLt8#O%*`*8_Wzlpxj{Y$VfNDS?Aqt>GqiqwEUG z&l4012Kx-#d$ZRImLUJw7dCghFhRp%svCrj*=Ic-mY|W43c{!_)9J$iDp`l<4i=_F zB+Xr}NNt9P`rxXv;+BWJ@z9ag%!s2?JqJWpS|d}K)cBqU5d&9k)4Eo>e;j|->=bRo zW6D_L;wkl4TM@h=F+2`i-ow*}*-Lg3`s@o6S^glAzzskx#VeME+q|o}{mH-4-SA)8 z{AM(Oj)89z;#Zi(8ziO-SMiK+uPo9AnkDb_{t}gP|kJI1QdW z9mx2@oofd*Lf9X&>jIDPjXLs(I1718*E{deYkbv$y`0$QmF$fq{!?PfK+c_KS2H@- zFS-mwXT0X-*`J=W9tGADb1=&%w!V62>&={vB`-?ejmaOsj}H`CD;6M%OqXfqE0fFF zFp*Zmg_~hx66yz|ZmfR5545yswjO_Jq2=P9v*k&5NM*sCj|e1$@M)##JXM~*Cd+3m z(>+ne^kU5`;MuP(PyePQ0Oo_dO_Ys?o?I8IUS#D;YC8PRZR8;nqkMT%q(-l*_b^-d z@ICAQ?$A10IQ#Gdh8A9oJ9ur!2OwHbD1epMGIM1M9LqOG#D0MadC%jO{)NgNz+l-P z!vTygoO-sbgf0@Wjl+5KHD2k`&A{ug!yvE<4`pxotC1Y1Lnf`og~u|}1S5afbrd`! 
z&-!=b{GUFyyP}&68Hg`Uw|L~^&A`BTz2`+W*XG^A2x7V?v#W2zt*7JFdu0Q%tJx;J zn>-nGmKrFYE@m-GEG)W?;a!M%j=g;(IF30}THSwW+LEehdJA^2OUWGds)}6T3Ns~CFvXD%)0* z8@qumwuBQ{Xe@l6LKy^U8Fm-H+(JTUK(CvFA7!kB$PVw9`L;;P=kH)M%89Xj!SCEO zjhJ)QwEU!|0#xS7utb^a5JM0BUiAKE^ z2xG;Z`3Rf3^7hI2I2dE%qHa`l5*2mb0^lB2b6MP%4(@OnH{$=fExl%*~&n2 z3X9VEtnUDfBQ?oEkgPP-m<=Ofw};Eda{JZ5(ZUFDmGJ@kTX76ol}^t*Q_?Tw1hJT1 zx=b8Y3&T4zM;=0wg8NB-&^4wbtH&}Z=s3J}a0gI8Xg9@Bd0YzmEWC5jq-0KXd@vR> zlNz1I(U@F^@VHH?8u#;%7-jp>X*R!v6+yX7_4+ZOt_M09 z8yg5Sd*uRpGQ@z5w!VLAQwS<8Yg_wgikA<(bGj`gm@veTDlfe!h_?AJiA}k1e{S4x zLPjaJmU<=uSvq^tY1ZAdBzqg8J+BWO{$A6M9%-Y6Wkz8#eE5DS)9e1O45yBPT{r1f zvDXtkbhn38z03DVb@b2F_RjNidaJ_CE#ahUU3dkHH7@}pfbA{+iK&UXc9B9LvI-0i zE9-UQixSfh_nz%rZ1?vu^iDap&uOG++nOW38x8>%=0efIg*^Yi_9h`wwtq@G>;9_C zUG^sqQb+R*CFN=3J8CXU1-g`dCsEa0%;hg7km!Uv9!_Q=Wf1ZZ&|0@YRiJ)jBQGdB zeGaLaDHOfEYKF_pnU*K*Sakc=a^S*SRk=0)|FZzNaaISufi5@?K(L_sYbQTlpEZrBf3gjCsr^^NI$~lO;Lg$A5vlGN0Yay>l&^KniHdJB2l*|2*kuwXeL6zD8c2D(x zmpF#TuQPgVqNahOajnES-S;%3Ya-%ji3%R)L~xHaqR@mW>tL~HOJ&VO&TpA=UP7r5 z?x1A5AD|masCV`>Ht(Pb?u#J-**@rkg{l0%t){kN>b-TT?KR14U}Qoh%us)gae(-& z2(pa06>4m27TW3DB4x8&3d%YAcq6i`#3ttjaC{-r#i7GuM5s8EIYZR)Dden7(aFLo z|7eG`gm{<;J0xL^K0bdLKwkk9YZN+Rr51_=l&X(K(8YAP!~MT~H^C3JuFy2+-o#79 zBRlK_CoyNz|7suN$m_H_EJ?oyX<8tZiX0%Wy*kZAnHVgpYiTMc-wU&jrXYBd#DsRj ziKi?Uow8qb;{{H_)4L+VgMu*U+1N+=mmrPmkiUOi#_DH6sbxrjXG99G{zA^ zj>3!g@IhWFyc1-w)4}|?SeaB|A)0zjTLYWdcBlkRDKKvK*=$fa6^{RJEbjZ=kDo(C z58D32jJe}b(DYvwhdOJqpUhEJtOLX3gubFfwFtVH(7=kw%xGt6W`Do-f3JmJ{L3!1 zGAoNhQl=MN4-D$KbjpkPZmk5r~Jj`)>tFD>bfSTjcLxQ7>$dZbTZWe=LwWG}Il-PZhi=k2)hlehv}2E0ipE zkeOgg@*I=hz9hvL*&=qqQVbiWCa5)viKBj<>9YLb{wi$IJ}th1s4Z`^6uDYhhZ2oy zLi1|@|Mf#EsBW*;-?pX=D&#`5Z}wWgAMWr=7Apsj<+bBq&bl5?^*qGf{c<^FDn7PRkljSQNi_^6} z|5B%diyf!jZ_gJ1>V;ii0ocp*NQA(VTvRGYe)@;81k5V@sZqo$(yn5xK_B?}QveIM zB#E;fk2>eT%}Xc==7U-*wI+o(+HzXION8|}tHXjGT%@%AGpspV__f~+hB38pxEJOl z;!xIl^8C0lOW?gBpHI@$QJLc>qSH51Kl!$!F0RvhXcF}cXqIL+!XOg$^$`ligc`&` z+cqaPG##->iF68wHw@U>U2D+kAt33UV-X6cIhK}?r z*l|O!RVpqL+Y-&utiv>x>|_yp8DVV14(ZQb;)sQdCx1CAE38uXkAfyk9b zZ`*?NjjUzZp7go^brGihdjFmicL<`vl108lWgTK+PCm0^55Q#nvC zV(5}|vA<@eU@nnmK0;PYHRqA-;&z=2i{1}P;=VN9AL;5CfDZ1-ka-OB4*_!;=ASE1 zh1Qzf2QiJMN9cXsYulr-qA?M2Gb+gDt;``x=~8?oi6dD>a@F-ElGh=)t&jFSWZ1#I zXIw>wMRzE@xL4ib>!cu;Em?RXQtr?raE^ak0G(S=b0iRVBRv$^w}&*!G#L2vF;P^9 z{1ooZ%g_3^nHv5jiNTz1Fl3e{DW*5dW5F|-qq2S^J_Z=rIQsc^p=CLp6ZJ;k{?9ZE zb1TEP1gY#_3m;w8mqO(L%s1zp(5ca@&bPd#iD~Z-bi=2IkcVaT!)Ql`#$zj(AqsP{ zW=(AB4+wip&I|_B0XztqDXp&*^JdDLn6MhZk?rEkAzc2x2NCl8(2!>V9R`78G9D%J?ln03<{MSuU@_vEh-@gi`(TRd*=`Az$->eB5TMm>rxKXnE$*)dJVwNw_t<%Xs|mv5m=N0Ye?Q! 
zdoGgbK<|Jx7t05-xSN{$8XT4gaYWM0CO8j(av>sCc24`X-R!^Um@!i$4?_YAjs`gA zy9ma6B7oxeJ!<`*Yldajl;x7oUX)Hq(@eGd)Uw6G8@j0B8aj*%xWz5tu${igf;VWn zPu{$+(ky!!9~8W{?WJwk83r>MFMlDZ4?n*rroy~~Ks$i@_J2&lIltuJ-WN4X&CJm7 zgkM5?T&4kQRkP%T=H?IV(eN#AFk+HyZm^C>vAQ{$$0sGHh372zhDuRp|Uib7&?5Pa?S=hnU((v!4k zVW@d)!H6PJbTZyUbRaif&wq_?7hOW72WU-)h zVFB{!WTPaysibyaErvoUvFSw|D56Ab%Dncn16zlDNM-h5ioqMf4~Uk3j=t8XM>gE-oKKA zbF0gb(9mdSg13vFIa3lJY#fb}hW*#sIfIZnjV=t6nlJ;s@-GYsFnaMYBeAKhO&miS9-;QT$MAykq)C6LU*$CsqzkXq05Ly^?;XbX)agnGc?Jk1C`j zv4)5Cg?f9r@owQ|N5`Idmp)GspkR15KpfWAEJ$PB=TfqfCy{f^*DF$?yIA~u@$6wx zscJO9o5y-@wk0AJexBiMFMgz@OMAsU{Or~HpM?~wSsGaS_0TdS(wg!9bbJ1*(=m>C zkHqwItLm*1_7JSU>F^KPn`CbQ_2aEV`Tf5Ad?GFp4beG;`F_!iCH8M~o|Axm3%=jH z)jUOt&zQYFO0vF{vBGqit&uaK2{qn9Y8*XFRI@|Td!Z&n}59~k*hO@zZ0}I%7fq~}k&@?La%v8F+ z_a-23xS~?d7a#7*tw@k}apa^dDDrup&T3C(>ot+oKIpoCP~_Qa*S?2i5+*N+L{e~s zMH2N(q`AOYtG>#A7poArD;_{uL4p^8jt5~)w!$V|V_f9aW#pZN`1mM-wj>tTA86`zWKqJu+;&agto zAKu{T*hB-fE+)mIHGv8)6j8Kc+YN~=*J>E8Jhi1Oy6i`W1vo@Iaiz1?T?ktj)*tiQ z#4<1}ADPd}QkP<6ex=*A8rh}$fH9Y*VhDqjaKviq-b)ZPhrbu~L^rd{^cl@^%4_W? z!jhAKMdt!%@9uTZX>ChRhqIy^pj70Mu6JUThT1 zOWbDy|09~=)Qsfnh{~R)&H(LZy8XucsyUTAOttYS+(s`cZ#_G@#U^3FwG>) z1zu$6=LqKw1KdC1oFJwihfM2jzdO|DR=)^M zek2|3+|;IKwIKsuJY~8?krH+ityw?ix&vi+k-$sj{YGlybCp8RNts8IaIry8s5Db- zJ!ZN+VWb^beoFDvK&!UtS1S7Kuy8Ls_d*LQSEOUui8OIO+U)D=j-#JrQY|-)_f zRA7I5t1yn=Ao6(Hj$882y4I>kkzaJf^D`i~!+x}9-G$zgvr|O)%laPmIOR57{p0II z+o_-a;@qC^vg`o3UJ1C3!SNS*jm!e7aJq@=y%T(uY5U%E4QUd~UhgR3c{cnFF@Y0A z^c&11Ubn4f>ApEOaudlxf^&FK+Q_rwj94E&e82fPzL8SaK>9qu1aL{*Pg~c}`#AEA z0bY0=@f+5WwJ5YVO<_~wIuBd+oqh=S1H?%3@2)xt89PL7bih&@NJnJ?;Mcr@zev&3 zX46=~8wH$4so`e&8Z?UepYK+ovR245S~1r=zogcu*?-c4ikEv<5sF1tt=G&Ax!rOD z!&RAIh_iqZEA6@e#W_~uN9d(LX!KKR>VMt`4(GN9sXAg%NUv}S^8_}>Fms@Zr~0P` zO6yq=DjI`Nh-rf00bCEb{TZj;dmZVqdF1^CA70`4q~zV}<2R-Xt6In#^Qy!{<;`zR%N)}$@P*rl4l+e3-^wf zE$byhydGK=I|2|vZccFpS%eHb9`|C)_hx4>&(6+Ij+|4m@a_*sUhDq8)|9Q(QL!if z?r3fhOgJLY;m7&Q)j4|mA};Pj^50rWQ~b$hU!p3v>Sk?Ud~2pZil zU$GpfI>;x%A`JdnST626q$Z7ceA#3+ARRgm%?Mv<%c#>&q7_(|oFtQrUhZdT?Ls9d z<{XJ%6Ht6gW*u;tH?PxrxTvBhh2Kae}(9Gm5AVg_Kol zUsGN6@QT1>Z&pxj*iZ!SW9X>o>w@Y)P15YJD=8FJl%h!~j)YH`k8-H@Id_9XgXd8EgB6@VQ0H;( zR~JPW_mi#)yK8(p&pgC?FFj!bb2_n{lm)!DsHC=P7^8sU$!0&j*!*H;Ea!sQZ9gkZ z*xO1zEB{fNvW7oZ1BcI!)stUEY{`B2KKJ_%%f7=%m2%;#d%Z(7Eo!{EzjH6o-n`5@ zyVe@EYIeKj)q(huI=PyTuc@Huv8Iv^VD9>!@F!4GZ-%6VTj9x5C8{PKUq+89hj3K! zq$+Ez+}`#57T;)7(u9OcgF1*Gf2;H8$hRHS>v(=f#i;|qzyndECzApNEU()zh=Ez*q(~#4F7qeA$(V^v?Ka{1Kf+6t4kBm|RbOrCcN�Br*->k{rEr!6j5}0ia66{ z=qHNkhV>Xv5x@UdR~?PV_CYA=NQk~7LlmZ=lG^!FmOY`UU68@%f%eP0NvsJy@L}OW z{j4hQ&iwER?W#GWkTa>I_CR3Y4JxDaqMW>hBaz;=RGs$F?5~LixAh7*7Hpk|NV~@N{wS|CmBeTbW>tOCr zu>rS&}9bu&8@pm%=7u$NR=y|G+{cv2O&@78F`RduD7m`fF?Fms&$c6kRF zTP8c~8YuE*sK@nlKFq1#wardz*aZLL+@&f;C=vp4lSKb&oR$f z34ckv<^vyMi2H(t24o?5VP2iqbcW@#Q1Nt1GF0x2pl6z7sYfiia3 zc5i?(fe~p_-0Zk=;qC;P>``c%On%=~k)C`FsMSqsw;AG+`aQX?G6Xat>v$YN6jjHr zqQXqe-FAwOSy}#x!ZMBl$fj5^9cOsmd85ar$-JdNcdl%EE)@Kmu*=CR%zNF{j3)U! 
zk83d_t`AZAld;=ac=nwAlxhwj>&?)0mSEcXV~c{9!MREF2Qiwo>eYTs??!#r>Yj}x zFOrqAe7+E2g!vW2CU1uBJ2~SX`fNSEX)+}fgY=v-=biMothl(HjM*ehmhZ6enb$02 z-H{ZKlxlhAr;)#&kCFe$p<%?0Fy>iK4w9}p`4=-N*Zi56HI@CH_@R_99gJwSr^qBh z%xEesvyKvCK{2j8Q}a`!{|b1iLb0c2(9C4kpMyG%$~lK>;5;=9MbnEDoUgHUOmGi; zM?zgcaVn^A9B*zk{!+#LsV<5a%uHr(FLw`s{ZN%Jg9|0-#l~Xm;_OGUR9aX;ZK036 zUIuHNXA57-fgV-QT=!hC*Aiwj^Jo_^`_kmY);-lx2vPjIdf64lQ{wT%v`CgM=maJq zq@{kG6TMe(VsjfiZ||j&?HYb#_*xaxgwaIJv^EezpDPpu_2U1Y#qRQD>9q8Y1}ojr z1E}C#A&b>-#0jHq-e-ot4&6}a1VuC@>?1{Y-9Y~!N)ima66i$N8dk>pV~te=t#(uk zYqJX%TXiyN_)nXB0|f!s=NZil37NGbCIy3w%$f`phrZ)y!*vXT~ZRm;k$EBV@RApx;>H`WP$A8`KSdRB_nL$nBR6B#fAiY<)AWkrathD9^DX?W~BrRDL7PK6xJa^YkJ|gAnlf+7pw|*_Lq8z_A*rg@ir+bJN09=^LsL zXw+=7Uo3{@6Ju!+2fga%fe_9PS)@`!>Q2*^{&M_*E`*q`(%ZxE zD^vbrG(yY?*j7uV2BTJeYR%wp%QEyFN{Si`8Qu;dCcCr(MqYU|*c8$>f$sTT$rb)* zCgEsKU5;Z}D#AD{-xR)8tq1Iu+Qjsi>dX8%vPxT=6KhBQ@M`oS!UjTPQN{D@jk2Lz zsn~Ztq#csO06FA0Dhmo+EBS>udA;Sq4D7a4Rll5luBwlJNbo~Zn}n3%O+4)OG=6;Q ziUWup@k*)~^JiQ0y`z@VGNaLFbc|w|I_7e=H~)%#Ui9suk+y<=XG-}$t&Lng%6$CT z5asH;Qx>+^wB>J=eB;A zi;LcyjsO$N>_)Q&pZPj!^4lVVXO|MwWF{L=ri=e)Q`*KVjTuE?EX@5N%7fdu^Slj4 z7S%eHkvQ~k)>cM;C>2Y6-r^jW;Yy3Re|9FpUn9)PDG@61o6-FqzGG!#rj6;1iaf{Z zdXwTbsTwL#vLli=_p5zmsbOz63@uz<4SpTcI8*?=m)%Z#U4lioVKQX>7N|@Vp;2bW zI0l533D&?^LIZcCe{EE5=mWKAZ6-EulR8z#FHko_p3X;=`P*HTp>azf1OK@6xbIPu zkatT1(nw>@{nN2bfl|YEY7t||!AjWC6_y`inPVkI))>A(3x6#z+j7b|O~1#I{tqg} zAacEy0t)j-^ujdDE`7`D}Lj72q8GH|ipi3(K^ALB|3ZW)3kWcg#+sN`od9UmLnp<+-tA z6G_vCb=s5sq|R2n^BJvMiRapc$+pDbpz=eO|Btya-HH@(zUQe2*|Uv6RUtn~d8uEl ziNA^%hD-Tukgk=>hZ;Xsde)JEeQqYM+5XuF%IjS4nC7Uu*V9&0xv0G4WCl`CkC94h zVo31zbP_wM8li93blGaMdm{Uv9;*339u}Mh&4hmTVCS62`5^u!XG^X-&yRB0L8qCH z;Ig9fB;4)5>-W2mGOjksfX}FiEXoFmrXP^VIIQ`nPxY5ZLQVOSy9nd$&(QkR3Ex(r zIqE-VGP7@%vS+A6g$uzJsBvOV^-K{&kNG^IrugqSs5?8j#726G3U3Z*D^g9Es%KGp zkj_=#oB9tNzMu_nEid%Wj6b}2Sx|TUtlVy-iY`%hA=_aOPUs++yYF=V90urkTr=ja zx67c7Np)GPIwO_H@0)X7-7SvHnq@S}vbhWpPrshVbu=&+~$5(7MG~CV0RZAy@g- zf~o(-6Bj-b3b6HIQNJMUuH!G8%`97dRmqA=4NPmG(7ZJAKSJAjuZmp?{%XqW !tX2O= zGPQ3<@~sZwvjHvhm|e2$t(E5q^e$EuX~}XX`-jH!n2>S^>eDEim~<_>)%f!|kpWP{ zCJk)!_^|sp4yU4u9A!{nVJ0wY;p+D!lc(n`W0VN#Q5@rP$5wN_;`ba;ltnDTdt^1b~W z5{{8iUx+dRF6()78-{C}67OgwbA{Fq^6NE~FQr5x9T6;VqU%c36^bD_&ul$?3*Dqi zJzvJRjf>J{8x^r*W3N6=cp9u$EvXLXl~d+HZW9ahRL`kLz0hPYeyWdvdk+C)_<)yS zd9`q=eQAFdb+z~8;x{rkpn04WdBp0?zeW4@R2Vz|vnby`^-2(e5g!SwC5)&i35d4y zs&2T%m9r5XUrO8b-b50ia3w1mKXCIeA}a4$Bfj#nrLP4}E8wCGt-RDiOqM1+tF+;z z3>SXXx%?bpH6<7nu;?omI;EgqCM>3g+R(uKV1K@ zOWT3$FhVcVgMw_P3IUFeeSXjnyhI4Q!n+>JI^}=h06!_C>LkS&sXVKLAXi`u-;tu% zZlQ;SyoGmm_9}H7v?j4-U#pvBH7Flzseb9HExC=$t^8EYpmX7sqq;eiw;)wQByF9} zv0?BhL|IB^r*Z62`{k9yPSY@?cB}ty5@TTyRrPFA&<0Th9Et|@X>?<^1)Wf0L!q-(ujN_6uHQ$G_(YMGIMz7>ZMo%9G!(& zP!AdZ#qF7a)7R(IlH}@ygN`EeGVOmKl6ztDmrTzKtW~VwelSI^-BQ~leae?tSOYZq zT^Xe55s0=foB;SQxc>wp^%pDr?~hdnRdY<_4yXp?BrxX1_u*o!==(YF9?Xi3BX8U? 
z0=#P!#nLsmI>fLv?8J3rX6%yH>J@6gS&WZ`>4k}>96T`#)g0at({V0V9jByUR{qaT zxs#xUCdFL~e9(KyxlOv2_hY;ZK^E=mtXmW>1K)t~^(zI`+;$;#Bk+*3# zZF4)QA5_2}n18UXOkzO68&u%lQTJT;DewOl{jt*q^tsd9K~yUcPRj?*=8xnX(DpvIxxFsjPx|Rd0?B*>jxS3tDwrCb2u}9+8eIY^BGMJbItjRE`$0V?DtM)b4AP zND}<>B|}MNMZ*6xWbiz?!(e67u~^+cyZycFms^_jdDsPd<9CX*2mbG6_IG0xT}dUE z$1(}sQL8dHN-h8U#>Fd?;UzaptjQ{r1b};}^Wp!7!{&BC(lM4{(n=ZlCyH@nb#9SS z9JQ~j4qSlK2uY><3#xS@xdV>G;LFSW@8unhz*XZF75o)trEvtlNKGx<|JnHl>j~l{ z&!(|k6_;D3J00z@Ui}CF(}^t`Rg6CStv%y%nZeD}@qD+DU}J>XCX3^r=Rrsz+nN8n z9aeo-=c$3?Z(0Mq{d5Ir8^o-Zj%<6p4=rt5vXVwz4axz@y5O6AhBKvuPU zy7C|w8HgU=e06IU#_;PQ@B1i0IL;(k%xZaGJSe~_31r@BgXFy}8OQ086xmdYslp|K z(8;|2%uu(9z@~~BRlaNdH*|+p(Es7-E#ul~zvtoNw75G|aCa+GtazcgQ@pr)AO(tB zad&rjcZU}DV8JbD(EM|MKhN)Z#T&AF?QYJMJu_!!!Vpyoa=tQ+uqd2LryRafL8+RP zWXzDMefhK4gnT9x z_o2Y?RfoWlx}!yYSm7XF<~QPnY83Q{fY;1b(blQ;mmEF`vUz();I5*f zfugbE;GZ!>oR|sabyU=^ zwP~tzq*#}HSImZw|nDLq8fLwsi8*33pO2Aj#y1>!v{3YV?7X{k@sfD3!f_M?wD2|18{eLG&IC|Tc z{;d?%M7RtnMfm&4*^s3Rr`<;lens|pklzQtfxF5ANm<_io%s%~i*f`Lt3Y%yF+9(P z;kT!`clzu>E+_Pp`|zM^-pGLww?BX!yAi=>1`2ik#qB?jZ{l3?dOy0s+oYtc@xv#1 z*!Z%8G@{fS`RC;So(hI-&q?nL@)}3jn4Q~3lHQMlyT`kleu0`PFTq)!-xuf0TS~E& z9kZQVbGole(m|cdgx)SoW{ZZ_MwdjBY`$ld1T(>mN59exW`GO;ApchM&jC_WsaS#V z*-WMNkVLHv$GobeXZxBU?SQtX1Uty@vzeAn7q|dh_6+M?o%1u-c^vju^+;t$d=e4} znBErLyePn!9`aeizNO<_u{=iI&oJQzIa~q|%Wn8)`nG;YyL_Bkvq&MlaA~+O$07~O zH6%fYYRUQ%wQ>EMLe1~c!BJ>p#wcGW=A3kCl#u$dh(#Dn+)XC!SCSl5+(=XddgW`q zNahp&fePV|WfAmpQ6B(5+d@(n%!sSEU~h5LwNF~u<5WTYfb;u1J;$NT<6>wPB~Ck-Q?5^` zf*VItu6VmaKq=ZF4{X9q&pFLJ4-#~eqskpc(#=D`NEG~26!oa8YmS>1Hz_4tLh?4g za2Ah8$8G53E49uhT1@-5M8K1v?vyOYSC?nBGd3u57yhOYf0Csvg8cD@r%7CLQ;ilySFA?_X;w>SX z&Gh#f^0lfim=Y}1D5{s!q=&$NPJXsMzwLBx2D{^jIPaOK-Jhw9F5k+8JQcVKV}#6d zm)QLJ+uM&sf>+zTcg7Ig9D^UpeIr0G)@Kqe34G%6ApeWWi@e&y&5EBbjY0&}Qfb?L zf<}G}nDW9m=8GTvF8T32)6EC#A;gh=8co&jWOajbt=!p-{+9uvx#y!fTQc=fggi)JEoMWTWYJN{h{4whWEgv&^lXVQkphUP0<7becwbolPU zlGWN>l*W;Q`;#Os`yKba_S`FZkSs2}CN1HE(rQ&fBWpSXx2EyE#LaJThK3Ar=o9%8R;pK)1^G< z@JUvL#ju1(nihtB|A4wpZlt9Pb}Q4n_<4^U-uHUIdhC{C>!mBI^?${H*U)eN%lkPY zJHe=2dD^w6MX!zC%`zSHtA!H(O+*6Sv&9d<`*ba_6hnp%R2aF^dFxI_2_u zA3FYbrD284>4ID#mw8}vBafGQBa6b+^snz^X-8KTEA<`^Kqv=h0XwM$6MRbsky7dLAkqSQd8lIc_-1y8$E=1u(}> zJPg8)L-=6+8SsmDWTr%bE*i;>4E>K=fhZgYR|2`mFy(F_y_7=Qp|JHk4y%t9y2L0O z5gQpPs)~LItpOR{+w)<6=xBn3^A2a-dXAUdZCczW4BEfls9F3@ul0EzJlvZ5qgLDu zONfprh(*kKj!D<}+*ytsFlvsk+1$pJ8qiU$P`;)RPOBNLbXyG$<2d{m#j8gBEpfrV zsJ%aYyG2x)<~)ur;r5Rft7lgEMp)6u0mX7>RkrKr8GnNT#r9D*{R;)g1=@Hc26{ha z#-@F0jf=nX>XiFFMJ(V}qln*W3`e1b;+(LuV?0{rdt8eoP3GTU!)PNrqT-;G+_4{h ztEO#Qw2T&MH-ZU-IISi-+bP`%o4wkPnKLEo+&6Z@`H9uDk_KMKEXbek(e^}LId?Fq z4=qop7y_MO<0hN#d*oAl)M}?0S|#Cu;y^Y|XBMH~pn+l^ZRarc4bLyk{U4Ne3HpA)9H7KO#uqPNSwb-g zOLBa^>Y7#Y)qbfvsnu)2+wjr3Md?3k7?4CCWe|RiH4WHN908PN`oLpH=qMaiJ(f*E z>yMc$dJ$mdQNW^D3M9xMugI;b^di9a{(zuISRrBJ_I1sj_X$Zj>32M2$8gh%WQ{Fe z5^cmU7|Y}Oh?WJ(0V>r{e{xh#%S`gORR2h5I9a+61K9*F5ddP8`%*S5`wY8M@6SlG zr8l>1WjS$e-vkhLhF;X(e00QCDtw8VIp;*jkokjX{G;qk|Juw{O0gVGIJdFe`vwzY+D~lPD8FOFSQ0b`|y*$2|Mj0&r;CXgq?XW}C75w7wu%R`YuFF(j1e zl_$+cCnnUn-Y_)^NG|fPNF2A>enbH%escCa6SJT_w6ZAtRy&j6-IMel=rfDCp2UF^w77L zbBlT{Fa#vt2cd}2K4BQME*Sx_Hgg3r9aZgutYb4w9M2p2%9_5?Pk$L%`sSmxFW&n= zWx_d68GZOL>q$HhC1?0Vu|R^U6fV}=5RSuZ4HR!A9|0luil`3urMv0bh?n#azWm98{!!%#mR!<|21hL>F&uSU>KA{dVG2ylLCe1z#%+g04< zReB$INdNI3ND^sdXw*Vo{TL|5kjR2i)HCsXkN$Pr_tJ>|#i|NMHK1t91IDRy5GfDn zgx@g9Hhk5`o+4OV!FU{;UJe&vY+kjorHuD;vQHKmo>)aJfFG#DiuRRZ_4N@I<)zox zbt>lGI>*2D=Z~z*PG&kDF;J~v6BmL3E28BOH%Ldtah~m?8jdkltWi=t*$3#GZ_YZ7 z-Abh5`^8PhlV$75?C77ZQuDQo%(WnIGuvYklY&4)+8u-1noV35!C$g_YB31LnKfXG 
z5`0SF!rQ+_cl2%X;-YVWRp?7*C8x)?+pr9mq}rOTiHt2&KrUIv&DF;A@;vI-izE=c zM{Js>#`1rM-(23jHODXW)M(0$haS2|cfRA)?`{z3r&l6(`fz9>F3Z3~|KTp?XFr88 z)P#9X>5Me-mK$%n_XW|20k1IMZy8hN98cn_K*~|d4%omf+ zPHOtf)JR$DWy;rZylsU&98jDB1}-?@lU**lxHRYuGJwj_HU?C0?%kRi%!g}zXW?E! zDSl((IU_uX%-y7l?Y{8Q<2l>pW=l<%|u@PZmq2? zcdb|QAzuf%4{WKW!F5m__jC85s0y47Z;|5cN-SDMw2pCd_|E&z2Hblfa^{RqMrTh3 zk_eAr@;i@0CeI$w4>{o%7HQTM7B|Jpcxwu+zSAqwV+H!oQP$o!0|<2md+Q6TDwTy}X8ZcYOU4H^B4JedU9> z*~>_R5eyPDS>O_n9e>vqdu!V;w@0|OxOsDMbuX|2W-&mskaQEw4;{uLtw!^{)^PKe+s9Y?r{ z9ZhC{T^y5WOdmiHlb=;A9=5}6h763%!*E)l0Y?nrn^sa~f;8)AJ){kh@V{@7fa~j3 z_)ojjPD`gGQJf1>2<2OR+!Tv$o)}o1^)aD*Img_DZv{D?f41x{4!{naw@wB#p0=UP{LbIyheKVC~1xdgel{ssol<(`vGa5*f{(dfbh!5&7a&t6<5g@xzJrC&z$t}7Q8Cc zM=n?$bbK`8brvny1&V+OLe=!mldctU!tOD1GwBjM2bIWhqCDw8FE(A>yx9KCcRF75 zW7p1$j??Yi^W;ko8=u89QQasuPqLE^~+mg`&ENjcqu=B;VxShXA9GH?g^5~JM0N{1{FsFUyTiC(?~Jg@_1ls z4&4%JzNgF{`65w;_%Wp&kL)*_`X<{-Ok36W-*pBjW?SE46#i4tZ^rBKmci?JJ`QVG z^gMpkYnoar;M`#<@zJZVB#}CIpoWN;Racb?zYyQ1xavViNKf{{HypYf zYUura*M&{oH{g{6XB%Zppi!^yTYCas)V@+vs@@s6&n1<~5YnWdpaVc7mqN0S4^R(w z95*vbK^vdRH%jXXS_KKG4TTY3yN*B%+G-#Sh##nDPt^i&K2gK~Gxk|Mn>|ECqK{E< z8IvbeQlPi4i$^Ugfh2{!Oa=oF6+@l#hBjK})UE))3_4hV_vX%I)+`qXI2g)3>C*`q zI!tV6>Y9jlbADhJZSO0m(}SwOVisl@hehVKcE=D7QrT|?>h~L6{~X%lXHB$(#A)!o ze9i=x1kgC{nTW%A)UH62ARi*C=GJ)+u?~5wvnP0z4==)Ir&1Nep?vQa&gjZbQ_i{FhLTaD$#a-ngO;j&pBfFfuFq9SY*B!GuLRelY$%@7n{7 zd}f;yxx9NM=HJfK!=WQm`hAmlZ3g`bs|T&HDcp6a3^>eBGEa;gV-i{A0vu2UXd z%%Q%`nn<;;Z_>J;gc4X}c;S6D|M6DF`m$`OEB5tM0iw#xXag@prJ^KX<-a*0WyIyJ z4m|Ij=c!DnX03jgmCpGeCCFOoI=Pe>*p;IpPSM+`aL;;)GKPX&e=^~-&y_I~22MYvy_+eLYO>=Vw35(0 zP)<*0JDBD@ffS>rmIUTdet^q3iZHfs_|}Y^FM#@7J}})~+Kr@as%F72=jiZG5k?oD zyCE7^!r+pTZFo<4d$V^#p4TTuRqx=PwUXBiWbr?Nhu^qh+^j()_mhYf2V*-fiT+fA=RZkV3JT|+ zD?j5}A%a7mZP=$;ZFIj_D7Yz>iHgf z6F^|=3uN~>w7UF&VEF!`2g#p5>V>Y%^bp}resV={2jIgL@oprvzV%n*t&9{4rA$){ zEEmV7#WXus)*X~XJS7^v#Axvjv-ay7oZ5YN)82%umv1(*5rxbTEMGkYhk9x~+ig~= zD4yX9ZV$7emw~NJeGRARGFyGIOZG%XqCM)4xdfm83+b4Dk4Ua6lT+-}4biN+R5k5` zAy!eBYL=CKCAUZ3sdC$~#CDO3(bKr7?>k#-Q&v#h9FQoCeAR=|*Wv3=L}PvG55l)I z-*Zx;M3uyfsVP1%yDTd{ZJ>90^6>MfGiabPC)&)$Max8w&gxx+#TO@OHPX~XCv<)M zll`6#MSmlip9XCop-2k|6V%>E=Cht!yw^I){LkYT6_J()OaIzCs(uuM?$R*%z^^dD zHis>Xezx#)7a5Ds6nv|fU6`9UMRYpywJse9Rb2kd;uR_?=af~s`%JU*_m63a&7Z#S z_t~B^rsVha9c?s}Uc&QL_aFXq=aa%kFV#5wd}T0R+_RZ7fDkzzNjEax{j5DXX)TT) z2^!-sChE4=VP0sr{-ndbZ$VV+F=vG6=s@pQveStF1-Pu+NQIwRL<5GK`Gg@${J81$ zZJMvT$VTItY)7#b04;H&fg5?7`ce~ac%12+-l(Bb0sIzO_xAFKj^LveYg~w|GdZC9 z{;J}%@E1-EMBDl%^$_FK@ga@60&F0wGbS0+)ZEpGrbyzvO$GVA;5deZo*);(bKQ-G zPlZ|+*A;s*WkB~VI?%O;)jjY}P0?NTQGvY|gG`ohgF`UiLdki%Z|2-a<`&HCn529S z+l1DQ&b}&K59E;qpa!X{K=J(_+fOa32DlP{eq-D{H*Z+S#os=b52)_tKI4c5+@sWR zGJwO9&qj~{F!TIr$Mxn%oKnpyv3GX0a#9m$fsK?(t8gbB7;dqJS^EmmP9_7#aKn4NwCaO%6smCU%MP26?5X(&ImPh}P@&6EU`MRk&9Mpbqz-ds2& z);Fr$(Y9D2Wqzka<-J+uZ$I$ki~#M#XS80dFiXE12H)_%OWr;sCAlAC5y5s#TaJ@G zW>vV$Ho>EYvyt(;@Aa`1E8{NYaV%FHL4AtE2x*3pYE_uFx zd8htHO9&RZbm|Ul&<;Nq0oX}z*>B)Jk=%TsjzJ?Dhsm{C7Co1BMlHAc`VGNu=0MNw zlqMVjCIj%f772uy$69csZ5)#CEQEivYn6H5-+0 zJVFgX6Ys3OcRC;j=YwN!?kSFd6KkKc#kg8gA#x+LrP^=eX_JAsVmt2&fQ)y`VxvH5 z;Mvd9)m(8bHmvc&O%0H69sJwu3hKW@{JR>nXXGlBfANERvG@nNesSN%7USVd1722U z>8|PyWTXF@lqb7gKrNKHHF&1e%asodQIjttgPXj~74xP)(CKBTFOWT1^yJ1_Jlr8T z9tZBOgT`(lO25W}8|!bDXFZyha-My2s(0^ybnsmM(ZvkmB-FhG$@9Rmq{Ff4MXUL- z>_nxBS4bbwOE*{W5xIXJcvyl$y~ZndzU+O`etw1`5GyJn`PUIs(F{|eyqj%%OEiQ? 
z(SRGZeyGtbei&&j6 zmh2>$;+ISoIr?VeDF?S7MSY6uDzUUTGN2&H8>}Q^8kj$Gmoy!}0(Ge8bKcER$zyE; z+n?JuEJMEG+OQi2)t=sVM#wDIrgumHZ5Mg^nXpJ(vBYi?Yh6!Uf#@Inp=0057OBsLw9qY$UMDUE`z(to_{4r{`uU?%*Nzv{8(Z&cFHwE&9Z9` zfTi`Rzg>%urLNiHPZ{(iSrGXgQJ1qy!&d)(1JnjLV26+cSUoDLA{D7{EYSneTvNX4 zgB@d2s!>`|`Mc*Vt3EDRd|RJTYS#Bz-FPD;7BQh1&i zhXO3jbb>`1XNbYruAbkPvNpyA2eH&|+La5S0uw5N90m4lb4EmdY*_TK{w}0+9bRYM zq}~zUq)4HZ!w-TyN(q^1{L5xJ4BjLT%Mx)^zgvj9*xLg^6G1$5|K#)W?c}_Tj0T^4 z9h{)`rGRG9sJVIi#?B1`EzNDAu8Eww8C2AJ7ZH6OS_hgFyMgoF0SLv=>?`K+?#SQD z+V&YaT9owWYA#iC0cVpm=7CN%xKcRK!X2uRD6;@Q$Nb;;(A1YluvZnGST@7^K)2R& z-@$NHcH(JSB{rR=OI(c9-Z8ZDA*1bHYvnz&#R1t?;G#O-gDgtAuP$aT$_Z?~>yAvc zE#4G|z|Ze%x>_(bu;i%HErhzd4W?>*cVcm|^E4(a3_zv@jboPf@O%8+%=n`%>KLs1O)Um_w*oh5#&;S(TM# z2jMxjh2nq2#Z>-%dLvF>P$bLBkR9%gM{-A`cjip`=v~&t^Mv0)BGh}fGzA>yR8AC5 z5(-}eOh|^G=rtBig?GY2NIIWXrbIa#(j{@F|mZnj1Gg%JiK;I9B}e5>gkcu)8#ygz% zu$VP$uSyNWXgNPzn`~g)P2j4Sxx+WOKhtW%E$?nCMKfwej=QSkoUh&e+EqUPz;M;B zVM3Og2W%t2aTgA?9#+LJ%MvbKSgB&W;Lw^_xA@5f+-Q2PBDSy^dX zHEwsMjQH~tmsx7f{k>dYv7=`J&U4@`tTbXLMj z8C-z9-}9U7{ebhjt^MlBx+^%|l|prp|9$8p6F>PX(qj&D3PAh#LFLxo#zZaLdJjkus7g1J{{~L4Ndl?nO~dirwSU7ldl7wW3g+N9-S1B@ffrg+V5G> z`)>yF5)3_I_TzGV+jL*+3O4rMHHQL;ULz*Mke?FQ6(tK#xi+C}IZ%f|;txrNCygEN zi9!&P*&wowf+ojb^Y7=9Ts7z|kpI{VJIX($-Mgjxo0ZxqJKaKMT6_VHQHr5qS#@0Bw7z7ae{!`79zk1_a-$;N>U z^hLiEpZ)g%^7-d}#C6PK@hLDc|2YI!L?;g)98Sy0u4w&PW$f^pXI&!{`+8f?Jt+61g((sXIpOv7#R`)>_p;}nAdrPibD zsNI~SL5U{kD7(@nqbv`A7T5odhXgE%*f}$^LoC&?y3b*r-#>yT1jNWP8aDqP+O{;a zIauMqO7vXMUVud>&U-IR0E) zwz09}XNMnL?4|`fe)9Oo!m&l+rcqF1g~v-f{;?LI9)Gz5nbtm6NdW}I(2jq0p%(FZ zgy-3d76wzbiworA&cnhEO~f}HxODzQC|#oe_+zvM_`*w9{jj}&G1VW+JpR}C{HWyl zl=TZw3|?jfL!zep;_&Ojbnk&Jn=9%?(kB8M?l zU5ZbO?SsU!VUx+422}VwToLO5AD563&(nnw{fFM-1IvDRUxBlvp%aG`B<24peuaRK z9Cchd#^GaW_{P*siqY!%5B$hMoj!jV-j)ar_-FHBAmnKM<9RACs+lslDOk?zt1Xo3 z&o)UJ61nkHoI8e%1t@G;ps@%(78_7I4&@9VtH4OUvo5HZ$%r!~{WvpYe#?fO9V!@Q@I9PW4>9r9qJMX`E2yd2pm$vWSOq?nj0c;|{vba8>`?hi}%?EZleE_<3LJs`e->v?msjdOfzybjGtmvI_@2Il1|#h-^nCm3Eq0}G6fBF z2IU{P8o#D#YQj~ZurJQ!vH3uqSE~3DK0-mKEZkf6JS)R2o z0ZIZw^zrlfJD=rmINHdn^~bRYcStBHWp6{ld`3WGt5QP^mG_^XSt(NDx#IYR$|1FC z{DP9glED`7jdwIyfY^zy$8s()*1BtY|IfkWVoo~Yq_XiQ8{?V(dnwcZkG=1J-=wIr zt$`Va43ZHE1B&Rb0q? zVgeN<8UU4?gc-sNF!Rniw{F#~?*9H^W?=Q_)XcxaP3P9F>gxW!{<_P2AS%pd@6mt` z&T_?e!!#FNxX3sG>@sJ?WET7HM;`@q0-Qc~dK)o*=;253t86?$aTZ_kv^_S0#wOVt z;Ry;lX`qs}MVr;)eda?*epA><*2$g+QB?L#f=?pcamVfH3;4qRqKhv|ufU!K|K~mL zMfc|Gi|x0+{hj!N{tftoeXq2_irhbekMus6KKQ{8;mL)cr9%!m1UDXbNuT@N=hC;n zbrD|qvo|*5ejVJOf-m>EN%m*32gNP=wf!!5B^06gil#&VjjwpR9vcE5r_GJIKfu&! z)9_TlGWaz-<`F;YqSWTu>JmEuWj|(BM_VN`e38IdAkGgzGCO@2`zn0keeX|e;z@=J z&%Xd_HrB7iD~|5I=brS%&wn93`?=47UQBK@rSD^N>(77wb6_r!-hAKzx*2ml{5|%# zk7DEK8`C=Y_5JqSZI?Ejv0++adBQL687Ib? 
z>1ymf!B-bOIO{>|BXN*k|K#)crkid``@d;_?L)#FDNEyZN=gKU4MyYx*kt;$OD@st zzFx8OPU#b$_>c50JVEi!cfKRN9-DPf;T{mqNg@&EVu^baq6nZAV8gyTG8VxcL+ zEFS-vOc@hU(JV1JzGPf=<07Z^aWyFHXlFcf2%x00m)}iP;(`b~4N6F`q8L z6BJ+KE1u?P9oY>}Q26+p8}Kj8319yV$Plcab1td*^2E5znQTt@I&TgcuZ%y44HnfP z;unr=`utPf2*`uS1{yelV8(L%4V$V3Uv;$_^A`VXr^LUCSk3upWp_+8;)9pROHcs zi=w>z``^P}IY(l@nOSL#Rae6kTD#+1r$NwhPE4Lp_#u#W&K=Rve}4VTzG$_Q3ZjTM z{+kdb#bbJ1+khQ*u`u&4NE9|6Yx z%dYne7Z~~?gCFj}B!=*zM+*uKD{Mi3zCM4@E=V*+)3nQW9%~kM{Xtu>LophH48PbW zzJI~ZFAMm^3=Qy0GRm7A3JUTAKM3R~{=if`2~SM$8AHtq#*RKg)^BvFO_Rig4`kCf zDWk4mBFaJ1zz0up0FCnuRDG~UA-{+OEz#*J{v%mV5Xzg!ENRo!C?Qyi7d*(bp4%++ zR5@jkD6cOr)G(BJnE&=gonhsvqq)WyIUf5+PwC4%&I!bZN&Q!x*f0#@Yh0)OFsg!ym%C!IFD&mt>W^n4Tb7u6Yv(<$}~di)^)p9JRh$EKS!gj=mTaJHfd^W+%};8JTA-=1h?YT8=jXdY?bC8L7EwX;3wLBO7AWEYDXG z%#0h$Woe>PT=F=&PBuqbqy}appFfBU9}UWKvqWR~Bd$tjvHz^IZy_Pv-OnGIKyIQK z%sOtLe2fR@TIndOj$7jIBnaYvzLKS(002M$NklH7am0*HIg<>^Mf}bPK%^P}Fu$an zEtD9CV?eOkGZQlk<7{Ndhjs=T(rK>Ag8hmgYB1pb+E8Uo7s>uVfjt3t+kMw`H*QcY zfjiN!#%G)rR$NiB<(k5^LFXhmDtn-DX8PJ}^TfKr{!o_P;?3x0u|}Jpb!bmts%zcj z%7jD=6n6Y$4E5!9cm0JPmYcj};rv72*s}Fs`esD7+*!)l{O1}70bSRa53&WnbPkg# z`ffhbCpxFTBDeKduG+Mt9%Kn=Nr5TTnccVZpLs|*O;=W|(=={%o$uLypy|BGzM>8r zM3LXD%ZP}Oq~CBFK=e^bv9%a+cu7`&AWI;9hGwj30zUB9|BQ{s6%zE>lvJs9$)iDZ zfJo*Oa$JAN4{EhWC}jR-35J)t^B8~ipMEnT&(}LVX5}0E0R@h62^t(`vqT(iUH`EE zFkv1;9DdL)`^5@T1LC#K_jJQ zG8x0k!ysDh8_D8AMu7-Gv=|gHva(X4SzFDl$->%^n(I!AC8aSaPBJsrqNC*? z6{D3hrAn(cv(o0^dNiUS>oC-usG>Ji!#`gbt#{Pv$0C|FtCR{J?@f=psO zvRN(o=#n+t{2{z1iC>zYjeP~S-vKu$uqp5}pJh)_l=;)bTdNlfk~?z(oo2rY`~>zV zU^x}<1*8XT)Df76Jry2HSN!k_Y+!tN+GwK<)9Kh_fEN^e#S>p!v=?qp?1QH$mS157 zY16mMzyJO8S-gs8I(~6~@IeQrE3k*bx4->u>{o#s6!_AbvAiC8P<-|?c!J^js!W}VFPNI+ z)OyEG-hTTXiLaZQ{m^V|w0&AyfBp4k@|AeK5pPiZ7hZYv96mvzlmlfG1shV&NoU~6 zh5y29pm?Hv*T4LW_IvooH@~S@Ha-7upO=n&|NHec1YbL3k)M)YvD3@bgSdIaO|jQr zYaI>AiMY{l{@2dK4IaE&37ced&yGLd`p5LS|NKvFI{vCxzY?z-+B+?~+;TX*@d_bn z|Alt~bH4QK7>b9r4{<+;jD+jVnA zO#FrxzczXtdsp!9Pna{`zUU&|U|1F#X>(tcU2((96Ccy&E4L^+2TyKXb=A+($3O9j zbl<)ArFS3puC(jUyXp0Br_G$1cG`94^ai|EY7O2DL0|n?_z~tXpP-nJ`E|gX--3NH zHpQIA6BMw+&BovJp8w-;79z%}GdEp;$B+F29F%g^XEUWnmo@A`aphHdg5nOm;)zcT z9e4ck@Tp35$3sfYq>cx0p1K0kdQ=Ysbh`M$I))&Z{nufmrUK9)KOl@D^J)wo2gyzZ+1*%kSn zK`74%MSpA#`&=TAiYB2NtEwQIFg}zxcG83Xh-7Te;6;rOK*q!8Pr#2ZJ-PVw0e=?C z;z6JEWc5Gd`yjS1(HDIfeNy5d@!5UzA@<+uPeuQUZ)q(2u7qSgbUd~uJpW;d$`Q1=jNyii$4jM9RER)R&<@}f1#TG9r7v%Ft4}(6#YL26IuL{}uC;K0x!S&yd|KPD# z+c=O)n@j_-$Z&Zn&nQNWc@ChN>6kO(KwdgWUPnpBk)hMX^0NPF1h9)(Ox%wp=osW|QWtRIQ2H zb(9KGgzcf!_>mRd*1$CdO}HQUS{1E){Xx~(e+qh%S!6z#na=$J`GcXm?!G6z;h$cQ zr`7jOE8zwOT`~|D?5sZl5i$^rz7pS^bL@#O-KfA(*Y z;ln<`;>8PH$`FKid|~SNCk=n@BxR5y$?6m7KNJZz(0|%cLi}~ZhlD2`pPgzy*?9jg z{$hvDnmqlqKC|Za1?_*k`j$X*+@Hk$>ku{fHMBAQWkNiO_;EhfsG_F8B4ZiwqrWxg zrYRLD#OvSF@O=qiuRd+5rP2|H9g6Qww)TOtGWVUuf`0xm^yPwdGh>*$! 
z%C0HYG6T(lX}ZqsnCJkSY(sQK5n2u)@>?sOplXRwM<2P14MCZ)GS zW|-$}c0wK{jZG)|bHjpUdN*bb)cvud+Dw-)!gf%a-hN-ZTQ{|)91RDaCDQPyD?Bdqtt&}Pq zAp-vAS}CdLG}gPRoE|8|)OFsdvcb&m4rd@EKq4cc@UD<@1AgKD72Kd$f5QzDHzgiQ z1)oeRd7>ZPBp7AJae3S!=MknRGIar&n+O_Ri)b5{`NMq0(@A*6)0tX-~ayi=}df~ed{f^q*v{^XWC)M9jQ!9__G?z zx!A+t2Up+*1-?k0f!KW+{_w-l&wuud^p1D@GxizSORq^#s=%9e9v#_FMoT*IDc7>PLLV)2ncUVx^T= z!p7gfO8@z}&!@ZYx-0FAO~qgFvX^5znXOsa3*sAJ{{}X${)Fbj>1WJTK<3WH4GO$2 z>MV@sKfm>m^jhq#!A;GX7%okTJVCn4)Gx2b6BPL3{Xsl|@RkGLf-k|hk{+`=5GQWX z{EMr9nT|R7=(NR_Thti>Lwq{n&A0*eyWjpUee}4GrY-({OZazTI_1=v=>>StBwz9M zjCIy2I)A{N`)~ZBnkND7^>9Dt#XE45i%&587oJYwz7lnS;WwWoq{Dh@h8`uvaE|OP zr5hF?U~u>gB#yzEXMP#4#X2AJ`PJA&`=8TexFPWokMVwhp)s^%2SN{O?&}Q*6?N)Zvs6XkE>pE(Kpk_ zj{kVt5q)_dUSne*bxwMXUEnxqELe)+T%pi0XJmpES7Hx}k6`oe+i$y7V{;syDpJbw z37c;Eg2I4ZMRVx9XMD>Yr8U|vs&{_YvpBb+oF1_>;JVCK}IuSQ0riUjeij}&E3qo08 z8i`FN3l&-#94oDw6%c}4D<$=u#(F+AWFtR+-KQiC!`DiXRbLCe>x!mtW+{&DJxlCyHXOzWgE6+a|oG|xMv zqM}M>mhqrcRd89Beh3*i~=I^|+kc0>wO^g7q%x|m>e&OgzF)}&$tm28L6ku@Hm zOt~<=k(#{G1E;{Ek`Ho;7^^Yob{wMmKn|!>WTJo@5g}xv2PUcpn`vy*L^kJGncG0f zsu-QwK0@v&XnBI&O(PrV=x?T!RhblgqzIM^g!weEx&4G46KlGS8Qy`eBF(Bg2Fb*-V5*imyL{1947U}LDBZAN@RLm6H-Gxa#t9Qo;#X~Z;mhO~pj;Ba`uCnc{`kD~o8R83_y7MAzqH>4uWLH|@FOs^ zc=gOLeufQ>-^aRE0UnigVM9XLJ z1O?>|$TukHDff!tCfRm_;$-Z3fg2Qh+|&H`2-%#-V@8khCG`atT$ujri~p6DnZAtn zy4dLzJK^buWyvkd_uYS=zN|g-%rnwD>#Ubf$FJs>!iLg}|10t8n|t_*Cpus<;HpxB>-`y{X8$!52aT52S zI4d2D8x-99dl@`I!4!`g5lRND@b8*m@fA;BNcZ0Nr*t6qpxAbsA~6dabARI-U(;)w zcG&(U>BApBO0U)Oi=Im^z9fC(|9(8Jzux-kU;p)Av5EUecxBhk>68EQ$#e%^tMnFZ zCcXU*JLqN&_ch_GpichG$?4g6V&XmT`FCub`!F`gK1rLIbN+EJk`i=y!B;??h57&T zm%jox0d~ZVkl*0u$Ej(Zwbx0nM!aThutD*a`%Q39j&r|yZhGx&UYmB>`4xD=$u_eO58}|Q$ZZti!S8Pf$3JdQ%mhY$cT8B7B&F7dF35wl!=SJv`pT$vFbl7fvVvC7%dfr2}&==V?T2<}VsN2<)dSfkqF=32qM^V#YL|Q)oH~lxep1ZVp zKq^o`WGrGt$uZfip(5AGETg7I&V2l16HbmQNm)*ePW z>r5EzLJs)gm_&)Hq+QfY620!F1haOuUn6B&jn`7{<9Fw>VYnqLX^nC%zF{!0KDZuf zeQDB4{JRoa+_kEuq0b*KMI0t?dl_jG$P(j!{_r_rmOO7FX$A-Ldi>3r{XjbE=#Sw1 zf3dDL`D#n;K>?Hc{DIQWRjnaItUV&vV?iEMJ7%sy4)Mty9xYk&`46e6iKdd%xaRVj zn`Z%|xfervMJrqJqV+W5Q-etou8y`>RU#Yy{NYkE5wb`RvB+fhfI-j8Up}7{rkrsL zUedVNy{v#WCPt$KlgHm$)iCBV@Z0mh%UJ0cUXrRhGM}4W$-08L7E7k+ht)?Of2J_l z*J3qSG-i@7oPgzA=VimPjCh65|Gw6{1WVWQbACfZ!>yCvPwl7n+JD#L@z3!`8$hTk zIhPIh$^9VB>t2#(`Ze;Rfyf&ayY03IexP<&+6Ol$_QQUE{4tXIQv2n{m1%nJ80Lqy zT*rAX+xH=QPpQ~5wi$n)|6%S1!(L^{L@(7#5E!qnMfy;Qu~99^kTT*!yt)3!I=P5F zAeLE)>rl1U)F8NEpFa%2JCWUJy5$nYQ~zD4aR3c;&SF$OHw}+9`|t6$zSVe|Q8(?F zZi%JGMzAi}_yA%Vq#=Q!^{18dR-AK$a6hc>QVlp7kEw82 zbGhhPr$$G>+ce3#V&U4P`R5O_Pl|iTI+IIB36Dh+{~{ZGaP5>%#SMxxPd^i{=boAl z#S;`;Z_V!*V6`*;#Ed?;1uL0T3sFRs-C^p>w^}CUG;3Kn(1S^T{$M1CiD;wZ=gw@k zQ{)Iqp@UCJ9v`%nJJ6K~Pf%o5&kzYT1ZQ9b3gdZX@+0z$0(&wU0-GI`KxAf(f}7jp z*&mM|AtdWKXmI3OgxX@Mypip8K&$IFZO zIQYfB`$c?Cq7s{8X-vqjRB2BDpN2~$T(Qi97FKMRX<6O_QHuyFW5wS(OO>oKTCvDT zq{s=bWKQ;yW+#hp^2I`Nt$<9Ra_x23rLX2!JiQ_ALDBRxU^H|>+J!-ZilT)$+O;B% z%0&9l8we+z^eKCSV*4HNtNa78Np^UGf|xYQR90z2QEpY^Bljk!5#_1!t6%;yed*Lw z@Sgjt(yI7!eB)<73$KydIIV=un&)71;+t>&eflN#D7gKOJJWOjW|MUIVTY%6*IN%C zEasMcc!Gkj%-L-hJVAkDj89Pf+o8NcaZUR4$)8To|C{Gy<3@hb>ph4l8IlFq zciwr2UgPyO{EDCNJmnYWn>_ouY0WjTp8zoV;z=BC0RD!(-jH7N z>Q`$M@t@<#nq!VR2Ag5e0roa&bNs?`I&K)Icsp1;}i(nGjm z@`+D;JpJ^iS82oe7ry9)*qdYBG#9Vk;$9m3GL-Y|AlyLMWaDRJ-;-})pOeq0XRP=1 zwDmUIq-Sopkv8Q14d&d}FT60_hnrDH9di`+_}D_PIODrAKaZy*rsKxMHrs9ujE#kV z<FI{;w%-mL$RAaL-ON-rkV?!E)AAZc31;b7 zHx1-dlAfTzjg;GN|0DJ~*-Ec?5__C1Ta^?yIOmO>xwt`gAfDve4f_D___qW}I>%Jb zactxjuNW#7GAEo|OC>Y>;kq#6lRy$K)HFdQC7J{&RY;|h`4cY;C1V0dV^IQNM+VnL zU0L`QqXQS%Qkgh#kZg`W&U)GJH1qK!)1 zjm+s8h;%}R&mUwlmBS&+8g(!)5~wT){^(9eI73lh0FCcWU+#U*?rWm1X5z zFQig&T8-?#r 
znwZ^epx7yu2s`1rX(I=CbN$owGx*ui2{Up0Mv@I`V{Y2~2L>jB9}DHYBBiLA>xEP* zW^kOmkw!ypg0hZtS@ysP^hN^8vgI6$LMoMUkcZjDJP`q>{tJ}Qf8L)z%-@CUf4q#5 zB?|zfVF43J#K`qRDwW{?k3pS@AA$16n|6cZ@U+#|c@GMij5t=0xy=h2&vIknjKwP2 zs5Hl)pOC0L!OtJX)>wcYUeQ6N&OZuej*21$SNNWifmB5@6NG5cE~JfTvO&YED&u0~ zKrZ0z;vNXb>q&qxA$$)sk9Fwq7rJp7h#pt;er;TQ%=GrDgP_6p^zkPVkkKZ7VHuhn ze()V8d;O;|_ESZ`_o~WZ!@!ZdKq2%hZ1t(|D0gff=#cgg#RexFC_A%e8C#3xC!#I8TrpPeHz4f2VFTUF53BL z<3Xn;-2X8DuEPzAubg`hzEs{YwKpi(vzojMg#toc&<6YOqvgsfhvr|NUEq7j3Qn%L ziHGotrxWptr*rVD_?PU68x-0k`7ruClwC&bgt2HmoNaFaz82^VJjFo#*JESi9q~(h zCcZl9BlyDl*T4C7`p}0yj4zt;%l9P>lYGXX;bsr4nh9vndmOL&x$o}uGkk&ngDWn_ zm-@fPrpB{P9)0G9&8x1wN_rli%HS{ZpM$3)mgcLFV1gG-2fpPXJRNbj_HVE~IS@nK zq~I%tR$ggEz4m9b&0diH1{-LvhA+aGz%K&$GB5hX*H~SQFRU-X&4v5#y-yosuej36 zxPh>xzLfjz?|zHT!v7N+Uq65^^S8lGoqh3T{bP7-(C4*h2iJ-1cYG;gi6F}&x23oKHqR64hi8lhyKKpFleECP*YIn-z}?Ig*e}P&%NpY{r7j%#TQ?iZpEG&{3X>? z+{jsXy=SC$D`%< zV1JCuA%6+{xK-yZY_ATCd1-BIRK5rH9NBi;Z8bN!PI9jN_vPP97h^w*TWp zQ_`~d)fe}ic-c!|nl{F>ALGX3kj{x#1dgim3{69T~U zXDHZLpRwJl+Bxfv&{cTF(@`HiHr>HjJmF^4ao8jqE(H@oBUf#W;_=U+> z(Ox#(B)g=bPFmB<2OHNaCKhH}%`6)fgGyF#x;d+;6}n^@u_3( z*LaXND)sO|h--BYiNyj&2G?I1sr|1u^ZGvu9Q4>fiA8)cJ*IU`s`WB>9sCLxWGk_$ zWmOLv9P0ERUBKZP=)tB}vl05rmX%iV)zx$YPFsH0%2_Ypy5@O6T>HptFhiYLcnbBef|}_puNaGf6$Yhy7*{oJway3 z0pF`E!JUV}7`gtt{Z3i?pW_ExP5;sMF*dq`3eubtY2xvZE>M-p*Gz&B;zP!e$G``R z=hX*sO?T8WN2kj!yCiMC>E_rZyM4uz0CN_s83lxK@m;``ReJo;V8CRjd$dR70+;(< z9WwJ~k9HJ)umzKI6;xs1#8~k7TkL5J3AXCzk8ytWYYvTpAJz(hG&qLly~!=Z_wbU~ ztbb?^p8o_hqW|&{$S9*-+Gg@DSjz}e&JS@)`d%}oC$7vA{PIO?G&b1lA}}sK`%e>$ zomCyQ5He##|0BK!bHGIL2)x*TS0R7G_{Dyy6!Xo17Mqn#kcr`=zYz4biRT|)XLApV zd+|e{z47$KzWeQyR$O7FV7y=Lc{l<5a#SNY8gm($K>WM#j^!o!vCbyigU5fCtrKGm zeAtN7fU2JOwPf?1%_Q+PR!uz)2opR0V9AwL*7@h;fvc-7d@)fN zL->R$V^XabJF|gNCwaBwT+=gp;d;eO`k7~#`r}6&jfDQ2%{Cv77iH+L19+Krz3F9< zBV6u}cejV1l~9<=vHxx#OxJJxa6ZcDUc^Ve*?_!hZ)TsD3<{fcYX> zN_XTl-3lsn99Mrxj{dqFXUu$jh^0jg+Ckn3h*^K2jkT&+$6h?pKYP2 zYe}^)t3Ia+xm42Q_?62nY+`udefOe#AkD@T15e=T2Yvy~U*WHeFP>My(-kYP{4{id z7Yo*Z?ve4kn{Q65t-5Mj9lz>d5}Rz-*qMDE+jrf0r@pYBf=#Q}z?b^`g*&782)=l~ z6FlA!S!0bg(<)DU8mRn2{4wp1@i1OTv*C;x`4vr4QCHM|A0JU-1`C*!4`Ea1d-3J< z19Q*e`GnWdM((+JC@l~-9AKCh|QS;csAj_`F!+CM_$#lGU6E52M`YN@64 zMK+%<@%@FNf5 z%lsAeB+69W;P@lHIOiAX?DwjONr16;~XCwP0&G_&*!tb?YB* zO>?pF^XjXwj#pK!lkT|vc6?!fPg-lur>9j{T^TnKI3DmYPDgJC2Itw`ctYgyCmzR> zCTr?x4WIum&KnGO;OUfm;3s3tah^VXY29R4d#$x`GiDjh4F_aQAAH~eJ&nS>Ex1R< zw569yPg`-7wB}l?hsV4z|FKp)Huo{@cX8kS_ojz2|2S`!ML(a0IrsFn*T(B0rf2(@ zADFM)6q`Qt)k}O88so@E$5&fzRrFyk-JD^^`6{P}u@U<1x80HMf8bBJ`8A7F$*!`> zs*2mvcomk8KOKU+Nij?B%)ReV_iK*w#>#Sd+J?4QU3E3xw9|S4AMeNfy6v{x@S$tP zwC=j=BG`zS=B7t3&k)FpUY6y`kMmFZV%7EF$}6!41)g9b4qx$fJocby)+?Wc)bnx0 zF=_*E%nSGg#c8LcM<1S}b!4|ab}RfAA7sQ#KQhq7G5whL)~m=QzE098<1y;)sAPr& zIf+82=?`PY6!ndwyytJ=mHx{{j^W_^&jSIdd5G1}`fFz8a0j9KJH)lPgJYemy3r3G zCvbcH@wwufYO`Ve>B_f0c*{!xSXlB+1v69m(VnPXd_o5wqC8n`a=Tt8yFr0HD3)7c z`LzFjZ%(gzwY^@f?GLTw{sBuL&|AZ~MRZD- zO3?fp^1*?;bekwCPO=gzd47+D-?5T4?VMzGx<0sumnK!Fo0Hkc978{!XUS%)i(X-} z3w8cmhoTQ|*UJ(3G@OqQ*^9^ec=2!WX`fh39^hCSMi$ z)}L^L;#llKfgdbvy7^|fuKlF0AIR<58~5`^_1Do1#jGiii|X@7HC$@uFPj)JI1q=f z_TN3rWv5x?SgZXJNF0CaG5Op;waU9uN#x9FUXqNuJ6Xu3GV=3>3{uQf_MzfCqn4Fd z?ThPks*p=1^e5Yi{dd#uheF(nfbKTSvRwJqUN$0@`6ip4 ztjhNo1i~M5iyv9W;W9+z_v~qIwzu^o(wLt=*iVf)$~cDt*`|aO2I46jG&3B7p1;vK z@4x%uCLGR7%sG6>Qn-5Oa~~_&EeZu?LiqH?=3ho$zPEPMt7UbCJx_>^gV=;C*| zCH@2=6X`VUNqPENXW)09)3lfAR$ILoR0~emUYV=X%$a;*9sc59_xiv3;59F^zR&wY zE|qN0Y%}l0BbzY4JQ_Z*&h*mldYS3oq#slb$49R}IsQ|)1x1dLTUR_C+8iZ|L1}Cj zc~%^|5IclUQ(|%o6R3OL5M^VaBE8_`N{{CgEHk_r^~4%rL|c=^FS=$NaEK)8-p4+^ ziS+21FpOVNO5g*EraT~%)Ql^kG39#2&jcG2+o`?y3KUavZt(jj>B 
zy6bU+;vDWlF(d88S3J2<7l3-G0-s+**oiXI9oNequS4*rm(=s)UpBblWM-6^jaTQK zeA1`Vm(SrI6nuiBvq`p;p$esXE=9u+xB;74Wpwt2d=!GL{&z z4nn6{MY;^M<8jfSf2J^?#+9a~jsTbWdH8~zb4gEzpri=1CT_42W7)^A5V6*Q!v}nD z@v0EM9%>2P6e;{t$uS9Svlosf5QLAC7i9cp(vtqNUJ08rbEdKS-&!?!Y4`B6UnZib z6{cj&C`!L5X~$;X2r=uZh{7N+{tPVT{dks0M~+P;d_itsn(=F6^bG&wxVRWgj;lT8 zfF~7j15Eb2cG2cI({DbpLW6uw*Akp;o$Fv<5@?&pfejvgkS8SfQiM}I5+FTM8VyHP zpMO{32E{QS`DnTgPolh7HzDH;Tp+JHTWDi z-l$_L7^PBFD$hwBnNCwuqSeea(e|Z_1n&B`d^nkh3KyPakg8DPYy0~aI;|5}t9e@70Y_*z4 z#~Im$PSc^sJ^pO|Q+)L2587s`^8$?yV5cd0(UAG^<}zMVs2+cDR!F_cqK+6HLZ?|p zItvx$n7l6)zt<%b%(>h5()yoUD0;^S^u#R+)m$VE;|Gac4U~>u<3zqAi8jD+5=#s~ zo&O-zx$07s$^CP~(nID!G?Cbpq$~t8BElFDRb@W7{+nQv510|Y(^>L|FUK5x44!1P z8x$veGQZ*}_(`L#qk8N=2pv(1C>0&AcnL16Or~2Is@0|mA&;au{=SA6!BFWP9nna~ zrmIa1-#&j(*3(c7I&f_#pxEP!hLPrh!Nej9McLT@NS98?dORoO(wkKAy2{S+ANi_J zX8hf-DWXrgZ=m}cN0ukUN2?Ri)=W75Bx+oj>6UK0bTSiMuJBP2tzlRrhGe1R-^CAj zj^D;6&*Ut(DDa=a6BN7dzH7So?z^!E1#VE_Z$@?+tXzqCME0PxJ_YWN>)MUmKoR4CYAAi&KIqbH)%)=!} z(F#MPO3%$f{Bd+N=5aHS(I(TawCF*X2dM#)6V0_gzHE2=u4Zb@e3+je z!Y{*5`qWA3tS^5#KS6P@-n(yhke6_{uK7&h0;wqt#w z_=_t3DP4KxPw@l=_Mo`!cI-j11@@r$Sl?asz%HoFTjZht=bv{0E)q|{J|B;ygAO?u zkKKn?JgH0FKE`&eQ@FJ)^zgTv9CBUN3xkBth)%K+Et4*ubwoQf5?#&H= zMRxHbTEd!%`1hu|_p|7DB^XpSw1>bl+X|PBE!4yKJIaKQzs66uU#v{1|BlZa6i2@A zeR@UPo8GiP_Tb?wp1L;cF&=v%#KxEGLs#7dk?YK7`MCX$Mq-`DVmsCa)Uq>)L>_@G zHFL+sFN)Hs^PP0VEVGPYauV*PB8vOG3&DJ zMq>wKJ?jjRM&k$Jk^K)cf{5){&-fmmhGmVC`*4~k>32gP?T#w(s)fF~$Ed7}22s@kO!nH1Zx zKENhPBFNBZ>$cU)bjoTIUjJ%P-MCw@GR&jmU8Y&F@c4B+3PSZ^fHYS8i(>GbG&94T z$&WqOBS5T^hcrNPqOp)qa{etUd^>W0U*C&mKFe>=Cv^N-FHAAkvni3fbZeJ$-((^HiMq-^9u^sCzzHyu%Td0Tr&zyN0UiE#te(-wu5j?$h zSXA#9?u~*XE#0YfcY}1dbcy89Ejlg+k;u5HP>;3!Y{X%ZU|1|i6R$d5Qo;@K{sX)(I8 zuW7s46)pCv?pE(#Ko*fJwtkP3XckA;`r(bamo1+^!N;g}abh_O3$)1GiPn2FKiBBQ zfvOc^7z6fIg0)$x6&{Mt=T$PwT+Mm2ARa6>zD!fo$TIt0&=eDESFB+Ap*3qmkdZ{w zhduU0jw@3cqJ%wbBlcw=-L@(4)#HTBeqGpnFce)1W zN@vXjWwCxa``v-ctTY0MNZ;mq-GsyO9M4t9mkSWmK+lQ(R6hlt{6d8Q&AwNrN8vPZ zi7WKLe1}wC=1-_$%~#5I1=4HfiON!z5W`k#h-;G4{2|d2#~-X6H2T{r3fFR8@Fcgd z5L953XIu}3szCnpnC0J)A%0&g%uX5$TvAs8OmP|pmf+VZife^**S@3-W%HfPdoM;C zA&m0)w6V|ivqPgehC`Hg{e>K`8&FBU_1Ym3(`9<0-(kF?kC>1wqlIE@f9xwgl|$45 zu)(;~Rk?tOf1j%PXzQQUUvU_q#28rJAG$q6W(Pl&DX1GV$BKwh^S$ed!EMTrmXL>k z`Q+rKdD~{(4rjSj<>YOfKkhO>>#%4(A*xW-Mv@vE)h|DA=s-%(JetjfqQ8Hu6Y_+i z$d}0oLl1>7Fn1C&UZ^D^nJbhxr5a}RMvY1i5@gWHGO6!h)U5jXg}FXq+7h8>OTs&4 zE-JI}+fvI|h&d-0tb)OtK;j_l!_0hrM>=!Sz(7C22lpL}hy$H}6nIX(bSrSoO)Izt zI;|=UBgWE9hotfIj78&~$!e%(D!Z=9P05dwLIP<}(s|lf6i+&{3!>ugy zGI|32G|V>k#_YV;4=qb!d+g_TuCF$Z@lim~hH6AD$#+&S3rlEb<3IWkCm+WZgBhL5 zc8|BUF_qiM6oP4#2hZZ;^!gR)=|3U8dt;?fpCRW4;pD$#>TAK%M-^s2`rxU-{khn3 z@OSb5VRNFQf?L2AkEHXy7ElTLC*~casRm{6sOqUmQkSDr8>e8A4Q;<$ zehq`Zoe*y}0a@pGW;~E)hb{$DdV;ZAB;YmtXhPrzJUW7x-8QS}_!OQLup8C)D(;e&g zNJfKqDq>5B;R_=>Xf0t=k2R-3ZxA)aO8+`{>g>7B9fuz>60wY7+!lQ;(|KR&RGo5ebJfD}%5F0ddMS&N?OFX&af7+W4K0FCzoIcz1y zg%T`{+pO}wDzLw`M=uhhm^V*Qoa68r_dS0r!~^S(It{)bw3<7Pd|aji3%86Ie%@1) zvJ5;4W@$)>g?-$7D>M%*i^`n&Wgd0ZJ;KHia3SYjUbn*YCuzagKK-js@gIY|$!^`>Hn~$Odc6}B^h>Pv zB+XPQ81Kl`3^AMeE)AMiL-cgx*veVQ>J`>oHUjS6pSX3syvJYx0#&xHL7 zUwL(e&sz=Omd#S0=q|rlmEJ#kKuh|}@BaOeY_b5p)sxVeSPrr1iQ*Xz>4qVKU-dpr ze+*i`oU9#d{_j5*B!+ZdGAJSvlT=Rj1-P_dM5xqXbqZf}J_Z1C{ow(qqk7V{hE4At z`%Vt2A3k|^!wm7ALOayep1p#!kb`v{^!Pa!*06SipT2cmo#;5TbEf3(_XzP9``W|BQh zlNr%!+_-NBpTXBAzk8PPSnRv9h!etp2~`|>KJXHzU+rTnr@me5M&M2`JfZAxtQ)Os zA_BTVG6CP_5L+h-yAz&=WQ6 zxPcTe7fxEh=ji3X7vHjgxP-(M-5T(;g<#AP#yoYP2c5dlJ&pts4vDL|v)da`!R*pu zx=_4Ki#?PG!#b4bMZ=r-@M!+S>rz3-=l+2x2RfwiB}&2^poCC}uljfoa}F5kUBE&U z80@qJ%+;WEE{7yj0s}&f`o8Vdm4z@`%jFY>oZ#1XKfYd>VWGmIEtM;*Wgt*Q!mP&2 
zKm-PDJK7Q%uX^a&6iMazp=Vu=%kDZ-=UMETj`!`YKj^5&tI#_yXZ5Um1#41K4uldh z#-@PE4loYg1TQiM2Gro~nL zQ1wZd?b==$mG3t2 zwA(tu8%h27(okbulKF$^2K|s)QKPi%?pD4TL^-((&uB+we6i$j{c-Fv!^~RkjjOz&cO9``rf)3)+bCdYei;+c=273+_X9%Sac*lJ=oW9s? zvX=thd^QeK=$h(WVg?x08-;m<7OWN-4dv)ho0kP9yqZRJ3qU6z#BRkH+@!DF(@#I} zAMykqmd)p?nm2X$M<^~GV^3ejVI8-P8Y`b-CsmNi{isYp{h|y}73=SW31s7hH48GM z#YmNLjbIF`$B7&x^T+Ii&BvkxAeb1uoOB}(A5uS`IVkcstO(shjm8;`P4uti`~UCJ zPTVEEQfuZ*8f|@zN3&}C@hhJQ}l;$5S>|*!u)gYYXqL1I;B_;8?sje5Z#B=<5 zVnlx~P^M#DPa_!IEWpwQU9!*ojFv!ML{1f0^$`!=rXHXSKgr7VY(K9r` zH|uwQ7f9HiQ;pa6K+y=Po`<6+s=3#9k8xgw+6_`aB7+Ofku=MMt`r844fGq)>b0Rv zptIqVC*#e-XS8T>A_PPx5M zEJ+L;EgvFygo+W-P+Rv1ceRO~aDX!?3hQ~LRd|i@Bob=CxpCTEOF62vUeXW%ke>Oa$)Pa*HSS-?pkJ z#nW#_eTZgNhfOH+z8p0`hf7ZNmS(yup)bpKodiXmZasUZ6j)((??hoHP|+9`oV~9x^s|OuQ%w-{ulJsg2ynzGV2;Gwi~|Jzn+w9>rd1`@6Y=6)AJdK|mak zAi9WLtdPo&yakBcDGYHv_$@yJTl+Z|{C4_6JBjdk>*VCn)1z@Kdf*l-xC{J@6?XkL z7noCj-_7lN))){mDGm$;-{z#j>j4)Dmt9vT8+psRHD0Io3C4&ECvP%v#h@>;Ob0dm zP|q%K?lG`G>K^*>cA3Kad*(Ix@j|BoEv)9X;+wrh4G<=>NEr%uov;`Y6aYGOa7(z;50r7IdgH1ZGZb&7JKecQ%e9NuVz@zz7VR!k(X z$K6~VgG&^n+}Z)};un?bI`7J=lX!e zT%#JN8hJixbvp&z3Wqrds(aGic2++tSKNfVk(a!Xm{Pq+1YQta4q5jC0gvb2@YI1Qg`rkuhbzos!yL(1f0_61}rJ#n2W<&AGq z$FQg6;GnBF3(Pr~`^-6>l%g@@ez8BLycK*Zl^l_}))+|Q&ZY|N9pFD6n+XLUNbXom zCH6Z(W+=X_Otzd&^$&_0GRp+(mg+E8H?&TJ8AU^Nc_9B`S85_LF+CJcP5OkwgKp~; zPI!DHXVsm2E{Pq0x#FU#W^Wfb5M)y5vU`su&U5mf;!M3*yBd~7x;VrFDBeSX*>MF~D<4bQ#&%MBH=1zb%b83s^z?( zlDcb7^jl8!J{pdWraieX=^s5&SKM>@ynW_qZ#)hT4CWw+Btb%P~ui-J&c$;QVN$d z7YoTlt)lc?1t~Ykz{K)ho6Rzb*x>V6qFxM}NBTT6@BXh8&ieC8f=S>XjZdFHXez~z z@(DzWSh=gSH-%+Y0f}?#@;)XEY*i95xiySrtc_;OG7`Uw|4lgslkHo7)Ab!wup64_ zK~y06_6bZXBZ@7y_Gt<@Z;1*fLD#PW9a#rlb-y_@`Iy_r5W;SL^Wn4XblP9KH(?F$ zZ!yK-9?IfBsjkV)2gOZbD7kdQ{a*42?>cL}rPZ93ElFf<;__4G5V~Giyn_$~2-WL1 zEk6=7f~E-dLUS9f=ii^zzks*PlgZ0E5ls@; zpeKj=G8xLV2o3hyT%NioX2=AP5YsaFD!U9KR&?VBks&4oN~>`Ncn>5-VdOOxuDkw8 z?cuK=>02=&s0;l_QflyzscNw_+fencp6DThGwY)7#DU*@HpF1O9Y5u#A^bWQ^L+6_ z$Au-q84q(4iar>8SX&V|VV{^xsvYBJ@UvzN;LjK1Ob46`;%ZV}kmb6Hn zVJa&#;3eZ&dWK0c{)uI{tOEX?Pb`Yb=5Vk2XQkRBtnNA%Zqnw=7`=1ucw(iz-n!t& z89aL4q41z@?Sd6?F>8qBV+S{aiSBi;d@f(8{0A0c@T^coTb)*EqdiE$)$m-ZD8=tz zHFFL9B~|cMspyV+m9?MA>O6rIJ|>>aE6UrUz$w* zAU+sqIttDxaTl)32UqAM+ulij`f9u^pL3L0Hs*cOTjc)4p@N2$EL+33+L9)@ZG|SV zoqcTCSk=ZUa%KFM$7ct9Zn_<8Z~Z3qc2-)-5#1UYsbL!(@ZZRA(!b*IGl&Jj6Pu2U zm_~T)EdZB#r=m&1hZN9So!TMMBg$|n!X>Bn9aQ%+OV8B)jdz~f@(uoQ8*ck?x>3sI zn$4Y2`7Uw_L4i`Sdo_J7V%q{%6#H$t*#=1r8Dc^`}U#l#N?j&bxdv^YP`I zK0<7Yp6CbRt2S=YpG%@FoR%YvclF@!I9_cMQJR$T>Wh^2fk6XvCjXc;s!E?rOG4ht zB10Aq5bnTllI&l;Sk*tPpQCXRLurwYKH=h>&XZAgSL?$2GK}Ng2yX}5f6$Mi>P60b z(Yj`}GH9?t?$Ii>MEvSkP{G>Q{(mGJyrTDNRrPorG)so4_xo zva#JUqn8kK*tZN*)dhAIK^W&8L!`9DsjEq4`)1pTgOjiCs%s=Gx+xB)8^GM~KP!;O zJoxDxp=k==OS=lZ3QIqyIaktW1@|2cE=x)JZ^I26F2EBk^XQjpE&p3a=)czg`}hYQB&R%S>-k)o7Mtrn;=#7#xL%ZK zbq~V-t=O@T0-pvt){ynse56sL;ocm>RrQmTMf`w?1dXiAA~(Z=pQPn%q~^MQ9)j=& z^+|)yO{tR)Cu4j|rFAjlUEjg`YPnIA3=rvArET!hbBX1f(7%_hX5>RgS`O5pOrrkw zgOtpK0};av|hN*tUm0{wS*-y!Yp++ zt;f8Jo4)KV-3LLO!BM=L#52;0>Vnp_3gS>M@|475Pfd1{@iyI7vAvPtc;xq0k01hi zCeF>je5O5C=@~TiBN#U!6&F85w`L125^sf6GCO)@Az)2Lhs@fbJLzjvbU~!1% zfAiJRP)2Fw+`~ZiZg!<8%7o_w-OO=aj|J!k?AvYBShtL$(m6tULY7Fi{Y(=A|E6&F z59&=fX_IEiwk&E6`cnNMXdKaeQMj@Mi`0((SE`wv}?g zmx5Co1Bi$t&Z#BcDXOR^rzP1WoHMZJ|Av>N+i{-BikJC!v$CtLJe5sD_$Y}4jFc23*Yd6`pYyt*JPyidrJqd}_x%fF zwY$54-Rqq^<&x$2p%z|wdCVNry3%u*YPD%PjH&+HAY-q(*V{qVm-ti)9X>$pxET4NlD;;90Sif&jihP(p7@crhDU)Kn}mZ_Isl$|OoF6Z{XVDJ46OLra7l5U+|B zL4SR8+gA@kc%M~^b)58#3J`BYfc!j6(*1iEs8(t4_)i;>pJ+LOjKd0zXc*jbemW3N zro}q+%c|t<)%@Hg0kDdbvw7$v-bK{)r^61X1F7X?hLfSyE(6_g*nWn$GZxzg-ENP; 
zoqkr|j(FM-(ulzrRC2>*I45DNRPioWlA=*z%GNWDpS_~QaWwS$E<`ew(uO^iIaeD) zi2lHMLSTyX`3bQ}B(51a-~Q2~q3tQ6Rw7JwsU!WA>IK_DMtIjGgK2UD(;}A;tGfnC zPHLsFRMvKq3#1qe@KbhU2%OZ({Nedh7oM8BeV4b;4$@|xo`JyWFT{GXPwB605l=$) zz2=8PCOX+8drN|3#zmIJO=$R2Eq{&S+jtfU*$tPNb@|Ah(ViPHxG&g4cg)AxPoA9q zw|auYyZDGYu`FNKqJLX|EI;0~~(K zo_-GF9kjx!(+Iw@x9i;X(ZmrxaHwYYw%qQR+d=AD1ZvgZPfm8<*2*d&{?SLH%BwXV z$H^}KvP3f2q|q`->WM8&>T$eAxVqRlFlSMSB4v~k&S>=U{vG?fkeY@>M5x zLwZ;5W!wclLh;wnPkf~Pf8Wpv*;@#OtT^$ce=QK9rrH<(0)nnV;O}vk;abZkZEkt9 zo9CftL;pO$LmdrC?TG;<>eCG&cE8@L2W8goz1(H&m@8ED=C}axa1K9*{O+^rooYPS zz4spR#lERCV{cn!kEU6l$L+AJMFN|D1a6qbc81R$>Db*1!(bQq@U?8s9_V9c*r#tnexcUvzKH*hXOoG}e zs3g>|DlRr9SX*|QI)%IAGe#Lt)hwS9^KYT(BDiD$g`Z*and>2M+3dyLIbZxNlv`~9ZtswT)S*x}=3?!fhsL|RlA7R5 z*?+s?eG?WHstgv|iWTy$S;?ObVHbWst)}h%b{<&%#f#Ey4kF)cH(?nm13l7y`Rr`u zs<&0TvF;xCBU&NOs5pxF3|ADIdB5a>5N6BGLL-n8=z8e(=l)R*C$xNvl8>TZYz7~pL^-kp3k`aGF^D=&1-id?kuxQ&ZvC1I7+s{JfsGF05q z50DOeS<*$Im47bnd8D+fG|ni~Xz|J6k9s{}>u}SO%X`+- zc&jR0A{Z;0(d0XVd9speU+FO@CQ?rsH{^G?YM(k+TK2-nuEg>o%!0CIWt&ng)aADz z#Vp7>HKY^vylr7EXhNzn5RmSq_bwGXRElc>^i#!PDHd6Or+H0q#IAL?28mO6KJ}bR-+-(eNt%_0$ySc}zP(z5P zoMyCpZH~REmN{QU?oErCX(%M4dC#jpP5+_vi=oLFJ!c5A{4x#SJ1R791M|_DR6)%@ zSZtO@xONJW%jHxY|1?_;uzq*^Lwk_Q}{0oE>>5*=_qb$zNkr{By2!(D{Hz*OT5K7TP2kozvZOXpvAc%C?Z zW*b&%ueBFb+{dGhlP*-Rv5dqd;~7*pUV|XTCqer36zsC}(;@(Gu88K>tY%RoYFq5r z1NY~z#2#$CDZyWAPhXjgv=y24oHHgSxO@kr#21XG#tdM! zK6mBzed1B;^j2XFzy7zkTd8kbk@~9mVzH$ohyI}9T^leOkq;L(06ANcL}`~^utWqt zPP7*Se8iwvjsA#!tbAMqF5EuLQFlB3d7PhX!E?9`mJZcZkf><- z#ZJL(0?g|Nw)HBCNsWG=Nhr_{g$A81QEEza@aPaL)o}#Znb(_MF91ji^6pz|63p{k z-<642H1>S@N}-VLpND)d-=sNd#>LL%0sb0XO-L3+dr|-CDJw|>?(~IsE)FAAW~I)* zWiZZ1;9$kH=hpbiwSUs>m}B$ItYeib@>+)wjzN%uOGrVS?_QIoB-4B!k#W}-CLwDh z@bbl|9a>0K^DQsyLNWPm7OqfiLh>i_7#JC9Mv^giK7ma!dtzyQBRU3yE%-~L3s1HP zL_ZD(kBa?~n9mNjQx~-w2Vj%XbRq29{TDDIRDX)te;xa&eC(Y<;95GY_>By4tsdlX zsar)HvV%2e?nbHBe%|CJw0r%@DDzmp@x1hqYN4T`p7*vf{p5~&QhpB9H!20YsBK)} zTMzytKm8%W4u#C&MniYHv(P9xiuJMA=5@V_R2jgyPM}+I;NlE%-u#hj%(eTY^VCP( z{j1p^p7}P;U}li%yz8B?bbB}=upsB* zOLi#$ChyHW$q3^VI_HF~ zNenT)4u`Yw%op4K`w{QR$oaAAZoNvVc_AZA+bbMmSwqQ~5D1pOF3?^~KXxm6Y0-)J z<+_*&%rpJ*iUFqv1p&br)`k2~_Vv6trutp=M)f*bsxK|~rM7+i3{RjMTrG+>v-%eU zK!jg4O9Cf_iEvLX%r!V>)Pr1Ny^DgXvM8 zxGY-L_MCAtnhWLehoYIvyIu42Wj<_)JG4(M3d$masX&D7xleZ`bDhgZ7aLarKMY(3 z@QJ6RB(IF7#*R5#ykFrSCF1{tUt&{D;o>)+jEE9?FOuBYO#IvHuLGlMhKvpZd-?!o zKhR6okr;hd!vpvAuXn1iJ@%#VKbH8MR6d<_yO zT*;SnI#8wejc@|dPH>LxuxZadZptrNu6!Z+i4YV0eykduo%K%1Ehgl113HNm zxc`K%`AG}jw}d4JPW|$&{|A4yNN6DLYF6>RP!7R@Zl~bXRY{Q?MGppQ<)@l;a(;%w z)uo!cS`t=fPi&esO5$ukNWAF~;d_LoN#M}q>=)Y#xE>uGVMzO_JG$GuW@dEP#Hj`t z>L9oO1|pmX0bq^VYA^uecRIjpK|Gw+P|0f>Q#< zo`YI8vBj|0D_D+a??i-U-HJyxNGfA*Z%)n-Rvu6lKW336w&69=#`Khdac}Aw=M8Z{ z{jsa1l4PL!$LD}z6~csCx;*E@cPe#jN^+?VJ02C~kHoLpoiF$^8S@^PwO14AM6{Vp zFylQLZSuGAv_~B>5K`v|@%L~cW8W}n7A5Ice7MdInFX;9PdYaS2(p+h8E?Xv%o{=o z`2${?Irh}r`#*g5XwX=Fyp!{fXczTgcD{@%j3UYf!3gJx%_o zoq0t)sqMpr0|IM-22p8}!>mx*yvK%Xq`{dXvP8DmNm{@hp?!|3$j&dGY_(d|-CH@1 z*mB<^%dx6laqy(0Sa>3CKU}9$w(+p28}i%qY~$(Z+1>XK*wLZI{>b#wn6V8kCe?CS z_sO;*2L)4;MK8pHZmF5TP4~LqwcofuSG*0(`888hxx zDtXobSk5C1N>adRJ2*&P>^AI7V#0C{)Ei$Z;X>{$QHi>viU;MF^V`A3 zO`kJss-eHLhJ*dbzrR)J;8ITeLzg(qb#Yg`v>KbTwXMH6oiom+12;grhM|UM4r8_N z`#%JV!{2C=c*EKIU^!u9$~Ff?m-5ti2FV1uIhg*?!0NYVGsFjdACR`yosd#-zvM2x zDh}-1n^@?@hu(Mc?{lSx>FDNM#D<8s-a4JhIkdvwtdSw`bdb0}ZTC#4y6a$zArbQz zk+DP0=5e+RWV?52>y@2}rS+KlCQUd{g)TjZ!x45xv!q+gW7l$8wDyFTba`;k% zw^nmZ(V8xG#qN|01O1mfeub3c!fNPN!Np(j$RqhixlI0Oo78d2$}s&J@m$9$ZxM{_ z#vL}}$~sKNc8q#7c$$-p&QypU73~O+(+^Nz_6Ww!&zJyl6xa~3fr}CjWC@d!uv0{q zIb!kD_;)O2|L7?X7E_e1#2)6+V!2A!=85YG|C+xIZAj|gL(j?dc~m0c)B^OlQm 
z+hr)}93>E_)P|RW z9x~iFYO3E^q7Lqf0?w+Mdd!y(_Pib_5iU!oZ5QJfIAU#T1kuYhgQ9%kuFqOlY?>cQ z0wqI)CBd-$PUK~>ff?RaS&NT?$cZ~{d~eZd zOtbj&Cb35vS9+2vX;=gMbKGH$)`GUNXnRdu?UmaV41$&HjvNdj>YM%oq0aRo9{S&+MZllm1+!~d11*$CUxS65!8zU8JT>T}AUHwJhy zVVilaE7Q^?@6`}6)P~^v*%BNrqi5jQp(7$=cO6DxqZlU-@;k0GXVfxIT*}*yK19S4 zpmK=R6>%A}itqeBQnsiOR7G~2JA{42c2wZ&>`~nGMQCurj=yDpKcx3Y*X^W$;Ae3# zOQ0_1fomGlmkHq&R+>`aWG0E=u}oZt(CauN(eISv#x>tyk^O&k*9c#$t`H4>>jHu| zrT@B3Md#bE-`^r*A#v~H$TUxU`#y&M;@YEW0&2WpOv?i!;NpoZAvUkqXXMo?HAS}{ z?2xO!`-aC*IoEh-d>|-Q3u3_l1dqtri9x?GzanIt!qTJa*1V>}f4XA6!IsoE0F`?7 zR9-Ck(S8-f!o$k*Cq);Fuh8@zc=yw1>#(UD=S5^($GZK(E$_Gmw>)FY;kzts#Y1XpQr`Q;q^J$d zLjyY<_A#&02*$OefChZ$6j$HBDZz7Cu=UUQ=Y{SLbT;@aGmh+vMthOf#7j3ipQX%1 zAXgpcGuTyElt$vahonx5w>xq;Venas=D%c5I{bdoB+{Yr_MdbK9PyK>ck0Bb@p@)5 z0sTTj6{#x^kWUkEOz^ehb^YtU5t;cS`R~n$)VQ#Yb{;Lml{Q{i9;?`;@^9P^U7g%a zUDf7%kXL40sZ%Df00ryY+f)HpNF94#vMfd%wyuoK(nX5yS(8e1K4B6<#@6&^vxX zZUYNQmvxQTs+pX!I{GRuE?%y7SO+1=d12J6BLin%!dUsx2{PrCf!(j4(S3>yq$P7f zyyBK-AKgyEN6#fK{d6YBA7i6<3B&E4yn>a@W?WCYm_B5|cSK{XYe>kud=^8EG>eO- z*|_Yk^fxggR~J`dHuv9t9aqMgo6qY${#ypg#mgUJ=C3`*9ur%FxI?skY<{I-B>JCw zojyON>N$$?zM=d1t)z9$nvkLQr$Us*Z!$bSod@r~Ve{!SOB1F2vA>DoEi&EnxuTy9 zo#N=tkphiv)If_5r1N2&AKrEz92(9t0tb->)~*VxK*rxb&=C;@&aWTWT*&&7N!_dh z)Rf;4et+$Ygr8ikfMmm|<;Z^O;@_e~5D#HhVOL%x89cfUOD2)v%v^1KS*7F+Nw<^J zPwxyGK4vd+wq({X;;M*lPp0^d;J7(b`6Yd*4^qauQN0=imxHyrj)>_mVH?}{F3%tv zDkCvfg%Xi}SDz+_-3&2(Rd0x)|FOjSd^Cpj6=z8igw~g9+&G*(ZhT_JBt{+`Olv3N z>K^6#<9AnQ#dphs&~e>K;(_Kke1L|c9kvnALXRZ!1)8Ftk?2F+whVpks4OIk-^Xpt zrKjNPFMhJ#uBss;cEjo*!&LGOLQG<-)eu!bBsyRUGvpQxsgYT~0nQVXI309Dm1Cs87{*}s?SdgFIe z_L7P9;AMgs@gH_b3Q9$w;0M@d%Ol*Q#pNWY{jwLA9J$2F&wPo8e^V`DXl52M?U%rb zZg&DN1*Yiv^gObuwD0|HG&G5#A5N9R?kn%8P4Mfl#I_Vj;d#`CA_WLSOkpZ;-{DZK*2i2zcjlYM z+1gXVWZJ-km7{aq`Nc`(Z(a>knvjRB&+Ff%Inl@U>`9TAN*FsF*1m<0qIMJd!s=0Yp-RoX2}^0**xZTBp>b z5SLnyax>X_kprjvca%<|=F}2-#lVUwk6XT`Ii=eb#e?}y#oJ4_XnXfYe!O^2NDboj zsu>KpYLt5boPgFGw-}Qf=8?@#!Y?L9N%&JJs~*S)JQRpm-C>WKs)KD5a|a&mer=Xw zA|v@{X3I}yxH^6+_`ts~Y_PA{`eBx@stCqgk${j3($${ZVE!rkZSd|KH>7t9?W!o( zZL*cavvW?wZD-`8=8?JE={(~NYURjwPN>*H`}d=N!d$+=KXRT92bM?1M1G|WE#O5> zfFFm7MuszMMo&Z9Q5p(N33|_lz<8)HQoi_V|F%88X>m@Xu{I*OK7ZJY2}lwl=&y6E zDY`zVE-kS!D*T6`AeAIH_w+8Nq)HC8FjCC1QA1%g;Bq!XN7Ve9Ypyrg=(ZMsJA)6vwOb zwUe5m4iw6q#_nXOWZ#YxHsV=?^%jf|47F7)+OeLUi#XFq?nO{d57sq-!q9=O)bd}+ zWg+Eye?~YW?JN(n+>Uz*y8|MK+hg{o(1*1c-D?p_B%q-j*}L7ByXRD{XpEza5Q_Esc?#)Ca*Ql zqW|tIdvR@~h_^_;!5vM{R;~&9CLIY~lLj%DS}bTJj#JXAK6xd=_7Sv%j%VzLYPUGS z(d9b-qW=oTem0dfFrBpZvqMyBGMiU;1=q zBit2C8(ds6!eQ;if(FN-?n zgrQ)F0hRx05e*d?-jd*zKtO6Y8KcA0M+|tb6XWkKhefKo!n-ysBd4nSGy;2;KYli;_9^ca6gfg} z&N7;Ky|GgL(HTY*p{!frsY>&G18e3ccqKX*c?P=M*SM32G zG;ZhWm3@%7f3#70J%XNIP4s^L?qTf-B|=0ZKThP5SNmadiEAF*w-5m_6`zN8>OM8IZ%tIH;J zX1Hf8w}rU>E&X8RCr3ks_T!*HbQWWL<(tv^oJm6~=I}D9{pn%ZBxI6NSF(&M*?KFY zoj%pUs?Z9p6IP#{+?-0Dlxl*MKQ0u6AFz~%|3>i$;qb+wm|ncCag9esVihyDMj<$2 z_W%B2Oy6pB+Qw{(Q?wDyfql_I>*M444J|qjk0=k_>>!jKIZHj+DnjajHXd)Fb9L9m zUKZ0A^2vxZzn7EHcu?2#!Sm+{Pwl<)cK_Mi_{w@{u?9Y$53 zEOiTxPD}+1RPuZ0&mQ5qNUS`@*7@P7(#H|FU`i+-#MTr&8q2~BKK>l3gX+T8)fkIkl?*gx_>cnz=v}$)yW_* zdsb2sf0V7mWvM>J;i!=QXxJB9uZDo}W6l#f@#i{a0ti8k+cQ^A3K9QKr~5vPBHP3Z z6ykpU6S?S3CPti$HfwB{bK?Cu_f#*&Y#Ctd>O5Kwa*ZVOp$uQPUr%Z(bG2LOr2p>0 z(5WO}DpmoFt)63_A5^+i<{EsSNz@H@HE$Av)wDhsbQqje|7T>ntNqz90Ua0bu{iNs zG3y;?zRO*_+zWf4h&#dHHY2L4aum6+>}Grmt>frKZ01JCWXHp)-3c6>lZORt|Ga|- z)x$w+Ml7Bl$^;JeCxMMRxqoW|*F^yHr5bwddv7~7PdTqU5_C)~5AIeDQd8M)4bj&n z>5NUh0Mc#HHoG}3ubZ9D$3X={!eddOfp3-Aal^z;11cROM_I5Vk)Z1@RdtbPqDAEu z(Tk2TwpqFy3{QxQ=8kqh{O~v18SF>5;W<`y!#rf26Ch;lwIkz|aDWB~r%h4j-;o+r 
zni5Za=v%h0KY^cA!$EhW%P;K|#87#X$IbSe$JDE|124y}0V_aqvhE-*-ORjG@clt{f)YzzlCmnNxGJ6~2#k9YBkZU&CH9h4 zIl_rdSCL}u+1jYljj@lCAw$snVjs`_;?)aw0f4XN=2)B8qjZToLw{_l^%V9S0m_O} zR9``M6Mq*?gjrjrCDZo7=(_=N6yE3I?vEQkiU>{p7*z(@I}g*`;C2J`(x-DJSaj*n z#g-q#)Do_>u@bM@Hikf7$D+No7k^_HaI|tM5crs)gWnK!LtmeZLI@2$Qi_2?7n3Fh zF79}2$88`67R)CFhBGA}N`@8kkQf4jM0>)h_ycF#Bb20XI7E zL*##As=AtvAS}Is#hezF;yctp=KWX$KUKdfvQ+nSL zXNeBIy+QRTJ5FJE8fp$eH!d2So%8S&O9}tkj%mH0Ut>y5_USi?>~pP5!RWeMg`0Xb zRqgGW8k(h;_6s={CN)*6gV}r#yn3)#K}Jq!Xhc07aQ99~VVN6l)xJ9^Pon=gV7tli z7le>j{f*U>yui;M(W=DsLZZ7g+_kQZZyAEXa6##LUYC~Ff+FsCoc(@}FphM`$?E8- z#GAk>6q!A6uP$W*1^(*;9TABcdT|NPA){LTbsQzF;+)LCEekkBtkdJ>_IF>2eXrDP zTV;}LmFWriwYG}|e|;Lsz~99!iMeFUaGm_9;q->_Rhn5GOF=>*a*Ba@M?+fb8 zwF;z+IPw_PL&Xod-JlaT5GB*`(pOI8(PQi4K9x2RGF){)v3E_#7eMWvDZa~A^rux7 z%wKq|A!rp9{Rpb=AI(>vwp@Or#I*lAYs>R!Kw*exp}Xk#s-YDf!iBBS zqrhI!I@PvO-ruXg=d!VGJgC%j9vd<(-G^$D?X7W^q~@I0V48`W``3joK7|Zc8jOlv ztq$cLj@2HTYi?ucml1iHpOqdSE?HF(SUG@LK2g##v@x6qVTe zJdMt5-Sv%vOA+2Jy65W2u4VOL{8@GA=Ci4qhTct&ItPKR6;D6TkV*|qQ8(7u;?xtW zZ-|EbBxE=1gcI($7DZn4*m~;te*k7dnZCg*p7^TqciyolD6j?vOw3Ub3actqakIZ+ z7hHILy7(eIL4kW4hhvfL4f^U66pG^BNK90Qa>m!6zw7wG+Q}$1g{$tt5-mnlbnz*v z_=zZvL~xV-ljFZ?vKnpgbee(rso9N&Mj3QjY)w#9kO(ibbEsf{BpAg_A*6{J4G@|Z z2bBP}HgDSLhK@rM_{D4(bJsd)Tynv5S%E^*2zHMT!P4nsHkd{Mi66(b*b$>v5cwSA_UxYx9364bYN^;R5<#af z*vE@l7r&^AveA|j){g#%)pKP~U~)Xa^yUs0?ie7;T~sBcNQTIasQN{Ub~Bq-&Vgzg z?Zbt@+E^l>>)|>h47np!Hpa~3bS^jyyoimydG?v-FhPMeC|1H66r7;2PkR{}U87u> zBq);f9<92ZIQMfQ>+~DC~+`^c-3j_`o7mAh%3OqrvF`l3}=)eQgDzXNJ0_0Z_ zLXbV{!jn-oeHbsFi5V+7z%#u zNb%@VW7BoGp})jZOUTm+ha7w`e#O^2{o)t@n^waFLHuyY*9KgE`Q`FUpYQJXU1`<* z_uZFH#014c_!a)pp+nR1ctlxZ+9%x7tn}Bvo|*WEw9lG-(xHbPmX=*+8F^~rx4->e zy6azeVM1h?^o1{c5l>CbkJm4aN{?dVVDZHkNrU#N)(lw}e~brUg6WOd-bg?A;SbYmZ@iw?{PI`Q>acT0{?SJs#bVgcr**!zj=UD@ z^Z)aC=`*fDao&07Amyh`e0EMj&24gW}sF1i?U zxKAc_*8Iv>(g;kxOv0Cq1s7N-?f2dN@GJbj=_yPeoORCGSkU|_MX;U0|n@yDGY?dOC6*SmP~$tTiFBSv6SXh7O$uf1df zXgVgbuDkyFbkRi@%2OPl`@$E}O5l4BPrp3z#8dLb#^%5uc*sF=vy-nfyYYs9re9;S zYl$TmmtR*bw)kS{t=Hd_Nuf8;Uk4v_Fn;yFEfxh|P$o#uIpg1~Z7u0m{M(`tf;Y$va{ z=nroHBg80bGd|X2gMzPl;AAh)|DR%UyshvA#k_cef_=!X)dbPCcbPr+e~xBBoWNS8 z-uY~4Q{}odmOfHdCm#CF{%_}Pd~#$6M^AJbfTZNrPc)8op-!Wt|ACXU5E`>Z(l=eql7n!PKeS%{C`~=0up+(s#79kIw%MO$s4UE5L&B9*Eq5LHoj00lAR_FJ1OKxaiq2s<`X5em-3G3Gj2RQ-McL!KL9MbcyW*9dbCJ2Mmbtrp|FGlP z!&u@xI|hI<{yisQ$F%xwQ> zyl0yaI2jKrxrH3!-*X)^g>>%W46gR^t8EWx4V=}PQC+*XP2puvrjT9-u^<&*Y+dSl zpTZ!>8yNC>{@EwG+TU1uxxOWM)u^p(8FaXcQY7xUArZ6WVHLGvun*^l^o1J4(fo>o zNF6)HVJ{8NEsMe6K@2zR?}}SZVegENcVpWTb?xVDkUGxAoJ;HgKjz#=S>+W^r=E=O z!tdf~>aEk@pAJUcJdT#ICjcRbG8LP8A?~YOj8{DMntgUz!)gPrK~a5pDsFV=)^YxM zpJ^X<#fT?%AqN$ULL~0Ed60O-)z=~g=Dp+~j@1vADom^(PG|pzumM{#)bw$p=!RdR z_ORjimtw(wdPX_eEr!S(4yZT_no^Cinv_<_tXefCN)wBaks8i2)>^$UwrQ2YDJv~2 z=0QY8x!CEY8uRHOD;&m^A)QP)8v5Kg6^fdHr`K(;Xl)PEyT1sAs(m8>da!L0<6}?m_E>mIK7cp+ObESLqq$cy#QQtmmW05-WgwQp}!nV!7C6=wo+A}~QPH!eo9N(O9;;)*u*6Pz7_LD;!+&uQwjH~ux zEpyi)GR%~I@eUS;Qno^PAuPx>>*P2F(wD_`{~(MjJJQ2JP9r@WOM*AcOpv zXqpEfe6Tt2paYwIzq4<1*IoZIgp`5ZQ^4Y^%?>+m-}G5)?dG-DUcuot9p!5c7Hezp zF4X3^=bja;_+|pU4y)aZ?`G7fky)<}7S|4`Cmw&i8Mwt3 z&9>Wa+r0SVi@;8AZoA`-X3xC_H9z>l_nSu_d&IkU;=~Ef*=L{CY_w6o<`@6_i{^t5 zJ`~&Quf5uwc;X4o7T~$+sw+GXo*(DqxZ8ZQ&6?wnKd!-Rgv7=i-hBJ522XD^8*a2= zbLAC(Z}7S$vAsNEM04bkM>jj{uzhpcrI#s}C7pY4%9N?nw`;Gnc7vxm8a!PfIK0}b z!Hvacz<^DgBaS$tdH(t5n`zUO7q6IVjy?AHreFX5&2NAA`{wO;-pOpp<;^#nKcDtz z^ke@9>u59|la>lp&79GfM)BhwrR$W8PiOgu3Qr*Ol*GiqaO+W%B!wwCg7NT z9@s;^_x%PpGn-2;xdeTqIbq_WdE)WMnjd`sd(F-}@6ud<-Sv{|n6YD&Bk=%F(+n9rq}g=Sjhm~lzOrHaZ5uUeG|rdf zn|;2sPjk~vx5zQ`YAGs}js7qjJ@>Ro=`2C)>Dj3Ny6B3oWiD| 
zW8GveWsjX{JMf4fkS%kEMeTvD=B3#*R;tFkJ2OO_AbHWT&0Sq^R)rKj>!n7?YE~8x z(bQ@fL;EdB$EaXcD@zRJuJ=UQzhW_8DXk)A8)Z|XG_eSUDEqV&UNtthD{f9{N>`Xi zzz3VbtyGiwm^PN?Y^B=jTEL^vjnk0^j>klFX?X+_M0c*#rmd8MMtm^_go_ls5?n3k zpmNdhT9%qg4$MON{xR&BVfehzPyYVjdB>fw_v~XVhG2WjuCD>ahUtSmiJ z$0u5vDZqj*ctI#>mwjQ2zO{F8Bc^z`-d$aAlL}ei)oe=RtvsB^P%{<3fV0035Zv2F zt=G|eYQIKTkH~3h8W-wS7*;BUIRt(1g*LDUEXDHq$2pzag$|#h2$*u<2(Mv?uz#kr za^i&X&7p@Kj_+V=HpdJ-M!!Efr3(hvR`<~70`hBS?Q89?we+x&Qj6>zU`Yii?WK2c|~`GCZED$#)1Y+T*ILFZl+ioQq&pvL}DR74wR zbw=+%m@&kOF>5*3I()1jG#xivnz^+2mW8Mttb$iPpQ153(Ucw3hS9PznNtp?nT8C^ z)j0~Ax;mg@Vx_Ru7_W6XcW5&|!4+^9evQ)3r~tEbD~|CCuCl3x-UeuwRvyYIQXO0h5%$^Ou(rsyrbp5oLQ6hBSH>VXxa zX=@A{6Fz@iTO90yS#nwQMKXwKtW%=H+bmKHEn zD+1An2%VlFt~|o^FFUJG1uF4f-6@El1&mP~nvGT6DpF#X>!tk_z~~(pgc9E*p^KT& z5bC5hdNGLuB|8j6R!{^+d#%q7rGVK^T~&toRJ8j)*Gv12ig7}$EVkGTT$SW1xFQKC zPzrypKmF9Rn4lQe;8*gQ%YeDGYGAH{hKfU>fdYyDAAInk25VL{`|kT4;O2AC4-g|^+?|t@ZMvffQYMSO|U_9Y~o9bJF zqrXppy!F;w&G6yFQGX5-MiX%kAr}5IpIdLewb^yoU1joO%vf+^<0J}x?cQv@#pcb` zS6vNFi7|Tg*yf(Q?`uY*9UtMQKX2F@-zk_};p7D;SH_KhU${n$-~`1lrOkNFSM$+F zAHnZq`G++Uc`Rtj=jrfA9&WI>ck|6}?$*5W$}0_CztrFn&t^Tu`17f!VxnW3 zL51($`|fE@KJ}F5&O85woB89LOD?_yH}|)1@aro~DvXde63F;iM7+5PA9w~19M~Ln z)KStsc&!%NuvdfEEj3R)`IJw72sw6~1hL5#_7^8yHrZs;=87w?#4-4x_G1Q5nKGsM zV6sk#aAM^8>;BR7U9WHRqo4dpCO3>z%jq~)&S>z&MRN-BfB!w5h&b%BdB^xm((%OQ8qF1w!i-#V1sKL`Sm`Lf{9C|1xC|dDQZ(njt zAdukytFFGf*=fgJnyt6lrn&kW9+%v50Ig_MR6!p^gQ;gUXg;oSL6H{5jz8909!l+g zF0}Mo&8(%8*-YVaL?OXc>Y%N9Ep$imF;1hhdQH`N{V(>yRp$jJD5{ZkCl^9aIdV1y z1c|!xC0|;avRoru&=8;S z$_15NkMT(typ-MjTyT9GTOr#0UtLy!%;0u>GdNeLpz*mE8V491y?DE`M9Sopg=q*-SjPEhRE;B}ELAYkt2 zg2%4}nCmNUtNrc%&-KD@Y!%S+Zm}7-Dz#abXUl_XGf~KLXM6)0z(Tb6OdU$`z)q1Z zywMV`iHfJhk2Xt8t)8a|6;v=~SJ^;ZjYn z@tvyzI?fYzTrhPDkw<}XB$J9(l;YL3|0C9A3SRLnq9)2Ys^iMvF{*&fU~j)WyP&>a zRe|5Z@HBC=HokxIhkYYQjS?KK4$YdXo&i28+34{#TFsJu)Ksz6xa?b>xjh{JLIdQC z;>S>wr5=?zppoRNL#5tB{A|~jVvYEO_!&II&mY7%DFl~WDM+;y z58!G%D&NAz`Kij(s!3G<0TslUNu7b#9O?o+P|0=ge^F7H4Myx$7u3?KQmHC4)Y;Uu zz+{-m&+%Hl<~R|II%AEad$qdyw%mJ;&jYf`++(^R;cmJ4W}TqGk5FxbqDFM*Q4IF@ z|NILtz`d1i8ocSD!5W?31EoER)4F<^t=)FkYedzGma%}_{a*nIb5a*Pg;v0TC8Qlc z`jc2A6@H_QIHAb~)rVI+#fK~Sctw63ElBnxh9fEPIg%b8Q*nHTsGm4FyI{nCU3lb1 zmKMo%SzOS_SSnE|MF(a3QKals(dYKiR6UtvkIrOnTN;Z&N>8+W@H|kp;8fKbvRFkn12j6y?wRXgk@(hjIuM)dlmW>h#l{?_L*Z{uLftSLF7DNZKGwNvWekbJ z2rfa$IBJnY?lSYqY)n~_vb@wykBjG@#Uk04V-1QGS5ABE@ojSfAyDD`;cc7b*X?`p^Y@2m07s)>7>@%@`z?8JlzHyOkB8^nN*jJOVDD|Fu4oV}j7Qpq_{sRkT zFPUB*^>X_0kA9LC#6pe#`-@-T6;F6YlBA~oXPohubjc+br`>kjP4c4YUcBPzwqzj3TJx)GrW1a3g1r8U_6Hw$FdcQ|k*PNpdOj9wc&xG-UWcU( zNmrvkFF5bqv>J5#?z0b$sa4?OXD!y3cs`x{r&H1xye{eR!v|xnjnxw-O0Zb>;|X6> z5`Ia^t1`oA@1ZX#vW*n@I;E?xxhCzl>o?PmJMV-AwC9#~&nAf3u;}w6>5qT>eOi2p zCDI@N@CUrgY+^bWefBqu-Ezwt;W!a*%ul$(I_wjmV4uviJDv>j{V;P?e=>Kte z&BHcZZk?v!xcT`{hopg6k78afaBtv}f#}-jgdV|lHsb2LOqCs}rbhc*y{YjLuzu(k zUp#>gHXD!JB4IKLZ3Dr2T$-Q#{Z)M)eZZ==g;#Wdz&8(_1*^xFh|Ub+$%<~wlNSqq z{-qCF?rlfS-NXeuo=9PHJ9Vg7&Z_60>cA-m11d_|FOKD#fHgY;6c96VmHXd5$ zJqkN)RHJ8!`xuAxzaD{HgW_haV>SPL^Wzl_hhyEQ{`H`MhR@m`s1q|1!0M!qaij%#HsuoV& z<-;&XAc)agoEBeQ!a3CC`lkdLi9#BigQKX<0hqHT#Lwr1B_>RUQrhM>ivxSj6I->A% z(2kIjiQ3gcpluQjkh_!>ZZ_7;%WH)Su&B53C{c@~d||IiNQ|I0irgL++7?Y$YsANM zwEF%bIdy4&6(mMh z6j-NnYrNuh$dDni^R%gmtb~KIth0sihDd!6i^}$24Z%&&X^f0v+iPew4 z(gxKViJ@hJ!VjR(UB%Io*Q8?b94hn#GV)e16cE;17P(9LiSZ@0atWOV&P-^E*b!Lt z!AJuWs&eW~;1lBu3%C$m;)e@^wvndt2dPAWk9|5R$_{C!Y9YRu^{HWh-&Nr^0mYBx*mho^msR&J0=K8O+cRm8sC{(!M0K6QK& z&{?+JM(=;7gG-yo?-DqB{zviIXluWXOXigh@%XC5>toVoSR{L;6|n}zx3LBVyA2e2 zy$OF>Lb@u3eXwZ^aFf8|yB$J~?`VZ z!kfv0!mc7vC2c}V8!C=9AC>|Tg}s`#Tl|7lywQ>qzoJjS!ot1RUVB}_69?(Od+$#R 
zF1SD%ddx7a)A0G!E8j@B`1logI`gcvu+Z|9^c~z3-*7{$L4iEVVpHgU_G1pp$r(H; zkw#%b?i>DrH7J%`Iz98;b7{yCgJr?%U;O+RxN*K}ZVLR6++WZ9D<&u|O}k<->HYWL z4@{p)_hBstzO#QpOi&zsv@VieS`Rl=Z2U)#7?}>jP3gJkoICyYx4)4EtMQ7Xbl07? zr)&Rl9Tv8J8WRJP(XAM4_^}VBef)7+cinZzJT8_uRA7ZMWTq#ljE4 zjq&Ysjw#^@h;+%Nm!_L;yb)_9j7fNv6BZi-5&i;y4BIrs`tEo3#f|;l(!z^!g5qUN zP@IeP8Lr2d`FXLxRIh|zp`_Vn|4f=^?s?K;i!GW~#iHr!W07h+B_KEKtq&i3@PTyX zFOQNo9DX=*!^Dh@1zx*^cS2r>oAT?Vz4zWHEx-J79EoiHOkzL$aE! zS{sX69)b7IKll6#>6Aa6oW8Q=mx1M2A$ROu_z{XUQNH_S|4}+3zIOjOIuc6X=TLfleXMqv&2coUUSSY*S~k) ze>a_j^-TVG<3F*u_5>_YK5trP>1ETGzw)JoS8k;>*ZeBh$CxX<`pRqRZx>&b_zq33 z;jzaagEU9USrZzL1;V-J#44+;l7`~T*wRZaB{yL1x%-~om1dhrhSYZD6<4=?csrA>xBH6#;8Wh|Z1BD*I-v43} z8^$P~Qe1=LJbXyx8ZZa!e?a>BcDhJ*xAWg}$`P#Pdah8JIcFul=G59-`cSvOBv}mY z3ca|BY}Z+#G6E!|zz~48(*9tmL~lPDZRd`0k&;^;c~x5+DH(%+{~$j6?fTO>{?;rB zQpaav94K!8t@zeoxsQ2MYdjap-fAEwwx*_^{*;SkZ5Dk;Rc{!(a9Y#Xba=tVOJOWY4_QrTu9DEb(Vd_gNoEYA-?%oX`MiR z*V*w8h2`y{f@H5^W*`7!d;G;pTWCLZ=4&NYkm+826cLmzs)Oun%6STBvvxblPU?Z7@MG7+P!`5N+&#!->k2RIB7g6idc)|XM?HBiQ{7IimyK?vD!Ie-J+CZ$^LB@{s4F>B^*L4b2 z`;8~Z2v1P796brsM@2rBy9N(`=G_H?VB00R9b|!2lhwsqB>G5KMpf{(8-9g40Hrqw zjssfkF1et@p;b_{<416?iSeZsTDG zgy)g51~Hf1nQHA8-v~ndOyz%p2oYU2MA>bbCqhJ(6AsELyvY3Kwp)e>^SScFV14*m1+s|6n2Eq4*Gc9d6)Xiiwo3 z;)e3~@#Mhr{y|m~;RMB5XP%j+VzJe|_t`g3P=MX@4Dq3xh$jU0!Y_74VuIp2Oi(Pj z#8T;5PEZU!JT0)$!s(ZopjdT{)n$l`;*8TzOBdnh`?tUO&9wi12gvx{hlP-T{p(-L z;>tshIyx=C!g2^y=Hx?Ed5VEI$-nf)FJK|a!Lms96<1z?ng5H^+jw$fEwpW&udSVy z#jpGqoPUAz*was@zvI{WOW~$ACn#3LLePAI;(Sah+;Zz}_yzvK3F{ajgkGqAjm5fe z{O9$!X+KxmWYbO37r*$WwD{tSGMmbBAOyQFXKwrg5=kupK?D1Je| zGbUZW&V`NfiH5)L#@gDLsF*GF>NR_sbIv*Ohkp)4DZlu7=)s55|KiDj&#kdW`teVG zg7H}1$Dflp7jc3Di*oOSu~>H5WlJ}pGe?dbnfR2#lTSP@Pl2qn&f0i_VrY8o(c$S& zC;tf(0bj*0K$rmCtoK#s@yFxghU;$-{x5y`i^y?Ke1^fff#XVlxvl-NB%YvPKg>Jt ze6UT&&Cm%6>qy{+{KM&KjP>JqLS-V(fqCYhGkp&eC7W)tDf(*OG##<0O#UE^9y>Zc z@#K@~si&UA0@+U|E~Y)_JoBW1n{SSGeFrz=KS+PO=pyv%ZE1HrF*WGhdmw?BZ;}Pb ze{$%LWuk`@SW7RxbQ*lb5$SGBP~a6*X|v5Ylh0o^hHS51f5F2KX4O`{X4okct_b;0 zK^?!i2YnFPy$=t^1O+B?UdIy@>-Ak16K=n4EpiMN@nJM(io77`^+7YhYn{>s=bn!V z!VmC-N?s(p1g|9}DLQM6XsrFHNb>_2IDdlC)fo@C(x}1BB}$R1cvJ)J7rS=+;zpeR zZKPVpS_CrWp)Zk(L`>K^3_)RS3@Y)IdKv$6!@OKft^IzDhGUzia9KV?aI<3#ZA4#4 z%~T2i8Enwm_^VrpAY!Q($p~_32JqoE4{6J-ut;{EpxAQDfp~&qe*Xn=?V0nl zV<#OzqkZKHu$GHAmvA9t$p(ipZnxiTgi5zF32qrEIfe1a7Ecv*k)wj{sujs^C3IjE z=VMDdH~XnI9uSywP6K0ASNd$7^ewoCt)@h`5Cqfi!YXcwIP8YBD+WXOQ? zFlIAYsMEFDvJHg|uWIUjXMB8l|`FqcY zCkzLtjW_CF@bs6BNU9OfoGG9&<0pDtGnBB%#QIFn#d(|o$crtyE3lcLT#3Yu<(`}>)wVCuUmZcjW){M{P@+1GS%Nd zR3~WcN}lWj_4mfm3>|iMaG9Hx=F=|AA2T4k_rJPyi*ID&s_~gvP+NE@si(2}Y-M0E z!piFpU>!6wiVp{Aw{c0d?mG=ra%Dc=XZde@P!-1V`Mb2QSXCFsqkHtyH{!}2H)|~0 z$kb3bmDsr}?f1j4B$9appZkfc@QF$S@H-eLD2^JM?!4nxnV`VyeDMVO@?f<7&p>yt z+pj4WOimnVBp&mUpkWtWR11ZV9X%1>yD@@G`;9(mLi-ti2AgvmCdZJXjK4HPAeKYj zezmJj_GM?pxitS!rRZq}I|f#}0!FuIKb)uj>@QsOzOnyBs6Kz2M=4{B9NSecz>s@X z`Y7l{q>X7IF$gyPoK@hwMW{XQjWk9LEFuwyY}PY`8B|NFwe_WLjHZbm zE{FCio|bHtLgk>%w3b9P!d!W&F)UF+vhqoHMjc?2{%=YEC@iguDjDVa_4`lJ; zbIv)t!Rw2(p}BRsNcKTkgJR$2ZmdM?Y+K*nS7BD{*JT+=pRttPPR-?YG~S zqnVpUwd1a7PW;u0STudZ2EUTTLe#G{6DCc>yXz-3n1I0AB9teIZZg-3XdS~`XJaDp zZ=-)+edX2WkH7zeEav@<-M-Phlou{%uG~KU=wlg=aqqv^j2Sbwx#*&cWFhEpeB&F< zwOD82&9~lc&N$GwbX`Bdps=_~69aQqAFq2TptSfk>{&FgQxA-qQn8PcqWF~8}i zn`JR=QTi@8i)ig9vu%{DR{F&>L`B(ZQRgxQS%YHNU9g_W`pr*%@)HY^-4w~a`v}5D zlltj67O!k}+Ii<@8@xmOnrp5p93rV_@d#X*v~Y*^h*B-w7Ys>D&7|TfLGupQe9<&q z*^Y8JO3g(&RwGlltAT&8riv1tgavF~bVsi|a0CklcgipsrRP+P?! 
zw&EMdFJ@yDR+cCx6%P8bW>{S$Ti2ktf{SG1JT{vSTk$o$Wm>ZrPSY1$;V_-Ir6z?r zC`2s2l!~=eDjG(KHH{WEF1tVhB+lEwvWTg}; zlOby}u+&Uq8D(QMZD^U}ia>N^FfwH<7R$pT8GFwqEyZ86a&H$@U*#Y+&ARqSEu1&j zt_~`r%#Dn8^T{ShR;>^R?mF|2$BFS;8OX9&`zxcHTJXRkZPa?@BOZq1_Yd7VF-EOe z!%<6_Yrk>LGp>O+6l>6omNh7h7XT5JRrmBXyOr8IOw}}260KX^e(D^P1+N!N2C0P% zHKwi++_?rr4xc@XM;N`R6nIdGsCDjR2B8I>mm$NUnG-OJR+XzV1rEiERm9?3ER?KP zvN8>QqinUP^J5_@OHh%NL;>8_P-lR?VAPR&Xx*XH-ZBMlV*`VLim&@f5Y~!KBZ{H5 z112XD{atp!+=2q4WkYSSRJDj{qm=cNzkd|&B56Rav$un#s;S#eZ{#dcVO;$8y6RygR#ntQuo@Y0Sr z{?`5i@05zhxL8uFmSQj0%#w)t6clZi4Yk2i)l6@tGR2z8ZH-J&xYC83NI83QU;u~_ zTm)x*c9SBSsi=Iiu`a6CDI9zKU)`NYBGkLgB}&S%PNmdMnajH9;$wI!Oo5G&Vy!F_wY>+NEVHp=q>#mQ$cKU*{|emmu^co12Dklm?Qj>2g^YfjBeGp#8?g zSRzLS-MwyHs(kusOi*C4uM5t<02l>1)CKb?3YT2S7%SR(%U$%?St&xUXPp$9p0Q~O z4&KP+BH4Xxk?iNoBH0Fx)Lpr#bP&DrahQv)K7?5tD6)=?lVNYa`%d#iOhl}^-nz}j zSPYp95epQcN3fXmuDkBm;Ftc*9k<_s8~j)>TO7Ga_5;m92OWqBihXop=1l5gTu>Lh z5&#nvSR}j8TFt9ic$%@_c>N7o@R|!#Kl9Af3e5amKpXEyY}Wl67jotVg>LuWdvCKN zZt(BD_g>8?`G6f8u76&peT>DDPdlU87&of7#UjFZ6_`xAy!GZ=&Hnf@y~P$=$b#6@ zu)wozZ1=h6o{I^G^_xQu`F`{A%cI1^MY2!CV%1x0KCrn`CMeM3A2-cQFTEg>Cp+)7 zliakQo&tAEh>c2N{V`7RQMz-Dd>&?)i zTqJvB&btJ_`|M@$U2?GfQ>T7}K6xFJHfXmhVbEOP!(`1~m|WTblOuR41lXy~8*jWR z6JmHpR=_vSTbPje!yo@Bi*5huPp34KF-BP1quFSKehr?AkYgw(llhzG%(KpJ@Vc?) z_rLqSaMC{Hr$ewXaNp*J8~%yOqmQN6JTHw{@2zR3ZPleIWRd@~Q3^f&x^kmkjls$e9OSXzPRDwuYjkCIe zv^09tcKW~~M(;7yGrH~Mb%{Px@hzUWA3{ZgcggXZJMR5)SezXb6!;s&YY=`_sX-hJ z!R1+xPgilzf>TLbwWiI6?8Y3&inhLM-ITek3-4N^j0*GAgT-r>dHW#h%u`&!ONJ zA0B(=Yn~+$HT4MagSv2(08v-89o!ZlLBs>Qpfar*Go@$6y6fmjiz#wh7sYHOA(%I( zeU2KZ1G2M)axMy{tOJ|Y&F&l)OQ)HU5711chTw$fdhx0r`!?4?YGDJ^~XI zYwIG}ALwVpHq@Cy`@3SG>`7k_R0cXhYAsa2XaE}Mpalhox{#edA-+*dnfaW;g&arfrp%q=!;tl6HsV=`?@(|E zjdNu|V%eZ%U0q~H3(mZqq~|^|c!3H}a8-oZ4V25QbCV01G>~ME&K$eNr!G6xe%9gC z;-6!(VBO<}>G&~`E|SgfpIjt+)JXk~V`06_6jt}tEsWT?%m1VhrA706jN?=HKM6ps z>{*tIX>wT?*&!&U5n9jMKGsdAVWNwMCN>9awS&v_muLFBn;}!*W zR!6p`D#Cvj-}yjOpLSx=RJo#f-b&w@rd4J)*LJO&QcG(dOVOCwNDP4bOtV6A~YMNh%&LLXI1v>p>Cks zpEc2BAzPtzOFk4(FGdYzbt^4_u60x9vMvI5CPDo21VtDOi@~x*8BH!5rX0|2a)B5P z;b>E65PlhSvSUV+H_440ZZec*Zw-E?LwntI@k`MO&m|RY^`YGh68yKw<4So0KI(Q@43p zFSa7JanihC)^Ht)wwrU8OQk`IZ#RjkqR(;%NkAYWtwmkopfy-52`P)dNEN_xf%zFs zPz*a}Sbl;+@zNLCA=$0)m|DxCkNibjq-dvHjXxED#unMl>TF^m+XUPU;;+&B;3oZ{ zhv5l|=btx|ml~D(p5u$PZMMddDay`Vax_;?8k}+ZUz+~?`!z@4hVT3MW&Jewoz_g5 zI;A=9yz_9Ae_PzNKcso`$tR_q^v9yj&4C9U)a-*NDDJ|q);(Vs{boc+g&&TwuyM2Y zI(?c~U&RCk{=+&A;@`i2e@uYf-e5foX*J%f-~8=w7d1QXv_rGrdh0c({W(lf+;=bj zxMG50FFZjp3OCDh#*>RkPWE0S(@}C_45LCs%l*k9vO4 zx10Qvar6JoGtX>x!Oi2o$emA8y#A*ClK-U8V)xEF?>1+jdrq_IfK8j9{ru+` zQ#|2;2?t9(f3!zojn1;xG?`4n+7$9+$bI+S-%Ld=-0-e_nGC?XBh6Gi9mG6(+T3R6 zfhgkHottiUCvr~!g6#I}hq_Eqd_$k0_%SB3Y@=6+6gWhzEmvK66`r8j3FoFgK@ssm z7EX;|TBc_*EiqlatkW71>Ti3oROS{w5@x4}s9cRS;8bj(LRHR=P*;5TgVZHHG3H{W z_*Iqs(J32RCMOVj>}q?ceM0<@sYHrIWKnnJi+cs zLS4hV=&2Ga?5feVIN2_?pi`=#n8o<&-(mqrV2lvYu~ud@i~rw#|Ii$JY8Trh=}W@` z3Sy{r`TkM7lpvyotYYkLRh-#6dLM>j1J)`|9~5!?vzmn=w6vNNh)}desu7vT%vOS3 zMeHrU*`zGLB4l+KCMfv1ZyWr8BTi75OUbq!UFqM)=Yk`K491Tz)@i(e8kSkBW8QSWjoNILT^gC=lqiC_VxIYx`LD?&gaC>EvO3@U_{=gJ} zlgy=HX+g~t-GT$5j4{v5nEhB7g-WHCky41Z!xR)@GMWyS4iWkiqt5*x`rC_s?N?^MF1<5r`qG8`)X}p)QuTIeNqf2YPd{GMH3^L2R-M zf6pDLo(1Cu2x=3J#Wyodv>GCu9$9A=U`!OjDn_KGXgf?n0pCz_Cp)5P>#*k#MPDSM z>lz87hR&hhVN-Hb2c#PEp866gphRq0vaS$Be2@#kGh`kSVR;d1=cpqLD5X|{*o7Uc zNSG1yQSVX)XIC1=6%|M;!c~A7VWW#`m-qx3!d+d+6m)g5^mkB+DD>DBEY+dbLwrNr zm?tRuae@Nh^($902~g2-FTr!9^UpnBp1#B@ubaC$K~a%dB=gYHv@A__1Qbyp?;$xR z3*y_2z<}C8R`b$ataO73Fce#n%&cYZcbW|%Ww>7T(Er`nplH7sD%Zo?rSwfv@=HpCW{}v;F4IQoIEh@^L`sZuhs1M;q7C 
zQFZDLZwWX|CCM`WZ2;=_f=!xoS}jz|pdg2~+Yp@rnvVCN;Vt%2TVyAp3Kf6%o>uc~ z*m91X$M*3K-Qo*>&*1rGtZ^vS6V(Tw(#Q8_Y#AL5aS}y;)^Yo z@C$ZXgJMrCl06!05?puPKhhFQEh)ot)>-GIOD?`REw%KLY3;9lE%lyfJ}jg?S{9(j zjeRWeJVMrMIOw2*g!;bwut@evC#7-Y#$i$BFQx?+SSWoo{i8Hx;s@!y@$aYcl`e!$U<1-VYY-ndg-)3pN56NuSrWSvvgW--SyJ^^UsGh8b+u0 zu~2jG1$w8kV_!*Q$Ba$=u{iSptXHra`UJn)$65`4Oe?OiVj3`Dv$V{z%cRXW+dSd* zJZbpD!_&GOtee(Za~m6<1U@*s-*Cdf@&Cu$cF` z2~T{aK5MO+R$pxm*guo-8nHC|u}9Lv3oe9pAHIhLr`N}+H3cb#i?I-?uGOK7Ubq4+y{UEd+Cdx|GbRFKQLxI*YN2D{C(0&E3G7pfIs)_ zbLr71A4|(GvwZqK@T;!6Y8o?UG>(}+q$i(zDt#FXXMYuoaxc2bVtD`kxb*Nt52t@W z@PI7De&GICz1$%m z5RUteSd{#)fBkD(a>*qUZnmZ+aqLW*G+lLAlkfLNIs`;oNj(%s$6QDbBK?eo2^-*s*O?4LdF^PKlQ=iKK$_YoxOKx6O}pJm(9 z#@qbP+H**u$M=U*T7W9LZD&HnZ%A_{4ToSvaii6wiQIN0rKGsIJY7QAy#sc?;hvDZ z;6_7qz17E}0kKiIH-K1^#$GR|gE2Zjx$+LD7slByFjUYksjcC(t1SYSk_Yl-0q15u z8rO?I-yAQ=3w!kDz8ubGD04r0wE)`QYRxV;!zYeSAP?}=PMxb9lg|+wcRJ)kx9gv> zr8=lnZeM-+-2PfM!Rw_S@0me~gDZc@r(78cf)hS$y@af13=UZ1YC#@C{ImrS9dQp7 z+G%KVTE0?QXd?*rO?`z^era4HYKisZ`1%WqF0W&PU2jInvo_E&f8d2ZOl z#P5)>%*=ZvyVsfBYh16jos-X#&j~(Q4f;raTKt(KaKZ#Za0Q_c$216qZNG0QE`z9b znjw8D!t(obPHPU!6CFumK(r7UPx}wt6n6*u|D0A?z8)D?>K%1uxIPv2m$I?rTk>0E z{~cDp=h|SOjwt7!*l4>}{)~YFGh83)#I-|%p8DLe`nrTO%z$Ba@dsGREW?SdJ83NH zK|JhFKd0S0DCEDeKM})!F(f}8bqtZry)V_7$?+Wj&>TPl$)*cpFmwfECs4lMs6`@ zz9kc%wRPO5A%1QBw89C@lAP?{F!M0di+FKJ1v(jia2uRF1o#HtC>)X*Gd!h!0zZ|$6 zQpz-)*Zv<X__X~gD_++LI*If7ZF&*FYrs`L;7VaQrz%)5>R0XU_Oy@Be12S$uOQGE zf04wZl&@Vi8GUphV0-ah`$YOI8E1BFaKlr)+2@EFuVUz$z&O$Y0_g87ES@_ z8#d`M+hG_~!ShF$ohjP0TW+&0abckD)s-nx?x@W`cT!z+_qNfq^DvSL2-^~E=~A8D zK7~@kdxlD|LN~5Z3Z@z4M~{o!fku3G>{q#_9gCm)yBrZgJxMlf=3HltRg5^^H0@;R z@S$th{Vw1vCDjnm{!?_mkF0#90%4`KmLPqF$}kh4d(%;SzJ1vkXTWPHWToZ!v3reg zX0*zGUDp)>krKG_xeu;(wE2h2C@F_BTWp%u-SE)EMuohtLn;m+TQT!?MLW0L%Pt+E zH~R(cAyMSSud0qUbhqkru6bL&{CSf9B<1C<{5jMnXv@IB8ED1h^KXO}yZrc0PZ&2k z=mk--;vp)X{@pTE;Go=k)xJGcUWJG0JD?>l@~h@C5Yz6xRn=%HNYZ?29eHo`eG-4! zk4}%wb&D>_1ZnvymLCf08J_}shFKSsp5TqzkjdU;R1j#gsfE?&D;?45Y6!M1gbskt6>LBT6=u(+7#5wyvVqb6I#UB10 zi9pwn>aP9etJRswttgrLo>!9%P(~Y#Luxh5LPtzKH+u^XmPwy16;P{S{+l);9>!? 
zpCxJ^bNqOicCF<{swW?|h1M#3Z0W7wQ>UG3?*6Ia=EWcpz8fp%p?dRc(#d=Lk6~m> zF9{z$=Cvfr(${HmK!@Ih#7h2F&gxAvFKklv9P9SLVaYlO6b@Gy@*dWGfGx8=3Susd zcve%5&SKmP2->-XQRsS82(@LDISjcnNIY7(!%S+hOV?ErQa*yH0*0QFLJ;=mRyFqV zsIxr`at!x*P(ZEgJ1~iOTJfajW%Tj`KZ{AFO!fvY=54bXDc9h&A;O7ZKO+EmY$a-8 zVaC6V>9)CM?4jJL9t?S@cZ604UXodGVegBE2Ms<-W{eOSHh-=S=ZZ7nJl`|rv!BWP zkM(clmK2)`>9n~JT%)etjy-eQebq+xQ`MXXCl%~yOFQQEd?~C6W1$P1^cM#9-jDmF z39Oj1Iq_RJwJr-vC@>t>q8IBzCBDSeEZAfjX4$+f9q=v_5(-!+X#L~GEQ>xmR+Atue@mtQr<1V$BX zZXaAgL8d}Q#0dl+`CP4~tarZ6hTgT-2X_XZi|w$8?dACG1X*OH1#Ia%q><&b5}7^} z9=^;5wbgXb*i2dgFUsvoF@-)WO=Qr)s*y3KN}Z`elV;Ui0;)Caj(k+Q;uLW#1;Q}i z*cj~paFN6uHm!|0xUj;K5oK8H(5T1OXS_Hn;}WI~KyGKso!|x)h3I9-b)Dzg(mWIV zP)<=f=yVZw-MD2k=KB=(3yqb{)qC8^;qingXreK444ZpgM+k{GI+9XLLKf3VCLafJ z>knKEn7te;%lKQrCMj*ea3L6ERoterBFy~O33krHelp|NU(u=*-gTTS?@@f);el{= zUrc(8l2l}NeiQ0PEtM#Wc91U*MVbF`f(!Ne1BG6Lp~wHAuW$Fq_pXvDkBW`nqQp1N zD&??eCjp>b=71acg8*)UxFDJD0?aQ%w#WH5A)&hBR^%6es<-8^DQv*QP}68m zwGGd<-qb=5`ge*a+#a3@0XKb;f!M*Y^3!;(LTz)e{_06K^INnI*H+JfKCtc|pn!v`1c+dZ zjt~g$tRn#Oix4e(o5tkgJnwxTj%BO2UAz>3=^r|L_JLqoj7sPn&8#)Rs)pMRe{-Azs9_~yDU*BC7e^~tik=!oAn)MmEAQ4)*a!=$M!#L>t)VIvGD0IEN_ zhYPA)YzBM9D}=eO=7NJ{KZYK@Yyhb=deapbp>Kny(|kZ}!ObtFO|PF#(g^hX$?Edg z6b%6UZU%N5w+|FB%>11S+X1E$tiFzhhEM`>HK<-!x&wouxi{alnApTt*d*Sv6r`jR z`WSrY#3Jw?E&(t8F06+Jwri7NA*rxF)o$D*oZ$jopirV&D6IHY2^Lc!n+dq!f#o;l ziYaIy4`65*jF8U7>CRig)or#v%(Zs(5w@qxIJB=d{4C`yXs+hll+U-E+e@$fBIR)DhQc~BnE*pD0Ti!n%ut4d2wZB^cZwC60UP^Xf zX9LV(zctP}IG29W+=>h;@XeUuuMQ0wPMHRz$@c*bz3=OId~1)#Tzx}_1gtlb;+qFo~Vto;eXiL2VT z*SDmjrm(7Z)tvoG*I$C0q;?0?uKwviFxFTD3fha`%Q7De)F(`SDGf-+X+Jh-I`o<; zP}B?1xL^O}MqOfnyW%hys$m}L_y4~(PD0YB&bIi0>4t0LEa~DExCjgc11@(qXcUVH zt?QWcSNwamNt`|FKkU<(*c};IW1?x?^4niUzJ!KqwxoyOdEBov)6iY(F*YIIZ?==- z6+i5J0!R$l;j67HLc78rnb|^t*45Ccm2eiVOXwhvfvmNSi+g~`(3?FJ-w>(%nJ9v7t0}rYt2kS zi~x+6^@;s$2HE3;>;Z&aX-K1ykkf#V_ThWW|J#qFpf5prfMPpW{vs$P^ue0vBX#VB z<}Ouz^;QIxvDiUvB{EFL1NM6`_pZWbhTuN}?sIp-sYazZ1Aw+vfir#bMxSAHaWZN= z4eu=6H(~0chgH?}FxIngwLwRaL13WOn$?)!(^F3Z_S;PmiecaDOnb{q21RPl#C^{r zj7?mtJn$xrY8dmDE8PHiCJjTjQ^8xb%kWB)TunfSwhj87^_;E~k?|ji$FBOX%2X%y zwLak^ew3E*W*d9|hi)|{hQlzrmYAB^8#y`}B^pI1G&*fMs6e3k^K$UcGyH=u-GF6v^Th#%e{L?h%hsr#uy)d-hVhcnSctRO;fhzQM z|6}_YWp_6P&s@WdsN>W;=4L0a*O{iv@HnoafEWfmbgCOq)D{yJ!=U&u3_N{TY< zoy1ld6{Co#+NSr$<=`gbWDsSdKo-xpNYLj$E35gp42(I;o|k)utt3dz6_#$tL)bft z`mg$j-2{rNzb#Jcx}S{Iq`_AjQee-u-huS0xA$&1*6mx~?l2vW#(?*ympj*1{bZd- zzm3P=jag%OPv<|A=|BEX+alSCr{usRekA?l6V)c!auF5Cj2w`;A1lOBj$_vGrI_sI zC6zcU;5S7?u=V}V-yGjU;%H;)PA0ATSvgSCTXLjyprt^_sF}BsX8Uy?s3Xjzb?~YL zfWN)8zS5EbjR#+ZSrcGz=UqRAV#kISXJzYbkyfN$Y-jx?jvtsY^Vy&X;}nXDVNBMo zEBc%CS1Pl3k}$PA2dW9H;i!a~uq;6n-l9BEe4Oz*FIv+01=i0eSSEUKS!3xvSYR3T zX&i7mQ%^lR(%Eg~uL+j*6g2Nw`sCt0Qqwdw^nMX3Pt!Z&D8?xLXn($HK;lKx$ws`5x z0Bw3)*pz~uVZwa}C-rq9iC7D;&@1?&*ADR~D*`m+l1KIaW38qWq1;(nb9w8yeMtmF z2d^5o+t(V`sUFt}^RlV!LL+TxE&NaASGu2P8JRL-YU-%4nQ~SF+H<~gtu17Cm*gUM@lK@foBdj) zQ6SmZ%Hul&iIP988qx2VshFNAVH{B}I6<2f+dmV;;KW>8BG@zuIjn1z_FQ`@7?}mP z=e#5!zU>nKOgsqWQ>t`YM&-U-DAm+8srVziaTV52w7>YM#zN5yX@6vZSd>{D5 z>M)seN)KvO1Wn!hZu@PS(mgER>7}u@76S2%O6VtIwEm=BBKQ0^tn!t1tyC+$hVl$K zO`jb*za;F-$FFCiqV@RvwJTR@{QLs{c#LZMvgPy>qrN3^ zi=q&0G*dz7o0777qqTJ>;@l>tI;-Zs&Ua-6{ATOwFHrr6R;t>#b?B<(v+Nyl1k@U= znhVBz#XiJSMZJU8K3V9!X0ETOKvk!JEs)ldTKE1~2wMTv`df7uc#Zx|1hgLd6@Fb7 zF{7nb%GF|V*b_ z{QLWBAo8$$$mWG>U(0a>P7fSEYn~v`SkjBWGmSbGprK-c? zcWW}=sZPgyBq-r|$5gh%V`opNjhZ*HZ%_pSX4s7|$Cu8|Ynux;P}oGPt)zQjY7RDo zgoz7WUUE`dfjqgZOFQbhcyu648d81zG<+33lP}txAXJP^X>OjFzB{>$&2xLltM3X! 
z@t~~q`0=Uv=0%E*`u%2v>T*v!!XsG)%jX`2;Q0V*fCG^(W;Nkv!H5{fdsOFkgDyB7uAQ>Me6-JKloC$q)#XHtt5l`F-Y88TmixVf}9=AYhR1 zau|y5qlFQM9lI>NubgL_83@RDeC6VL4KZ521E61bew&dXf^XUA4!Ay#V@$H2=>hzN z@Gk{D+$vt3+821yDF*Ht?}K0=a!%gL-(xA)XRy8Yn=v$Hgp_g1Ss3d2uop#%l}~0l_d*zG-x&5jiKI|UBTX$wd_p}} zlp-kpCh@pEUs1ERk2I-_-5i}KTMDsTlIg%iRSLVhwvTm`p{?KNi=UAHpD#u@4Lsh6 z`f@-k!37}#e>;Q09LH_~hutmC%rW;Y^f{j_&Y-B)2EOk*ROR5lFFf8KA?)BVi!5c`u{NrX`Uao4A7g5&`1`m82wI2vD8Ss(tmGcE`tG-$Bd$o2V zLhjXTs;RUtWJz#Z_i;gh+0@Z*w=#Xr5>=#88bn(~VORUt`mXh=tZ3uEoWt5>;sq&U ztUoS1)yJ^jJpm+Nc3}iZMwEE^ab`>pOiybSc1^R8S+3^e{^w(a>=$dmXS<6zQ|vK+ z_r~G76Q9rd+=)=Wxw_!2m$2LjeuJSpiYuhUvMF8o!VJ_{!4vy@&jn=`iWV{CprPBH zB%kKbGbw?!EqAdlHsrMv2dt+->dtPVp{Z76q05Iac$-)oF1cUYz)^$BjPJOe$9pD# z<*YT27RC@(WDXaUMBgp~Mezx&STOdm#u<8v?>G<)`5~3Xz78P9!-vsAMYkZ|%FI&> zPnZ*e+Q0-%3X_+PeUOwlI(I#)MnzSq8R#>z{7i;)XByoAt+k%9)c_*kp-qOUa zl$+jZg&R1SDj=iDOEM4=bhty&FRPs!Z4x*}Bq`CJG7Wubz^E@auLhv$Tz>p2^jN6k zy(VZ{&3`pB1)4PpGZ#^3Bod)amZlH^l%Ovdtg8CI0739SlxumegQ4H6$u? zPRSyII;kO|Aoc@ch>NZ=d}VHWO5Qsmhri5kH^J|#-?u5@;mb(gEA`Nm;!�li7*W zbFKB^Bf8#&eDq2W<=rCGUJKqeL;{Tn1u~qET%e6DT&ivt1t=fZixZG)@K7w9wiG_d zDyPo^CCSp1ML*!UGn((>$?fS6TBpVAvtH)5#Gv^}#`D6dZ)UNF*s&8fhQCz@v8v8S zB@@gIqbsPF#gEWt%uCAqu01h2)YSFsbTEm;wekY``!b}q4etKIhL_**w=ysLzUC@S zsRJ+{tYoETLW9kCxxlDqdZ)kl*$9D?He(%9-BjPzBFUOX_{pJr@`h-`KFBcLL1BnD zN1ojTd^}s1Owhnzr=b4D@clAv#_!;n!v8vuF!7$)C<__VutkLj1Dk#E$7N)O8G%Ln z-Og=kYe`Odi&SzYGxMYS(VG1vk9ww!R+-hosE4QKS4d~-K5m}K+gtPO9y`cv6V*^} zRPPP1G1OyDm1__AmCNwZ_g8g{tGF&7I{b`W6xK-6S<IxhgZHzw3JZ0>JtPsNrs|8L6K1LX&H&r#ti~&tO!gqjl*IvYrC%MX1@r3O zz{HO(uQN-WZrntIUvLD)uKc&4>#aGfq0P)yWp1i4lsC98$r)Ui)dorL&_Fgt`uQ>0 zQiTM&UH{dF;u|2(jDYj{^ipK1Yn~guD<;;qW)}DMmiw`pOKxquw^^5`2DrZRrk#hJ z4MXbnukK%Gq*w@jc(gV!s0 zv&??^BovVcL;NxV+~?4<_*ZdK@x8b$KaiIuMRCPYN^oT=XMSraX@z5AULZK50>154 zSVwm^t)w>*|3$0Gvw-a@4)}=93ZFZL2dxT&o<|TyXrEyMXvmajXzVP3_;~o(&)3&y z%XOEDL+)5)&OORTpwiT9Y^k??GaqomoY+`?z?GlNbq~yO7x##7Z_N@+go5n?? 
zW5wP5cfUt-o#N?aNC#pLXIu!>cmp5i$G#|XSvQ>9kZe|>T9Ar4wbxd!G|-TXEsx!E zlZ{36983CM2R+j<;tGcx@0wiu|U6HuVJ@C;LCJxe+2o!&Wo8XP)()mdZsf zA7E)>UYfo7&8!)OLiGoE;ZdyVi$Op~t$m68 zAOUyl$9qe!*uRTOY{)*C=B{D+=aW1{fUjiqEY`YQ2KJCoRu+bL>(6-*m(FGduD;N63-L!GDmPRgWJ8}8L_K>=XGInJ0w?w z0JG?4E4g;wKC@ZiOHTka|M|9e2Yqace-z;gEs+i4_jlIhr7npKl+TX8i;bv3d{3rZ zA8Z!5LpmOc^c2w%w(oXPWJrZaM9y!{RJU+Eh;sGz7Jlvfz>_fq$`KPW7#SYMx;gv= z43;zaR{eWx=Wi(`EF}-M@I0%vzkJzG2hsk0eoW z-v6rBUC1zU4ssNk_OpsQp3bTpOJM_-Y07SwxovybA;0V5 zSl|bw{-RnQL=^>@gXvi9NrM0K-OQ$!jxL+{P_!&w9!4uYDQ}Q_3o|l9Sd@)A-1P-985c13nWz*~r0ko5J!ScJo#F)r%`+;`bUC$n?~Rh{=*$IHm_ z%eck#)%bD!Vpy}kdi*mUAj-6;%(K8Mdz7{ZV}9E2Bec_EPxUPpDTJTr)U?78@Kd!R z!Pw4QZm3yyU+F4D?pex;)#H{(nkZZ7w-J7&$)FJL_uL!jfN7JWcQd>YUJX&{kyCgH zab@G00>cg>8u>xlE}t^iD%Mv2R2CR~z<5F+vhbzPXq3LX>nCw1% z4Ud5p%YECg+?KJL;DU@mMupd!zDZ5m7!Dr&zeZG^c<|WS0zzQ7MKroiiGGs2Wt2wb zg-I@E=*h`M?S(PBlazMxPm4}oM3uzE|FxNl9^;HQ%wR~JG;#bsG=aRQOn&>RnURTk z8dtM1F`a9bIcR>AUWv1$T*{hCaL}pdtkG&F?59*Y1KeaiO8A`uru+75l&e`!T7fF9 z8|zl&ck5GBjgTrtT5pV18f**JH>IS&9Eg*ETzQoT+WV0OJa*|=nc@Bz1*e9di>HKw zU(MvY{6q_m9VtP5fcM+$D@m;?X_?6x(z{xc)_8d#wDrSEbWCS-Cs^gw-sLW`4xb_- zt5~A`Ylg9(?K68%CY@~7d;8>CafRD{<+TO2^SL|K9#k!oFdXh-zvJ?|&ykF496c)I zd7tbkSS@5}eR@7HOxhdq{tEOJ3`N0WA4tM$2+KOrHbH5^DraIt)q)FqcXI2P#=Gb8 zXy2!0zeiU2Pj3U@U=dK&GRk(|{uHzF388Oa;wFcYtm>yJ%W%{7H2$4JzTSFN7Dx58 zV&#{eaoA(1skr{92P@V50<6WwcZb~PbDj*_2+ZyOO^3tljOmIWkus?Xh#eU42>BaV zNmLB?MAJS*){f7?Zdt!k(Kg?u5&~1H-Lmx=IG3}?w8>pI za&&G76$DI94s?Dy>e7?~A+Q|+eJunP6yLlNqkln)P$EX1NqFyJ_dLZNq zCupB#GUy_1cB?W=Yj=1gp?xzv#TYB>0)2}P`+3Y=J;~Z@$KPe+vTdlU7hoy-`DfaQ zY}wwfepG+uUZ0CsO~#w|G_em99=jt+@}-|*{z+-2^WI&l(!BV``b7Ssbrx%Q`AzO%hr?C1E$WDLR95CR~BiHh-0%VtIa}#JYf!CQ91y8Xl(?}e~ z1Ya10B!;$}#D8BG5_HtKLkBj^ml#4oG{~4__`294hM}VF1KV^I1?=C#@fr@vc$u3@ zOxaY9##5X>tp;JQo%WmY?Pz92qsCy*Q*BOrUHSK@odpW)t6q@%P4t4VHWDvw1JcfC zh{xA@p9*f=8|ltYY<%#LFoK7!l0;k2yS#)9SHOvSxfVfS0dHszmm)|0`yCQbq{+O# ze`jpW&&WJ*k*F9LM>Sys3|ITFSa=MZ1VhSA8e`sRHj4JMkdfY)fn?<5D0$+)>v)PZ z?&+L9JPDCf@vfGo9Nw7j&(~FkYyQw6>~VMUyj5%PN{!LtQoR9oqLiZ*14rVaQaMRc zN|o*WFk2uAhXetK$W1~gc{ddja2hohIAwHW`bbLp^~ZjG=5lS2oAM$BzGq4=*&Ttm z)*l>_L(H7_tOKagHK!BOreD5)_hE?Oe@9pLwuPQoHTHi?2C3!7G*D_#sQe+JzK4ZD zhZ9k6IZ(~_C(#D5K_{KNp^kW zw6DK%JcyYy3U>d`KAm``{W!O#D{$n@V-!)f1?30id^wR)0v=AT%*VQW1oXfA47vwB zK!5N@OT*q-M@>fhfbv|_ZCG?E`sSR2nuBEkmpD;5{C-vh`?}M4v-jm`3Ja}xFVcJb zr_zuq9N;h=n(=%Ehur3oFWTmFZv6=QEwmNBMW8u#E2T*3mv*7`@*y~qayaDCKzQMY zB!S>Y%}*1`orqgQ)0+wV!p_Gzrjy8lKhE4Nvbu{62gR6)i79jiE+e>_Zllv%AH)Uy zcl#@Bd99E!sOvkQjCo%rKQIYu{nS3_-PHDV{q`eQj7f69@+seE>x!`ysznrsA{Ys* zS<9+9rk@ZraXp|sgZ}8D3$T*)!F)0IeLa-9dJ?;Kd)zaDiP(XJ);t%r+?4Qrf;Z8T zcMAcx5$Dl;a%>M2C&&)`NUNPDyWhwIS{FIHJ2`~vS?q>KunLO`8#x#k*LJF6%>439 z0fGJRe4t?L4(E9bk<+wPcc;a+;(YFd%7M9>|M^>f8$7mCyRz_F5@{- zhLgPV*kyea+9BqIpkR&MS}iq{4tyEi4>7u0t0;8$dG1A*q%|&~cnYEC8e%9K`>3IK zp_X^dIZ!qR(@age-X#?Fj}_yPlltxy*U+67M~>7hjFO7sHWyE2d)eNBXN)#n86K%e^Fd;g8t9vUJIV$}gJ#c(`%YN~6Hnex6{kG#gMlqZ+(61@ zwZC4mJErPwyh&bvY_t^*^PR5iWD89tS}U2{5GnVh0SbC@*{8;c2c0ra)k4muqY)VTPZ$kDHGhUgQ~ zi2k8muWd1k=htd(-NG+1(KlC`Vr>}@b5m4XDn5o=J2pZc{we`p4>SFyj~D}3R8e2f zFQfix!8bNoe`-9m%V5q<(`NjZBZa5`D``Yob>2-UI|L{RHHOIC!<45|Let_A$^>an z`LVVCGB4QhAexiG)x0NF3Hm}GUS-~>tf0pdrfo2q>Prx9r9&FQP&ZCY7?$^0@6lkb zqOW|F>xV#U3$TU%XA3LN5OZdOLw_Vgm%4XYmJGcYs!|T!b<^-tQ6xi2c^A~=Nfk{X zTU~luwW)B2-0)EMl{J;a3a^WIWDY4jom{>VQ-`6ocjYg!?((BrVIMpf-lXg- zFdam7#OXfJOUEAa@-gGq6aV)KWajLQegKvSYK)r2gMYkAJZ~;y`&x&LRjFiYjb^Q; zd|?q8PG^Z6k^W@e4Xn4Q?*lw88Au?7w9+?qG4p+0pbs45dM2iSY~S(D9S;`RXG{8S zwbOQ)3^_=WBs3ylIjHYEA_e?OzI^*OD{<~#CanUx5sCuo*J3>>Qh23B$^A_$Kd%SQ z4DmDruL}ae`#VGd7u2`d;x9#G(p^%}o_lZ>kBv^3q2SB>;j_cSA1c5gDy7t1gy31V 
zHYFR11bLi)&4-8kV-B2dx4)P5-B-rIoj%aN&+56aOi1E1ejo_w8k`R8Tc8Sl*@LXv z7h?Xi0S(h*totE|Y&aRNyAw@957g^AC&J%u4--x~6;@RPs(jp`9x^*Qs* zyIyatj{2rOvC)0qcaS6n@dZrJU%olk_h#Hxx2B+r1WQLIXT(-ju|Nd)K~1C7TN$;x8HM#dS%}w!7|ZA1Ial0BAM9k4ZhP9?UJx?|6fpgCTFP zG%RxwddbYGa>>Vxl!|h%^5|>JJ;1;Fs#+_*gMh)<0cuZhcm>t2V}a+5I?FZ@O#mqP zxx6-km}jss#CzC3X#aK$+~aru5!w4KdIq*vxZ}uazbOp`_xgFK$xjsu z(+~d#i+rPSG6)Y_iww)^SjV&K&$C)j6^a{}x<|N%_E7ll!Udnq700NDl+l-tLdJhs?%T&aujUR^p+N-X&zgG8?jjY$jnq8PSt{ow_z=7Y8xZ0g4JQ#EKx{Hr*P z=@KCQQ2L)mNx2p(d}<}Ffz9pt2J0y=dKE^yG-EU?mOfeXTFFW7VGCFf4wP@HDxqcc zzX;9BJ8X--2sbiFr)J}#sz|HYy;Hd*IEan6;z2nqX8-fg{+_Fe#^{vOU(XLkuh&{N zF6!y^QtWy28<_It$>RGp#~U_%IwCboj3F5lJK(?jlrCvbmk<=(gX_=t#O5n?nR?h% zDSh=QB48;TkypsRA%)b@hO7K@g3vbg&L-%pd*vXDW&ktzQg5eIl?WRuN*T6D`&$w_ zQSCwE*4$;ir4o2QBW80)1Y$`A+$E3+$WoQo9=GQa1#;}=u5VKXui`?=GtFh8C_6Z1 z$cpO!p0@*Lhb$uJ+-!?>-T8}v5dX4rBfcF_)i!17Y)RDhz0bDa*pN?TeT2{7uN4Jn zJaZhpe>S?j*iPx<3%O-p9!Fcum+w6Q!+VNleej9hcRo8_?XKH3B7J_DFI0>g??Kj1 z4<;8C>iK7`9eYS=c0mBKJ}aOSU-1;t+uw=QfdX^^8C0!vo_ODk^M0nn z2REI|0MF~aHJ@bFlkcdNH*bBEQ4r;OLXNk%FeK&INRwsI{d6`2X0Oi)q(XOF-9Fgi zDGml#Ph*}pisarMfgAOG{~Q3G-KAg{8W$fFup4@Mi#G~F=js_R^xa2=L=c(Z2<>?C zy{x}xS_I4gdE*5UL8lirOJ|-_L_sbWhI-eH#KS`|?GJd|^SgkTJ>+65S-7C@Sdj5H zMZcG?N;~Ik&7VPvR6)~Jfj0wvc5xYfv$ZFsw77;LyU199I_pcY0XuU4Xv2CW(hPWf z-U1Z=J;uKV95T1Ar^ zf#U>pUJe6HrpW%nh^_tK<86hWmW8#0ZM+|_q+?X%bX1NSe^}vbbu=s43ZVQowIS8f zvcgn-G%gr6`);Il=SEh-;fzj3H(6Uu!_Z(osfs{v9IASnu9k)?59~`)s+}%=VQ~Lg zz>A2KOlmV)seq{)sTxx95tT$W!2%sI0&vQ4lBFz# zF)47#tat*^Do--Y;v_RQ{IdV`gt3^u={;$vm;_W_Sv_NqotHY}D+z6C0;&_PtN@C@ zrxG&}es-ETf^t39YBe_HAIv(eUy%rnJ6QCGWb0^kE(yOVAj??&R-x6yMNz#}OxZ)> zjg2NHw*3@|VI-}iqHd`Cw$DPK?150V?DW-|;Z!?29I>dQ-`4st29NM?j+=#vdffCM z0dLlY{Bo!-jeu&i1rHnzrU;@QPk9;Pg}umGC403YCOVom{UUGL>-)rK4?IT1!;jj< zqyp3ad}&`w=tk6T6~w4XSaogFh=pzN6iJq-d9!FrWu=SLk}=6mC#KUjqk%{}Bb}NM5>IIEJkQd51~VLIV_TQw}y2{JJn~<7Har z2Y`VwNcjg55RrI=xyAqG&QmJNzX(RP-fCi!XT9KOJsi;QV5O2L;~4G_kvqa7iA@_8HK+Lz(Ibh=f1!(YnmVQfo3cs;iw_w8nAS&E6=|iF4S!|EO+5s6TV%Gx z@pKbL6UHyxB+W?Eperjo3IiP%j^8Avp~lYS2c7HJ+AY}cW8?93aw{VtH;CeeVV9QW z35yI4Tm)S8GX1p}wxOA0KeL?4>+GXpVm|wGD+S47Vop;Xf-%*}xZlVSN1qxA$`p%F zV=qSQHTDT2{c+N=4hX&k#oO*o6a475)B8#lszj&SnAj zJ1#s<+CvyQmy`YX9f}FheRDvAC(ce~-Xbj)SA1bX$(*Q8JaB;TYsTwv@Z<6I$RpxfrV#& z?0qbL7IF_QVPImunu3K~CxloKJMG9}JV)WW{BU0-bM(egV|#CE$lZ3o2cI;E%OrR5 zr9CKwoVnX(NkK!=1M}BggA6pZcGoZjWm>-S3|48&$*EZWv>UfocnSZI&!^&oUY}nM>CiNA(SCb!YeS zkA{Ms(pOTna_P^*)L8&LJrS&eA)eM9n$###9=~mzuytsxSI;VxKLQUwfrola5Q94GwIJYn#CE*T5XD^psV*+{ z*l>w!2gsOpaBl8T?|^xif%`%Sk>&f4#O&yekE6S)B9fM?mE4 zd&VBMjx{$hkNN(zkAMiG5|*8(^7bWzPB+h@K*w<{CO--hiT6AY1Qa_Y>mMc9{f;0r zM7>Vxr|(m1c{dgl-`h~MJAh1ib5lv}A5MkCB)AKN$NSW%8S;Z0gtSUP%6K-w_bilO z^y^@D2tg`Q^-gUE`z`k+j7rSACaMqz@Jf&g0zYjKgS6sB&@>Gf~&4F zy@l}MRSF1Ifk)gAvmGy%!bm{(;R<$#A}sUiu{f?i`ki}6?(ertzqBDgb6p-T*C`Gy z{1EZ*mBff*Ok;U1&9DMlAMW{~7@dY8U~%$^Qz^NCR|-5W?Bc>Pndj=dF0{K5e=ER) zxVRXHaobJ1CCz)hfx8eYAs`e2vf(`+(V|M}xBLp{)bLy9IE!aTsT26P?3Vq`$|xw} zwt7Kzk=WzwuNa&{^kD)5*>8#vHnZ3u*!PK|3`IoCDv~Z-HlIvwC3B$XMZ}gQo~-h! z*4I%)gp{w&YI-wl#ZIwk$4APt(2))6Tl`hqMfa&RqxF25s%^j(~`w|i=n9j2Rge3|T~0tUGZQqP@W z1rFEdz{A)^VWVCF-7jp^x_z? 
z%04|1oLi`W)M2mF2ZDKL3w*UC)puh?tYIoHXi>E<*P_#%g@$Sq!G(!S(B=nD zIAuA6Nt1x|8g1E=jqzNPh^u8qfzh?F?}qk4TOeWi zclh|1OmXt>)Z{_(62?&FUn=E?;U4i0Jov;YkGVU!?6#o5)h8IxqN=gRTAEv**Cdpd zK!yhzL-Vd&MP;9qBf}Vn13;H?^4YWU$w+eV7s~p6BOmg5q}HN67I93y3TE?VlplM5}&W?(96zIhy^gplfe1)#-JE$6S8U z)0zwZOiEw5CY5k}UaSpCSLc>nB9~*9B>732KjX~Pu{#F(G1u^|NZcIi9ab1O@HXsOvOjypXG-j#kf|X2;M)^>SuWNGzncE@u&Ay$ zuHMzlIJ&sHVRvCq?x6Lr6);$qIL_GqFkK_HvJ^sZeeYyY$UV#ly2STE$Wu!MyRawb z_|7E|i!69K?Z7GWQII=RXDdO4gtsenk2PF;^0s!4+cxMewz|whY~F8snu&UOcV9J< zCFU7xdzUtwXI&2n3%wo6aK*t-B?{-j!_#5GZ@7$*vUTEViq2nlMjeIy6rcR^Gw)ga z*+6*F7TjF$9(9cSGNHNFaU$8|VC+7+f^BoP(Z(%)`O1Y(I2&RjKzj2!uG^xV#bWA;){e1 zt%+m)`#<`q9P{+A3opd66wSrY#N&@W<{kyUMvWS7pZ)f6yY9T3`^TEAyVX}&9X>Bs zNzTLR!1Wn8c)cITbu?(y0Of`wNLdWXlY9?RsdV1mtJjq{KfH-M2ghrzjiW?vz2#Q- zG>*qQ7Il13t5$Ffu1-=l#U-IB{}M`wnxYqk#Ni|xZTgi#uSlACM8jTies24oH7OLu;Q7SH|_U+&=xU!+Hp)IZ^0>4O5!iHG5sL>}?PZ_}om zZ5o8Cv~R6NR`A;XmDayf3t_2vjPr=6^Uj733LK$y4}DO`S;oaDT}A0EyGPF+ZjIH~ zfTQM%-8S294Sj28u)$SF^u=!+t<~r zQ_o$8?>k%6PgCH7;&3?0h7XEcaMa?u@IlcNXGr#-v(fPbK&?nL-+IH=V@aO}+^0)rV$=D1;R40A(=4huMK zo%k?qfMXe3osxBT+UH819$3 zbKFMjZ|K?`eYC4vw{H3dsy(WoHOu&*XxZFdiu7qYg{d)EZ;>OGF6h=x zjzH|%wTs(otF3X==R9}&9k;`W*xRoC(e34Up-1{Z3SUz1I4l-%d+)sue2a~C|HhG! z^u5)q_rKvgWQUMfEjPsY-Fxe;H{B(dbazdfG;)1zzump~!i(;>PMzGLIA(Me__fe9 zPJyp9?8A?anymba9|gBg@eT1z#tSz7uWbqfb9^ zH^CPg`&T?R^442#bsKEFk@QJ<l#pogeN|aui5#?)W7Mb#!Bn(wXatRt;sk{Vn#`4zOq7(M$Q)3(+maNd z8f1jFL&u*PRZ`9YK>;5WTh_!`j_p7$^8fkr zFSQ@@A5s_#EG8)?IM|ZJ370LhAO>2M&mTCVC<9)2nHBjpF4e+Y7`pgxw7*dEu#Zfo zGK}d9=?tl?SpSI+tr+=DHKOU4+*P=#97q8)2(oKb!kB*~PW0nekYR)(w&VUM?W@`J zwH5?#@%fj+3QugvYm@+9pQn%*;FqEPCEd8$_Mc3ORjMH-<~deSv?_Qd3P}mr{z&7+Y?a9X5vxXc?Dvi7Rf#|>M$2?rb|Lac4JG9-*m$FT zfNA32wtpdzF98)mL$j=c<}KK#3e7)pp+rcG&%g3ZNU8uSpp0A*O*nuCk&%%|8o`kN zSPfwi7ACZKQ~MKU$W&^7YQ~gLKq)~?X`(9Hr@<0#L6p@$8TExNYNHebUXd2CB^os< zA#XIXg!;$hz8H^tdgAf?v%kW#F8?V3K zJvnfoJHAU7*Rj)a7{DIp?z-zvoaw)Y!*NA8yLlTVNh=UexZ#xht~>8^Pr*6k8E2jb z{|;^4zu=tnO*lck9mUQL_9XGh2f%YV`QMKS^N8YU3VIQ3M=^RUN!^R2^5em83L zD4dbq(cx$SdH;C)u;;+{w`0b*j_^~kJNzfy*5@`!|MAD4=)U;!b8#%a(#k8lYOGt- z7ajMq?CB$cj@v8K$`f}CX%9xCygU`CP zYl-vlRXNZh?Xr=D`_ZJ<6VkSz0jae%)+&X30Gdzqlv2o57t z`ywm;`2FMH(g*Mnapq}fxGlHb3KLilgSecV3BMe}F|aUf*qd(3q{%q1dahfDK^IO^ z|2pRvIKKWBXN$Ly^Sw9Pa6>pw9pxUtIp;r2o-Fp7H)|$F7XH4_VI?RWTn@t^(0gvi z%o$E5DB$?|Z}?XJ3_Ei$_|pB-9bgeg15cMWJc`5-+(Wa>{Bbg~X;& z33-zyjOG-qhT>7CK-3}&%%GY27o5EnMPvR=jx>Q%ExfUWDz(3;2*IDVA8I}*NIFDq zR4R25``?1svXEqjTt(KD15LlpvxTD`q6qTNH*mU?E}`Lw_z;)~HK3g&9e>b%Xn#wx zAM-DTMOZDxGyorde`uP#n}csiPYm?G{`%|gP`q0@;2X55@qeJ%E#yT?0<}$~!m0fe zAJSOJYh)BXnuU`5zcm~a6z89d@9%c*-h1vr)8xBROi20L{0|;H*j;z+b?~FGl{@o{ zv)txeZ0>G`-=JrndPe*-90`9AHQ?WBD!%{kVF37@x8HRW;0Nlr`Sakc_;*$W93Ibe zvwoTF>R|x=I`j#!658SDBur2o<~G`BLwEDdx57#GI#?WWj(g|bcU+I|mtrEjIR@lc zu!(~Ih?k}Ru-Rstxzo=$(@nznF{^7 zgZp>ytKiFM4fs@g!r_=n*QilLOrp2P1O=$}TEXG;Fig7B zL3lOvaX6X3Ts71+%F1(P7V91L5x!mX@croHw%vMLDW7Lwc+MS(1rykf;MT_XM+z7s zI2zME4Idj^xFOX73Kxm6Z$M|_mt1-Yd^K$=xEJ6rqc`63^u<8mSpIh?z=X(h@N-eU z+VXB4`1ROf$L-x-&6>K+(Uz)LUp|#rdUk=C{qxW6sb`;Y_n@EG0_ECWd{D$pQ^J7l zJG6(-kGZZ77N~HdwwlE{L+0=A0hA+BjY0#eD8n)|5XHt0rl2~DsgQ<=!k;_*sihYgctJrKZ=fve-HL~D0Y($5 zL_vi}u`~?tvKl0x>Wwm~K2)8hM_AJqIuVA!fPW+jOoL`q7|Xd=FAW zt%q_~1h6roX%$Fi^~;W>N^ncLtNJKSJgLa7!9-YuG@X$wK_pmI30%gZ?;k@@kRh+o z=?jn`5~29M5?U<1058K`jUgR&V4m%fS!a#|s0jtafTY?|S_X?n7PTKkjgkp{g)}xD ztPqP7Stg>Yyq3h=&~p0bxrym^T6nN@zJ% z*%Cq{TX;7?#I^ycXD}3>6%}5B{sO52zoKHOQHlwSTS!-+C{7w?(j90LRWLFxwQUyo zlfr_kfHma`84)YI4pb4i71Xe(wir_$4Gp#Mm{BPTLdqiYT9E)-j8Rl22>wvGIn9Qb z+IWR*X5E#Vf8eZeS1}1q87iwPQ$EEbETT*-f(BFgeg?duDi}kJiYbj~xCHyDEg4<| 
zE;$WGZvj*V5^5xHy-gU=aSoOXJCKND15glg6M-USfv)tWx-Q}Ko`f2&0dL$fBcf*g+;lr`g z?&1saLmU$nGSLzwaMrZLaXB)%a@y&qIh>d5Mt}Q_dk_wf>5Q8zJaL>6iX`y=j@=$i ze-}(D?6K?avJ#L*jI*=d$Ppv3n!d5@P|zQ{{4vMhbBj^FoHW=G&Oi6uV-FeFegFdVJL2bL(5Y5y`*U%0a_xq||}Ixvy6P*#bq zhaD8mb>xT;oMb=xm^iqsTdOwm-C2IDgP1PmXI(r^$C{{*W-VH{%P;FGJP$tbknFfw zef8Dco^UiyN2Y6Gpycmx`26Ou;cmz|^)!sITFidxALbrgPGL_TCSxVvj}{dI$d&7>s%hlNVbfoqL8Bz{=!HSX>i2$zIkADrUjacduSoUUJn#DciJZY=YCI_c%M6sI6y-1d{z$N@i@YXAkQW-QO<_P9zJMsZ7dSMc zVdQ3%7u!xCnck2AN&Pcv5aqn7-56?=q$U3TAVW%=l_Fu;uTW|Gj)V4%@oE_uPw5 z7ewx#{ zv(Cok^LyxLba%}#=)U|4ae^Y|=apA5L2>6@nAqV2#p!UI9407gZoY+sAEPuu!7Okw z0{jc1A90%74o<4CzV;eS6s#%Vh5Io<(WGJHxPu}%f2Ilei@=1A2B6^ccLa^lb@iwnLpurOd<_Y@`|?!`BcJ2%+3*rI05m;=@TCJZoH zGkfMQsjlEZK~}~8_S>(rqkseF@PmZ`d`wXE>g}2}Z{|+Hpgiqx0KPgVZdO|zI|i|X zb2+|vq|TsN!)T3x*pqMh)2Qpdu4VHU?ov!p=(oy>5bhW!DA@no0DTDV7VX{pYV7iB zH;{P$5EI; zfnn7o6j322cS`_VNV=t|q9ugV{8>y%E&&-O@&*r3^I1YlmNK`Z3`RkQu%bJaV?$Gc zY@ZVp<(HljWhg+EVR>$pAPpsf2}p7oqq-EMVYElRyb>Bff&(et1#gH6OaRUi4RFR- zb&A!nU^tm6wVzJU5;5E=r8!EIh5SpgP~c96RjBOzuAnn0jZCRND6N7S%9xBxb_7t4 zWGs-0*1rN@#2Z@~R9)GYVoJ**7}2{LQtpT^i7I!nXEZ8bJkyYT36+F!reMMvWCB$p zAr2lZwiZ;BA)PZ}b*6u)-$T(B zIh8|EHXdPZ-^!w)kPUwVO`6CH`9xw={8jRqA+A)RQIT}eQJ7)pR!2|6PJWYa<2 z#o8#fMrEon(n_L9Lhy#DN!Kzlmh3YZ#Ee{0NT33~0!sJV(%Q$%Gk!3WKPRKAq|lnhOdu-UQ*aw^D}g@p=K1zch^v;;-~fj}BY-Nb2GL<`1- zNF=lX6;K(kM1%@vASFF#AzRLnHJ6lRWC3L&0@}F3xlJgGQmE9z!HC5G&WSB2i+tpl zz{n;r0m+tSEvGU~ijAR7gOWmO@?G$Tm_Py{qIxY!V2el^iA;oqt%MSyu;#MPe5xZ` zR}AG6oPcb~P&2ZMGpd6k+K7shz62y%#uy8V)KK(gAcj#~gEvuT!_KfB!xD zWWWV7eOicF`47*!_nmn&p+p}Q$pDzojD8^)h^Ey@7uSp-wQqty7%lM4p2*kVl+x1;2FzU zwdv_U0mmAN&ha>V`ncmdiBFEVhrw^NE${fQ- z=!4>*gW-eXGC$Ee$rd{K7mGto4F3BrsH>JOnxh=UdE5)(kAvrJ*MW^ea5fs+Rmdvu z_uSLhAAmEV>0EOX{AtiQCcDHTdWYyFn_Uw2E$OR-=P+Zy!GHe6=P{ijk))pkN0#;K z*B9R$>To+|q0r}^0_WT;TRKE-4d;_=C)1`)^L_8`3n!U-Nm^10sX+e(sT!Fk?{B{O zM&AO?MjznW;b@EDGCU=i+%h7e5U8OnFJ$B&J19K;wH%1^v4;+QJxB?g;Z?B0?+^Mh z!N7z#Fz<2#eNY4^*#+}zOhQ$Lhye>E!3UzW_rwQ)g=0h$DdP0`5T?RSIZdb_`4na- zaDM-n{r*52EW4UfTu^6Y0i%4$Sjc%Wm8#Q<&uhqREb1QgrM3KxR5X;yF_c`H5ybPh5ym-kJJx5$|n&x9E3Z&aeD7F7`Lv zc%wKV#sstf6}}QM5#Y}_;|zZwd{yvWO#ctzf-eQgJpcUj_%=0=?=0IGo6eLeQ=*e> z_1)lo&mP_7y@-X69>+u_9RffG$M%U&f%EXqpFH_TkK>p8KJdSQ$%N!%BJjcnd~0BZ zKKvNf7heSQQN(M&69eGS=^%gQm2~E9d>uglxA0X!KN)S{cYzM7dA$uk4A>RmF=-9Q z-ctmpk4dft3(@b=2srz0=MO!kHOhK;b}&t8*eSh6j%0u9R5+SGO2$=4XSqH39@n>BAL{=le${;Q6FUywskS6`1BP0hHX z@&ubWD(x-QMLThl4IdQfmlQ5xwfss|oR*Y^fv(hSI$?k^&vR7w8XSoqa>$|L52Sam zUTP#Jow}oZQj>s0doVf4h7XFj;Uv4tE666DEJd-)p8rakEKW5t%$BHMV=V2`t7yMW9|Tl}p@pd_;gnFSK1q=9J6`Z3 zrK9!GLk~yJqWz@L=+We>3Cp9n#9jfBQMN2JFXi^BE6ZfK6vw#5CAk7JMC{!*cv!d5 zGJQ#>+KRf>L|J{6@D)?2Qq&5H6ztONtBoWLKw*Xoi_+4YR-WL?Ir)4jOI5lQZ>1I1 zkm@b0$zb_rLguj0@8$)dc~yC3_DzSy#CfPbDf8_psd_&wlwc)*i%l07Hs*R)kiL$Fbz%tj@hMf+!9 z(47OkufFoCp9d$|L5Boz7X7=^LaN-hp6Q>h>gEXR*O`q;O^BuA}UbudBkUb4j>t_|TFlTN7lq8a_o7=Q647yHBE zP?8R;DXZCF+k~G2-w{7-o9xvTj=Dz*{hoX7fy2or{<>?g_0y+KkNlKc?BD<3eK^TH z4t8r{VB`U5qCZTW=y7hdZ-pP(k3IHS$Z)*V0rp*Y-z5_iCm`RvSnjvqe!FkjxRJm9 zh8z5JIO67|>0%l=STbwY&v4ASKxm}C`R1E4DRaaThsyx|Zy2c2Hbo|_PvYibg6PgW z?(+5Flv#d&!;v$uahRa!1Sj8Z+O)wBO-@koQp=?=i??v#oAc{0aKPRW6D@V+2R%7T zxfp*06H**V>Io;=bY`hM%FjHGhr`aBFbIVsIs7pl+WT2QV?qTlm)RIN;6PLJ7ENV< ziI!C!D}?<;7hUWd!m%|cC?>+mCa;m4pn#KXPEfoyc!=O|#2Mhh&|lA=_|JbrM{Wny z#{m^OGG)C!hjxM^C&Vc#cACiG#iB)8528*cd~WO02kp42q?-fwFTD6724ouf)8HU= z{=E6ZsA*+52kE5))A{q!=3_g=4hlHQZZ3m4bYvPXD-}^G*BWQI?7u%Qhm&khP|$(r z>qCc%617s)m}B<$$E?{u%LGN8TDAT0$9IVn6evTZB!LOYb!AW~C)!@K#QepDV zugqVCePw`XmM+3%Ax-GYq;e??gph_xOy%}VxitBTXJ}!XlDHz1?Lf(BTNbLL+MSu49M^sg=(WZiF14m*?Kt=&t((ey0MmPr(6noa%(_e;#eL6v*no==F 
zvx?8DkQBr;6saK=>pvO!<%HG6xzn*oH4ZP*&U^x5-z=>}!->6ArkE471ci)W_ zY4Y84(+x5~p-!^jeDe)B+&&h4i4*-tAATr&tZTLn4lZAf!DkLezwyQ!L{6CFjZU#?kMGSZdtWKvVyvRaeYD4JP{|_)(Q$mAEQA<4cC6rNoBa2L-R#F^6m$iSWSb(zr5M|c& z_~SkNNMNuX1Nyk;&Yg=1iYH|;K#%S{FhLReW;Wu{XoP-L zcT56lI>@Yi8Xm(5iq@^gNwz2h0mout0-`PI{bv8qf1ZSPai#PfNasY+wdk91;mXT{ zUJ(&-lHH?+IPZS`g%T9oJAw_GMEaC(OHR+#WO<=YZAp{RS z{Gjw5TD8Ii81fV@V;=~q<18#JjOrf|d_I-Mwh3V_yD6tgR-hK`Cp$(=L2HXn<5i>5 z%alisOpj1TfTP^L)q@ENy{xd+VDaR&$z;3&FVR(8iQ5#i=!h8(>XeINgcYUnD)a|* zV$NI@)Z}U=8}daS;j{7;1d~?FN2HBgI0^ujuof0_ObyFH%Oc_>(6>a^kd;QLJV@3O zNie+8Ou`Zf^%_javJiUYiZZ2AK}AthIWE%^WKASHpvr%8DQ65dn()C&@t&9>x-_1( zik$*EMx+1&9PXi9vRM_8|K(D(EZ3)h3w3e?yH5B(Qmm_=Jjak+g6{ zbFfq7NihJaERrrkMn%dQLk0pWUW62>kr0-$5F2rc1R%j8r3?&}Vqirjnh2HM0vqm$ z->}#=Utl3~6p$doA{uE4l>nny!9cTjrD({`om>ba6iJLJQYjHf4wbV~$`&=)pejL2{KbKd?7n18etH5&pm4S0RtX4J18<=5MKH{ zSb-dXqBLTt`!i2J)7Qr!#Ebax2EPFwz5@JQ45DL8zMnAed;cI-iE{;7{rdGVuzV3F z4@Sdi&;$q5{*+Tr^=;c8A(Iar%;n1T`k3%I|C|djAo`p9u%3?}n%u>3-g)Qy7R{Ss zFrbcyqht&oV6`9GC3jHB1jYULLGvQ9|ImXEc^vbD0fLMD%;_`ahc|zG^M~Y+A%i8& zI7%WF<3}HT>~VIc+{I9Sis z_?+hd7dmJFgxzZS9_!W~X(|o`F{g8(iz9CkkK8IDH zzhc0hWj%h}cQV0pSewJJQnO?Sg&HaiV`Zm0XLaKrb+WAMV#Vc;nCJmYY-I7!Vsr<^ zrI%hJ11GFY=9fE1m{+chJoC&mWj9Q#)~!68IwlqK=g;#^QLgnd3Go%mYys*4ej`w? zzxja!pY%BH19G+ft$iGhUY{X$F@%){SX_+cGZw_*YW zJB!!`a9jf>?SAz)V4{eF5w$Rw_xKZ!qpoNA&#=1(M}_%IF1|$ip47pWx;W+w0}(Sl z{E7IzvHPJM50ZnJ5)0GAqULj{e;9;tq<-xPxL`!BF5- zxdJKZD?wTQY=uyaNxCy7;gRj(Z*m7kU4Lwcj`F?Y`=7hvI4I8VFn2+6Vz+*SdYFi4 zj>-5pWRhKzz{%^T&6>%C0ayCtSSAcypXfVw=-^+&04ygH_+5Y>A^+;DuV6y_Kum1c zN1x(kzYuu%Nx}rhWZ6M+B)%E&86@fc67TGzu@jREBi_P}LVia%c+Kzk7uaRT@@sSG zVffYzlw3~04hZf>Yk~d&Cr!8zfZr(gbGUPl-}G9wYx!e4b@WrG(FRU_!0ym??T(fS zb2v?ww6Z@jed=`I=ayUjUQPD$hhTz&yBAny-05-K?S15X)B}_H{C2Qkz=;L!K!v{r zSqyP^-@8HkMPz8_%P+o^Z#(ndy=Qm-<%rMmjh!R?0)D$#=Cjd<_z*h@xf>40^!O97 zvvlGF4$foe2^N(+i9QAURNdna3hLD>^|?{Sww5cke||sjf8ai>+Hd6Z4hkb>K<=Pu zje&0*!6>Fk8ar+*`b)>l?n&;xgpVs(YJ{?IUy z)C$kZTP5t#o&*sVJ@&mwM9KS4c+Aa;CUiyy2zRK-STZ8UcaI<~g|OlyUqRCHk{+*$ z==Am#Tg6v&6p$bb2Okva8f!SybUHt9&?N@oX#gr(iW^ z-lU&^umnEKEj&OJ&ILz2*ys6hOcxMxAXf+p5=Cb?axNdD`D`)}5Ht9KbMHr5MYC$VogJdeaRcBtb<@*n2AvYDhyFL2NFkj0C_RjnIT9DDryOE<`A@6 zfN+op9irO)!g-ad3!#899f*W-3$;)E+747gZ5pf5Vx}-JrG5g`e$b@Hi{v$dSDHTyG zghIMPs|m2A0t~7BL9gT>lFMXKA4X zOfV*_1cg_WFs*9CS|Y1$6G2q(#ry|_1^Yk+ZQw+iYnu}JXb|slIpa6vpGaPmQid3f z7z9ygd~}qL1eUqjVML;#{-H{=>EV<@#4VY@`xgv6S2A4W=AV6ehz4s-VDhJYfu$_} zQDu@A0LXxdVJJzaU)m;>%8P-*R)Go$LsG$*Pzw2nP9#RN6eMwH0#!1B!M;RO8YrPx zOar6A5P;=MQz*2h=AZeY4%k6~Cw-y*X&_wsMbvm?%uzsPDFh;X|42ctASla!p*+DR z6;EC1>5DcXENV3GjPQzalF^>DRSQLMiDMbe}e0b~aw{aV7upyi(R)eGGxo$Fi0ZjPu2RIM>&DB`G27Cgvz}eDAyKT1J z8fwCmidFCKHk^|??WgJP{C{2GcH4Ou_wCqk-HHD^$t{@on`;Z-5Ie#t@%KMWaId`d zvfCNw;A7PsoFjhW8Z>O^a14-~zudR)z2c{;YgahQhEq^Jz4RM!?}HD(sp30sXXvH< zmEmvUYxF0cd+|9}{cqLWrQLfttniflvkK@)_`P@D!uie{!k(vQHY^zU%cyf*w)?CjQBb4`cAd7P0w$$bXr+jF6ZzD97g6&$3tb~XOK zvV1k^BpW*!+^pF%9aaOndi5KkgtXFO0wD9xaB06kShxG`Lnqmb#D~ZF>#v{WZHsyA zkw@Lbk3IsWi`~{+Y~@yg50Qln=Q|u}4Z*UX7$tD{+3`*ck>3!lm3hfW+ zxa8uCQ6{guowncJ9e}f<>8SPn_uqq$iOF!Jy0yc^0Q4<{ua*7a6Jvi@4ebsq{@kcP<=Xeqo^l-uA`d#J04I%o*LDqC$;(=D90(7l6m%iqSi>U28WrAt?bRkIEQ zKyK)eVRH6(4V)W}m6mR&op*K^#BguD^N#o}VY|2n4q(?=du^1j!&&Q3x`!XS4{$Zt zxJhHT+b(;!op;#@^}4P+jC?-y3Xs;7hNN29#E6cxD|%n)Uc-6ntE{}LJNLW`T+^nF z<(w71YV&u10;xa3+0ib#ry*u ziZ|w^urGW|4%PNrs3&G?jFoHB3-_rBXJx}~O)vi-leE!T|FyGCbIm?az zZVcYTKTA2Uz1CXpyl&^adUfi-w?qvjL!JnC)6F-zXW*Zxkju69B{9``s%J}lcsKqt+sI3Iq8NDdmVnIzLEYDzoToykJZ9O z3(&`T-(jJon~(Q82J`Wb-^op%J_C-Ohrn)g_sHY@-7Y)r>afTFzLM60qv^%&A~{o^3lOMQP-~LeO-%YE!`zOx=a0rwgd(Md_I_ewo5vfz774M 
z4bk5cMkwihRWJAAi-X)QC|~*%V|r<0@{f}pPSJ3mz~2Xst8^{lr-RQ2j);P_~tvzd_{jK2Uq5ZKxFm%XkaC*Ht`UDM7 zUu(OOD7(q%Tl_M6mYekB6oKe-t69e=Uwz)w?o_54}MS9b`#+| z9Y?6TpMIL=&OYaC^h5SW|7#WZ*4uBpKJcHi!KQSQ-33nKx0mOSKe2g*(&U{NkO2|H zP(G10;>w<`AS(B)&Ay!nzxtZH1^!ZIqOXG;Bklq^$xcdhJ&26HsHAdfGCY6%9)^=_ z^y8{8U(I!c53Bm<<5qR)?~g!7n)qMd^T32C>PtY$Zt?dA7?ddAKl}crM$omK7&ZA| zlovS)?@Z(+*(V7_h}XJYp){jTVgQEE3jj%D$@fX^N8YOJGg)bZqNExnags?Y;U)G% zB6u$(p7FvJ_@Ypz0IlzEmY=jTrcN@U)fn01ldGBpE0KUn>1C7`^`%RtG>n88%`XKY zBtZ-++oxoS7D0GaJY4Vv#bn@>(x4I=269pq(J2og%isVNsE(Y-C|pcO7)&53e!>fB zP`esN6&lqL31Y6p1xOz(BC4=Y5+!ollQNP*BAL-p!9b|~NDIOd9b|c@zQSWv!9LjJ zD&rS5Sr-~L_+TiF6d1t@_DL=2#)iQ?=7IbbF7g-C3#MpK%BIlJqk@<$7EjQd0Li~T z#^laD4UiB*)p)kiivSjM=dfnltVGJeu<1u_guTQ;vwrh$KaWEl!$0)Uc~ zf)KCR{>Z^AvByXmKk%71>J=SHnvLua+oyC+WsEQ=eP59gS4e~AMbiNS+a}5+6$>+D zo{zHpRCxYUI;aZ&Qv0Cc!WB3RI*DTH&@NK3#CV`+nxmo$_M--6K%F2H!BoU)knOYP zr0z&6Eq~e|Rn#n-eX5}p^Cr(fBfOFkMxtKf3Iw?;TG9~&#VWwE@~k5N)XuB${t2o~ z8q|=6LB=F2AmWcY5r;h~Db2@H?Ay9cRzwk0r*@Sn!5*U0{uC`XNaBPL%2SAewbJ(| zQY0v~Ph=`%W;DKkA~uLd`-!XtBKYt<*Q6D^GzRIy-P$LCa=bzsqLx$+fu`m(v^vVZ z=XM4lirc`-7V(V5x@7q>MX63h)Y>PJWYK3Tr>s8$9(z?~pFEic`_#!R7%7I4Gcc4s ze|SjH1C|`{#!K@7FxwDH$t~nN@u)nxY&wE=k*)as$G3v1nmzj$cK`-%vBJy!>%wku z&bvP*C{_l=B&d*rU|&zi1_(ZO3+68r$BG_~fk~>d;!19h4Ma{p$u4T-Q#s?IO}q!H$$9fCUSnBE9z_C6EL{`ul(1IdjgO zx%d8F-V3;^yG-)#oH^$^Wy&qTd+*Hs@lPQ?H2vd&Z`a)!LEM9z7MO8>o3zu@tfywl z&5V&FS4>;uNAKZ>9gZKrlk+)2>oRBVYN;w0w3;vTs~gB%Wk{i#`OW|41BEm zS6BZkVa7)~06$(?e=G@HrShG4|IRf0I{$9H0u6q*R+C)3{;Ya%VX&-#daLPXWln+X?DRciXz>S1I<1o{8|cqqTzufj z52WpJ{>HGAyl(4^3{c!3KCxvLkMq?}i%$tBEZ7e|px_xLNc0^ow8pGA)?5 zAYpK&Nt3orN4)oaX=6O_xZu0zryu?J$9TAdI(^_sxq5Azq7<9v!84g$PMo36X|I*gsO(hWD> z2>fE{8$J|#JwEbz#CwiNtF5s*9w6$)+-%&K!7>*3knK#w|4e$_KfE^a!=IaNzA3zf zeYDjr%HWpcufCoKSI!|2=5)dd{IL7w_^>!Wl0EgPwA-$`#U>V%SoGtr+Bf~Vi1P$I zAIG z2K5NR@QTh`kAY<B=jvNLb=UKEga6AA5G&>Pp@Ew)kg5do7yn&p;@}f465g zI_m$s-~ARJqCPGyT)aqh_~GR~yfrff$MUh`#^A%%8_4mL54=}h1?K>aHP(uW+i&4R z)fZp#^Ypg2zBL_yhb<#~ z_uhR^n!jMagg#`*5WEbrTADO@QrheFd*FqPozlorE6TBgRl^KS=vgvi}lAh)YAU@?Vnb~wE^27-=M%pvoA zsFV8x^o;4#(KlOc7HIF}PS+v<^7K`A+vB}Mc7)l$vG!SOX;>ebbZE*yjEqnR+ zw*qHtlp3fHv%^xnQCRqe(w^iN4bfzK%hdvdEOXTNwG+;3@(Da4BN2i`C|h1GOQ@yirGGn-RGal&}67S;2)9WusDp!A9Bw zxn3w)7pjFA0j-wZi*h8XoRU(+Cc04+wBin&;n4#o<>Vg%7Q0$2{!}UI2uC`O6ud&% z;YNS2mk#y-qb++2gu=)-uNFs6ip;d4%ZCx` z21QC%8Z)eaE=T6|e|qFNh0xb*4eqGuuj0-c4GtR?PLwEpP(af^VssI%C#$d(XPcND zHYm?{^Sl*Jap=XaJpNiZK`=_ckIOaO{X_$=t6-63lfGuH-HK_yJ(yrjrCMZ>6oVm4 zwyM7f#>Taiviitg>)32+d|N5g@M$olPGJh9@~&g}Gj7P-LOAIM7sC3ZbijcLUZLdM zetQDFt5R%RG4>DICwpNN;c^~$2LwhD?aP|rbEo0Y{u?0{=g=wzY%1mj7G){ z%d-%1oRo<&HIW-f78EB>;)&&py@5*1O)Q+{vTM?_18>7d_A{?@Cz9$qY!}n3FxLqO z+DwEl(=U7>x7!HXkE!l0dHo%JIM1_JA5i$_S)^x8_vfDwi^46khfhT3S3`W8lZTmm zJbzsq*6sbz?S4pS*`p7(OE{{0Uqs$KGD1VQcoI($j?eXR419aCh&KG;X%M}Fh~k2k z=|p@Z4T@}asbk;K``{$N?%!NHL>;R^Vy9F}J-sU)V|6H$bRd#iR~<(5iDBme&8Zok zh|j>W(K*h12Y5ctqn#dxzT~0x+9pg}Mr@m4ANSt%SiSN1d#i3ARd;0POd#O?5}gDR zb$zhY8!B%Om^KV$B35>Lgh6yCrr0&5l}Ww@TbW!5^y(t{+Tq&ikB!H>>}mnSk%;Ae zk-GU@&Z40)Ji@>~MxAbKmI>oY>7N<4XqHEhqHubK=6$0JpMP;lZ>;V`r`1_s;;`)o zzBD#$wS^v?=Fk3MFdjEeM-p(;Cl|iDu0|$c@#XNq>CfA>r_GecG&>0Xvf4wqeYC`D zgJchNS7wy$I`{JsJ6Ls1Psf%KZT4peXHs>taeM67*>Dbyf4VECUNuJ+|*4zB$$7FnihU!9$eDRn*Szmqoja&;U@;3>l9<=WfAp^};~8;7aOoybx5 z_w?=5zi41=c|O3nC(h1Rv3bRLlG75KIK$4&6_)`*Nqbm=FD_siNkEdeLYbhdyb>di z9WxZq`*T!y{?7NCq)d%XTbDhm#Q>0r!kUHE93me3Z3xQ+dzGMV-)O*8EYC~*B$dwn zC|*qi9g{<*-LbjrqP=3~c(a;aErzR*0wd~(M-YHN@2vOHnmW|MeEhlMY|9^Dey#;+ z$xtxEYkiQuz>R?3kwm(Ljj2dEn%IryhHNKBI5O?IfaVM?mgV*5)3P)VD{aS5yRpjJ z23#EH;RGh(B;GQs&TnCqQAAAxf5y#*@K@}Fh^14`$QViH>4Oc4(d!_1Cb&HX@ZL4? 
zzg*I+m_NAh!$nb~jeK=`UxV)3p7Ng3A;#i<&w!Ws&=#!PoBlyyYI0!geb)BC_tT2X z>OkELD(7*Mfay9gR=nF4IP_1T4IKw z5pkWIu6w?MXo(y^og10I8M7$Ewr4ESTBl{(ZB56l4{Kp82Ln3)8S8i_@fXd#`ytb; zN~>Mp04JUi7tgaa7q!%ZpXhd2C+CaXY|r21W*_L===NS5w%Mav4M6`$OYxKMd% z{N_p3NPd^*5k%}mGlT)>`M@RgENbE(e%zLc;dXAnmWoKT0hD^*Oj zC(ORkDfh60O^jA4mvUQu&9O)Z;1bSTVx7{R-l0g}2ekkTY|PQE-+kaHdrx#ANr9PH z_Y>KTBIU-Ru(01#yV10UGjRD}@Zsg)r`;1?N6+BRlU1nLd(&!E&L*8V^2XRfE~5UC zctQz~S+MsT_oqtkF*S2`g)yClw4R0OcREUt=q_6{0vhMNKl2`IzOhV8U0(ST9rae8 zUC3XYG+LZZ-|OO1h%pq#=Z{~{eRNg1&ld_!eH|$r2E&=Qy@`K-oYfQL zSjPk2n&?LaVBvF5a66?yb%TY_rnlBqjMr|{{0eRLs%f_BV+kLf6NPAWrW2MnpBjRN zX54LGe(uZIEr7O^Fw~DwEb^Il+F2um*jF)kYmxQMo8r!>?YJyVONes>5C&|VFw`L` zDm>k&&TpxM4zE@a?Z5I!>A$M2VY}<=*$FiEHmp|TvhOP>dk|))4zxZ41Ua4IDe6~yE<_M8HLJ4Vm}Cwa!Jo5%i<*TDZECCs zfX?BPXN+^I7|lI7ebqUWtVZ*L*Q3u$8g;D=w2rKrAr4nYdL(b;8u;mmjW#<9E9r5FOR7U_!_9bdrT!&gyKYen0f18!%pL^xnrKoNUI8h&Bi@ z&wZ_4@v7+4MXZFPR@aUD^o@n-@slfu=HVyWz+-A-x3rMF)QVFm$B>ONM?#i z{l{;X2V2cqP}!_FSW%44Acs&aoSvRcmlsQ+8H`eCjXH`+4Pts`${E$E;G5dfnBQ40 zvw@twZ5 zofdYmwk!AN90}*Y>wkoJw81fBNj z;9|sAWX3F_)I-2l_31N{tg|WtAsZdy+Lwz(I#E%7H?%9s%?k3|vkf+wJP7 z)VeE0IISf~PS;c4411V*N1%M)`dzj!O;6RPh+{)%JYCF)7Ufj|umq^UX||l!sUx;Y z(Vi&#h!MQg*s?soJXZM-=%{t}3(4fYv_#Q&Ut`S)c_hTpIN=;B&zcd>v5Ia7m7YD215dVRil| zG`kquigNR7zuB9aS!qlaH#&N@(_qyH-wsL#P8X_ijr9Svo#6fBtnZTpYLcbJQ+^Nq zjdPVAgMF?WY3SgY)=s*~+f)O9xx)nZ5R?1PCAzO=Gz|Rj#qJQL?Ud&E=KP`juU5RD zLq1=atG|60(0t8Bpmwo&m1a!IL@4Z}I}MnVXx`2c0!x%-LhUkCnjb4Oj&8r5cFi^{ zgq@7LuCJq}i`elMVEK#XTJ`@<6IY z4A6}pSkeEZ!F)y5OGLGQepojB58Aoc!^)v?^K9GAPQQl0)>ajeZvmlrux2yat7V=B zK^3w}Ij+<3Q&MgP)gxtDY{^-)5`jo#X4(7gqxI8=w<^uA>i{kjK%L=icjr>Sx<_gH zqt!t#sT*9)*I{gn_pNb+s++&;6+Aw844`V56)j@hVj%VC3#`P^nl_+93g{8aQhy^g zh{FP#e~+$r9a-d<=ny-Dco~oV2*3-JDl2&3B=|lyi@s>f^TLt}d_tBtMd&3?_m1)} z01%q6Q?DkF#f-+TwL=ssmZAq*u|GuV>Pj4_BacQjhXR8c>6Vk*=YJZ4;o+0^BPxm6 z-+c9-zqHy$Z3T;fw;sqmMQLo8G5qW?$CK^ol;&%178r`HNKTWzUR?8kpC{C+%bt=Pl5}Q4yVsR zdZM;LwN)D!1#%x1-IfSQw6dmTq6Zx5lrGkTNwf_~k+tIeGOFqC1&Uz}q?=9m6bl zvf2#7Qg=Dd$SMKACam6n;qk!`Up7A6a$vyQYz@qRE*bf{^kIW4%>=b7eyRm(3#Evk)2n9Z6oVb2UrT329);6p zHvh~R7Ky(%=YV}$(%W}Qr$_;mqr=C%6f#;$6!`CGDbh(F|H6^9=X)`dT;!g|_~`od zCE_9N_KbKuqZ<{u6ULvBRK@w5@1K08`|V}q`7+CTurG+4t?l&$!oUm+pHv$_fAi-$d9!2W8?^_Q>qClA^z{#hX;)uoHcd?`G) zRD(ePy2F^X=vNzTkJm>aPLg-bkKPYohB1}RhGLb}>%#7s4REQJl%N>Rxd6Zw41B6* zJ0=hR7Zb(1BxNC4QWdv_O*=Z81?Cn7O;91Ef(e^JpB^0Z21Zil<(+=HRT*w?nBWyk zLzoB8IxjCjHF&_v$J)@YT_Y0LRpyA)s0Ajy9gTVnqqVu`=Yfod^lIHmy9--E1CZ|$C12l`mk-3MLahMn+ z_q)0E%xtGqp7>F#qvf@t#ZzeP~zJO%8;bCPb3o2WTWR5yi+xQWX&Knj;v zjTAoM&k~p2dJ{ujv2B&QcjRBG_%V`}?`#3LEY3mCEpJ`I^-GCdZ@W|rI#M^O$G5o< zUp>3*rdeoX-<&H|`%Xz=mT&G0DNh#z$2Sg9NlJW@mi$K?H9hm2FH+(n_tP2cZf-*4 zfpIW0?yh-w*g@ z#Ff`uI&mrV1sW1wm6j=mZp%ST()Q&5qFr!#K{kBBb}kbW`zspTR|qD|TcxHTz#RSe ztI>`Lt)I=yUHTJnkq+ufqRx3x8Y1>P%R5nqSml0K;=6qNK6jN*H9KF(7ECFeS9Zdp zb&Nk#SDw~BNVd8I06$6#IiA2^1vbBG?WB4e&*4MDE_4Cqe8oW;tx1ioM}EH`|wy4!o)f5@*RI4r3|Ws*)M}#lXFF4!|)d)WGITC^;DbR2*$K zxCbs)agm%6QjsP34FTxZ$(t;JpRqHlr@~AK>4ZG^Js}FSsUp z=)bJ!hDUqfF}?~Tw4RYE`t^J&{$p^a^nERlksZ-o(p?o(Qwq3bQJp`JBX*^%WAn%pL82p@}(i_(2amWvlSaq zZW)D)1aJ$3Saff6lSo{NAFY_5@~*=RJpFFGQmf$Dh*hhrfIQhO#(2XEq%4X~_1iwt z$)mw`PLA3#5ReU?Sgtn<|Gpmazp((O$u6UDmq%e7XveA=``Pb$B*o?RTi)3gT9ox1 zOcoyIo{bP&5>F&9u(QD0ALZh&n6uU=){Eq8PO0>Mmgi0@+BU*?8IPmHi_&eHa5>%W zash9_MUJs_Fvj#GT1szIt#s`8&;!?n2^gnUb{~hb-V)9Tp0p5_ScT8`)Mt?<^T>rR z0B7hW^fDGr%>|V-3dNkPp_3DCaPp3GUFGRvJjrZ~ER&UX$aAGRoV3>iGT7>r@j4Ks zVP;OLf(|T)=}^yz7X);d#(rY~$R^pz&xJg{h(ZnBP+kPm-E;#+2d*Ds!eZ5^&6p=YZ`2|*-z*l;y~%yFNSA72~U zpwY|$VSWyz%^Hz<=?E=@g2})#Ks-GgR3MyQC#PU=IJR9?GvA+U4$VLL1H~~2iT%Rd 
ztE;+&^zqWzCqJVdw)@nVB};r@-Z6pfTc`@TlTfJ`TH#WTxi1g9pPq1+>SvG?yT~As z%pMFl`~bHXbib4Zcb=oGDeKQ+7b2#pDG;5XMO8s|S%ee)Mr}%m!`C0v>!CR~X=W2md+m>VwE7?4!Ccz=ets43{b5F16Z}sq z_ITbf*~lcg{QPq@cjdaWzDgWmcK>sl-pgoBtfo19X+5MtIPY{56Q`KIjhU*jMt=nDD?yh{11=ic9>uGhG==E<- zlfsB&W9aQ&QIxZAR*I1o7knH;v?#okEGC=$vQZey_yKA@5M*?_o07a^Sx5MjBujcU zvf^ZG0{z8o+4=y=2y<{~m=^K2uDY|ODvIt36x6#(rlG6Cgm(OgFWiTA^K>dYlhyW#@LnRNXd=`}tXi0&7+rxT za-Ce)XWmlRRGGchVNF_++R?aPx#U4laY9=7oCW98oHIIEo0H<)xHiVmT%FWt%s(-_ zH}yF04L1I0*^^*o*I!f>(wq{q1mll`OTD>Ow$TtyQ4;WBcFj#uHE9eSlO4qNSb_36 zN!D?~Tol!&$i0#9vkG_Ue9PRsk&}Fh7J4{12wLk9CfkELZ4K%Zb)`!!NHovSA^dF$Cd@}Tk9*`NywPW?|m7Q4*eA^>FOjbrBbwS|Qe9g@VqXjQ1~ zE7|p_GH`$Y{$ym2v!Ei8FN*`=c2%r#fXn_BSHWmc9db0{dv~M!?aAD?Y|EfiawP6; zV(4XbXs0m7=d!L;I8OzIz{yfKdIXd-vZ~J*irIRUiE+TY0yfI%`TVO0m)cN$X1WPk zBFN~)NR&|Y_9?eV?MGoc-q1;$fZ{4ydgkK{G4^7uVH&hq!?xRF_K#vjQvXkO+EdDv z+Z}MbLrwvBpSk5Kw|7&;o#BGEf#%KK38J~wUJBfxhH>OUzebI9H!~1UZCcM8IN%FE zID|ZTJi2=-r}%te_ZAqTKoJ+DG2U*8?ix>`o#=XZTXL05$^VMDuQ?qy<6yRKAx@q2$v&5AZHMI#^4D1F z(c^ER+<^Ub=DwK7I)j}L9JC$-v!Cgw9>(q2fV*bvI?Pg}DsMg+Y~T5W5`Vk%-B*4o z-hNt?J!YYwMZ#(eyx107c`n1>GVSHwN_~jSt9hxE)YPnM%vx5d8OrqX76>2b`;w6t zxrML_R!!%GygY+o>IB#E$`GZ8cwx-$OPgH51V@+XwEOMle+U+j)^kv@RRYRVUsAQu zcgGYF{#x7#Cn#J9qr>eZ@HazVWjJ&rB-~pk*hgATpl#y0zqX~ZLL1+Mk99?jc?XN!iE2ZAg$Q{K*0RnX+2IC^?l0PYdYqvTu`kemO;s)DKE>#1 z&bu_7X_uP8jceA0+{fko8Yi<%nHX$|zi4~(AFt@-7v!?TT|6Mm;m*3kG$6D8+@SFk zgLEIT=}@OX?ge~#vic@pCL|>*0}}MK)7E2ITC~-QNoH!Z52jbAZHBi>Lmfw=Gub_N zLjAx=23RMIC{xZTU#{7~=}5LD(blBI{oA$7A^UfYl3hR6OrkhOVa|B#qlji_u(A6az5G*T{x5Y#Ibly__H2g5he_r&mGsj(rnOAHL%8i2NsmSPz=fTmb zWE^{e2v&`JN2C~|n0YKaLh?;4G<@FLN-$FZt5$+XEnaWhvgPko(l*dy&U#I;uq=|| z&_ZY6wBK=Q_UIblj8MY@x_;3ogTp zjEVpeZ3e_dM=unL>owV}=~pJ04N9Yb3FucuGO{vhgBavlXBngR7ikKhsQHU`g3Q;A z7*;AIk`5Kw$$jJta2j=f|fO-WIL0l(su^FlHp(szsKFpJj+sDZhwf^%*`#L zuEO}aZXhh8hG*{NnLrpWu}xP|jLxu!KIy3&koW#snB z8%0)+!@NCrtO9NACxSBJQGXfxw5bd<$ByMex>6{#5k}(Wi5B3e#EuKjtEBMI#>`1r ztj_)lfe;mvv8!+eg-O}x;pCadbglx7xwCNwKiss=y7MPQ;5ltd(1Pv)LIW2EFsgVR zY;h*f`tmwu23JcfS-)>+0<$irQKB-uZ22lal^MbvG$P`m!(;8FL2VLPzMqTNXS~z3 zD;m#b^|0PJl`Oaqw`j0qqwE`J>Sl%zN_f7JAvn@zf(a?kW4FJBPfNxrzm||Xc$~C# z)-(;2eM>@;PDPeUr!jeqC!je%W_4MZpk|GRjRBh28BCPoS<#!|vGS<^H}vNd3Wy{a*PG@26z@)htRPBph#@ z-4Zn`5LK6sYc+syEDX6Dir;sp49V-R6NdB48ZbmoGao(E8C6 zROCO~9SlHELK$|y$-Jkt4^OcJ(G1X}vgTfC9_1eC`nEsbejpRihQEwNo35!uCSC{{ zlDP42Z$v|$@ETdhbu$OHuo)JFlg49kD0_Kpxt3|29qWNNQQ{0Ox1Z!n0T6f%!eVyQ z(*4L4*SAkr-9$_B_W%N+tmLW}`437pu^WW5w<$H{*77%$Lqtb1T zA2U<;Q0=o{c*+&V2}VqKvt*Zwt+}rL03MUdIzhaz_7s1qIksoVue|ajqS7B5gWPMv zRO@j-i#07=X+@9eBh(_I+nWAIVP3OT@)7N7Q9p zRc9xHgEq)xPJ&tcFwV3&*p^U5X22qFZWK?1o7 z5c3{iEnh2#|ESzcP)ON{%6TzC0A zTS`ZYhGOCO-^Qgi@|Z}yo2*Y>@Prmr5+5qs9H&h=(x53H4Lw=F@44Z8AqR8_3f0cY z3s=g4oPkGfbc8Ii?d{+J&k}_J_`sPfWwIznEg6y zP?lq}zaB#CaS1r6&Q8_4b%1%0jNL$m*B-Rh)eQr1s19u~>Keu8GE~RYu(~#&l72q( zrD};eS|Ouup2f_6j4X!5Hq=bMq}+-mJfHT3;=&)5M3US(5g#bD3A>3t({qZiD!OUa zTalHst>6moIG}SvLhyrlu}oQk>?h~!FI@8J}3zmREYs@7kJgAwU z?9WTWI{WG<7_EG0zv-2!F?2^Vg#rP=X_&4=!h^6OpOd`Ly!BP~>(RUeQ4*UIu2ZRG z&1WH|DX=A^?m-e7@NxdYR_*NP~Bncllf&?DJ z%j>CE8>q!Q(rxEHk6B8L4;KCmR7~f^I!9jajdx9@nMoWENiq8fxxb79cWW@-$lO1m zCpT(~N-LMP#y_Hk9iSEjb8R-UFGP}k4a3FQT{ zL#T7K8sp6MiNcrvR*Wzz;N4M<+Y!5)Ksa19E_c#9`?~rA$SwEgi zsl?$+nd*(jK;pRe9zg=jP$}eZ59n)}{UXaG48ejYWNeQ5NG$NV zF)uuE=}?FU&DAN0m#{rm$ecyNRR+lD^KAFn8wdvPsNu$^K>8D!$2BI1D3uqBaAP%1 z5-WHQQ+cig>Tu9DMHm1>ks+yb&WNx>9^@G{rKz^Fy@u_3L15fIn1=1Gf_5n{o;P~sSx|`b ziZ>xkxeQFxDF!*BG(KGVQv9;vs-mFn+hq3#xFKTrN$JfTh<0QNiRDTMUkUpmkg8<4 z5?ZPVq(7t}VrPj4?n%+l!VlOP@u8572tnHp&$5ma3K4S9LKmpWuNIqm@-%imkN-~4 z>?d1_Oo2_I!%rK(AW>L};q}-q^9O5&NHKcgk#IjLs~D~^^|8(l-53)s0_A8^XuHOW 
zMrU1xcbe8@f{VfIpIEe{Bx#x$4TRx%?D#=?-cX>yUrq%inBhf#6Yr_HF0a#%!t#i_ z!lt1Yer-%3i~Fk=qTsgYp*+#P7V;{7tk?r^oe;1=FW9_JN%vbC)9#*S0Q%F%>05gM zdRbqi_Ine?ACSZ5Pq$)LQe`>)thA6B1nZdrQ5c7O=uV25ULs$6$^LfAL;EL}#xw$6 zD3A7@XOkDg(9#uc*h&wsBoujPQUegDl#E)mn8CgLa*`s`yj`YXeqJ^gBtpymrkNQe zV&A@L&QHVUW3ErA0m;O4<)urn-{>w%w(gv<}|$n9tqbt_}ZzLl{7w z9}`$_6U@~LX5mJLC$f&Rh2+IYcv(H^60LNDtVr0ZO3Xm=s4Gr`?mV;gph`i)@=njT z*ukv*EHU|#kA~%T2mz{|CGoc~=YYSe&T1tOi1z7#40q3uVT{96A*`}vcP!GMME+W9 zX#gLkTU-LjhfUnX$+1OCIepHrCTNG7=gV_Q;Hd8LIMDRRssM2wg_~gjHfoo&xUAi; zZP>5jXOuMct%&EFsN`0~j0Czt0iVq{=E+MapV{ATKW%O_O*l^4l&!TQ7eMj~&WAE* zmxTke_iox&u0=gTNn)OrJ=h%^%&_)5j zMqj9&@9Ta>cB&lE?p0Jo0|8jJzpKFho%J~+=C6pCvxcNb`1q)zi6|qyYbhiwKwLXBTIr2)t ztC2y9As)&=-b33x;xiDq0w#4|9G24GMSpYtP4q33VG3~zTC>*L4z^5z)k#|BXHe-s zn7=lThsvYISSvxOo#Inup8D#8GC(EaW!ka%Z(l1Pw}5-*$XafY2kQQ#Gy#@l&>yBI zO^T5ocu2aLUO>QZSC^hzgUVF`<+=vKQSM0zWr-^&*Xv;A4K0%43M~=@BErVe!tM$!T}LI+5eMGkpnim?Le3V0MvU6>64#Xt)!2vs-f` z{6(0S%6GZH*;~pTL-ly;;}D3(*A1GA!A0S80tK=eG^KPmd~`u7YzTw9$vx-P54M;8 zJ4g{~91or#Qj}gb@DfsiRBw{?oCKU)lET3d&9C0odzwhVUmyGo3l`=Cm}t{*R#&K$*D>RffXx@$ynG0 z)BSsq_{<8d8=jljzEsb|mk^Jp7XtZPgNk$nG_TNRM+R4p;Ps`rqQ+hA(l#{DeVnvq*T|5;5@P%y&Uniz%l$;QbPic`q9oMf7&Bh#M0zRO_w`t|I0#BRJe~U` zjalGx(d3WTWj!Vy4w{JODk-txlR%F5eBr%V%X$ut8hSDh2=@Jbjz$SubaRnj!{bQg9!6sv6hNgE#?5XKkc~ZU6~2yT)&^T#nR*Mv%(lWIjBP&m?RIj zTFe9uyPm}qwB5$Q@T*?q53?eKiw4$!cgA0g%5`e;pks+!PTqul#S7+)53AN~GglZx zJ8MH$7|$9BPtcSbG>lIR%5zt5a2-hl5^aph>CCtW4^VVt~V*(w+!tDa?+E!>UcT;|*mn00?J&$ht5Ao1}jc7NhbHXS*iJg|K0D^=*?; zt=b~~67Ecr^!cDJN`I`pgVOzfufX0*JS$3cRrE$`oWoP3?r}r?SN~k+-L)CSX!ZM) z;GA`pI9cYT14@3_c-DR_TiBjf!2g#8&0ZgqLBSTnmIQKap~@;8MbTV1V>pD(|48j) z8GimJnLa|OO4Z+nlNh6(d4)RiR1)C$(x!cDx$Rvt>NGKXm1qiAAKvr3?R_nMZeI}t z!O0SS|?s}(F zheGpTkuA~JSeRA9SN=0s4&8qrSvk|uTUd-uSbi%fkNIrf>~s2v_=A|n5{5-!v#h%x z$kX43UzCoT6&8g+4*U1(2b{E4E+s{BzJDla|3?p&zvC=#SN)f&1!Faw+{D8d)*lha zjVGHSX{xG7p&}Vf^=U;N7$NH+JX!n5AO1BfM4dhv;eu)h#wnP_QStSqQqlw?qbExTPukvQ5ENUzT23e@JJYGdcT^$(%hmx zC_25O&{OPrm1M$AjH;z#hXyskE4d2j8u%GG{<)>qF{xI$GYpid%=-xkQiz(FEnvOl zNtsbWDl)PA$1-#!bf+}wt-Htf$lulo`ZY2?_Bb`BQl?G;+_UUkBRzhDs)BHH4ts=$ zD?oInH?6@5BJ^dP3vSHUZYOj$i%H7GbZ-a)b!nnAf!cyN%WY#f4Q#T&6j1lasAq|xu|kB!EjyDh%{$F}~XnS?TZeDXgH*h}^i9t_}VnmZ{b2maXw#!!*5 z^c;W28c_TWbdXI_A5~g8a+3<*9aocaGS;8nORiU!(%D$^BUQt|xAB?8^hzk%R`VD| z8bgGlyS6BD@)n;ZV*tbP?M`uj!)5M#oS?fH;~monqlo`IBcWfG zSarh0b|L!%-=hARllrW{#41vW$q=E5*O;9NZPC2!s3=RkZutnE1=L^F*=nbXe2iu< zpOMAq!PlK+YyS(dywbSe#5%7xXQ^&Dnx7I%@Y+|6Z{nrlG>vQnE8jLhi5;czpLJx~ zO4r11bT=14i5359s-*qGL1V>+YT=H=UiQzl)3K8t0T|_ZW|h#lN+e#0?i=U_p<#XK zVy^5ML+=opsTGyz_$@;Av{>WDInZBa56{ZqIoR?1OMbX1I~oJkN{xCfQTjI_(Q7a* zF9Tu!P1!~s&MeU~@V~s|@f|D>yD{7dbZW@R`j^!W;-t712m>2FCI|yTZm54m>+md9 zq6vOue)15+D7;O;OhYT=1_tDXL~N1~9Nl>}EjE^Wv^qdp@bBkQZMellPW5J>hJs5n z)oPo@e+2{`wdrt8+AFx}zd{_gfHc$-ptVJM!>uOLl>YnDR;?`kXIQGgM*!a1IWcLY zz|#ziW!~meEWMzb4UVtp80WI?Nuxw2P`O)$1Zw4vI6(Ng>fiD6-80@)w4Y7=pppV z$)AZu)A~Oe#xlwzN3qU6up8<6|Ik>_IMn)&uJ^I4dq%)BwzaJV2)!~|swiaApwm9| zN!O!KZ06Bs*wQ&pgzQ^56I6(sGLE6GCNLDCEX&XK1OK&%?IoT6su`5W9{uCfMkjZOcZEJK{TD0Co6)8u{2f4S)z?6LPh-yn?6^~5Ey!k z#XRZEbS!7L7+$KHr(;Wy z>1K}r(H82#YV$U)s@u$Kv`8`oPm>H*7!KNx2dK!4EHt;G{0ad1d+D$F-hrq=8DoU~ zdukM&*yMv|lk-L(&=s=5yj^oSequ~M!YbqhMsM-bG0#)ul;&6GQ*^R?=#^RkejKkl z;6ASK%gMbqWZe>$hvdFUf0xb5J0EnIW^@-9_S2!N`kRzyav<1<1Ds<7S#y7{FCP}q zm6>W=6UDLp&v3>bAQ6H&Nb@zM_X4sgR?O4BszIp6{hx*3Q&qvTKc@5ATnh3An+7so zH!flOe)cJziPf-Zu<&9fkV!?}F8=u5Lg5^ISg#O;$1FF+WDx%X%oB8p;C4Hj=7c57 zBNg)1S;W|14AXiAGWW=2vG@~r>mZZ$e(uaFLRN@G%Of&Nfi)gp>ix2W7O-!KW~??rJR`B-NV$- zYlm%c4bO{)tgG`%{}`TzB3$NoDv<`E2eK&1*9m+&7E+J}W*XAE5J$kUO2vwhja*~P 
z`)-E@93BX#;KG!OSSqNJ?Sk@z3tifI2JUuhTft zdI2@+(Q*#S)w)Ytzct<&R_k`IrVW<_d)FCEBBluWcguDyy$J$V00n$FLLp%iZ>cOyZU2)Tty0&g#Q|7&_J0MOfB+ zaqTqSm9xgw+a(4vnPn~n`wv1O1`s>I+mELfp+gley;q}D&kSHqnpZK@CZ@$d$`ZwH zsth(#xCiKuxbAbe{3A#hOMwH0;A)5(Z|Qh$*Qdi|$uczDktDm|?N)C5!0)Lw^fPvE zO#SS@AJDSWm)m=QT&8`!?5bnOIR>0*Pj}PsS>B;V#GG7)@KrbHc#3B|O^c9=-l1t< z%jTX-t5)Uro82q)LV}Hd)%{6 zDO1f>J6tQs;$fprtCR+0&I|a=!3}~0MW+JdhI#6(b%>>Ay%C;xl z){Kq<{V4rtelb@;MO_Y<%o4b_^Arh;Ggn@*lH)@l8{sb6sY({qQ4Ou%m#&|J-kGF~ z1r!94p+Ql9&1NzY$EJtNFR{iv@vO$wV}%o9|ELX)h(-7rUOQ-^pgkWz^YY7_t=t~| zjP{7ljIb>G2UWD4SM}lPFN%y4E0nD!~BP5m=KpklR*IwTN zS&txNuNu_6-+1jOm;@eFQ&&t~q zBcp8a+oAACoGPhN*tYcfPgSZp9f7a9?hBf;e5cj9-2H0Ur&SF~r%w$?=sX{N4*5eN zmtXvGc?^D;OAak?rR8K;J(cF>Br&(`Ssg=5ybZl~i8j+d3cooI5nexff)D$Ga*bSV zys!SePFMJfbZ4U5Z(E|caH=Y9IV;_avj^Aai0aJ!lhrbX+euf z0Lz%ZzC6&hVP#D-y_$?&VdN&X4 zM$$iQ z@;L4{E@s%3pnXx0Ij!w7B;>+Gm+1p_wM~sH&XrCn9A%4gQO@qA$f=X+8`mlX3M8b|1-`GH>-3uH ziOGS0p5iv!Z}7h?#i3Jpf}u@E|IQw;0Cd}LoB3xKx}K{U3FsSs=~5U!^@@{`y)!np zhm4-j#qytpAfhl7={8svpVo9R&m2}%)?^rhGM0*BXp|aHKKyBe5)K!ZLl<}wAq+bG z$#eD^c9)piDdEg=cUfEOzhC*}{5F2K@kpBBvF4y{ARqm~g|4mY0r0dP&iZE~{4{H( z)de6bFU-D56|J_bN6JO+2NMTLh1PG(K|Q6fXH;Zuq*ajblYIQy;Z7>3WRXrPTSU10 z-y^}!henaO1gdfSMHvUXM%3!iou~L?ygKfnoQcTqDf8+|_ zur0v!>4_j*S7k4HZd%Vrcqa}`SyoYULo|u_QbPVQG?Jm!km%nLoc<4u;4G!6VaoDV zHJD#6jI-nea<9QEVL@g2 zy8V@k6!3QKwrXP}pZfN8XoI%iZTP*KYmc$_*Wfz+iO|2fZ029yB zgj9KkEg#sV*~|z2cRV;@#dBRCqvO=BCu&m0DmpTC%fjk+e$V*4tKp#gTF48F&0qJW zfxhD6W83v%UfYuy8%f6dhV3w-EyaEePcyYwm+Kwsn=sHnXnoDgJNq`_Bpx!m*AEw0 z+rEAEpHIsJn<0ukf0LoOCaZVe<2fe8B`mm5jdg({H$q)NJQHdDa}fJ=V9ZPH^HPpaoGd0(X{0_z(pVbd(x#qQ#pQ2-tUvuip?yM?v>OLJG=D%pA z`t}%ZH&>GBnB%b;ak}@I+kOIA(9|w=0=(We9%R@RJ$Bv-wu*L1DpQrPykq$~qdxUM z?ClaCP_^XM>H&o#Kij+<0Q^wUKZg}9XzMDHQvUjgD{LdA;dz_6`I+O}McbjFfl+q4Kl1OUMMDM> z&*?yDP{_Eq!u&;f=M{XnPVt8 z7_UH8*Jr<@T%jz(uESI4pEVEP{uO$-f;;YpLsu|!QM|v+HCjh10YKPj#Xl}~t+4vh z`Sg4+8N*aAbwL9nP=R7sKlcO(Zb;on`yY-i!$ zNaD<6kor4G$H7jm-;Kzd8el@OzPvy=$MqOViu2>Q?*Up!yP+&eQa*l!_Y zySaRwHi8zHf^)s_C~QryX^nV7X%j6|h(LrBBNrFD2L{qr$n}>TnNq)VyJP5YN}<;8 z{B-uB#Ps_K6MAVJ^0kNxG*bjMwHrrU479m@%g#E;&66W7n?4?$e;qy58d z9&?iO%2yXJ_y4S?@&k(RoR!}C*0-kjzyJNkk{YSBSr9fhFL?hK&mB|=W+46-EL@mw zz{80ro}7g}kS8i!KjQ7iOE0}RU3blOSkhoyOwHdJZ=#|trA~cmjWyOx8*j2vdi3E( z(|?_QdV1jg`_n(X{`F~-mu`Z}ntwh1jT$*BZ8>R6%#c_K%RDSizrx#?N8`ut#v5;f zB{$!JJ{p4?9gEYv=jNx!AAce}@W6fPrkifYL$Wz(+ikX$*(xu`k{MjbcwnhCxYhsL z-#H`w_(wlVpZ@fx(<@%_ipq>eYcZ>+;^qFYl=>|cTS8$o(Eh*gzWdY1Kk@Oj<(6CG zhCp8P8Ovtj?oGcQYvEx1^H%~>xk6D3ed)v#PQ*=_%hH&!W7Efu!bh^P#M81<@jn|g zC}?BmOnpFca(+M&ZJ4hv|HAukY=4WZimBzl)X(sBUgSbyGtmB5JNdfd1-SlPKL3ap zf%rMORsXuS?UHJfU;N82(KggDR@aOeod4oa)l0efkE5#&qw=*}{%d>IdgT7?`fs#R zX#Z!P#SDsR$EPbWBY`t04v{6E#y2b3Px-&$`lsRjGCZL8{1?9{Gb8uk?~Q4-)z-jL zK}*uJ^PWvlKK^9759gFyZoV}=i^ea8y8Wk38~7 zy6^`VrTNd!OK*AWThhb{6Q!kmi!ELuvsKPH_uO>$nP;V)U$b-C4of5PJ7%n# zCp7DZ8;nmACyvKYj3G%LP@MIhbm?W&(;BO-k~ZCB({%i&k54Q4%Q4zAra%4i%hMOX z^kppVwGb~kEKOg)L#l1I-Wp=v0130!+m z*=Ju_;;GcJKdRK_EG+SK{<-I2sjgx8hSkyX4T`2_Ro5)6c)9)+uW6(z7ym+GGnoFT z4$}*J{<9IOw&PZ#)vfr&8)6m%%0*B}6Nf>bE!q?s(F&^3q0;>bD-nigUhAxk!wOKs zVzdzPUV$KLSR(|x$Z4=!#bCrqCj=c!wSP!@-nfWbEvJDIa85cA{41~a4{?~P7mR|e zZPc<@0?tOSsIUxqsFxJ8O54BLiYW4$N`0b~1ixsP%;;Y5djJ!x~7;cU;|zng0tovs_! 
zJ>*8)MN5VeUKF$(bP1QibFmYR2urgHqJ}jhDC4g-(o_m%C!MHOQEJy8*nIplnFdnZ zP+r$I;GA?q6ttAL$Od};8Q9pTi@cAxTPK2lAv@}?{PwoC(UQA3^po?P5VcscHWpqK zxC1byKjQJkg&hQqFxg3+bZt16qJP4|X^M&=MXkVYkpXAM2k5`&saJdiyDguQk#WqJ z#HbKbZ*2doZG3p@C=-e~tYQPUg&cChd)nWG`4eA2&)r?lUM}5dYbw+UoEhW0DB_ z(i6CG{dGK`_|?@{r^63BEWI6Xp~dmH2y0J2h#%5?`~0Ur{TY@cn43QE!GB8o@3)`S zmG^jWzV#;5bOp?^*eI-n--kB?=g1H4S-3&JaM1$Xv>1Y$@vEfu)?E*8|F0?fY@w&| zfZ~Tg`f)nztTXYW{B1IW!kUhtM;?A8J&Zrz6j>^7ZH|(+0N00|k&SitIg?=4lTQNw zPS$l1)V~rav2>zbC?na%Sm7Dr2;YL3Pq1FFM zEQ|2iV}D8G#*D+vk~Q)6D3;L4Z8!%v`5$@cA^gaEDlJ;FP-LsEx|+OAIu{UGiCrv1}FZ$AichOU{%Umoj#U4O&%>Gc0P9kW;#rz4IyB2AL- zasrMkEt@hf+DWt#SHpTcaG)kIDJLwWkCS?QWPz`pw4Q0`&Kjk3F6qe)QqQHPdN##Sx=qX3h8s6Y|516=bQh`>}M@>8GEO zZn){s>HSB(KW#j5W6a7JFW({I+SpvW;?YMR!9%XcF=GffVGzTZF=NsG>tNZib!AO! z_CFJU|FN$3Kky)Kd;!1cIV@>0JdMLcmUY)zH?6k%YI4&{YpYuGusqqrk35P8K#$8e zU4~+bDK1~ZC9GD%(q!EK_^p?RA9^_5c;gM}%U}Ld+IIU_rbF;D%@{lsnlN#KylzCB zc8w~n>rLzgL(Hj*XEMzdk#WqJB;=xd0+x8X6f-Esjv0*`4EP4cF1z+P{!zK;8O`yh z4=AokleeCfPCDsinK?tWrl8}b69S1P>wn=@0zj+SqcFSb%mU6yCzjzKxSl8A+D51A zrgc{Vk@rbL;+ciZxML+0ZE44MS*MUm8b!&@Z7|}b6QU7IJ=?!&+kn-WXo6S{)}3@h z6k^HDlGpzIQ6T}ax<|g444WWx(h0#yDYMjnIsS!i3N}gBHdMF_9>i*|#>G;sza9S@ zj&m_;MMRSy%9(r?OCC)-4i6}1UV(2=?4J%f^w2au&!8BbfBLmP=oLfTf3xiZ0>?ky zJgP#lg26fai+Z15ca;6o4Ky#A!T^f3&x5m&}d>J2tnALj!7Rz(>yeKK?P zA)aG!ZeNI5Agch&*@j#K@bSkVOT%%FUuUhgvBctvi7!q(jC1^G9HS?UpMXOW>#eP} z7~g+*@cswW96XqL>82aYOotq<%w~K9*B_6d-uy5+uZ>2J9j(_rxbCo7i(T!18(suC ze%f*JO@(R5x7`}E7D{u&y>`?$zWL4c{qKG^z3+WTrakxG1LqN5bHo&^Lx1=}-G1AhxSkuC9>?{-i6@?z zw%Hc3h7^RK>ilLJR6OK~0XG_JqyzWW|rS3D!< z{?#!fhRaNyiscD^cJa^Chd%Vdw8x%%VFm?e6hoD>cAmg2gNO0ZW)7AC<xL@TH^ty=xtd~%{H;Wos%Od2( zSm}j`D;9++MYkPUk#>L+>Vd`RHI^I-Lff)f6si;*8d=TukFvH!H_=I`ZeP0f*4yxb z%nQ@_i4)Snd_ZBrXXDWaX(oec$+l*(f4QOcod_*)%rbRdR5ty0`=)l=f7B(yFoh}z zY1>j1birw{L+m)(_Ie0+S!lLX!3c#-^P=7n9kSpYq#_+(3tOdTw9+5qyzD7diuNMi zGB^PLQD5mYqNVI7!Q!{A)Ap0wFX3) zIrfOBpjlS1@rzWbQgqG8QaWBu8yHD$ z3jYdA4*l_uf51(Or_s`w?T8x|W3Z&nD_*f>+I6>G(k6J*pJ^6uoc{zj&A;=V@1(cA z?QK4Tf;Tb#eEpx(l~-Mn?!5gD-0;9hA@Oes_#0r&YyK$PX1ncVnIxY7`EcUyd+$zH zU3pcy>4qEe5NIL%jmFx^8>G!P-y%JOx6ZG{azs<6Ou^0ho#p2BH@^OjbldH>rKxxe z?Irlm#!%d)wCmts|LWK2vgwzlEfMeTyYHS>UmZ6*;OBn)80Lo^|MZ7H$n1zd=x`1K zm$%w}`|Twz{(!bbi}C!1bgYcX-mTeseWuP#lGCSiCa*+oz_bmGNx~n;)P$2p?^C)m6VpXME?| z()WiQdRThXTMm%#j?8;*UgE6SE3x#%op;_PvoAyso3+Ha;JS6+wZ-0xMM^8!}hb%?A4j+OUB`;08V9o4JH{BHa zBSrQ&9vof!`)kwhe*e4l=wpvzatgn?(T6ptSC%(;`O(OU6DMNr?I+VE7hjUD{N+{Y zMvSF(u}sO!v4;0X8*hXMa)--}5<9Y38+k(qBFdC59HC0lwVL}M1-5^mfDc?>j<;0D zjK&O#sUO1(irsR*x_VE7uI6HY{4BmfF=NI|d2{*HubwR5jfqMzq)11M(kiVn&64dE z?OzLmtl?rdvLfB~uPqhZE5_Bt(-!-T#{#9$t{n~{OLa>7i=uB78SyUaAR^lBHkDD1 z=kunxHmlNsV5mnNdx3MuSrV#4IkF-x>eWW21=B1WIPx3lQZ#Tio;$>j(;BUif7e#f+8tr2hmS2x?#>NRPnYkg`;-MXq7UKZTe8HTz577&y#OZa0bQ9wEzD5;~Nx* z<|UpK8S%v%sajr2l<75CbNmy-WXGG{wuw?{sS$a~z3kFU(-*$*1-zxdZThDV|C229 zRQ#)To|muw<*(BjxB>q76OYM_VlKDyEFMt*{`bGf@%Q5N${lx1Z+Y{Z#V(f{`~%Jn zGtdqX-1h+b9iCxy3BI#59&h~bzWZ)z(pHoFP5SbH0!uu-`#ov1Ew;dn!6kUO@tB-5 zuDSNQ^b~HEr=dI|tRUxzNjSG~Mkv4OWar-J@G#?-zy4L?d41MXPh$Q4VRG&8imf;c zaZI}YPk+MA@3nBle=m6`el5;521GxA!5b>-dPVR-Pd=}TXlUW1ncwm|%R7*kSd(L3>g zV%qV?$;%ylP@xY66sagZ{+{%eFX5W$r|DllhI7YmyQk6kW|en#Hf(qn@4ok*bQZ3! 
z?!*fj|A2P+>Cb+O_SrohdE^Hm1u|T}^Jf1Cj{KnX`D5tMyYIRyoq*YTvcwb63-J=e z?YL&T8tr%s>d%KnL)r4U4q5{b9d>x-4*2%Pj)@N-I6^q%=8;Dq#W?u6To3TY5H91y z>!}^^GR1H3EsbCN@|Wp@@VPf;7jxM|z8J&rpZp3wZoc&vJj9xZ-K$TQtz7RV>&rJ6 zw%c|)nSsc9K6F23P#km+9#D+M0}7o%QT9ZsVbwgbZA@D8V6>4;uDO-+`Ox?OqEry( zL1ojY<0ILpo`Q#Jm_acGOFZEt*}fl}dQ>)E&M7+Q?6a`6>iM`n8jfW>WzpC5002M$ zNkl0l$ge9JAXAd}I#kQchlu1!FA4jrof0^P-fI^j`tH$F=Q^!F}cIzrQgqKZ8 zn_1YE3J^V}PW~*}0YFjV>u#+42J5JowZWH@PBa4$f5wa` zvt}`MVT`A_X#^`yI$_m_HEp_pIpUevctw{S^Fq2#SrQw_Rh_6+8n&bGmRoO5KluJd z@_^zXtY;`g2;sOT^HUg*@zZMpxlgJri7S6s!` ztlCxJrek^AW6d!O0Z)?28c$1M8?b?^q{%F=xpmV4gZ}8ltDIb#L-!ltO*Qf=KL)6n zxsYW+mmC`P6JDj>qyGKv{~lm=N$aqwPut6nw0O{|ToyHnOCzg~S2`4eRv=3QRfclG zfR-z}X9UW4ZFZKYw7ljz(#3X`563I7UiKp~xF!f!j?7G(%yMOyw?VbG!WBr||HzGXjW&AG)Pc;~ z!f=*T-cQ8U_-9*3aBZXbY4TM~o5f_7tE?_%ykxolVFxK>)@mx_$5ij>YzDTx+rRnX z+vs!VKAZNLvR9fv4-Y7?oBGI3wzbuaiaWyJr^Iw{NWFNAaAr!nmk$FteZW1cKXxxH{js~mMwbS>(jg6 z`)-+?@bptpryt?t&YVH<=C}M~I`aKT@OzKxp$8vMpFVzCy666T)3%ehOPg-C3En;& zfw$!zOuTu(oA>W}_q)@p@Z)S0exUyaKZ4IY|AI7q=8QBR4<)w5jfauAG4S9+4`C(* zZepX#x8HGB`rrrNpWg6>H)83ddFiNsor+lnzsJLoGw={&Gu*H)OH*8Q(GSyi&O9T% z<~6%WyR5(dy7Hs)BFui^G6Lhqjg=cId^_|Byh(cVO*cUPRND6q`(kFoA@cTNvn{zt zo^tY6(?u8l5HlzafZeyt$E%xr4VP;={q%3A)v+AI;qQDWW)iHNo|`uhYfTT4o7$^_ zKN3GqWo)8v_%P?{tAB$BEl0^il4;YXN!xe%pNFMJzVY>Mri*@fQQCLPzUc^j^JCT3 zR+bwA-}=_+>4xjCm%iNuH&ljU`KS9IxE~+${4LskSo%DcW7y=Sn zfSD5W(4Twa=EUmqgL~pe6Vr6e4mcD2xd_W#ZN1GlGP7ji!UgFjEC+HMt{Y$R@-5Rx z|M{cxR_Q`~X!|<6h0C`zM~xbd{@f8CWM4PU$H%vCzvEWC_4$XiHNJoHfe##+Hoy#x z1BjH+i`p`jvQKH3MVfTHF+y zxX~u*RXgs0cwT~~%J#!8Ui^N;xUh3r)m}36n|o6%8=Z6A$^81H*n(V+nPvYw;WHNiKK7Jy$*&To&zWj+EO2zt@@k89d0HcHF1al~+v148%93Ll46PiVZf*oi#ZB zT4m~3vN0yn7|$G42o^GOCSF5hvWf;c`TkdTXJqX=bW?B zx#ymrUiZ4!%6D~^V7BAM7ym5%=YReuzEAZAIo{*B0?xx@12+#`MTBOghl@J2rOl3)Dd7wJo1ISDt@`_i`CZ7)kD&3<|| zt}`CQjJ^5k{(J9FTj9aTJKy!rG-1L7EWvkq`oiZwhvVzy^dF!5JgyDOUiB1aR~&cT zGS2mT|@-80jy*-xb}f8on%>^Oc?jDav)A)Sro zgHAvFbX-eJO*5~Uk#4x*27N%0XLIno6ue&gGnTL1c;k)bx@o0RW71!+9O(6U34qsC zoXxQR{`=vYVYqyE?R#J$I&m{^&>3o_p+t zhYq9h0OSt%I1|3F$4tkK(&n3OE-yE5Ht)?h-I7M&MTf%=KNMr_brSXi4?Ki#P+*BC z*yXrjbykq>@&*PE;~&iY3ze0X`s&USaAMKQmJ73Ngl#g*Ra}>f3jOi;Hv=ywoO;@6 z@*>8Ry)c8~*kb_7T1{p9{n6l`jRzFxVTq^VXosm-A~MdPXh1^%VgI-Bg0g@gV7Sf;$%{?| zVNGI+RY;f+6iQmi90CKWFvAy_fHF=MybvurT}#a##SNP*;}tHER7%vT7#?RJRIy`Q zX`+tJa#6wa$1JqG$SSSiHt>ZE<*ZzDn*NK2*dF4)QnW(WXHi&As3GSSyTbBffBw<< z8BG7Rsb~c@yy%2`Xc9G|;Xn7c>HwJyoBg6&D8YKsiKbhxK#gelr_cc)GHw2g(ITt; zW6T(+O^ChPV_hoBG*W!vMA7%@<3H1P*4bxe>%mWks(?;|O^+d~iL}ramh1|TLTmps z0Q2le$!yf*x!938LWo}VpdtVn+}ZwO+W-o81(9*nZi~Qyn5Aq(;jSsR%zS_-pus>< zISsymg9pc%n;;{rQZHbyzsrrtxn#L6CNdX|ycP%l6u2WQfsw%#TkO?V4CezzFkPFa zHl(uc9J4`XyqWFg@gH<)(~4$aSh3E5ZsC{5e>{L?=ZzSwsQI$|XOt;n-22U{%-*ors<}^H#h*5b0b7C?L{YwtU)Ov`-S03Lkh-2;raG|Wt@SS zZKGpZ#1!NN<*e`ZH<1af?0%HMTjYInX3y^1`BgjjZM*F@ec${3_xmuTLeRCxL7aao zA6|e>&|Y-HBJ&(K?)#2A?)bhv_S&oOoO92~{1T05=Jv+22z|#MGqrE09e3)(&F4OR zNV{+0!i9Z%?YUPU)(-Bw`|dkM|LNIJ_nmmc34QN==ezo5%$VMX<(-6&H|hGWm^riW zpo0$Vd(V5`(|65v*9m_X?4Ewc8GWyP?Q8q+Jq+-c^vz$mpzrHn`+6TfyxI53PkyQo zOHpCyF`A`)Shh#-d+$9(WOv@FelNT1|Fier0b3PE-|$lGJ+UND5)~6m)EIkLR76E= z*f0t<(7cJU7gRJh5L=8Qf(rKD6}y7iJ8BdaH3FhoVwYa`{rzTkcITYyx&iaN|9#(q z`|RxWojvW^J!j{!h&MMQmK%zgbja<;KuGN0zkl3v%PnK4PMsnWf{R@&zYwwRaoljj z4I^?hM0^b*cnb30J@?)lci(;Yh(Y;?AI3t%=lQW)w{8)W266b4BO>0I6g-wNig^1! 
z_Biy=xYwRt;;6UZ&h`~3eQ;U-OXD`%Y!ffN^peQ4IA-fJOFlj9@aAs8FF>R zfJ8j*_~YV9C!P>5x%iSee8h-hF4v{#}Q5tEtlc}P64`+@P6TW*OHC;gcC=fqKOy&aD_^2pew zOPBcIgAWKEgFT4FrE#mRwv5+adu`T3;>3v)<3$(!BOZk|z5DLFcyV@?#J1<*hs8a+ z>=p07e`uV9e&jX_b}?uoZQ5s_z2ngPp&vfdJ_fR&f2(-jdFQ3LRAW-FRqFLhs^{}R z(=z~Xa0KP50anJ?=bS!q>#etrop;_j;tk=3{x6)Oip@(KC{T+G!8}mRQr_j29vg|Gy~3J6|7i9 z!>V=93tgtZL2V6@s~K>LoCT1T!}uJH5{4G(%dR+2acESlK#0@swAfD6boH*{|kJX%VXPAN&4@BIQ6}{#7Gg0G7^;|U|l~y$| zlogB8|4Mx1(MRL<+ixF{+bDiIf%iZNo0^)Q`|i6p?y$oS@u;JYj30gUQT+b9AL7k7 z-yFB=uuc5O#TSdh8Mt>pi~G!M9l$9LZug?qr9_`-`X#sd!M9`OYto-u}}c+HV}_(x;LM7$snF=!j{1xK8X zd&bO}GvL=za?M4u=*W8xH$GD!j}PwW2gi*!-Z)}nKaL;&u{80q#~zOzJ9do6A9uXy z5CKxqA6Whg_t|5`#;8%F;_NxI<^FTp>8H!Rz5k{CY7YM%-q{F=IcH`y8JU-}=|T;EuL~u^&2gs63Ock8|QqJdin z-o1Or(eI5GA8827bb{~BxCfpejz0Rhh$Y4(chU7BJgZI(7$wsxdG&i?`N` zUR(w;N`bk@9}^U|nWWaOYt?X-Cvz6N#vo;;C#=;~r|zq6kwdwRxBT2Io>YvmV$7ZZ zTpew7?Hsktz2Q~7?vYVjvJblc_tpij9|WdBHTu2xLGh-HGR9f;%s5acLtc`wuan#A zS~lOCHS%+_y7^%`l$!QUk;R^Hfydai)Yb@Gc4E)?C|8EN%#&o(7hWJ)AP$2(@^9DQ_xw9$$Nycv3&2ncjZCr zMm&V*=BNT{o%R*7(rc`V)<`C2Xsz2lqE00jafvGz#y4;KNyN;VrmWNY!!(kyE>p)8 zt9m3>`BM#r;jL_@+h|()J-KbGbD=IMJrW2She{aT{U!ja)6YX1F3+#bh1$5&PYbX@Y`3$nH)P6lAZ8WL0&v#ai(yXgsAfNY$8q z|0G}rzDyGoop#tM;;RQkr?N=WKTfW2uy`{JR9|-a<@oVAGfv0k!%mpo&ja0k%Rc7-W?TBIpvhtb-%9hkwZfC-90135u~ zAG?^i*$0E)STad;$>afiFptLK$8XObJuuL{CkE#6BO9^Bn~V|3hU3K-|0C*vtu}*$ z2XDObFZtoT69$AIewcRW$lyrN!w!=PiuW)5GD|I!yv~F7&N%-vP&Y?-j;Ke6AaICf&!CAM<0D8CMdoY zO;pR6sd_7;#DIOk!6Yng5wY%c{Njr*;!7{S6yJIG9SG@Q&om4uv0cM3(S)TNWRM77 zMPz+@@4Z)S-+tTp7A9n@2^PGU=;_~ofE+V}uO2Ku zap%JR1t*&v= zn&DKEwS>uq^@sVPoRxi5NhTGvLe?KzOZ&o<_6fS*ALOWTRVpecA_Wx4*h-HUJw~G) zzgg5L;W~8u@tC04V8eLMIp@TW(?EDdu@j^g`!lp@)>8Yq+S}wbE|v2AU#Dd53ZYaU zb`um6CQQhhmBa62#FBNGu-;o<5EzAt1YWl}L9y+2+s2Fk`A_j{CY}W*O!y3gs?Wy{ zagWf+1Kb;?O^;JC$vSxOV0o4pgy#$7rx1H1p27shfjU9Kfp_xWiUIbmx85qA{`b@I zTrnfJnI0jh6b5i_j2msTG49iU#016Wxc9vybmXv*`%>p!I>W~MVuSn^|2N-! 