From 0e2ba8bd1a3a04662dd4c92adab8908428742b14 Mon Sep 17 00:00:00 2001
From: "jadenfk@outlook.com"
Date: Fri, 31 Jan 2025 12:16:35 -0500
Subject: [PATCH 1/2] Merge 0.4

---
 conftest.py | 20 +-
 pyproject.toml | 28 +-
 pytest.ini | 3 +
 src/nnsight/__init__.py | 549 +---
 src/nnsight/config.yaml | 5 +
 src/nnsight/contexts/Conditional.py | 111 --
 src/nnsight/contexts/GraphBasedContext.py | 465 -----
 src/nnsight/contexts/Tracer.py | 192 --
 src/nnsight/contexts/__init__.py | 91 -
 .../contexts/backends/BridgeBackend.py | 38 -
 src/nnsight/contexts/backends/EditBackend.py | 27 -
 src/nnsight/contexts/backends/LocalBackend.py | 27 -
 src/nnsight/contexts/backends/NoopBackend.py | 10 -
 .../contexts/backends/RemoteBackend.py | 319 ---
 src/nnsight/contexts/backends/__init__.py | 21 -
 src/nnsight/contexts/session/Iterator.py | 110 -
 src/nnsight/contexts/session/Session.py | 154 --
 src/nnsight/intervention.py | 617 ------
 src/nnsight/intervention/__init__.py | 7 +
 src/nnsight/intervention/backends/__init__.py | 3 +
 src/nnsight/intervention/backends/editing.py | 18 +
 src/nnsight/intervention/backends/noop.py | 9 +
 src/nnsight/intervention/backends/remote.py | 392 ++++
 src/nnsight/intervention/base.py | 503 +++++
 src/nnsight/intervention/contexts/__init__.py | 7 +
 src/nnsight/intervention/contexts/editing.py | 28 +
 src/nnsight/intervention/contexts/globals.py | 69 +
 .../intervention/contexts/interleaving.py | 163 ++
 .../contexts/invoker.py} | 79 +-
 src/nnsight/intervention/contexts/local.py | 115 ++
 src/nnsight/intervention/contexts/session.py | 37 +
 src/nnsight/intervention/contexts/tracer.py | 61 +
 src/nnsight/{ => intervention}/envoy.py | 595 +++---
 src/nnsight/intervention/graph/__init__.py | 3 +
 src/nnsight/intervention/graph/graph.py | 433 ++++
 src/nnsight/intervention/graph/node.py | 159 ++
 src/nnsight/intervention/graph/proxy.py | 142 ++
 src/nnsight/intervention/interleaver.py | 128 ++
 .../intervention/protocols/__init__.py | 6 +
 .../intervention/protocols/entrypoint.py | 18 +
 src/nnsight/intervention/protocols/grad.py | 73 +
 .../intervention/protocols/intervention.py | 215 ++
 src/nnsight/intervention/protocols/module.py | 106 +
 src/nnsight/intervention/protocols/noop.py | 14 +
 src/nnsight/intervention/protocols/swap.py | 36 +
 .../session => modeling}/__init__.py | 0
 .../diffusion.py} | 61 +-
 src/nnsight/modeling/language.py | 336 ++++
 src/nnsight/modeling/mixins/__init__.py | 3 +
 src/nnsight/modeling/mixins/loadable.py | 24 +
 src/nnsight/modeling/mixins/meta.py | 49 +
 src/nnsight/modeling/mixins/remoteable.py | 86 +
 src/nnsight/modeling/vllm/__init__.py | 1 +
 .../modeling/vllm/executors/GPUExecutor.py | 9 +
 .../modeling/vllm/executors/RayGPUExecutor.py | 6 +
 .../vllm/executors}/__init__.py | 0
 .../vllm/model_runners/GPUModelRunner.py | 435 ++++
 .../vllm/model_runners}/__init__.py | 0
 src/nnsight/modeling/vllm/sampling.py | 170 ++
 src/nnsight/modeling/vllm/vllm.py | 274 +++
 .../modeling/vllm/workers/GPUWorker.py | 10 +
 .../vllm/workers/__init__.py} | 0
 src/nnsight/models/Mamba.py | 344 ----
 src/nnsight/models/mixins/Generation.py | 46 -
 src/nnsight/models/mixins/Remoteable.py | 28 -
 src/nnsight/models/mixins/__init__.py | 2 -
 src/nnsight/module.py | 24 -
 src/nnsight/nnsight.log | 7 +
 src/nnsight/patching.py | 71 -
 src/nnsight/schema/Response.py | 56 -
 src/nnsight/schema/{Config.py => config.py} | 13 +-
 src/nnsight/schema/format/functions.py | 46 +-
 src/nnsight/schema/format/types.py | 457 +++--
 src/nnsight/schema/request.py | 161 ++
 src/nnsight/schema/response.py | 80 +
 src/nnsight/schema/result.py | 26 +
 src/nnsight/test.py | 40 +
 .../logit_lens-checkpoint.ipynb | 1144 -----------
 src/nnsight/toolbox/das.ipynb | 1768 -----------------
 .../transformations-checkpoint.py | 18 -
 .../toolbox/interventions/interventions.py | 96 -
 .../interventions/interventions_utils.py | 6 -
 .../toolbox/interventions/transformations.py | 18 -
 src/nnsight/toolbox/lens/__init__.txt | 0
 src/nnsight/toolbox/lens/lens.py | 63 -
 src/nnsight/toolbox/lens/utils.py | 34 -
 src/nnsight/toolbox/logit_lens.ipynb | 1144 -----------
 src/nnsight/toolbox/optim/__init__.py | 15 -
 src/nnsight/toolbox/optim/lora.py | 23 -
 src/nnsight/toolbox/optim/softprompt.py | 21 -
 src/nnsight/tracing/Bridge.py | 99 -
 src/nnsight/tracing/Graph.py | 278 ---
 src/nnsight/tracing/Node.py | 570 ------
 src/nnsight/tracing/Proxy.py | 319 ---
 src/nnsight/tracing/__init__.py | 13 +-
 src/nnsight/tracing/backends/__init__.py | 1 +
 src/nnsight/tracing/backends/base.py | 69 +
 src/nnsight/tracing/contexts/__init__.py | 5 +
 src/nnsight/tracing/contexts/base.py | 96 +
 src/nnsight/tracing/contexts/conditional.py | 69 +
 src/nnsight/tracing/contexts/globals.py | 188 ++
 src/nnsight/tracing/contexts/iterator.py | 76 +
 src/nnsight/tracing/contexts/tracer.py | 51 +
 src/nnsight/tracing/graph/__init__.py | 4 +
 src/nnsight/tracing/graph/graph.py | 342 ++++
 src/nnsight/tracing/graph/node.py | 423 ++++
 src/nnsight/tracing/graph/proxy.py | 338 ++++
 src/nnsight/tracing/graph/viz.py | 222 +++
 src/nnsight/tracing/hacks/__init__.py | 23 +
 src/nnsight/tracing/hacks/comprehension.py | 67 +
 src/nnsight/tracing/hacks/conditional.py | 74 +
 src/nnsight/tracing/hacks/iterator.py | 106 +
 src/nnsight/tracing/hacks/util.py | 101 +
 src/nnsight/tracing/protocols.py | 965 ---------
 src/nnsight/tracing/protocols/__init__.py | 4 +
 src/nnsight/tracing/protocols/base.py | 45 +
 src/nnsight/tracing/protocols/lock.py | 31 +
 src/nnsight/tracing/protocols/stop.py | 33 +
 src/nnsight/tracing/protocols/variable.py | 34 +
 src/nnsight/tracing/util.py | 27 -
 src/nnsight/util.py | 143 +-
 tests/test_lm.py | 103 +-
 tests/test_tiny.py | 133 +-
 tests/test_vllm.py | 264 +++
 124 files changed, 8192 insertions(+), 10545 deletions(-)
 mode change 100644 => 100755 pyproject.toml
 create mode 100644 pytest.ini
 mode change 100644 => 100755 src/nnsight/__init__.py
 delete mode 100644 src/nnsight/contexts/Conditional.py
 delete mode 100755 src/nnsight/contexts/GraphBasedContext.py
 delete mode 100755 src/nnsight/contexts/Tracer.py
 delete mode 100644 src/nnsight/contexts/__init__.py
 delete mode 100755 src/nnsight/contexts/backends/BridgeBackend.py
 delete mode 100644 src/nnsight/contexts/backends/EditBackend.py
 delete mode 100644 src/nnsight/contexts/backends/LocalBackend.py
 delete mode 100755 src/nnsight/contexts/backends/NoopBackend.py
 delete mode 100644 src/nnsight/contexts/backends/RemoteBackend.py
 delete mode 100644 src/nnsight/contexts/backends/__init__.py
 delete mode 100644 src/nnsight/contexts/session/Iterator.py
 delete mode 100644 src/nnsight/contexts/session/Session.py
 delete mode 100755 src/nnsight/intervention.py
 create mode 100755 src/nnsight/intervention/__init__.py
 create mode 100755 src/nnsight/intervention/backends/__init__.py
 create mode 100755 src/nnsight/intervention/backends/editing.py
 create mode 100755 src/nnsight/intervention/backends/noop.py
 create mode 100755 src/nnsight/intervention/backends/remote.py
 create mode 100755 src/nnsight/intervention/base.py
 create mode 100755 src/nnsight/intervention/contexts/__init__.py
 create mode 100755 src/nnsight/intervention/contexts/editing.py
 create mode 100755 src/nnsight/intervention/contexts/globals.py
 create mode 100755 src/nnsight/intervention/contexts/interleaving.py
 rename src/nnsight/{contexts/Invoker.py => intervention/contexts/invoker.py} (56%)
 create mode 100755 src/nnsight/intervention/contexts/local.py
 create mode 100755 src/nnsight/intervention/contexts/session.py
 create mode 100755 src/nnsight/intervention/contexts/tracer.py
 rename src/nnsight/{ => intervention}/envoy.py (59%)
 create mode 100755 src/nnsight/intervention/graph/__init__.py
 create mode 100755 src/nnsight/intervention/graph/graph.py
 create mode 100755 src/nnsight/intervention/graph/node.py
 create mode 100755 src/nnsight/intervention/graph/proxy.py
 create mode 100755 src/nnsight/intervention/interleaver.py
 create mode 100755 src/nnsight/intervention/protocols/__init__.py
 create mode 100755 src/nnsight/intervention/protocols/entrypoint.py
 create mode 100755 src/nnsight/intervention/protocols/grad.py
 create mode 100755 src/nnsight/intervention/protocols/intervention.py
 create mode 100755 src/nnsight/intervention/protocols/module.py
 create mode 100755 src/nnsight/intervention/protocols/noop.py
 create mode 100755 src/nnsight/intervention/protocols/swap.py
 rename src/nnsight/{contexts/session => modeling}/__init__.py (100%) mode change 100644 => 100755
 rename src/nnsight/{models/DiffusionModel.py => modeling/diffusion.py} (68%)
 create mode 100755 src/nnsight/modeling/language.py
 create mode 100644 src/nnsight/modeling/mixins/__init__.py
 create mode 100755 src/nnsight/modeling/mixins/loadable.py
 create mode 100755 src/nnsight/modeling/mixins/meta.py
 create mode 100755 src/nnsight/modeling/mixins/remoteable.py
 create mode 100755 src/nnsight/modeling/vllm/__init__.py
 create mode 100755 src/nnsight/modeling/vllm/executors/GPUExecutor.py
 create mode 100644 src/nnsight/modeling/vllm/executors/RayGPUExecutor.py
 rename src/nnsight/{toolbox => modeling/vllm/executors}/__init__.py (100%) mode change 100644 => 100755
 create mode 100755 src/nnsight/modeling/vllm/model_runners/GPUModelRunner.py
 rename src/nnsight/{toolbox/interventions => modeling/vllm/model_runners}/__init__.py (100%) mode change 100644 => 100755
 create mode 100755 src/nnsight/modeling/vllm/sampling.py
 create mode 100755 src/nnsight/modeling/vllm/vllm.py
 create mode 100755 src/nnsight/modeling/vllm/workers/GPUWorker.py
 rename src/nnsight/{toolbox/.ipynb_checkpoints/__init__-checkpoint.py => modeling/vllm/workers/__init__.py} (100%) mode change 100644 => 100755
 delete mode 100755 src/nnsight/models/Mamba.py
 delete mode 100644 src/nnsight/models/mixins/Generation.py
 delete mode 100755 src/nnsight/models/mixins/Remoteable.py
 delete mode 100644 src/nnsight/models/mixins/__init__.py
 delete mode 100755 src/nnsight/module.py
 delete mode 100644 src/nnsight/patching.py
 delete mode 100644 src/nnsight/schema/Response.py
 rename src/nnsight/schema/{Config.py => config.py} (74%)
 mode change 100644 => 100755 src/nnsight/schema/format/types.py
 create mode 100644 src/nnsight/schema/request.py
 create mode 100644 src/nnsight/schema/response.py
 create mode 100755 src/nnsight/schema/result.py
 create mode 100755 src/nnsight/test.py
 delete mode 100644 src/nnsight/toolbox/.ipynb_checkpoints/logit_lens-checkpoint.ipynb
 delete mode 100644 src/nnsight/toolbox/das.ipynb
 delete mode 100644 src/nnsight/toolbox/interventions/.ipynb_checkpoints/transformations-checkpoint.py
 delete mode 100644 src/nnsight/toolbox/interventions/interventions.py
 delete mode 100644 src/nnsight/toolbox/interventions/interventions_utils.py
 delete mode 100644 src/nnsight/toolbox/interventions/transformations.py
 delete mode 100644 src/nnsight/toolbox/lens/__init__.txt
 delete mode 100644 src/nnsight/toolbox/lens/lens.py
 delete mode 100644 src/nnsight/toolbox/lens/utils.py
 delete mode 100644 src/nnsight/toolbox/logit_lens.ipynb
 delete mode 100644 src/nnsight/toolbox/optim/__init__.py
 delete mode 100644 src/nnsight/toolbox/optim/lora.py
 delete mode 100644 src/nnsight/toolbox/optim/softprompt.py
 delete mode 100755 src/nnsight/tracing/Bridge.py
 delete mode 100755 src/nnsight/tracing/Graph.py
 delete mode 100755 src/nnsight/tracing/Node.py
 delete mode 100755 src/nnsight/tracing/Proxy.py
 mode change 100644 => 100755 src/nnsight/tracing/__init__.py
 create mode 100755 src/nnsight/tracing/backends/__init__.py
 create mode 100755 src/nnsight/tracing/backends/base.py
 create mode 100755 src/nnsight/tracing/contexts/__init__.py
 create mode 100755 src/nnsight/tracing/contexts/base.py
 create mode 100755 src/nnsight/tracing/contexts/conditional.py
 create mode 100755 src/nnsight/tracing/contexts/globals.py
 create mode 100755 src/nnsight/tracing/contexts/iterator.py
 create mode 100755 src/nnsight/tracing/contexts/tracer.py
 create mode 100755 src/nnsight/tracing/graph/__init__.py
 create mode 100755 src/nnsight/tracing/graph/graph.py
 create mode 100755 src/nnsight/tracing/graph/node.py
 create mode 100755 src/nnsight/tracing/graph/proxy.py
 create mode 100644 src/nnsight/tracing/graph/viz.py
 create mode 100755 src/nnsight/tracing/hacks/__init__.py
 create mode 100755 src/nnsight/tracing/hacks/comprehension.py
 create mode 100755 src/nnsight/tracing/hacks/conditional.py
 create mode 100755 src/nnsight/tracing/hacks/iterator.py
 create mode 100755 src/nnsight/tracing/hacks/util.py
 delete mode 100755 src/nnsight/tracing/protocols.py
 create mode 100755 src/nnsight/tracing/protocols/__init__.py
 create mode 100755 src/nnsight/tracing/protocols/base.py
 create mode 100755 src/nnsight/tracing/protocols/lock.py
 create mode 100755 src/nnsight/tracing/protocols/stop.py
 create mode 100755 src/nnsight/tracing/protocols/variable.py
 delete mode 100755 src/nnsight/tracing/util.py
 create mode 100644 tests/test_vllm.py

diff --git a/conftest.py b/conftest.py
index ad7500ab..9402c375 100755
--- a/conftest.py
+++ b/conftest.py
@@ -1,6 +1,15 @@
+import pytest
+import toml
+
 def pytest_addoption(parser):
     parser.addoption("--device", action="store", default="cuda:0")
-    
+    parser.addoption(
+        "--tp",
+        action="store",
+        type=int,
+        default=1,
+        help="Number of GPUs to be used by vLLM (tensor parallel size)",
+    )
 
 def pytest_generate_tests(metafunc):
     # This is called for every test.
Only get/set command line arguments @@ -9,5 +18,14 @@ def pytest_generate_tests(metafunc): if "device" in metafunc.fixturenames and option_value is not None: metafunc.parametrize("device", [option_value], scope="module") +@pytest.fixture(scope="session") +def load_pyproject_toml(): + """Fixture to load and parse the pyproject.toml file.""" + try: + with open("pyproject.toml", "r") as f: + data = toml.load(f) + return data + except toml.TomlDecodeError as e: + pytest.fail(f"Failed to load pyproject.toml: {e}") collect_ignore = ["examples/test_server.py", "examples/test_server_llama.py"] diff --git a/pyproject.toml b/pyproject.toml old mode 100644 new mode 100755 index 6bd94e3d..39676dda --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,7 @@ [build-system] requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"] build-backend = "setuptools.build_meta" + [tool.setuptools_scm] [project] @@ -11,15 +12,18 @@ authors = [ ] description = "Package for interpreting and manipulating the internals of deep learning models." readme = "README.md" -requires-python = ">=3.10" +requires-python = ">=3.7" +license = { text = "MIT License" } + classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ] + dependencies = [ "transformers", - "protobuf", + "protobuf", "python-socketio[client]", "tokenizers>=0.13.0", "pydantic>=2.9.0", @@ -28,14 +32,28 @@ dependencies = [ "torchvision", "accelerate", "diffusers", - "einops" + "einops", + "msgspec", + "toml", + "ipython", ] + +[project.optional-dependencies] +test = [ + "pytest", + "toml" +] + [project.urls] "Homepage" = "https://github.com/ndif-team/nnsight" +"Website" = "https://nnsight.net/" +"Documentation" = "https://nnsight.net/documentation/" +"Changelog" = "https://github.com/ndif-team/nnsight/CHANGELOG.md" +"Releases" = "https://github.com/ndif-team/nnsight/releases" [tool.setuptools] include-package-data = true +license-files = ["LICENSE"] + [tool.setuptools.package-data] nnsight = ["config.yaml", "nnsight.log"] - - diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 00000000..ba9c593b --- /dev/null +++ b/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +markers = + config: mark a test as part of configuration tests diff --git a/src/nnsight/__init__.py b/src/nnsight/__init__.py old mode 100644 new mode 100755 index 6a9105eb..e230d4e6 --- a/src/nnsight/__init__.py +++ b/src/nnsight/__init__.py @@ -1,40 +1,45 @@ -# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # -# # -# :::: ::: :::: ::: :::::::: ::::::::::: :::::::: ::: ::: ::::::::::: ::::::: :::::::: # -# :+:+: :+: :+:+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: # -# :+:+:+ +:+ :+:+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ :+:+ +:+ # -# +#+ +:+ +#+ +#+ +:+ +#+ +#++:++#++ +#+ :#: +#++:++#++ +#+ +#+ + +:+ +#++: # -# +#+ +#+#+# +#+ +#+#+# +#+ +#+ +#+ +#+# +#+ +#+ +#+ +#+# +#+ +#+ # -# #+# #+#+# #+# #+#+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# # -# ### #### ### #### ######## ########### ######## ### ### ### ####### ### ######## # -# # -# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +# _____ ___ _____ ___ ________ __ _______ __ __ ___________ ______ ___ ___ # +# (\" \|" \ (\" \|" \ /" )|" \ /" _ "| /" | | "\(" _ ") / " \ (: "||_ | # +# |.\\ \ ||.\\ \ |(: 
\___/ || | (: ( \___) (: (__) :))__/ \\__/ // ____ \ | (__) :| # +# |: \. \\ ||: \. \\ | \___ \ |: | \/ \ \/ \/ \\_ / / / ) :) \____ || # +# |. \ \. ||. \ \. | __/ \\ |. | // \ ___ // __ \\ |. | (: (____/ //_____ _\ '| # +# | \ \ || \ \ | /" \ :) /\ |\(: _( _|(: ( ) :) \: | \ /))_ ")/" \_|\ # +# \___|\____\) \___|\____\)(_______/ (__\_|_)\_______) \__| |__/ \__| \"_____/(_____((_______) # +# # +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # import os from functools import wraps -from typing import Dict, Union -from importlib.metadata import version, PackageNotFoundError +from importlib.metadata import PackageNotFoundError, version +from typing import Any, Callable, Dict, Union try: __version__ = version("nnsight") except PackageNotFoundError: __version__ = "unknown version" +from IPython import get_ipython + +try: + __IPYTHON__ = get_ipython() is not None +except NameError: + __IPYTHON__ = False + import torch import yaml -from .patching import * -from .schema.Config import ConfigModel +from .schema.config import ConfigModel +from .util import Patch, Patcher PATH = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(PATH, "config.yaml"), "r") as file: CONFIG = ConfigModel(**yaml.safe_load(file)) + from .logger import logger, remote_logger -from .models.NNsightModel import NNsight -from .models.LanguageModel import LanguageModel -from .patching import Patch, Patcher -from .tracing.Proxy import proxy_wrapper +from .intervention import Envoy, NNsight +from .modeling.language import LanguageModel logger.disabled = not CONFIG.APP.LOGGING remote_logger.disabled = not CONFIG.APP.REMOTE_LOGGING @@ -42,25 +47,16 @@ # Below do default patching: DEFAULT_PATCHER = Patcher() -import math -from inspect import getmembers, isbuiltin, isfunction - -import einops - -for key, value in getmembers(einops.einops, isfunction): - DEFAULT_PATCHER.add(Patch(einops.einops, proxy_wrapper(value), key)) -for key, value in getmembers(math, isbuiltin): - DEFAULT_PATCHER.add(Patch(math, proxy_wrapper(value), key)) # Tensor creation operations from torch._subclasses.fake_tensor import FakeTensor -def _bool(self): +def fake_bool(self): return True -DEFAULT_PATCHER.add(Patch(FakeTensor, _bool, "__bool__")) +DEFAULT_PATCHER.add(Patch(FakeTensor, fake_bool, "__bool__")) def fake_tensor_new_wrapper(fn): @@ -83,475 +79,44 @@ def inner(cls, fake_mode, elem, device, constant=None): Patch(FakeTensor, fake_tensor_new_wrapper(FakeTensor.__new__), "__new__") ) +DEFAULT_PATCHER.__enter__() -def onehot_wrapper(fn): - @wraps(fn) - def onehot(input: torch.Tensor, num_classes=-1): - if input.device.type == "meta": - return torch.zeros((*input.shape, num_classes), device="meta") - - else: - return fn(input, num_classes=num_classes) +from .intervention.contexts import GlobalInterventionTracingContext - return onehot +apply = GlobalInterventionTracingContext.GLOBAL_TRACING_CONTEXT.apply +log = GlobalInterventionTracingContext.GLOBAL_TRACING_CONTEXT.log +local = GlobalInterventionTracingContext.GLOBAL_TRACING_CONTEXT.local +cond = GlobalInterventionTracingContext.GLOBAL_TRACING_CONTEXT.cond +iter = GlobalInterventionTracingContext.GLOBAL_TRACING_CONTEXT.iter +stop = GlobalInterventionTracingContext.GLOBAL_TRACING_CONTEXT.stop +def trace(fn): + """Helper decorator to add a function to the intervention graph via `.apply(...)`. + This is opposed to entering the function during tracing and tracing all inner operations. 
-DEFAULT_PATCHER.add( - Patch( - torch.nn.functional, - onehot_wrapper(torch.nn.functional.one_hot), - "one_hot", - ) -) + Args: + fn (Callable): Function to apply. + Returns: + Callable: Traceable function. + """ -def noop_wrapper(fn): @wraps(fn) - def noop(input: torch.Tensor, *args, **kwargs): - return input - - return noop - - -DEFAULT_PATCHER.add( - Patch(FakeTensor, noop_wrapper(FakeTensor.tolist), "tolist") -) - -import warnings -_str = str -try: - - - - from torch.amp.autocast_mode import autocast, is_autocast_available - - # Hacky patch to get around the fact this init method has no handling for 'meta' tensors. - def autoamp_init( - self, - device_type: str, - dtype=None, - enabled: bool = True, - cache_enabled: Optional[bool] = None, - ): - if not isinstance(device_type, _str): - raise ValueError( - f"Expected `device_type` of type `str`, got: `{type(device_type)}`" - ) - if dtype is None: - if device_type == "meta": - dtype = torch.get_autocast_dtype("cpu") - else: - dtype = torch.get_autocast_dtype(device_type) - if torch._jit_internal.is_scripting(): - self._enabled = enabled - self.device = device_type - self.fast_dtype = dtype - assert dtype is not None - return - self.device = device_type - if not is_autocast_available(self.device): - raise RuntimeError( - f"User specified an unsupported autocast device_type '{self.device}'" - ) - self.custom_backend_name = torch._C._get_privateuse1_backend_name() - self.fast_dtype = torch.get_autocast_dtype(self.device) - if self.device == self.custom_backend_name: - necessary_funcs = [ - "get_amp_supported_dtype", - ] - message = f"Tried to use AMP with the `{self.custom_backend_name}` backend, but the backend has not " - message += "registered a module or the module miss some necessary funcs. The backend should register " - message += "a module by `torch._register_device_module`, and the module must have these funcs: \n" - message += "`get_amp_supported_dtype() -> List[torch.dtype]`. \n" - - assert hasattr(torch, self.custom_backend_name), message - self.custom_device_mod = getattr(torch, self.custom_backend_name) - for func in necessary_funcs: - assert hasattr(self.custom_device_mod, func), ( - message + f"But the func `{func}` is missing. \n" - ) - - self._cache_enabled = torch.is_autocast_cache_enabled() - if ( - enabled - and torch.cuda.amp.common.amp_definitely_not_available() - and self.device == "cuda" - ): - warnings.warn( - "User provided device_type of 'cuda', but CUDA is not available. Disabling" - ) - enabled = False - if dtype is not None: - self.fast_dtype = dtype - if cache_enabled is not None: - self._cache_enabled = cache_enabled - - if self.device == "cpu": - supported_dtype = [torch.bfloat16, torch.float16] - if self.fast_dtype not in supported_dtype and enabled: - error_message = "In CPU autocast, but the target dtype is not supported. Disabling autocast.\n" - error_message += "CPU Autocast only supports dtype of " - error_message += ( - ", ".join(str(dtype) for dtype in supported_dtype) - + " currently." - ) - warnings.warn(error_message) - enabled = False - elif self.device == "xpu": - supported_dtype = [torch.bfloat16, torch.float16] - if self.fast_dtype not in supported_dtype: - error_message = "In XPU autocast, but the target dtype is not supported. Disabling autocast.\n" - error_message += "XPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." 
- warnings.warn(error_message) - enabled = False - elif self.device == "ipu": - supported_dtypes = [torch.bfloat16, torch.float16] - if self.fast_dtype not in supported_dtypes: - error_message = "In IPU autocast, but the target dtype is not supported. Disabling autocast.\n" - error_message += "IPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." - warnings.warn(error_message) - enabled = False - elif self.device == "hpu": - supported_dtype = [torch.bfloat16, torch.float16] - if self.fast_dtype not in supported_dtype: - error_message = "In HPU autocast, but the target dtype is not supported. Disabling autocast.\n" - error_message += "HPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." - warnings.warn(error_message) - enabled = False - elif self.device == self.custom_backend_name: - supported_dtype = self.custom_device_mod.get_amp_supported_dtype() - if self.fast_dtype not in supported_dtype: - error_message = f"In {self.custom_backend_name} autocast, but the target dtype is not supported. " - error_message += f"Disabling autocast.\n {self.custom_backend_name} Autocast only supports dtypes of " - error_message += ( - ", ".join(str(dtype) for dtype in supported_dtype) - + " currently." - ) - warnings.warn(error_message) - enabled = False - elif self.device == "cuda": - if ( - enabled - and self.fast_dtype == torch.bfloat16 - and not torch.cuda.is_bf16_supported() - ): - raise RuntimeError( - "Current CUDA Device does not support bfloat16. Please switch dtype to float16." - ) - elif self.device == "xla": - supported_dtype = [torch.float16, torch.bfloat16] - if self.fast_dtype not in supported_dtype: - error_message = "In XLA autocast, but the target dtype is not supported. Disabling autocast.\n" - error_message += "XLA Autocast only supports dtype of torch.bfloat16 currently." - warnings.warn(error_message) - enabled = False - self._enabled = enabled - - from torch.amp.autocast_mode import autocast - - DEFAULT_PATCHER.add(Patch(autocast, autoamp_init, "__init__")) - -except: - pass - -try: - - from accelerate.utils.modeling import ( - check_device_same, - is_npu_available, - is_xpu_available, - ) - - # Hacky patch to get around this function trying to set the parameter of a non meta tensor to meta. - # Also handles FakeTensors. - def set_module_tensor_to_device( - module: torch.nn.Module, - tensor_name: str, - device: Union[int, str, torch.device], - value: Optional[torch.Tensor] = None, - dtype: Optional[Union[str, torch.dtype]] = None, - fp16_statistics: Optional[torch.HalfTensor] = None, - tied_params_map: Optional[ - Dict[int, Dict[torch.device, torch.Tensor]] - ] = None, - ): - """ - A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing - `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). - - Args: - module (`torch.nn.Module`): - The module in which the tensor we want to move lives. - tensor_name (`str`): - The full name of the parameter/buffer. - device (`int`, `str` or `torch.device`): - The device on which to set the tensor. - value (`torch.Tensor`, *optional*): - The value of the tensor (useful when going from the meta device to any other device). - dtype (`torch.dtype`, *optional*): - If passed along the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast to - the dtype of the existing parameter in the model. 
- fp16_statistics (`torch.HalfTensor`, *optional*): - The list of fp16 statistics to set on the module, used for 8 bit model serialization. - tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`): - A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given - execution device, this parameter is useful to reuse the first available pointer of a shared weight on the - device for all others, instead of duplicating memory. - """ - # Recurse if needed - if "." in tensor_name: - splits = tensor_name.split(".") - for split in splits[:-1]: - new_module = getattr(module, split) - if new_module is None: - raise ValueError(f"{module} has no attribute {split}.") - module = new_module - tensor_name = splits[-1] - - if ( - tensor_name not in module._parameters - and tensor_name not in module._buffers - ): - raise ValueError( - f"{module} does not have a parameter or a buffer named {tensor_name}." - ) - is_buffer = tensor_name in module._buffers - old_value = getattr(module, tensor_name) - - # Treat the case where old_value (or a custom `value`, typically offloaded to RAM/disk) belongs to a tied group, and one of the weight - # in the tied group has already been dispatched to the device, by avoiding reallocating memory on the device and just copying the pointer. - if ( - value is not None - and tied_params_map is not None - and value.data_ptr() in tied_params_map - and device in tied_params_map[value.data_ptr()] - ): - module._parameters[tensor_name] = tied_params_map[value.data_ptr()][ - device - ] - return - elif ( - tied_params_map is not None - and old_value.data_ptr() in tied_params_map - and device in tied_params_map[old_value.data_ptr()] - ): - module._parameters[tensor_name] = tied_params_map[ - old_value.data_ptr() - ][device] - return - - if ( - old_value.device == torch.device("meta") - and device not in ["meta", torch.device("meta")] - and value is None - ): - raise ValueError( - f"{tensor_name} is on the meta device, we need a `value` to put in on {device}." - ) - - if value is not None: - if old_value.shape != value.shape: - raise ValueError( - f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this look incorrect.' - ) - - if dtype is None: - # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model - value = value.to(old_value.dtype) - elif not str(value.dtype).startswith( - ("torch.uint", "torch.int", "torch.bool") - ): - value = value.to(dtype) - - param = ( - module._parameters[tensor_name] - if tensor_name in module._parameters - else None - ) - param_cls = type(param) - - device_quantization = None - with torch.no_grad(): - # leave it on cpu first before moving them to cuda - # # fix the case where the device is meta, we don't want to put it on cpu because there is no data =0 - if ( - param is not None - and param.device.type != "cuda" - and torch.device(device).type == "cuda" - and param_cls.__name__ - in ["Int8Params", "FP4Params", "Params4bit"] - ): - device_quantization = device - device = "cpu" - # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). 
- if is_npu_available() and isinstance(device, int): - device = f"npu:{device}" - if is_xpu_available() and isinstance(device, int): - device = f"xpu:{device}" - if value is None: - new_value = old_value.to(device) - if dtype is not None and device in [ - "meta", - torch.device("meta"), - ]: - if not str(old_value.dtype).startswith( - ("torch.uint", "torch.int", "torch.bool") - ): - new_value = new_value.to(dtype) - - if not is_buffer: - module._parameters[tensor_name] = param_cls( - new_value, requires_grad=old_value.requires_grad - ) - elif isinstance(value, torch.Tensor): - new_value = value.to(device) - else: - new_value = torch.tensor(value, device=device) - if device_quantization is not None: - device = device_quantization - if is_buffer: - module._buffers[tensor_name] = new_value - elif value is not None or not check_device_same( - torch.device(device), module._parameters[tensor_name].device - ): - param_cls = type(module._parameters[tensor_name]) - kwargs = module._parameters[tensor_name].__dict__ - if param_cls.__name__ in ["Int8Params", "FP4Params"]: - if ( - param_cls.__name__ == "Int8Params" - and new_value.dtype == torch.float32 - ): - # downcast to fp16 if any - needed for 8bit serialization - new_value = new_value.to(torch.float16) - # quantize module that are going to stay on the cpu so that we offload quantized weights - if device == "cpu" and param_cls.__name__ == "Int8Params": - new_value = ( - param_cls( - new_value, - requires_grad=old_value.requires_grad, - **kwargs, - ) - .to(0) - .to("cpu") - ) - new_value.CB = new_value.CB.to("cpu") - new_value.SCB = new_value.SCB.to("cpu") - else: - new_value = param_cls( - new_value, - requires_grad=old_value.requires_grad, - **kwargs, - ).to(device) - elif param_cls.__name__ in ["QTensor", "QBitsTensor"]: - new_value = torch.nn.Parameter( - new_value, requires_grad=old_value.requires_grad - ).to(device) - elif isinstance(new_value, FakeTensor) or isinstance( - old_value, FakeTensor - ): - new_value = torch.nn.Parameter( - new_value, requires_grad=old_value.requires_grad - ).to(device) - else: - new_value = param_cls( - new_value, requires_grad=old_value.requires_grad - ).to(device) - - module._parameters[tensor_name] = new_value - if fp16_statistics is not None: - module._parameters[tensor_name].SCB = fp16_statistics.to( - device - ) - del fp16_statistics - # as we put the weight to meta, it doesn't have SCB attr anymore. 
make sure that it is not a meta weight - if ( - module.__class__.__name__ == "Linear8bitLt" - and getattr(module.weight, "SCB", None) is None - and str(module.weight.device) != "meta" - ): - # quantize only if necessary - device_index = ( - torch.device(device).index - if torch.device(device).type == "cuda" - else None - ) - if ( - not getattr(module.weight, "SCB", None) - and device_index is not None - ): - if ( - module.bias is not None - and module.bias.device.type != "meta" - ): - # if a bias exists, we need to wait until the bias is set on the correct device - module = module.cuda(device_index) - elif module.bias is None: - # if no bias exists, we can quantize right away - module = module.cuda(device_index) - elif ( - module.__class__.__name__ == "Linear4bit" - and getattr(module.weight, "quant_state", None) is None - ): - # quantize only if necessary - device_index = ( - torch.device(device).index - if torch.device(device).type == "cuda" - else None - ) - if ( - not getattr(module.weight, "quant_state", None) - and device_index is not None - ): - module.weight = module.weight.cuda(device_index) - # clean pre and post foward hook - if is_npu_available(): - torch.npu.empty_cache() - elif is_xpu_available(): - torch.xpu.empty_cache() - else: - torch.cuda.empty_cache() - - # When handling tied weights, we update tied_params_map to keep track of the tied weights that have already been allocated on the device in - # order to avoid duplicating memory, see above. - if ( - tied_params_map is not None - and old_value.data_ptr() in tied_params_map - and device not in tied_params_map[old_value.data_ptr()] - ): - tied_params_map[old_value.data_ptr()][device] = new_value - elif ( - value is not None - and tied_params_map is not None - and value.data_ptr() in tied_params_map - and device not in tied_params_map[value.data_ptr()] - ): - tied_params_map[value.data_ptr()][device] = new_value - - from accelerate import hooks - - DEFAULT_PATCHER.add( - Patch(hooks, set_module_tensor_to_device, "set_module_tensor_to_device") - ) - -except: - pass + def inner(*args, **kwargs): + + return apply(fn, *args, **kwargs) + return inner -DEFAULT_PATCHER.__enter__() -from .contexts.GraphBasedContext import GlobalTracingContext - -bool = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.bool -bytes = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.bytes -int = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.int -float = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.float -str = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.str -complex = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.complex -bytearray = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.bytearray -tuple = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.tuple -list = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.list -set = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.set -dict = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.dict -apply = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.apply -log = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.log -cond = GlobalTracingContext.GLOBAL_TRACING_CONTEXT.cond +bool = trace(bool) +bytes = trace(bytes) +int = trace(int) +float = trace(float) +str = trace(str) +complex = trace(complex) +bytearray = trace(bytearray) +tuple = trace(tuple) +list = trace(list) +set = trace(set) +dict = trace(dict) \ No newline at end of file diff --git a/src/nnsight/config.yaml b/src/nnsight/config.yaml index af4c9131..f76f0b42 100755 --- a/src/nnsight/config.yaml +++ b/src/nnsight/config.yaml @@ -1,8 +1,13 @@ API: APIKEY: null + FORMAT: json HOST: ndif.dev JOB_ID: null 
SSL: true + ZLIB: true APP: + DEBUG: true + FRAME_INJECTION: true + CONTROL_FLOW_HACKS: true LOGGING: false REMOTE_LOGGING: true diff --git a/src/nnsight/contexts/Conditional.py b/src/nnsight/contexts/Conditional.py deleted file mode 100644 index 93e4bf01..00000000 --- a/src/nnsight/contexts/Conditional.py +++ /dev/null @@ -1,111 +0,0 @@ -from __future__ import annotations - -from contextlib import AbstractContextManager -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Any, Union - -from ..tracing import protocols - -if TYPE_CHECKING: - from ..tracing.Node import Node - from ..tracing.Graph import Graph - from ..intervention import InterventionProxy - -class ConditionalManager(): - """ A Graph attachement that manages the Conditional contexts defined within an Intervention Graph. - - Attributes: - _conditional_dict (Dict[str, Node]): Mapping of ConditionalProtocol node name to Conditional context. - _conditioned_nodes_dict (Dict[str, Set[Node]]): Mapping of ConditionalProtocol node name to all the Nodes conditiones by it. - _conditional_stack (Dict): Stack of visited Conditional contexts' ConditonalProtocol nodes. - """ - - def __init__(self): - self._conditional_nodes_dict: Dict[str, Node] = dict() - self._conditioned_nodes_dict: Dict[str, Set[Node]] = dict() - self._conditional_nodes_stack: List[Node] = list() - - def push(self, conditional_node: "Node") -> None: - """ Adds the Conditional to the stack of Conditional contexts. - - Args: - conditional_node (Node): ConditionalProtocol node. - """ - - self._conditional_nodes_dict[conditional_node.name] = conditional_node - self._conditioned_nodes_dict[conditional_node.name] = set() - self._conditional_nodes_stack.append(conditional_node) - - def get(self, key: str) -> Conditional: - """ Returns a ConditionalProtocol node. - - Args: - key (str): ConditionalProtocol node name. - - Returns: - Node: ConditionalProtocol node. - """ - - return self._conditional_nodes_dict[key] - - def pop(self) -> None: - """ Pops the ConditionalProtocol node of the current Conditional context from the ConditionalManager stack. """ - - self._conditional_nodes_stack.pop() - - def peek(self) -> Optional["Node"]: - """ Gets the current Conditional context's ConditionalProtocol node. - - Returns: - Optional[Node]: Lastest ConditonalProtocol node if the ConditionalManager stack is non-empty. - """ - - if len(self._conditional_nodes_stack) > 0: - return self._conditional_nodes_stack[-1] - - def add_conditioned_node(self, node: "Node") -> None: - """ Adding a Node to the set of conditioned nodes by the current Conditonal context. - - Args: - - node (Node): A node conditioned by the latest Conditional context. - """ - - self._conditioned_nodes_dict[self.peek().name].add(node) - - def is_node_conditioned(self, node: "Node") -> bool: - """ Returns True if the Node argument is conditioned by the current Conditional context. - - Args: - - node (Node): Node. - - Returns: - bool: Whether the Node is conditioned. - """ - - curr_conditioned_nodes_set = self._conditioned_nodes_dict[self.peek().name] - - return (node in curr_conditioned_nodes_set) - - -class Conditional(AbstractContextManager): - """ A context defined by a boolean condition, upon which the execution of all nodes defined from within is contingent. - - Attributes: - _graph (Graph): Conditional Context graph. - _condition (Union[InterventionProxy, Any]): Condition. 
- """ - - def __init__(self, graph: "Graph", condition: Union["InterventionProxy", Any]): - self._graph = graph - self._condition: Union["InterventionProxy", Any] = condition - - def __enter__(self) -> Conditional: - - conditional_node = protocols.ConditionalProtocol.add(self._graph, self._condition).node - - protocols.ConditionalProtocol.push_conditional(conditional_node) - - return self - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - - protocols.ConditionalProtocol.pop_conditional(self._graph) diff --git a/src/nnsight/contexts/GraphBasedContext.py b/src/nnsight/contexts/GraphBasedContext.py deleted file mode 100755 index dc219064..00000000 --- a/src/nnsight/contexts/GraphBasedContext.py +++ /dev/null @@ -1,465 +0,0 @@ -from __future__ import annotations - -import inspect -import weakref -from contextlib import AbstractContextManager -from functools import wraps -from typing import Any, Callable, Union - -import torch -from torch.overrides import TorchFunctionMode -from typing_extensions import Self - -from ..intervention import InterventionProxy -from ..patching import Patch, Patcher -from ..tracing import protocols -from ..tracing.Bridge import Bridge -from ..tracing.Graph import Graph -from .backends import Backend, BridgeMixin -from .Conditional import Conditional - - -class GraphBasedContext(AbstractContextManager, BridgeMixin): - - def __init__( - self, - backend: Backend, - graph: Graph = None, - bridge: Bridge = None, - **kwargs, - ) -> None: - - self.backend = backend - - self.graph: Graph = Graph(**kwargs) if graph is None else graph - - if bridge is not None: - - bridge.add(self.graph) - - def apply( - self, - target: Callable, - *args, - validate: bool = None, - **kwargs, - ) -> InterventionProxy: - """Helper method to directly add a function to the intervention graph. - - Args: - target (Callable): Function to apply - validate (bool): If to try and run this operation in FakeMode to test it out and scan it. - - Returns: - InterventionProxy: Proxy of applying that function. - """ - - proxy_value = inspect._empty - - if validate is False: - - proxy_value = None - - return self.graph.create( - target=target, - proxy_value=proxy_value, - args=args, - kwargs=kwargs, - ) - - def cond(self, condition: Union[InterventionProxy, Any]) -> Conditional: - """Entrypoint to the Conditional context. - Takes in a condition argument which acts as the dependency of the Conditional node in the Intervention graph. - The condition is evaluated as a boolean, and if True, executes all the interventions defined within the body - of the conditional context. - - Args: - condition (Union[InterventionProxy, Any]): Dependency of the Conditional Node. - - Returns: - Conditional: Conditional context object. - - Example: - - Setup: - .. code-block:: python - import torch - from collections import OrderedDict - - input_size = 5 - hidden_dims = 10 - output_size = 2 - - model = nn.Sequential(OrderedDict([ - ('layer1', torch.nn.Linear(input_size, hidden_dims)), - ('layer2', torch.nn.Linear(hidden_dims, output_size)), - ])) - - input = torch.rand((1, input_size)) - - Ex 1: The .save() on the model output will only be executed if the condition passed to tracer.cond() is evaluated to True. - - .. code-block:: python - x: int = 5 - with model.trace(input) as trace: - with tracer.cond(x > 0): - out = model.output.save() - - Ex 2: The condition is on an InterventionProxy which creates in return an InterventionProxy - - .. 
code-block:: python - with model.trace(input) as trace: - with tracer.cond(model.layer1.output[:, 0] > 0): - out = model.output.save() - """ - - return Conditional(self.graph, condition) - - def exit(self) -> InterventionProxy: - """Exits the execution of a sequential intervention graph. - - Returns: - InterventionProxy: Proxy of the EarlyStopProtocol node. - """ - - if self.graph.sequential: - return protocols.EarlyStopProtocol.add(self.graph) - else: - raise Exception( - "Early exit is only supported for sequential graph-based contexts." - ) - - def log(self, *data: Any) -> None: - """Adds a node via .apply to print the value of a Node. - - Args: - data (Any): Data to print. - """ - self.apply(print, *data) - - def bool(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable bool.""" - - return self.apply(bool, *args, **kwargs) - - def bytes(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable bytes.""" - - return self.apply(bytes, *args, **kwargs) - - def int(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable int.""" - - return self.apply(int, *args, **kwargs) - - def float(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable float.""" - - return self.apply(float, *args, **kwargs) - - def str(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable string.""" - - return self.apply(str, *args, **kwargs) - - def complex(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable complex number.""" - - return self.apply(complex, *args, **kwargs) - - def bytearray(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable bytearray.""" - - return self.apply(bytearray, *args, **kwargs) - - def tuple(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable tuple.""" - - return self.apply(tuple, *args, **kwargs) - - def list(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable list.""" - - return self.apply(list, *args, **kwargs) - - def set(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable set.""" - - return self.apply(set, *args, **kwargs) - - def dict(self, *args, **kwargs) -> InterventionProxy: - """NNsight helper method to create a traceable dictionary.""" - - return self.apply(dict, *args, **kwargs) - - def vis(self, **kwargs) -> None: - """ - Helper method to save a visualization of the current state of the intervention graph. 
- """ - - self.graph.vis(**kwargs) - - def __enter__(self) -> Self: - - GlobalTracingContext.try_register(self) - - return self - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - - GlobalTracingContext.try_deregister(self) - - if isinstance(exc_val, BaseException): - self.graph.alive = False - self.graph = None - raise exc_val - - self.backend(self) - - ### BACKENDS ######## - - def local_backend_execute(self) -> None: - - try: - self.graph.reset() - self.graph.execute() - except protocols.EarlyStopProtocol.EarlyStopException as e: - raise e - finally: - graph = self.graph - graph.alive = False - - if not isinstance(graph, weakref.ProxyType): - self.graph = weakref.proxy(graph) - - def bridge_backend_handle(self, bridge: Bridge) -> None: - - bridge.pop_graph() - - protocols.LocalBackendExecuteProtocol.add(self, bridge.peek_graph()) - - self.graph = weakref.proxy(self.graph) - - -from inspect import getmembers, isclass - -from torch.utils import data - - -def global_patch(root, name: str) -> Patch: - - fn = getattr(root, name) - - @wraps(fn) - def inner(*args, **kwargs): - - return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.apply( - fn, *args, **kwargs - ) - - return Patch(root, inner, name) - - -def global_patch_class(cls: type) -> Patch: - - if cls.__new__ is object.__new__: - - def super_new(cls, *args, **kwargs): - - return object.__new__(cls) - - cls.__new__ = super_new - - fn = cls.__new__ - - @wraps(fn) - def inner(cls, *args, **kwargs): - - return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.apply( - cls, *args, **kwargs - ) - - return Patch(cls, inner, "__new__") - - -class GlobalTracingContext(GraphBasedContext): - """The Global Tracing Context handles adding tracing operations globally without reference to a given `GraphBasedContext`. - There should only be one of these and that is `GlobalTracingContext.GLOBAL_TRACING_CONTEXT`. - `GlobalTracingContext.TORCH_HANDLER` handles adding torch functions without reference to a given `GraphBasedContext`. 
- - """ - - GLOBAL_TRACING_CONTEXT: GlobalTracingContext - TORCH_HANDLER: GlobalTracingContext.GlobalTracingTorchHandler - PATCHER: Patcher = Patcher( - [ - global_patch_class(torch.nn.Parameter), - global_patch_class(data.DataLoader), - global_patch(torch, "arange"), - global_patch(torch, "empty"), - global_patch(torch, "eye"), - global_patch(torch, "full"), - global_patch(torch, "linspace"), - global_patch(torch, "logspace"), - global_patch(torch, "ones"), - global_patch(torch, "rand"), - global_patch(torch, "randint"), - global_patch(torch, "randn"), - global_patch(torch, "randperm"), - global_patch(torch, "zeros"), - global_patch(torch, "cat") - ] - + [ - global_patch_class(value) - for key, value in getmembers(torch.optim, isclass) - if issubclass(value, torch.optim.Optimizer) - ] - ) - - class GlobalTracingTorchHandler(TorchFunctionMode): - - def __torch_function__(self, func, types, args, kwargs=None): - - if kwargs is None: - - kwargs = {} - - if "_VariableFunctionsClass" in func.__qualname__: - return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.apply( - func, *args, **kwargs - ) - - return func(*args, **kwargs) - - class GlobalTracingExit(AbstractContextManager): - - def __enter__(self) -> Any: - - GlobalTracingContext.TORCH_HANDLER.__exit__(None, None, None) - GlobalTracingContext.PATCHER.__exit__(None, None, None) - - return self - - def __exit__(self, exc_type, exc_val, traceback): - - GlobalTracingContext.TORCH_HANDLER.__enter__() - GlobalTracingContext.PATCHER.__enter__() - - if isinstance(exc_val, BaseException): - - raise exc_val - - def __init__(self) -> None: - """We create an empty `GraphBasedContext` by default.""" - - self.graph: Graph = None - - @staticmethod - def exit_global_tracing_context(): - - return GlobalTracingContext.GlobalTracingExit() - - @staticmethod - def try_register(graph_based_context: GraphBasedContext) -> bool: - """Attempts to register a `Graph` globally.] - Will not if one is already registered. - - Args: - graph_based_context (GraphBasedContext): `GraphBasedContext` to register. - - Returns: - bool: True if registering ws successful, False otherwise. - """ - - if GlobalTracingContext.GLOBAL_TRACING_CONTEXT: - - return False - - GlobalTracingContext.register(graph_based_context) - - return True - - @staticmethod - def try_deregister(graph_based_context: GraphBasedContext) -> bool: - """Attempts to deregister a `Graph` globally. - Will not if `graph_based_context` does not have the same `Graph` as the currently registered one. - - Args: - graph_based_context (GraphBasedContext): `GraphBasedContext` to deregister. - - Returns: - bool: True if deregistering ws successful, False otherwise. - """ - if ( - not GlobalTracingContext.GLOBAL_TRACING_CONTEXT - or graph_based_context.graph - is not GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph - ): - - return False - GlobalTracingContext.deregister() - - return True - - @staticmethod - def register(graph_based_context: GraphBasedContext) -> None: - """Register `GraphBasedContext` globally. - - Args: - graph_based_context (GraphBasedContext): GraphBasedContext to register. - """ - - assert GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph is None - - GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph = ( - graph_based_context.graph - ) - - GlobalTracingContext.TORCH_HANDLER.__enter__() - GlobalTracingContext.PATCHER.__enter__() - - @staticmethod - def deregister() -> None: - """Deregister `GraphBasedContext` globally. - - Args: - graph_based_context (GraphBasedContext): GraphBasedContext to deregister. 
- """ - - assert GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph is not None - - GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph = None - - GlobalTracingContext.TORCH_HANDLER.__exit__(None, None, None) - GlobalTracingContext.PATCHER.__exit__(None, None, None) - - def __bool__(self) -> bool: - """True if there is a `GraphBasedContext` registered globally. False otherwise.""" - - return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph is not None - - def __getattribute__(self, name: str) -> Any: - """Prevent attribute access if no `GraphBasedContext` registered.""" - - static_methods = [ - name - for name, value in inspect.getmembers( - GraphBasedContext, predicate=inspect.ismethod - ) - ] - - if name in static_methods: - - if not GlobalTracingContext.GLOBAL_TRACING_CONTEXT: - - raise Exception( - "Global ops cannot be used outside of a tracing context." - ) - - return object.__getattribute__(self, name) - - -GlobalTracingContext.GLOBAL_TRACING_CONTEXT = GlobalTracingContext() -GlobalTracingContext.TORCH_HANDLER = ( - GlobalTracingContext.GlobalTracingTorchHandler() -) diff --git a/src/nnsight/contexts/Tracer.py b/src/nnsight/contexts/Tracer.py deleted file mode 100755 index 1fb24fa6..00000000 --- a/src/nnsight/contexts/Tracer.py +++ /dev/null @@ -1,192 +0,0 @@ -from __future__ import annotations - -import weakref -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, Tuple - -from typing_extensions import Self - -from ..tracing import protocols -from ..tracing.Bridge import Bridge -from ..tracing.Graph import Graph -from . import resolve_dependencies -from .backends import Backend, EditBackend, BridgeMixin, EditMixin, RemoteMixin -from .GraphBasedContext import GraphBasedContext -from .Invoker import Invoker - -if TYPE_CHECKING: - from ..models.mixins import RemoteableMixin - from ..models.NNsightModel import NNsight - - -class Tracer(GraphBasedContext, RemoteMixin, BridgeMixin, EditMixin): - """The Tracer class creates a :class:`nnsight.tracing.Graph.Graph` around the ._model of a :class:`nnsight.models.NNsightModel.NNsight` which tracks and manages the operations performed on the inputs and outputs of said model. - - Attributes: - _model (nnsight.models.NNsightModel.NNsight): nnsight Model object that ths context manager traces and executes. - _graph (nnsight.tracing.Graph.Graph): Graph which traces operations performed on the input and output of modules' Envoys are added and later executed. - _args (List[Any]): Positional arguments to be passed to function that executes the model. - _kwargs (Dict[str,Any]): Keyword arguments to be passed to function that executes the model. - _invoker_inputs (List[Any]): Inputs for each invocation of this Tracer. - _invoker (Invoker): Currently open Invoker. - """ - - def __init__( - self, - backend: Backend, - model: "NNsight", - validate: bool = False, - graph: Graph = None, - bridge: Bridge = None, - return_context: bool = False, - **kwargs, - ) -> None: - - self.model = model - - self.return_context = return_context - - GraphBasedContext.__init__( - self, - backend, - graph=graph, - bridge=bridge, - proxy_class=model.proxy_class, - validate=validate, - sequential=False, - ) - - protocols.ApplyModuleProtocol.set_module(self.graph, self.model) - - self._kwargs = kwargs - - self.invoker: Optional[Invoker] = None - - self._invoker_inputs: List[Any] = [] - - # Module Envoys need to know about the current Tracer to create the correct proxies. 
- self.model._envoy._set_tracer(weakref.proxy(self)) - - def __getattr__(self, key: Any) -> Any: - """Wrapper of .model._envoy's attributes to access module Envoy inputs and outputs. - - Returns: - Any: Attribute. - """ - return getattr(self.model._envoy, key) - - def __enter__(self) -> Union[Self, "NNsight", Tuple["NNsight", Self]]: - - tracer = super().__enter__() - - if self.invoker is not None: - - self.invoker.__enter__() - - if isinstance(self.backend, EditBackend): - if self.return_context: - return self.model, self - - return self.model - - return tracer - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - - if self.invoker is not None: - - self.invoker.__exit__(None, None, None) - - self.model._envoy._reset() - - - super().__exit__(exc_type, exc_val, exc_tb) - - def invoke(self, *inputs: Any, **kwargs) -> Invoker: - """Create an Invoker context dor a given input. - - Raises: - Exception: If an Invoker context is already open - - Returns: - Invoker: Invoker. - """ - - if self.invoker is not None: - - raise Exception("Can't create an invoker context with one already open!") - - return Invoker(self, *inputs, **kwargs) - - def next(self, increment: int = 1) -> None: - """Increments call_iter of all module Envoys. Useful when doing iterative/generative runs. - - Args: - increment (int): How many call_iter to increment at once. Defaults to 1. - """ - - self.model._envoy.next(increment=increment, propagate=True) - - ##### BACKENDS ############################### - - def local_backend_execute(self) -> Graph: - - protocols.ApplyModuleProtocol.set_module(self.graph, self.model._model) - - self.graph.reset() - - invoker_inputs = self._invoker_inputs - - # If ths graph has a Bridge, we need to check for Nodes in the input itself. - if protocols.BridgeProtocol.has_bridge(self.graph): - - invoker_inputs = resolve_dependencies(invoker_inputs) - - self.graph.execute() - - self.model.interleave( - self.model._execute, - self.graph, - *invoker_inputs, - **self._kwargs, - ) - - graph = self.graph - graph.alive = False - - if not isinstance(graph, weakref.ProxyType): - self.graph = weakref.proxy(graph) - - return graph - - def edit_backend_execute(self) -> Graph: - - self.model._default_graph = self.graph - - def remote_backend_get_model_key(self) -> str: - - self.model: "RemoteableMixin" - - return self.model.to_model_key() - - def remote_backend_postprocess_result(self, local_result: Graph) -> Dict[str, Any]: - - from ..schema.Response import ResultModel - - return ResultModel.from_graph(local_result) - - def remote_backend_handle_result_value(self, value: Dict[str, Any]) -> None: - - # TODO : graph mismatch handle. hash json ? - for node_name, node_value in value.items(): - self.graph.nodes[node_name]._value = node_value - - def remote_backend_cleanup(self): - - graph = self.graph - graph.alive = False - - if not isinstance(graph, weakref.ProxyType): - self.graph = weakref.proxy(graph) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} at {hex(id(self))}>" diff --git a/src/nnsight/contexts/__init__.py b/src/nnsight/contexts/__init__.py deleted file mode 100644 index 961eb76a..00000000 --- a/src/nnsight/contexts/__init__.py +++ /dev/null @@ -1,91 +0,0 @@ -"""The contexts module contains logic for managing the tracing and running of models with :mod:`nnsight.tracing` and :mod:`nnsight.envoy` - -The primary two classes involved here are :class:`Tracer ` and :class:`Invoker `. 
- -The :class:`Tracer ` class creates a :class:`Graph ` around the underlying model of an :class:`NNsight `. The graph tracks and manages the operations performed on the inputs and outputs of said model. -Module's envoys in the model expose their ``.output`` and ``.input`` attributes which when accessed, add to the computation graph of the tracer. -To do this, they need to know about the current Tracer object, so each module's envoy's ``.tracer`` object is set to be the current Tracer. - -The Tracer object also keeps track of the batch_size of the most recent input, the generation index for multi iteration generations, and all of the inputs made during its context in the ``.batched_input`` attribute. Inputs added to this attribute should be in a format where each index is a batch index and allows the model to batch all of the inputs together. - -This is to keep things consistent. If two different inputs are in two different valid formats, they both become the same format and are easy to batch. -In the case of LanguageModels, regardless of whether the input are string prompts, pre-processed dictionaries, or input ids, the batched input is only input ids. -On exiting the Tracer context, the Tracer object should use the information and inputs provided to it to carry out the execution of the model. - -The :class:`Invoker ` class is what actually accepts inputs to the model/graph, and it updates its parent Tracer object with the appropriate information about respective inputs. On entering the invoker context with some input, the invoker leverages the model to pre-process and prepare the input to the model. Using the prepared inputs, it updates its Tracer object with a batched version of the input, the size of the batched input, and the current generation index. It also runs a 'meta' version of the input through the model's meta_model. This updates the sizes/dtypes of all of the module's Envoys inputs and outputs based on the characteristics of the input. - -nnsight comes with an extension of a Tracer, RemoteTracer, which enables both local and remote execution. -""" - -from typing import Any, Tuple - -from .. import util -from ..tracing import protocols -from ..tracing.Node import Node -from ..tracing.Proxy import Proxy - - -def check_for_dependencies(data: Any) -> Tuple[Any, bool]: - """Checks to see if there are any Proxies in data. - If so, convert them to a Bridge Node, then a Lock Node in order to - later get the value of the Bridge Node come execution. - - Args: - data (Any): Data to check for Proxies. - - Returns: - Any: Data with Proxies replaced with Bridge/Lock Nodes. - bool: If there were any proxies in data. - """ - - has_proxies = False - - def check_for_nodes(proxy: Proxy): - - if not proxy.node.done(): - - nonlocal has_proxies - - has_proxies = True - - node = proxy.node - - return protocols.LockProtocol.add( - protocols.BridgeProtocol.add( - node, - ).node - ).node - - else: - - return proxy.node.value - - return util.apply(data, check_for_nodes, Proxy), has_proxies - - -def resolve_dependencies(data: Any) -> Any: - """Turn any dependencies (Locked Bridge Node) within data into their value. - Executes the Bridge Node. - - Args: - data (Any): Data to find and resolve dependencies within. - - Returns: - Any: Data with dependencies converted to their value. 
- """ - - def get_value(node: Node): - - bridge_node: Node = node.args[0] - - bridge_node.execute() - - # Get value of Bridge Node - value = bridge_node.value - - # Clear Lock Node - node.set_value(None) - - return value - - return util.apply(data, get_value, Node) diff --git a/src/nnsight/contexts/backends/BridgeBackend.py b/src/nnsight/contexts/backends/BridgeBackend.py deleted file mode 100755 index a2617340..00000000 --- a/src/nnsight/contexts/backends/BridgeBackend.py +++ /dev/null @@ -1,38 +0,0 @@ -from typing import TYPE_CHECKING, Any, Callable, List, Tuple, Union - -from . import Backend - -if TYPE_CHECKING: - from ...tracing.Bridge import Bridge - - -class BridgeMixin: - """To be inherited by objects that want to be able to be executed by the BridgeBackend.""" - - def bridge_backend_handle(self, bridge: "Bridge") -> None: - """Should add self to the current Bridge in some capacity. - - Args: - bridge (Bridge): Current Bridge. - """ - - raise NotImplementedError() - - -class BridgeBackend(Backend): - """Backend to accumulate multiple context object to be executed collectively. - - Context object must inherit from BridgeMixin and implement its methods. - - Attributes: - - bridge (Bridge): Current Bridge object. - """ - - def __init__(self, bridge: "Bridge") -> None: - - self.bridge = bridge - - def __call__(self, obj: BridgeMixin): - - obj.bridge_backend_handle(self.bridge) diff --git a/src/nnsight/contexts/backends/EditBackend.py b/src/nnsight/contexts/backends/EditBackend.py deleted file mode 100644 index 347e4f5a..00000000 --- a/src/nnsight/contexts/backends/EditBackend.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import Any - -from . import Backend - - -class EditMixin: - """To be inherited by objects that want to be able to be executed by the EditBackend.""" - - def edit_backend_execute(self) -> Any: - """Should execute this object locally and return a result that can be handled by EditMixin objects. - - Returns: - Any: Result containing data to return from a edit execution. - """ - - raise NotImplementedError() - - -class EditBackend(Backend): - """Backend to execute a default edit. - - Context object must inherit from EditMixin and implement its methods. - """ - - def __call__(self, obj: EditMixin): - - obj.edit_backend_execute() diff --git a/src/nnsight/contexts/backends/LocalBackend.py b/src/nnsight/contexts/backends/LocalBackend.py deleted file mode 100644 index 44a7434d..00000000 --- a/src/nnsight/contexts/backends/LocalBackend.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import Any - -from . import Backend - - -class LocalMixin: - """To be inherited by objects that want to be able to be executed by the LocalBackend.""" - - def local_backend_execute(self) -> Any: - """Should execute this object locally and return a result that can be handled by RemoteMixin objects. - - Returns: - Any: Result containing data to return from a remote execution. - """ - - raise NotImplementedError() - - -class LocalBackend(Backend): - """Backend to execute a context object on your local machine. - - Context object must inherit from LocalMixin and implement its methods. - """ - - def __call__(self, obj: LocalMixin): - - obj.local_backend_execute() diff --git a/src/nnsight/contexts/backends/NoopBackend.py b/src/nnsight/contexts/backends/NoopBackend.py deleted file mode 100755 index 4264ee56..00000000 --- a/src/nnsight/contexts/backends/NoopBackend.py +++ /dev/null @@ -1,10 +0,0 @@ - -from typing import Any - -from . 
import Backend - - -class NoopBackend(Backend): - - def __call__(self, obj:Any) -> None: - pass \ No newline at end of file diff --git a/src/nnsight/contexts/backends/RemoteBackend.py b/src/nnsight/contexts/backends/RemoteBackend.py deleted file mode 100644 index bb89521b..00000000 --- a/src/nnsight/contexts/backends/RemoteBackend.py +++ /dev/null @@ -1,319 +0,0 @@ -from __future__ import annotations - -import io -from typing import TYPE_CHECKING, Any, Callable - -import requests -import socketio -import torch -from tqdm import tqdm - -from ... import CONFIG -from ...logger import logger, remote_logger -from .LocalBackend import LocalBackend, LocalMixin - -if TYPE_CHECKING: - - from ...schema.Request import RequestModel - from ...schema.Response import ResponseModel - - -class RemoteMixin(LocalMixin): - """To be inherited by objects that want to be able to be executed by the RemoteBackend.""" - - def remote_backend_get_model_key(self) -> str: - """Should return the model_key used to specify which model to run on the remote service. - - Returns: - str: Model key. - """ - - raise NotImplementedError() - - def remote_backend_postprocess_result(self, local_result: Any) -> Any: - """Should handle postprocessing the result from local_backend_execute. - - For example moving tensors to cpu/detaching/etc. - - Args: - local_result (Any): Local execution result. - - Returns: - Any: Post processed local execution result. - """ - - raise NotImplementedError() - - def remote_backend_handle_result_value(self, value: Any) -> None: - """Should handle postprocessed result from remote_backend_postprocess_result on return from remote service. - - Args: - value (Any): Result. - """ - - raise NotImplementedError() - - def remote_backend_cleanup(self): - raise NotImplementedError() - - -class RemoteBackend(LocalBackend): - """Backend to execute a context object via a remote service. - - Context object must inherit from RemoteMixin and implement its methods. - - Attributes: - - url (str): Remote host url. Defaults to that set in CONFIG.API.HOST. - """ - - def __init__( - self, - host: str = None, - blocking: bool = True, - job_id: str = None, - ssl: bool = None, - api_key: str = "", - ) -> None: - - self.job_id = job_id or CONFIG.API.JOB_ID - self.ssl = CONFIG.API.SSL if ssl is None else ssl - self.api_key = api_key or CONFIG.API.APIKEY - self.blocking = blocking - self.handle_result = None - - self.host = host or CONFIG.API.HOST - self.address = f"http{'s' if self.ssl else ''}://{self.host}" - self.ws_address = f"ws{'s' if CONFIG.API.SSL else ''}://{self.host}" - - def request(self, obj: RemoteMixin): - - model_key = obj.remote_backend_get_model_key() - - from ...schema.Request import RequestModel - - # Create request using pydantic to parse the object itself. - return RequestModel(object=obj, model_key=model_key) - - def __call__(self, obj: RemoteMixin): - - self.handle_result = obj.remote_backend_handle_result_value - - if self.blocking: - - request = self.request(obj) - - # Do blocking request. - self.blocking_request(request) - - else: - - request = None - - if not self.job_id: - - request = self.request(obj) - - self.non_blocking_request(request=request) - - obj.remote_backend_cleanup() - - def handle_response(self, data: Any) -> "ResponseModel": - """Handles incoming response data. - - Parses it into the `ResponseModel` pydantic object. - Logs the response object. - If the job is completed, retrieve and stream the result from the remote endpoint. 
- Use torch.load to decode and load the `ResultModel` into memory. - Use the backend object's .handle_result method to handle the decoded result. - - Args: - data (Any): Json data to concert to `ResponseModel` - - Raises: - Exception: If the job's status is `ResponseModel.JobStatus.ERROR` - - Returns: - ResponseModel: ResponseModel. - """ - - from ...schema.Response import ResponseModel, ResultModel - - # Load the data into the ResponseModel pydantic class. - response = ResponseModel(**data) - - # Log response for user - remote_logger.info(str(response)) - - # If the status of the response is completed, update the local nodes that the user specified to save. - # Then disconnect and continue. - if response.status == ResponseModel.JobStatus.COMPLETED: - # Create BytesIO object to store bytes received from server in. - result_bytes = io.BytesIO() - result_bytes.seek(0) - - # Get result from result url using job id. - with requests.get( - url=f"{self.address}/result/{response.id}", - stream=True, - ) as stream: - # Total size of incoming data. - total_size = float(stream.headers["Content-length"]) - - with tqdm( - total=total_size, - unit="B", - unit_scale=True, - desc="Downloading result", - ) as progress_bar: - # chunk_size=None so server determines chunk size. - for data in stream.iter_content(chunk_size=None): - progress_bar.update(len(data)) - result_bytes.write(data) - - # Move cursor to beginning of bytes. - result_bytes.seek(0) - - # Decode bytes with pickle and then into pydantic object. - result: "ResultModel" = ResultModel( - **torch.load( - result_bytes, map_location="cpu", weights_only=False - ) - ) - - # Close bytes - result_bytes.close() - - # Handle result - self.handle_result(result.value) - - # Or if there was some error. - elif response.status == ResponseModel.JobStatus.ERROR: - raise Exception(str(response)) - - return response - - def submit_request(self, request: "RequestModel") -> "ResponseModel": - """Sends request to the remote endpoint and handles the response object. - - Raises: - Exception: If there was a status code other than 200 for the response. - - Returns: - (ResponseModel): Response. - """ - - response = requests.post( - f"{self.address}/request", - json=request.model_dump(exclude=["id", "received"]), - headers={"ndif-api-key": self.api_key}, - ) - - if response.status_code == 200: - - return self.handle_response(response.json()) - - else: - - msg = response.json()['detail'] - raise ConnectionError(msg) - - def get_response(self) -> "ResponseModel": - """Retrieves and handles the response object from the remote endpoint. - - Raises: - Exception: If there was a status code other than 200 for the response. - - Returns: - (ResponseModel): Response. - """ - - response = requests.get( - f"{self.address}/response/{self.job_id}", - headers={"ndif-api-key": self.api_key}, - ) - - if response.status_code == 200: - - return self.handle_response(response.json()) - - else: - - raise Exception(response.reason) - - def blocking_request(self, request: "RequestModel"): - """Send intervention request to the remote service while waiting for updates via websocket. - - Args: - request (RequestModel): Request. - """ - - from ...schema.Response import ResponseModel - - # Create a socketio connection to the server. 
- with socketio.SimpleClient( - logger=logger, reconnection_attempts=10 - ) as sio: - # Connect - sio.connect( - self.ws_address, - socketio_path="/ws/socket.io", - transports=["websocket"], - wait_timeout=10, - ) - - # Give request session ID so server knows to respond via websockets to us. - request.session_id = sio.sid - - # Submit request via - self.submit_request(request) - - # Loop until - while True: - if ( - self.handle_response(sio.receive()[1]).status - == ResponseModel.JobStatus.COMPLETED - ): - break - - def non_blocking_request(self, request: "RequestModel" = None): - """Send intervention request to the remote service if request provided. Otherwise get job status. - - Sets CONFIG.API.JOB_ID on initial request as to later get the status of said job. - - When job is completed, clear CONFIG.API.JOB_ID to request a new job. - - Args: - request (RequestModel): Request if submitting a new request. Defaults to None - """ - - from ...schema.Response import ResponseModel - - if request is not None: - - # Submit request via - response = self.submit_request(request) - - CONFIG.API.JOB_ID = response.id - - CONFIG.save() - - else: - - try: - - response = self.get_response() - - if response.status == ResponseModel.JobStatus.COMPLETED: - - CONFIG.API.JOB_ID = None - - CONFIG.save() - - except Exception as e: - - CONFIG.API.JOB_ID = None - - CONFIG.save() - - raise e diff --git a/src/nnsight/contexts/backends/__init__.py b/src/nnsight/contexts/backends/__init__.py deleted file mode 100644 index 7a56b011..00000000 --- a/src/nnsight/contexts/backends/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Any - - -class Backend: - """A backend is what executes a context object when it __exit__s.""" - - def __call__(self, obj: Any) -> None: - """Handles execution of a context object on exit. (like a Tracer or Session). - - Args: - obj (Any): Context object to execute. - """ - - raise NotImplementedError() - - -from .BridgeBackend import BridgeBackend, BridgeMixin -from .EditBackend import EditBackend, EditMixin -from .LocalBackend import LocalBackend, LocalMixin -from .NoopBackend import NoopBackend -from .RemoteBackend import RemoteBackend, RemoteMixin diff --git a/src/nnsight/contexts/session/Iterator.py b/src/nnsight/contexts/session/Iterator.py deleted file mode 100644 index b3700e16..00000000 --- a/src/nnsight/contexts/session/Iterator.py +++ /dev/null @@ -1,110 +0,0 @@ -from __future__ import annotations - -import weakref -from collections.abc import Iterable -from typing import TYPE_CHECKING, Iterable, Tuple, Union - -from ... import util -from ...tracing import protocols -from ...tracing.Node import Node -from .. import check_for_dependencies, resolve_dependencies -from ..GraphBasedContext import GraphBasedContext - -if TYPE_CHECKING: - from ...intervention import InterventionProxy - from ...tracing.Bridge import Bridge - - -class Iterator(GraphBasedContext): - """Intervention loop context for iterative execution of an intervention graph. - - Attributes: - - data (Iterable): Data to iterate over. - - return_context (bool): If True, returns the Iterator object upon entering the Iterator context. 
- """ - - def __init__( - self, data: Iterable, *args, return_context: bool = False, **kwargs - ) -> None: - - self.data: Iterable = data - self._return_context: bool = return_context - - super().__init__(*args, **kwargs) - - def __enter__( - self, - ) -> Union["InterventionProxy", Tuple["InterventionProxy", Iterator]]: - - super().__enter__() - - self.data, has_dependencies = check_for_dependencies(self.data) - - proxy_value = None - - if self.graph.validate: - - proxy_value = util.apply( - self.data, lambda node: node.args[0].proxy_value, Node - ) - - if len(proxy_value) != 0: - - proxy_value = ( - next(proxy_value) - if hasattr(proxy_value, "__next__") - else proxy_value[0] - ) - - iter_item_proxy: "InterventionProxy" = protocols.ValueProtocol.add( - self.graph, proxy_value - ) - - if self._return_context: - return iter_item_proxy, self - else: - return iter_item_proxy - - ### BACKENDS ######## - - def local_backend_execute(self) -> None: - - self.graph.reset() - - bridge: "Bridge" = protocols.BridgeProtocol.get_bridge(self.graph) - - bridge.locks += 1 - - data = resolve_dependencies(self.data) - - last_idx: int = len(data) - 1 - - for idx, item in enumerate(data): - - if idx != 0: - - self.graph.reset() - - last_iter = idx == last_idx - - if last_iter: - - bridge.locks -= 1 - - protocols.ValueProtocol.set( - self.graph.nodes[f"{protocols.ValueProtocol.__name__}_0"], item - ) - - try: - self.graph.execute() - except protocols.EarlyStopProtocol.EarlyStopException as e: - break - finally: - graph = self.graph - graph.alive = False - - if not isinstance(graph, weakref.ProxyType): - self.graph = weakref.proxy(graph) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} at {hex(id(self))}>" diff --git a/src/nnsight/contexts/session/Session.py b/src/nnsight/contexts/session/Session.py deleted file mode 100644 index 851abd14..00000000 --- a/src/nnsight/contexts/session/Session.py +++ /dev/null @@ -1,154 +0,0 @@ -from __future__ import annotations - -import weakref -from typing import TYPE_CHECKING, Any, Dict, Iterable - -from ...tracing.Bridge import Bridge -from ...tracing.Graph import Graph -from ...tracing.protocols import EarlyStopProtocol -from ..backends import Backend, BridgeBackend, RemoteMixin -from ..GraphBasedContext import GraphBasedContext -from .Iterator import Iterator - -if TYPE_CHECKING: - from ...models.mixins import RemoteableMixin - from ...models.NNsightModel import NNsight - - -class Session(GraphBasedContext, RemoteMixin): - """A Session is a root Collection that handles adding new Graphs and new Collections while in the session. - - Attributes: - bridge (Bridge): Bridge object which stores all Graphs added during the session and handles interaction between them - graph (Graph): Root Graph where operations and values meant for access by all subsequent Graphs should be stored and referenced. - model (NNsight): NNsight model. - backend (Backend): Backend for this context object. 
- """ - - def __init__( - self, - backend: Backend, - model: "NNsight", - *args, - bridge: Bridge = None, - **kwargs, - ) -> None: - - self.bridge = Bridge() if bridge is None else bridge - - self.model = model - - GraphBasedContext.__init__( - self, - backend, - bridge=self.bridge, - proxy_class=self.model.proxy_class, - *args, - **kwargs, - ) - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - - self.model._session = None - - super().__exit__(exc_type, exc_val, exc_tb) - - def iter(self, iterable: Iterable, **kwargs) -> Iterator: - """Creates an Iterator context to iteratively execute an intervention graph, with an update item at each iteration. - - Args: - - iterable (Iterable): Data to iterate over. - - return_context (bool): If True, returns the Iterator context. Default: False. - - Returns: - Iterator: Iterator context. - - Example: - Setup: - .. code-block:: python - import torch - from collections import OrderedDict - input_size = 5 - hidden_dims = 10 - output_size = 2 - model = nn.Sequential(OrderedDict([ - ('layer1', torch.nn.Linear(input_size, hidden_dims)), - ('layer2', torch.nn.Linear(hidden_dims, output_size)), - ])) - input = torch.rand((1, input_size)) - - Ex: - .. code-block:: python - with model.session() as session: - l = session.apply(list).save() - with session.iter([0, 1, 2]) as item: - l.append(item) - """ - - bridge = weakref.proxy(self.bridge) - - backend = BridgeBackend(bridge) - - return Iterator( - iterable, - backend, - bridge=bridge, - proxy_class=self.model.proxy_class, - **kwargs, - ) - - ### BACKENDS ######## - - def local_backend_execute(self) -> Dict[int, Graph]: - - try: - super().local_backend_execute() - except EarlyStopProtocol.EarlyStopException: - pass - - local_result = self.bridge.id_to_graph - - self.bridge = weakref.proxy(self.bridge) - - return local_result - - def remote_backend_get_model_key(self) -> str: - - self.model: "RemoteableMixin" - - return self.model.to_model_key() - - def remote_backend_postprocess_result(self, local_result: Dict[int, Graph]): - - from ...schema.Response import ResultModel - - return { - id: ResultModel.from_graph(graph) - for id, graph in local_result.items() - } - - def remote_backend_handle_result_value( - self, value: Dict[int, Dict[str, Any]] - ): - - for graph_id, saves in value.items(): - - graph = self.bridge.id_to_graph[graph_id] - - for node_name, node_value in saves.items(): - graph.nodes[node_name]._value = node_value - - graph.alive = False - - def remote_backend_cleanup(self): - - self.bridge = weakref.proxy(self.bridge) - - graph = self.graph - graph.alive = False - - if not isinstance(graph, weakref.ProxyType): - self.graph = weakref.proxy(graph) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} at {hex(id(self))}>" diff --git a/src/nnsight/intervention.py b/src/nnsight/intervention.py deleted file mode 100755 index 7bc9175d..00000000 --- a/src/nnsight/intervention.py +++ /dev/null @@ -1,617 +0,0 @@ -"""This module contains logic to interleave a computation graph (an intervention graph) with the computation graph of a model. - -The :class:`InterventionProxy ` class extends the functionality of a base nnsight.tracing.Proxy.Proxy object and makes it easier for users to interact with. - -:func:`intervene() ` is the entry hook into the models computation graph in order to interleave an intervention graph. - -The :class:`HookModel ` provides a context manager for adding input and output hooks to modules and removing them upon context exit. 
-""" - -from __future__ import annotations - -import inspect -from collections import defaultdict -from contextlib import AbstractContextManager -from typing import Any, Callable, Collection, Dict, List, Tuple, Union - -import torch -from torch.utils.hooks import RemovableHandle -from typing_extensions import Self - -from . import util -from .contexts.Conditional import Conditional -from .tracing import protocols -from .tracing.Graph import Graph -from .tracing.Node import Node -from .tracing.protocols import Protocol -from .tracing.Proxy import Proxy - - -class InterventionProxy(Proxy): - """Sub-class for Proxy that adds additional user functionality to proxies. - - Examples: - - Saving a proxy so it is not deleted at the completion of it's listeners is enabled with ``.save()``: - - .. code-block:: python - - with model.trace('The Eiffel Tower is in the city of'): - hidden_states = model.lm_head.input.save() - logits = model.lm_head.output.save() - - print(hidden_states) - print(logits) - """ - - def __init__(self, node: Node) -> None: - super().__init__(node) - - self.__dict__["_grad"] = None - - self._grad: InterventionProxy - - def save(self) -> InterventionProxy: - """Method when called, indicates to the intervention graph to not delete the tensor values of the result. - - Returns: - InterventionProxy: Proxy. - """ - - # Add a 'lock' node with the save proxy as an argument to ensure the values are never deleted. - # This is because 'lock' nodes never actually get set and therefore there will always be a - # dependency for the save proxy. - - protocols.LockProtocol.add(self.node) - - return self - - def stop(self) -> InterventionProxy: - """Method when called, indicates to the intervention graph to stop the execution of the model after this Proxy/Node is completed.. - - Returns: - InterventionProxy: Proxy. - """ - - protocols.EarlyStopProtocol.add(self.node.graph, self.node) - - return self - - def update(self, value: Union[Node, Any]) -> InterventionProxy: - """Updates the value of the Proxy via the creation of the UpdateProtocol node. - - Args: - - value (Union[Node, Any]): New proxy value. - - Returns: - InterventionProxy: Proxy. - - .. codeb-block:: python - with model.trace(input) as tracer: - num = tracer.apply(int, 0) - num.update(5) - """ - - return protocols.UpdateProtocol.add(self.node, value) - - @property - def grad(self) -> InterventionProxy: - """ - Calling denotes the user wishes to get the grad of proxy tensor and therefore we create a Proxy of that request. - Only generates a proxy the first time it is references otherwise return the already set one. - - Returns: - Proxy: Grad proxy. - """ - if self._grad is None: - - self.__dict__["_grad"] = protocols.GradProtocol.add(self.node) - - return self._grad - - @grad.setter - def grad(self, value: Union[InterventionProxy, Any]) -> None: - """ - Calling denotes the user wishes to set the grad of this proxy tensor and therefore we create a Proxy of that request via a SwapProtocol. - - Args: - value (Union[InterventionProxy, Any]): Value to set output to. - """ - protocols.SwapProtocol.add(self.grad.node, value) - - self.__dict__["_grad"] = None - - def __call__(self, *args, **kwargs) -> Self: - - # We don't want to call backward on fake tensors. - # We also want to track the number of times .backward() has been called so .grad on a Proxy refers to the right backward pass. 
- if ( - self.node.target is util.fetch_attr - and isinstance(self.node.args[1], str) - and self.node.args[1] == "backward" - ): - - # Clear all .grad proxies so allow users to get the ,.grad of the next backward pass. - for node in self.node.graph.nodes.values(): - - try: - - if node.proxy._grad is not None: - - node.proxy.__dict__["_grad"] = None - - except ReferenceError: - pass - - # Use GradProtocol to increment the tracking of the number of times .backward() has been called. - protocols.GradProtocol.increment(self.node.graph) - - return self.node.create( - proxy_value=None, - target=Proxy.proxy_call, - args=[self.node] + list(args), - kwargs=kwargs, - ) - - return super().__call__(*args, **kwargs) - - def __setattr__( - self, key: Union[InterventionProxy, Any], value: Union[Self, Any] - ) -> None: - - # We catch setting .grad as that is a special Protocol vs. setting attributes generally. - if key == "grad": - return getattr(self.__class__, key).fset(self, value) - - return super().__setattr__(key, value) - - @property - def shape(self) -> Collection[torch.Size]: - """Property to retrieve the shape of the traced proxy value or real value. - - Returns: - Union[torch.Size,Collection[torch.Size]]: Proxy value shape or collection of shapes. - """ - - if not self.node.attached(): - - return util.apply(self.value, lambda x: x.shape, torch.Tensor) - - # If we haven't scanned in a proxy_value, just return a proxy to get the attribute. - if self.node.proxy_value is inspect._empty: - - return super().__getattr__("shape") - - return util.apply( - self.node.proxy_value, lambda x: x.shape, torch.Tensor - ) - - @property - def device(self) -> Collection[torch.device]: - """Property to retrieve the device of the traced proxy value or real value. - - Returns: - Union[torch.Size,Collection[torch.device]]: Proxy value device or collection of devices. - """ - - if not self.node.attached(): - - return util.apply(self.value, lambda x: x.device, torch.Tensor) - - # If we haven't scanned in a proxy_value, just return a proxy to get the attribute. - if self.node.proxy_value is inspect._empty: - - return super().__getattr__("device") - - return util.apply( - self.node.proxy_value, lambda x: x.device, torch.Tensor - ) - - @property - def dtype(self) -> Collection[torch.device]: - """Property to retrieve the dtype of the traced proxy value or real value. - - Returns: - Union[torch.Size,Collection[torch.dtype]]: Proxy value dtype or collection of dtypes. - """ - - if not self.node.attached(): - - return util.apply(self.value, lambda x: x.dtype, torch.Tensor) - - # If we haven't scanned in a proxy_value, just return a proxy to get the attribute. - if self.node.proxy_value is inspect._empty: - - return super().__getattr__("dtype") - - return util.apply( - self.node.proxy_value, lambda x: x.dtype, torch.Tensor - ) - - -class InterventionProtocol(Protocol): - """Primary Protocol that handles tracking and injecting inputs and outputs from a torch model into the overall intervention Graph. - Uses an attachment on the Graph to store the names of nodes that need to be injected with data from inputs or outputs of modules. - """ - - attachment_name = "nnsight_module_nodes" - condition: bool = False - - @classmethod - def add( - cls, - graph: "Graph", - proxy_value: Any, - args: List[Any] = None, - kwargs: Dict[str, Any] = None, - ) -> Proxy: - """Adds an InterventionProtocol Node to a Graph. - - Args: - graph (Graph): Graph to add to. - module_path (str): Module path of data this Node depends on (ex. 
model.module1.module2.output) - proxy_value (Any): Proxy value. - args (List[Any], optional): Args. Defaults to None. - kwargs (Dict[str, Any], optional): Kwargs. Defaults to None. - - Returns: - Proxy: _description_ - """ - - # Creates the InterventionProtocol Node. - proxy = graph.create( - proxy_value=proxy_value, target=cls, args=args, kwargs=kwargs - ) - - cls.compile(proxy.node) - - return proxy - - @classmethod - def compile(cls, node: Node) -> None: - - graph = node.graph - - module_path, *_ = node.args - - # Add attachment if it does not exist. - if cls.attachment_name not in graph.attachments: - - graph.attachments[cls.attachment_name] = dict() - - # More than one Node can depend on a given input or output, therefore we store a list of node names. - arguments = graph.attachments[cls.attachment_name] - - if module_path not in arguments: - arguments[module_path] = [] - - # Append the newly created nodes name. - arguments[module_path].append(node.name) - - @classmethod - def get_interventions(cls, graph: "Graph") -> Dict: - """Returns mapping from module_paths to InterventionNode names added to the given Graph. - - Args: - graph (Graph): Graph. - - Returns: - Dict: Interventions. - """ - - return graph.attachments.get(cls.attachment_name, dict()) - - @classmethod - def concat( - cls, - activations: Any, - value: Any, - batch_start: int, - batch_size: int, - total_batch_size: int, - ): - def _concat(values): - - data_type = type(values[0]) - - if data_type == torch.Tensor: - orig_size = values[-1] - new_size = sum([value.shape[0] for value in values[:-1]]) - if new_size == orig_size: - return torch.concatenate(values[:-1]) - - return values[0] - elif data_type == list: - return [ - _concat([value[value_idx] for value in values]) - for value_idx in range(len(values[0])) - ] - elif data_type == tuple: - return tuple( - [ - _concat([value[value_idx] for value in values]) - for value_idx in range(len(values[0])) - ] - ) - elif data_type == dict: - return { - key: _concat([value[key] for value in values]) - for key in values[0].keys() - } - return values[0] - - def narrow1(acts: torch.Tensor): - if total_batch_size == acts.shape[0]: - return acts.narrow(0, 0, batch_start) - - return acts - - pre = util.apply(activations, narrow1, torch.Tensor) - - post_batch_start = batch_start + batch_size - - def narrow2(acts: torch.Tensor): - if total_batch_size == acts.shape[0]: - return acts.narrow( - 0, post_batch_start, acts.shape[0] - post_batch_start - ) - - return acts - - post = util.apply( - activations, - narrow2, - torch.Tensor, - ) - - orig_sizes = util.apply(activations, lambda x: x.shape[0], torch.Tensor) - - return _concat([pre, value, post, orig_sizes]) - - @classmethod - def intervene( - cls, - activations: Any, - module_path: str, - key: str, - intervention_handler: InterventionHandler, - ): - """Entry to intervention graph. This should be hooked to all modules involved in the intervention graph. - - Forms the current module_path key in the form of . - Checks the graphs InterventionProtocol attachment attribute for this key. - If exists, value is a list of node names to iterate through. - Node args for intervention type nodes should be ``[module_path, batch_size, batch_start, call_iter]``. - Checks and updates the counter for the given intervention node. If counter is not ready yet continue. - Using batch_size and batch_start, apply torch.narrow to tensors in activations to select - only batch indexed tensors relevant to this intervention node. 
Sets the value of a node - using the indexed values. Using torch.narrow returns a view of the tensors as opposed to a copy allowing - subsequent downstream nodes to make edits to the values only in the relevant tensors, and have it update the original - tensors. This both prevents interventions from effecting bathes outside their preview and allows edits - to the output from downstream intervention nodes in the graph. - - Args: - activations (Any): Either the inputs or outputs of a torch module. - module_path (str): Module path of the current relevant module relative to the root model. - key (str): Key denoting either "input" or "output" of module. - intervention_handler (InterventionHandler): Handler object that stores the intervention graph and keeps track of module call count. - - Returns: - Any: The activations, potentially modified by the intervention graph. - """ - - # Key to module activation intervention nodes has format: . - module_path = f"{module_path}.{key}" - - interventions = cls.get_interventions(intervention_handler.graph) - - if module_path in interventions: - intervention_node_names = interventions[module_path] - - # Multiple intervention nodes can have same module_path if there are multiple invocations. - for intervention_node_name in intervention_node_names: - - node = intervention_handler.graph.nodes[intervention_node_name] - - # Args for intervention nodes are (module_path, batch_group_idx, call_iter). - _, batch_group_idx, call_iter = node.args - - batch_start, batch_size = intervention_handler.batch_groups[ - batch_group_idx - ] - - # Updates the count of intervention node calls. - # If count matches call_iter, time to inject value into node. - if call_iter != intervention_handler.count( - intervention_node_name - ): - - continue - - value = activations - - narrowed = False - - if len(intervention_handler.batch_groups) > 1: - - def narrow(acts: torch.Tensor): - - if acts.shape[0] == intervention_handler.batch_size: - - nonlocal narrowed - - narrowed = True - - return acts.narrow(0, batch_start, batch_size) - - return acts - - value = util.apply( - activations, - narrow, - torch.Tensor, - ) - - # Value injection. - node.set_value(value) - - # Check if through the previous value injection, there was a 'swap' intervention. - # This would mean we want to replace activations for this batch with some other ones. - value = protocols.SwapProtocol.get_swap( - intervention_handler.graph, value - ) - - # If we narrowed any data, we need to concat it with data before and after it. - if narrowed: - - activations = cls.concat( - activations, - value, - batch_start, - batch_size, - intervention_handler.batch_size, - ) - # Otherwise just return the whole value as the activations. - else: - - activations = value - - return activations - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. - """ - - return { - "node": {"color": "green4", "shape": "box"}, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument display - "arg_kname": defaultdict( - lambda: None, {0: "key", 1: "batch_size", 2: "batch_start"} - ), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument Edge display - - -class HookHandler(AbstractContextManager): - """Context manager that applies input and/or output hooks to modules in a model. - - Registers provided hooks on __enter__ and removes them on __exit__. 
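A minimal usage sketch (the module key and hook below are hypothetical; the hook signatures are the ones described in the Attributes section that follows):

.. code-block:: python

    def log_output(output, module_path):
        # Inspect (and optionally replace) the module's output.
        print(module_path, type(output))
        return output

    with HookHandler(model, ["layer1.output"], output_hook=log_output):
        model(example_input)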
- - Attributes: - model (torch.nn.Module): Root model to access modules and apply hooks to. - modules (List[Tuple[torch.nn.Module, str]]): List of modules to apply hooks to along with their module_path. - input_hook (Callable): Function to apply to inputs of designated modules. - Should have signature of [inputs(Any), module_path(str)] -> inputs(Any) - output_hook (Callable): Function to apply to outputs of designated modules. - Should have signature of [outputs(Any), module_path(str)] -> outputs(Any) - handles (List[RemovableHandle]): Handles returned from registering hooks as to be used when removing hooks on __exit__. - """ - - def __init__( - self, - model: torch.nn.Module, - module_keys: List[str], - input_hook: Callable = None, - output_hook: Callable = None, - ) -> None: - self.model = model - self.module_keys = module_keys - - self.input_hook = input_hook - self.output_hook = output_hook - - self.handles: List[RemovableHandle] = [] - - def __enter__(self) -> HookHandler: - """Registers input and output hooks to modules if they are defined. - - Returns: - HookModel: HookModel object. - """ - - for module_key in self.module_keys: - - module_atoms = module_key.split(".") - - if len(module_atoms) == 1: - continue - - *module_atoms, hook_type = module_atoms - - module_path = ".".join(module_atoms) - - module: torch.nn.Module = util.fetch_attr(self.model, module_path) - - if hook_type == "input": - - def input_hook(module, input, kwargs, module_path=module_path): - return self.input_hook((input, kwargs), module_path) - - self.handles.append( - module.register_forward_pre_hook( - input_hook, with_kwargs=True, prepend=True - ) - ) - - elif hook_type == "output": - - def output_hook(module, input, output, module_path=module_path): - return self.output_hook(output, module_path) - - self.handles.append( - module.register_forward_hook(output_hook, prepend=True) - ) - - return self - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - """Removes all handles added during __enter__.""" - - for handle in self.handles: - handle.remove() - - if isinstance(exc_val, Exception): - raise exc_val - - -class InterventionHandler: - """Object passed to InterventionProtocol.intervene to store information about the current interleaving execution run. - - Like the Intervention Graph, the total batch size that is being executed, and a counter for how many times an Intervention node has been attempted to be executed. - """ - - def __init__( - self, graph: Graph, batch_groups: List[Tuple[int, int]], batch_size: int - ) -> None: - - self.graph = graph - self.batch_groups = batch_groups - self.batch_size = batch_size - self.call_counter: Dict[str, int] = {} - - def count(self, name: str) -> int: - """Increments the count of times a given Intervention Node has tried to be executed and returns the count. - - Args: - name (str): Name of intervention node to return count for. - - Returns: - int: Count. - """ - - if name not in self.call_counter: - - self.call_counter[name] = 0 - - else: - - self.call_counter[name] += 1 - - return self.call_counter[name] diff --git a/src/nnsight/intervention/__init__.py b/src/nnsight/intervention/__init__.py new file mode 100755 index 00000000..33353bbd --- /dev/null +++ b/src/nnsight/intervention/__init__.py @@ -0,0 +1,7 @@ + +""" +The `intervention` module extends the `tracing` module to add PyTorch specific interventions to a given computation graph. +It defines its own: protocols, contexts, backends and graph primitives to achieve this. 
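For example, the wrapper class and module Envoy it exposes can be imported directly:

.. code-block:: python

    from nnsight.intervention import NNsight, Envoy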
+""" +from .base import NNsight +from .envoy import Envoy \ No newline at end of file diff --git a/src/nnsight/intervention/backends/__init__.py b/src/nnsight/intervention/backends/__init__.py new file mode 100755 index 00000000..1f0b7ba3 --- /dev/null +++ b/src/nnsight/intervention/backends/__init__.py @@ -0,0 +1,3 @@ +from .editing import EditingBackend +from .remote import RemoteBackend +from .noop import NoopBackend \ No newline at end of file diff --git a/src/nnsight/intervention/backends/editing.py b/src/nnsight/intervention/backends/editing.py new file mode 100755 index 00000000..353d0051 --- /dev/null +++ b/src/nnsight/intervention/backends/editing.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING +from ...tracing.backends import Backend + +from ...tracing.graph import Graph +if TYPE_CHECKING: + from .. import NNsight + +class EditingBackend(Backend): + """Backend to set the default graph to the current InterventionGraph. Assumes the final Node is an InterleavingTracer. + """ + + def __init__(self, model: "NNsight") -> None: + + self.model = model + + def __call__(self, graph: Graph) -> None: + + self.model._default_graph = graph.nodes[-1].args[0] \ No newline at end of file diff --git a/src/nnsight/intervention/backends/noop.py b/src/nnsight/intervention/backends/noop.py new file mode 100755 index 00000000..cffd8db1 --- /dev/null +++ b/src/nnsight/intervention/backends/noop.py @@ -0,0 +1,9 @@ +from ...tracing.graph.graph import Graph +from ...tracing.backends import Backend + + +class NoopBackend(Backend): + + def __call__(self, graph: Graph) -> None: + graph.nodes.clear() + graph.stack.clear() \ No newline at end of file diff --git a/src/nnsight/intervention/backends/remote.py b/src/nnsight/intervention/backends/remote.py new file mode 100755 index 00000000..46a3187a --- /dev/null +++ b/src/nnsight/intervention/backends/remote.py @@ -0,0 +1,392 @@ +from __future__ import annotations + +import io +import sys +import time +from datetime import datetime +from typing import Any, Dict, Optional, Tuple + +import msgspec +import requests +import socketio +import torch +from tqdm import tqdm + +from ... import __IPYTHON__, CONFIG, remote_logger +from ...schema.request import RequestModel, StreamValueModel +from ...schema.response import ResponseModel +from ...schema.result import RESULT, ResultModel +from ...tracing.backends import Backend +from ...tracing.graph import Graph +from ...util import NNsightError +from ..contexts.local import LocalContext, RemoteContext + + +class RemoteBackend(Backend): + """Backend to execute a context object via a remote service. + + Context object must inherit from RemoteMixin and implement its methods. + + Attributes: + + url (str): Remote host url. Defaults to that set in CONFIG.API.HOST. 
+ """ + + def __init__( + self, + model_key: str, + host: str = None, + blocking: bool = True, + job_id: str = None, + ssl: bool = None, + api_key: str = "", + ) -> None: + + self.model_key = model_key + + self.job_id = job_id or CONFIG.API.JOB_ID + self.ssl = CONFIG.API.SSL if ssl is None else ssl + self.zlib = CONFIG.API.ZLIB + self.format = CONFIG.API.FORMAT + self.api_key = api_key or CONFIG.API.APIKEY + self.blocking = blocking + + self.host = host or CONFIG.API.HOST + self.address = f"http{'s' if self.ssl else ''}://{self.host}" + self.ws_address = f"ws{'s' if CONFIG.API.SSL else ''}://{self.host}" + + def request(self, graph: Graph) -> Tuple[bytes, Dict[str, str]]: + + data = RequestModel.serialize(graph, self.format, self.zlib) + + headers = { + "model_key": self.model_key, + "format": self.format, + "zlib": str(self.zlib), + "ndif-api-key": self.api_key, + "sent-timestamp": str(time.time()), + } + + return data, headers + + def __call__(self, graph: Graph): + + if self.blocking: + + # Do blocking request. + result = self.blocking_request(graph) + + else: + + # Otherwise we are getting the status / result of the existing job. + result = self.non_blocking_request(graph) + + if result is not None: + ResultModel.inject(graph, result) + + def handle_response( + self, response: ResponseModel, graph: Optional[Graph] = None + ) -> Optional[RESULT]: + """Handles incoming response data. + + Logs the response object. + If the job is completed, retrieve and stream the result from the remote endpoint. + Use torch.load to decode and load the `ResultModel` into memory. + Use the backend object's .handle_result method to handle the decoded result. + + Args: + response (Any): Json data to concert to `ResponseModel` + + Raises: + Exception: If the job's status is `ResponseModel.JobStatus.ERROR` + + Returns: + ResponseModel: ResponseModel. + """ + + # Log response for user + response.log(remote_logger) + + # If job is completed: + if response.status == ResponseModel.JobStatus.COMPLETED: + + # If the response has no result data, it was too big and we need to stream it from the server. + if response.data is None: + + result = self.get_result(response.id) + else: + + result = response.data + + return result + + # If were receiving a streamed value: + elif response.status == ResponseModel.JobStatus.STREAM: + + # Second item is index of LocalContext node. + # First item is the streamed value from the remote service. 
+ + index, dependencies = response.data + + ResultModel.inject(graph, dependencies) + + node = graph.nodes[index] + + node.execute() + + elif response.status == ResponseModel.JobStatus.NNSIGHT_ERROR: + if graph.debug: + error_node = graph.nodes[response.data["node_id"]] + try: + raise NNsightError( + response.data["err_message"], + error_node.index, + response.data["traceback"], + ) + except NNsightError as nns_err: + if ( + __IPYTHON__ + ): # in IPython the traceback content is rendered by the Error itself + # add the error node traceback to the the error's traceback + nns_err.traceback_content += "\nDuring handling of the above exception, another exception occurred:\n\n" + nns_err.traceback_content += error_node.meta_data["traceback"] + else: # else we print the traceback manually + print(f"\n{response.data['traceback']}") + print( + "During handling of the above exception, another exception occurred:\n" + ) + print(f"{error_node.meta_data['traceback']}") + + sys.tracebacklimit = 0 + raise nns_err from None + finally: + if __IPYTHON__: + sys.tracebacklimit = None + else: + print(f"\n{response.data['traceback']}") + raise SystemExit("Remote exception.") + + def submit_request( + self, data: bytes, headers: Dict[str, Any] + ) -> Optional[ResponseModel]: + """Sends request to the remote endpoint and handles the response object. + + Raises: + Exception: If there was a status code other than 200 for the response. + + Returns: + (ResponseModel): Response. + """ + + from ...schema.response import ResponseModel + + headers["Content-Type"] = "application/octet-stream" + + response = requests.post( + f"{self.address}/request", + data=data, + headers=headers, + ) + + if response.status_code == 200: + + response = ResponseModel(**response.json()) + + self.handle_response(response) + + return response + + else: + msg = response.reason + raise ConnectionError(msg) + + def get_response(self) -> Optional[RESULT]: + """Retrieves and handles the response object from the remote endpoint. + + Raises: + Exception: If there was a status code other than 200 for the response. + + Returns: + (ResponseModel): Response. + """ + + from ...schema.response import ResponseModel + + response = requests.get( + f"{self.address}/response/{self.job_id}", + headers={"ndif-api-key": self.api_key}, + ) + + if response.status_code == 200: + + response = ResponseModel(**response.json()) + + return self.handle_response(response) + + else: + + raise Exception(response.reason) + + def get_result(self, id: str) -> RESULT: + + result_bytes = io.BytesIO() + result_bytes.seek(0) + + # Get result from result url using job id. + with requests.get( + url=f"{self.address}/result/{id}", + stream=True, + ) as stream: + # Total size of incoming data. + total_size = float(stream.headers["Content-length"]) + + with tqdm( + total=total_size, + unit="B", + unit_scale=True, + desc="Downloading result", + ) as progress_bar: + # chunk_size=None so server determines chunk size. + for data in stream.iter_content(chunk_size=None): + progress_bar.update(len(data)) + result_bytes.write(data) + + # Move cursor to beginning of bytes. + result_bytes.seek(0) + + # Decode bytes with pickle and then into pydantic object. + result = torch.load(result_bytes, map_location="cpu", weights_only=False) + + result = ResultModel(**result).result + + # Close bytes + result_bytes.close() + + return result + + def blocking_request(self, graph: Graph) -> Optional[RESULT]: + """Send intervention request to the remote service while waiting for updates via websocket. 
+
+        Args:
+            graph (Graph): Intervention graph to execute remotely.
+        """
+
+        # We need to do some processing / optimizations on both the graph we're sending remotely
+        # and our local intervention graph, in order to handle the more complex Protocols for streaming.
+
+        # Create a socketio connection to the server.
+        with socketio.SimpleClient(reconnection_attempts=10) as sio:
+            # Connect
+            sio.connect(
+                self.ws_address,
+                socketio_path="/ws/socket.io",
+                transports=["websocket"],
+                wait_timeout=10,
+            )
+
+            remote_graph = preprocess(graph)
+
+            data, headers = self.request(remote_graph)
+
+            headers["session_id"] = sio.sid
+
+            # Submit the request.
+            response = self.submit_request(data, headers)
+
+            LocalContext.set(
+                lambda *args: self.stream_send(*args, job_id=response.id, sio=sio)
+            )
+
+            try:
+                # Loop until the job is completed.
+                while True:
+
+                    # Get pickled bytes value from the websocket.
+                    response = sio.receive()[1]
+                    # Convert to pydantic object.
+                    response = ResponseModel.unpickle(response)
+                    # Handle the response.
+                    result = self.handle_response(response, graph=graph)
+                    # Break when completed.
+                    if result is not None:
+                        return result
+
+            except Exception as e:
+
+                raise e
+
+            finally:
+                LocalContext.set(None)
+
+    def stream_send(
+        self, values: Dict[int, Any], job_id: str, sio: socketio.SimpleClient
+    ):
+        """Uploads values to the remote service for a given job id.
+
+        Args:
+            values (Dict[int, Any]): Values to upload.
+            job_id (str): Job id.
+            sio (socketio.SimpleClient): Connected websocket client.
+        """
+
+        sio.emit(
+            "stream_upload",
+            data=(StreamValueModel.serialize(values, self.format, self.zlib), job_id),
+        )
+
+    def non_blocking_request(self, graph: Graph):
+        """Sends an intervention request to the remote service if no job is in progress. Otherwise gets the status of the existing job.
+
+        Sets CONFIG.API.JOB_ID on the initial request so as to later get the status of said job.
+
+        When the job is completed, clears CONFIG.API.JOB_ID to allow requesting a new job.
+
+        Args:
+            graph (Graph): Intervention graph to execute remotely when submitting a new request.
+        """
+
+        if self.job_id is None:
+
+            data, headers = self.request(graph)
+
+            # Submit the request.
+            response = self.submit_request(data, headers)
+
+            CONFIG.API.JOB_ID = response.id
+
+            CONFIG.save()
+
+        else:
+
+            try:
+
+                result = self.get_response()
+
+                if result is not None:
+
+                    CONFIG.API.JOB_ID = None
+
+                    CONFIG.save()
+
+                return result
+
+            except Exception as e:
+
+                CONFIG.API.JOB_ID = None
+
+                CONFIG.save()
+
+                raise e
+
+
+def preprocess(graph: Graph):
+
+    new_graph = graph.copy()
+
+    for node in new_graph.nodes:
+
+        if node.target is LocalContext:
+
+            graph.nodes[node.index].kwargs["uploads"] = RemoteContext.from_local(node)
+
+    return new_graph
diff --git a/src/nnsight/intervention/base.py b/src/nnsight/intervention/base.py
new file mode 100755
index 00000000..5f55b7d9
--- /dev/null
+++ b/src/nnsight/intervention/base.py
@@ -0,0 +1,503 @@
+from __future__ import annotations
+
+from typing import (TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type, Union)
+
+import torch
+from typing_extensions import Self
+
+from .. import util
+from ..tracing.backends import Backend
+from .backends import NoopBackend
+from .contexts import EditingTracer, InterleavingTracer, Session
+from .envoy import Envoy
+from .graph import (InterventionGraph, InterventionNode, InterventionProxy,
+                    InterventionProxyType)
+from .graph.proxy import Proxy
+from .interleaver import Interleaver
+from .. import CONFIG
+
+
+class NNsight:
+    """Main class to be implemented as a wrapper for PyTorch models wishing to gain this package's functionality.
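A quick sketch of the basic wrapping pattern (`torch_module` and `example_input` are placeholder names here; the full `trace` documentation follows below):

.. code-block:: python

    from nnsight import NNsight

    model = NNsight(torch_module)

    with model.trace(example_input):
        output = model.output.save()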
+
+    Class Attributes:
+
+        proxy_class (Type[InterventionProxy]): InterventionProxy-like type to use as a Proxy for this Model's inputs and outputs. Can have Model specific functionality added to a new sub-class.
+        __methods__ (Dict[str,str]): Mapping from a method name, which will open up a .trace context, to the actual method name to execute / interleave with.
+            For example, let's say I had a method on my underlying ._model called `.run` that I wanted to have the NNsight interleaving functionality applied to.
+            I could define a method on my NNsight sub-class called `._run` which might look like:
+
+            .. code-block:: python
+
+                def _run(self, *inputs, **kwargs):
+
+                    inputs, kwargs = some_preprocessing(inputs, kwargs)
+
+                    return self._model.run(*inputs, **kwargs)
+
+            I could then have my __methods__ attribute look like `__methods__ = {'run': '_run'}`.
+            This would allow me to do:
+
+            .. code-block:: python
+
+                with model.run(...):
+
+                    output = model.output.save()
+
+
+
+    Attributes:
+        _model (torch.nn.Module): Underlying torch module.
+        _envoy (Envoy): Envoy for underlying model.
+        _session (Session): Session object if in a Session.
+        _default_graph (Graph): Intervention graph to start from when calling NNsight.trace. This is set via the editing context NNsight.edit.
+    """
+
+    __methods__: Dict[str, str] = dict()
+
+    proxy_class: Type[InterventionProxyType] = InterventionProxy
+
+    def __init__(
+        self,
+        model: torch.nn.Module,
+        rename: Optional[Dict[str, str]] = None
+    ) -> None:
+
+        self._model: torch.nn.Module = model
+        self._envoy: Envoy[InterventionProxy, InterventionNode] = Envoy(self._model, rename=rename)
+
+        self._session: Optional[Session] = None
+        self._default_graph: Optional[InterventionGraph] = None
+
+    #### Public API ##############
+
+    def trace(
+        self,
+        *inputs: Any,
+        trace: bool = True,
+        scan: bool = False,
+        method: Optional[str] = None,
+        invoker_kwargs: Optional[Dict[str, Any]] = None,
+        backend: Optional[Union[Backend, str]] = None,
+        **kwargs: Dict[str, Any],
+    ) -> Union[InterleavingTracer, Any]:
+        """Entrypoint into the tracing and interleaving functionality nnsight provides.
+
+        In short, allows access to the future inputs and outputs of modules in order to trace what operations you would like to perform on them.
+        This can be as simple as accessing and saving activations for inspection, or as complicated as transforming the activations and gradients in a forward pass over multiple inputs.
+
+        Args:
+            inputs (tuple[Any]): When positional arguments are provided directly to .trace, we assume there is only one Invoker and therefore
+                immediately create and enter an Invoker.
+            trace (bool, optional): Whether to open a tracing context. Otherwise immediately run the model and return the raw output. Defaults to True.
+            scan (bool): Exposed invoker kwarg to scan for the provided input. No effect if there is no input.
+            method (Optional[str]): String name of method to interleave with. Defaults to None and therefore NNsight._execute.
+            invoker_kwargs (Dict[str, Any], optional): Keyword arguments to pass to Invoker initialization, and then downstream to the model's .prepare_inputs(...) method. Used when giving input directly to `.trace(...)`. Defaults to None.
+            kwargs (Dict[str, Any]): Keyword arguments passed to Tracer initialization, and then downstream to the model's execution method.
+
+        Raises:
+            ValueError: If trace is False and no inputs were provided (nothing to run with).
+
+        Returns:
+            Union[Tracer, Any]: Either the Tracer used for tracing, or the raw output if trace is False.
+
+        Examples:
+
+            There are a few ways you can use ``.trace(...)`` depending on your use case.
+
+            Let's use this extremely basic model for our examples:
+
+            .. code-block:: python
+
+                import torch
+                from collections import OrderedDict
+
+                input_size = 5
+                hidden_dims = 10
+                output_size = 2
+
+                model = torch.nn.Sequential(OrderedDict([
+                    ('layer1', torch.nn.Linear(input_size, hidden_dims)),
+                    ('sigma1', torch.nn.Sigmoid()),
+                    ('layer2', torch.nn.Linear(hidden_dims, output_size)),
+                    ('sigma2', torch.nn.Sigmoid()),
+                ]))
+
+                example_input = torch.rand((1, input_size))
+
+
+            The first example has us running the model with a single example input, and saving the input and output of 'layer2' as well as the final output using the tracing context.
+
+            .. code-block:: python
+
+                from nnsight import NNsight
+
+                with NNsight(model).trace(example_input) as model:
+
+                    l2_input = model.layer2.input.save()
+                    l2_output = model.layer2.output.save()
+
+                    output = model.output.save()
+
+                print(l2_input)
+                print(l2_output)
+                print(output)
+
+            The second example allows us to divide up multiple inputs into one batch, and scope an inner invoker context to each one.
+            We indicate this simply by not passing any positional inputs into `.trace(...)`. The Tracer object then expects you to enter each input via `Tracer.invoke(...)`.
+
+            .. code-block:: python
+
+                example_input2 = torch.rand((1, input_size))
+
+                with NNsight(model).trace() as model:
+
+                    with model.invoke(example_input):
+
+                        output1 = model.output.save()
+
+                    with model.invoke(example_input2):
+
+                        output2 = model.output.save()
+
+                print(output1)
+                print(output2)
+        """
+
+        # If we're in a session, this trace is simply a child of the open trace.
+        if self._session is not None:
+
+            parent = self._session.graph
+
+        else:
+            parent = None
+
+        # Create Tracer.
+        tracer = InterleavingTracer(
+            self,
+            method=method,
+            backend=backend,
+            parent=parent,
+            **kwargs,
+        )
+
+        # If user provided input directly to .trace(...).
+        if len(inputs) > 0:
+
+            if invoker_kwargs is None:
+                invoker_kwargs = {}
+
+            invoker_kwargs['scan'] = scan
+
+            # Enter an invoker
+            tracer.invoke(*inputs, **invoker_kwargs).__enter__()
+
+            # If trace is False, we'll enter the Tracer context immediately and enter an Invoker context with the provided inputs as well.
+            # We'll also save the output of the model and return its value directly.
+            if not trace:
+
+                with tracer:
+
+                    output = self._envoy.output.save()
+
+                if isinstance(output, Proxy):
+
+                    output = output.value
+
+                return output
+
+        # If trace is False, you had to have provided an input.
+        if not trace:
+
+            raise ValueError("Can't execute on no inputs!")
+
+        return tracer
+
+    def scan(self, *inputs, **kwargs) -> InterleavingTracer:
+        """Context just to populate fake tensor proxy values using scan and validate.
+        Useful when looking for just the shapes of future tensors.
+
+        Examples:
+
+            .. code-block:: python
+
+                with model.scan(" "):
+
+                    dim = model.module.output.shape[-1]
+
+                print(dim)
+
+        Returns:
+            Tracer: Tracer context with Noop backend.
+        """
+
+        return self.trace(
+            *inputs, **kwargs, scan=True, validate=True, backend=NoopBackend()
+        )
+
+    def edit(
+        self,
+        *inputs: Any,
+        inplace: bool = False,
+        **kwargs: Dict[str, Any],
+    ) -> Union[InterleavingTracer, Any]:
+        """Create a trace context with an edit backend and apply a list of edits.
+
+        The edit backend sets a default graph on an NNsight model copy which is
+        run on future trace calls.
+
+        By default, this operation is not in-place!
+
+        Args:
+            inplace (bool): If True, makes edits in-place.
+
+        Returns:
+            Union[Tracer, Any]: Either the Tracer used for tracing, or the raw output if trace is False.
+
+        Example:
+            .. code-block:: python
+
+                from nnsight import LanguageModel
+
+                gpt2 = LanguageModel("openai-community/gpt2")
+
+                class ComplexModule(torch.nn.Module):
+                    def __init__(self):
+                        super().__init__()
+                        self.one = WrapperModule()
+
+                    def forward(self, x):
+                        return self.one(x)
+
+                l0 = gpt2.transformer.h[0]
+                l0.attachment = ComplexModule()
+
+                with gpt2.edit("test") as gpt2_edited:
+                    acts = l0.output[0]
+                    l0.output[0][:] = l0.attachment(acts, hook=True)
+
+                with gpt2.trace(MSG_prompt):
+                    original = l0.output[0].clone().save()
+                    l0.output[0][:] *= 0.0
+                    original_output = gpt2.output.logits.save()
+
+                with gpt2_edited.trace(MSG_prompt):
+                    one = l0.attachment.one.output.clone().save()
+                    l0.attachment.output *= 0.0
+                    edited_output = gpt2.output.logits.save()
+
+                print(original_output)
+                print(edited_output)
+        """
+
+        return EditingTracer(self, *inputs, inplace=inplace, **kwargs)
+
+    def session(
+        self,
+        backend: Optional[Union[Backend, str]] = None,
+        **kwargs,
+    ) -> Session:
+        """Create a session context using a Session.
+
+        Args:
+            backend (Backend): Backend for this Session object.
+
+        Returns:
+            Session: Session.
+        """
+        if self._session is not None:
+
+            raise ValueError("Can't create a Session with one already open!")
+
+        return Session[InterventionNode, self.proxy_class](
+            self, backend=backend, **kwargs
+        )
+
+    def interleave(
+        self,
+        interleaver: Interleaver,
+        *args,
+        fn: Optional[Union[Callable, str]] = None,
+        **kwargs,
+    ) -> Any:
+        """This is the point in nnsight where we finally execute the model and interleave our custom logic.
+        Simply resolves the function and executes it given some input within the Interleaver context.
+        This method lives here rather than on the Interleaver because some models might want to define custom interleaving behavior, for example loading real model weights before execution.
+
+        Args:
+            interleaver (Interleaver): Interleaver.
+            fn (Optional[Union[Callable, str]], optional): Function to interleave with. Defaults to None and therefore NNsight._execute.
+
+        Returns:
+            Any: Output of the executed function.
+        """
+
+        if fn is None:
+            fn = self._execute
+        elif isinstance(fn, str):
+            fn = getattr(self, fn)
+
+        with interleaver:
+            return fn(*args, **kwargs)
+
+    def to(self, *args, **kwargs) -> Self:
+        """Override torch.nn.Module.to so this returns the NNsight model, not the underlying module, when doing: model = model.to(...)
+
+        Returns:
+            Self: This NNsight model.
+        """
+
+        self._envoy.to(*args, **kwargs)
+
+        return self
+
+    @property
+    def device(self) -> Optional[torch.device]:
+
+        try:
+            return next(self._model.parameters()).device
+        except StopIteration:
+            return None
+
+    def clear_edits(self) -> None:
+        """Resets the default graph of this model."""
+        self._default_graph = None
+
+    def get(self, path: str) -> Union[Envoy, InterventionProxyType]:
+        """Gets the Envoy/Proxy via its path.
+
+        e.g.:
+
+        .. code-block:: python
+
+            model = nnsight.LanguageModel("openai-community/gpt2")
+
+            module = model.get('transformer.h.0.mlp')
+
+            with model.trace("Hello"):
+                value = model.get('transformer.h.0.mlp.output').save()
+
+        Args:
+            path (str): '.' separated path.
+
+        Returns:
+            Union[Envoy, InterventionProxyType]: Fetched Envoy/Proxy.
+        """
+        return util.fetch_attr(self, path)
+
+    #### Private API ##############
+
+    def to_device(self, data: Any) -> Any:
+
+        device = self.device
+
+        if device is not None:
+
+            data = util.apply(data, lambda x: x.to(device), torch.Tensor)
+
+        return data
+
+    def _shallow_copy(self) -> Self:
+        """Creates a new instance copy of the same class with all the attributes of the original instance.
+
+        Returns:
+            Self: NNsightModel
+        """
+        copy = self.__class__.__new__(self.__class__)
+        for key, value in self.__dict__.items():
+            copy.__dict__[key] = value
+
+        return copy
+
+    def __repr__(self) -> str:
+        """Wrapper of ._model's representation as the NNsight model's representation.
+
+        Returns:
+            str: Representation.
+        """
+        return repr(self._envoy)
+
+    def __setattr__(self, key: Any, value: Any) -> None:
+        """Overload setattr to create and set an Envoy when trying to set a torch Module."""
+
+        if key not in ("_model", "_model_key") and isinstance(value, torch.nn.Module):
+
+            setattr(self._envoy, key, value)
+
+        else:
+
+            object.__setattr__(self, key, value)
+
+    def __getattr__(self, key: Any):
+        """Wrapper of ._envoy's attributes to access the module's inputs and outputs.
+
+        Returns:
+            Any: Attribute.
+        """
+
+        if key in self.__methods__:
+            return lambda *args, **kwargs: self.trace(
+                *args, method=self.__methods__[key], **kwargs
+            )
+
+        return getattr(self._envoy, key)
+
+    def __call__(self, *args: Any, **kwargs: Any) -> Any:
+        return self._envoy(*args, **kwargs)
+
+    ### NNsight VIRTUAL METHODS BELOW #####################################
+
+    def _execute(self, *args, **kwargs) -> Any:
+
+        args, kwargs = self.to_device((args, kwargs))
+
+        return self._model(*args, **kwargs)
+
+    def _prepare_input(
+        self, *args, **kwargs
+    ) -> Tuple[Tuple[Tuple[Any], Dict[str, Any]], int]:
+        """Virtual method to prepare inputs before batching and execution, and to return the batch size of the prepared inputs.
+
+        Default implementation just returns the inputs as-is and the length of the first input.
+
+        Args:
+            args (tuple[Any]): Positional inputs to prepare for batching and execution.
+            kwargs (Dict[str, Any]): Keyword inputs to prepare for batching and execution.
+
+        Returns:
+            Tuple[Tuple[Tuple[Any], Dict[str, Any]], int]: Prepared inputs, batch size of inputs.
+        """
+        return (args, kwargs), len(args[0])
+
+    def _batch(
+        self,
+        batched_inputs: Optional[Tuple[Tuple[Any], Dict[str, Any]]],
+        *args,
+        **kwargs,
+    ) -> Tuple[Tuple[Any], Dict[str, Any]]:
+        """Virtual method to batch together results from _prepare_input.
+
+        Default implementation concatenates the tensor positional args of the current batch with those of the most recent prepared inputs.
+
+        Args:
+            batched_inputs (Any): Current state of batched_inputs. Initially None.
+            args (tuple[Any]): Positional args of the most recent result from _prepare_input.
+            kwargs (Dict[str, Any]): Keyword args of the most recent result from _prepare_input.
+
+        Returns:
+            Any: Batched inputs.
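+
+        Example:
+
+            A rough sketch of the contract (hypothetical tensors; exact handling depends on the subclass):
+
+            .. code-block:: python
+
+                # The first invoker input starts the batch:
+                batched = model._batch(None, torch.rand(2, 5))
+
+                # Each subsequent invoker input is concatenated onto it along dim 0,
+                # so inputs with batch sizes 2 and 3 yield one batch of size 5.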
+ """ + + if batched_inputs is None: + return (args, kwargs) + + args = tuple( + [ + torch.concatenate((batched_inputs[i], args[i])) + for i in range(len(batched_inputs)) + ] + ) + + return args, kwargs + + +if TYPE_CHECKING: + + class NNsight(NNsight, Envoy[InterventionProxy, InterventionNode]): + def __getattribute__(self, name: str) -> Union[Envoy[InterventionProxy]]: + pass diff --git a/src/nnsight/intervention/contexts/__init__.py b/src/nnsight/intervention/contexts/__init__.py new file mode 100755 index 00000000..cb52102b --- /dev/null +++ b/src/nnsight/intervention/contexts/__init__.py @@ -0,0 +1,7 @@ +from .invoker import Invoker +from .local import LocalContext, RemoteContext +from .tracer import InterventionTracer +from .session import Session +from .interleaving import InterleavingTracer +from .editing import EditingTracer +from .globals import * \ No newline at end of file diff --git a/src/nnsight/intervention/contexts/editing.py b/src/nnsight/intervention/contexts/editing.py new file mode 100755 index 00000000..cc44bfa5 --- /dev/null +++ b/src/nnsight/intervention/contexts/editing.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING +from ..backends import EditingBackend +from . import InterleavingTracer +if TYPE_CHECKING: + from .. import NNsight + +class EditingTracer(InterleavingTracer): + """The `EditingTracer` exists because we want to return the edited model from __enter__ not the Tracer itself + While were here we might as well force the backend to be `EditingBackend` + + """ + + def __init__(self, model:"NNsight", *args, inplace: bool = False, **kwargs) -> None: + + + # If its not inplace we create a shallow copy of the model + # With the same references to the underlying model. + if not inplace: + + model = model._shallow_copy() + + super().__init__(model, *args, backend=EditingBackend(model), **kwargs) + + def __enter__(self): + + super().__enter__() + + return self._model diff --git a/src/nnsight/intervention/contexts/globals.py b/src/nnsight/intervention/contexts/globals.py new file mode 100755 index 00000000..e285c013 --- /dev/null +++ b/src/nnsight/intervention/contexts/globals.py @@ -0,0 +1,69 @@ +""" +Global patching allows us to add un-traceable operations to nnsight by replacing them with ones that use the GLOBAL_TRACING_CONTEXT to add the operation to the current graph. +""" + +from __future__ import annotations + +from inspect import getmembers, isclass + +import torch +from torch.utils import data + +from ...tracing.contexts.globals import ( + GlobalTracingContext, + global_patch, + global_patch_method, +) +from ...tracing.graph.proxy import proxy_patch +from . 
+
+# Torch classes
+global_patch(torch.nn.Parameter)
+global_patch(torch.nn.Linear)
+
+global_patch(data.DataLoader)
+
+# Tensor creation operations
+global_patch(torch.arange)
+global_patch(torch.empty)
+global_patch(torch.eye)
+global_patch(torch.full)
+global_patch(torch.linspace)
+global_patch(torch.logspace)
+global_patch(torch.ones)
+global_patch(torch.rand)
+global_patch(torch.randint)
+global_patch(torch.randn)
+global_patch(torch.randperm)
+global_patch(torch.zeros)
+global_patch(torch.cat)
+
+# Module methods
+
+global_patch_method(torch.nn.Module, torch.nn.Module.zero_grad)
+
+# All Optimizers
+for key, value in getmembers(torch.optim, isclass):
+
+    if issubclass(value, torch.optim.Optimizer):
+
+        global_patch(value)
+
+import math
+from inspect import getmembers, isbuiltin, isfunction
+
+import einops
+
+# Einops
+for key, value in getmembers(einops.einops, isfunction):
+    setattr(einops.einops, key, proxy_patch(value))
+
+# math
+for key, value in getmembers(math, isbuiltin):
+    setattr(math, key, proxy_patch(value))
+
+
+# Give it InterventionTracer methods
+class GlobalInterventionTracingContext(GlobalTracingContext, InterventionTracer):
+    GLOBAL_TRACING_CONTEXT: GlobalInterventionTracingContext
+
+
+GlobalTracingContext.GLOBAL_TRACING_CONTEXT = GlobalInterventionTracingContext()
diff --git a/src/nnsight/intervention/contexts/interleaving.py b/src/nnsight/intervention/contexts/interleaving.py
new file mode 100755
index 00000000..d686ad73
--- /dev/null
+++ b/src/nnsight/intervention/contexts/interleaving.py
@@ -0,0 +1,163 @@
+import weakref
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    List,
+    Optional,
+    Tuple,
+)
+
+from ...tracing.backends import Backend
+from ...tracing.graph import GraphType
+from ..graph import (
+    InterventionGraph,
+    InterventionNode,
+    InterventionNodeType,
+    ValidatingInterventionNode,
+)
+from ..interleaver import Interleaver
+
+from . import Invoker
+from . import InterventionTracer
+
+if TYPE_CHECKING:
+    from .. import NNsight
+
+
+class InterleavingTracer(InterventionTracer):
+    """This is the Tracer type that actually interleaves an `InterventionGraph` with a PyTorch model upon execute.
+
+    Attributes:
+        _model (NNsight): NNsight model.
+        invoker (Invoker): Current open invoker, so we can prevent opening two at the same time.
+        args (Tuple[...]): Positional arguments. The first is which method to interleave with; subsequent args are invoker inputs.
+        kwargs (Dict[str,Any]): Keyword arguments passed to the method to interleave. These are "global" keyword arguments for our chosen method,
+            while kwargs for a given invoker are used for preprocessing that invoker's input.
+    """
+
+    def __init__(
+        self,
+        model: "NNsight",
+        method: Optional[str] = None,
+        backend: Optional[Backend] = None,
+        parent: Optional[GraphType] = None,
+        validate: bool = False,
+        debug: Optional[bool] = None,
+        **kwargs,
+    ) -> None:
+
+        super().__init__(
+            graph_class=InterventionGraph,
+            model=model,
+            node_class=ValidatingInterventionNode if validate else InterventionNode,
+            proxy_class=model.proxy_class,
+            backend=backend,
+            parent=parent,
+            graph=model._default_graph,
+            debug=debug,
+        )
+
+        self._model = model
+
+        # Tell all Envoys about the current Tracer so they can use it to add InterventionProtocol Nodes.
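+        # Note: a weakref.proxy is passed below, presumably so the Envoy tree does not
+        # keep the Tracer alive after tracing ends.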
+ self._model._envoy._set_tracer(weakref.proxy(self)) + + self.invoker: Optional[Invoker] = None + + self.args = [method] + self.kwargs = kwargs + + + def invoke(self, *inputs: Any, **kwargs) -> Invoker: + """Create an Invoker context for a given input. + + Raises: + Exception: If an Invoker context is already open + + Returns: + Invoker: Invoker. + """ + + if self.invoker is not None: + + raise Exception("Can't create an invoker context with one already open!") + + return Invoker(self, *inputs, **kwargs) + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + + if self.invoker is not None: + + self.invoker.__exit__(None, None, None) + + self._model._envoy._reset() + + super().__exit__(exc_type, exc_val, exc_tb) + + @classmethod + def _batch( + cls, model: "NNsight", invoker_inputs: Tuple[Tuple[Tuple[Any], Dict[str, Any]]] + ) -> Tuple[Tuple[Tuple[Any], Dict[str, Any]], List[Tuple[int, int]]]: + """Batches together each set of inputs from each Invoker by iteratively calling the models ._prepare_input and ._batch methods. + + Args: + model (NNsight): Model which defines its own logic for preparing and batching input + invoker_inputs (Tuple[Tuple[Tuple[Any], Dict[str, Any]]]): Tuple of invoker inputs. + + Returns: + Tuple[Tuple[Tuple[Any], Dict[str, Any]], List[Tuple[int, int]]]: One single batched input. + List[Tuple[int, int]]: Batch groups + """ + + batch_groups = [] + batch_start = 0 + batched_input = None + + for args, kwargs in invoker_inputs: + + (args, kwargs), batch_size = model._prepare_input(*args, **kwargs) + + batch_groups.append((batch_start, batch_size)) + + batched_input = model._batch(batched_input, *args, **kwargs) + + batch_start += batch_size + + if batched_input is None: + + batched_input = (((0, -1),), dict()) + + return batched_input, batch_groups + + @property + def _invoker_group(self): + + return len(self.args) - 2 + + @classmethod + def execute(cls, node: InterventionNodeType): + + graph, method, *invoker_inputs = node.args + + graph: InterventionGraph + model = graph.model + + # There may be Nodes in the inputs. Convert them to their value + invoker_inputs, kwargs = node.prepare_inputs((invoker_inputs, node.kwargs)) + + # Batch each invoker input into one input + (invoker_args, invoker_kwargs), batch_groups = cls._batch(model, invoker_inputs) + + # Compile Intervention Graph + graph.compile() + + graph.reset() + + graph.execute() + + interleaver = Interleaver(graph, batch_groups=batch_groups) + + graph.model.interleave(interleaver, *invoker_args, fn=method,**kwargs, **invoker_kwargs) + + graph.cleanup() diff --git a/src/nnsight/contexts/Invoker.py b/src/nnsight/intervention/contexts/invoker.py similarity index 56% rename from src/nnsight/contexts/Invoker.py rename to src/nnsight/intervention/contexts/invoker.py index c60b94fd..64d5d66d 100755 --- a/src/nnsight/contexts/Invoker.py +++ b/src/nnsight/intervention/contexts/invoker.py @@ -2,26 +2,23 @@ import copy from contextlib import AbstractContextManager -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple import torch from torch._subclasses.fake_tensor import FakeCopyMode, FakeTensorMode from torch.fx.experimental.symbolic_shapes import ShapeEnv -from .. import util -from ..patching import Patch, Patcher -from ..tracing.Node import Node -from ..tracing.Proxy import Proxy -from . import check_for_dependencies -from .GraphBasedContext import GlobalTracingContext +from ... 
import util +from ...tracing.contexts.globals import GlobalTracingContext +from ..graph import InterventionNode, InterventionProxy, InterventionProxyType if TYPE_CHECKING: - from .Tracer import Tracer + from . import InterleavingTracer class Invoker(AbstractContextManager): - """An Invoker is meant to work in tandem with a :class:`nnsight.contexts.Tracer.Tracer` to enter input and manage intervention tracing. + """An Invoker is meant to work in tandem with a :class:`nnsight.intervention.contexts.InterleavingTracer` to enter input and manage intervention tracing. Attributes: tracer (nnsight.contexts.Tracer.Tracer): Tracer object to enter input and manage context. @@ -36,21 +33,23 @@ class Invoker(AbstractContextManager): def __init__( self, - tracer: "Tracer", - *inputs: Any, + tracer: "InterleavingTracer", + *args, scan: bool = False, **kwargs, ) -> None: self.tracer = tracer - self.inputs = inputs + self.inputs = (args, kwargs) + self.scan = scan - self.kwargs = kwargs self.scanning = False self.tracer.invoker = self + self.batch_size: Optional[int] = None + def __enter__(self) -> Invoker: """Enters a new invocation context with a given input. @@ -64,47 +63,49 @@ def __enter__(self) -> Invoker: """ has_proxies_in_inputs = False + + def check_for_proxies(proxy: InterventionProxyType): + + nonlocal has_proxies_in_inputs - # If were accumulating, we might have Proxies in the input. - # Therefore we first: Check to see if there are any Proxies. - # If there are, preserve the raw inputs with Proxies converted to a Locked Bridge protocol. - # Set self.inputs to be the proxy_value so we can prepare_inputs, get the batch size, and scan. - if self.tracer.model._session is not None: + has_proxies_in_inputs = True - self.inputs, has_proxies_in_inputs = check_for_dependencies( - self.inputs - ) + return proxy + # We need to check if there were any Proxies in the actual Invoker input. This might be True in a Session where values from one trace are used as an input to another. + util.apply(self.inputs, check_for_proxies, InterventionProxy) + + # We dont want to create new proxies during scanning/prepare_inputs so we exit the global tracing context. with GlobalTracingContext.exit_global_tracing_context(): + # If we dont have proxies we can immediately prepare the input so the user can see it and the batch_size. if not has_proxies_in_inputs: - self.inputs, batch_size = self.tracer.model._prepare_inputs( - *self.inputs, **self.kwargs + self.inputs, self.batch_size = self.tracer._model._prepare_input( + *self.inputs[0], **self.inputs[1] ) if self.scan: - inputs = self.inputs + input = self.inputs if has_proxies_in_inputs: - inputs = util.apply(inputs, lambda x: x.proxy_value, Node) + input = util.apply(input, lambda x: x.fake_value, InterventionNode) - inputs, batch_size = self.tracer.model._prepare_inputs( - *inputs, **self.kwargs - ) + input, _ = self.tracer._model._prepare_input(*input[0], **input[1]) - self.tracer.model._envoy._clear() + # Clear all fake inputs and outputs because were going to re-populate them. + self.tracer._model._envoy._clear() self.scanning = True - with Patcher() as patcher: + with util.Patcher() as patcher: # Some logic (like gpt-j rotary embeddings) gets "poisoned" by FakeTensors. 
# This does not happen when `torch._jit_internal.is_scripting() returns True.` patcher.add( - Patch(torch._jit_internal, lambda: True, "is_scripting") + util.Patch(torch._jit_internal, lambda: True, "is_scripting") ) with FakeTensorMode( @@ -112,17 +113,23 @@ def __enter__(self) -> Invoker: shape_env=ShapeEnv(assume_static_by_default=True), ) as fake_mode: with FakeCopyMode(fake_mode): - self.tracer.model._execute( - *copy.deepcopy(inputs), - **copy.deepcopy(self.tracer._kwargs), + fn = ( + self.tracer._model._execute + if self.tracer.args[0] is None + else getattr(self.tracer._model, self.tracer.args[0]) + ) + fn( + *copy.deepcopy(input[0]), + **copy.deepcopy(input[1]), + **copy.deepcopy(self.tracer.kwargs), ) self.scanning = False else: - self.tracer.model._envoy._reset() + self.tracer._model._envoy._reset() - self.tracer._invoker_inputs.append(self.inputs) + self.tracer.args.append(self.inputs) return self diff --git a/src/nnsight/intervention/contexts/local.py b/src/nnsight/intervention/contexts/local.py new file mode 100755 index 00000000..7c66dab0 --- /dev/null +++ b/src/nnsight/intervention/contexts/local.py @@ -0,0 +1,115 @@ +from typing import Callable, List, Optional + +from nnsight.tracing.graph.node import Node + +from ...tracing.contexts import Tracer +from ...tracing.graph import GraphType, NodeType +from ..protocols import EntryPoint, NoopProtocol + + +class LocalContext(Tracer): + + send: Optional[Callable] = None + + @classmethod + def set(cls, fn: Callable): + + cls.send = fn + + @classmethod + def execute(cls, node: NodeType): + + super().execute(node) + + uploads = node.kwargs.get("uploads", []) + + if uploads: + + values = {index: node.graph.nodes[index].value for index in uploads} + + cls.send(values) + + for index in uploads: + + node = node.graph.nodes[index] + + node.remaining_listeners -= 1 + + if node.redundant: + node.destroy() + + +class RemoteContext(Tracer): + + send: Optional[Callable] = None + receive: Optional[Callable] = None + + @classmethod + def set(cls, send: Callable, receive: Callable): + + cls.send = send + cls.receive = receive + + @classmethod + def from_local(cls, local_node: NodeType): + + local_node.target = RemoteContext + + graph: GraphType = local_node.args[0] + + start = graph[0].index + end = graph[-1].index + + uploads = [] + + # TODO check for swap and error + + for node in graph.nodes[start : end + 1]: + + for dependency in node.dependencies: + + if ( + isinstance(dependency.target, type) + and issubclass(dependency.target, EntryPoint) + ) or dependency.index < start: + + local_node.args.append(dependency) + + if isinstance(node.target, type) and issubclass(node.target, EntryPoint): + continue + + node.args.clear() + node.kwargs.clear() + + node.target = NoopProtocol + + for listener in node.listeners: + + if listener.index > end: + + uploads.append(node.index) + + if len(uploads) > 0: + local_node.kwargs["upload"] = True + + return uploads + + @classmethod + def execute(cls, node: NodeType): + + graph, *dependencies = node.args + + dependencies = { + dependency.index: dependency.value for dependency in dependencies + } + + cls.send((node.index, dependencies)) + + super().execute(node) + + if node.kwargs.get("upload", False): + + values = cls.receive() + + for index, value in values.items(): + graph.nodes[index]._value = value diff --git a/src/nnsight/intervention/contexts/session.py b/src/nnsight/intervention/contexts/session.py new file mode 100755 index 00000000..78364e77 --- /dev/null +++ 
b/src/nnsight/intervention/contexts/session.py
@@ -0,0 +1,37 @@
+from typing import TYPE_CHECKING, Optional
+
+from typing_extensions import Self
+
+from ..graph import (InterventionNode, InterventionProxy,
+                     ValidatingInterventionNode)
+from . import InterventionTracer
+
+if TYPE_CHECKING:
+    from .. import NNsight
+
+
+class Session(InterventionTracer[InterventionNode, InterventionProxy]):
+    """A Session simply allows grouping multiple Tracers in one computation graph."""
+
+    def __init__(self, model: "NNsight", validate: bool = False, debug: Optional[bool] = None, **kwargs) -> None:
+
+        super().__init__(
+            node_class=ValidatingInterventionNode if validate else InterventionNode,
+            proxy_class=model.proxy_class,
+            debug=debug,
+            **kwargs,
+        )
+
+        self.model = model
+
+    def __enter__(self) -> Self:
+
+        self.model._session = self
+
+        return super().__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+        self.model._session = None
+        return super().__exit__(exc_type, exc_val, exc_tb)
diff --git a/src/nnsight/intervention/contexts/tracer.py b/src/nnsight/intervention/contexts/tracer.py
new file mode 100755
index 00000000..850b0d52
--- /dev/null
+++ b/src/nnsight/intervention/contexts/tracer.py
@@ -0,0 +1,61 @@
+import inspect
+from functools import wraps
+from typing import Any, Callable, Dict, Optional, TypeVar, Union
+
+from ...tracing.contexts import Tracer
+from ..graph import (InterventionNodeType, InterventionProxy,
+                     InterventionProxyType)
+from . import LocalContext
+from ... import CONFIG
+
+class InterventionTracer(Tracer[InterventionNodeType, InterventionProxyType]):
+    """Extension of the base Tracer that adds intervention functionality and type hinting for intervention proxies."""
+
+    R = TypeVar("R")
+
+    def __init__(self, *args, **kwargs) -> None:
+        if kwargs.get('debug') is None:
+            kwargs['debug'] = CONFIG.APP.DEBUG
+
+        super().__init__(*args, **kwargs)
+
+    def apply(
+        self, target: Callable[..., R], *args, **kwargs
+    ) -> Union[InterventionProxy, R]:
+        return super().apply(target, *args, **kwargs)
+
+    def local(self, fn: Optional[Callable] = None) -> Union[LocalContext, Callable]:
+
+        if fn is None:
+
+            return LocalContext(parent=self.graph)
+
+        elif inspect.isroutine(fn):
+
+            @wraps(fn)
+            def inner(*args, **kwargs):
+
+                with LocalContext(parent=self.graph) as context:
+
+                    return context.apply(fn, *args, **kwargs)
+
+            return inner
+
+        else:
+
+            # TODO: error
+            pass
+
+    @classmethod
+    def style(cls) -> Dict[str, Any]:
+        """Visualization style for this tracer node.
+
+        Returns:
+            Dict: Dictionary style.
+ """ + + default_style = super().style() + + default_style["node"] = {"color": "purple", "shape": "polygon", "sides": 6} + default_style["arg_kname"][1] = "method" + + return default_style diff --git a/src/nnsight/envoy.py b/src/nnsight/intervention/envoy.py similarity index 59% rename from src/nnsight/envoy.py rename to src/nnsight/intervention/envoy.py index e072aabe..9f7d8d53 100755 --- a/src/nnsight/envoy.py +++ b/src/nnsight/intervention/envoy.py @@ -1,57 +1,318 @@ from __future__ import annotations import inspect +import re import warnings -from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union +from contextlib import AbstractContextManager +from typing import (TYPE_CHECKING, Any, Callable, Dict, Generic, Iterator, + List, Optional, Tuple, Union) import torch +from typing_extensions import Self -from .contexts.backends import EditBackend -from .contexts.Tracer import Tracer -from .intervention import InterventionProtocol, InterventionProxy -from .tracing import protocols +from . import protocols +from .backends import EditingBackend +from .contexts import InterventionTracer +from .graph import InterventionNodeType, InterventionProxyType -class Envoy: - """Envoy object act as proxies for torch modules within a model's module tree in order to add nnsight functionality. +class Envoy(Generic[InterventionProxyType, InterventionNodeType]): + """Envoy objects act as proxies for torch modules themselves within a model's module tree in order to add nnsight functionality. Proxies of the underlying module's output and input are accessed by `.output` and `.input` respectively. Attributes: - path (str): String representing the attribute path of this Envoy's module relative the the root model. Separated by '.' e.x ('transformer.h.0.mlp'). Set by NNsight on initialization of meta model. + path (str): String representing the attribute path of this Envoy's module relative the the root model. Separated by '.' e.x ('.transformer.h.0.mlp'). + output (nnsight.intervention.InterventionProxy): Proxy object representing the output of this Envoy's module. Reset on forward pass. + inputs (nnsight.intervention.InterventionProxy): Proxy object representing the inputs of this Envoy's module. Proxy is in the form of (Tuple[Tuple[], Dict[str, ]])Reset on forward pass. + input (nnsight.intervention.InterventionProxy): Alias for the first positional Proxy input i.e Envoy.inputs[0][0] + iter (nnsight.envoy.EnvoyIterator): Iterator object allowing selection of specific .input and .output iterations of this Envoy. + _module (torch.nn.Module): Underlying torch module. + _children (List[Envoy]): Immediate Envoy children of this Envoy. _fake_outputs (List[torch.Tensor]): List of 'meta' tensors built from the outputs most recent _scan. Is list as there can be multiple shapes for a module called more than once. _fake_inputs (List[torch.Tensor]): List of 'meta' tensors built from the inputs most recent _scan. Is list as there can be multiple shapes for a module called more than once. - output (nnsight.intervention.InterventionProxy): Proxy object representing the output of this Envoy's module. Reset on forward pass. - input (nnsight.intervention.InterventionProxy): Proxy object representing the input of this Envoy's module. Reset on forward pass. - _call_iter (int): Integer representing the current iteration of this Envoy's module's inputs/outputs. + _rename (Optional[Dict[str,str]]): Optional mapping of (old name -> new name). 
+            For example, to rename all gpt 'attn' modules to 'attention' you would use: rename={r"attn": "attention"}
+            Note this does not actually change the underlying module names, just how you access their Envoys. Renaming will replace Envoy.path, while Envoy._path represents the pre-renamed true attribute path.
         _tracer (nnsight.context.Tracer.Tracer): Object which adds this Envoy's module's output and input proxies to an intervention graph. Must be set on Envoys objects manually by the Tracer.
     """
 
-    def __init__(self, module: torch.nn.Module, module_path: str = ""):
+    def __init__(self, module: torch.nn.Module, module_path: str = "", alias_path: Optional[str] = None, rename: Optional[Dict[str, str]] = None):
+
+        self.path = alias_path or module_path
+        self._path = module_path
+
+        self._module = module
+
+        self._rename = rename
 
-        self.path = module_path
+        self._iteration_stack = [0]
 
         self._fake_outputs: List[torch.Tensor] = []
         self._fake_inputs: List[torch.Tensor] = []
 
-        self._output: Optional[InterventionProxy] = None
-        self._input: Optional[InterventionProxy] = None
-
-        self._call_iter = 0
+        self._output_stack: List[Optional[InterventionProxyType]] = [None]
+        self._input_stack: List[Optional[InterventionProxyType]] = [None]
 
-        self._tracer: Tracer = None
+        self._tracer: InterventionTracer = None
 
-        self._module = module
-        self._sub_envoys: List[Envoy] = []
+        self._children: List[Envoy] = []
 
         # Register hook on underlying module to update the _fake_outputs and _fake_inputs on forward pass.
         self._hook_handle = self._module.register_forward_hook(
            self._hook, with_kwargs=True
        )
 
+        # Recurse into PyTorch module tree.
         for name, module in self._module.named_children():
 
             setattr(self, name, module)
 
+    # Public API ################
+
+    def __call__(
+        self, *args: List[Any], hook=False, **kwargs: Dict[str, Any]
+    ) -> InterventionProxyType:
+        """Creates a proxy to call the underlying module's forward method with some inputs.
+
+        Returns:
+            InterventionProxy: Module call proxy.
+        """
+
+        if not self._tracing() or self._scanning():
+            return self._module(*args, **kwargs)
+
+        if isinstance(self._tracer.backend, EditingBackend):
+            hook = True
+
+        return protocols.ApplyModuleProtocol.add(
+            self._tracer.graph, self.path, *args, hook=hook, **kwargs
+        )
+
+    @property
+    def output(self) -> InterventionProxyType:
+        """
+        Calling denotes the user wishes to get the output of the underlying module, and therefore we create a Proxy of that request.
+        Only generates a proxy the first time it is referenced; otherwise returns the already-set one.
+
+        Returns:
+            InterventionProxy: Output proxy.
+        """
+        output = self._output_stack.pop()
+
+        if output is None:
+
+            if isinstance(self._module, torch.nn.ModuleList):
+
+                output = [envoy.output for envoy in self._children]
+
+                return output
+            else:
+
+                iteration = self._iteration_stack[-1]
+
+                if len(self._fake_outputs) == 0:
+                    fake_output = inspect._empty
+                elif iteration >= len(self._fake_outputs):
+                    # TODO warning?
+                    fake_output = self._fake_outputs[-1]
+                else:
+                    fake_output = self._fake_outputs[iteration]
+
+                module_path = f"{self._path}.output"
+
+                output = protocols.InterventionProtocol.add(
+                    self._tracer.graph,
+                    module_path,
+                    self._tracer._invoker_group,
+                    iteration,
+                    fake_value=fake_output,
+                )
+
+        self._output_stack.append(output)
+
+        return output
+
+    @output.setter
+    def output(self, value: Union[InterventionProxyType, Any]) -> None:
+        """
+        Calling denotes the user wishes to set the output of the underlying module, and therefore we create a Proxy of that request.
+
+        Args:
+            value (Union[InterventionProxy, Any]): Value to set output to.
+        """
+
+        protocols.SwapProtocol.add(self.output.node.graph, self.output.node, value)
+
+        self._output_stack[-1] = None
+
+    @property
+    def inputs(self) -> InterventionProxyType:
+        """
+        Calling denotes the user wishes to get the input of the underlying module, and therefore we create a Proxy of that request.
+        Only generates a proxy the first time it is referenced; otherwise returns the already-set one.
+
+        Returns:
+            InterventionProxy: Input proxy.
+        """
+
+        input = self._input_stack.pop()
+
+        if input is None:
+
+            if isinstance(self._module, torch.nn.ModuleList):
+
+                input = [envoy.input for envoy in self._children]
+
+                return input
+            else:
+
+                iteration = self._iteration_stack[-1]
+
+                if len(self._fake_inputs) == 0:
+                    fake_input = inspect._empty
+                elif iteration >= len(self._fake_inputs):
+                    # TODO warning?
+                    fake_input = self._fake_inputs[-1]
+                else:
+                    fake_input = self._fake_inputs[iteration]
+
+                module_path = f"{self._path}.input"
+
+                input = protocols.InterventionProtocol.add(
+                    self._tracer.graph,
+                    module_path,
+                    self._tracer._invoker_group,
+                    iteration,
+                    fake_value=fake_input,
+                )
+
+        self._input_stack.append(input)
+
+        return input
+
+    @inputs.setter
+    def inputs(self, value: Union[InterventionProxyType, Any]) -> None:
+        """
+        Calling denotes the user wishes to set the input of the underlying module, and therefore we create a Proxy of that request.
+
+        Args:
+            value (Union[InterventionProxy, Any]): Value to set input to.
+        """
+
+        protocols.SwapProtocol.add(self.inputs.node.graph, self.inputs.node, value)
+
+        self._input_stack[-1] = None
+
+    @property
+    def input(self) -> InterventionProxyType:
+        """Getting the first positional argument input of the model's module.
+
+        Returns:
+            InterventionProxy: Input proxy.
+        """
+
+        return self.inputs[0][0]
+
+    @input.setter
+    def input(self, value: Union[InterventionProxyType, Any]) -> None:
+        """Setting the value of the input's first positional argument in the model's module.
+
+        Args:
+            value (Union[InterventionProxy, Any]): Value to set the input to.
+        """
+
+        self.inputs = ((value,) + self.inputs[0][1:],) + (self.inputs[1:])
+
+    @property
+    def iter(self) -> IterationEnvoy:
+
+        return IterationEnvoy(self)
+
+    @iter.setter
+    def iter(self, iteration: Union[int, List[int], slice]) -> None:
+        self._iteration_stack.append(iteration)
+
+    def next(self, increment: int = 1) -> Envoy:
+        """By default, this module's inputs and outputs only refer to the first time it is called. Use `.next()` to select which iteration .input and .output refer to.
+
+        Args:
+            increment (int, optional): How many iterations to jump. Defaults to 1.
+
+        Returns:
+            Envoy: Self.
+        """
+
+        return self.iter[self._iteration_stack[-1] + increment].__enter__()
+
+    def all(self, propagate: bool = True) -> Envoy:
+        """By default, this module's inputs and outputs only refer to the first time it is called. Use `.all()` to have .input and .output refer to all iterations.
+
+        Returns:
+            Envoy: Self.
+        """
+
+        return self.iter[:].__enter__()
+
+    def to(self, *args, **kwargs) -> Envoy:
+        """Override torch.nn.Module.to so this returns the Envoy, not the underlying module, when doing: model = model.to(...)
+
+        Returns:
+            Envoy: Envoy.
+        """
+
+        self._module = self._module.to(*args, **kwargs)
+
+        return self
+
+    def modules(
+        self,
+        include_fn: Callable[[Envoy], bool] = None,
+        names: bool = False,
+        envoys: List = None,
+    ) -> List[Envoy]:
+        """Returns all Envoys in the Envoy tree.
+ + Args: + include_fn (Callable, optional): Optional function to be ran against all Envoys to check if they should be included in the final collection of Envoys. Defaults to None. + names (bool, optional): If to include the name/module_path of returned Envoys along with the Envoy itself. Defaults to False. + + Returns: + List[Envoy]: Included Envoys + """ + + if envoys is None: + envoys = list() + + included = True + + if include_fn is not None: + included = include_fn(self) + + if included: + if names: + envoys.append((self.path, self)) + else: + envoys.append(self) + + for sub_envoy in self._children: + sub_envoy.modules(include_fn=include_fn, names=names, envoys=envoys) + + return envoys + + def named_modules(self, *args, **kwargs) -> List[Tuple[str, Envoy]]: + """Returns all Envoys in the Envoy tree along with their name/module_path. + + Args: + include_fn (Callable, optional): Optional function to be ran against all Envoys to check if they should be included in the final collection of Envoys. Defaults to None. + + Returns: + List[Tuple[str, Envoy]]: Included Envoys and their names/module_paths. + """ + + return self.modules(*args, **kwargs, names=True) + + # Private API ############################### + def _update(self, module: torch.nn.Module) -> None: """Updates the ._model attribute using a new model of the same architecture. Used when loading the real weights (dispatching) and need to replace the underlying modules. @@ -67,7 +328,7 @@ def _update(self, module: torch.nn.Module) -> None: for i, module in enumerate(self._module.children()): - self._sub_envoys[i]._update(module) + self._children[i]._update(module) def _add_envoy(self, module: torch.nn.Module, name: str) -> None: """Adds a new Envoy for a given torch module under this Envoy. @@ -76,10 +337,26 @@ def _add_envoy(self, module: torch.nn.Module, name: str) -> None: module (torch.nn.Module): Module to create Envoy for. name (str): name of envoy/attribute. """ + + alias_path = None + + module_path = f"{self.path}.{name}" + + if self._rename is not None: + + for key, value in self._rename.items(): + + if name == key: + + name = value + + alias_path = f"{self.path}.{name}" + + break - envoy = Envoy(module, module_path=f"{self.path}.{name}") + envoy = Envoy(module, module_path=module_path, alias_path=alias_path, rename=self._rename) - self._sub_envoys.append(envoy) + self._children.append(envoy) # If the module already has a sub-module named 'input' or 'output', # mount the proxy access to 'nns_input' or 'nns_output instead. @@ -129,7 +406,7 @@ def _handle_overloaded_mount(self, envoy: Envoy, mount_point: str) -> None: # Update the class on the instance self.__class__ = new_cls - def _set_tracer(self, tracer: Tracer, propagate=True): + def _set_tracer(self, tracer: InterventionTracer, propagate=True): """Set tracer object on Envoy. Args: @@ -140,10 +417,9 @@ def _set_tracer(self, tracer: Tracer, propagate=True): self._tracer = tracer if propagate: - for envoy in self._sub_envoys: + for envoy in self._children: envoy._set_tracer(tracer, propagate=True) - - + def _tracing(self) -> bool: """Whether or not tracing. 
@@ -174,6 +450,23 @@ def _scanning(self) -> bool: return False + def _set_iteration( + self, iteration: Optional[int] = None, propagate: bool = True + ) -> None: + + if iteration is not None: + self._iteration_stack.append(iteration) + self._output_stack.append(None) + self._input_stack.append(None) + else: + self._iteration_stack.pop() + self._output_stack.pop() + self._input_stack.pop() + + if propagate: + for envoy in self._children: + envoy._set_iteration(iteration, propagate=True) + def _reset_proxies(self, propagate: bool = True) -> None: """Sets proxies to None. @@ -181,11 +474,11 @@ def _reset_proxies(self, propagate: bool = True) -> None: propagate (bool, optional): If to propagate to all sub-modules. Defaults to True. """ - self._output: InterventionProxy = None - self._input: InterventionProxy = None + self._output_stack = [None] + self._input_stack = [None] if propagate: - for envoy in self._sub_envoys: + for envoy in self._children: envoy._reset_proxies(propagate=True) def _reset(self, propagate: bool = True) -> None: @@ -197,10 +490,10 @@ def _reset(self, propagate: bool = True) -> None: self._reset_proxies(propagate=False) - self._call_iter = 0 + self._iteration_stack = [0] if propagate: - for envoy in self._sub_envoys: + for envoy in self._children: envoy._reset(propagate=True) def _clear(self, propagate: bool = True) -> None: @@ -216,92 +509,27 @@ def _clear(self, propagate: bool = True) -> None: self._fake_inputs = [] if propagate: - for envoy in self._sub_envoys: + for envoy in self._children: envoy._clear(propagate=True) def _hook( - self, module: torch.nn.Module, input: Any, input_kwargs: Dict, output: Any + self, + module: torch.nn.Module, + input: Any, + input_kwargs: Dict, + output: Any, ): if self._scanning(): - self._reset_proxies(propagate=False) - input = (input, input_kwargs) self._fake_outputs.append(output) self._fake_inputs.append(input) - def next(self, increment: int = 1, propagate: bool = False) -> Envoy: - - self._call_iter += increment - - self._reset_proxies(propagate=False) - - if propagate: - for envoy in self._sub_envoys: - envoy.next(increment=increment, propagate=True) - - return self - - def to(self, *args, **kwargs) -> Envoy: - """Override torch.nn.Module.to so this returns the Envoy, not the underlying module when doing: model = model.to(...) - - Returns: - Envoy: Envoy. - """ - - self._module = self._module.to(*args, **kwargs) - - return self - - def modules( - self, include_fn: Callable[[Envoy], bool] = None, names: bool = False, envoys: List = None - ) -> List[Envoy]: - """Returns all Envoys in the Envoy tree. - - Args: - include_fn (Callable, optional): Optional function to be ran against all Envoys to check if they should be included in the final collection of Envoys. Defaults to None. - names (bool, optional): If to include the name/module_path of returned Envoys along with the Envoy itself. Defaults to False. - - Returns: - List[Envoy]: Included Envoys - """ - - if envoys is None: - envoys = list() - - included = True - - if include_fn is not None: - included = include_fn(self) - - if included: - if names: - envoys.append((self.path, self)) - else: - envoys.append(self) - - for sub_envoy in self._sub_envoys: - sub_envoy.modules(include_fn=include_fn, names=names, envoys=envoys) - - return envoys - - def named_modules(self, *args, **kwargs) -> List[Tuple[str, Envoy]]: - """Returns all Envoys in the Envoy tree along with their name/module_path. 
- - Args: - include_fn (Callable, optional): Optional function to be ran against all Envoys to check if they should be included in the final collection of Envoys. Defaults to None. - - Returns: - List[Tuple[str, Envoy]]: Included Envoys and their names/module_paths. - """ - - return self.modules(*args, **kwargs, names=True) - def _repr_module_list(self): - list_of_reprs = [repr(item) for item in self._sub_envoys] + list_of_reprs = [repr(item) for item in self._children] if len(list_of_reprs) == 0: return self._module._get_name() + "()" @@ -373,16 +601,16 @@ def __repr__(self) -> str: return main_str - def __iter__(self) -> Iterator[Envoy]: + def __iter__(self) -> Iterator[Envoy[InterventionProxyType, InterventionNodeType]]: """Wrapper method for underlying ModuleList iterator. Returns: Iterator[Envoy]: Iterator. """ - return iter(self._sub_envoys) + return iter(self._children) - def __getitem__(self, key: int) -> Envoy: + def __getitem__(self, key: int) -> Envoy[InterventionProxyType, InterventionNodeType]: """Wrapper method for underlying ModuleList getitem. Args: @@ -392,7 +620,7 @@ def __getitem__(self, key: int) -> Envoy: Envoy: Envoy. """ - return self._sub_envoys[key] + return self._children[key] def __len__(self) -> int: """Wrapper method for underlying ModuleList len. @@ -403,7 +631,7 @@ def __len__(self) -> int: return len(self._module) - def __getattr__(self, key: str) -> Union[Envoy, Any]: + def __getattr__(self, key: str) -> Envoy[InterventionProxyType, InterventionNodeType]: """Wrapper method for underlying module's attributes. Args: @@ -428,145 +656,70 @@ def __setattr__(self, key: Any, value: Any) -> None: super().__setattr__(key, value) - def __call__( - self, *args: List[Any], hook=False, **kwargs: Dict[str, Any] - ) -> InterventionProxy: - """Creates a proxy to call the underlying module's forward method with some inputs. - Returns: - InterventionProxy: Module call proxy. - """ - - if not self._tracing(): - return self._module(*args, **kwargs) +class IterationEnvoy(Envoy, AbstractContextManager): - if isinstance(self._tracer.backend, EditBackend): - hook = True + def __init__(self, envoy: Envoy) -> None: - return protocols.ApplyModuleProtocol.add( - self._tracer.graph, self.path, *args, hook=hook, **kwargs - ) + self.__dict__.update(envoy.__dict__) - @property - def output(self) -> InterventionProxy: - """ - Calling denotes the user wishes to get the output of the underlying module and therefore we create a Proxy of that request. - Only generates a proxy the first time it is references otherwise return the already set one. + self._iteration = self._iteration_stack[-1] - Returns: - InterventionProxy: Output proxy. - """ - if self._output is None: - - if isinstance(self._module, torch.nn.ModuleList): - - self._output = [envoy.output for envoy in self._sub_envoys] + self._open_context = False - return self._output + @property + def output(self) -> InterventionProxyType: - if len(self._fake_outputs) == 0: - fake_output = inspect._empty - elif self._call_iter >= len(self._fake_outputs): - # TODO warning? 
- fake_output = self._fake_outputs[-1] - else: - fake_output = self._fake_outputs[self._call_iter] + self._output_stack.append(None) + self._iteration_stack.append(self._iteration) - module_path = f"{self.path}.output" + output = super().output - self._output = InterventionProtocol.add( - self._tracer.graph, - fake_output, - args=[ - module_path, - len(self._tracer._invoker_inputs) - 1, - self._call_iter, - ], - ) + self._output_stack.pop() + self._iteration_stack.pop() - return self._output + return output - @output.setter - def output(self, value: Union[InterventionProxy, Any]) -> None: - """ - Calling denotes the user wishes to set the output of the underlying module and therefore we create a Proxy of that request. + @property + def input(self) -> InterventionProxyType: - Args: - value (Union[InterventionProxy, Any]): Value to set output to. - """ + self._input_stack.append(None) + self._iteration_stack.append(self._iteration) - protocols.SwapProtocol.add(self.output.node, value) + input = super().input - self._output = None + self._input_stack.pop() + self._iteration_stack.pop() - @property - def inputs(self) -> InterventionProxy: - """ - Calling denotes the user wishes to get the input of the underlying module and therefore we create a Proxy of that request. - Only generates a proxy the first time it is references otherwise return the already set one. + return input - Returns: - InterventionProxy: Input proxy. - """ - if self._input is None: + def __getitem__(self, key: Union[int, List[int], slice]) -> Self: - if isinstance(self._module, torch.nn.ModuleList): + # TODO: Error if not valid key type - self._input = [envoy.input for envoy in self._sub_envoys] + if isinstance(key, tuple): - return self._input + key = list(key) - if len(self._fake_inputs) == 0: - fake_input = inspect._empty - elif self._call_iter >= len(self._fake_inputs): - # TODO warning? - fake_input = self._fake_inputs[-1] - else: - fake_input = self._fake_inputs[self._call_iter] + self._iteration = key - module_path = f"{self.path}.input" + return self - self._input = InterventionProtocol.add( - self._tracer.graph, - fake_input, - args=[ - module_path, - len(self._tracer._invoker_inputs) - 1, - self._call_iter, - ], - ) + def __enter__(self) -> IterationEnvoy: - return self._input + if not self._open_context: - @inputs.setter - def inputs(self, value: Union[InterventionProxy, Any]) -> None: - """ - Calling denotes the user wishes to set the input of the underlying module and therefore we create a Proxy of that request. + self._set_iteration(self._iteration) - Args: - value (Union[InterventionProxy, Any]): Value to set input to. - """ + self._open_context = True - protocols.SwapProtocol.add(self.inputs.node, value) + return self - self._input = None + def __exit__(self, exc_type, exc_val, exc_tb) -> None: - @property - def input(self) -> InterventionProxy: - """Getting the first positional argument input of the model's module. + self._set_iteration() - Returns: - InterventionProxy: Input proxy. - """ + self._open_context = False - return self.inputs[0][0] - - @input.setter - def input(self, value: Union[InterventionProxy, Any]) -> None: - """Setting the value of the input's first positionl argument in the model's module. - - Args; - value (Union[InterventionProxy, Any]): Value to set the input to. 
- """ - - self.inputs = ((value,) + self.inputs[0][1:],) + (self.inputs[1:]) \ No newline at end of file + if isinstance(exc_val, BaseException): + raise exc_val diff --git a/src/nnsight/intervention/graph/__init__.py b/src/nnsight/intervention/graph/__init__.py new file mode 100755 index 00000000..f77893ce --- /dev/null +++ b/src/nnsight/intervention/graph/__init__.py @@ -0,0 +1,3 @@ +from .proxy import InterventionProxy, InterventionProxyType +from .node import InterventionNode, ValidatingInterventionNode, InterventionNodeType +from .graph import InterventionGraph diff --git a/src/nnsight/intervention/graph/graph.py b/src/nnsight/intervention/graph/graph.py new file mode 100755 index 00000000..a09fb713 --- /dev/null +++ b/src/nnsight/intervention/graph/graph.py @@ -0,0 +1,433 @@ +import copy +import sys +from collections import defaultdict +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union + +from typing_extensions import Self + +from ...tracing.contexts import Context +from ...tracing.graph import SubGraph +from ...util import NNsightError +from ..protocols import ApplyModuleProtocol, GradProtocol, InterventionProtocol +from . import InterventionNode, InterventionNodeType, InterventionProxyType + +if TYPE_CHECKING: + from .. import NNsight + from ...tracing.graph.graph import GraphType, NodeType + + +class InterventionGraph(SubGraph[InterventionNode, InterventionProxyType]): + """The `InterventionGraph` is the special `SubGraph` type that handles the complex intervention operations a user wants to make during interleaving. + We need to `.compile()` it before execution to determine how to execute interventions appropriately. + + Attributes: + model (NNsight): NNsight model. + interventions + grad_subgraph + compiled + call_counter + deferred + """ + + def __init__( + self, + *args, + model: Optional["NNsight"] = None, + **kwargs, + ) -> None: + + super().__init__(*args, **kwargs) + + self.model = model + + self.interventions: Dict[str, List[InterventionNode]] = defaultdict(list) + self.grad_subgraph: Set[int] = set() + + self.compiled = False + self.call_counter: Dict[int, int] = defaultdict(int) + self.deferred: Dict[int, List[int]] = defaultdict(list) + + def __getstate__(self) -> Dict: + + return { + "subset": self.subset, + "nodes": self.nodes, + "interventions": self.interventions, + "compiled": self.compiled, + "call_counter": self.call_counter, + "deferred": self.deferred, + "grad_subgraph": self.grad_subgraph, + "defer_stack": self.defer_stack, + } + + def __setstate__(self, state: Dict) -> None: + + self.__dict__.update(state) + + def reset(self) -> None: + self.call_counter = defaultdict(int) + return super().reset() + + def set(self, model: "NNsight"): + + self.model = model + + def context_dependency( + self, + context_node: InterventionNode, + intervention_subgraphs: List[SubGraph], + ) -> None: + + context_graph: SubGraph = context_node.args[0] + + start = context_graph.subset[0] + end = context_graph.subset[-1] + + for intervention_subgraph in intervention_subgraphs: + + # continue if the subgraph does not overlap with the context's graph + if intervention_subgraph.subset[-1] < start or end < intervention_subgraph.subset[0]: + continue + + for intervention_index in intervention_subgraph.subset: + + # if there's an overlapping node, make the context depend on the intervention node in the subgraph + if start <= intervention_index and intervention_index <= end: + + # the first node in the subgraph is an InterventionProtocol node + intervention_node = 
intervention_subgraph[0] + + context_node._dependencies.add(intervention_node.index) + intervention_node._listeners.add(context_node.index) + # TODO: maybe we don't need this + intervention_subgraph.subset.append(context_node.index) + + break + + def compile(self) -> Optional[Dict[str, List[InterventionNode]]]: + + if self.compiled: + return self.interventions + + if len(self) == 0: + self.compiled = True + return + + intervention_subgraphs: List[SubGraph] = [] + + start = self[0].index + # is the first node corresponding to an executable graph? + # occurs when a Conditional or Iterator context is explicitly entered by a user + if isinstance(self[0].target, type) and issubclass( + self[0].target, Context + ): + graph = self[0].args[0] + + # handle emtpy if statments or for loops + if len(graph) > 0: + start = graph[0].index + + end = self[-1].index + 1 + + context_start: int = None + defer_start: int = None + context_node: InterventionNode = None + + # looping over all the nodes created within this graph's context + for index in range(start, end): + + node: InterventionNodeType = self.nodes[index] + + # is this node part of an inner context's subgraph? + if context_node is None and node.graph is not self: + + context_node = self.nodes[node.graph[-1].index + 1] + + context_start = self.subset.index(context_node.index) + + defer_start = node.index + + self.context_dependency(context_node, intervention_subgraphs) + + if node.target is InterventionProtocol: + + # build intervention subgraph + subgraph = SubGraph(self, subset=sorted(list(node.subgraph()))) + + module_path, *_ = node.args + + self.interventions[module_path].append(node) + + intervention_subgraphs.append(subgraph) + + # if the InterventionProtocol is defined within a sub-context + if context_node is not None: + + # make the current context node dependent on this intervention node + context_node._dependencies.add(node.index) + node._listeners.add(context_node.index) + # TODO: maybe we don't need this + self.subset.append(node.index) + + graph: SubGraph = node.graph + + graph.subset.remove(node.index) + + node.kwargs["start"] = context_start + node.kwargs["defer_start"] = defer_start + + node.graph = self + + else: + + node.kwargs["start"] = self.subset.index(subgraph.subset[0]) + node.kwargs["defer_start"] = node.kwargs["start"] + + elif node.target is GradProtocol: + + subgraph = SubGraph(self, subset=sorted(list(node.subgraph()))) + + intervention_subgraphs.append(subgraph) + + self.grad_subgraph.update(subgraph.subset[1:]) + + if context_node is not None: + + context_node._dependencies.add(node.index) + node._listeners.add(context_node.index) + subgraph.subset.append(context_node.index) + + graph: SubGraph = node.graph + + graph.subset.remove(node.index) + + node.kwargs["start"] = context_start + + node.graph = self + + else: + + node.kwargs["start"] = self.subset.index(subgraph.subset[1]) + + elif node.target is ApplyModuleProtocol: + + node.graph = self + + elif context_node is not None and context_node is node: + context_node = None + + self.compiled = True + + def execute( + self, + start: int = 0, + grad: bool = False, + defer: bool = False, + defer_start: int = 0, + ) -> None: + + err: Tuple[int, NNsightError] = None + + if defer_start in self.deferred: + + for index in self.deferred[defer_start]: + + self.nodes[index].reset() + + del self.deferred[defer_start] + + if defer: + + self.defer_stack.append(defer_start) + + for node in self[start:]: + + if node.executed: + continue + elif ( + node.index != self[start].index 
and node.target is InterventionProtocol + ): + break + elif node.fulfilled: + try: + node.execute() + if defer and node.target is not InterventionProtocol: + self.deferred[defer_start].append(node.index) + except NNsightError as e: + err = (node.index, e) + break + elif not grad and node.index in self.grad_subgraph: + continue + else: + break + + if defer: + self.defer_stack.pop() + + if err is not None: + defer_stack = self.defer_stack + self.defer_stack = [] + self.clean(err[0]) + self.defer_stack = defer_stack + raise err[1] + + def count(self, index: int, iteration: Union[int, List[int], slice]) -> bool: + """Increments the count of times a given Intervention Node has tried to be executed and returns if the Node is ready and if it needs to be deferred. + + Args: + index (int): Index of intervention node to return count for. + iteration (Union[int, List[int], slice]): What iteration(s) this Node should be executed for. + + Returns: + bool: If this Node should be executed on this iteration. + bool: If this Node and recursive listeners should have updating their remaining listeners (and therefore their destruction) deferred. + """ + + ready = False + defer = False + + count = self.call_counter[index] + + if isinstance(iteration, int): + ready = count == iteration + elif isinstance(iteration, list): + iteration.sort() + + ready = count in iteration + defer = count != iteration[-1] + + elif isinstance(iteration, slice): + + start = iteration.start or 0 + stop = iteration.stop + + ready = count >= start and (stop is None or count < stop) + + defer = stop is None or count < stop - 1 + + # if defer: + # self.deferred.add(index) + # else: + # self.deferred.discard(index) + + self.call_counter[index] += 1 + + return ready, defer + + def clean(self, start: Optional[int] = None): + + if start is None: + start = self[0].index + + end = self[-1].index + 1 + + # Loop over ALL nodes within the span of this graph. + for index in range(start, end): + + node = self.nodes[index] + + if node.executed: + break + + node.update_dependencies() + + def cleanup(self) -> None: + """Because some modules may be executed more than once, and to accommodate memory management just like a loop, + intervention graph sections defer updating the remaining listeners of Nodes if this is not the last time this section will be executed. + If we never knew it was the last time, there may still be deferred sections after execution. + These will be leftover in graph.deferred, and therefore we need to update their dependencies. + """ + + # For every intervention graph section (indicated by where it started) + for start in self.deferred: + + # Loop through all nodes that got their dependencies deferred. 
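+        # (Indices in self.deferred[start] were appended in execution order,
+        # so the last entry bounds the deferred span.)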
+ for index in range(start, self.deferred[start][-1] + 1): + + node = self.nodes[index] + + # Update each of its dependencies + for dependency in node.dependencies: + # Only if it was before start + # (not within this section, but before) + if dependency.index < start: + dependency.remaining_listeners -= 1 + + if dependency.redundant: + dependency.destroy() + + def copy( + self, + new_graph: Self = None, + parent: Optional["GraphType"] = None, + memo: Optional[Dict[int, "NodeType"]] = None, + ) -> Self: + + if memo is None: + memo = {} + + new_graph = super().copy(new_graph, parent=parent, memo=memo) + + new_graph.compiled = self.compiled + + for key, value in self.call_counter.items(): + new_graph.call_counter[memo[key]] = value + + if new_graph.compiled: + + for module_path, list_of_nodes in self.interventions.items(): + + new_graph.interventions[module_path] = [ + new_graph.nodes[memo[node.index]] for node in list_of_nodes + ] + + for key, values in self.deferred.items(): + + new_graph.deferred[memo[key]] = [memo[index] for index in values] + + new_graph.grad_subgraph = [memo[index] for index in self.grad_subgraph] + + return new_graph + + # @classmethod + # def shift(cls, mgraph: MultiGraph) -> MultiGraph: + + # InterventionProtocol.compile(mgraph) + + # intervention_subgraphs = InterventionProtocol.get_interventions(mgraph).values() + + # graph_id_to_invoker_groups = defaultdict(set) + # graph_id_to_intervention_node = defaultdict(list) + + # for subgraph in intervention_subgraphs: + # for (start, end) in subgraph: + + # node = mgraph[start] + + # invoker_group = node.args[1] + + # offset = 0 + + # for graph in mgraph.id_to_graphs.values(): + # offset += len(graph) + # if start < offset: + # graph_id_to_invoker_groups[graph.id].add(invoker_group) + # graph_id_to_intervention_node[graph.id].append(node) + # break + + # global_offset = 0 + + # for graph_id, invoker_groups in graph_id_to_invoker_groups.items(): + + # min_group = min(invoker_groups) + # max_group = max(invoker_groups) + + # offset = global_offset - min_group + + # for node in graph_id_to_intervention_node[graph_id]: + + # node.args[1] += offset + + # global_offset += max_group + 1 + + # return mgraph diff --git a/src/nnsight/intervention/graph/node.py b/src/nnsight/intervention/graph/node.py new file mode 100755 index 00000000..89808420 --- /dev/null +++ b/src/nnsight/intervention/graph/node.py @@ -0,0 +1,159 @@ +from __future__ import annotations + +import inspect +from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union + +import torch +from torch._subclasses.fake_tensor import FakeCopyMode, FakeTensorMode +from torch.fx.experimental.symbolic_shapes import ShapeEnv + +from ... import util +from ...tracing.contexts import GlobalTracingContext +from ...tracing.graph import Node, Proxy +from ...tracing.protocols import Protocol +from ..protocols import EntryPoint + +if TYPE_CHECKING: + from . import InterventionGraph + + +class InterventionNode(Node): + """This is the intervention extension of the base Node type. + + It has a fake_value to see information about this Node's future value before execution. + It adds additional functionality to Node.prepare_inputs to handle Tensors. 
+ """ + + def __init__( + self, *args, fake_value: Optional[Any] = inspect._empty, **kwargs + ) -> None: + super().__init__(*args, **kwargs) + + self.fake_value = fake_value + + @classmethod + def prepare_inputs( + cls, + inputs: Any, + device: Optional[torch.device] = None, + fake: bool = False, + ) -> Any: + """Override prepare_inputs to make sure + + Args: + inputs (Any): _description_ + device (Optional[torch.device], optional): _description_. Defaults to None. + fake (bool, optional): _description_. Defaults to False. + + Returns: + Any: _description_ + """ + + inputs = util.apply(inputs, lambda x: x, inspect._empty) + + def inner(value: Union[InterventionNode, torch.Tensor]): + + nonlocal device + + if isinstance(value, Proxy): + value = value.node + + if isinstance(value, InterventionNode): + if fake: + value = value.fake_value + else: + value = value.value + + if device is None and isinstance(value, torch.Tensor): + device = value.device + + return value + + inputs = util.apply( + inputs, inner, (InterventionNode, Proxy, torch.Tensor), inplace=not fake + ) + + if device is not None: + + def _to(value: torch.Tensor): + return value.to(device) + + inputs = util.apply(inputs, _to, torch.Tensor, inplace=not fake) + + return inputs + + def update_dependencies(self): + for dependency in self.dependencies: + if len(self.graph.defer_stack) > 0 and ( + dependency.index < self.graph.defer_stack[-1] + or ( + EntryPoint.is_entrypoint(dependency.target) + and dependency.graph is not self.graph + ) + ): + continue + + dependency.remaining_listeners -= 1 + + if dependency.redundant: + dependency.destroy() + + +InterventionNodeType = TypeVar("InterventionNodeType", bound=InterventionNode) + + +class ValidatingInterventionNode(InterventionNode): + """The ValidatingInterventionNode executes its target using the fake_values of all of its dependencies to calculate a new fake_value for this node. + Does not do this if the Node is detached from any graph, already has a fake_value (specified by whoever created the Node) or is a Protocol. + """ + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + if ( + self.attached + and self.fake_value is inspect._empty + and not Protocol.is_protocol(self.target) + ): + self.fake_value = validate(self.target, *self.args, **self.kwargs) + + +@staticmethod +def backwards_check(target: Callable, *args) -> bool: + + if target is Proxy.call: + + node: Node = args[0] + + if not isinstance(node, Node): + return False + + if node.target is util.fetch_attr and node.args[1] == "backward": + return True + + return False + + +@staticmethod +def validate(target: Callable, *args, **kwargs): + + # Enter FakeMode. + with FakeTensorMode( + allow_non_fake_inputs=True, + shape_env=ShapeEnv(assume_static_by_default=True), + ) as fake_mode: + with FakeCopyMode(fake_mode): + + with GlobalTracingContext.exit_global_tracing_context(): + + if backwards_check(target, *args): + return None + + args, kwargs = InterventionNode.prepare_inputs( + (args, kwargs), fake=True + ) + + return target( + *args, + **kwargs, + ) diff --git a/src/nnsight/intervention/graph/proxy.py b/src/nnsight/intervention/graph/proxy.py new file mode 100755 index 00000000..2112aadd --- /dev/null +++ b/src/nnsight/intervention/graph/proxy.py @@ -0,0 +1,142 @@ +from __future__ import annotations + +import inspect +from typing import TYPE_CHECKING, Any, Collection, TypeVar, Union + +import torch +from typing_extensions import Self + +from ... 
+from ... import util
+from ...tracing.graph import Proxy
+from .. import protocols
+
+if TYPE_CHECKING:
+    from . import InterventionNode
+
+
+class InterventionProxy(Proxy):
+
+    def __init__(self, node: "InterventionNode") -> None:
+        super().__init__(node)
+
+        self.__dict__["_grad"] = None
+
+        self._grad: Self
+        self.node: "InterventionNode"
+
+    @property
+    def grad(self) -> Self:
+        """
+        Calling this denotes the user wishes to get the grad of the proxy tensor, so we create
+        a Proxy of that request.
+
+        Returns:
+            Proxy: Grad proxy.
+        """
+
+        self.__dict__["_grad"] = protocols.GradProtocol.add(self.node.graph, self.node, fake_value=self.node.fake_value)
+
+        return self._grad
+
+    @grad.setter
+    def grad(self, value: Union[InterventionProxy, Any]) -> None:
+        """
+        Calling this denotes the user wishes to set the grad of this proxy tensor, so we create
+        a Proxy of that request via a SwapProtocol.
+
+        Args:
+            value (Union[InterventionProxy, Any]): Value to set the grad to.
+        """
+        protocols.SwapProtocol.add(self.node.graph, self._grad, value)
+
+    def __setattr__(
+        self, key: str, value: Union[Self, Any]
+    ) -> None:
+
+        # We catch setting .grad as that is a special Protocol vs. setting attributes generally.
+        if key == "grad":
+            return getattr(self.__class__, key).fset(self, value)
+
+        return super().__setattr__(key, value)
+
+    @property
+    def shape(self) -> Collection[torch.Size]:
+        """Property to retrieve the shape of the traced proxy value or real value.
+
+        Returns:
+            Union[torch.Size, Collection[torch.Size]]: Proxy value shape or collection of shapes.
+        """
+
+        if not self.node.attached:
+
+            return util.apply(self.value, lambda x: x.shape, torch.Tensor)
+
+        # If we haven't scanned in a proxy_value, just return a proxy to get the attribute.
+        if self.node.fake_value is inspect._empty:
+
+            return super().__getattr__("shape")
+
+        return util.apply(self.node.fake_value, lambda x: x.shape, torch.Tensor)
+
+    @property
+    def device(self) -> Collection[torch.device]:
+        """Property to retrieve the device of the traced proxy value or real value.
+
+        Returns:
+            Union[torch.device, Collection[torch.device]]: Proxy value device or collection of devices.
+        """
+
+        if not self.node.attached:
+
+            return util.apply(self.value, lambda x: x.device, torch.Tensor)
+
+        # If we haven't scanned in a proxy_value, just return a proxy to get the attribute.
+        if self.node.fake_value is inspect._empty:
+
+            return super().__getattr__("device")
+
+        return util.apply(self.node.fake_value, lambda x: x.device, torch.Tensor)
+
+    @property
+    def dtype(self) -> Collection[torch.dtype]:
+        """Property to retrieve the dtype of the traced proxy value or real value.
+
+        Returns:
+            Union[torch.dtype, Collection[torch.dtype]]: Proxy value dtype or collection of dtypes.
+        """
+
+        if not self.node.attached:
+
+            return util.apply(self.value, lambda x: x.dtype, torch.Tensor)
+
+        # If we haven't scanned in a proxy_value, just return a proxy to get the attribute.
+        if self.node.fake_value is inspect._empty:
+
+            return super().__getattr__("dtype")
+
+        return util.apply(self.node.fake_value, lambda x: x.dtype, torch.Tensor)
+
+    @classmethod
+    def __torch_function__(cls, orig_method, types, args=None, kwargs=None) -> Self:
+        if args is None:
+            args = list()
+        if kwargs is None:
+            kwargs = dict()
+
+        proxy: Proxy = None
+
+        def get_proxy(arg):
+            nonlocal proxy
+
+            proxy = arg
+
+        util.apply((args, kwargs), get_proxy, Proxy)
+
+        return proxy.node.create(
+            orig_method,
+            *args,
+            **kwargs,
+        )
+
+
+InterventionProxyType = TypeVar("InterventionProxyType", bound=InterventionProxy)
\ No newline at end of file
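As a usage note (a hedged sketch of mine, not from this patch): during tracing, these properties read from the node's fake_value, so tensor metadata can be inspected before any real execution. The model and module path below are illustrative assumptions.

```python
# Hypothetical use of InterventionProxy metadata during tracing.
with model.trace("Hello world"):
    hidden = model.transformer.h[0].output[0]
    # Resolved from the proxy's fake_value, before the model actually runs.
    print(hidden.shape, hidden.device, hidden.dtype)
```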
+ """ + + def __init__( + self, + graph: InterventionGraph, + batch_groups: Optional[List[Tuple[int, int]]] = None, + input_hook: Optional[Callable] = None, + output_hook: Optional[Callable] = None, + batch_size: Optional[int] = None, + ) -> None: + + self.graph = graph + + self.batch_groups = [] if batch_groups is None else batch_groups + + if input_hook is None: + input_hook = ( + lambda activations, module_path, module: InterventionProtocol.intervene( + activations, module_path, module, "input", self + ) + ) + + if output_hook is None: + output_hook = ( + lambda activations, module_path, module: InterventionProtocol.intervene( + activations, module_path, module, "output", self + ) + ) + + self.input_hook = input_hook + self.output_hook = output_hook + + self.handles: List[RemovableHandle] = [] + + self.batch_size = ( + sum(self.batch_groups[-1]) if batch_size is None else batch_size + ) + + def __enter__(self) -> Interleaver: + """Registers input and output hooks to modules involved in the `InterventionGraph`. + + Returns: + Interleaver: Interleaver + """ + + # Keys of `InterventionGraph.interventions` are the module paths + if they are for input or output. + # e.x 'transformer.h.0.mlp.output' + for module_key in self.graph.interventions.keys(): + + module_atoms = module_key.split(".") + + # Get just the hook type i.e input/output + *module_atoms, hook_type = module_atoms + + # Get just the module path + module_path = ".".join(module_atoms) + + # Get the torch module using the module_path + module: torch.nn.Module = util.fetch_attr(self.graph.model, module_path) + + if hook_type == "input": + + # Input hook activations are a tuple of (positional args, key-word arguments) + # Include the module_path not the module + def input_hook(module, input, kwargs, module_path=module_path): + return self.input_hook((input, kwargs), module_path, module) + + self.handles.append( + module.register_forward_pre_hook( + input_hook, with_kwargs=True, prepend=True + ) + ) + + elif hook_type == "output": + + def output_hook(module, input, output, module_path=module_path): + return self.output_hook(output, module_path, module) + + self.handles.append( + module.register_forward_hook(output_hook, prepend=True) + ) + + return self + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + + # Remove all hooks + for handle in self.handles: + handle.remove() + + if isinstance(exc_val, Exception): + raise exc_val + + + diff --git a/src/nnsight/intervention/protocols/__init__.py b/src/nnsight/intervention/protocols/__init__.py new file mode 100755 index 00000000..51ff2943 --- /dev/null +++ b/src/nnsight/intervention/protocols/__init__.py @@ -0,0 +1,6 @@ +from .grad import GradProtocol +from .module import ApplyModuleProtocol +from .intervention import InterventionProtocol +from .swap import SwapProtocol +from .noop import NoopProtocol +from .entrypoint import EntryPoint diff --git a/src/nnsight/intervention/protocols/entrypoint.py b/src/nnsight/intervention/protocols/entrypoint.py new file mode 100755 index 00000000..5f571ced --- /dev/null +++ b/src/nnsight/intervention/protocols/entrypoint.py @@ -0,0 +1,18 @@ +from typing import Any +from ...tracing.protocols import Protocol + +class EntryPoint(Protocol): + """An EntryPoint Protocol should have its value set manually outside of normal graph execution. + This makes these type of Nodes special and are handled differently in a variety of cases. + Subclasses EntryPoint informs those cases to handle it differently. + Examples are InterventionProtocol and GradProtocol. 
+ """ + + @staticmethod + def is_entrypoint(thing:Any): + + return isinstance(thing, type) and issubclass(thing, EntryPoint) + + @classmethod + def add(cls,*args, **kwargs): + return super().add(*args, redirect=False, **kwargs) diff --git a/src/nnsight/intervention/protocols/grad.py b/src/nnsight/intervention/protocols/grad.py new file mode 100755 index 00000000..6539b2ea --- /dev/null +++ b/src/nnsight/intervention/protocols/grad.py @@ -0,0 +1,73 @@ +from typing import TYPE_CHECKING, Any, Dict + +import torch + +from ...tracing.protocols import Protocol + +if TYPE_CHECKING: + from ..graph import InterventionNode, InterventionNodeType + +class GradProtocol(Protocol): + """Protocol which adds a backwards hook via .register_hook() to a Tensor. The hook injects the gradients into the node's value on hook execution. + Nodes created via this protocol are relative to the next time .backward() was called during tracing allowing separate .grads to reference separate backwards passes: + + .. code-block:: python + with model.trace(...): + + grad1 = model.module.output.grad.save() + + model.output.sum().backward(retain_graph=True) + + grad2 = model.module.output.grad.save() + + model.output.sum().backward() + + Uses an attachment to store number of times .backward() has been called during tracing so a given .grad hook is only value injected at the appropriate backwards pass. + """ + + @classmethod + def execute(cls, node: "InterventionNode") -> None: + + args, kwargs = node.prepare_inputs((node.args, node.kwargs)) + + # First arg is the Tensor to add hook to. + tensor: torch.Tensor = args[0] + + # Hook to remove when hook is executed at the appropriate backward pass. + hook = None + + def grad(value): + + # Set the value of the Node. + node.set_value(value) + + node.graph.execute(start=node.kwargs['start'], grad=True) + + # There may be a swap Protocol executed during the resolution of this part of the graph. + # If so get it and replace value with it. + if 'swap' in node.kwargs: + value:InterventionNodeType = node.kwargs.pop('swap') + + # Remove hook (if this is not done memory issues occur) + hook.remove() + + return value + + # Register hook. + hook = tensor.register_hook(grad) + + @classmethod + def style(cls) -> Dict[str, Any]: + """Visualization style for this protocol node. + + Returns: + - Dict: dictionary style. + """ + + default_style = super().style() + + default_style["node"] = {"color": "green4", "shape": "box"} + + return default_style + + \ No newline at end of file diff --git a/src/nnsight/intervention/protocols/intervention.py b/src/nnsight/intervention/protocols/intervention.py new file mode 100755 index 00000000..666fe423 --- /dev/null +++ b/src/nnsight/intervention/protocols/intervention.py @@ -0,0 +1,215 @@ +from typing import TYPE_CHECKING, Any, Dict + +import torch +from ... 
diff --git a/src/nnsight/intervention/protocols/intervention.py b/src/nnsight/intervention/protocols/intervention.py
new file mode 100755
index 00000000..666fe423
--- /dev/null
+++ b/src/nnsight/intervention/protocols/intervention.py
@@ -0,0 +1,215 @@
+from typing import TYPE_CHECKING, Any, Dict
+
+import torch
+from ... import util
+from .entrypoint import EntryPoint
+
+if TYPE_CHECKING:
+    from ..graph import InterventionNodeType
+    from ..interleaver import Interleaver
+
+class InterventionProtocol(EntryPoint):
+
+    @classmethod
+    def concat(
+        cls,
+        activations: Any,
+        value: Any,
+        batch_start: int,
+        batch_size: int,
+        total_batch_size: int,
+    ):
+        def _concat(values):
+
+            data_type = type(values[0])
+
+            if data_type == torch.Tensor:
+                orig_size = values[-1]
+                new_size = sum([value.shape[0] for value in values[:-1]])
+                if new_size == orig_size:
+                    return torch.concatenate(values[:-1])
+
+                return values[0]
+            elif data_type == list:
+                return [
+                    _concat([value[value_idx] for value in values])
+                    for value_idx in range(len(values[0]))
+                ]
+            elif data_type == tuple:
+                return tuple(
+                    [
+                        _concat([value[value_idx] for value in values])
+                        for value_idx in range(len(values[0]))
+                    ]
+                )
+            elif data_type == dict:
+                return {
+                    key: _concat([value[key] for value in values])
+                    for key in values[0].keys()
+                }
+            return values[0]
+
+        def narrow1(acts: torch.Tensor):
+            if total_batch_size == acts.shape[0]:
+                return acts.narrow(0, 0, batch_start)
+
+            return acts
+
+        pre = util.apply(activations, narrow1, torch.Tensor)
+
+        post_batch_start = batch_start + batch_size
+
+        def narrow2(acts: torch.Tensor):
+            if total_batch_size == acts.shape[0]:
+                return acts.narrow(
+                    0, post_batch_start, acts.shape[0] - post_batch_start
+                )
+
+            return acts
+
+        post = util.apply(
+            activations,
+            narrow2,
+            torch.Tensor,
+        )
+
+        orig_sizes = util.apply(activations, lambda x: x.shape[0], torch.Tensor)
+
+        return _concat([pre, value, post, orig_sizes])
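The batch-group mechanics above rely on `torch.narrow` returning a view rather than a copy. A small self-contained illustration (mine, not from the patch):

```python
import torch

acts = torch.arange(12.0).reshape(4, 3)  # total batch size 4
batch_start, batch_length = 1, 2         # batch group (1, 2)

view = acts.narrow(0, batch_start, batch_length)  # rows 1..2, a view not a copy
view.zero_()                                      # edits propagate to `acts`

assert acts[1].sum() == 0 and acts[2].sum() == 0  # original tensor was modified
```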
+ module_path = f"{module_path}.{key}" + + interventions = interleaver.graph.interventions + + if module_path in interventions: + intervention_nodes = interventions[module_path] + + # Multiple intervention nodes can have same module_path if there are multiple invocations. + # Is a set of node indexes making up the intervention subgraph + for node in intervention_nodes: + + # Args for intervention nodes are (module_path, batch_group, iteration). + _, batch_group, iteration = node.args + + # Updates the count of intervention node calls. + # If count matches the Node's iteration, its ready to be executed. + ready, defer = node.graph.count(node.index, iteration) + + # Dont execute if the node isnt ready (call count / iteration) or its not fulfilled (conditional) + if not ready: + continue + + value = activations + + narrowed = False + + if len(interleaver.batch_groups) > 1: + + batch_start, batch_size = interleaver.batch_groups[ + batch_group + ] + + def narrow(acts: torch.Tensor): + + if acts.shape[0] == interleaver.batch_size: + + nonlocal narrowed + + narrowed = True + + return acts.narrow(0, batch_start, batch_size) + + return acts + + value = util.apply( + activations, + narrow, + torch.Tensor, + ) + + node.reset() + + # Value injection. + node.set_value(value) + + node.executed = True + # Execute starting from start + node.graph.execute(start=node.kwargs['start'], defer=defer, defer_start=node.kwargs['defer_start']) + + # Check if through the previous value injection, there was a 'swap' intervention. + # This would mean we want to replace activations for this batch with some other ones. + if 'swap' in node.kwargs: + value:InterventionNodeType = node.kwargs.pop('swap') + + # If we narrowed any data, we need to concat it with data before and after it. + if narrowed: + + activations = cls.concat( + activations, + value, + batch_start, + batch_size, + interleaver.batch_size, + ) + # Otherwise just return the whole value as the activations. + else: + + activations = value + + return activations + + @classmethod + def execute(cls, node: "InterventionNodeType"): + # To prevent the node from looking like its executed when calling Graph.execute + node.executed = False + + @classmethod + def style(cls) -> Dict[str, Any]: + """Visualization style for this protocol node. + + Returns: + - Dict: dictionary style. + """ + + default_style = super().style() + + default_style["node"] = {"color": "green4", "shape": "box"} + default_style["arg_kname"][0] = "module_path" + default_style["arg_kname"][1] = "batch_group" + default_style["arg_kname"][2] = "call_counter" + + return default_style diff --git a/src/nnsight/intervention/protocols/module.py b/src/nnsight/intervention/protocols/module.py new file mode 100755 index 00000000..d5d5fb14 --- /dev/null +++ b/src/nnsight/intervention/protocols/module.py @@ -0,0 +1,106 @@ +from typing import TYPE_CHECKING, Any, Dict + +import torch +from typing_extensions import Self + +from ... import util +from ...tracing.protocols import Protocol + +if TYPE_CHECKING: + from ..graph import InterventionGraph, InterventionNode + +class ApplyModuleProtocol(Protocol): + """Protocol that references some root model, and calls its .forward() method given some input. + Using .forward() vs .__call__() means it wont trigger hooks. + Uses an attachment to the Graph to store the model. + """ + + + @classmethod + def add( + cls, graph: "InterventionGraph", module_path: str, *args, hook=False, **kwargs + ) -> Self: + """Creates and adds an ApplyModuleProtocol to the Graph. 
diff --git a/src/nnsight/intervention/protocols/module.py b/src/nnsight/intervention/protocols/module.py
new file mode 100755
index 00000000..d5d5fb14
--- /dev/null
+++ b/src/nnsight/intervention/protocols/module.py
@@ -0,0 +1,106 @@
+from typing import TYPE_CHECKING, Any, Dict
+
+import torch
+from typing_extensions import Self
+
+from ... import util
+from ...tracing.protocols import Protocol
+
+if TYPE_CHECKING:
+    from ..graph import InterventionGraph, InterventionNode
+
+class ApplyModuleProtocol(Protocol):
+    """Protocol that references some root model, and calls its .forward() method given some input.
+    Using .forward() vs .__call__() means it won't trigger hooks.
+    Uses an attachment to the Graph to store the model.
+    """
+
+    @classmethod
+    def add(
+        cls, graph: "InterventionGraph", module_path: str, *args, hook=False, **kwargs
+    ) -> Self:
+        """Creates and adds an ApplyModuleProtocol to the Graph.
+        Assumes the attachment has already been added via ApplyModuleProtocol.set_module().
+
+        Args:
+            graph (Graph): Graph to add the Protocol to.
+            module_path (str): Module path (model.module1.module2 etc.) of the module to apply from the root module.
+
+        Returns:
+            InterventionProxy: ApplyModule Proxy.
+        """
+
+        from ..graph.node import ValidatingInterventionNode, validate
+
+        # If the Graph is validating, we need to compute the proxy_value for this node.
+        if graph.node_class is ValidatingInterventionNode:
+
+            # If the module has parameters, get its device to move input tensors to.
+            module: torch.nn.Module = util.fetch_attr(
+                graph.model._model, module_path
+            )
+
+            try:
+                device = next(module.parameters()).device
+            except:
+                device = None
+
+            # Enter FakeMode for proxy_value computing.
+            kwargs['fake_value'] = validate(module.forward, *args, **kwargs)
+
+        kwargs["hook"] = hook
+
+        # Create and attach Node.
+        return graph.create(
+            cls,
+            module_path,
+            *args,
+            **kwargs,
+        )
+
+    @classmethod
+    def execute(cls, node: "InterventionNode") -> None:
+        """Executes the ApplyModuleProtocol on Node.
+
+        Args:
+            node (Node): ApplyModule Node.
+        """
+
+        graph: InterventionGraph = node.graph
+
+        module: torch.nn.Module = util.fetch_attr(
+            graph.model._model, node.args[0]
+        )
+
+        try:
+            device = next(module.parameters()).device
+        except:
+            device = None
+
+        args, kwargs = node.prepare_inputs((node.args, node.kwargs), device=device)
+
+        module_path, *args = args
+
+        hook = kwargs.pop("hook")
+
+        if hook:
+            output = module(*args, **kwargs)
+        else:
+            output = module.forward(*args, **kwargs)
+
+        node.set_value(output)
+
+    @classmethod
+    def style(cls) -> Dict[str, Any]:
+        """Visualization style for this protocol node.
+
+        Returns:
+            - Dict: dictionary style.
+        """
+
+        default_style = super().style()
+
+        default_style["node"] = {"color": "green4", "shape": "polygon", "sides": 6}
+
+        return default_style
diff --git a/src/nnsight/intervention/protocols/noop.py b/src/nnsight/intervention/protocols/noop.py
new file mode 100755
index 00000000..ed1b41a4
--- /dev/null
+++ b/src/nnsight/intervention/protocols/noop.py
@@ -0,0 +1,14 @@
+from typing import TYPE_CHECKING, Any
+
+from ...tracing.protocols import Protocol
+if TYPE_CHECKING:
+    from ..graph import InterventionNode
+
+
+class NoopProtocol(Protocol):
+
+    @classmethod
+    def execute(cls, node: "InterventionNode") -> None:
+
+        node.set_value(None)
\ No newline at end of file
diff --git a/src/nnsight/intervention/protocols/swap.py b/src/nnsight/intervention/protocols/swap.py
new file mode 100755
index 00000000..2c24b6d9
--- /dev/null
+++ b/src/nnsight/intervention/protocols/swap.py
@@ -0,0 +1,36 @@
+from typing import TYPE_CHECKING, Any, Dict
+
+from ...tracing.protocols import Protocol
+
+if TYPE_CHECKING:
+    from ..graph import InterventionNodeType
+
+
+class SwapProtocol(Protocol):
+
+    @classmethod
+    def execute(cls, node: "InterventionNodeType") -> None:
+
+        intervention_node, value = node.args
+        intervention_node: "InterventionNodeType"
+
+        value = node.prepare_inputs(value)
+
+        node.set_value(None)
+
+        intervention_node.kwargs['swap'] = value
+
+    @classmethod
+    def style(cls) -> Dict[str, Any]:
+        """Visualization style for this protocol node.
+
+        Returns:
+            - Dict: dictionary style.
+ """ + + default_style = super().style() + + default_style["node"] = {"color": "green4", "shape": "ellipse"} + + return default_style diff --git a/src/nnsight/contexts/session/__init__.py b/src/nnsight/modeling/__init__.py old mode 100644 new mode 100755 similarity index 100% rename from src/nnsight/contexts/session/__init__.py rename to src/nnsight/modeling/__init__.py diff --git a/src/nnsight/models/DiffusionModel.py b/src/nnsight/modeling/diffusion.py similarity index 68% rename from src/nnsight/models/DiffusionModel.py rename to src/nnsight/modeling/diffusion.py index e08fd40b..1c75d92b 100755 --- a/src/nnsight/models/DiffusionModel.py +++ b/src/nnsight/modeling/diffusion.py @@ -1,16 +1,15 @@ from __future__ import annotations -from typing import Any, Callable, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import torch from diffusers import DiffusionPipeline from transformers import BatchEncoding from typing_extensions import Self +from ..intervention.contexts import InterventionTracer from .. import util -from ..envoy import Envoy -from .mixins import GenerationMixin -from .NNsightModel import NNsight +from .mixins import RemoteableMixin class Diffuser(util.WrapperModule): @@ -18,7 +17,7 @@ def __init__(self, *args, **kwargs) -> None: super().__init__() self.pipeline = DiffusionPipeline.from_pretrained(*args, **kwargs) - + for key, value in self.pipeline.__dict__.items(): if isinstance(value, torch.nn.Module): setattr(self, key, value) @@ -26,35 +25,36 @@ def __init__(self, *args, **kwargs) -> None: self.tokenizer = self.pipeline.tokenizer -class DiffusionModel(GenerationMixin, NNsight): - - def __new__(cls, *args, **kwargs) -> Self | Envoy | Diffuser: - return object.__new__(cls) +class DiffusionModel(RemoteableMixin): + + __methods__ = {"generate": "_generate"} def __init__(self, *args, **kwargs) -> None: self._model: Diffuser = None super().__init__(*args, **kwargs) + + def _load_meta(self, repo_id:str, **kwargs): + + + model = Diffuser( + repo_id, + device_map=None, + low_cpu_mem_usage=False, + **kwargs, + ) - def _load(self, repo_id: str, device_map=None, **kwargs) -> Diffuser: - - if self._model is None: - - model = Diffuser( - repo_id, - device_map=None, - low_cpu_mem_usage=False, - **kwargs, - ) + return model + - return model + def _load(self, repo_id: str, device_map=None, **kwargs) -> Diffuser: model = Diffuser(repo_id, device_map=device_map, **kwargs) return model - def _prepare_inputs( + def _prepare_input( self, inputs: Union[str, List[str]], ) -> Any: @@ -62,9 +62,9 @@ def _prepare_inputs( if isinstance(inputs, str): inputs = [inputs] - return (inputs,), len(inputs) + return ((inputs,), {}), len(inputs) - def _batch_inputs( + def _batch( self, batched_inputs: Optional[Dict[str, Any]], prepared_inputs: BatchEncoding, @@ -72,11 +72,11 @@ def _batch_inputs( if batched_inputs is None: - return (prepared_inputs, ) + return ((prepared_inputs, ), {}) return (batched_inputs + prepared_inputs, ) - def _execute_forward(self, prepared_inputs: Any, *args, **kwargs): + def _execute(self, prepared_inputs: Any, *args, **kwargs): return self._model.unet( prepared_inputs, @@ -84,7 +84,7 @@ def _execute_forward(self, prepared_inputs: Any, *args, **kwargs): **kwargs, ) - def _execute_generate( + def _generate( self, prepared_inputs: Any, *args, seed: int = None, **kwargs ): @@ -108,3 +108,12 @@ def _execute_generate( output = self._model(output) return output + + +if TYPE_CHECKING: + + class DiffusionModel(DiffusionModel, 
DiffusionPipeline): + + def generate(self, *args, **kwargs) -> InterventionTracer: + return self._model.pipeline(*args, **kwargs) + diff --git a/src/nnsight/modeling/language.py b/src/nnsight/modeling/language.py new file mode 100755 index 00000000..6cfa4af7 --- /dev/null +++ b/src/nnsight/modeling/language.py @@ -0,0 +1,336 @@ +from __future__ import annotations + +import json +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Optional, + Protocol, + Tuple, + Type, + Union, +) + +import torch +from torch.nn.modules import Module +from transformers import ( + AutoConfig, + AutoModel, + AutoModelForCausalLM, + AutoTokenizer, + BatchEncoding, + PretrainedConfig, + PreTrainedModel, + PreTrainedTokenizer, +) +from transformers.models.auto import modeling_auto +from transformers.models.llama.configuration_llama import LlamaConfig +from typing_extensions import Self + +from ..intervention import Envoy +from ..intervention.contexts import InterleavingTracer +from ..intervention.graph import InterventionNodeType, InterventionProxyType +from ..util import WrapperModule +from .mixins import RemoteableMixin + + +class LanguageModel(RemoteableMixin): + """LanguageModels are NNsight wrappers around transformers language models. + + Inputs can be in the form of: + Prompt: (str) + Prompts: (List[str]) + Batched prompts: (List[List[str]]) + Tokenized prompt: (Union[List[int], torch.Tensor]) + Tokenized prompts: (Union[List[List[int]], torch.Tensor]) + Direct input: (Dict[str,Any]) + + If using a custom model, you also need to provide the tokenizer like ``LanguageModel(custom_model, tokenizer=tokenizer)`` + + Calls to generate pass arguments downstream to :func:`GenerationMixin.generate` + + Attributes: + config (PretrainedConfig): Huggingface config file loaded from repository or checkpoint. + tokenizer (PreTrainedTokenizer): Tokenizer for LMs. + automodel (Type): AutoModel type from transformer auto models. + model (PreTrainedModel): Meta version of underlying auto model. 
+ + """ + + __methods__ = {"generate": "_generate"} + + tokenizer: PreTrainedTokenizer + + class Generator(WrapperModule): + + class Streamer(WrapperModule): + + def put(self, *args): + return self(*args) + + def end(self): + pass + + def __init__(self) -> None: + + super().__init__() + + self.streamer = LanguageModel.Generator.Streamer() + + def __init__( + self, + *args, + config: Optional[PretrainedConfig] = None, + tokenizer: Optional[PreTrainedTokenizer] = None, + automodel: Type[AutoModel] = AutoModelForCausalLM, + **kwargs, + ) -> None: + + self.automodel = ( + automodel + if not isinstance(automodel, str) + else getattr(modeling_auto, automodel) + ) + + self.config = config + self.tokenizer = tokenizer + self.repo_id: str = None + + super().__init__(*args, **kwargs) + + self.generator: Envoy[InterventionProxyType, InterventionNodeType] = ( + LanguageModel.Generator() + ) + + def _load_config(self, repo_id: str, **kwargs): + + if self.config is None: + + self.config = AutoConfig.from_pretrained(repo_id, **kwargs) + + def _load_tokenizer(self, repo_id: str, **kwargs): + + if self.tokenizer is None: + + if "padding_side" not in kwargs: + kwargs["padding_side"] = "left" + + self.tokenizer = AutoTokenizer.from_pretrained( + repo_id, config=self.config, **kwargs + ) + + if getattr(self.tokenizer, "pad_token", None) is None: + self.tokenizer.pad_token = self.tokenizer.eos_token + + def _load_meta( + self, + repo_id: str, + tokenizer_kwargs: Optional[Dict[str, Any]] = {}, + patch_llama_scan: bool = True, + **kwargs, + ) -> Module: + + self.repo_id = repo_id + + self._load_config(repo_id, **kwargs) + + self._load_tokenizer(repo_id, **tokenizer_kwargs) + + if ( + patch_llama_scan + and isinstance(self.config, LlamaConfig) + and isinstance(self.config.rope_scaling, dict) + and "rope_type" in self.config.rope_scaling + ): + self.config.rope_scaling["rope_type"] = "default" + + model = self.automodel.from_config(self.config, trust_remote_code=True) + + return model + + def _load( + self, + repo_id: str, + tokenizer_kwargs: Optional[Dict[str, Any]] = {}, + patch_llama_scan: bool = True, + **kwargs, + ) -> PreTrainedModel: + + self._load_config(repo_id, **kwargs) + + self._load_tokenizer(repo_id, **tokenizer_kwargs) + + if ( + patch_llama_scan + and isinstance(self.config, LlamaConfig) + and isinstance(self.config.rope_scaling, dict) + and "rope_type" in self.config.rope_scaling + ): + self.config.rope_scaling["rope_type"] = "llama3" + + model = self.automodel.from_pretrained(repo_id, config=self.config, **kwargs) + + return model + + def _tokenize( + self, + inputs: Union[ + str, + List[str], + List[List[str]], + List[int], + List[List[int]], + torch.Tensor, + Dict[str, Any], + ], + **kwargs, + ): + + if isinstance(inputs, str) or ( + isinstance(inputs, list) and isinstance(inputs[0], int) + ): + inputs = [inputs] + + if isinstance(inputs, torch.Tensor) and inputs.ndim == 1: + inputs = inputs.unsqueeze(0) + + if not isinstance(inputs[0], str): + inputs = [{"input_ids": ids} for ids in inputs] + return self.tokenizer.pad(inputs, return_tensors="pt", **kwargs) + + return self.tokenizer(inputs, return_tensors="pt", padding=True, **kwargs) + + def _prepare_input( + self, + *inputs: Tuple[ + Union[ + str, + List[str], + List[List[str]], + List[int], + List[List[int]], + torch.Tensor, + List[torch.Tensor], + Dict[str, Any], + BatchEncoding, + ] + ], + input_ids: Union[ + List[int], List[List[int]], torch.Tensor, List[torch.Tensor] + ] = None, + labels: Any = None, + **kwargs, + ) -> 
Tuple[BatchEncoding, int]: + + if input_ids is not None: + + assert len(inputs) == 0 + + inputs = (input_ids,) + + assert len(inputs) == 1 + + inputs = inputs[0] + + if isinstance(inputs, dict): + inputs = BatchEncoding(inputs) + elif isinstance(inputs, BatchEncoding): + pass + else: + + inputs = self._tokenize(inputs, **kwargs) + + if labels is not None: + labels = self._tokenize(labels, **kwargs)["input_ids"] + + return ((inputs,), {"labels": labels}), len(inputs["input_ids"]) + + def _batch( + self, + batched_inputs: Optional[Tuple[Tuple[BatchEncoding], Dict[str, Any]]], + input: BatchEncoding, + labels: Optional[torch.Tensor] = None, + ) -> Tuple[Dict[str, Any]]: + + if batched_inputs is None: + return ((input,), {"labels": labels}) + + batched_labels = batched_inputs[1]["labels"] + batched_inputs = batched_inputs[0][0] + + attention_mask = batched_inputs["attention_mask"] + batched_inputs = [ + {"input_ids": ids} + for ids in [ + *batched_inputs["input_ids"].tolist(), + *input["input_ids"].tolist(), + ] + ] + batched_inputs = self.tokenizer.pad(batched_inputs, return_tensors="pt") + + if labels is not None: + + batched_labels = torch.cat((batched_labels, labels)) + + batched_inputs["attention_mask"][:-1, : attention_mask.shape[1]] = attention_mask + + return ((batched_inputs,), {"labels": batched_labels}) + + def _execute(self, inputs: BatchEncoding, **kwargs) -> Any: + + inputs = inputs.to(self.device) + + return self._model( + **inputs, + **kwargs, + ) + + def _generate( + self, + inputs: BatchEncoding, + max_new_tokens=1, + streamer: Any = None, + **kwargs, + ): + + if streamer is None: + streamer = self.generator.streamer + + inputs = inputs.to(self.device) + + output = self._model.generate( + **inputs, + **kwargs, + streamer=streamer, + max_new_tokens=max_new_tokens, + ) + + self.generator(output) + + return output + + def _remoteable_model_key(self) -> str: + return json.dumps( + {"repo_id": self.repo_id} # , "torch_dtype": str(self._model.dtype)} + ) + + @classmethod + def _remoteable_from_model_key(cls, model_key: str, **kwargs) -> Self: + + kwargs = {**json.loads(model_key), **kwargs} + + repo_id = kwargs.pop("repo_id") + + return LanguageModel(repo_id, **kwargs) + + +if TYPE_CHECKING: + + class LanguageModel(LanguageModel, PreTrainedModel): + + def generate(self, *args, **kwargs) -> InterleavingTracer: + pass diff --git a/src/nnsight/modeling/mixins/__init__.py b/src/nnsight/modeling/mixins/__init__.py new file mode 100644 index 00000000..97c1d4cb --- /dev/null +++ b/src/nnsight/modeling/mixins/__init__.py @@ -0,0 +1,3 @@ +from .remoteable import RemoteableMixin +from .loadable import LoadableMixin +from .meta import MetaMixin \ No newline at end of file diff --git a/src/nnsight/modeling/mixins/loadable.py b/src/nnsight/modeling/mixins/loadable.py new file mode 100755 index 00000000..914af587 --- /dev/null +++ b/src/nnsight/modeling/mixins/loadable.py @@ -0,0 +1,24 @@ +from typing import Dict, Optional + +import torch + +from ...intervention import NNsight + + +class LoadableMixin(NNsight): + + def __init__(self, *args, rename: Optional[Dict[str,str]] = None, **kwargs) -> None: + + if not isinstance(args[0], torch.nn.Module): + + model = self._load(*args, **kwargs) + + else: + + model = args[0] + + super().__init__(model, rename=rename) + + def _load(self, *args, **kwargs) -> torch.nn.Module: + + raise NotImplementedError() diff --git a/src/nnsight/modeling/mixins/meta.py b/src/nnsight/modeling/mixins/meta.py new file mode 100755 index 00000000..7ae550d3 --- /dev/null +++ 
b/src/nnsight/modeling/mixins/meta.py
@@ -0,0 +1,49 @@
+from typing import Dict, Optional
+
+import torch
+from accelerate import init_empty_weights
+
+from ...intervention import NNsight
+from .loadable import LoadableMixin
+
+
+class MetaMixin(LoadableMixin):
+
+    def __init__(
+        self, *args, dispatch: bool = False, meta_buffers: bool = True, rename: Optional[Dict[str, str]] = None, **kwargs
+    ) -> None:
+
+        self.dispatched = dispatch
+
+        if isinstance(args[0], torch.nn.Module) or dispatch:
+
+            super().__init__(*args, **kwargs)
+
+        else:
+
+            with init_empty_weights(include_buffers=meta_buffers):
+
+                model = self._load_meta(*args, **kwargs)
+
+            NNsight.__init__(self, model, rename=rename)
+
+        self.args = args
+        self.kwargs = kwargs
+
+    def _load_meta(self, *args, **kwargs) -> torch.nn.Module:
+
+        raise NotImplementedError()
+
+    def dispatch(self) -> None:
+
+        self._model = self._load(*self.args, **self.kwargs)
+        self._envoy._update(self._model)
+
+        self.dispatched = True
+
+    def interleave(self, *args, **kwargs):
+
+        if not self.dispatched:
+            self.dispatch()
+
+        return super().interleave(*args, **kwargs)
diff --git a/src/nnsight/modeling/mixins/remoteable.py b/src/nnsight/modeling/mixins/remoteable.py
new file mode 100755
index 00000000..8bfee919
--- /dev/null
+++ b/src/nnsight/modeling/mixins/remoteable.py
@@ -0,0 +1,86 @@
+from typing import Any, Dict
+
+from typing_extensions import Self
+
+from nnsight.intervention.contexts import Session
+
+from ...intervention.backends import RemoteBackend
+from ...tracing.backends import Backend
+from ...util import from_import_path, to_import_path
+from .meta import MetaMixin
+
+
+class RemoteableMixin(MetaMixin):
+
+    def trace(
+        self,
+        *inputs: Any,
+        method: str | None = None,
+        backend: Backend | str | None = None,
+        remote: bool = False,
+        blocking: bool = True,
+        trace: bool = True,
+        scan: bool = False,
+        **kwargs: Dict[str, Any],
+    ):
+
+        # If backend is a string, assume it's a RemoteBackend url.
+        # Checked first so a string backend is actually converted.
+        if isinstance(backend, str):
+            backend = RemoteBackend(
+                self.to_model_key(), host=backend, blocking=blocking
+            )
+        elif backend is not None:
+            pass
+        elif self._session is not None:
+            pass
+        elif remote:
+            backend = RemoteBackend(self.to_model_key(), blocking=blocking)
+
+        return super().trace(
+            *inputs,
+            method=method,
+            backend=backend,
+            trace=trace,
+            scan=scan,
+            **kwargs,
+        )
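A hedged usage sketch of the remote path (the model name and host are placeholders, not from this patch):

```python
# Hypothetical: running a trace on a remote backend resolved from the model key.
model = LanguageModel("openai-community/gpt2")

with model.trace("The Eiffel Tower is in", remote=True):
    ...

# Or with an explicit host URL, which is wrapped in a RemoteBackend:
with model.trace("The Eiffel Tower is in", backend="https://example-ndif-host"):
    ...
```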
+    def session(
+        self,
+        backend: Backend | str = None,
+        remote: bool = False,
+        blocking: bool = True,
+        **kwargs,
+    ) -> Session:
+
+        # If backend is a string, assume it's a RemoteBackend url.
+        # Checked first so a string backend is actually converted.
+        if isinstance(backend, str):
+            backend = RemoteBackend(
+                self.to_model_key(), host=backend, blocking=blocking
+            )
+        elif backend is not None:
+            pass
+        elif remote:
+            backend = RemoteBackend(self.to_model_key(), blocking=blocking)
+
+        return super().session(backend=backend, **kwargs)
+
+    def _remoteable_model_key(self) -> str:
+
+        raise NotImplementedError()
+
+    @classmethod
+    def _remoteable_from_model_key(cls, model_key: str, **kwargs) -> Self:
+        raise NotImplementedError()
+
+    def to_model_key(self) -> str:
+
+        return f"{to_import_path(type(self))}:{self._remoteable_model_key()}"
+
+    @classmethod
+    def from_model_key(cls, model_key: str, **kwargs) -> Self:
+
+        import_path, model_key = model_key.split(":", 1)
+
+        type: RemoteableMixin = from_import_path(import_path)
+
+        return type._remoteable_from_model_key(model_key, **kwargs)
diff --git a/src/nnsight/modeling/vllm/__init__.py b/src/nnsight/modeling/vllm/__init__.py
new file mode 100755
index 00000000..509cbbe3
--- /dev/null
+++ b/src/nnsight/modeling/vllm/__init__.py
@@ -0,0 +1 @@
+from .vllm import VLLM
diff --git a/src/nnsight/modeling/vllm/executors/GPUExecutor.py b/src/nnsight/modeling/vllm/executors/GPUExecutor.py
new file mode 100755
index 00000000..f7212980
--- /dev/null
+++ b/src/nnsight/modeling/vllm/executors/GPUExecutor.py
@@ -0,0 +1,9 @@
+
+from vllm.executor.gpu_executor import GPUExecutor
+
+
+class NNsightGPUExecutor(GPUExecutor):
+
+    def _get_worker_module_and_class(self):
+        return ("nnsight.modeling.vllm.workers.GPUWorker", "NNsightGPUWorker", None)
diff --git a/src/nnsight/modeling/vllm/executors/RayGPUExecutor.py b/src/nnsight/modeling/vllm/executors/RayGPUExecutor.py
new file mode 100644
index 00000000..588aa432
--- /dev/null
+++ b/src/nnsight/modeling/vllm/executors/RayGPUExecutor.py
@@ -0,0 +1,6 @@
+from vllm.executor.ray_gpu_executor import RayGPUExecutor
+
+class NNsightRayGPUExecutor(RayGPUExecutor):
+
+    def _get_worker_module_and_class(self):
+        return ("nnsight.modeling.vllm.workers.GPUWorker", "NNsightGPUWorker", None)
\ No newline at end of file
diff --git a/src/nnsight/toolbox/__init__.py b/src/nnsight/modeling/vllm/executors/__init__.py
old mode 100644
new mode 100755
similarity index 100%
rename from src/nnsight/toolbox/__init__.py
rename to src/nnsight/modeling/vllm/executors/__init__.py
diff --git a/src/nnsight/modeling/vllm/model_runners/GPUModelRunner.py b/src/nnsight/modeling/vllm/model_runners/GPUModelRunner.py
new file mode 100755
index 00000000..2bf69c0b
--- /dev/null
+++ b/src/nnsight/modeling/vllm/model_runners/GPUModelRunner.py
@@ -0,0 +1,435 @@
+import dataclasses
+from functools import wraps
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union, Callable
+
+import torch
+import torch.distributed
+
+from nnsight.intervention import NNsight
+from vllm.distributed import (get_pp_group,
+                              get_tensor_model_parallel_rank,
+                              get_tensor_model_parallel_world_size,
+                              split_tensor_along_last_dim,
+                              tensor_model_parallel_all_gather)
+from vllm.forward_context import set_forward_context
+from vllm.model_executor.layers.sampler import SamplerOutput
+from vllm.model_executor.layers.linear import (ColumnParallelLinear,
+                                               RowParallelLinear)
+from vllm.multimodal import MultiModalInputs
+from vllm.sequence import IntermediateTensors, SequenceGroupMetadata
+from vllm.worker.model_runner import (ModelInputForGPUWithSamplingMetadata,
+                                      ModelRunner)
+from vllm.worker.model_runner_base import (
+    _add_attn_metadata_broadcastable_dict,
+    _init_attn_metadata_from_tensor_dict, dump_input_when_exception)
+
+from ....intervention.protocols import InterventionProtocol
+from ....util
import Patch, Patcher + +from ....intervention.interleaver import Interleaver + +from .. import VLLM +from ..sampling import NNsightSamplingMetadata + +if TYPE_CHECKING: + from vllm.attention.backends.abstract import AttentionBackend + + from ..sampling import NNsightSamplingMetadata + + +class NNsightModelInputForGPUWithSamplingMetadata(ModelInputForGPUWithSamplingMetadata): + + sampling_metadata: Optional["NNsightSamplingMetadata"] = None + + def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: + tensor_dict = { + "input_tokens": self.input_tokens, + "input_positions": self.input_positions, + "lora_requests": self.lora_requests, + "lora_mapping": self.lora_mapping, + "multi_modal_kwargs": self.multi_modal_kwargs, + "prompt_adapter_mapping": self.prompt_adapter_mapping, + "prompt_adapter_requests": self.prompt_adapter_requests, + "virtual_engine": self.virtual_engine, + "request_ids_to_seq_ids": self.request_ids_to_seq_ids, + "finished_requests_ids": self.finished_requests_ids, + } + _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) + if self.sampling_metadata is not None: + tensor_dict["selected_token_indices"] = ( + self.sampling_metadata.selected_token_indices) + tensor_dict["intervention_graph"] = self.sampling_metadata.intervention_graph.copy() + tensor_dict["batch_group"] = self.sampling_metadata.batch_groups + return tensor_dict + + @classmethod + def from_broadcasted_tensor_dict( + cls, + tensor_dict: Dict[str, Any], + attn_backend: Optional["AttentionBackend"] = None, + ) -> "ModelInputForGPUWithSamplingMetadata": + selected_token_indices = tensor_dict.pop("selected_token_indices", None) + intervention_graph = tensor_dict.pop("intervention_graph", None) + intervention_graph.attachments = dict() + batch_groups = tensor_dict.pop("batch_group", None) + if selected_token_indices is not None: + tensor_dict["sampling_metadata"] = NNsightSamplingMetadata( + seq_groups=None, + selected_token_indices=selected_token_indices, + categorized_sample_indices=None, + num_prompts=0, + intervention_graph=intervention_graph, + batch_groups=batch_groups + ) + if attn_backend is not None: + tensor_dict = _init_attn_metadata_from_tensor_dict( + attn_backend, tensor_dict) + return cls(**tensor_dict) + + +class NNsightGPUModelRunner(ModelRunner): + + _model_input_cls: Type[NNsightModelInputForGPUWithSamplingMetadata] = ( + NNsightModelInputForGPUWithSamplingMetadata + ) + + def __init__(self, *args, **kwargs): + + super().__init__(*args, **kwargs) + + self.model: VLLM + + def load_model(self) -> None: + super().load_model() + + self.model = VLLM(self.model) + + def make_model_input_from_broadcasted_tensor_dict( + self, + tensor_dict: Dict[str, Any], + ) -> NNsightModelInputForGPUWithSamplingMetadata: + model_input = \ + NNsightModelInputForGPUWithSamplingMetadata.from_broadcasted_tensor_dict( + tensor_dict, + attn_backend=self.attn_backend, + ) + return model_input + + def prepare_model_input( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + virtual_engine: int = 0, + finished_requests_ids: Optional[List[str]] = None, + ) -> NNsightModelInputForGPUWithSamplingMetadata: + """Prepare the model input based on a given sequence group, including + metadata for the sampling step. + + The API assumes seq_group_metadata_list is sorted by prefill -> decode. + + The result tensors and data structure also batches input in prefill + -> decode order. For example, + + - input_tokens[:num_prefill_tokens] contains prefill tokens. 
+ - input_tokens[num_prefill_tokens:] contains decode tokens. + + If cuda graph is required, this API automatically pads inputs. + """ + model_input = self._prepare_model_input_tensors( + seq_group_metadata_list, finished_requests_ids + ) + if get_pp_group().is_last_rank: + # Sampling metadata is only required for the final pp group + generators = self.get_generators(finished_requests_ids) + sampling_metadata = NNsightSamplingMetadata.prepare( + seq_group_metadata_list, + model_input.seq_lens, + model_input.query_lens, + self.device, + self.pin_memory, + generators, + self.sampling_metadata_cache, + ) + else: + sampling_metadata = None + is_prompt = ( + seq_group_metadata_list[0].is_prompt if seq_group_metadata_list else None + ) + return dataclasses.replace( + model_input, + sampling_metadata=sampling_metadata, + is_prompt=is_prompt, + virtual_engine=virtual_engine, + ) + + @torch.inference_mode() + @dump_input_when_exception(exclude_args=[0], exclude_kwargs=["self"]) + def execute_model( + self, + model_input: NNsightModelInputForGPUWithSamplingMetadata, + kv_caches: List[torch.Tensor], + intermediate_tensors: Optional[IntermediateTensors] = None, + num_steps: int = 1, + ) -> Optional[Union[List[SamplerOutput], IntermediateTensors]]: + + if model_input.sampling_metadata.intervention_graph is None: + + return super().execute_model( + model_input, + kv_caches, + intermediate_tensors=intermediate_tensors, + num_steps=num_steps, + ) + + if num_steps > 1: + raise ValueError("num_steps > 1 is not supported in ModelRunner") + + if self.lora_config: + assert model_input.lora_requests is not None + assert model_input.lora_mapping is not None + self.set_active_loras(model_input.lora_requests, model_input.lora_mapping) + + if self.prompt_adapter_config: + assert model_input.prompt_adapter_requests is not None + assert model_input.prompt_adapter_mapping is not None + self.set_active_prompt_adapters( + model_input.prompt_adapter_requests, model_input.prompt_adapter_mapping + ) + + self.attn_state.begin_forward(model_input) + + # Currently cuda graph is only supported by the decode phase. + assert model_input.attn_metadata is not None + prefill_meta = model_input.attn_metadata.prefill_metadata + decode_meta = model_input.attn_metadata.decode_metadata + # TODO(andoorve): We can remove this once all + # virtual engines share the same kv cache. 
+ virtual_engine = model_input.virtual_engine + if prefill_meta is None and decode_meta.use_cuda_graph: + assert model_input.input_tokens is not None + graph_batch_size = model_input.input_tokens.shape[0] + model_executable = self.graph_runners[virtual_engine][graph_batch_size] + else: + model_executable = self.model + + multi_modal_kwargs = model_input.multi_modal_kwargs or {} + seqlen_agnostic_kwargs = ( + { + "finished_requests_ids": model_input.finished_requests_ids, + "request_ids_to_seq_ids": model_input.request_ids_to_seq_ids, + } + if self.has_inner_state + else {} + ) + if ( + self.observability_config is not None + and self.observability_config.collect_model_forward_time + ): + model_forward_start = torch.cuda.Event(enable_timing=True) + model_forward_end = torch.cuda.Event(enable_timing=True) + model_forward_start.record() + + ## NNSIGHT ######################################### + + intervention_graph = model_input.sampling_metadata.intervention_graph + + intervention_graph.set(self.model) + + batch_groups = model_input.sampling_metadata.batch_groups + + interleaver = Interleaver( + intervention_graph, batch_groups=batch_groups, batch_size=len(model_input.input_tokens) + ) + + def inner(): + + nonlocal interleaver + + with set_forward_context(model_input.attn_metadata): + hidden_or_intermediate_states = self.model._model( + input_ids=model_input.input_tokens, + positions=model_input.input_positions, + kv_caches=kv_caches, + attn_metadata=model_input.attn_metadata, + intermediate_tensors=intermediate_tensors, + **MultiModalInputs.as_kwargs( + multi_modal_kwargs, device=self.device + ), + **seqlen_agnostic_kwargs, + ) + + if ( + self.observability_config is not None + and self.observability_config.collect_model_forward_time + ): + model_forward_end.record() + + # Compute the logits in the last pipeline stage. + if not get_pp_group().is_last_rank: + if ( + self.is_driver_worker + and hidden_or_intermediate_states is not None + and isinstance(hidden_or_intermediate_states, IntermediateTensors) + and self.observability_config is not None + and self.observability_config.collect_model_forward_time + ): + model_forward_end.synchronize() + model_forward_time = model_forward_start.elapsed_time( + model_forward_end + ) + orig_model_forward_time = 0.0 + if intermediate_tensors is not None: + orig_model_forward_time = intermediate_tensors.tensors.get( + "model_forward_time", torch.tensor(0.0) + ).item() + hidden_or_intermediate_states.tensors["model_forward_time"] = ( + torch.tensor(model_forward_time + orig_model_forward_time) + ) + return hidden_or_intermediate_states + + logits = self.model.compute_logits( + hidden_or_intermediate_states, model_input.sampling_metadata + ) + + # patching the batch_size to be the number of logits, + # since vLLM optimizes the inference by turning the size of the input to be of size power of 2. + patches = [Patch(interleaver, logits.shape[0], "batch_size")] + + # `batch_groups` is adapted to the token positions of the flattened input during the first token generation iteration + # since the logit and sample tensors have different number of tokens, + # we need to patch `batch_groups` to reflect the correct batches specified by the invoker contexts defined by the user. 
+            # `batch_groups` was adapted to the token positions of the flattened
+            # input during the first token generation iteration. Since the logit
+            # and sample tensors hold different numbers of tokens, patch
+            # `batch_groups` to reflect the batches specified by the invoker
+            # contexts defined by the user.
+            if model_input.sampling_metadata.seq_groups[0].is_prompt:
+                patches.append(Patch(interleaver, model_input.sampling_metadata.nns_batch_groups, "batch_groups"))
+
+            with Patcher(patches):
+                logits = self.model.logits(logits)
+
+            if not self.is_driver_worker:
+                return []
+
+            if model_input.async_callback is not None:
+                model_input.async_callback()
+
+            # Sample the next token.
+            output: SamplerOutput = self.model.sample(
+                logits=logits,
+                sampling_metadata=model_input.sampling_metadata,
+            )
+
+            og_sample_tokens = torch.tensor([token.samples[0].output_token for token in output.outputs])
+
+            with Patcher(patches):
+                sample_tokens = self.model.samples(og_sample_tokens)
+
+            # Inject any changes to the sampled tokens.
+            for idx, seq_out in enumerate(output.outputs):
+                sample = seq_out.samples[0]
+                sample.output_token = sample_tokens[idx].item()
+                logprob = sample.logprobs.pop(og_sample_tokens[idx].item())
+                sample.logprobs[sample_tokens[idx].item()] = logprob
+
+            if (
+                self.observability_config is not None
+                and self.observability_config.collect_model_forward_time
+                and output is not None
+            ):
+                model_forward_end.synchronize()
+                model_forward_time = model_forward_start.elapsed_time(model_forward_end)
+                orig_model_forward_time = 0.0
+                if intermediate_tensors is not None:
+                    orig_model_forward_time = intermediate_tensors.tensors.get(
+                        "model_forward_time", torch.tensor(0.0)
+                    ).item()
+                # If there are multiple workers, we are still tracking the latency
+                # from the start time of the driver worker to the end time of the
+                # driver worker. The model forward time will then end up covering
+                # the communication time as well.
+                output.model_forward_time = orig_model_forward_time + model_forward_time
+
+            if self.return_hidden_states:
+                # We only need to pass the hidden states of the most recent token.
+                assert model_input.sampling_metadata is not None
+                indices = model_input.sampling_metadata.selected_token_indices
+                if model_input.is_prompt:
+                    hidden_states = hidden_or_intermediate_states.index_select(
+                        0, indices
+                    )
+                    output.prefill_hidden_states = hidden_or_intermediate_states
+                elif decode_meta.use_cuda_graph:
+                    hidden_states = hidden_or_intermediate_states[: len(indices)]
+                else:
+                    hidden_states = hidden_or_intermediate_states
+
+                output.hidden_states = hidden_states
+
+            return output
+
+        def parallel_intervene(intervene_func: Callable) -> Callable:
+            """Create an intervene wrapper that handles tensor-parallel execution of vLLM models.
+
+            Args:
+                intervene_func (Callable): Intervention function to wrap.
+
+            Returns:
+                Callable: The wrapped intervention function.
+            """
+
+            @wraps(intervene_func)
+            def parallel_intervene_wrapper(
+                activations: Any,
+                module_path: str,
+                module: torch.nn.Module,
+                key: str,
+                interleaver: Interleaver
+            ) -> Any:
+                """InterventionProtocol.intervene wrapper handling the parallelized modules of vLLM.
+
+                If some activations were parallelized, they need to be gathered into a full
+                tensor before intervening on them, then split again before being returned.
+
+                Args:
+                    activations (Any): Either the inputs or outputs of a torch module.
+                    module_path (str): Module path of the current relevant module relative to the root model.
+                    module (torch.nn.Module): Module to be intervened on.
+                    key (str): Key denoting either "input" or "output" of module.
+                    interleaver (Interleaver): Handler object that stores the intervention graph and keeps track of module call count.
+
+                Returns:
+                    Any: The activations, potentially modified by the intervention graph.
+ """ + # If the activations are parallelized, they must be gathered before intervening on them + if isinstance(module, ColumnParallelLinear) and key == "output" and not module.gather_output: + full_tensor = tensor_model_parallel_all_gather(activations[0]) + activations = (full_tensor, ) + activations[1:] + if isinstance(module, RowParallelLinear) and key == "input" and module.input_is_parallel: + full_tensor = tensor_model_parallel_all_gather(activations[0][0]) + activations = ((full_tensor,) + activations[0][1:], ) + activations[1:] + + activations = intervene_func(activations, module_path, module, key, interleaver) + + # If the activations were parallelized originally, they must be split again before returning them + if isinstance(module, ColumnParallelLinear) and key == "output" and not module.gather_output: + tp_rank = get_tensor_model_parallel_rank() + splitted_input = split_tensor_along_last_dim(activations[0], num_partitions=get_tensor_model_parallel_world_size()) + activations = (splitted_input[tp_rank].contiguous(),) + activations[1:] + if isinstance(module, RowParallelLinear) and key == "input" and module.input_is_parallel: + tp_rank = get_tensor_model_parallel_rank() + splitted_input = split_tensor_along_last_dim(activations[0][0], num_partitions=get_tensor_model_parallel_world_size()) + activations = ((splitted_input[tp_rank].contiguous(),) + activations[0][1:],) + activations[1:] + + return activations + + return parallel_intervene_wrapper + + if get_tensor_model_parallel_world_size() > 1: + intervene_patch = Patch(InterventionProtocol, parallel_intervene(InterventionProtocol.intervene), "intervene") + else: + intervene_patch = Patch(InterventionProtocol, InterventionProtocol.intervene, "intervene") + + with Patcher([intervene_patch]): + output = NNsight.interleave( + self.model, + fn=inner, + interleaver=interleaver, + ) + + return [output] diff --git a/src/nnsight/toolbox/interventions/__init__.py b/src/nnsight/modeling/vllm/model_runners/__init__.py old mode 100644 new mode 100755 similarity index 100% rename from src/nnsight/toolbox/interventions/__init__.py rename to src/nnsight/modeling/vllm/model_runners/__init__.py diff --git a/src/nnsight/modeling/vllm/sampling.py b/src/nnsight/modeling/vllm/sampling.py new file mode 100755 index 00000000..5dcf63fa --- /dev/null +++ b/src/nnsight/modeling/vllm/sampling.py @@ -0,0 +1,170 @@ +import copy +from typing import Dict, List, Optional, Tuple + +from nnsight.intervention.graph import InterventionGraph +import torch + +from vllm.model_executor.sampling_metadata import ( + SamplingMetadata, + SamplingMetadataCache, + _prepare_seq_groups, +) +from vllm.sampling_params import SamplingParams +from vllm.sequence import SequenceGroupMetadata +from vllm.utils import async_tensor_h2d + + +class NNsightSamplingParams(SamplingParams): + + intervention_graph: Optional[InterventionGraph] = None + nns_batch_groups: Optional[List[Tuple[int, int]]] = None + invoker_group: Optional[int] = None + is_default_param: bool = True + + def clone(self) -> "SamplingParams": + """Deep copy excluding LogitsProcessor objects. + + LogitsProcessor objects are excluded because they may contain an + arbitrary, nontrivial amount of data. 
+        See https://github.com/vllm-project/vllm/issues/3087
+        """
+
+        memo = {}
+
+        if self.logits_processors is not None:
+            for lp in self.logits_processors:
+                memo[id(lp)] = lp
+
+        if self.intervention_graph is not None:
+            memo[id(self.intervention_graph)] = self.intervention_graph
+
+        return copy.deepcopy(self, memo=memo)
+
+
+class NNsightSamplingMetadata(SamplingMetadata):
+
+    intervention_graph: Optional[InterventionGraph] = None
+    nns_batch_groups: Optional[List[Tuple[int, int]]] = None
+    batch_groups: Optional[List[Tuple[int, int]]] = None
+
+    def __init__(
+        self,
+        *args,
+        intervention_graph: InterventionGraph = None,
+        nns_batch_groups: List[Tuple[int, int]] = None,
+        batch_groups: Dict[int, Tuple[int, int]] = None,
+        **kwargs,
+    ):
+
+        super().__init__(*args, **kwargs)
+
+        self.intervention_graph = intervention_graph
+        self.nns_batch_groups = nns_batch_groups
+        self.batch_groups = batch_groups
+
+    @staticmethod
+    def prepare(
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+        seq_lens: List[int],
+        query_lens: List[int],
+        device: str,
+        pin_memory: bool,
+        generators: Optional[Dict[str, torch.Generator]] = None,
+        cache: Optional[SamplingMetadataCache] = None,
+    ) -> "SamplingMetadata":
+        (
+            seq_groups,
+            selected_token_indices,
+            categorized_sample_indices,
+            num_prompts,
+        ) = _prepare_seq_groups(
+            seq_group_metadata_list, seq_lens, query_lens, device, generators, cache
+        )
+        selected_token_indices = async_tensor_h2d(
+            selected_token_indices,
+            dtype=torch.long,
+            target_device=device,
+            pin_memory=pin_memory,
+        )
+        categorized_sample_indices = {
+            t: async_tensor_h2d(
+                seq_ids,
+                dtype=torch.int,
+                target_device=device,
+                pin_memory=pin_memory,
+            )
+            for t, seq_ids in categorized_sample_indices.items()
+        }
+
+        ### NNSIGHT ###########################################
+
+        intervention_graphs = []
+        nns_batch_groups = []
+        batch_groups = []
+        batch_groups_offset = 0
+
+        for idx, seq_group in enumerate(seq_group_metadata_list):
+
+            if isinstance(seq_group.sampling_params, NNsightSamplingParams):
+
+                seq_group_intervention_graph = (
+                    seq_group.sampling_params.intervention_graph
+                )
+
+                seq_group_nns_batch_groups = seq_group.sampling_params.nns_batch_groups
+
+                if isinstance(seq_group_intervention_graph, InterventionGraph):
+
+                    if seq_group_intervention_graph not in intervention_graphs:
+
+                        intervention_graphs.append(seq_group_intervention_graph)
+
+                        nns_batch_groups.append(seq_group_nns_batch_groups)
+
+                        batch_groups_offset = len(batch_groups)
+
+                    seq_group_batch_group = (
+                        seq_group.sampling_params.invoker_group + batch_groups_offset
+                    )
+
+                    batch_size = query_lens[idx]
+
+                    if seq_group_batch_group >= len(batch_groups):
+                        batch_start = sum(batch_groups[-1]) if len(batch_groups) > 0 else 0
+                        batch_groups.append((batch_start, batch_size))
+                    else:
+                        batch_start, seq_group_batch_size = batch_groups[
+                            seq_group_batch_group
+                        ]
+                        batch_size += seq_group_batch_size
+
+                        batch_groups[seq_group_batch_group] = (batch_start, batch_size)
+
+        n_graphs = len(intervention_graphs)
+
+        if n_graphs == 0:
+            intervention_graph = None
+            nns_batch_groups = None
+        elif n_graphs == 1:
+            intervention_graph = intervention_graphs[0]
+            nns_batch_groups = nns_batch_groups[0]
+
+        """ else:
+            intervention_graph = MultiGraph(intervention_graphs.values())
+
+            InterventionProtocol.shift(intervention_graph) """
+
+        ###########################################
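The loop above folds each sequence group's query length into a `(batch_start, batch_size)` span per invoker group. A plain-Python rendering of that bookkeeping, using made-up query lengths and assuming a single intervention graph (the real implementation also offsets group indices per graph):

```python
from typing import List, Tuple

def build_batch_groups(query_lens: List[int], invoker_groups: List[int]) -> List[Tuple[int, int]]:
    """Accumulate (batch_start, batch_size) spans per invoker group, in order."""
    batch_groups: List[Tuple[int, int]] = []
    for length, group in zip(query_lens, invoker_groups):
        if group >= len(batch_groups):
            # New group: it starts where the previous group ends.
            start = sum(batch_groups[-1]) if batch_groups else 0
            batch_groups.append((start, length))
        else:
            # Existing group: grow its size by this sequence's query length.
            start, size = batch_groups[group]
            batch_groups[group] = (start, size + length)
    return batch_groups

# Two prompts in invoker group 0 (7 and 5 tokens), one prompt in group 1 (3 tokens).
print(build_batch_groups([7, 5, 3], [0, 0, 1]))  # [(0, 12), (12, 3)]
```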
+        sampling_metadata = NNsightSamplingMetadata(
+            seq_groups=seq_groups,
+            selected_token_indices=selected_token_indices,
+            categorized_sample_indices=categorized_sample_indices,
+            num_prompts=num_prompts,
+            intervention_graph=intervention_graph,
+            nns_batch_groups=nns_batch_groups,
+            batch_groups=batch_groups,
+        )
+
+        return sampling_metadata
diff --git a/src/nnsight/modeling/vllm/vllm.py b/src/nnsight/modeling/vllm/vllm.py
new file mode 100755
index 00000000..005b356f
--- /dev/null
+++ b/src/nnsight/modeling/vllm/vllm.py
@@ -0,0 +1,274 @@
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Union, Optional
+
+from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs
+
+from ...util import WrapperModule
+from ..mixins import RemoteableMixin
+from .executors.GPUExecutor import NNsightGPUExecutor
+from .executors.RayGPUExecutor import NNsightRayGPUExecutor
+from .sampling import NNsightSamplingParams
+from dataclasses import fields
+from ...intervention.interleaver import Interleaver
+from ...intervention import Envoy
+
+if TYPE_CHECKING:
+    from ...intervention.graph import InterventionGraph
+    from torch.nn import Module
+    from vllm.transformers_utils.tokenizer import AnyTokenizer
+    from vllm.config import ModelConfig, SchedulerConfig, ParallelConfig
+
+try:
+    from vllm.distributed import (destroy_distributed_environment,
+                                  destroy_model_parallel,
+                                  init_distributed_environment,
+                                  initialize_model_parallel)
+    from vllm.engine.arg_utils import EngineArgs
+    from vllm.entrypoints.llm import LLM
+    from vllm.model_executor.model_loader.loader import _initialize_model
+except Exception as e:
+    raise type(e)(
+        "Install vllm in your environment to use it with NNsight. "
+        + "https://docs.vllm.ai/en/latest/getting_started/installation.html"
+    ) from e
+
+
+class VLLM(RemoteableMixin):
+    """NNsight wrapper to conduct interventions on a vLLM inference engine.
+
+    Attributes:
+        - vllm_entrypoint (vllm.LLM): vLLM language model.
+        - tokenizer (vllm.transformers_utils.tokenizer.AnyTokenizer): tokenizer.
+        - logits (nnsight.WrapperModule): logits.
+        - samples (nnsight.WrapperModule): sampled tokens.
+
+    .. code-block:: python
+
+        from nnsight.modeling.vllm import VLLM
+        from vllm import SamplingParams
+
+        model = VLLM("gpt2")
+
+        prompt = ["The Eiffel Tower is in the city of"]
+
+        with model.trace(prompt, temperature=0.0, top_p=0.95, stop=['.']) as tracer:
+            model.transformer.h[8].output[-1][:] = 0
+
+            output = model.output.save()
+
+        print(model.tokenizer.decode(output.value.argmax(dim=-1)[-1]))
+    """
+
+    __methods__ = {"generate": "_execute"}
+
+    def __init__(self, *args, **kwargs) -> None:
+
+        self.vllm_entrypoint: LLM = None
+        self.tokenizer: "AnyTokenizer" = None
+
+        super().__init__(*args, **kwargs)
+
+        self.logits: Envoy = WrapperModule()
+        self.samples: Envoy = WrapperModule()
+
+    def _load_meta(self, repo_id: str, **kwargs) -> "Module":
+
+        # no parallelism during initialization
+        kwargs["tensor_parallel_size"] = 1
+        kwargs["pipeline_parallel_size"] = 1
+
+        # creating vLLM Engine args
+        engine_args = EngineArgs(
+            model=repo_id,
+            **kwargs,
+        )
+
+        # creating the vllm engine configuration
+        vllm_config = engine_args.create_engine_config()
+        vllm_config_dict = {field.name: getattr(vllm_config, field.name) for field in fields(type(vllm_config))}
+
+        # starting the distributed environment
+        init_distributed_environment(
+            1,
+            0,
+            "tcp://127.0.0.1:47303",
+            0,
+            backend="gloo",
+        )
+
+        # start tensor parallel group
+        initialize_model_parallel(backend="gloo")
+
+        # initialize the model
+        model = _initialize_model(vllm_config)
+
+        # load the tokenizer
+        self.tokenizer = self._load_tokenizer(
+            model_config=vllm_config_dict["model_config"],
+            scheduler_config=vllm_config_dict["scheduler_config"],
+            parallel_config=vllm_config_dict["parallel_config"],
+            enable_lora=bool(vllm_config_dict["lora_config"]),
+        )
+
+        return model
+
+    def _load_tokenizer(
+        self,
+        model_config: "ModelConfig",
+        scheduler_config: "SchedulerConfig",
+        parallel_config: "ParallelConfig",
+        enable_lora: bool) -> "AnyTokenizer":
+
+        return init_tokenizer_from_configs(
+            model_config=model_config,
+            scheduler_config=scheduler_config,
+            parallel_config=parallel_config,
+            enable_lora=enable_lora,
+        ).tokenizer
+
+    def _load(self, repo_id: str, **kwargs) -> "Module":
+
+        destroy_model_parallel()
+        destroy_distributed_environment()
+
+        distributed_executor_backend = NNsightGPUExecutor
+        if (
+            "tensor_parallel_size" in kwargs.keys()
+            and kwargs["tensor_parallel_size"] > 1
+        ):
+            distributed_executor_backend = NNsightRayGPUExecutor
+
+        llm = LLM(
+            repo_id,
+            **kwargs,
+            distributed_executor_backend=distributed_executor_backend,
+        )
+
+        self.vllm_entrypoint = llm
+
+        # load the tokenizer
+        self.tokenizer = self._load_tokenizer(
+            model_config=llm.llm_engine.model_config,
+            scheduler_config=llm.llm_engine.scheduler_config,
+            parallel_config=llm.llm_engine.parallel_config,
+            enable_lora=bool(llm.llm_engine.lora_config),
+        )
+
+        if kwargs.get("tensor_parallel_size", 1) > 1:
+            return llm.llm_engine.model_executor.driver_worker.worker.model_runner.model
+        else:
+            return llm.llm_engine.model_executor.driver_worker.model_runner.model
+
+    def _prepare_input(
+        self, *args, **kwargs
+    ) -> Tuple[Tuple[Tuple[Any], Dict[str, Any]], int]:
+
+        if "processed" in kwargs:
+            return (args, kwargs), len(args[0])
+
+        prompts = []
+        params = []
+
+        for arg in args:
+
+            if type(arg) is not list:
+                arg = [arg]
+
+            for prompt in arg:
+
+                param = NNsightSamplingParams(
+                    **kwargs,
+                )
+
+                if kwargs != {}:
+                    param.is_default_param = False
+
+                prompts.append(prompt)
+                params.append(param)
+
+        return ((prompts, params), {"processed": True}), len(prompts)
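`_prepare_input` above flattens whatever the user passed into parallel `prompts`/`params` lists, one sampling-params object per prompt. A standalone sketch of that normalization, with a plain dataclass standing in for `NNsightSamplingParams`:

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List, Tuple

@dataclass
class Params:
    # Hypothetical stand-in for NNsightSamplingParams.
    kwargs: Dict[str, Any] = field(default_factory=dict)
    is_default_param: bool = True

def prepare_input(*args: Any, **kwargs: Any) -> Tuple[List[str], List[Params]]:
    prompts: List[str] = []
    params: List[Params] = []
    for arg in args:
        if not isinstance(arg, list):
            arg = [arg]  # promote a single prompt to a one-element batch
        for prompt in arg:
            param = Params(kwargs=dict(kwargs))
            if kwargs:
                param.is_default_param = False  # user overrode sampling defaults
            prompts.append(prompt)
            params.append(param)
    return prompts, params

prompts, params = prepare_input("Hello", ["a", "b"], temperature=0.0)
print(len(prompts), params[0].is_default_param)  # 3 False
```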
+
+    def _batch(
+        self,
+        batched_inputs: Tuple[Tuple[Any] | Dict[str, Any]] | None,
+        prompts: List[str],
+        params: List[NNsightSamplingParams],
+        **kwargs,
+    ) -> Tuple[Tuple[Any] | Dict[str, Any]]:
+
+        if batched_inputs is None:
+            batched_inputs = ([], []), {"invoker_group": 0}
+
+        (bprompts, bparams), kwargs = batched_inputs
+
+        invoker_group = kwargs["invoker_group"]
+
+        for prompt in prompts:
+            bprompts.append(prompt)
+
+        for param in params:
+
+            param.invoker_group = invoker_group
+
+            bparams.append(param)
+
+        kwargs["invoker_group"] += 1
+
+        return (bprompts, bparams), kwargs
+
+    def interleave(
+        self,
+        interleaver: Interleaver,
+        *args,
+        fn: Optional[Union[Callable, str]] = None,
+        **kwargs,
+    ) -> Any:
+
+        """ if not self.dispatched:
+            self.dispatch()
+
+        for param in params:
+
+            param.intervention_graph = intervention_graph
+
+        fn(prompts, params, **kwargs)
+
+        intervention_graph.alive = False """
+
+        if not self.dispatched:
+            self.dispatch()
+
+        for param in args[1]:
+
+            param.intervention_graph = interleaver.graph
+            param.nns_batch_groups = interleaver.batch_groups
+
+        if fn is None:
+            fn = self._execute
+        elif isinstance(fn, str):
+            fn = getattr(self, fn)
+
+        return fn(*args, **kwargs)
+
+    def _execute(
+        self,
+        prompts: List[str],
+        params: List[NNsightSamplingParams],
+        **kwargs,
+    ) -> Any:
+
+        kwargs.pop('invoker_group')
+
+        for param in params:
+            if param.is_default_param:
+                for attr, value in kwargs.items():
+                    if hasattr(NNsightSamplingParams, attr):
+                        setattr(param, attr, value)
+
+        self.vllm_entrypoint.generate(prompts, sampling_params=params)
+
+
+if TYPE_CHECKING:
+
+    class VLLM(VLLM, LLM):
+        pass
diff --git a/src/nnsight/modeling/vllm/workers/GPUWorker.py b/src/nnsight/modeling/vllm/workers/GPUWorker.py
new file mode 100755
index 00000000..77160380
--- /dev/null
+++ b/src/nnsight/modeling/vllm/workers/GPUWorker.py
@@ -0,0 +1,10 @@
+from vllm.worker.worker import Worker
+
+from ..model_runners.GPUModelRunner import NNsightGPUModelRunner
+
+
+class NNsightGPUWorker(Worker):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, model_runner_cls=NNsightGPUModelRunner, **kwargs)
diff --git a/src/nnsight/toolbox/.ipynb_checkpoints/__init__-checkpoint.py b/src/nnsight/modeling/vllm/workers/__init__.py
old mode 100644
new mode 100755
similarity index 100%
rename from src/nnsight/toolbox/.ipynb_checkpoints/__init__-checkpoint.py
rename to src/nnsight/modeling/vllm/workers/__init__.py
diff --git a/src/nnsight/models/Mamba.py b/src/nnsight/models/Mamba.py
deleted file mode 100755
index a4fe100e..00000000
--- a/src/nnsight/models/Mamba.py
+++ /dev/null
@@ -1,344 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Callable, Dict, List, Tuple, Union
-
-import accelerate
-import causal_conv1d_cuda
-import mamba_ssm
-import selective_scan_cuda
-import torch
-import torch.nn.functional as F
-from einops import rearrange, repeat
-from mamba_ssm import MambaLMHeadModel
-from mamba_ssm.models.config_mamba import MambaConfig
-from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf
-from transformers import AutoTokenizer, BatchEncoding, PreTrainedModel
-
-from nnsight.util import WrapperModule
-
-from ..patching import Patch, Patcher
-from .LanguageModel import LanguageModel
-
-from torch._guards import detect_fake_mode
-
-class Mamba(LanguageModel):
-
-    def _load(self, repo_id: str, device='cpu', **kwargs) -> PreTrainedModel:
-
-        config = MambaConfig(**load_config_hf(repo_id))
-
-        if self.tokenizer is None:
-
-            self.tokenizer
= AutoTokenizer.from_pretrained( - repo_id, config=config, padding_side="left" - ) - self.tokenizer.pad_token = self.tokenizer.eos_token - - if self._model is None: - - model = MambaLMHeadModel(config, device="meta", dtype=None, **kwargs) - - setattr(model, 'generator', WrapperModule()) - - return model - - model = MambaLMHeadModel(config, device=device, **kwargs) - model.load_state_dict(load_state_dict_hf(repo_id, device=device, **kwargs)) - - setattr(model, 'generator', WrapperModule()) - - return model - - def _execute_forward(self, prepared_inputs: Any, *args, **kwargs): - - device = next(self._model.parameters()).device - - patcher = None - - with Patcher() as patcher: - - if detect_fake_mode(prepared_inputs): - - def blah(hs, *args, residual=None, **kwargs): - return hs, residual or torch.rand_like(hs) - - def blah1(hs, *args, **kwargs): - return hs - - def blah2(hs, *args, **kwargs): - return hs - - def blah3(conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus): - return ( - conv1d_out, - torch.zeros((*conv1d_out.shape, A.shape[1] * 2), device="meta"), - conv1d_out, - ) - - - patcher.add(Patch(mamba_ssm.modules.mamba_simple, blah, "rms_norm_fn")) - patcher.add( - Patch(mamba_ssm.models.mixer_seq_simple, blah1, "rms_norm_fn") - ) - patcher.add(Patch(causal_conv1d_cuda, blah2, "causal_conv1d_fwd")) - patcher.add(Patch(selective_scan_cuda, blah3, "fwd")) - - - return self._model( - prepared_inputs["input_ids"].to(device), - *args, - **kwargs, - ) - - def _execute_generate( - self, prepared_inputs: Any, *args, max_length=1, **kwargs - ): - - device = next(self._model.parameters()).device - - patcher = None - - with Patcher() as patcher: - - if detect_fake_mode(prepared_inputs): - - def blah(hs, *args, residual=None, **kwargs): - return hs, residual or torch.rand_like(hs) - - def blah1(hs, *args, **kwargs): - return hs - - def blah2(hs, *args, **kwargs): - return hs - - def blah3(conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus): - return ( - conv1d_out, - torch.zeros((*conv1d_out.shape, A.shape[1] * 2), device="meta"), - conv1d_out, - ) - - - patcher.add(Patch(mamba_ssm.modules.mamba_simple, blah, "rms_norm_fn")) - patcher.add( - Patch(mamba_ssm.models.mixer_seq_simple, blah1, "rms_norm_fn") - ) - patcher.add(Patch(causal_conv1d_cuda, blah2, "causal_conv1d_fwd")) - patcher.add(Patch(selective_scan_cuda, blah3, "fwd")) - - output = self._model.generate( - prepared_inputs["input_ids"].to(device), - *args, - max_length=max_length, - **kwargs, - ) - - self._model.generator(output) - - return output - -class SSM(torch.nn.Module): - class DiscA(torch.nn.Module): - def forward(self, delta, A): - return torch.exp(torch.einsum("bdl,dn->bdln", delta, A)) - - class DiscB(torch.nn.Module): - def forward(self, delta, B): - return torch.einsum("bdl,bnl->bdln", delta, B) - - class Hx(torch.nn.Module): - class Bx(torch.nn.Module): - def forward(self, deltaB: torch.Tensor, x: torch.Tensor): - return torch.einsum("bdn,bd->bdn", deltaB, x) - - class Ah(torch.nn.Module): - def forward(self, deltaA: torch.Tensor, h: torch.Tensor): - return deltaA * h - - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - - self.bx = SSM.Hx.Bx() - self.ah = SSM.Hx.Ah() - - def forward( - self, - deltaA: torch.Tensor, - deltaB: torch.Tensor, - x: torch.Tensor, - h: torch.Tensor, - ): - return self.ah(deltaA, h) + self.bx(deltaB, x) - - class Yh(torch.nn.Module): - def forward(self, h, C): - y = torch.einsum("bdn,bn->bd", h, C) - - if y.is_complex(): - y = y.real * 2 - - 
return y - - def __init__(self): - super().__init__() - - self.discA = SSM.DiscA() - self.discB = SSM.DiscB() - self.hx = SSM.Hx() - self.yh = SSM.Yh() - - def forward(self, x, delta, A, B, C, D=None, z=None, return_last_state=False): - dtype_in = x.dtype - - x = x.float() - delta = delta.float() - - batch, dim, dstate = x.shape[0], A.shape[0], A.shape[1] - - if A.is_complex(): - B = torch.view_as_complex( - rearrange(B.float(), "... (L two) -> ... L two", two=2) - ) - C = torch.view_as_complex( - rearrange(C.float(), "... (L two) -> ... L two", two=2) - ) - else: - B = B.float() - C = C.float() - - deltaA = self.discA(delta, A) - - deltaB = self.discB(delta, B) - - last_state = None - - h = A.new_zeros((batch, dim, dstate)) - - ys = [] - - # Main recurrence loop - for token_idx in range(x.shape[2]): - h = self.hx( - deltaA[:, :, token_idx], deltaB[:, :, token_idx], x[:, :, token_idx], h - ) - - y = self.yh(h, C[:, :, token_idx]) - - if token_idx == x.shape[2] - 1: - last_state = h - - ys.append(y) - - y = torch.stack(ys, dim=2) # (batch dim L) - - out = y if D is None else y + x * rearrange(D, "d -> d 1") - - if z is not None: - out = out * F.silu(z) - - out = out.to(dtype=dtype_in) - - return out if not return_last_state else (out, last_state) - - -class MambaModuleInterp(mamba_ssm.modules.mamba_simple.Mamba): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.dt = WrapperModule() - self.B = WrapperModule() - self.C = WrapperModule() - - self.ssm = SSM() - - self.delta_softplus = torch.nn.Softplus() - - def forward(self, hidden_states, inference_params=None): - """ - hidden_states: (B, L, D) - Returns: same shape as hidden_states - """ - batch, seqlen, dim = hidden_states.shape - - conv_state, ssm_state = None, None - if inference_params is not None: - conv_state, ssm_state = self._get_states_from_cache(inference_params, batch) - if inference_params.seqlen_offset > 0: - # The states are updated inplace - out, _, _ = self.step(hidden_states, conv_state, ssm_state) - return out - - # We do matmul and transpose BLH -> HBL at the same time - xz = rearrange( - self.in_proj(hidden_states), - "b l d -> b d l", - l=seqlen, - ) - - A = -torch.exp(self.A_log.float()) # (d_inner, d_state) - - # In the backward pass we write dx and dz next to each other to avoid torch.cat - x, z = xz.chunk(2, dim=1) - - # Compute short convolution - if conv_state is not None: - # If we just take x[:, :, -self.d_conv :], it will error if seqlen < self.d_conv - # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. 
- conv_state.copy_( - F.pad(x, (self.d_conv - x.shape[-1], 0)) - ) # Update state (B D W) - - x = self.act(self.conv1d(x)[..., :seqlen]) - - x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) - dt, B, C = torch.split( - x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1 - ) - dt = self.dt_proj(dt).t() - - dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) - B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() - C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous() - - dt = self.dt(dt) - B = self.B(B) - C = self.C(C) - - dt = self.delta_softplus(dt) - - y = self.ssm( - x, - dt, - A, - B, - C, - self.D.float(), - z=z, - return_last_state=ssm_state is not None, - ) - if ssm_state is not None: - y, last_state = y - ssm_state.copy_(last_state) - - y = rearrange(y, "b d l -> b l d") - - out = self.out_proj(y) - - return out - - -class MambaInterp(Mamba): - def __init__(self, *args, **kwargs): - patcher = Patcher() - - patcher.add( - Patch( - mamba_ssm.models.mixer_seq_simple, - MambaModuleInterp, - "Mamba", - ) - ) - - patcher.__enter__() - - super().__init__(*args, **kwargs) diff --git a/src/nnsight/models/mixins/Generation.py b/src/nnsight/models/mixins/Generation.py deleted file mode 100644 index 91df0733..00000000 --- a/src/nnsight/models/mixins/Generation.py +++ /dev/null @@ -1,46 +0,0 @@ -from typing import Any - -from ... import NNsight -from ...contexts.Tracer import Tracer - -class GenerationMixin(NNsight): - - def generate(self, *args, **kwargs) -> Tracer: - - return self.trace(*args, generate=True, **kwargs) - - def _execute( - self, *args, generate: bool = False, **kwargs - ) -> Any: - - if generate: - - return self._execute_generate(*args, **kwargs) - - return self._execute_forward(*args, **kwargs) - - def _scan( - self, prepared_inputs: Any, *args, generate: bool = False, **kwargs - ) -> Any: - - if generate: - - return self._scan_generate(prepared_inputs, *args, **kwargs) - - return self._scan_forward(prepared_inputs, *args, **kwargs) - - def _execute_forward(self, prepared_inputs: Any, *args, **kwargs): - - raise NotImplementedError() - - def _execute_generate(self, prepared_inputs: Any, *args, **kwargs): - - raise NotImplementedError() - - def _scan_forward(self, prepared_inputs: Any, *args, **kwargs): - - raise NotImplementedError() - - def _scan_generate(self, prepared_inputs: Any, *args, **kwargs): - - raise NotImplementedError() diff --git a/src/nnsight/models/mixins/Remoteable.py b/src/nnsight/models/mixins/Remoteable.py deleted file mode 100755 index a3acdea8..00000000 --- a/src/nnsight/models/mixins/Remoteable.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing_extensions import Self - -from ... 
import NNsight -from ...util import from_import_path, to_import_path - - -class RemoteableMixin(NNsight): - - def _remoteable_model_key(self) -> str: - - raise NotImplementedError() - - @classmethod - def _remoteable_from_model_key(cls, model_key: str) -> Self: - raise NotImplementedError() - - def to_model_key(self): - - return f"{to_import_path(type(self))}:{self._remoteable_model_key()}" - - @classmethod - def from_model_key(cls, model_key: str, **kwargs) -> Self: - - import_path, model_key = model_key.split(":", 1) - - type: RemoteableMixin = from_import_path(import_path) - - return type._remoteable_from_model_key(model_key, **kwargs) diff --git a/src/nnsight/models/mixins/__init__.py b/src/nnsight/models/mixins/__init__.py deleted file mode 100644 index 99f7f00c..00000000 --- a/src/nnsight/models/mixins/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .Generation import GenerationMixin -from .Remoteable import RemoteableMixin \ No newline at end of file diff --git a/src/nnsight/module.py b/src/nnsight/module.py deleted file mode 100755 index fc3a35f3..00000000 --- a/src/nnsight/module.py +++ /dev/null @@ -1,24 +0,0 @@ -from typing import Iterator - -import torch -from torch.nn.parameter import Parameter -from typing_extensions import Self - -from .tracing.Proxy import Proxy - - -class Module(torch.nn.Module): - - def save(self) -> Self: - - [param.save() for param in self.parameters()] - - return self - - def parameters(self, recurse: bool = True) -> Iterator[Parameter]: - return [ - param - for param in self.__dict__.values() - if isinstance(param, Proxy) - and param.node.target is torch.nn.parameter.Parameter - ] diff --git a/src/nnsight/nnsight.log b/src/nnsight/nnsight.log index e69de29b..55bd99e2 100644 --- a/src/nnsight/nnsight.log +++ b/src/nnsight/nnsight.log @@ -0,0 +1,7 @@ +2024-10-11 18:39:52,036 MainProcess nnsight INFO => SET(InterventionProtocol_0) +2024-10-11 18:41:28,137 MainProcess nnsight INFO => SET(InterventionProtocol_0) +2024-10-11 18:44:15,969 MainProcess nnsight INFO => SET(None) +2024-10-11 18:44:15,970 MainProcess nnsight INFO => DEL(None) +2024-10-11 18:45:19,831 MainProcess nnsight INFO => SET(InterventionProtocol_0) +2024-10-11 18:50:56,360 MainProcess nnsight INFO => SET(InterventionProtocol_0) +2024-10-11 18:51:59,171 MainProcess nnsight INFO => SET(InterventionProtocol_0) diff --git a/src/nnsight/patching.py b/src/nnsight/patching.py deleted file mode 100644 index 263a187b..00000000 --- a/src/nnsight/patching.py +++ /dev/null @@ -1,71 +0,0 @@ -"""The patching module handles patching of classes and functions in modules.""" -from __future__ import annotations - -import importlib -import types -from contextlib import AbstractContextManager -from typing import Any, List, Optional - -from . import util - - -class Patch: - """Class representing a replacement of an attribute on a module. - - Attributes: - obj (Any): Object to replace. - replacement (Any): Object that replaces. - parent (Any): Module or class to replace attribute. 
- """ - - def __init__(self, parent: Any, replacement: Any, key: str) -> None: - self.parent = parent - self.replacement = replacement - self.key = key - self.orig = getattr(self.parent, key) - - def patch(self) -> None: - """Carries out the replacement of an object in a module/class.""" - setattr(self.parent, self.key, self.replacement) - - def restore(self) -> None: - """Carries out the restoration of the original object on the objects module/class.""" - - setattr(self.parent, self.key, self.orig) - - -class Patcher(AbstractContextManager): - """Context manager that patches from a list of Patches on __enter__ and restores the patch on __exit__. - - Attributes: - patches (List[Patch]): - """ - - def __init__(self, patches: Optional[List[Patch]] = None) -> None: - self.patches = patches or [] - - def add(self, patch: Patch) -> None: - """Adds a Patch to the patches. Also calls `.patch()` on the Patch. - - Args: - patch (Patch): Patch to add. - """ - self.patches.append(patch) - - patch.patch() - - def __enter__(self) -> Patcher: - """Enters the patching context. Calls `.patch()` on all patches. - - Returns: - Patcher: Patcher - """ - for patch in self.patches: - patch.patch() - - return self - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - """Calls `.restore()` on all patches.""" - for patch in self.patches: - patch.restore() diff --git a/src/nnsight/schema/Response.py b/src/nnsight/schema/Response.py deleted file mode 100644 index c9a15527..00000000 --- a/src/nnsight/schema/Response.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import annotations - -import logging -from datetime import datetime -from enum import Enum -from typing import Any, Dict, List, Optional, Union - -import torch -from pydantic import BaseModel - -from .. import util -from ..tracing.Graph import Graph - - -class ResultModel(BaseModel): - id: str - value: Any = None - - @classmethod - def from_graph(cls, graph: Graph) -> Dict[str, Any]: - - saves = { - name: util.apply(node.value, lambda x: x.detach().cpu(), torch.Tensor) - for name, node in graph.nodes.items() - if node.done() - } - - return saves - - -class ResponseModel(BaseModel): - class JobStatus(Enum): - RECEIVED = "RECEIVED" - APPROVED = "APPROVED" - RUNNING = "RUNNING" - COMPLETED = "COMPLETED" - LOG = "LOG" - ERROR = "ERROR" - - id: str - status: JobStatus - description: str - - received: datetime = None - session_id: Optional[str] = None - - def __str__(self) -> str: - return f"{self.id} - {self.status.name}: {self.description}" - - def log(self, logger: logging.Logger) -> ResponseModel: - if self.status == ResponseModel.JobStatus.ERROR: - logger.error(str(self)) - else: - logger.info(str(self)) - - return self diff --git a/src/nnsight/schema/Config.py b/src/nnsight/schema/config.py similarity index 74% rename from src/nnsight/schema/Config.py rename to src/nnsight/schema/config.py index cc19676c..2e5a7c48 100755 --- a/src/nnsight/schema/Config.py +++ b/src/nnsight/schema/config.py @@ -7,14 +7,17 @@ class ApiConfigModel(BaseModel): HOST: str = "ndif.dev" SSL: bool = True + FORMAT:str ='json' + ZLIB:bool = True APIKEY: Optional[str] = None JOB_ID:Optional[str] = None - class AppConfigModel(BaseModel): LOGGING: bool = False REMOTE_LOGGING: bool = True - + DEBUG: bool = True + CONTROL_FLOW_HACKS:bool = True + FRAME_INJECTION:bool = True class ConfigModel(BaseModel): API: ApiConfigModel = ApiConfigModel() @@ -26,6 +29,12 @@ def set_default_api_key(self, apikey: str): self.save() + def set_default_app_debug(self, debug: bool): + + self.APP.DEBUG 
= debug + + self.save() + def save(self): from .. import PATH diff --git a/src/nnsight/schema/format/functions.py b/src/nnsight/schema/format/functions.py index 72283aa3..2143b552 100755 --- a/src/nnsight/schema/format/functions.py +++ b/src/nnsight/schema/format/functions.py @@ -1,15 +1,24 @@ import operator -from inspect import getmembers, isbuiltin, isfunction, ismethod, ismethoddescriptor, isclass +from inspect import ( + getmembers, + isbuiltin, + isclass, + isfunction, + ismethod, + ismethoddescriptor, +) from typing import Callable import einops import torch from torch.utils.data.dataloader import DataLoader -from ... import intervention, util +from ... import util +from ...intervention import protocols as intervention_protocols from ...tracing import protocols -from ...tracing.Proxy import Proxy - +from ...tracing.graph import Proxy +from ...tracing import contexts +from ...intervention import contexts as intervention_contexts def get_function_name(fn, module_name=None): if isinstance(fn, str): @@ -25,16 +34,18 @@ def get_function_name(fn, module_name=None): return f"{module_name}.{fn.__qualname__}" + def update_function(function: str | Callable, new_function: Callable): - + if not isinstance(function, str): - + function = get_function_name(function) - + new_function.__name__ = function - + FUNCTIONS_WHITELIST[function] = new_function + FUNCTIONS_WHITELIST = {} ### Torch functions @@ -107,7 +118,7 @@ def update_function(function: str | Callable, new_function: Callable): get_function_name(print): print, get_function_name(setattr): setattr, get_function_name(util.fetch_attr): util.fetch_attr, - get_function_name(Proxy.proxy_call): Proxy.proxy_call, + get_function_name(Proxy.call): Proxy.call, } ) @@ -123,7 +134,22 @@ def update_function(function: str | Callable, new_function: Callable): FUNCTIONS_WHITELIST.update( { get_function_name(protocol): protocol - for key, protocol in getmembers(intervention) + for key, protocol in getmembers(intervention_protocols) + if isinstance(protocol, type) and issubclass(protocol, protocols.Protocol) + } +) +FUNCTIONS_WHITELIST.update( + { + get_function_name(protocol): protocol + for key, protocol in getmembers(contexts) + if isinstance(protocol, type) and issubclass(protocol, protocols.Protocol) + } +) + +FUNCTIONS_WHITELIST.update( + { + get_function_name(protocol): protocol + for key, protocol in getmembers(intervention_contexts) if isinstance(protocol, type) and issubclass(protocol, protocols.Protocol) } ) diff --git a/src/nnsight/schema/format/types.py b/src/nnsight/schema/format/types.py old mode 100644 new mode 100755 index 245bf889..1a41a9aa --- a/src/nnsight/schema/format/types.py +++ b/src/nnsight/schema/format/types.py @@ -4,43 +4,48 @@ from types import BuiltinFunctionType from types import FunctionType as FuncType from types import MethodDescriptorType -from typing import Any, Dict, List, Literal, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union import torch -from pydantic import (BaseModel, ConfigDict, Field, Strict, field_validator, - model_serializer) -from pydantic.functional_validators import AfterValidator -from typing_extensions import Annotated - -from ...contexts.session.Iterator import Iterator -from ...contexts.session.Session import Session -from ...contexts.Tracer import Tracer -from ...models.NNsightModel import NNsight -from ...tracing import protocols -from ...tracing.Bridge import Bridge -from ...tracing.Graph import Graph -from ...tracing.Node import Node +from pydantic 
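The whitelist above is the serialization trust boundary: only functions whose qualified names are registered in `FUNCTIONS_WHITELIST` can be resolved when a remote request is deserialized, and `update_function` is the hook for adding or overriding an entry. A short usage sketch, assuming nnsight is installed with the module layout introduced by this patch (`clamped_exp` is a made-up function):

```python
import torch

# Module path as introduced by this diff.
from nnsight.schema.format.functions import (
    FUNCTIONS_WHITELIST,
    get_function_name,
    update_function,
)

def clamped_exp(x: torch.Tensor) -> torch.Tensor:
    # Custom function we want remotely-executed graphs to be able to call.
    return torch.exp(x.clamp(max=10))

# Register it under its own qualified name so deserialization can resolve it.
update_function(clamped_exp, clamped_exp)

assert get_function_name(clamped_exp) in FUNCTIONS_WHITELIST
```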
import ( + BaseModel, + ConfigDict, + Field, + PrivateAttr, + Strict, + ValidationError, + field_validator, + model_serializer, +) +from pydantic.functional_validators import AfterValidator, BeforeValidator +from typing_extensions import Annotated, Self + +from ...intervention.graph import InterventionGraph, InterventionNode +from ...tracing.graph import Graph, Node, SubGraph from . import FUNCTIONS_WHITELIST, get_function_name +if TYPE_CHECKING: + from ... import NNsight + +FUNCTION = Union[BuiltinFunctionType, FuncType, MethodDescriptorType, type] +PRIMITIVE = Union[int, float, str, bool, None] + class DeserializeHandler: def __init__( self, - graph: Graph = None, - nodes: Dict[str, Union[NodeModel, NodeType]] = None, - model: NNsight = None, - bridge: Bridge = None, + memo, + model: "NNsight" ) -> None: - self.graph = graph - self.nodes = nodes + self.memo = memo self.model = model - self.bridge = bridge + self.graph = Graph(node_class=InterventionNode) -FUNCTION = Union[BuiltinFunctionType, FuncType, MethodDescriptorType, type] -PRIMITIVE = Union[int, float, str, bool, None] + +MEMO = {} class BaseNNsightModel(BaseModel): @@ -48,85 +53,78 @@ class BaseNNsightModel(BaseModel): type_name: Literal["TYPE_NAME"] + @classmethod + def to_model(cls, value: Any) -> Self: + raise NotImplementedError() + def deserialize(self, handler: DeserializeHandler): raise NotImplementedError() + def try_deserialize(value: BaseNNsightModel | Any, handler: DeserializeHandler): - + if isinstance(value, BaseNNsightModel): - + return value.deserialize(handler) - + return value +def memoized(fn): + + def inner(value): + + model = fn(value) + + _id = id(value) + + MEMO[_id] = model + + return MemoReferenceModel(id=_id) + + return inner + + ### Custom Pydantic types for all supported base types class NodeModel(BaseNNsightModel): type_name: Literal["NODE"] = "NODE" - class Reference(BaseNNsightModel): - type_name: Literal["NODE_REFERENCE"] = "NODE_REFERENCE" + target: ValueTypes + args: List[ValueTypes] = [] + kwargs: Dict[str, ValueTypes] = {} - name: str + @staticmethod + @memoized + def to_model(value: Node) -> Self: - def deserialize(self, handler: DeserializeHandler) -> Node: - return handler.nodes[self.name].deserialize(handler) + return NodeModel(target=value.target, args=value.args, kwargs=value.kwargs) - name: str - target: Union[FunctionModel, FunctionType] - args: List[ValueTypes] = [] - kwargs: Dict[str, ValueTypes] = {} - condition: Union[ - NodeReferenceType, NodeModel.Reference, None - ] = None - - @model_serializer(mode='wrap') + @model_serializer(mode="wrap") def serialize_model(self, handler): - + dump = handler(self) - - if self.condition is None: - - dump.pop('condition') - + if not self.kwargs: - - dump.pop('kwargs') - - if not self.args: - - dump.pop('args') - - return dump - def deserialize(self, handler: DeserializeHandler) -> Node: + dump.pop("kwargs") - if self.name in handler.graph.nodes: - return handler.graph.nodes[self.name] - - node = handler.graph.create( - proxy_value=None, - target=self.target.deserialize(handler), - args=[try_deserialize(value, handler) for value in self.args], - kwargs={ - key: try_deserialize(value, handler) for key, value in self.kwargs.items() - }, - name=self.name, - ).node + if not self.args: - node.cond_dependency = try_deserialize(self.condition, handler) - - if isinstance(node.cond_dependency, Node): - node.cond_dependency.listeners.append(weakref.proxy(node)) + dump.pop("args") - if isinstance(node.target, type) and issubclass( - node.target, 
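The `memoized` decorator above is the core of the new serialization scheme: instead of inlining a converted object every time it appears, it stores the model once in a global `MEMO` table keyed by `id(value)` and emits a lightweight reference. A self-contained sketch of the mechanism (`MemoReference` is a stand-in for `MemoReferenceModel`):

```python
from typing import Any, Callable, Dict

MEMO: Dict[int, Any] = {}

class MemoReference:
    # Hypothetical stand-in for MemoReferenceModel.
    def __init__(self, id: int) -> None:
        self.id = id

def memoized(fn: Callable) -> Callable:
    def inner(value: Any) -> MemoReference:
        MEMO[id(value)] = fn(value)      # serialize once...
        return MemoReference(id(value))  # ...and point at it everywhere else
    return inner

@memoized
def to_model(value: list) -> dict:
    return {"type": "LIST", "values": value}

shared = [1, 2, 3]
ref_a, ref_b = to_model(shared), to_model(shared)
assert ref_a.id == ref_b.id == id(shared)
print(MEMO[ref_a.id])  # {'type': 'LIST', 'values': [1, 2, 3]}
```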
protocols.Protocol - ): + return dump - node.target.compile(node) + def deserialize(self, handler: DeserializeHandler) -> Node: - return node + return handler.graph.create( + self.target.deserialize(handler), + *[try_deserialize(value, handler) for value in self.args], + **{ + key: try_deserialize(value, handler) + for key, value in self.kwargs.items() + } + ).node class TensorModel(BaseNNsightModel): @@ -135,6 +133,12 @@ class TensorModel(BaseNNsightModel): values: List dtype: str + @staticmethod + @memoized + def to_model(value: torch.Tensor) -> Self: + + return TensorModel(values=value.tolist(), dtype=str(value.dtype).split(".")[-1]) + def deserialize(self, handler: DeserializeHandler) -> torch.Tensor: dtype = getattr(torch, self.dtype) return torch.tensor(self.values, dtype=dtype) @@ -148,12 +152,18 @@ class SliceModel(BaseNNsightModel): stop: ValueTypes step: ValueTypes + @staticmethod + @memoized + def to_model(value: slice) -> Self: + + return SliceModel(start=value.start, stop=value.stop, step=value.step) + def deserialize(self, handler: DeserializeHandler) -> slice: return slice( try_deserialize(self.start, handler), try_deserialize(self.stop, handler), - try_deserialize(self.step, handler) + try_deserialize(self.step, handler), ) @@ -175,6 +185,11 @@ class ListModel(BaseNNsightModel): values: List[ValueTypes] + @staticmethod + def to_model(value: List) -> Self: + + return ListModel(values=value) + def deserialize(self, handler: DeserializeHandler) -> list: return [try_deserialize(value, handler) for value in self.values] @@ -185,6 +200,11 @@ class TupleModel(BaseNNsightModel): values: List[ValueTypes] + @staticmethod + def to_model(value: Tuple) -> Self: + + return TupleModel(values=value) + def deserialize(self, handler: DeserializeHandler) -> tuple: return tuple([try_deserialize(value, handler) for value in self.values]) @@ -195,8 +215,15 @@ class DictModel(BaseNNsightModel): values: Dict[str, ValueTypes] + @staticmethod + def to_model(value: Dict) -> Self: + + return DictModel(values=value) + def deserialize(self, handler: DeserializeHandler) -> dict: - return {key: try_deserialize(value, handler) for key, value in self.values.items()} + return { + key: try_deserialize(value, handler) for key, value in self.values.items() + } class FunctionWhitelistError(Exception): @@ -208,8 +235,16 @@ class FunctionModel(BaseNNsightModel): type_name: Literal["FUNCTION"] = "FUNCTION" function_name: str + + @staticmethod + def to_model(value:FUNCTION): + + model = FunctionModel(function_name=get_function_name(value)) + + FunctionModel.check_function_whitelist(model.function_name) + + return model - @field_validator("function_name") @classmethod def check_function_whitelist(cls, qualname: str) -> str: if qualname not in FUNCTIONS_WHITELIST: @@ -220,6 +255,9 @@ def check_function_whitelist(cls, qualname: str) -> str: return qualname def deserialize(self, handler: DeserializeHandler) -> FUNCTION: + + FunctionModel.check_function_whitelist(self.function_name) + return FUNCTIONS_WHITELIST[self.function_name] @@ -227,232 +265,183 @@ class GraphModel(BaseNNsightModel): type_name: Literal["GRAPH"] = "GRAPH" - id: int - sequential: bool - nodes: Dict[str, Union["NodeModel", "NodeType"]] + # We have a reference to the real Graph in the pydantic to be used by optimization logic + graph: Graph = Field(exclude=True, default=None, validate_default=False) - def deserialize(self, handler: DeserializeHandler) -> Graph: + nodes: List[Union[MemoReferenceModel, NodeType]] - graph = Graph(validate=False, 
sequential=self.sequential, graph_id=self.id) + @staticmethod + def to_model(value: Graph) -> Self: - handler.graph = graph - handler.nodes = self.nodes + return GraphModel(graph=value, nodes=value.nodes) - # To preserve order - nodes = {} + def deserialize(self, handler: DeserializeHandler) -> Graph: - for node_name, node in self.nodes.items(): + for node in self.nodes: node.deserialize(handler) - # To preserve order - nodes[node_name] = graph.nodes[node_name] - - # To preserve order - graph.nodes = nodes - - return graph - - -class TracerModel(BaseNNsightModel): - - type_name: Literal["TRACER"] = "TRACER" - - kwargs: Dict[str, ValueTypes] - invoker_inputs: List[ValueTypes] - graph: Union[GraphModel, GraphType] - - def deserialize(self, handler: DeserializeHandler) -> Tracer: - - _graph = handler.graph - _nodes = handler.nodes - - graph = self.graph.deserialize(handler) - - handler.graph = graph - - kwargs = {key: try_deserialize(value, handler) for key, value in self.kwargs.items()} - - invoker_inputs = [ - try_deserialize(invoker_input, handler) for invoker_input in self.invoker_inputs - ] - - tracer = Tracer( - None, handler.model, bridge=handler.bridge, graph=graph, **kwargs - ) - tracer._invoker_inputs = invoker_inputs - - handler.graph = _graph - handler.nodes = _nodes - - return tracer - - -class IteratorModel(BaseNNsightModel): - - type_name: Literal["ITERATOR"] = "ITERATOR" + return handler.graph - data: ValueTypes - graph: Union[GraphModel, GraphType] +class SubGraphModel(BaseNNsightModel): - def deserialize(self, handler: DeserializeHandler) -> Iterator: + type_name: Literal["SUBGRAPH"] = "SUBGRAPH" - _graph = handler.graph - _nodes = handler.nodes + subset: List[int] - graph = self.graph.deserialize(handler) + @staticmethod + def to_model(value: SubGraph) -> Self: - handler.graph = graph + return SubGraphModel(subset=value.subset) - data = try_deserialize(self.data, handler) + def deserialize(self, handler: DeserializeHandler) -> Graph: - iterator = Iterator(data, None, bridge=handler.bridge, graph=graph) + value = SubGraph(handler.graph, subset=self.subset) + + for node in value: + node.graph = value + + return value - handler.graph = _graph - handler.nodes = _nodes - return iterator +class InterventionGraphModel(SubGraphModel): + type_name: Literal["INTERVENTIONGRAPH"] = "INTERVENTIONGRAPH" -class SessionModel(BaseNNsightModel): + @staticmethod + def to_model(value: InterventionGraph) -> Self: - type_name: Literal["SESSION"] = "SESSION" + return InterventionGraphModel(subset=value.subset) - graph: Union[GraphModel, GraphType] + def deserialize(self, handler: DeserializeHandler) -> Graph: + value = InterventionGraph(handler.graph, model=handler.model, subset=self.subset) + + for node in value: + node.graph = value + + return value - def deserialize(self, handler: DeserializeHandler) -> Session: - bridge = Bridge() +class MemoReferenceModel(BaseNNsightModel): - handler.bridge = bridge + type_name: Literal["REFERENCE"] = "REFERENCE" - graph = self.graph.deserialize(handler) + id: int - bridge.add(graph) + def deserialize(self, handler: DeserializeHandler): + + value = try_deserialize(handler.memo[self.id], handler) - session = Session(None, handler.model, bridge=bridge, graph=graph) + handler.memo[self.id] = value - return session + return value ### Define Annotated types to convert objects to their custom Pydantic counterpart GraphType = Annotated[ Graph, - AfterValidator( - lambda value: GraphModel( - id=value.id, sequential=value.sequential, nodes=value.nodes - ) - ), + 
AfterValidator(GraphModel.to_model), ] -TensorType = Annotated[ - torch.Tensor, - AfterValidator( - lambda value: TensorModel( - values=value.tolist(), dtype=str(value.dtype).split(".")[-1] - ) - ), +SubGraphType = Annotated[ + SubGraph, + AfterValidator(SubGraphModel.to_model), ] +InterventionGraphType = Annotated[ + InterventionGraph, + AfterValidator(InterventionGraphModel.to_model), +] + +TensorType = Annotated[torch.Tensor, AfterValidator(TensorModel.to_model)] + SliceType = Annotated[ slice, - AfterValidator( - lambda value: SliceModel(start=value.start, stop=value.stop, step=value.step) - ), + AfterValidator(SliceModel.to_model), ] EllipsisType = Annotated[ type(...), # It will be better to use EllipsisType, but it requires python>=3.10 - AfterValidator(lambda value: EllipsisModel()), + AfterValidator(lambda _: EllipsisModel()), ] -ListType = Annotated[list, AfterValidator(lambda value: ListModel(values=value))] +ListType = Annotated[list, AfterValidator(ListModel.to_model)] TupleType = Annotated[ - tuple, Strict(), AfterValidator(lambda value: TupleModel(values=list(value))) + tuple, + Strict(), + AfterValidator(TupleModel.to_model), ] -DictType = Annotated[dict, AfterValidator(lambda value: DictModel(values=value))] +DictType = Annotated[dict, AfterValidator(DictModel.to_model)] FunctionType = Annotated[ FUNCTION, - AfterValidator(lambda value: FunctionModel(function_name=get_function_name(value))), -] - -NodeReferenceType = Annotated[ - Node, AfterValidator(lambda value: NodeModel.Reference(name=value.name)) + AfterValidator(FunctionModel.to_model), ] NodeType = Annotated[ Node, - AfterValidator( - lambda value: NodeModel( - name=value.name, - target=value.target, - args=value.args, - kwargs=value.kwargs, - condition=value.cond_dependency, - ) - ), + AfterValidator(NodeModel.to_model), ] -TracerType = Annotated[ - Tracer, - AfterValidator( - lambda value: TracerModel( - kwargs=value._kwargs, - invoker_inputs=value._invoker_inputs, - graph=value.graph, - ) - ), -] -IteratorType = Annotated[ - Iterator, - AfterValidator(lambda value: IteratorModel(graph=value.graph, data=value.data)), -] +def check_memo(object: Any): -SessionType = Annotated[ - Session, - AfterValidator(lambda value: SessionModel(graph=value.graph)), -] + _id = id(object) + + if _id in MEMO: + + return MemoReferenceModel(id=_id) + + raise ValueError() + + +MemoType = Annotated[object, BeforeValidator(check_memo)] ### Register all custom Pydantic objects to convert objects to -TOTYPES = Union[ - TracerModel, - IteratorModel, - SessionModel, - NodeModel.Reference, - SliceModel, - TensorModel, - TupleModel, - ListModel, - DictModel, - EllipsisModel, +TOTYPES = Annotated[ + Union[ + MemoReferenceModel, + NodeModel, + SliceModel, + TensorModel, + TupleModel, + ListModel, + DictModel, + FunctionModel, + EllipsisModel, + InterventionGraphModel, + SubGraphModel, + GraphModel, + ], + Field(discriminator="type_name"), ] ### Register all Annotated types objects to convert objects from -FROMTYPES = Union[ - TracerType, - IteratorType, - SessionType, - NodeReferenceType, - SliceType, - TensorType, - TupleType, - ListType, - DictType, - EllipsisType, +FROMTYPES = Annotated[ + Union[ + MemoType, + NodeType, + InterventionGraphType, + SubGraphType, + GraphType, + FunctionType, + SliceType, + TensorType, + TupleType, + ListType, + DictType, + EllipsisType, + ], + Field(union_mode="left_to_right"), ] ### Final registration ValueTypes = Union[ PRIMITIVE, - Annotated[ - TOTYPES, - Field(discriminator="type_name"), - ], + TOTYPES, 
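The `Annotated[..., AfterValidator(...)]` aliases above are what let pydantic accept rich runtime objects and transparently convert them to their serializable model counterparts during validation. A minimal demonstration of the pattern with a `slice`, using a simplified counterpart of this diff's `SliceModel`:

```python
from typing import Optional

from pydantic import BaseModel, ConfigDict
from pydantic.functional_validators import AfterValidator
from typing_extensions import Annotated

class SliceModel(BaseModel):
    # Simplified counterpart of this diff's SliceModel.
    start: Optional[int]
    stop: Optional[int]
    step: Optional[int]

# Validation first checks the raw `slice`, then AfterValidator replaces it
# with its pydantic model; the converted value is not re-validated.
SliceType = Annotated[
    slice,
    AfterValidator(
        lambda value: SliceModel(start=value.start, stop=value.stop, step=value.step)
    ),
]

class Holder(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)
    value: SliceType

print(Holder(value=slice(0, 10, 2)).value)  # start=0 stop=10 step=2
```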
FROMTYPES, ] diff --git a/src/nnsight/schema/request.py b/src/nnsight/schema/request.py new file mode 100644 index 00000000..6223a69f --- /dev/null +++ b/src/nnsight/schema/request.py @@ -0,0 +1,161 @@ +from __future__ import annotations + +import copy +import io +import json +import zlib +from datetime import datetime +from typing import TYPE_CHECKING, Any, Dict, List, Union + +import msgspec +import torch +from pydantic import BaseModel, ConfigDict + +from .format.types import (MEMO, DeserializeHandler, Graph, GraphModel, + GraphType, ValueTypes, try_deserialize) + +if TYPE_CHECKING: + from .. import NNsight + + +class RequestModel(BaseModel): + + model_config = ConfigDict(arbitrary_types_allowed=True, protected_namespaces=()) + + graph: Union[GraphModel, GraphType] + memo: Dict[int, ValueTypes] + + def __init__(self, *args, memo: Dict = None, **kwargs): + + super().__init__(*args, memo=memo or dict(), **kwargs) + + if memo is None: + + self.memo = {**MEMO} + + MEMO.clear() + + @staticmethod + def serialize(graph: Graph, format:str, _zlib:bool) -> bytes: + + if format == "json": + + data = RequestModel(graph=graph) + + json = data.model_dump(mode="json") + + data = msgspec.json.encode(json) + + elif format == "pt": + + data = io.BytesIO() + + torch.save(graph, data) + + data.seek(0) + + data = data.read() + + if _zlib: + + data = zlib.compress(data) + + return data + + @staticmethod + def deserialize(model: "NNsight", graph:bytes, format:str, _zlib:bool) -> Graph: + + if _zlib: + + graph = zlib.decompress(graph) + + if format == "json": + + nnsight_request = msgspec.json.decode(graph) + + request = RequestModel(**nnsight_request) + + handler = DeserializeHandler(request.memo, model) + + graph = request.graph.deserialize(handler) + + elif format == "pt": + + data = io.BytesIO(graph) + + data.seek(0) + + graph = torch.load(data, map_location="cpu", weights_only=False) + + return graph + +class StreamValueModel(BaseModel): + + model_config = ConfigDict(arbitrary_types_allowed=True, protected_namespaces=()) + + values: Dict[int, ValueTypes] + memo: Dict[int, ValueTypes] + + def __init__(self, *args, memo: Dict = None, **kwargs): + + super().__init__(*args, memo=memo or dict(), **kwargs) + + if memo is None: + + self.memo = {**MEMO} + + MEMO.clear() + + @staticmethod + def serialize(values: Dict[int, Any], format:str, _zlib:bool) -> bytes: + + if format == "json": + + data = StreamValueModel(values=values) + + json = data.model_dump(mode="json") + + data = msgspec.json.encode(json) + + elif format == "pt": + + data = io.BytesIO() + + torch.save(values, data) + + data.seek(0) + + data = data.read() + + if _zlib: + + data = zlib.compress(data) + + return data + + @staticmethod + def deserialize(values:bytes, format:str, _zlib:bool) -> Dict[int, Any]: + + if _zlib: + + values = zlib.decompress(values) + + if format == "json": + + nnsight_request = msgspec.json.decode(values) + + request = StreamValueModel(**nnsight_request) + + handler = DeserializeHandler(request.memo, None) + + values = {index: try_deserialize(value, handler) for index, value in request.values.items()} + + elif format == "pt": + + data = io.BytesIO(values) + + data.seek(0) + + values = torch.load(data, map_location="cpu", weights_only=False) + + return values diff --git a/src/nnsight/schema/response.py b/src/nnsight/schema/response.py new file mode 100644 index 00000000..466265ee --- /dev/null +++ b/src/nnsight/schema/response.py @@ -0,0 +1,80 @@ +from __future__ import annotations + +import io +import logging +from datetime 
import datetime
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+import torch
+from pydantic import BaseModel, ConfigDict
+
+from .result import RESULT
+
+
+class ResponseModel(BaseModel):
+
+    model_config = ConfigDict(arbitrary_types_allowed=True, protected_namespaces=())
+
+    class JobStatus(Enum):
+        RECEIVED = "RECEIVED"
+        APPROVED = "APPROVED"
+        RUNNING = "RUNNING"
+        COMPLETED = "COMPLETED"
+        LOG = "LOG"
+        STREAM = "STREAM"
+        ERROR = "ERROR"
+        NNSIGHT_ERROR = "NNSIGHT_ERROR"
+
+    id: str
+    status: JobStatus
+
+    description: Optional[str] = ""
+    data: Optional[Union[RESULT, Any]] = None
+    received: Optional[datetime] = None
+    session_id: Optional[str] = None
+
+    def __str__(self) -> str:
+        return f"{self.id} - {self.status.name}: {self.description}"
+
+    def log(self, logger: logging.Logger) -> ResponseModel:
+        if self.status == ResponseModel.JobStatus.ERROR:
+            logger.error(str(self))
+            raise SystemExit("Remote exception.")
+        elif self.status == ResponseModel.JobStatus.STREAM:
+            pass
+        else:
+            logger.info(str(self))
+
+        return self
+
+    def pickle(self) -> bytes:
+        """Pickles self and returns bytes.
+
+        Returns:
+            bytes: Pickled ResponseModel
+        """
+
+        with io.BytesIO() as file:
+
+            torch.save(self.model_dump(exclude_unset=True), file)
+
+            file.seek(0)
+
+            return file.read()
+
+    @classmethod
+    def unpickle(cls, data: bytes) -> ResponseModel:
+        """Loads a ResponseModel from pickled bytes.
+
+        Args:
+            data (bytes): Pickled ResponseModel.
+
+        Returns:
+            ResponseModel: Response.
+        """
+
+        with io.BytesIO(data) as file:
+            return ResponseModel(
+                **torch.load(file, map_location="cpu", weights_only=False)
+            )
diff --git a/src/nnsight/schema/result.py b/src/nnsight/schema/result.py
new file mode 100755
index 00000000..bf905279
--- /dev/null
+++ b/src/nnsight/schema/result.py
@@ -0,0 +1,26 @@
+from typing import Any, Dict
+
+from pydantic import BaseModel, ConfigDict
+
+from ..tracing.graph import Graph
+
+RESULT = Dict[int, Any]
+
+
+class ResultModel(BaseModel):
+
+    model_config = ConfigDict(arbitrary_types_allowed=True, protected_namespaces=())
+
+    id: str
+    result: RESULT
+
+    @classmethod
+    def inject(cls, graph: Graph, result: RESULT):
+
+        for index, value in result.items():
+
+            graph.nodes[index]._value = value
+
+    @classmethod
+    def from_graph(cls, graph: Graph) -> RESULT:
+
+        return {node.index: node.value for node in graph.nodes if node.done}
\ No newline at end of file
diff --git a/src/nnsight/test.py b/src/nnsight/test.py
new file mode 100755
index 00000000..50194a9c
--- /dev/null
+++ b/src/nnsight/test.py
@@ -0,0 +1,40 @@
+from .tracing.contexts import Tracer
+
+with Tracer() as session:
+
+    ls = session.graph.create(list)
+
+    with session.trace() as tracer:
+
+        ls.append(4)
+
+    ls.append(1)
+
+    for itt in ls:
+        ls.append(itt)
+
+    if ls[-1] == 4:
+        ls.append(10001)
+    elif ls[-1] == 1:
+        ls.append(11111111)
+    else:
+        ls.append(123)
+
+    with session.trace() as tracer:
+
+        ls.append(3)
+
+    ls.append(5)
+
+    session.graph.create(print, ls)
+
+    print(session.graph)
diff --git a/src/nnsight/toolbox/.ipynb_checkpoints/logit_lens-checkpoint.ipynb b/src/nnsight/toolbox/.ipynb_checkpoints/logit_lens-checkpoint.ipynb
deleted file mode 100644
index 8d3517c4..00000000
--- a/src/nnsight/toolbox/.ipynb_checkpoints/logit_lens-checkpoint.ipynb
+++ /dev/null
@@ -1,1144 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "id": "8d66d5e6-d3ba-42c8-9b07-04e6c635ef64",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from lens import
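`ResultModel.from_graph` / `ResultModel.inject` above define the remote contract: the server ships back only a `{node index: value}` mapping for completed nodes, and the client writes those values into its local copy of the graph. A toy round trip under that assumption (`Node` here is a made-up minimal stand-in):

```python
from typing import Any, Dict

class Node:
    # Hypothetical minimal node: a value slot plus a completion flag.
    def __init__(self, index: int, value: Any = None, done: bool = False) -> None:
        self.index = index
        self._value = value
        self.done = done

server_nodes = [Node(0, "hidden_state", done=True), Node(1, done=False)]

# Server side: collect values of completed nodes only.
result: Dict[int, Any] = {n.index: n._value for n in server_nodes if n.done}

# Client side: inject the returned values into the local graph.
client_nodes = [Node(0), Node(1)]
for index, value in result.items():
    client_nodes[index]._value = value

print(client_nodes[0]._value)  # hidden_state
```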
LogitLens\n", - "from nnsight import LanguageModel" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "e4141eea-916f-4f67-9f26-0303636c0617", - "metadata": {}, - "outputs": [], - "source": [ - "model = LanguageModel('EleutherAI/pythia-1B', device_map=\"cuda\")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "511bb57b-8334-4c1e-aafa-c3d97247e07a", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "from abc import ABC, abstractmethod\n", - "\n", - "from nnsight import LanguageModel, Module\n", - "from typing import Any, Callable, Dict, List, Union\n", - "\n", - "from functools import reduce\n", - "\n", - "class Lens(ABC):\n", - "\n", - " def __init__(self):\n", - " super().__init__()\n", - " self.tuned = False\n", - " \n", - " @abstractmethod\n", - " def __call__(self) -> Any:\n", - " pass\n", - "\n", - "class LogitLens(Lens):\n", - " \"\"\"Returns the probability distribution over all tokens at specified points in the model.\n", - " \n", - " \"\"\"\n", - "\n", - " def __init__(self, \n", - " layers: List[Module],\n", - " decoding_modules: List[Module],\n", - " ) -> None:\n", - " super().__init__()\n", - " \n", - " self.tuned = False\n", - " \n", - " self.layers = layers\n", - " self.decoder = lambda x: reduce(lambda acc, func: func(acc), decoding_modules, x)\n", - "\n", - " def __call__(\n", - " self,\n", - " indices: Union[int, List] = None,\n", - " as_probs: bool = True,\n", - " ) -> List[Any]:\n", - " \n", - " observations = []\n", - " \n", - " for layer in self.layers:\n", - " logits = self.decoder(layer.output[0]) # apply decoder to hidden state\n", - "\n", - " observations.append(logits.save())\n", - "\n", - " # Return logits over a specific token\n", - " if type(indices) == List or type(indices == int):\n", - " observations = [logits[:,indices,:] for logits in observations]\n", - " \n", - " # Raw logits to probabilities\n", - " if as_probs:\n", - " observations = [logits.softmax(dim=-1) for logits in observations]\n", - "\n", - " self.observations = observations" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "7f6bad60-ebb1-4cf2-9d65-b4a76bbcc921", - "metadata": {}, - "outputs": [], - "source": [ - "layers = model.gpt_neox.layers\n", - "out = [model.gpt_neox.final_layer_norm, model.embed_out]\n", - "\n", - "lens = LogitLens(layers, out,)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "ee021f9c-fec1-46b7-ba6c-9a20b15dce78", - "metadata": {}, - "outputs": [], - "source": [ - "prompt = \"John and Mary went to the store. John handed the milk to\"\n", - "\n", - "with model.forward() as runner:\n", - " with runner.invoke(prompt) as invoker:\n", - " # should i call save here? rather than in the class.\n", - " lens(-1, True)\n", - "\n", - "probs = lens.observations" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "ae51870e-7dba-47c3-8809-3f3a882553d2", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - " \n", - " " - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.plotly.v1+json": { - "config": { - "plotlyServerURL": "https://plot.ly" - }, - "data": [ - { - "hovertemplate": "variable=0
Probability=%{x}
Layer=%{y}", - "legendgroup": "0", - "line": { - "color": "#636efa", - "dash": "solid" - }, - "marker": { - "symbol": "circle" - }, - "mode": "lines", - "name": "0", - "orientation": "v", - "showlegend": true, - "type": "scatter", - "x": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ], - "xaxis": "x", - "y": [ - 1.632572639209684e-05, - 5.580651304626372e-06, - 3.4238528314745054e-06, - 2.03671970666619e-06, - 2.5014485345309367e-06, - 2.499897846064414e-06, - 8.576263098802883e-06, - 1.7609881979296915e-05, - 7.855775038478896e-06, - 5.198211511014961e-05, - 0.0011284518986940384, - 0.19210906326770782, - 0.20111367106437683, - 0.023693585768342018, - 0.09532259404659271, - 0.5439841151237488 - ], - "yaxis": "y" - } - ], - "layout": { - "autosize": true, - "legend": { - "title": { - "text": "variable" - }, - "tracegroupgap": 0 - }, - "template": { - "data": { - "bar": [ - { - "error_x": { - "color": "#2a3f5f" - }, - "error_y": { - "color": "#2a3f5f" - }, - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "bar" - } - ], - "barpolar": [ - { - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "barpolar" - } - ], - "carpet": [ - { - "aaxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "baxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "type": "carpet" - } - ], - "choropleth": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "choropleth" - } - ], - "contour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "contour" - } - ], - "contourcarpet": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "contourcarpet" - } - ], - "heatmap": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmap" - } - ], - "heatmapgl": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmapgl" - } - ], - "histogram": [ - 
{ - "marker": { - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "histogram" - } - ], - "histogram2d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2d" - } - ], - "histogram2dcontour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2dcontour" - } - ], - "mesh3d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "mesh3d" - } - ], - "parcoords": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "parcoords" - } - ], - "pie": [ - { - "automargin": true, - "type": "pie" - } - ], - "scatter": [ - { - "fillpattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - }, - "type": "scatter" - } - ], - "scatter3d": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatter3d" - } - ], - "scattercarpet": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattercarpet" - } - ], - "scattergeo": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergeo" - } - ], - "scattergl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergl" - } - ], - "scattermapbox": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattermapbox" - } - ], - "scatterpolar": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolar" - } - ], - "scatterpolargl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolargl" - } - ], - "scatterternary": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterternary" - } - ], - "surface": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "surface" - } - ], - "table": [ - { - "cells": { - "fill": { - "color": "#EBF0F8" - }, - "line": { - "color": "white" - } - }, - "header": { - "fill": { - "color": "#C8D4E3" - }, - "line": { - "color": "white" - } - }, - "type": "table" - } - ] - }, - "layout": { - "annotationdefaults": { - "arrowcolor": 
"#2a3f5f", - "arrowhead": 0, - "arrowwidth": 1 - }, - "autotypenumbers": "strict", - "coloraxis": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "colorscale": { - "diverging": [ - [ - 0, - "#8e0152" - ], - [ - 0.1, - "#c51b7d" - ], - [ - 0.2, - "#de77ae" - ], - [ - 0.3, - "#f1b6da" - ], - [ - 0.4, - "#fde0ef" - ], - [ - 0.5, - "#f7f7f7" - ], - [ - 0.6, - "#e6f5d0" - ], - [ - 0.7, - "#b8e186" - ], - [ - 0.8, - "#7fbc41" - ], - [ - 0.9, - "#4d9221" - ], - [ - 1, - "#276419" - ] - ], - "sequential": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "sequentialminus": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ] - }, - "colorway": [ - "#636efa", - "#EF553B", - "#00cc96", - "#ab63fa", - "#FFA15A", - "#19d3f3", - "#FF6692", - "#B6E880", - "#FF97FF", - "#FECB52" - ], - "font": { - "color": "#2a3f5f" - }, - "geo": { - "bgcolor": "white", - "lakecolor": "white", - "landcolor": "#E5ECF6", - "showlakes": true, - "showland": true, - "subunitcolor": "white" - }, - "hoverlabel": { - "align": "left" - }, - "hovermode": "closest", - "mapbox": { - "style": "light" - }, - "paper_bgcolor": "white", - "plot_bgcolor": "#E5ECF6", - "polar": { - "angularaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "radialaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "scene": { - "xaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "yaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "zaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - } - }, - "shapedefaults": { - "line": { - "color": "#2a3f5f" - } - }, - "ternary": { - "aaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "baxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "caxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "title": { - "x": 0.05 - }, - "xaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - }, - "yaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - } - } - }, - "title": { - "text": "Probability of Mary after each layer, according to logit lens" - }, - "xaxis": { - "anchor": "y", - "autorange": true, - "domain": [ - 0, 
- 1 - ], - "range": [ - 0, - 15 - ], - "title": { - "text": "Probability" - }, - "type": "linear" - }, - "yaxis": { - "anchor": "x", - "autorange": true, - "domain": [ - 0, - 1 - ], - "range": [ - -0.03021918985829567, - 0.5742053417017511 - ], - "title": { - "text": "Layer" - }, - "type": "linear" - } - } - }, - "image/png": "iVBORw0KGgoAAAANSUhEUgAABbkAAAFoCAYAAAB67/YgAAAgAElEQVR4XuzdCbxV0/vH8eec05xCyjz7GX7mMmQoQ5KiNMoUIqWkQuYMmUXSoFKJkClKEmXIrP6IDJnlZ0pIk+bh3PNfz772de/t3u4Z1tr7nHs/+//34ld7P2vv99rnynev8+xIwmzChgACCCCAAAIIIIAAAggggAACCCCAAAIIIIBADgpECLlzcNY4ZQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAFPgJCbGwEBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgZwUIuXN26jhxBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAUJu7gEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBnBUg5M7ZqePEEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAi5uQcQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEclaAkDtnp44TRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEECDk5h5AAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyFkBQu6cnTpOHAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQICQm3sAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIGcFCLlzduo4cQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAFCbu4BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZwVIOTO2anjxBFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQIubkHEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBHJWgJA7Z6eOE0cAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAg5OYeQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEMhZAULunJ06ThwBBBBAAAEEEEAAAQQQQAABBBBAAAEEEECAkJt7AAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBnBQi5c3bqOHEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABQm7uAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGcFSDkztmp48QRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEECLm5BxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQRyVoCQO2enjhNHAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQIOTmHkAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDIWQFC7pydOk4cAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAgJCbewABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgZwUIuXN26jhxBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAUJu7gEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBnBUg5M7ZqePEEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAi5uQcQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEclaAkDtnp44TRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEECDk5h5AAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyFkBQu6cnTpOHAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQICQm3sAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIGcFCLlzduo4cQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAFCbu4BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZwVIOTO2anjxBFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQIuZO8B154ZaZcc8fojfbecvNacnj9faRn5zayx647JFmt9N1+X7hYTjjtcrn5ivOlQ8tjM6rnn/O0xwfIzjtsU2qt5mddJQ0O2FPuuLarFB/fr/HKUwNlh23rZnQ+6Rz8xTc/yrXG/ef5f8glF7STC886ZaMy/jlutWVtef3Z+6RSLLbRPp9/9YOc0eMW79c/nTG2xH3SOT9Xx9x636Py4oz/k/XrN8hHL29837kaN4i6S5Ytl0ate8l1vTvJ2e2apjSk3qsH7buHDLj+opSOY+f0BL7+/mdpf+GNMvTW3nJC4wZyw90PyayPvpDXnr43vYIhHWXz52rxS0jHJNmfzSFxMSwCCCCAAAIIIIAAAggggAACOSdAyJ3klPmhxHW9z5Y9d9vJO2r9hg3yv58XyMNPTZMVq1bLpLG3ZhwE2wxjkg1SNEytW2dzaVj/vxuF3D/9+of8nwm1Wp54lNSsUc277rYXXC99Luwgxx11cJJ66e922U33y4effCPDbu8tO22/tXeexTf/OqtVreKFn00bH7LRPjffO06ef/k9WbtufdaH3N//b760Pr+fnNOhmbRt0Vj23mMnee2dj2TUYy/IM6P7p4+ZJUcScmfJRCRxGsVD7g8/+Vr+WLjE/Dw4MomjM9/ljqGPS+VKMbny4jMyKmbz52rxEyluksxnNdmfzRldNAcjgAACCCCAAAIIIIAAAgggUIEECLmTnGw/lHhs2HVm1fNeRY76Zt4v0q7LDdLlzJPl8os6Jlmx5N1shjHpBClljb9q9VppeEp3GXZbn0BC7s6X3iV5eXny6NDrSnX1r7PR4QdIJBKRBwZcXmTfNWvXybHt+sh+e+0q78/5KutDbg3N9Lofuu9q78GDboNGTTAraL8k5GYld0Y/X1I9uHjInerxme6v3744xPy8zeaQu/g1JvNZTednc6aWHI8AAggggAACCCCAAAIIIIBAeRYg5E5ydjcVcmuJw1p0l2OPPEgG3thDpr46S66+fZQXzN408GGJx/NEW4bo9sbMOd6K3G9NMK7bXrvvKBeceYo0O/ZQ73/7IfP1l54j8378TV56/f9k9Zp1csA+u8mNl50n/9nt35Yo4ye+KhOmvCG62rpG9aqy9392lsu6nea1c9DNP+eH77tGHnrqJZn96dcSjUbl+KPqyw2XnSub1azu7Zdsu5L5C/6S8y+7q0CserUqsv8+u8vipctlyrjbi0i+8/5n0v3qQfLgwCvlyEP3K1F5UxYb4nE56IQuRY67+LzW0vP8thvVKrzK/q77nzCtFAbJNvW2LNhPf//6AWNNu5O2MnjMs0VC7nc/+Nybj6+++1ESCTFtXbaWzqe3kNYnHe0dr6tWm5x2mdx1XTeZPP1d+ejzb+Uu09al7y0jZVD/nnLScYcVOZ8zut8slSpVkvH39yvxmn/85XcZ8u
CzZnX8l2Ze18rWdbeUk084wmt3U7lyJfN7E2X0+BeKHHvoQXubufum4Neu6H66nH9GC2/e7xv9jHz21TxZsmyF7LbTtuZeOllaNv13lW2Ls6+SY444yJt3vVf69+0srZodVeK5JVNv1eo1xnCivPr2h7Joyd+i7Xo0iO9rzqmwuc7/8HGTvfu8dq2a0rjhgeYB0Gne/v5K7n59zpE//1oik156W/5evlL22XMXufWqC8w3JXYs9VNZvF1JWZ4jzDk88NgUmTFhkNTbaouCun+vWCXHtO0t55u57nNhe/Gv690PPpPf/ljkfWOgzUmN5KJzT/VWEut24z0Pydyv/yddTMucO80K4yaN6sstV15Q6rkW/o2yPqu673JzTjr/r749W1asXC2777K9dOvUUk48Jv9ng26bctXf1+u6zzwQef29OZ6zeuvPJf25oP+8qevQMbVNjn4uE+bDcESDfeW0VsdJj2vuK7VdSf+B4+TTL7+Xq3ueJYNGT/DuSf3Gx6nNjpZLu3YoOG9d3TzEfPZ++e1P2d60PdLP8azZX3iekx++rUTD/Y7rXOTXnx51k+y/926i33TQz/Hsz76RNeYztN02daVN80ZeK6NYLFpirZIe3iVTR/e5dfCj5jP2g9QyPy/bn3Ks9zNCf57MnDJcNq9ds0gLl/P63FniZ7X4SZUUcr/34Vzvs//dD7963xI68L97yGXmM6PXrNvCRUvluPaXyoB+F8mcud+ZOf7Yu0/2MT/39bOkf9dN533QqGdE7+Ul5mfz5rU38z5/V/Y4wztfNgQQQAABBBBAAAEEEEAAAQTKowAhd5KzuqmQ+6/Fy7yVwl3PbukFOy+/+YFc3n+E1N9/Tzn91ONlTxNkawDxzvufm+D3Xjmt5XGmFcWJYpYdy7inp3sh34g7L/PCKD+M2W6braTF8Q3llKZHmHBjmfQ3YXnUBDgvjR/ghW7PTXvHC1qu6nmmF1rrauVRJsybaYKSF80+dbaoVRBya69wHe/g/f4jn3zxvdw+ZLycdOxhBX2Nkw25NZD9yIStXfre7R3b6LADzMroL71rfXLEDXLgP+G6kmrI/+kX87xwX1dXF9+SsdCw5pLrhniH3n9HH6lWtaposF5aYKR9gluee603D91NOOlvGsxvVrOGOd/95RYT4vk9uTUgbd25n7Q4oaFcaB40aMisAaMGx344rwF+4za9vIcRGjZqWLSXaR9yWrf+st3WdWTU3X0Lxvl5/p+iobL2NvdD8sLnmpeXMA8UrpRam9XwwuYtNt/MhMC/er3eO7U/0Qtb9YHGrNlzpdf1Q81q+d5S3/RK1x7jarxoyTIZO+gqY1DVC8jbnH+9F1zeePl5Xij7kmk7M+yhSXLPDT1McN7QG/rU867zAu7dd9nOG2OXHbcV7V1efFPrZOpde8cYL1zToE0fuPz511ITjD5igs3qBcG+rpbvcvnd3nitTJsbrX3LoEe8QF/Dfz/k3n3n7UxQ3EBaNGko+hm66Z6HPZOJD+b3Ti9pKxxyJ+OpweAJHS+X3l3aF+nnPvHFt70HUC8/eY/XYkjvae3/rvNy4H93N5+TeXLzoHFysjm3m8yv6aYB8JszP/FC2ovOaWUst/Fa6JS1JfNZ1Rp6n/7y20ITWHYy99ZW8sKrM72fD6PvuUKONvduWa4aTJ/d8zYvpNd7Yh9zn+pK7JuNvd6rT4680fsslnYdV5gHN2+agPu2qy+UA4yBBqkPPDrFa8lUWk/u2wY/5p2n/mzpb94jsK15uDTppXe8BwJ6/+r86jddOnS90fvs6Dzot0HuGDpeVpoWT3ova5unkja9T048va/5LDXyHlDp50ZDW/3M7mbunavNz7462of/3Y9l4ANPy7mnnST6AKikrXjIrfdFWXW0tdHJZ18tVapU8q5Nf6ZqCD336x+9dwTMmjpCaptzKtyTWx9UFP+sahul0n5m+e9L0IdYOv/aaqmXMYqbh3z3P/SczDQ/C/TzoO9U8D83+jBJ/z3ToskRJuRe5X3WdF79z02fG4bJtz/8Yt7rcIG5V7eSX8zPJfXeYbu65lsu//68Kuu+5fcRQAABBBBAAAEEEEAAAQQQyCUBQu4kZ8sPuccNvsYLHnXbsCEuGpTeM+IpLxB67qHbvODr5Tc/NEHHcC+I0MDV33SVn664fd6sXPSDXw2mWpggRVcHapjlhzHaeqNwgKqriPvd9aCMvfcqOeKQfWWpqaP7+qv3dIxvzQpA7ZetgbAG3/456yrOwi9s1DovvjZL/u/FkSY4rpL0Sm4NA+d+8z85/aKbZfgdl3rtSnTFddOOfb2AXl+WqZsG7o3b9PaCwJJeFKn7JGPh76d/f2TItaXOlH+dGjrdbkI3nQsNL9X41wUL5aQzr/RCur8WLy0Scmug/OsCs7LUrAT1+43rIEe16umtDNUHCH6wpCGjzo+/PfHcDC84etUE6xog6qaB4CMTpssbEwd7rsU3DWU1HKtRvZoJfP9dVayh1Pzf/5Jnx9zsHfLBnK+9wEtX4OtLTXXT1bQaBPs9uXUsDbR1BX3hF57qQxQNSl98LH/F/akmEPzD3CfvPDfUhHWVSzVMtt6CPxfLWjO/u5pV4/72+KRXjcXj8r65n/TbARoYL1r8d5EVurpKdfL0d+TaXp302Y734km9Z/Thjr8Nf/g5GfHI8/LxK2OkainnWjzkTsZT+7p//f0vBd+m0PE0GKxkHhbpZ+xjszr/nF53eN9uOKN1k4LzGfvkS94Dj9efuc+bLw10n5w8Q54wD3T8b0uUClroN5L5rH78+XfmHG6XwbdcUmTltobF+m2JjmZFdVmuuupY75tB/S823zA4vOAMtBf9dXeOKTjvkq5Dw9mjW19irv8E80LQswuO1aB15KPPbzLkVhO93/x7Qn+m1W/WVTp3bO79DLx7+JPy+KTXvM+FBsW66YruU865Rv5jHsCVFnLrfoc272YeFDYpaFei94iuzNc5KbwyXx/46cPFmS+MKFh5X3huiofcydTRldD6kM0P+LWefitHH6SVFnKX9Fkt6R4pvpJb78effv3de0Dp3/v686np6ZdLM/NwTR+0+D+L9AGWPsjytzGPT/VWts8xnxv9jOuLi/XbM7dd/e83YX4zP1+WmW9L/Nd8W4INAQQQQAABBBBAAAEEEEAAgfIoQMid5Kz6oURJu+tqTv26eOOGB3i/7Yfc2q7kkAP/7d99yEndvJWt/a/oXKTMVbc+YFbsfSHvPj+sIOQuHkz7q4Q1gDq73YleuPz082/I9Dfel99+XySr166VhAlRtV2Bhhv6wkL/nIuvstZVrBqeaSivK5STXcldUsitF6KB0bgJL8tbk4Z4bVOmv/GBXH3bKHn92ftKXDWsxyRjoftpGK5bsiH3F6b9wYVX3FPwMGDo2Iny1POvm3MbKpNefKtIyK1135r1qTw95XX54acFstysitSA7u/lq0y7haO8Fdl+sKQrSXuc27pg3lauWuO1DrjgzBYFv66rpo84ZL8iIWHx+0UfRDz6zMtmlfv33gOPvESet7JVV2LrSnTdkgm5tRXMD
z/9Jq88NbDIEI+Y2hoqfvDSA/ltI0zIXdesdtX+3pvakq2nHo+Yuda2GRq6a1uF9es3eNeg56L3yOEndzcroI/Y6D73x/dNtX+99rH3N50nXWWs91FJLxjV/Yq3K0nF0/886nkf3+FSue/mS7yVs9rK594HJnj++g0Kf/vqu5/MCuSbCh7oaDg84YU35JNXx5rV8Rt/O6E032Q+q/68vWmC4MLhbeGaZbn611G8hrYQObnT1aItkM5sc4IX1he/js++nCdnXnzrRgG53x9+Uyu5daX6Ry+PLnL52grmBGN7k1lR3qvfEPnBrAb3H7z4O7a/8Ebv85ZKyK33qQbM+o2WwtszU98033YZt9FDH3+f4iF3MnX0wcwA81kqfj/qr+lnuKSV3Dpe8QdSJd0XxUNu/Xmo32goHEzrcRqy/2Fa+ujDLf9z07d7R7ngjH8/N0+bz41+Q8U/zzuHPe49VNBvkxx/dH3zoOy/3opzNgQQQAABBBBAAAEEEEAAAQTKswAhd5Kz64cS2oN37//s5B0VMf+n7RU02Cu8+SG39pr1+wv7Paa1l3Lxr9RrOwFdqa0r8fwwRlfu6epNf9P+xxoc+avDNQx82vRY1hYXump7s82qm7D7L29FavGQu/AqS62nIXTfm0cUrOzMNOTWlhW64lBXcmu4rq02tKWK9qwuaUvWQo9NNeTezLTNaHZGXznYtIq5+/ruXruDExo3MMFzJ/NQID8M8tuV+G1lWp54pHQyDw40WI2Y8FJXquvK7cIhtx5/drumRS5Ha2nY+4pZNa4tGTS0e/7h24v0TS98gK4qb3N+P9ljlx28edzJrN7X1cS6IvzLb39KKeTudMnt3op1bbFSeNMHHeo79dE7vZYOGnJrWxBdIbypLZl62upEX7C60KyI79f7HNlv712latXKXpsUDYk15NY2Gwc0Od9bxVvaywL9sK64afGwrqTzLRxyJ+updfQBxP6mr73O6WPPviIPPvGizHhmkNcKxu+DXtxSj9MAX4PajqbtkIbD2iNfezGnsiXzWfVXTH847QFvpX/xTb8FUJarfx2zp48u0tbHb6fkPzgr6Tq0LUbXKwbKyLsu83q4+5u2cOl4Uf9NruQuycQLuU2rEv05pivUN5gV0PqwrfDW87rBssC0Vkkl5Nb7VOdE+3MX3qa9/r5ou5XiD/T8fYqH3MnUeXPWJ16/fn+FtF9rnPm2hn57x1bIre1v9P0D+uAkZu7Hwpu2LamzRW0vwE72c6P3iraQmWJW8Os7BPTFvUcdur9cc8lZRb6Bkco9zL4IIIAAAggggAACCCCAAAIIZLsAIXeSM1TWiycLlykp5Nbf16/et2xa8kpu7berQYYfxmhAqEGhv+nX+zXg81djai1dKam9kf1N+7pqKFw85NZVgPvutWvBftoDXPvI+q0uMg25tfClN97vvTxwyK29pLEJuEaaNhSlvXAyWQvdL9WQW1csatinQdRgs1L34mvvkwmj+nuBbPGQW1dcfmd6177y1L0FK3M1ID7UrKrUl0GWFXJ/979fvT7WukL4LROIacuJ0l44qdfy8FPTvN7B0x6/22tP429+L+ZUVnLrdX1n2lMUbmlT+B7UBy/a9iDZkDuZenq9+gBAH/S0P+WYguG0xYiu5vdXcmvbC33wUnxVqn9AsmFdSR/NwiF3sp5aR1e2Dh7zjLz93DCvpceRZsW9PiDSTfte3zPyKW8et/ynnUbhsbcyIaO+sC/dkDuZz6q2v7l9yGMbrSYvfB5lufoepa3k3lRYrw9MNPgt3upEX5CqbVI2tZK7rJBbW3HonBcPs3U8feFnKiG3fma1rUdpK7n9hzvF753iIXcydfTnuLYE8sNsv6b/MMFWyK39tg9rcZH5JtBBXu/x4lvU9PfRVjDpfG60dZR+S0hD+fXr15s2TgNLfTlnSZ83fg0BBBBAAAEEEEAAAQQQQACBXBEg5E5ypmyE3BdcNkD+MiuyNVz2N//leXvtvpPXS9sPY7Tftfa99reppoe2tgDRIK6B6Qle/8QL5cy2Tb2Xr/mb9qXV1gHFQ+7igbm+cE+v5/9MD2vt4ZpOyO33/fbH9l+K1/2cU0XPtbQXTvr7J2Oh+6YTcms7A+1zvscu23sv9/S9i4fcGt5pv+TCLzr0+xef2uxoufO6f9uVlLSSW8/v3N53eCum3/6/T0VXyupxpW26KlTbp/i9q3U/bUPTyvT4rbeVaVcyYZB3aGntSvRleX7fbu3Dq8GyHlO4tYeuqtfV1ZvXqunVSjbkTqae3zd62O19pIlpg6CbPhTQVdLaEkP7oO+4XT3v5ara/sXvi6776UrhgSOf9laU6wsEtSd3piu5k/XU8VesXO21KDn/9BZe32//hZP6e/oyVn1hY+Hey/rrGsAu+3tlQQuTdEJurz91Ep/Vz7/6Qc7ocUvBZ9e/h7Rfu5rqZ7gsV31Brd6PxYNq/8WXeu9oT+aSrkOvU0P0czo0K/IzRcNRfWCUScjtf1PlbfMQT+deN+1Bry913GPX7csMuTu2Ot7rj6+b9gcfMW7yRj25tee4vhT0ncnDSgxxi4fcydR55a0PvdXh/os/dXztyd3afBtDX8a5qZC78Ge1pJ8HxduV6Cp6fbFs8cBffz7oyzz153QyIbe+C+At87NIXwqsD2b8zW9R9bbpzV/Si2dL+5nFryOAAAIIIIAAAggggAACCCCQKwKE3EnOlI2Q228JoC+3O/e0k8yLKzeIvtxOv1r+yJD88NoPY7YxwcY57ZvJicceKhpc9rtrjPdVdm2HEYtFvfB3vml/cb8JwnX18viJr3p9kZ81fae1vq5S1dDnmjtGy9577CQ9zmst+++9m2lx8b1ZxT1WTml6hLciV7dUQm5/Rbn2BW93cmMvSPbbPGhY+5MJmHt3aV/qCyd97mQsdN90Qm49zm+/Ubh/bfGQ23/53JBbesmB++7heT3/8ruybt0G0ys74bVuUOuSAln/OrRNwtW3j5KapsXEmybEK+2Fibq/H15369TKa32iLwocMPwJr6XNK2/PlsmmR/oOJtCcY1aEF3/xpM6jnt9o86LErUxbFe19rqvIdUX4pV1P815++bVpmaL9eP+7585eKKlbsiG3Bmhl1dOXE57Q8XKpb1rB9DdtKJab4PjuEU/KTuacJ7zwptx9Q3cTfjcwrVd+9OZN28Ccbtp8aI9zPS996eDjw6+XpX+vsBJyJ+uprXN007B1ovl86Cru4ivgtY/7vB/ne8G7fuvhT9MHWVuI/O8X7SU9wGv/UVI4rK1/9DN8V79u+Q9VStiS+axq+KsPXbQP+PV9zvVeYKsrpHWV+QMD+nr9/j/67NtNuuqLVrVd0fzfF3qtg/5j7qu5X//g9TnXcNu/5tLCeu3//P6cL+XGy87z9tcXcmprF+2nnUnI7X/DpGXTI6X7uad6Dxz0ftAXIernZVMrufUlitonXV8Kqi//9ENmtb6651lekPvaOx+ZdjlPm595HUr9uVM85NYWLhpWb6qOnqe2O9LxbzY/KzczPe71YdAX5v7Wz25pIXfxz2rxdlZ6
ixQPudVIP/PtTz5WzmjTxGtZ8+4Hn3vfMOhretd3an9iUiF3TdOuSc953712MdatRf89oveyPqxYaR7a6L8/2BBAAAEEEEAAAQQQQAABBBAojwKE3EnOqo2QW4fSoPIBsxrxG/MCQv0augZqPc9vI0c02Nc7E+0zfNKZV3qriD/+7DvR1YSrzVfO6+/3H+9FfvrVdt1+/OV30RXZc82LFjerWV3aNG8kvbq0kwH3PyHPTH3LvPivoRx28D6iq7ufeuAmGWpaeGhopT2gTzzmUO9FmRrc6ZZKyK3797vrQRPAve8d/5IJALUvuW66wnL0+BdMr+PSXzhZmLssC9033ZD7WWPQ/95x3opPDcd0Kx5y60pd74VtZk401NY+3Ooy66Mv5GZz7Jab1/J6/24q5Nb+wIeZFy3qgwXteVvWpiGZts7Q1i4691ebY2oYx65XDhQNkZ8aeaNZ0bl8o5BbXwx42U3DvdWenc1qZO3prQ8c7hv9jGhLiVVr1poVn3Wk6TGHSM/ObQvmNtmQW887mXq6Yl0Ds1/NStztTfh30TmtpPlxh8t5l94lX5nw7wYTkGorkzdmzjH3w/PyvQmO9SFM44YHir5oUoPuZFakJvviyWQ899h1B29a/JcoDrutt/bSqZMAACAASURBVDQx/aILb/qASFtTvGJaVGgAqm1LDjt4by841ZXUupUUDutcak/14i2BCtdO5rOqrXH0pbGDTG/zGe9+JPpiU/2GgIbC+nn1t0256j56Dw0aNUFef2+O56yOzY49zHvwpA9GSrsO/XXd/xbzIEDDVd3054d+M0NfSKkr8PU8tM2Rfj781jqlBeaFe3JrLX0Ioj8b1FZ7xOv9qz+ndMWz3vOlbRPMewe0xY/2ldYe/9ovfJ554apeo87n2rXrvQcCZ7Y9wXupZmlb8ZBb90umjn57Qef3e9OqRy3PMg+ntI+7vnxSX7apK6eLm5T0WS1+XsVDbv19/RwPNz9D9SGR9ufWn/X6kEj7wfvzk8w3IPRhzSDzc0Fb0KxcucY8FKstDevvK73Ng099GMaGAAIIIIAAAggggAACCCCAQHkUIOQuj7MawjVp24p2XW6Ug8yK6Fuvyl8hXhG2l2boSu4HZPoT92z0AtKKcP25dI364OL/Pv7KrMy+q6AHu43z1z7lQ27r5T1kYCtZQF+cW9u00PFX1ete2uZmn//s7H0DIFs3Xc2tK+RrmlXc/qYr42e8+7Fo73M2BBBAAAEEEEAAAQQQQAABBBDIDgFC7uyYh5w9C119uuCPRd7q9FlmJaJ+HV6/Il/eN11xr32UdbV4h5bHypU9zijvl5yT17du3Xr5zdyf2lZEV2qPMC9EPfbIg6xdy9ff/yw33vOQ93JTtpIFvjFtdNpfeKP3bRNt0xQzq5QnmXcHaCuWcYOv8VaMZ+Om904z862abc3qZ333gfay1rYitw5+TLp1aik9TDsQNgQQQAABBBBAAAEEEEAAAQQQyA4BQu7smIecPYsnnpshd93/uLci83rT6kN7W1eETV/SFzH/16rZUXK5eeGk35e8Ilx7Ll3jt6YtULsuN3ir7LX1R9sWjXPp9MvNub4161MZ9dgU+c70stZWHNqKRXvT+y8wzdYL1X7k95nWKB+ZVk9r1qzz7qO25l0EGtZr2xI2BBBAAAEEEEAAAQQQQAABBBDIDgFC7uyYB84CAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIA0BQu400DgEAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIDsECLmzYx44CwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIE0BAi500DjEAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHsECDkzo554CwQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE0hAg5E4DjUMQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEskOAkDs75oGzQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEhDgJA7DTQOQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEMgOAULu7JgHzgIBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgDQFC7jTQOAQBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgOwQIubNjHjgLBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgTQECLnTQOMQBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgewQIOTOjnngLBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTSECDkTgONQxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSyQ4CQOzvmgbNAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSEOAkDsNNA5BAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyA4BQu7smAfOAgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCANAULuNNA4BAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCA7BAi5s2MeOAsEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBNAQIudNA4xAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB7BAg5M6OeeAsEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBNIQIOROA41DEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBLJDgJA7O+aBs0AAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBIQ4CQOw00DkEAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDIDgFC7uyYB84CAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIA0BQu400DgEAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIDsECLmzYx44CwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIE0BAi500DjEAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHsECDkzo554CwQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE0hAg5E4DjUMQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEskOAkDs75oGzQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEhDgJA7DTQOQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEMgOAULu7JgHzgIBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgDQFC7jTQOAQBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgOwQIubNjHjgLBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgTQECLnTQOMQBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgewQIOTOjnngLBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTSECDkTgONQxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSyQ4CQOzvmgbNAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSEOAkDsNNA5BAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyA4BQu7smAfOAgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCANAULuNNA4BAEEEEAAAQQQQAABBBB
AAAEEEEAAAQQQQCA7BAi5s2MeOAsEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBNAQIudNA4xAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB7BAg5M6OeeAsEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBNIQIOROA41DEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBLJDgJA7O+aBs0AAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBIQ4CQOw204of8tmi1hSqUQMCtQI2qMalSOSZLV6xzOxDVEbAgUKVSVGrXrCx/LVtroRolEHArEI1GZOvNq8rvS9a4HYjqCFgS2H6r6sKfXy1hUsa5wNZbVJPFy9fKhnjC+VgMgECmAnVqVZFVazbImvV5mZbieAScC2xu/ntLf7auNPcsm10B/bMWW/AChNwWzPmPBAuIlHAuQMjtnJgBLAoQclvEpJRzAUJu58QMYFmAkNsyKOWcChByO+WluGUBQm7LoJRzKkDI7Y6XkNud7aYqE3JbcCfktoBICecChNzOiRnAogAht0VMSjkXIOR2TswAlgUIuS2DUs6pACG3U16KWxYg5LYMSjmnAoTc7ngJud3ZEnI7tiXkdgxMeSsChNxWGCkSkAAhd0DQDGNFgJDbCiNFAhQg5A4Qm6EyFiDkzpiQAgEKEHIHiM1QGQsQcmdMWGoBQm53toTcjm0JuR0DU96KACG3FUaKBCRAyB0QNMNYESDktsJIkQAFCLkDxGaojAUIuTMmpECAAoTcAWIzVMYChNwZExJyuyNMqzLtStJiK3oQIbcFREo4FyDkdk7MABYFCLktYlLKuQAht3NiBrAsQMhtGZRyTgUIuZ3yUtyyACG3ZVDKORUg5HbHy0pud7abqkzIbcGdkNsCIiWcCxByOydmAIsChNwWMSnlXICQ2zkxA1gWIOS2DEo5pwKE3E55KW5ZgJDbMijlnAoQcrvjJeR2Z0vI7diWkNsxMOWtCBByW2GkSEAChNwBQTOMFQFCbiuMFAlQgJA7QGyGyliAkDtjQgoEKEDIHSA2Q2UsQMidMWGpBXI95G7X5QZpf8qxcna7pmUibWrfv1eskiNbXiyTH75N9txtxzJrZboDK7kzFTTHE3JbQKSEcwFCbufEDGBRgJDbIialnAsQcjsnZgDLAoTclkEp51SAkNspL8UtCxByWwalnFMBQm43vN/Pi8oxh1d1UzygqnO/+Z/Uq7OFbFNvyzJHJOQukyi3diDkzq35qqhnS8hdUWc+N6+bkDs3562injUhd0Wd+dy9bkLu3J27injmhNwVcdZz95oJuXN37irimRNy25/1t96Oyow3o/LgkMr2i2dpRULuLJ2YdE+LkDtdOY4LUoCQO0htxspUgJA7U0GOD1KAkDtIbcayIUDIbUORGkEJEHIHJc04NgQIuW0oUiMoAUJuu9KLFkVkyPCYVzTIkFtbghzTppc8MKCvHHHIvgUXddbFt8phB+8jl3U7Taa88p6MGT9V5v/+l9TZsrZ07thcOrU/0dt3wPAn5e/lK72/Pv1ynrz93FApHFzH43kyeMyz8uKMWbJ02QrZdadt5aqeZ8oRDfLH0n2bHnOofPbl9/LBnK9l+23ryjWXnCWNDj9Aircr+XXBQrl9yHgzzveidZsde5hc17uTVK9Wxcpk0K7EAiMhtwVESjgXIOR2TswAFgUIuS1iUsq5ACG3c2IGsCxAyG0ZlHJOBQi5nfJS3LIAIbdlUMo5FSDktsebSIiMfigm8+dH5JjGeXJuh2DblfS45j7Zcbu60q/POd5FLVy0VI5rf6lMGnurVK5cSVqde63cf0cfaXTYAfLJF9/LhX3vkfHDr5cD9tlNBo2aIJOnvysXndNKmh/fULYyIXjhkHvClDdk2EOT5LFh/WS7bbaS8RNflbFPvChvTRri1dZ9Fy35W26/5kI5eL//yLinp8vDT0+T15+5TyLRSEFP7j122UHanN/PC+Iv69ZR1qxdK9fcPlrq1tncO9bGRshtQZGQ2wIiJZwLEHI7J2YAiwKE3BYxKeVcgJDbOTEDWBYg5LYMSjmnAoTcTnkpblmAkNsyKOWcChBy2+N9/8OovDgtKnXqJKRXj7jstHV1e8WTqPT8y+/J0AcnyoxnBnl7Pzl5hjwx6TV54dE7vRXTi5f+LfW22qKg0qmd+8nZbU+Q01s38ULuV96aLdOfuLvg9wuH3GvXrZdVq9fIlpvX8n5/ybLl0qh1L6/27jtv54Xc/91zl4Kges3adXKEednkPTd0l4Zmtbf/4sm/l6+SCy4bIO+/NFKqVc1fuf2ZWTl+bu875MNpo7zAPNONkDtTQXM8IbcFREo4FyDkdk7MABYFCLktYlLKuQAht3NiBrAsQMhtGZRyTgUIuZ3yUtyyACG3ZVDKORUg5LbD+/fyiAy9PyYmC5ZuXeKy4w4J0T9rBbmtWLlaGpmWJY/ff73st/eu0uXyu+XQg/eWHue29k7joadekudeescE1CtE/9tl6d8r5Irup8u5p53khdxfffezjBl4RcEpFw65V65aIwNHPiXvfPC5rFmzTiIRMaH5cpn44C2yz3929kLuFk0aStezWxYc37Tj5dL59BZy6klHF4Tcc7/+n1w/YGyJLC8/eY9ZiV4vYzJC7owJCbktEFIiAAFC7gCQGcKaACG3NUoKBSBAyB0AMkNYFSDktspJMccChNyOgSlvVYCQ2yonxRwLEHLbAX5kfFTm/RCVww7Jk1an5HlFgw65dcye1w2WvffYSc47rbk0bmtWWj9yp+yy4zYy4YU3ZfDoZ2TkgMvloH338M6v/YU3SmsTQPsh93f/my8j77qsAKRwyH3lrSPll98WyuBbLpFt69WR5aYHuK7ULhxyt2neyKvlbxpydzvnVNP+5PCCkPt7M8Ytgx6RWVNH2IEvoQohtwVaVnJbQKSEcwFCbufEDGBRgJDbIialnAsQcjsnZgDLAoTclkEp51SAkNspL8UtCxByWwalnFMBQu7MeT+bG5FnJ8WkVq2E9OkZlyr/vD8xjJD7hVdmer2wz+nQTJ54boY8M7q/d4HX3TlGtOXIvTdd7P1vfRnk8aZfd58L2ycVcjc9va90PesUr7WJbu99OFe6XTmwSMh98P57yo2Xnev9vo7V8JQeMvTWXqK/7rcrWbdug3S8qL+8NmGQbLd1HW/fVavXmt7c66TOFvmtUDLdCLkzFTTHE3JbQKSEcwFCbufEDGBRgJDbIialnAsQcjsnZgDLAoTclkEp51SAkNspL8UtCxByWwalnFMBQu7MeFetisiQ4VFZvToi53XKkz12z1/FrVsYIbe2FdGWJfvvvZs0Obq+nH9GC+9chj/8nLz85ofyxIgbvFD5rvufkG/m/SzHm320ZYm2K9nUSu7z+twp229TV267uot8+8MvMuKR5+XtWZ/KsNt7yzFHHOS1K9E+3UNv7e21Lxn75EsybsJ0eXPiYNPCZUNByL3nbjvK6RfdLFvX21JuvfICicWicuewx2XBn4vk4fuuyWwy/jmakNsCIyG3BURKOBcg5HZOzA
AWBQi5LWJSyrkAIbdzYgawLEDIbRmUck4FCLmd8lLcsgAht2VQyjkVIOTOjFdXcOtK7v33T0jHdvEixcIIufUEel0/VF5/9+Miq6WXmj7cfW8eIZ9++b1sXXdLubrnWfLX4mVyx9Dxcsn5bb2AelMh9+eml3Y/sxr8tz/+MiF2/gsmxzw+Vaa/8b6MvudK6T/wYWnborHMnP2FfPTZN7LDtnWl36XnyBHmpZO6atxfya0h968LFsptgx+T2Z9+bVa9V/b2ua53J6lbZ/PMJoOQ24qfV4SQ254lldwJEHK7s6WyfQFCbvumVHQnQMjtzpbKbgQIud24UtWNACG3G1equhEg5HbjSlU3AoTc6btqD27txV21akIu65UnNWoksiLkTv+KyseRrOS2MI+E3BYQKeFcgJDbOTEDWBQg5LaISSnnAoTczokZwLIAIbdlUMo5FSDkdspLccsChNyWQSnnVICQOz3edevEtCmJyfLlEWnbOi71DyoacGvVsFZyp3dF5ecoQm4Lc0nIbQGREs4FCLmdEzOARQFCbouYlHIuQMjtnJgBLAsQclsGpZxTAUJup7wUtyxAyG0ZlHJOBQi50+N9cVpU3v8w6vXg1l7cJW2E3OnZZnoUIXemguZ4Qm4LiJRwLkDI7ZyYASwKEHJbxKSUcwFCbufEDGBZgJDbMijlnAoQcjvlpbhlAUJuy6CUcypAyJ0676/zIzJ6bEwqVRK5tFdcatfaeBW3ViXkTt3WxhGE3BYUCbktIFLCuQAht3NiBrAoQMhtEZNSzgUIuZ0TM4BlAUJuy6CUcypAyO2Ul+KWBQi5LYNSzqkAIXdqvHHzbslhI2OyeHFEWpyUJ0c2LHkVNyF3aq429ybktqBJyG0BkRLOBQi5nRMzgEUBQm6LmJRyLkDI7ZyYASwLEHJbBqWcUwFCbqe8FLcsQMhtGZRyTgUIuVPjnfFmVN56OyrbbZOQ7t3iEomUfjwruVOztbU3IbcFSUJuC4iUcC5AyO2cmAEsChByW8SklHMBQm7nxAxgWYCQ2zIo5ZwKEHI75aW4ZQFCbsuglHMqQMidPO/CPyMyfHTMO6CnCbjrbV1ymxK/IiF38rY29yTktqBJyG0BkRLOBQi5nRMzgEUBQm6LmJRyLkDI7ZyYASwLEHJbBqWcUwFCbqe8FLcsQMhtGZRyTgUIuZPjTZg8+wETcC/4IyLHHZMnTY4rvU0JIXdypq72IuS2IEvIbQGREs4FCLmdEzOARQFCbouYlHIuQMjtnJgBLAsQclsGpZxTAUJup7wUtyxAyG0ZlHJOBQi5k+OdOSsq01+NSp06Cel1cVxi0bKPYyV32UYu9iDktqBKyG0BkRLOBQi5nRMzgEUBQm6LmJRyLkDI7ZyYASwLEHJbBqWcUwFCbqe8FLcsQMhtGZRyTgUIucvmXbIsIsOGx2TDBpFuXeKy4w6bblPiVyTkLtvWxR6E3BZUCbktIFLCuQAht3NiBrAoQMhtEZNSzgUIuZ0TM4BlAUJuy6CUcypAyO2Ul+KWBQi5LYNSzqkAIXfZvGPHxeSnnyPS8PA8OaV52W1KCLk3Nn3wiRflqckzZN36DdL0mEPlut5nS6VYfn9z2xshtwVRQm4LiJRwLkDI7ZyYASwKEHJbxKSUcwFCbufEDGBZgJDbMijlnAoQcjvlpbhlAUJuy6CUcypAyL1p3o8/icjkKTGpVSshfXrGpUqV5KeDldz5Vh/M+VpuGviQPDasn9SoXlV6XT9UTmh0iJzV9oTkMVPYk5A7BazSdiXktoBICecChNzOiRnAogAht0VMSjkXIOR2TswAlgUIuS2DUs6pACG3U16KWxYg5LYMSjmnAoTcpfOuWBmRIfdHZe3aiJzXKU/22D35VdxalZA73/bW+x6VbbeuI13Pbun97zdmzpFxT0+XR4Zc6+TeJuS2wErIbQGREs4FCLmdEzOARQFCbouYlHIuQMjtnJgBLAsQclsGpZxTAUJup7wUtyxAyG0ZlHJOBQi5S+d9+pmofPFVVA7cPyEd2sVTnoewQu6160R+/Cm5vuEpX9QmDqhaVWTXnSMb7dGl791yRusmcqJpU6LbDz8vkPMvvUvemjTE5vAFtQi5LbAScltApIRzAUJu58QMYFGAkNsiJqWcCxByOydmAMsChNyWQSnnVICQ2ykvxS0LEHJbBqWcUwFC7pJ5v/kuKo8/GZXq1bVNSZ7UqJF6aBxWyP3rbwnpP8C8JTPgbcftI9L/6kobjXp2z9uk+7mnSuOGB3q/t+CPRdLmguvl/RdHOjlDQm4LrITcFhAp4VyAkNs5MQNYFCDktohJKecChNzOiRnAsgAht2VQyjkVIOR2yktxywKE3JZBKedUgJB7Y961a0ybkuFR0XYl7dvG5aADUg+4tWpYIffCRSLjngg+5K63VUQ6n7XxyyQvvOIeOa3lcXLScYd52PN+nC/6a288O9jJvU3IbYGVkNsCIiWcCxByOydmAIsChNwWMSnlXICQ2zkxA1gWIOS2DEo5pwKE3E55KW5ZgJDbMijlnAoQcm/M+/zUmHz0ccTrwa29uNPdwgq50z1fV8fdMXS81N6splxyQVtviKmvzpLJL78rDw680smQhNwWWAm5LSBSwrkAIbdzYgawKEDIbRGTUs4FCLmdEzOAZQFCbsuglHMqQMjtlJfilgUIuS2DUs6pACF3Ud6ffo7I2HExqVJZpPclcaldK71V3FqVkDvf9uPPv5Wrbn1Axg+/XmpWrybao/vMNidI2xaNndzbhNwWWAm5LSBSwrkAIbdzYgawKEDIbRGTUs4FCLmdEzOAZQFCbsuglHMqQMjtlJfilgUIuS2DUs6pACH3v7xx827JIcNjsnRpRE5pnicND09/FTchd9Hb9uGnpsljE1+ReDxPTj7hCLmyxxmi//3kYiPktqBKyG0BkRLOBQi5nRMzgEUBQm6LmJRyLkDI7ZyYASwLEHJbBqWcUwFCbqe8FLcsQMhtGZRyTgUIuf/lfeW1qLw7Myo77JCQbhfEJZJhBstKbqe3bqnFCbktuBNyW0CkhHMBQm7nxAxgUYCQ2yImpZwLEHI7J2YAywKE3JZBKedUgJDbKS/FLQsQclsGpZxTAULufN4Fv0fkgTExL9judXFctqqTfpsSf8IIuZ3euoTcLnkJuV3qUtuWACG3LUnqBCFAyB2EMmPYEiDktiVJnaAECLmDkmYcGwKE3DYUqRGUACF3UNKMY0OAkFskz3QlGT4qJgsXRqTJsXlynPnLxkbIbUMx9Rqs5E7dbKMjCLktIFLCuQAht3NiBrAoQMhtEZNSzgUIuZ0TM4BlAUJuy6CUcypAyO2Ul+KWBQi5LYNSzqkAIbfI2+9G5bXXo1KvXkIuvigusagdckJuO46pViHkTlWshP0JuS0gUsK5ACG3c2IGsChAyG0Rk1LOBQi5nRMzgGUBQm7LoJRzKkDI7ZSX4pYFCLktg1LOqUBFD7kXLY7IsJExyTMvnezRLS7bbZt5mxJ/wgi5nd66pRYn5LbgTshtAZESzgUIuZ0TM4BFAUJui5iUci5Ay
O2cmAEsCxByWwalnFMBQm6nvBS3LEDIbRmUck4FKnrIPWpsTObPj8iRDfOkxUl22pQQcju9ZcssTshdJlHZOxByl23EHuELEHKHPwecQfIChNzJW7Fn+AKE3OHPAWeQmgAhd2pe7B2uACF3uP6MnpoAIXdqXuwdrkBFDrk/+CgqU1+MSq1aCbm0V1wqV7I7F6zktuuZbDVC7mSlNrEfIbcFREo4FyDkdk7MABYFCLktYlLKuQAht3NiBrAsQMhtGZRyTgUIuZ3yUtyyACG3ZVDKORWoqCH3ipURGTw0JuvWi5zXKU/22N3uKm6dNEJup7duqcUJuS24E3JbQKSEcwFCbufEDGBRgJDbIialnAsQcjsnZgDLAoTclkEp51SAkNspL8UtCxByWwalnFOBihpyj38yKt9+F5WDDkxI+zamIbeDjZDbAWoSJQm5k0AqaxdC7rKE+P1sECDkzoZZ4BySFSDkTlaK/bJBgJA7G2aBc0hFgJA7FS32DVuAkDvsGWD8VAQIuVPRYt+wBSpiyD13bkQmTIpJ9eoJubxXnlStZu9lk4Xnk5A7nLu7Qofcv/z2p9xw90Pyzfc/y/bb1pV+fc6RBgfsWepMLF22Qk4+52rp06W9nN66ScF+hNzh3LyMmpoAIXdqXuwdrgAhd7j+jJ6aACF3al7sHb4AIXf4c8AZJC9AyJ28FXuGL0DIHf4ccAbJC1S0kHvtmogMGhaV1asjclq7uBywv5uAW2eAkDv5+9DmnhU65D6vz53SpFED6dTuRJk5+wsTeI+VV5++1zScj5VofN2dY+TDT7+RC888mZDb5l1IrUAECLkDYWYQSwKE3JYgKROIACF3IMwMYlGAkNsiJqWcCxByOydmAIsChNwWMSnlXKCihdwTJ8fk088iXg9u7cXtciPkE9rP+AAAIABJREFUdqlbeu0KG3IvWvK3ND/rKpk1dbhUiuWH2h263iRXXXymHF5/n43EPpjztQwf95zsuduO5q8dCLnDuV8ZNQMBQu4M8Dg0cAFC7sDJGTADAULuDPA4NBQBQu5Q2Bk0TQFC7jThOCwUAULuUNgZNE2BihRyz/shKo+Mj0qVyiK9L4lL7VruVnHrdBByp3lTZnhYhQ25P/78O7ll0CMy+eHbCgj73jxCGjbYVzq2Oq4I6/r1G+S0bv3l3v4Xy5PPzSDkzvCm4/BwBAi5w3Fn1PQECLnTc+OocAQIucNxZ9T0BQi507fjyOAFCLmDN2fE9AUIudO348jgBSpKyG0iPRk8LCbLl0ek5cl5cvihbldxE3IHfy/7I1bYkHvm7Lky9MGJ8tQDNxXoXz9grOy1+45y7mknFZmREeMmy/oNcelzYXu5bfBjG4Xcq9aaTwwbAlkuUCkakWg0KuvMvcyGgE2BRCIikYjdJ+HRSMS0jorK2vXcrzbnilpuBMwnQKpVMf391nG/uhHOjap55r+XzL9mc2KrUbWS8OfXnJgqTtIIVKsSM38eyJNEwu6fNcB1I+Diz4VuztRN1aqm9emGeJ7EuV/dAFPVqoAuLMozP1r1ni3P26QpCXnzXZFddhLp2ysSyKXqn7XYgheosCH3nLnfyU0Dx8mUcbcXqF/ef7gcdej+0qHlsQW/9uMvv0ufG4fJhFH9par5XkNJIffSleuDnzlGRCBFAf0XWKVYlP+oTdGN3ZMQ0P/mtPxnBX0oU72qedq+moeIScwAu4QsYJ7JSO3qlWXZKv48EPJUhDq85hl6L+TCtkXNysKfX3NhpjhHFahdo7KsXL3ehIZ45ISAgz8X5sR1/3OSNatVknVmkcZ6bthcmrYKe641zENEfSCjDxLL6/bzryJDR4hol+IrLo1Iva2C+ZeJ/lmLLXiBChtyL1m2XJp27CvvPn+/VK9WxZNvcfZVcse1XaX+/nsWzMS4CdNl1KNTpHLl/KcwK1etMR+OqJzVtqlc2rWD92u/LVod/MwxIgIpCtCuJEUwdg9VgHYlofIzeIoCtCtJEYzdQxegXUnoU8AJpCBAu5IUsNg1dAHalYQ+BZxACgLlvV2JLlAfNiImixdHpMnxeXJc4+DCfHpyp3AjWty1wobcatjl8rvlsIP3ka5nt5Rpb7zvtS+Z9vjdXog99bVZcoTpz123zuZFuEtayU3IbfGOpJQzAUJuZ7QUdiBAyO0AlZLOBAi5ndFS2JEAIbcjWMo6ESDkdsJKUUcChNyOYCnrRKC8h9xvvBUV/atevYRcfFFcTNQX2EbIHRh1kYEqdMg9//e/5Lo7x8g3836RnbbfWvr37Sz77b2rB3RM294y+JZLpMEBexFyh3NvMqplAUJuy6CUcypAyO2Ul+KWBQi5LYNSzrkAIbdzYgawKEDIbRGTUs4FCLmdEzOARYHyHHIvMqu3dRW3tpPr3jUu220bTJsSf3oIuS3eqCmUqtAhdwpOm9yVldy2JKnjUoCQ26UutW0LEHLbFqWeSwFCbpe61HYhQMjtQpWargQIuV3JUteFACG3C1VquhIoryG3BtujH4rJ/PkROeqIPGneLLg2JYTcru7W5OoScifnRMhtwYkS4QoQcofrz+ipCRByp+bF3uEKEHKH68/oqQsQcqduxhHhCRByh2fPyKkLEHKnbsYR4QmU15B71vtRmfZyVLbYIiG9Lo7LP6/YCxSaldyBchcMRshtwZ2V3BYQKeFcgJDbOTEDWBQg5LaISSnnAoTczokZwLIAIbdlUMo5FSDkdspLccsChNyWQSnnVKA8htx/L4/I4GEx2bDBvIevc1x22TnYNiX+hBFyO711Sy1OyG3BnZDbAiIlnAsQcjsnZgCLAoTcFjEp5VyAkNs5MQNYFiDktgxKOacChNxOeSluWYCQ2zIo5ZwKlMeQ+5HxUZn3Q1TqH5yQtqfGnfptqjghdzj0hNwW3Am5LSBSwrkAIbdzYgawKEDIbRGTUs4FCLmdEzOAZQFCbsuglHMqQMjtlJfilgUIuS2DUs6pQHkLued8GpHnno/JZjUT0qdnnlStFs4qbp00Qm6nt26pxQm5LbgTcltApIRzAUJu58QMYFGAkNsiJqWcCxByOydmAMsChNyWQSnnVICQ2ykvxS0LEHJbBqWcU4HyFHKvWhWR+4ZFZe3aiHTskCf77xv8yyYLTxYht9Nbl5DbJS8ht0tdatsSIOS2JUmdIAQIuYNQZgxbAoTctiSpE5QAIXdQ0oxjQ4CQ24YiNYISIOQOSppxbAiUp5B7wsSYzP0iInvtmSedzgw34Na5IeS2cYemXoOV3KmbbXQEIbcFREo4FyDkdk7MABYFCLktYlLKuQAht3NiBrAsQMhtGZRyTgUIuZ3yUtyyACG3ZVDKORUoLyG39uDWXtxVKotc2jvutSsJeyPkDmcGCLktuBNyW0CkhHMBQm7nxAxgUYCQ2yImpZwLEHI7J2YAywKE3JZBKedUgJDbKS/FLQsQclsGpZxTgfIQcq9bJzJkeEyWL49Iq1Pictgh4QfcOmmE3E5v3VKLE3JbcCfktoBICecChNzOiRnAogAht0VMSjkXIOR2TswAlgUIuS2DUs6pACG3U16K
WxYg5LYMSjmnAuUh5J7yYlRmfxSVXXZOSJfOcadeqRQn5E5Fy96+hNwWLAm5LSBSwrkAIbdzYgawKEDIbRGTUs4FCLmdEzOAZQFCbsuglHMqQMjtlJfilgUIuS2DUs6pQK6H3L/Oj8josTGJxkT6XBKXLTfPjlXcOmmE3E5v3VKLE3JbcCfktoBICecChNzOiRnAogAht0VMSjkXIOR2TswAlgUIuS2DUs6pACG3U16KWxYg5LYMSjmnArkccsfNou1hI2OyeHFEmjbJk2Mahf+yycKTRcjt9NYl5HbJS8jtUpfatgQIuW1JUicIAULuIJQZw5YAIbctSeoEJUDIHZQ049gQIOS2oUiNoAQIuYOSZhwbArkccr/2elTefjcq9eolpOdFcYlGbYjYq0HIbc8ylUqs5E5Fq5R9CbktIFLCuQAht3NiBrAoQMhtEZNSzgUIuZ0TM4BlAUJuy6CUcypAyO2Ul+KWBQi5LYNSzqlArobcC/+MyP2jTI8Ss3XvGpftts2eNiX+hBFyO711Sy1OyG3BnZDbAiIlnAsQcjsnZgCLAoTcFjEp5VyAkNs5MQNYFiDktgxKOacChNxOeSluWYCQ2zIo5ZwK5GLInWe6kowaE5MFf0Sk0VF50qxpdrUpIeR2esuWWZyQu0yisncg5C7biD3CFyDkDn8OOIPkBQi5k7diz/AFCLnDnwPOIDUBQu7UvNg7XAFC7nD9GT01AULu1LzYO1yBXAy5350ZlVdei8oWWySkT8+4xPIXdGfdxkrucKYklJD72x9+ld123k4qV8rSuzHFuSDkThGM3UMRIOQOhZ1B0xQg5E4TjsNCESDkDoWdQTMQIOTOAI9DAxcg5A6cnAEzECDkzgCPQwMXyLWQe8myiAy5PyZ55qWTXTrHZZeds69NiT+JhNyB387egKGE3A2adZVpj98t29TbMpyrtjwqIbdlUMo5ESDkdsJKUUcChNyOYCnrRICQ2wkrRR0KEHI7xKW0dQFCbuukFHQoQMjtEJfS1gVyLeQeOy4mP/0ckUMaJKR1S5N0Z/FGyB3O5IQSco95fKr89vtf0v3c1uUi6CbkDufmZdTUBAi5U/Ni73AFCLnD9Wf01AQIuVPzYu/wBQi5w58DziB5AULu5K3YM3wBQu7w54AzSF4gl0Lu2R9FZMqLMdmsprYpyZOq1bJ3FbfOACF38vehzT1DCbmbn3WVLFm2XFasXO21LKlUrG3J7OmjbV6j81qE3M6JGcCCACG3BURKBCZAyB0YNQNZECDktoBIiUAFCLkD5WawDAUIuTME5PBABQi5A+VmsAwFciXkXrFS25REZe3aiJx9Zp7svWd2vmyy8HQQcmd4c6Z5eCgh9+vvzTHhdqVST7lxwwPSvJxwDiPkDsedUVMTIOROzYu9wxUg5A7Xn9FTEyDkTs2LvcMXIOQOfw44g+QFCLmTt2LP8AUIucOfA84geYFcCbnHPxmVb7+Lyr775MkZHbM/4NYZIORO/j60uWcoIbd/AevXb5A//loiO25Xz+Y1BV6LkDtwcgZMQ4CQOw00DglNgJA7NHoGTkOAkDsNNA4JVYCQO1R+Bk9RgJA7RTB2D1WAkDtUfgZPUSAXQu65X0ZlwrNRqVrVtCm5JM9rV5ILGyF3OLMUSsi9es06uW3wozL11VmyIR6XL94cJ4uXLpe+Nw+Xe27oIXXrbB6ORpqjEnKnCcdhgQoQcgfKzWAZChByZwjI4YEKEHIHys1gFgQIuS0gUiIwAULuwKgZyIIAIbcFREoEJpDtIffaNaZNyfCoaXUc8V40qS+czJWNkDucmQol5L5+wFhZuGipXNy5jZx18a1eyL1q9Vq55b5HZMOGuAy8sUc4GmmOSsidJhyHBSpAyB0oN4NlKEDInSEghwcqQMgdKDeDWRAg5LaASInABAi5A6NmIAsChNwWECkRmEC2h9zPTYnJnE8issvOCenSOR6Yi42BCLltKKZeI5SQu+EpPWT6E3fLlpvXkv2O6+yF3Lr9vWKVnNLpanln8rDUryTEIwi5Q8Rn6KQFCLmTpmLHLBAg5M6CSeAUkhYg5E6aih2zRICQO0smgtNISoCQOykmdsoSAULuLJkITiMpgWwOuX/6OSJjx8VEX+fXq2fc5Ie5s4pb8Qm5k7oFre8USsh9VKue8sbEwVK1SuUiIbe2LGl2Rl+ZPX209Qt1WZCQ26UutW0JEHLbkqROEAKE3EEoM4YtAUJuW5LUCUqAkDsoacaxIUDIbUORGkEJEHIHJc04NgSyNeQ2r++TYSNisnRpRJo1zZNGR+XGyyYLzwkht407NPUaoYTcvfoNke23rSuXdTtNDjmpm7eSe8Efi+S2IY9JXl5CRt51WepXEuIRhNwh4jN00gKE3ElTsWMWCBByZ8EkcApJCxByJ03FjlkiQMidJRPBaSQlQMidFBM7ZYkAIXeWTASnkZRAtobc016NyqxZUdlum4Rc1DUu0WhSl5NVOxFyhzMdoYTcC/5cLN2uuEd+nv+n9+LJWpvVkOWmVckB/91dBt10sReA59JGyJ1Ls1Vxz5WQu+LOfS5eOSF3Ls5axT1nQu6KO/e5euWE3Lk6cxXzvAm5K+a85+pVE3Ln6sxVzPPOxpB7we8RGTk6JpGIyCUXxaXe1rnVpsS/kwi5w/lMhRJy66XG43nyyRffm6D7D6lWtYrsvMM2st/eu4ajkOGohNwZAnJ4IAKE3IEwM4glAUJuS5CUCUSAkDsQZgaxKEDIbRGTUs4FCLmdEzOARQFCbouYlHIukG0ht4kJZcSomCxcGJFjGuVJ0ya516aEkNv5bbvJAUIJuc/ofrO0anaUtGhyhNTZola4AhZGJ+S2gEgJ5wKE3M6JGcCiACG3RUxKORcg5HZOzACWBQi5LYNSzqkAIbdTXopbFiDktgxKOacC2RZyv/V2VGa8GZU6dRLSq0dcYjGnl++0OCu5nfKWWjyUkHvko8/Lq2/Nlu9/nC9HH3aAtDrxKGnSqL63ojsXN0LuXJy1infOhNwVb85z+YoJuXN59ireuRNyV7w5z/UrJuTO9RmsWOdPyF2x5jvXr5aQO9dnsGKdfzaF3IsWR2TYyJjkxUW6dYnLjjvkZpsS/w4i5A7nsxRKyO1fqvbkfvXt2Sbw/lB++HmBNG18iJza7Ghp2OC/pv+OacCTIxshd45MVAU/TULuCn4D5NjlE3Ln2IRV8NMl5K7gN0AOXj4hdw5OWgU+ZULuCjz5OXjphNw5OGkV+JSzJeROmDx79EMxmT8/Ioc1yJNWLXO3TQkhd7gfqFBDbv/S12+Iy3MvvS33jpogK1aulh23qyddz24p7U85JifCbkLucG9iRk9OgJA7OSf2yg4BQu7smAfOIjkBQu7knNgrewQIubNnLjiTsgUIucs2Yo/sESDkzp654EzKFsiWkPv9D6Ly4vSobFYzIZf2ikuV3GzyUAScldxl338u9ggt5E6YRzUfffatvPDqTHnlzQ/NTVxZWjY9Ulo3byTzf18odw9/Upoc3UCuvPgMF9dttSYht1VOijkSIOR2BEtZJwKE3E5
YKepIgJDbESxlnQkQcjujpbADAUJuB6iUdCZAyO2MlsIOBLIh5P57eUSG3h+TdetFzj4zT/beM/dXcetUEXI7uGGTKBlKyD14zLMy9bVZ8tfiZSbIri+tT2okjQ4/wDSVjxacsrYv0RdUfvDSA0lcRri7EHKH68/oyQkQcifnxF7ZIUDInR3zwFkkJ0DInZwTe2WPACF39swFZ1K2ACF32UbskT0ChNzZMxecSdkC2RByPzI+KvN+iMp+++bJ6R3KR8BNyF32vedqj1BC7jN63CJtzIrtFk0ayua1aha5tvXrN0jlypUkHs+TIQ8+K5df1NHVtVurS8htjZJCDgUIuR3iUtq6ACG3dVIKOhQg5HaIS2knAoTcTlgp6kiAkNsRLGWdCBByO2GlqCOBsEPuTz+PyMTnYlK1akIu65UnNWrk9ssmC08TK7kd3bRllA0l5C7tnLQfd7Mzr5CZU4aHo5HmqITcacJxWKAChNyBcjNYhgKE3BkCcnigAoTcgXIzmAUBQm4LiJQITICQOzBqBrIgQMhtAZESgQmEGXKvWhWRIcOjsnp1RNqeGpf6B5efgFsnkJA7sNu4yEChhNy//Pan3D5kvHz57Y+yVhvv/LOtXrNW9thle3nuodvC0UhzVELuNOE4LFABQu5AuRksQwFC7gwBOTxQAULuQLkZzIIAIbcFREoEJkDIHRg1A1kQIOS2gEiJwATCDLmfnRSTz+ZGZJedE9Klczywaw5qIELuoKSLjhNKyH3hFfdIrZrVTbuSI+TGex6SO665UD79cp73Ispht/eWLTevFY5GmqMScqcJx2GBChByB8rNYBkKEHJnCMjhgQoQcgfKzWAWBAi5LSBSIjABQu7AqBnIggAhtwVESgQmEFbIrT24tRd3pUoivXrGTQZYvlZx6wQScgd2GxcZKJSQ+7AWF8mbE4dIzRrVpHGbXvLO5GHeSb3y1mx5c+YcueParuFopDkqIXeacBwWqAAhd6DcDJahACF3hoAcHqgAIXeg3AxmQYCQ2wIiJQITIOQOjJqBLAgQcltApERgAmGE3OvWiWlTEpPlyyPS/MQ8OerI8vOyycITR8gd2G0cfsh9ZMuL5eWnBkrtzWrIse36yEvjB3iB9/oNcTm2bW+Z+QI9ucO5HRi1PAsQcpfn2S1/10bIXf7mtDxfESF3eZ7d8nlthNzlc17L61URcpfXmS2f10XIXT7ntbxeVRgh99RpUfngw6hst01CuneLSyRSPnUJucOZ11BWcve6fqisNC+ZHHpbb7ny1gfMzb2VdGp/onys7UoemiRvTRoSjkaao7KSO004DgtUgJA7UG4Gy1CAkDtDQA4PVICQO1BuBrMgQMhtAZESgQkQcgdGzUAWBAi5LSBSIjCBoEPuX+dHZPTYmESjIj1NwF1v6/LXpsSfPELuwG7jIgOFEnL/tXiZ3Dnscbn5ivPl9z8XS/er75UF5u9Vq1SWGy8/T9o0bxSORpqjEnKnCcdhgQoQcgfKzWAZChByZwjI4YEKEHIHys1gFgQIuS0gUiIwAULuwKgZyIIAIbcFREoEJhBkyB0375YcNjImixdH5NjGeXLC8eWzTQkhd2C3b4kDhRJyFz+TDeZu/2PhEtlqy9pSrWqVcEXSGJ2QOw00DglcgJA7cHIGzECAkDsDPA4NXICQO3ByBsxQgJA7Q0AOD1SAkDtQbgbLUICQO0NADg9UIMiQe8YbUXnrnajUqZOQXj3iEosFeqmBD8ZK7sDJvQGzIuT2L/3Pv5bKhVfcI1PG3R6ORpqjEnKnCcdhgQoQcgfKzWAZChByZwjI4YEKEHIHys1gFgQIuS0gUiIwAULuwKgZyIIAIbcFREoEJhBUyL3wz4gMHx2TPLN4u1uXuOy4Q/ltU+JPHiF3YLdxkYGyKuSe//tf0uyMK+SLN8eFo5HmqITcacJxWKAChNyBcjNYhgKE3BkCcnigAoTcgXIzmAUBQm4LiJQITICQOzBqBrIgQMhtAZESgQkEEXJrsD1qTEwW/BGRww/Nk5Ynl+82JYTcgd2+JQ5EyG3Bn5DbAiIlnAsQcjsnZgCLAoTcFjEp5VyAkNs5MQNYFiDktgxKOacChNxOeSluWYCQ2zIo5ZwKBBFyz5wVlemvRqVWrYT06RmXKrnXoTitOWAld1psGR9EyJ0xoQghtwVESjgXIOR2TswAFgUIuS1iUsq5ACG3c2IGsCxAyG0ZlHJOBQi5nfJS3LIAIbdlUMo5FXAdci9ZFpFhw2OyYYPIeZ3yZI/dK8Yqbp00Qm6nt26pxQm5LbgTcltApIRzAUJu58QMYFGAkNsiJqWcCxByOydmAMsChNyWQSnnVICQ2ykvxS0LEHJbBqWcUwHXIffYcTH56eeI7L9/Qjq2izu9lmwrTsgdzowEGnIf2rzbJq8yYXrPr1m7jp7c4dwLjFrOBQi5y/kEl7PLI+QuZxNazi+HkLucT3A5vDxC7nI4qeX4kgi5y/HklsNLI+Quh5Naji/JZcj98ZyITH4hJtWra5uSPKlRo/y/bLLwrULIHc4HJ9CQ+6UZ7yd1lSef0DCp/bJlJ1ZyZ8tMcB6bEiDk5v7IJQFC7lyaLc6VkJt7INcECLlzbcYq9vkSclfs+c+1qyfkzrUZq9jn6yrkXrEyIkPuj8ratRFp1yYuBx9YsQJuvasIucP5bAUacodziaWP+stvf8oNdz8k33z/s2y/bV3p1+ccaXDAnhsdMO+n36T/wHHyzbyfZZu6W8oVPc6QY488qGA/Qu5sm1nOpyQBQm7ui1wSIOTOpdniXAm5uQdyTYCQO9dmrGKfLyF3xZ7/XLt6Qu5cm7GKfb6uQu6nnonKl19FvR7c2ou7Im6E3OHMeoUOuc/rc6c0adRAOrU7UWbO/sIE3mPl1afvlcqVYkVm49TO/aT9KcfIuR2ayXsfzpXLbrpf3n5umFSvlv9aWELucG5eRk1NgJA7NS/2DleAkDtcf0ZPTYCQOzUv9g5fgJA7/DngDJIXIORO3oo9wxcg5A5/DjiD5AVchNzffBeVx5+MSqVKIpf2ikvtWhVvFbfOACF38vehzT0rbMi9aMnf0vysq2TW1OFSKZYfanfoepNcdfGZcnj9fQqMN8TjMunFt6XtyccUhN+Hn9xdnh1zs+y8wzaE3DbvRmo5FSDkdspLccsChNyWQSnnVICQ2ykvxR0IEHI7QKWkMwFCbme0FHYgQMjtAJWSzgRsh9xr15g2JcOjou1KWpyUJ0c2rJiruAm5nd2yZRausCH3x59/J7cMekQmP3xbAVLfm0dIwwb7SsdWx5UK9/lXP0ifG4fJK08NLAjHf1+8pkxodkAgbIHqVaNSxXxLYdnK9WGfCuOXM4GEJCRi/s/mVrlSRGrVqCyL/15nsyy1EHAiEIlGpF7tKvLn0rVO6lM0RwQiZqVSwu7PQldXvm2dasKfX13pUte2QN3Nq8qSFeskHq+YqwFte7qu5+LPhemcs1mrJuvXR2T9BpEN/l/m1zaY/xTK/9+R/L+b+2qd92sRievfveMSRY/z9jW/5v2++ed/9tPjvfr+/z
bj6fG77pyQ1qcmZKstuWfTmTuOCU6gds1K3mdg1Rpz41rYJr8QldkfR2THHRJyUZc8ieTGH4ssXPnGJfTPWmzBC1TYkHvm7Lky9MGJ8tQDNxWoXz9grOy1+45y7mknlTgTvy5YKF2vGGh6d3eSRocfULBPXoJ/eQV/6zJiqgJeCGn+P8H9miod+5choH8wqhSz+ycYvV/1D0X8fOX2ywUBvfsj5oblfs2F2XJ3jhqAVDIP6HJhi3K/5sI0cY7/COj9qn9+5b+4cuOW8P9cmGcWcGoArMGw/5cfFHu/boJj7+/r8gNlb5/Cf1+XVxAgawid//v5QbN/fH5gnR9SFz5ew+ewN+2A2vyEiJzcLOa1bWBDIBsF/IVKNn7CfjcvIfcMi0s0KnLztZVkm3rZeMXBnZP+u4steIEKG3LPmfud3GReJjll3O0F6pf3Hy5HHbq/dGh57EYz8c28X6TPDcPk6kvOlOOPql/k9+nJHfyNy4ipC9CuJHUzjghPgHYl4dkzcuoCtCtJ3YwjwhWgXUm4/oyemgDtSlLzSnbv3xZEZKVpKaBB8Xpd4Vx4RbIG0OvzClY8F6yGLrSCOe6H1Hqs+eKdrmD2V0hr4JwNW9WqCdNyVLyQOVYpf1FGpcryz6/l/17MPJysZH7P369SZf3n/H7CRX5dj/f2NceYEK+0rVqVSjL9tbj88GN+wLXFFmZVd8uE9wI+NgSyTcBWuxL97A8dEZOlSyNy/HEJOf4YOyvDs80rlfOhJ3cqWvb2rbAh95Jly6Vpx77y7vP3F7xAssXZV8kd13aV+vvvWUT4l9/+9FZw6+81OKDo7+mOhNz2bkgquRMg5HZnS2X7AoTc9k2p6E6AkNudLZXdCBByu3GlqhsBQm67rqtXR2TqtIh8PncTSa2lIf2gWP9e2axs1gDZD4o1QM7//fy/vJDZBNC6AtoLl6uYoDn27z5+CF1Z9ykIrf859p/gWl+1lR9SW7qANMr4Pbk/mJOQadOjsnJVfti97z550vLkhGy2Gd9JSIOVQxwJ2Aq5X341Iu/NislWW+VJn5480NHpIuR2dNOWUbbChtzq0uXyu+Wwg/eRrme3lGlvvO+1L5n2+N0SM49mp742S44w/bnr1tlcOl96l5zZpomcdNzhJXIScodz8zJqagKE3Kl5sXe4AoTc4fozemoChNypebFacF0xAAAgAElEQVR3+AKE3OHPAWeQvAAhd/JWZe351VdRmfKiWcH9T/C6i+kdXc2sdvaCZhMUa0isAXFlDaP1nyvriuaEWe2s/5wfPutq6H//+Z9gWlc4ayCtYbM5rmq1ihvkFn7x5BrzEr5XZkRk9kf5DxSqVElI0yYJOeJwQsCy7lV+PxgBGyG3fivkgTHmg2+2bl3iXj9uNkLusO6BCh1yz//9L7nuzjGirUh22n5r6d+3s+y3967eXBzTtrcMvuUS2brulnLSmVeaf5EXfRw88MYe0rTxId6+hNxh3b6Mm4oAIXcqWuwbtgAhd9gzwPipCBByp6LFvtkgQMidDbPAOSQrQMidrFTp+2moPdWE21+YkFu32rUT0qFNQnbdlbA1c92iFQqH3P7vaAj43PNR+ePP/FXd22ydkLat82T77QgDbftTLzWBTENu7b0/fFRMFi6MSEPz8OaU5vxM8WeAldyp3Yu29q7QIbctREJuW5LUcSlAyO1Sl9q2BQi5bYtSz6UAIbdLXWq7ECDkdqFKTVcChNyZyc79wgTcL0VllWlTotthh+TJSc3ypIpZdc1mX6CkkFtHMe9OlfdnR2WGWdm9dl3+C9YPqZ8nzU7MX03PhkAYApmG3G+/G5XXXo9KrVoJubRXPNRWQWH4bWpMQu5wZoSQ24I7IbcFREo4FyDkdk7MABYFCLktYlLKuQAht3NiBrAsQMhtGZRyTgUIudPjXWFeKvn8CxH55tv81dv6AsQObfNk550IVNMTTe6o0kJu/+gVKyLy0stR0YcPutWskZDmzRJy0IGsgE1OmL1sCmQSci9aHJFhI2OSZ94xeV6nPF6uWmxiCLlt3qnJ1yLkTt6q1D0JuS0gUsK5ACG3c2IGsChAyG0Rk1LOBQi5nRMzgGUBQm7LoJRzKkDInTrvp59FTZAaEX3JpK4YPvxQXTGcxyrL1ClTPqKskNsv+MP/ot5DiCVL88PunXbUFiYJqWte3MeGQFACmYTco8bGZP78iBx0QELatzVJN1sRAULucG4IQm4L7oTcFhAp4VyAkNs5MQNYFCDktohJKecChNzOiRnAsgAht2VQyjkVIOROnldXb0+aHJHv5+Wv3q6zZULatWH1dvKCme+ZbMitI8VNLvjOezF5652I989RM21HHxWX44/JfxkoGwKuBdINuT8wL1Od+mJUqldPSJ+eeVLDfCOBragAIXc4dwQhtwV3Qm4LiJRwLkDI7ZyYASwKEHJbxKSUcwFCbufEDGBZgJDbMijlnAoQcifH+/GnUZlmVm+vXZO/evvIhnnStEkeYWlyfNb2SiXk9gddsiwik5+PyP9+/PfFoK1bJWTPPVjVbW1iKFSiQDoh99/LIzL0/pisWy/SoV1cDtyfgLskXELucD50hNwW3Am5LSBSwrkAIbdzYgawKEDIbRGTUs4FCLmdEzOAZQFCbsuglHMqQMi9ad7lJnCaaFZva/sL3bYy7S7at0nIjjsQPDm9MUspnk7I7Zf64suIvDg9Ktq3W7d99smTVi0S3kv92BBwIZBOyP3I+KjM+yHq9eDWXtxsJQsQcodzZxByW3An5LaASAnnAoTczokZwKIAIbdFTEo5FyDkdk7MAJYFCLktg1LOqQAhd+m8sz+OysuvmNXb6/JXb2urixOOS0gs5nRKKL4JgUxCbi27bp3Ia69H5f0Po5Iw2XblyiJNzJwe2TDutTNhQ8CmQKoh9+dzI/LMpJhUMfdl70viUpsHMKVOByG3zTs1+VqE3MlblbonIbcFREo4FyDkdk7MABYFCLktYlLKuQAht3NiBrAsQMhtGZRyTgUIuTfmXWbaW0ycHJUff8pf8Vuvnnnxm+m9vf12rPh1ejMmUTzTkNsf4o8/TQuTKVGZ/9s/c1w3T9qZF1PuwAr9JGaBXZIVSCXk1lZIg4ZFvRfantI8TxoeziruTTkTcid7F9rdj5DbgichtwVESjgXIOR2TswAFgUIuS1iUsq5ACG3c2IGsCxAyG0ZlHJOBQi5/+XVlb0fzo7KK69FvX64urK38dH5LyqMsnrb6X2YbHFbIbeOp/P90Ryd74isMQGjbg0OzpPmzRJSrRoPNJKdE/YrXSCVkHvS5Jh88lnEe9DS7YK49+0RttIFCLnDuTsIuS24E3JbQKSEcwFCbufEDGBRgJDbIialnAsQcjsnZgDLAoTclkEp51SAkDufV19OOHFiVH7+NT9Z2mZrs3q7bZ5suw1hp9MbMMXiNkNuf+hVqyIy/f/buw8wqarzj+PvzNKrIqCA0mxEsSGIIigCChil2k3EqNgCFmx/xRZRsUYQWyJGTDTRKE2UjmADsdcEogIKiIqAoCBtZ/7n3HGXZZVl9s77X
mZ2v/d5fDRy73vufM7ZjfObM++dFpP33cNF/VG1alK6HpuUQw5KEDSWcn44fWuBdENu34Pb9+L2H6wNvDhfdqnD753trSVC7u0J2fw5IbeCKyG3AiIlzAUIuc2JGUBRgJBbEZNS5gKE3ObEDKAsQMitDEo5U4HyHnL73by+P/M0t3t702a3e9vt2O7YISkd2udLHj2aTddemOIWIXfBffgPOMaOj8mKFamJb9QwKb17JqS+a1fDgUAYgXRCbv97Z9iIPPEPue3UMSEdj6JNSTrWhNzpKOmfQ8itYErIrYBICXMBQm5zYgZQFCDkVsSklLkAIbc5MQMoCxByK4NSzlSgPIfcK1e6h7yNdX2Zl6Z2b/ue231dqFnP7eLmyE4By5Dbv+J8ly/OmZMnM1+OpT70cHm3fyilfzilf0glBwKlEUgn5J44OS5vvBkPev9ffAEfrqXrS8idrpTueYTcCp6E3AqIlDAXIOQ2J2YARQFCbkVMSpkLEHKbEzOAsgAhtzIo5UwFymPI7Xdvz34jT6a/FJP8fAl2bPsQs73rv00fXNPllnFx65C74Ab9w0fHT4jJZ66NhD9q1Uq6hwEm5Tct2GWb8SSWowLbC7mXuA/Y/vpYquH/RefnS4Pd+IAt3eVByJ2ulO55hNwKnoTcCoiUMBcg5DYnZgBFAUJuRUxKmQsQcpsTM4CyACG3MijlTAXKW8j9nWtF8dyYmHy1bMvu7ZP6JKXuLoSXpgtNqXhUIXfB7c6bH5cXJ8fEh97+2Kt5QnqemJTatQkjlaa0TJcpKeT23xoY8VCe+G+UHNE2Id278juoNIuBkLs0WnrnEnIrWBJyKyBSwlyAkNucmAEUBQi5FTEpZS5AyG1OzADKAoTcyqCUMxUoLyG337392ut58tIst3vbZUl5bvNkl05JaXc4u7dNF5hy8ahDbn/7m13bkhlu3cxxu/8Tbu1UrOD6th/l1k47WksoT2+ZK1dSyP3Sy3GZ5f6qWTMplw3MD9YVR/oChNzpW2meScitoEnIrYBICXMBQm5zYgZQFCDkVsSklLkAIbc5MQMoCxByK4NSzlSgPITcy7+Nyejx8cLd240aJeXk3gmpU4fduKaLy6D4jgi5C16G/xaAfzDlYveASn/s4nb/9+6ZlMa7s44MprpMlNxWyO1/Jz3419SHJv1+l5A93TcEOEonQMhdOi+tswm5FSQJuRUQKWEuQMhtTswAigKE3IqYlDIXIOQ2J2YAZQFCbmVQypkKlOWQ2+/YftXt3p71SkwSrve23yl5bJeEtG2ToPe26aqyK74jQ+6CV/X+B3GZPDUm635Khd0HHehaTRyXlGrVCLvtZj43K/9ayO2/VfKIC7iXfROTgw9KSp+e7pcTR6kFCLlLTaZyASG3AiMhtwIiJcwFCLnNiRlAUYCQWxGTUuYChNzmxAygLEDIrQxKOVOBshpyf+0CpNFj4/KN2zHpD7/btm/fhOxML2XT9WRdPBtCbv8a16+PyZTpMXnn3dSDKatUScpxnZNyaCs+QLFeA7lU/9dC7jlz4zJpSlyqVk3KoIEJqezWDkfpBQi5S2+mcQUht4IiIbcCIiXMBQi5zYkZQFGAkFsRk1LmAoTc5sQMoCxAyK0MSjlTgbIWcvsd2zPdzm2/g9u3AqhUUeQ4t3u7TWvCR9OFFFHxbAm5C17u0qUxGeNamCz/LhV2N9jN7cztlZBd6xNcRrQksnqY4iH3mh9iMmxEXtDn/ZQ++dKyJesk7AQScoeVy+w6Qu7M/IKrCbkVEClhLkDIbU7MAIoChNyKmJQyFyDkNidmAGUBQm5lUMqZCpSlkPurZW739ri4LF+e2r3dtInbve0Cx9rs3jZdQ1EWz7aQ2792/2HK3LfiMmNmTDZujAWtcHxLnC6dElKpUpQ6jJVtAsVD7ieejMvnC+JBD27fi5sjvAAhd3i7TK4k5M5E7+drCbkVEClhLkDIbU7MAIoChNyKmJQyFyDkNidmAGUBQm5lUMqZCpSFkDvf7d6eMSsmr8/OE9/vtnKlpHR1PZJbu9YRHGVLIBtD7gLhH3+MyYuTYvLJf1O7umvUSMpvuyVk//3YrVu2VmH6r6ZoyP3+B37Xf17w7ZJLBuRLrZqsi/Qlf3kmIXcmeuGvJeQOb1d4JSG3AiIlzAUIuc2JGUBRgJBbEZNS5gKE3ObEDKAsQMitDEo5U4FcD7mXuHYRo8fFZMWKVLDYvFnC7d5OSk0CJNN1s6OKZ3PIXWCyYGFcxk2Iyfffp75R0KxpQnr1TNIPfkctmh04bkHIvXxlvtw3Ii4bNsTkhN8m5LBD+QAu02kh5M5UMNz1hNzh3La6ipBbAZES5gKE3ObEDKAoQMitiEkpcwFCbnNiBlAWIORWBqWcqUCuhty+p61vDzH7jZ93b7uHt3XvmpRWBxEemS6YHVw8F0JuT+S/XfDyz73h892SzMsTObpDUjocmR/8M0f5ECgIuf/2VFI++SQmjRol5YJz3eLgyFiAkDtjwlAFCLlDsW19ESG3AiIlzAUIuc2JGUBRgJBbEZNS5gKE3ObEDKAsQMitDEo5U4FcDLn97u3nxsRl5arUTtm99ky4h/0lpUZ1vv5vuliyoHiuhNwFVH6Njh0fly++TK3VnXdKPZiySWPWahYsJ/Nb8CH3h/8ReeyJpMTdhxsDL8qXXeow9xrwhNwaiqWvQchderNfXEHIrYBICXMBQm5zYgZQFCDkVsSklLkAIbc5MQMoCxByK4NSzlQgl0LuTW739rTp8eAhf773dtWqSTne7d4+6EB2b5sukiwqnmshdwHdR24X76QpcfF9u/1xwP5u7XZPSPVqBJ5ZtLzUb6VqhYpyy10J+cHNe+djEm43P7+rtJAJubUkS1eHkLt0Xr96NiG3AiIlzAUIuc2JGUBRgJBbEZNS5gKE3ObEDKAsQMitDEo5U4FcCbkXuZ2wY8bFC/sct9g3IT1OYPe26eLIwuK5GnJ7yvWuH/P0l2LypvuQxh+VKyelyzFJaXsYwWcWLjWVW5o8pYLMniuya/2k/PFC2pSooP5chJBbUzP9WoTc6Vtt80xCbgVESpgLEHKbEzOAogAhtyImpcwFCLnNiRlAWYCQWxmUcqYC2R5yb9wkMnlqXN5+JxUMVnG9t084PikHtiQYNF0YWVo8l0PuAtJvvnUtTJ6Py1dfpXZ116+XlN49E9KoIbu6s3TZpX1bS5bEZNGXcVmwMClfLo7Jxo2pOb6wf740bMD8pg2ZxomE3GkgGZxCyK2ASsitgEgJcwFCbnNiBlAUIORWxKSUuQAhtzkxAygLEHIrg1LOVCCbQ+4FC+Nu93ZM1vxAiwfTRZBDxctCyF3A7dvuTJuxJQg9rHVCunROShW3w5sjNwR8kP3FFy7UXuRD7bhsch/KFT2aNRU5rLXI/vu5XkscqgKE3KqcaRcj5E6batsnEnIrIFLCXICQ25yYARQFCLkVMSllLkDIbU7MAMoChNzKoJQzFcjGkHuD2/04aUpM3n0vtXu7muu93atHUnyLEo7yLVCWQm4/k2vXpdb6hx+l1np19/DU
7q7PPN9UyL51nu+6jSx2D731ofZCF2ov9qF2kew6zz1YcvdGSWnWNOH+iknjPRJSp1ZF2ZyflLXrCbm1Z5SQW1s0vXqE3Ok5lXgWIbcCIiXMBQi5zYkZQFGAkFsRk1LmAoTc5sQMoCxAyK0MSjlTgWwLuT9fEJfRbvd24QP6XFuSE7ong4dMciBQ1kLughn94uee86u+T31roUnjVAuTOjuz7nfUqg9CbRdk+13ai1ywvdi1IvH/ruCoUEFkj91TgXbTJgn3z0nxQXfRo3Z1Qm6r+SPktpItuS4ht4I7IbcCIiXMBQi5zYkZQFGAkFsRk1LmAoTc5sQMoCxAyK0MSjlTgWwJudevT+1ofe+D1I7WGm5Ha++eSdl7L3Zvmy6AHCteVkNuPw0+QH19Tp7MeiUmm93G37gLTDu0y5eOR/0yPM2xacuJ2/XmvuXIQtdP24faS9yu7fwiv34q+lDb7c5u1uznUNv1UPdzVNJByG039YTcdrYlVSbkVnAn5FZApIS5ACG3OTEDKAoQcitiUspcgJDbnJgBlAUIuZVBKWcqkA0h96efxWXseLd7e21qF+vBByXkeNeywT9kkgOBogJlOeQueJ2rV8dk/ISYfOa+1eCPnXZy7XpOTErzZnzgo/nT4FuN+DB70Rcu1F4Ul6XuQaCJIsSVKkrQcqRgp3ZD14okLzUlaR+E3GlTlfpEQu5Sk6lcQMitwEjIrYBICXMBQm5zYgZQFCDkVsSklLkAIbc5MQMoCxByK4NSzlRgR4bcfvf2C5O29COuUSMpfXslZc/mhHmmk57DxctDyF0wPf+dF5cXJ7sHr65JffizXwvXuuf4pPifE47SC2zcKPLFl6l+2gtduL1s2dahduVKSbdT23+YkNqp3bCB26ldylC7+F0Rcpd+ntK9gpA7XSnd8wi5FTwJuRUQKWEuQMhtTswAigKE3IqYlDIXIOQ2J2YAZQFCbmVQypkK7KiQe978uDz/wpbd260OSQQP3PNBEwcC2xIoTyG3N9i0SeSlWTGZMzcv2GVcyf18dO6YlLaHJTIOYMv6Klu/wT8kMuZCbb9jOybLvo5Jssivl8qVk0Hv88Kd2i7UjqU+T1A7CLnVKH9RiJDbzrakyoTcCu6E3AqIlDAXIOQ2J2YARQFCbkVMSpkLEHKbEzOAsgAhtzIo5UwFog65f/opJhNejMvH/0mlSbVqJeUkt3u7aVN2b5tOdBkpXt5C7oJp+3Z5zLX0SbXU8Ee9ugnp43rWN3ItNDhSAhvcN0MWLNoSan/z7dahtm9/5EPt5k3dgz2bJKXBbvqhdvG5IOS2W52E3Ha2JVUm5FZwJ+RWQKSEuQAhtzkxAygKEHIrYlLKXICQ25yYAZQFCLmVQSlnKhBlyO2D7RdcwL3OBd3+aHNoQroelxDf+5YDgXQEymvI7W38LmT/YNYp02LiPyzyx6Gt3M9Ql/LZv94bLPw51PZ/9x8EFD2qVnWhtguzfajd1P191/r2oXbxNUzInc5PdbhzCLnDuWV6FSF3poLuekJuBURKmAsQcpsTM4CiACG3IialzAUIuc2JGUBZgJBbGZRypgJRhNxr18Vk3PMxmf+/LQ/S69OD3dumE1tGi5fnkLtgSn0v+8lTY/Lu+6mfp2ouzO12XDJ4YGtZPta53yMLFoksXBhz7UdEln+3dcNs79CkqQu1m7hQ2/3dh9o7+iDktpsBQm4725IqE3IruBNyKyBSwlyAkNucmAEUBQi5FTEpZS5AyG1OzADKAoTcyqCUMxWwDrk//Ng9PM89XLJg52nbNgk57tiEVKxg+rIoXkYFCLm3TOzSpTEZMz5WGPbusXtSersWJnV3KRth949rXajtAu1UT22RFSu2DrVrVPc7tUWauV3aPtSuX2/Hh9rFf+wIue1+ERFy29kSchvbEnIbA1NeRYCQW4WRIhEJEHJHBM0wKgKE3CqMFIlQgJA7QmyGyljAKuT2AdWYcTH57PNUMFVn56T06ZWQxntkXxCVMSIFIhMg5N6a2j+Mcs7cuMycFZeN7iGVcffj1u6IfOl0dFIq5NgHST/8UDTUjsnKVVu3H6lRwz0k0ofaLtD24bbvS57tByG33QwRctvZEnIb2xJyGwNTXkWAkFuFkSIRCRByRwTNMCoChNwqjBSJUICQO0JshspYwCLkft/1DZ44JSa+rULM5VRHtE1Il06JnAvdMsalgLoAIfevk/qA+AX3jYn/zkt9qOQf6NrzxKTsvWf2BsGrvvdtR1I7tb9wPbVXrd461K5Vs0hPbRds71In9z4gI+RW/xVQWJCQ286WkNvYlpDbGJjyKgKE3CqMFIlIgJA7ImiGUREg5FZhpEiEAoTcEWIzVMYCmiG3D9pGu93bCxamgrZdXNuEvr2Ssnuj3AunMoalgIkAIXfJrJ+6b06MnxCTNWtSgXGLfRNy4vFJqekC4x19rFzpAm0XavvWIz7cXl0s1K5d27Udca1HmjVNPSjSf/sj1w9CbrsZJOS2sy2pMj25FdwJuRUQKWEuQMhtTswAigKE3IqYlDIXIOQ2J2YAZQFCbmVQypkKaIXc774Xl0lu9/aGjand20e2y5fOHZOSl2d6+xQvZwKE3Nuf8M2bRWa9EpPX5uRJIl+kYkWRY1z7kiMOz5e8rdtab79YBmd853poB4H2IvewSBdq+w/Bih47u1DbPygyCLXd3/3/LmsHIbfdjBJy29kSchvbEnIbA1NeRYCQW4WRIhEJEHJHBM0wKgKE3CqMFIlQgJA7QmyGylgg05Db78Yc+/yW3dv13MPf+rre2w0blL3AKmNsCmQsQMidPqHvaT12fFy++DIVLvtvVvgHUzZ2D6i0OL5dXtB+JLVbe63ry1/02HknH2inQu3mzbJjd7mFQ9GahNx2woTcdrYlVWYnt4I7IbcCIiXMBQi5zYkZQFGAkFsRk1LmAoTc5sQMoCxAyK0MSjlTgUxC7jffjsvU6THZ6HZv+wfedXC7t/2O0Ti7t03nrDwXJ+Qu/ex/+FFcJk+NiX8YrD8OOSghXY9NSrVq4cPupLvUh9p+h3ZBT+11P20davse2k1c25HmzVKhdo3q4ccr/avOjisIue3mgZDbzpaQ29iWkNsYmPIqAoTcKowUiUiAkDsiaIZRESDkVmGkSIQChNwRYjNUxgJhQm7/gLix43wrglSotWt9t3u7d0J227X8hVgZTwAFSiVAyF0qrsKT12+IyfQZMXnrnbj4gLpq1aR07ZKUQw5OBO2Ftnf4a77+1u3Qdg+I9O1HFrnd4T8VC7Xrup3iTZtKsFvbB9vVMwjRt3c/ufLnhNx2M0XIbWdbUmV2ciu4E3IrIFLCXICQ25yYARQFCLkVMSllLkDIbU7MAMoChNzKoJQzFShNyO2Drrlu9/a06XHZtEmCHdsdOySlQ/toe/2aglA8qwUIuTObnq+WpVqYfOMCa380aph0LUwSUt+1GSp6+J91f67/ICsItRe7fvvrt07DfWuiZsGDIlMtSDLZGZ7Zq8reqwm57eaGkNvOtqTKhNwK7oTcCoiUMBcg5DYnZgBFAUJuRUx
KmQsQcpsTM4CyACG3MijlTAXSDblXrvy5v68Lu/zhe273deFYPbeLmwOBqAQIuXWk33grLjPczm7/oFh/HNE2IfvuI7J0qW9BknR9vFMfZBU9/Dc2mv7cfsT/3e8G5yhZgJDbboUQctvZllSZkFvBnZBbAZES5gKE3ObEDKAoQMitiEkpcwFCbnNiBlAWIORWBqWcqcD2Qm6/o3PO3LhMfykumzeL5Lne2506JqX9kflptTkwvXmKlzsBQm69KV+7LiYTJ8Xlo09+2a/EtzApCLWbNXU9td1u7cpVCLVLq0/IXVqx9M8n5E7fSvNMQm4FTUJuBURKmAsQcpsTM4CiACG3IialzAUIuc2JGUBZgJBbGZRypgIlhdzLv4vL2OdjsmTJz60NGiWlT8+k1KubML0niiOwLQFCbv21sWBhXCa8GJPKlX0/bddXu0kseGBklcqE2plqE3JnKrjt6wm57WxLqkzIreBOyK2ASAlzAUJuc2IGUBQg5FbEpJS5ACG3OTEDKAsQciuDUs5U4NdCbr97+/XZeTJjVkzy893ubdd7u0unpLQ7nN3bppNB8e0KEHJvl4gTskiAkNtuMgi57WwJuTO0XfzVt3LDXX+T+Z99KQ13qyuDL/29tDpg78KqhNwZAnN5JAKE3JEwM4iSACG3EiRlIhEg5I6EmUEUBQi5FTEpZS5QPORe7h5IN9o9mM4/dM4fjdzu7ZN7J6ROHXZ1mk8GA2xXgJB7u0SckEUChNx2k0HIbWdLyJ2hbb9Lh0qn9q3kd32Oldlvf+IC78dk2jP3SsUKbsuAOwi5MwTm8kgECLkjYWYQJQFCbiVIykQiQMgdCTODKAoQcitiUspcoCDk3rApKa+53dszX45Jwu3erlhB5NguCWnbJkHvbfNZYIB0BQi505XivGwQIOS2mwVCbjtbQu4MbFesWiPdzrha5rzwoFTw34Nzx0n9b5KrLz5dDjukBSF3BrZcGq0AIXe03oyWmQAhd2Z+XB2tACF3tN6MlrkAIXfmhlSITsCH3PM+3yj/ejYm37hd3P5ovHtS+vZNyM612b0d3UwwUjoChNzpKHFOtggQctvNBCG3nS0hdwa27370qdzy5ydk3OO3Fla54k8PSdtW+8kpJ3Yk5M7AlkujFSDkjtab0TITIOTOzI+roxUg5I7Wm9EyFyDkztyQCtEJvP9eZRkzwW3ddkelilt2b0d3B4yEQPoChNzpW3Hmjhcg5LabA0JuO1tC7gxsZ7/9sdw/crQ8/chNhVWuv/Mx2af57nLWyV3lvEs3ZVCdSxFAAAEEEEAAAQQQQAABBLYn8Jt9YnL26RVklzrbO5M/RwABBBBAAIHyKBBLuqM8vvB0X/N7H38qN90zSp4fdVvhJYNuflDatW4pJ51wNCF3upA5fF4F1+8vzBFLfZuyVEeIS1L1Q1wY4pLIxwpjGMYiJGGk/R9DWZRq9W05OSNnhgAAABjdSURBVMxYYa4JeXs54R7q5yvURe7HP8R1IS6J/Oc/zM9y2NcVxjDK+4tyrFAWIX+YY2EGCznJYS4Lc3shKUL9HIf9/64w6ykYKwxiSJAwY4W+vRAXhrgk1H+rhXUPdX8hF1TYsUo7x21bx+XodvGQK4rLEEAAAQQQQKA8CBByb2eWV63+QbqccoW8Nv4BqVqlUnB29zOvltuv7S+HtNw7+N88eLI8/Kjk/mukXUnuz2F5egW0KylPs537r5V2Jbk/h+XtFdCupLzNeG6/3oIHT27OZ29Wbs9k+bh72pWUj3kuK6+SdiV2M0m7EjvbkioTcqfhfu6gu6TNwS2k/5knyKSZc4P2JZOeukvy8lK7CQi500DklB0uQMi9w6eAGyiFACF3KbA4dYcLEHLv8CngBkopQMhdSjBO36EChNw7lJ/BSylAyF1KME7foQKE3Hb8hNx2toTcGdou/fo7uW7oozL/88WyR8P6cvMVZ8v++zYtrErInSEwl0ciQMgdCTODKAkQcitBUiYSAULuSJgZRFGAkFsRk1LmAoTc5sQMoChAyK2ISSlzAUJuO2JCbjtbQm5jW0JuY2DKqwgQcqswUiQiAULuiKAZRkWAkFuFkSIRChByR4jNUBkLEHJnTEiBCAUIuSPEZqiMBQi5MybcZgFCbjtbQm5jW0JuY2DKqwgQcqswUiQiAULuiKAZRkWAkFuFkSIRChByR4jNUBkLEHJnTEiBCAUIuSPEZqiMBQi5MyYk5LYjDFWZntyh2La+iJBbAZES5gKE3ObEDKAoQMitiEkpcwFCbnNiBlAWIORWBqWcqQAhtykvxZUFCLmVQSlnKkDIbcfLTm4725IqE3IruBNyKyBSwlyAkNucmAEUBQi5FTEpZS5AyG1OzADKAoTcyqCUMxUg5DblpbiyACG3MijlTAUIue14CbntbAm5jW0JuY2BKa8iQMitwkiRiAQIuSOCZhgVAUJuFUaKRChAyB0hNkNlLEDInTEhBSIUIOSOEJuhMhYg5M6YcJsFCLntbAm5d4wtoyKAAAIIIIAAAggggAACCCCAAAIIIIAAAggYC9CuxBiY8ggggAACCCCAAAIIIIAAAggggAACCCCAAAJ2AoTcdrZURgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEDAWIOQ2BqY8AggggAACCCCAAAIIIIAAAggggAACCCCAgJ0AIXdI25H/fFGeHjdDNm7aLF2Oai3XXXKmVMjLC1mNyxCwFXjoifHy7ISZsmHjJunQ9kC5adDZUq1qZdtBqY5AhgJvvjdP/nD5HTLh70OleeMGGVbjcgRsBJZ9s0KuHfqofDxvgezRsL4Mve58abFXY5vBqIpAhgLzPvtShtz3d1mxao1Ur1ZFrrjwFGnXumWGVbkcAT2Bzfn5MuzR5+TxpyfJa+NHyM61axYW5/2XnjOV9ARemD5H/nTvKLn1mvOka8c2hYV5/6VnTCU9gW2t14IReP+lZ02lHSNAyB3C3f/g33TP3+QfIwYHQeHA6++Xzu0PlTN6dw5RjUsQsBWYMustGT7yORk17FqpUb2qW6/D5dAD95WL+/W0HZjqCGQgsNF9IHP6xUNk+YrvZdTwawm5M7DkUluB3w+8XTodeYj87qTj5IVps+Wt9+fJ7df2tx2U6giEFOjR7zq5+Oxe0u2Yw+Tj+QvlvCvulpeevY8PvkN6cpm+wMDBw4MPCh/5x/Pyytj7C0Nu3n/pW1Mxc4FR/54s73wwP/jv1T+cdnxhyM37r8xtqaAvsK31WjAS77/0zakYvQAhdwhzvwNmt/p1pP+ZJwRXz5z9nox6ZrI84YIYDgSyTeCjeQtl06ZN0uqAfYJbe+LZKfKf+YvkzusvyLZb5X4QKBR48PGxkkyKTH3lbRl2ywBCbtZGVgosWbZczrrkdpn+zJ8lHo9l5T1yUwgUCCTdL9UDOp0jr40bITvVrhH863Y9/ihPPnA9v2NZJlkj4L9t4EPuAzr9YauQm/dfWTNF3EgRAb9e991zj+ADw1N6HFMYcvP+i2WSjQLbWq8F98r7r2ycNe6ptAKE3KUVc+efe8VdclrPTnKsa1
PijwVfLpM/XHaHvDxmeIhqXIJAtAIXXvNnOabdwXKqW8McCGSjwKLFX8ulN46Qf//lZjn5/JsJubNxkrinQGDGq+/Kk2OmBm1KZr/9iTR2f7/+8rMIDFkfWStwzuV3Bm32/LcP/bcOBt8xUiY+dSct97J2xsrvjRUPuXn/VX7XQi688nMH3bVVyF38nnn/lQuzWH7u8dfWK++/ys/8l/VXSsgdYobP/OOtcuFZPYLexv7w/Th7nXO9zH3x4RDVuASB6AQeGjVO3nZfqfvrPVfyhjY6dkYqpYAPYfw3ZY5ovb/0OHswIXcp/Tg9OoGxk14N+hs/dMfl0vaQ3wTflBk36TUZ9/it0d0EIyFQCoHPFi6VfpcNlUR+Qta7tlD33nRx0G6HA4FsEygecvP+K9tmiPspKlBSyM37L9ZKtgn82nrl/Ve2zRL3E1aAkDuE3HlX3i0nn9Cx8OtIny9aKv7fzXxuWIhqXIKAvYD/ivLQEU+J/4R2+JBLpGqVSvaDMgICIQTGTX5N5rgdsQXtdAi5QyBySWQC0199Rx52D/YdPfKWYMx8Fxwe2rW/zBo9vLAdRGQ3w0AIbEfAP3za9+S+cVA/ObJNy+C/CfpdOtS1KxkcfBuBA4FsEigecvP+K5tmh3spLvBroSHvv1gn2SpQfL3y/itbZ4r7CiNAyB1C7fb7n5RaNarLgHN6B1e/MG2OjJvymoy856oQ1bgEAXuBux78l3y9fFUQHFaskGc/ICMgEFLAP8j33Y/+J3nxeFDh+zU/Ss0a1eQ298T6jq7NDgcC2STgexsOcA9Jm/7MvcFt+ZC71XH95bXxI4J1y4FANgn49eq/Mj9r9JZNGf2vvEd6HNdOTnR/cSCQTQLFQ27ef2XT7HAvxQV+LeTm/RfrJFsFiq9X3n9l60xxX2EECLlDqPkA5uohj8iTD14v1atWCXp0n96rs/Tu3iFENS5BwFbA99y844F/ytMP3ygVK1awHYzqCCgLsJNbGZRy6gK9Xbuys07uKr26tZcnR0+TCdNmB/3kORDINoE1P66TzicPkr/dd40c0KKZLF/xvfQ+5wZ51LUw+83eTbLtdrmfci5QPOTm/Vc5XxBZ/vKLh4a8/8ryCSvnt7e9HvK8/yrnCyTHXz4hd8gJfPzpSfKP0VODXVvHdz5crrroNInHYyGrcRkCdgLX3v6ovDB9tuTlbdnBvVfTRvLco3+yG5TKCCgJ8B9ZSpCUMRP4cum3MujmB2XJsuXif7cOufocada4gdl4FEYgE4GZs9+T+0eOlvUbNrr/bo3LWScdx4OoMwHlWlWB71f/KB1PuiyouWnT5sLNGf7bMnXr1Bbef6lyU0xB4KT+N8lnrnXp5s35wbcQYy4PuHPw+TJr9ge8/1LwpYSuwLbWa9eOh201EO+/dN2pFq0AIXe03oyGAAIIIIAAAggggAACCCCAAAIIIIAAAgggoChAyK2ISSkEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBaAUIuaP1ZjQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABRQFCbkVMSiGAAAIIIIAAAggggAACCCCAAAIIIIAAAghEK0DIHa03oyGAAAIIIIAAAggggAACCCCAAAIIIIAAAggoChByK2JSCgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBaAULuaL0ZDQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQEBRgJBbEZNSCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAtEKEHJH681oCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAooChNyKmJRCAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQiFaAkDtab0ZDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQUBQg5FbEpBQCCCCAAAIIIIAAAggggAACCCCAAAIIIIBAtAKE3NF6MxoCCCCAAAIIIIAAAggggAACCCCAAAIIIICAogAhtyImpRBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSiFSDkjtab0RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQUBQi5FTEphQACCCCAAAIIIIAAAggggAACCCCAAAIIIBCtACF3tN6MhgACCCCAAAIIIIAAAggggAACCCCAAAIIIKAoQMitiEkpBBBAAAEEEEAAgbIlMHTEU/LN8lUy7JYBoV5Yn3NvkL6/PVrO7NPlF9ev+XGdHHHCxTLu8Vtl13p1Cv9572a7S0nXhboRLkIAAQQQQAABBBBAoAwLEHKX4cnlpSGAAAIIIIAAAuVF4LuVq+XoPpdu9XJ3ql1DDtpvT7nyotOkeeMGoSgsQ+7N+fny7oefSssWzcT/c0Hg7UPuj+cvlHp1dnLh987y+aKl8rUL2o9s0zLUa+AiBBBAAAEEEEAAAQTKugAhd1mfYV4fAggggAACCCBQDgQKQu6H77hc9tlzj+AVL1+xWh4aNU4+XbhEJjwxVKpWqVRqCcuQu+jNFN3V7UPuosfwkaNl48ZNctXFp5X6/rkAAQQQQAABBBBAAIHyIEDIXR5mmdeIAAIIIIAAAgiUcYGCkPuZv9wkLfdtVvhqv1/9oxzZc4CMGvZ/0ubgFtL3vBvl+M6Hy7hJr8pezRrJfX8aIG+9P0/ufeQZ+fyLZVK3Ti3p0qG1XNb/JMnLi0tByL1b/ToyZuIrUr1aFTmjdxfpf+YJwRj5+QkZ9uhz8uKMOeLHarrHbnL1H0+Xw1vtF/y5bzvS5ajW8uF/PpM335snDXerK/834Axpf9gBkk67klXf/yB/efJ5icfiUq/uTtL9mLby7kf/k6cevL7wNb79wXw578q75ZWx90utGtXK+Ezz8hBAAAEEEEAAAQQQ+KUAITerAgEEEEAAAQQQQCDnBbYVcq/7aYO06X6BjLznKjmi9f5y2kW3yOo1a+WmK/rJ/vs0Ff/nXc+4Sq675HfSu1t7F3R/JedfdY+cc/rxcvYp3YKQe6wLxC88q4eccuIx8s6H/5MBg4fJw3cMCoLqfz8/U0b8bYz8Y8RgabDrLvLk6Gny2D9flJfHDJeKFSsEIfeKVWvktv87Tw7efy8Z9cxkefyZSfLSs/dJLB5Lqyf3wMHDpXGjXYOd3P7+evS7Tib/8y7Zo2H9YN78PX797UoZPmRgzs8jLwABBBBAAAEEEEAAgTAChNxh1LgGAQQQQAABBBBAIKsEfi3kXvfT+mCX9cQZc2XSU3dKTbfL2Yfcvh3IkKvPCe5/pAukJ854Q8Y8NqTw9fhr/O5uv1vaB8gzX39PpvzrbonFYsE5Z11yu7TYq3EQjG9wbUT8ODvXrhn82arVP0j7ngNlwt+HBn3Afcj9m72bBCG3P9Zv2CiHu4dN3n3DhdLW7fZO58GTRUNuX+PUC/4kR7c7WC7u1zOo2fnkQXLtJWe6HeiHZtWccDMIIIAAAggggAACCEQlQMgdlTTjIIAAAggggAACCJgJFITclStVlLjbIe2Pn9ZvlGYuaL7lqnOk1QF7B//Oh9yd27cqbDdy8z2jZO1PP7nQ+aLCexs/5XW57
6/PyqzRw4KQe+GXy+Svd19Z+OfX3v5ocM39Qy6RtevWyz0PPy2vvvmRrHfj+Rx8pWsxMnrkLUEQ7kPu7p3aFo7ni3Q5ZZCcfWp36dH1yFAh97/GzZB/PDdVJj55p3z03wXBznPfqsTvHOdAAAEEEEAAAQQQQKA8ChByl8dZ5zUjgAACCCCAAAJlTKAg5PbB897NGwWvbie3u7p4j2ofcnc9uo384bTuwTnbCrnvfPCfMvv5B4OQ+6uvv5MRt126Vci9OX9zEIxfNeRhWfzVchl2ywDZrV4d+eHHdcFO7aIhdy/XBuWsk7sWXu9D7vN/30O6HXNYqJB79Q9r5eg+l8qTDwyWKTPfkh/XrnPtV84uYzPKy0EAAQQQQAABBBBAIH0BQu70rTgTAQQQQAABBBBAIEsFttWTu/jtFg+5H/vXRJkwdbaMe/zWwlN9u5I33v2PPP3wjUHIPfutj4P2IwWHb1dy4H57ypUXnipdTr1C+p/xWzm1Z6fgj1935/qd1UVD7oNb7i03Xn5W8Oe+vUnb317kdoEPFP/vw7Qr8XUuv+mBoE/3xJfmyp2Dz3c71ffJ0pnhthBAAAEEEEAAAQQQsBcg5LY3ZgQEEEAAAQQQQAABY4GwIfe3330fPHjy+kt/Lz27HSnzP1ssF1x9rww8p3cQXPuQe8zEV+SaP57h/ry9vPPBfOl/1d1uF/X1cpALuvtdOlQa7lpXbr3mXPnfgsXy0BPj5ZU5H7id35fIUYcfFLQr8X26/Q5z377Eh+qj/j05aIWycdPmtELuK295WDa5c4e4MWpWrxr0Bp81+3255ra/SK2a1WVqkX7hxsyURwABBBBAAAEEEEAgKwUIubNyWrgpBBBAAAEEEEAAgdIIhA25/Rg+MH7g8bHyxZKvpd4uO8kZvbvImX26BGHykPv+7h4suUGqV6siE6bNlqpVKsvZp3RzPbW7Bbf30byFMnjoo/LVN9+5EDv1gMlHn3pBJs+c6/p4X+XaoTwuvbt3kNlvfyLvfDhfGu1WVwZf9ns53D10co1rbZLOTu5X3vjAtUV5RPLicXl5zPCg9/bm/Hw5pu9lcvKJHeWSc/uWhopzEUAAAQQQQAABBBAocwKE3GVuSnlBCCCAAAIIIIAAAmVdwD/c8ljXKmX8qNtk9wb1yvrL5fUhgAACCCCAAAIIIFCiACE3CwQBBBBAAAEEEEAAgRwRSCSSssY9ePKGux4LdpXfdcOFOXLn3CYCCCCAAAIIIIAAAnYChNx2tlRGAAEEEEAAAQQQQEBV4NW5H8qA64bLEa33dw+cvEBq16quWp9iCCCAAAIIIIAAAgjkogAhdy7OGveMAAIIIIAAAggggAACCCCAAAIIIIAAAgggEAgQcrMQEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBHJWgJA7Z6eOG0cAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAg5GYNIIAAAggggAACCCCAAAIIIIAAAggggAACCOSsACF3zk4dN44AAggggAACCCCAAAIIIIAAAggggAACCCBAyM0aQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEMhZAULunJ06bhwBBBBAAAEEEEAAAQQQQAABBBBAAAEEEECAkJs1gAACCCCAAAIIIIAAAggggAACCCCAAAIIIJCzAoTcOTt13DgCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAITdrAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBnBQi5c3bquHEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABQm7WAAIIIIAAAggggAACCCCAAAIIIIAAAggggEDOChBy5+zUceMIIIAAAggggAACCCCAAAIIIIAAAggggAAChNysAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGcFSDkztmp48YRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEECLlZAwgggAACCCCAAAIIIIAAAggggAACCCCAAAI5K0DInbNTx40jgAACCCCAAAIIIIAAAggggAACCCCAAAIIEHKzBhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQRyVoCQO2enjhtHAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQIORmDSCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjkrAAhd85OHTeOAAIIIIAAAggggAACCCCAAAIIIIAAAgggQMjNGkAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDIWQFC7pydOm4cAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAgJCbNYAAAggggAACCCCAAAIIIIAAAggggAACCCCQswKE3Dk7ddw4AggggAACCCCAAAIIIIAAAggggAACCCCAACE3awABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgZwUIuXN26rhxBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAUJu1gACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAzgoQcufs1HHjCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAoTcrAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBnBUg5M7ZqePGEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAi5WQMIIIAAAggggAACCCCAAAIIIIAAAggggAACOStAyJ2zU8eNI4AAAggggAACCCCAAAIIIIAAAggggAACCBByswYQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEclaAkDtnp44bRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEECDkZg0ggAACCCCAAAIIIIAAAggggAACCCCAAAII5KzA/wNse4q2W6G5SAAAAABJRU5ErkJggg==", - "text/html": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import plotly.express as px\n", - "\n", - "mary_token = model.tokenizer(\" Mary\").input_ids[0] # token id for Mary\n", - "\n", - "px.line(\n", - " [layer_probs.value.squeeze()[mary_token].item() for layer_probs in probs],\n", - " title=\"Probability of Mary after each layer, according to logit lens\",\n", - " labels={\"value\":\"Layer\", \"index\":\"Probability\"}\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.18" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/src/nnsight/toolbox/das.ipynb b/src/nnsight/toolbox/das.ipynb deleted file mode 100644 index 3becb0ee..00000000 --- a/src/nnsight/toolbox/das.ipynb +++ /dev/null @@ -1,1768 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "09db4a7a-4e6a-43f8-b217-2f942d1538a3", - "metadata": {}, - "source": [ - "# Distributed Alignment Search" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "b8374691-18f6-4841-8bb7-a255528967ad", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Any\n", - "\n", - "import torch\n", - "import nnsight\n", - "from nnsight import NNsightModel, LanguageModel, util\n", - "# from nnsight.Module import Module\n", - "Module = nnsight.Module\n", - "from tqdm import trange, tqdm\n", - "from nnsight.toolbox.optim.lora import LORA\n", - "from torch.utils.data import DataLoader, Dataset\n", - "from datasets import Dataset as hf_Dataset\n", - "\n", - "from transformers import get_linear_schedule_with_warmup\n", - "\n", - "# For initial llama load\n", - "# from huggingface_hub import notebook_login\n", - "# notebook_login()" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "ded734e4-c134-4541-b050-61d8a5a7df8f", - "metadata": {}, - "outputs": [], - "source": [ - "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ca7ada08-77d2-41b3-9ef1-1b341fddf377", - "metadata": {}, - "outputs": [], - "source": [ - "model = LanguageModel('sharpbai/alpaca-7b-merged', device_map=\"cuda:0\", torch_dtype=torch.bfloat16)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "59baa369-9b3f-4547-a2a8-3999abd2667c", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[LlamaRMSNorm(), Linear(in_features=4096, out_features=32001, bias=False)]" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "filtered_list = [mod for name, mod in model.named_modules() if \"model.layers\" not in name]\n", - " \n", - "# Get the last two elements\n", - "last_two_elements = filtered_list[-2:]\n", - "last_two_elements" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "01a71b3f-fede-4a62-8818-163adac114a0", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "model\n", - "model.embed_tokens\n", - "model.norm\n", - "lm_head\n" - ] - } - ], - "source": [ - "len = 0\n", - "\n", - "val = \"\"\"model.layers.0 LlamaDecoderLayer(\n", - " (self_attn): LlamaAttention(\n", - " (q_proj): Linear(in_features=4096, 
out_features=4096, bias=False)\n", - " (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n", - " (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\n", - " (o_proj): Linear(in_features=4096, out_features=4096, bias=False)\n", - " (rotary_emb): LlamaRotaryEmbedding()\n", - " )\n", - " (mlp): LlamaMLP(\n", - " (gate_proj): Linear(in_features=4096, out_features=11008, bias=False)\n", - " (up_proj): Linear(in_features=4096, out_features=11008, bias=False)\n", - " (down_proj): Linear(in_features=11008, out_features=4096, bias=False)\n", - " (act_fn): SiLUActivation()\n", - " )\n", - " (input_layernorm): LlamaRMSNorm()\n", - " (post_attention_layernorm): LlamaRMSNorm()\n", - ")\"\"\"\n", - "\n", - "for name, mod in model.named_modules(): \n", - " if (\"model.layers\" in name): continue\n", - " print(name)" - ] - }, - { - "cell_type": "raw", - "id": "b09e288c-93c8-40b7-9f95-645f25f2982a", - "metadata": {}, - "source": [ - "model" - ] - }, - { - "cell_type": "markdown", - "id": "842334dd-7214-419c-9155-62022c32731b", - "metadata": {}, - "source": [ - "## Testing instruct accuracy on prealign task" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "3e3ca1d3-43ab-410f-8b05-d43664b57476", - "metadata": {}, - "outputs": [], - "source": [ - "raw_prealign = factual_sampler(model.tokenizer, 5000, game=\"pricing_tag\")\n", - "\n", - "prealign_dataset = hf_Dataset.from_dict(\n", - " {\"input_ids\": raw_prealign[0], \"labels\": raw_prealign[1]})\n", - "prealign_dataset.set_format('torch', columns=['input_ids','labels'])\n", - "prealign_dataloader = DataLoader(\n", - " prealign_dataset, batch_size=8\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "67a9b667-4f7c-4cec-8da2-f2c48f706a54", - "metadata": {}, - "outputs": [], - "source": [ - "prealign_input_batches = torch.split(prealign_dataset[\"input_ids\"], int(5000/1000), dim=0)\n", - "prealign_labels_batches = torch.split(prealign_dataset['labels'][:,-1], int(5000/1000), dim=0)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "d658a931-3b51-48f0-b643-a25b4ee5b30a", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "0it [00:00, ?it/s]You're using a LlamaTokenizerFast tokenizer. 
- "20it [00:18, 1.07it/s]\n"
- ]
- }
- ],
- "source": [
- "per_batch_acc = []\n",
- "per_batch_res = []\n",
- "total_correct = 0\n",
- "\n",
- "for i, batch in tqdm(enumerate(prealign_input_batches)):\n",
- "    with model.forward() as runner:\n",
- "        with runner.invoke(batch) as invoker:\n",
- "            # save the logits at the final token position\n",
- "            logits = model.lm_head.output[:,-1,:].save()\n",
- "\n",
- "    # out = generator.output[:,-1].cpu()\n",
- "\n",
- "    pred = logits.value.softmax(dim=-1).argmax(dim=-1).cpu()\n",
- "    correct = (pred==prealign_labels_batches[i]).sum()\n",
- "    per_batch_acc.append(correct)\n",
- "    total_correct += (correct)\n",
- "\n",
- "    if i == 20: break\n",
- "    "
- ]
- },
- {
- "cell_type": "markdown",
- "id": "9c660549-be2e-4de4-9986-ca4ee91b0fae",
- "metadata": {},
- "source": [
- "## Actual Boundless DAS Training"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "70cd676a-ee18-40eb-9f42-50bf1b30dc55",
- "metadata": {},
- "outputs": [],
- "source": [
- "from das_utils import factual_sampler, bound_alignment_sampler, lower_bound_alignment_example_sampler\n",
- "# from das import BoundlessRotatedSpaceIntervention,compute_metrics"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "d84be357-6f49-49a8-bb8f-340b1715c9b6",
- "metadata": {},
- "outputs": [],
- "source": [
- "raw_data = bound_alignment_sampler(\n",
- "    model.tokenizer,\n",
- "    1000,\n",
- "    [lower_bound_alignment_example_sampler]\n",
- ")\n",
- "\n",
- "# 800/100/100 train/eval/test split\n",
- "raw_train = (\n",
- "    raw_data[0][:800], \n",
- "    raw_data[1][:800], \n",
- "    raw_data[2][:800],\n",
- "    raw_data[3][:800]\n",
- ")\n",
- "raw_eval = (\n",
- "    raw_data[0][800:900], \n",
- "    raw_data[1][800:900], \n",
- "    raw_data[2][800:900],\n",
- "    raw_data[3][800:900]\n",
- ")\n",
- "raw_test = (\n",
- "    raw_data[0][900:], \n",
- "    raw_data[1][900:], \n",
- "    raw_data[2][900:],\n",
- "    raw_data[3][900:]\n",
- ")\n",
- "train_dataset = hf_Dataset.from_dict(\n",
- "    {\n",
- "        \"input_ids\": raw_train[0], \n",
- "        \"source_input_ids\": raw_train[1],\n",
- "        \"labels\": raw_train[2],\n",
- "        \"intervention_ids\": raw_train[3], # we will not use this field\n",
- "    }\n",
- ").with_format(\"torch\")\n",
- "train_dataloader = DataLoader(\n",
- "    train_dataset, batch_size=16,\n",
- ")\n",
- "eval_dataset = hf_Dataset.from_dict(\n",
- "    {\n",
- "        \"input_ids\": raw_eval[0], \n",
- "        \"source_input_ids\": raw_eval[1],\n",
- "        \"labels\": raw_eval[2],\n",
- "        \"intervention_ids\": raw_eval[3], # we will not use this field\n",
- "    }\n",
- ").with_format(\"torch\")\n",
- "eval_dataloader = DataLoader(\n",
- "    eval_dataset, batch_size=16,\n",
- ")\n",
- "test_dataset = hf_Dataset.from_dict(\n",
- "    {\n",
- "        \"input_ids\": raw_test[0], \n",
- "        \"source_input_ids\": raw_test[1],\n",
- "        \"labels\": raw_test[2],\n",
- "        \"intervention_ids\": raw_test[3], # we will not use this field\n",
- "    }\n",
- ").with_format(\"torch\")\n",
- "test_dataloader = DataLoader(\n",
- "    test_dataset, batch_size=16,\n",
- ")\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "c15601b2-a7cb-4e4d-8988-a635090590fb",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "BoundlessRotatedSpaceIntervention(\n",
- "  (rotate_layer): ParametrizedRotateLayer(\n",
- "    (parametrizations): ModuleDict(\n",
- "      (weight): ParametrizationList(\n",
- "        (0): _Orthogonal()\n",
- "      )\n",
- "    )\n",
- "  )\n",
- ")" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from interventions import BoundlessRotatedSpaceIntervention\n", - "\n", - "model_hidden_dim = 4096\n", - "rotatedSpaceIntervention = BoundlessRotatedSpaceIntervention(model_hidden_dim)\n", - "\n", - "# need to define train dataloader first\n", - "\n", - "t_total = int(len(train_dataloader) * 3)\n", - "warm_up_steps = 0.1 * t_total\n", - "\n", - "optimizer_params = []\n", - "optimizer_params += [{'params': rotatedSpaceIntervention.rotate_layer.parameters()}]\n", - "optimizer_params += [{'params': rotatedSpaceIntervention.intervention_boundaries, 'lr': 1e-2}]\n", - "\n", - "optimizer = torch.optim.Adam(\n", - " optimizer_params,\n", - " lr=1e-3\n", - ")\n", - "scheduler = get_linear_schedule_with_warmup(\n", - " optimizer, num_warmup_steps=warm_up_steps,\n", - " num_training_steps=t_total\n", - ")\n", - "\n", - "epochs = 3\n", - "gradient_accumulation_steps = 4\n", - "total_step = 0\n", - "target_total_step = len(train_dataloader) * epochs\n", - "temperature_start = 50.0\n", - "temperature_end = 0.1\n", - "temperature_schedule = torch.linspace(\n", - " temperature_start, temperature_end, target_total_step\n", - ").to(torch.bfloat16).to(\"cuda\")\n", - "\n", - "# is this correct\n", - "rotatedSpaceIntervention.set_temperature(temperature_schedule[total_step])\n", - "rotatedSpaceIntervention.to(device)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "65230e84-b72b-4b37-a71c-2855d8032af5", - "metadata": {}, - "outputs": [], - "source": [ - "from torch.nn import CrossEntropyLoss\n", - "\n", - "vocab_size = 32001\n", - "def calculate_loss(logits, labels):\n", - " shift_logits = logits[..., :, :].contiguous()\n", - " shift_labels = labels[..., :].contiguous()\n", - " # Flatten the tokens\n", - " loss_fct = CrossEntropyLoss()\n", - " shift_logits = shift_logits.view(-1, vocab_size)\n", - " shift_labels = shift_labels.view(-1)\n", - " # Enable model parallelism\n", - " shift_labels = shift_labels.to(shift_logits.device)\n", - " loss = loss_fct(shift_logits, shift_labels)\n", - "\n", - " boundary_loss = 1. 
- "    loss += boundary_loss\n",
- "    \n",
- "    return loss"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "06e6c7ff-56ab-4b8e-bcd1-049a91b5878f",
- "metadata": {},
- "outputs": [],
- "source": [
- "def compute_metrics(eval_preds, eval_labels):\n",
- "    total_count = 0\n",
- "    correct_count = 0\n",
- "    for eval_pred, eval_label in zip(eval_preds, eval_labels):\n",
- "        # score only the final token position of each sequence\n",
- "        actual_test_labels = eval_label[:, -1]\n",
- "        pred_test_labels = torch.argmax(eval_pred[:, -1], dim=-1)\n",
- "        correct_labels = (actual_test_labels==pred_test_labels.cpu())\n",
- "        total_count += len(correct_labels)\n",
- "        correct_count += correct_labels.sum().tolist()\n",
- "    accuracy = round(correct_count/total_count, 2)\n",
- "    return {\"accuracy\" : accuracy}"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "11379264-fee9-4637-8c23-c71a3f25b60b",
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Epoch: 0:   0%|          | 0/50 [00:00<?, ?it/s]\n"
- ]
- },
- {
- "ename": "KeyboardInterrupt",
- "evalue": "",
- "output_type": "error",
- "traceback": [
- "[ANSI-colored KeyboardInterrupt traceback elided: the run was interrupted inside tqdm's progress-bar refresh while the training loop below was executing]"
- ]
- }
- ],
- "source": [
- "loss_track = []\n",
- "acc_track = []\n",
- "\n",
- "train_iterator = trange(\n",
- "    0, int(epochs), desc=\"Epoch\"\n",
- ")\n",
- "number = 0\n",
- "for epoch in train_iterator:\n",
- "    epoch_iterator = tqdm(\n",
- "        train_dataloader, desc=f\"Epoch: {epoch}\", position=0, leave=True\n",
- "    )\n",
- "    for step, inputs in enumerate(epoch_iterator):\n",
- "        number += 1\n",
- "\n",
- "        with model.forward(inference=False) as runner:\n",
- "            # source run: read the residual stream at layer 15, token position 80\n",
- "            with runner.invoke(inputs[\"source_input_ids\"]) as invoker: \n",
- "                source = model.model.layers[15].output[0][:,80,:]\n",
- "            \n",
- "            # base run: swap the rotated subspace of the source activation into\n",
- "            # the base activation at the same layer and position\n",
- "            with runner.invoke(inputs[\"input_ids\"]) as invoker:\n",
- "                base = model.model.layers[15].output[0][:,80,:]\n",
- "\n",
- "                rotated_source = rotatedSpaceIntervention(base, source)\n",
- "\n",
- "                model.model.layers[16].input[0][:,80,:] = rotated_source\n",
- "\n",
- "                counterfactual_outputs = model.lm_head.output.save()\n",
- "\n",
- "        eval_metrics = compute_metrics(\n",
- "            [counterfactual_outputs.value], [inputs['labels']]\n",
- "        )\n",
- "        \n",
- "        # loss and backprop\n",
- "        loss = calculate_loss(\n",
- "            counterfactual_outputs.value, inputs[\"labels\"]\n",
- "        )\n",
- "        loss_str = round(loss.item(), 2)\n",
- "        epoch_iterator.set_postfix({'loss': loss_str, 'acc': eval_metrics[\"accuracy\"]})\n",
- "\n",
- "        loss_track.append(loss_str)\n",
- "        acc_track.append(eval_metrics[\"accuracy\"])\n",
- "        \n",
- "        # accumulate gradients every step; update the weights once every\n",
- "        # gradient_accumulation_steps steps\n",
- "        if gradient_accumulation_steps > 1:\n",
- "            loss = loss / gradient_accumulation_steps\n",
- "        loss.backward()\n",
- "        if (total_step + 1) % gradient_accumulation_steps == 0:\n",
- "            optimizer.step()\n",
- "            scheduler.step()\n",
- "            rotatedSpaceIntervention.zero_grad()\n",
- "        rotatedSpaceIntervention.set_temperature(temperature_schedule[total_step])\n",
- "        total_step += 1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "id": "af5276b2-e03a-4e07-a220-86902bc35340",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "        \n",
- "        "
- ]
- },
- "metadata": {},
- "output_type": "display_data"
"display_data" - }, - { - "data": { - "application/vnd.plotly.v1+json": { - "config": { - "plotlyServerURL": "https://plot.ly" - }, - "data": [ - { - "hovertemplate": "Metric=Accuracy
Epoch=%{x}
Value=%{y}", - "legendgroup": "Accuracy", - "line": { - "color": "#636efa", - "dash": "solid" - }, - "marker": { - "symbol": "circle" - }, - "mode": "lines", - "name": "Accuracy", - "orientation": "v", - "showlegend": true, - "type": "scatter", - "x": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 36, - 37, - 38, - 39, - 40, - 41, - 42, - 43, - 44, - 45, - 46, - 47, - 48, - 49, - 50, - 51, - 52, - 53, - 54, - 55, - 56, - 57 - ], - "xaxis": "x", - "y": [ - 0.56, - 0.44, - 0.38, - 0.5, - 0.38, - 0.44, - 0.69, - 0.62, - 0.5, - 0.62, - 0.44, - 0.25, - 0.56, - 0.88, - 0.62, - 0.88, - 0.81, - 0.88, - 0.69, - 0.94, - 0.62, - 1, - 0.88, - 1, - 0.81, - 1, - 0.88, - 0.94, - 0.88, - 0.94, - 0.88, - 0.69, - 0.62, - 0.62, - 0.62, - 0.88, - 0.69, - 0.75, - 0.94, - 0.69, - 0.88, - 0.81, - 0.69, - 0.62, - 0.62, - 0.75, - 0.69, - 0.75, - 0.81, - 0.94, - 0.81, - 0.75, - 0.75, - 0.75, - 0.81, - 0.69, - 0.81 - ], - "yaxis": "y" - }, - { - "hovertemplate": "Metric=Loss
Epoch=%{x}
Value=%{y}", - "legendgroup": "Loss", - "line": { - "color": "#EF553B", - "dash": "solid" - }, - "marker": { - "symbol": "circle" - }, - "mode": "lines", - "name": "Loss", - "orientation": "v", - "showlegend": true, - "type": "scatter", - "x": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 36, - 37, - 38, - 39, - 40, - 41, - 42, - 43, - 44, - 45, - 46, - 47, - 48, - 49, - 50, - 51, - 52, - 53, - 54, - 55, - 56, - 57 - ], - "xaxis": "x", - "y": [ - 1.26, - 1.48, - 1.45, - 1.39, - 1.61, - 1.3, - 1.11, - 1.29, - 1.42, - 1.3, - 1.33, - 1.52, - 1.22, - 0.98, - 1.16, - 0.99, - 1.22, - 1.05, - 1.29, - 0.96, - 1.2, - 0.84, - 1.03, - 0.83, - 1.21, - 0.79, - 0.92, - 0.81, - 0.96, - 0.89, - 0.91, - 1.02, - 1.09, - 1.06, - 1.1, - 0.91, - 1.04, - 1, - 0.88, - 1.15, - 0.95, - 1.22, - 1.19, - 1.02, - 1.37, - 0.9, - 0.94, - 1.12, - 1.23, - 0.97, - 1.02, - 1.05, - 0.97, - 0.98, - 0.98, - 1.03, - 1 - ], - "yaxis": "y" - } - ], - "layout": { - "autosize": true, - "legend": { - "title": { - "text": "Metric" - }, - "tracegroupgap": 0 - }, - "template": { - "data": { - "bar": [ - { - "error_x": { - "color": "#2a3f5f" - }, - "error_y": { - "color": "#2a3f5f" - }, - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "bar" - } - ], - "barpolar": [ - { - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "barpolar" - } - ], - "carpet": [ - { - "aaxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "baxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "type": "carpet" - } - ], - "choropleth": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "choropleth" - } - ], - "contour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "contour" - } - ], - "contourcarpet": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "contourcarpet" - } - ], - "heatmap": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmap" - } - ], - "heatmapgl": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - 
"#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmapgl" - } - ], - "histogram": [ - { - "marker": { - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "histogram" - } - ], - "histogram2d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2d" - } - ], - "histogram2dcontour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2dcontour" - } - ], - "mesh3d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "mesh3d" - } - ], - "parcoords": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "parcoords" - } - ], - "pie": [ - { - "automargin": true, - "type": "pie" - } - ], - "scatter": [ - { - "fillpattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - }, - "type": "scatter" - } - ], - "scatter3d": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatter3d" - } - ], - "scattercarpet": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattercarpet" - } - ], - "scattergeo": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergeo" - } - ], - "scattergl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergl" - } - ], - "scattermapbox": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattermapbox" - } - ], - "scatterpolar": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolar" - } - ], - "scatterpolargl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolargl" - } - ], - "scatterternary": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterternary" - } - ], - "surface": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - 
"type": "surface" - } - ], - "table": [ - { - "cells": { - "fill": { - "color": "#EBF0F8" - }, - "line": { - "color": "white" - } - }, - "header": { - "fill": { - "color": "#C8D4E3" - }, - "line": { - "color": "white" - } - }, - "type": "table" - } - ] - }, - "layout": { - "annotationdefaults": { - "arrowcolor": "#2a3f5f", - "arrowhead": 0, - "arrowwidth": 1 - }, - "autotypenumbers": "strict", - "coloraxis": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "colorscale": { - "diverging": [ - [ - 0, - "#8e0152" - ], - [ - 0.1, - "#c51b7d" - ], - [ - 0.2, - "#de77ae" - ], - [ - 0.3, - "#f1b6da" - ], - [ - 0.4, - "#fde0ef" - ], - [ - 0.5, - "#f7f7f7" - ], - [ - 0.6, - "#e6f5d0" - ], - [ - 0.7, - "#b8e186" - ], - [ - 0.8, - "#7fbc41" - ], - [ - 0.9, - "#4d9221" - ], - [ - 1, - "#276419" - ] - ], - "sequential": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "sequentialminus": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ] - }, - "colorway": [ - "#636efa", - "#EF553B", - "#00cc96", - "#ab63fa", - "#FFA15A", - "#19d3f3", - "#FF6692", - "#B6E880", - "#FF97FF", - "#FECB52" - ], - "font": { - "color": "#2a3f5f" - }, - "geo": { - "bgcolor": "white", - "lakecolor": "white", - "landcolor": "#E5ECF6", - "showlakes": true, - "showland": true, - "subunitcolor": "white" - }, - "hoverlabel": { - "align": "left" - }, - "hovermode": "closest", - "mapbox": { - "style": "light" - }, - "paper_bgcolor": "white", - "plot_bgcolor": "#E5ECF6", - "polar": { - "angularaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "radialaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "scene": { - "xaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "yaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "zaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - } - }, - "shapedefaults": { - "line": { - "color": "#2a3f5f" - } - }, - "ternary": { - "aaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "baxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "caxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "title": { - "x": 0.05 - }, - "xaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - }, - "yaxis": { - "automargin": true, - 
"gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - } - } - }, - "title": { - "text": "Accuracy and Loss Over Epochs" - }, - "xaxis": { - "anchor": "y", - "autorange": true, - "domain": [ - 0, - 1 - ], - "range": [ - 1, - 57 - ], - "title": { - "text": "Epoch" - }, - "type": "linear" - }, - "yaxis": { - "anchor": "x", - "autorange": true, - "domain": [ - 0, - 1 - ], - "range": [ - 0.17444444444444446, - 1.6855555555555557 - ], - "title": { - "text": "Value" - }, - "type": "linear" - } - } - }, - "image/png": "iVBORw0KGgoAAAANSUhEUgAABdAAAAFoCAYAAABXHdmaAAAgAElEQVR4XuzdBZhVRRvA8ff2IoqBQdgtJnYgdqOoiFJSUkp3d3eHICkoYWAjtmIn6mcLiAgiIojE3ti738ysd11g4/ae3fOf5/meT5dz5sz85uCcfc+cdxzZqggFAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE9hJwEEDnjkAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAYH8BAujcFQgggAACCCCAAAIIIIAAAggggAACCCCAAAII5CNAAJ3bAgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABAujcAwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIRCfACvTonDgKAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAwGYCBNBtNuB0FwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCA6AQLo0TlxFAIIIIAAAggggAACCCCAAAIIIIAAAggggIDNBAig22zA6S4CCCCAAAIIIIAAAggggAACCCCAAAIIIIBAdAIE0KNz4igEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABmwkQQLfZgNNdBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgegECKBH58RRCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAjYTIIBuswGnuwgggAACCCCAAAIIIIAAAggggAACCCCAAALRCRBAj86JoxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQRsJkAA3WYDTncRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEohMggB6dE0chgAACCCCAAAIIIIAAAggggAACCCCAAAII2EyAALrNBpzuIoAAAggggAACCCCAAAIIIIAAAggggAACCEQnQAA9OieOQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEELCZAAF0mw043UUAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCIToAAenROHIUAAggggAACCCCAAAIIIIAAAggggAACCCBgMwEC6DYbcLqLAAIIIIAAAggggAACCCCAAAIIIIAAAgggEJ0AAfTonDgKAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAwGYCBNBtNuB0FwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCA6AQLo0TlxFAIIIIAAAggggAACCCCAAAIIIIAAAggggIDNBAig22zA6S4CCCCAAAIIIIAAAggggAACCCCAAAIIIIBAdAIE0KNz4igEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABmwkQQLfZgNNdBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgegECKBH58RRCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAjYTIIBuswGnuwgggAACCCCAAAIIIIAAAggggAACCCCAAALRCRBAj86JoxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQRsJkAA3WYDTncRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEohMggB6dE0chgAACCCCAAAIIIIAAAggggAACCCCAAAII2EyAALrNBpzuIoAAAggggAACCCCAAAIIIIAAAggggAACCEQnQAA9OieOQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEELCZAAF0mw043UUAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCIToAAenROHIUAAggggAACCCCAAAIIIIAAAggggAACCCBgMwEC6DYbcLqLAAIIIIAAAggggAACCCCAAAIIIIAAAgggEJ0AAfTonDgKAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAwGYCBNDjHPBwOFturNtFNm3eKlOGtZdrLq8aZ02cFo/Adz+tl1rN+smkwe3kuivPz7eKvqPmyPIV78hXr8+N5xJpOycrKyzPrnxXnnn5Xfle9Wv3Hr8ceshBcv7Zp0iDWjeo/z81bW3J70LPrXxPegybWWgbWjS4Xdo3q5X2dt7RuLeceGxFmTCoTdqvzQURQAABBBBAAAEEEEAAAQQQQAABBEq/AAH0OMf47Q9Wy4M9xsupJx4tlSocLlOHdYizJk6LR6C0BNAz/QFp3XOCfPDZN3LzNRdL9UvPlXIHHSAbf9+qAuqr5H/frzOBaR2gLq4SCaB3fbCOnHR85XybcWzlI+W4o49KexMJoKednAsigAACCCCAAAIIIIAAAggggAACthIggB7ncLftPVF27NwtNW+6QvqPmSuvLR0vRx5+SJy1cVqsAqUlgD504qPy2NOvybgBreWmqy/aiyE7O1t6j3jErEyfNaaLXH7hWbEyJeX4SAD90cm9in01/L4dIoCelCGmEgQQQAABBBBAAAEEEEAAAQQQQACBAgQIoMdxa2zZul2uu7eTDOzSxAQ9r7q7vTSvX2O/VcL/qAD7xEeelFfe/kR27tojJx5XSR1TQ26ofmHuVd/58EuZOm+5/PDzr2rlcVm58pJzpFPL2nLowQfJ+t82yy31u8uIXi3k9hsvzz1n1qLnZcKsJ+SzlbPE5/VIv9Fz5Ovv1soD9W6T4ZMWybXVqsqgrk1l3a+/q+s/IR98+o3syfSrAP+hcut1l0rrxneKx+Musg1ej0eurtXBvCTo0+H+vaTGzliqAr+vyptPTpCDDjxgP8XdezJVG3XfP5at23aY/lxS9Qzp3Oo+OeqIQ83xL7z2gXQbPEOemDXQOH313RrJVqlxLrvwTOnXsZEcXK6sOU7bDR6/QN5473PRQeVLz68itW+/2nwBkIwULj+t/c14fvLl95KpnCoedbjceXM1aaY8XS6naYMO2E+YtUw5r5Ndqm8VVB9uv/EKaXX/HeJ0OkSn9Jm58DkT7P59y19SxueVs884UTq2qC2nn3xsvnfZ3//sMvfO9VdeIGP6PZjvMXsyA
3Jd7Y5yxinHyexx3eT+tsNE31fL5w7Z6/hX3/lU2vedLPMm9JCLzjtdftmwWcbPXCZffvuzbPt7p5xwTAVpWvdWqXH9Zea8zVu2ybWqXn1vLV+xSj796gd5aeFI1ffy+7UjlgD60y+9I31Gzpb5E3vKlLlPyzc/rBOHwyHXXnG+9G7fQA4sW8bUr73mLnlJnnzhLbXa/k/JyPBJ1bNOMavt83rpe3jM9CXy0RffKmennKNMO7W8N/cYHUA/5YTK6p4/X6bMedrUpe+vrg/Vyf17Fs/YxPGfBU5BAAEEEEAAAQQQQAABBBBAAAEEECiFAgTQ4xjUGQuelTmLX1TB44lyQBmf6FzbH33+rax4bJQJFkZKk44j5NeNW0zgsOKR5eW5V96TeUtWyMzRXeSKi86SD9U5D3QaZfJc337D5SrQ+Y8MGjffBLoXTukddQBdB5fffO8Lk0qm5f23m1QalSscITfX62qC2wM6N5ZDDj5QBek3mFzW+nqRfNVFtUHXrQPdbz010QTrddFB7Bvu6yyXXnCmDOn+QL6CPYfNktff/UxG9m4pJ6sA5x9/bldB8PlS9oAypm+6vPzmx9JpwFQ587TjpVe7BnJulZPkhzUbpO6Dg+TeO66RHm3qmeO6DJqu+ve5ulYzE5T+/OsfRY/B2vWbEg6g65chNVUQ9gSVR7t767py2KHl5PVVn8mYGUukYe2bpIsK+IeysuQa9SLhHNW+Nk3uMkFg3QZt06phTXlABaYXPfWKOmep8Tj79BPV1wm75OFHn5NPVVD+9WXjJUMF1Pctb72/Wh7qOX6/FyT7HqctX3r9A/noxRnyjMqVPmDMPHl2/jA5Sb2QiRTt+JV6ibLy8dGyfcdOubNJH/PSol+nRnL4YQfLi2oMJ895Skb3fVC9RLlE/tr+j1x5Z1uTgki/0NEvbk496ZjcMc7bhkgAXQfwdZA7vxK5NyLHnqbq0hY68P/F/34yLztuvOpC82JHFx3cn790hXRRaWGuvvw885JlxJTHZM0vG+W5+cPN1xz6Z3c17SOnqDY+qJz1NSbNfkr+98NaeXbeMNMvHUDPUuNzvHpBoF9iuVwuGffwEjU+Pxn3w1Qu+XjGJo7/LHAKAggggAACCCCAAAIIIIAAAggggEApFCCAHuOg6tWsN6nAdLWLz5b+Kjipy5ff/Cx1Hxoss8d2U0HlKuZnn331o1otPNRsbph3xbleLX6WCrDeq1ZQP9B5lGz9a8deq4nf/fhrs/Flz7YN1Mrr3VGtQB8y4VF5fPlr8ti0viYIrYtup17BfkCZjL1Sy+hVyr+pVbp61bcuRbXhLxXErNmkt4zs0zJ39fJnarWyXgn9uLqeDirnVzb98Zf4VX5vHdiMFB3IHKZWyH/4wnQThI4E0PurAL/2iJTmXcbIrt17TH/0ausraraROjWvU0H2+rnH6NXG0xc8k3AAfapaJT3j0WdNsPWI8v+l4NGrqF9+8yN577lp8vsfW9XLiG4yrGdzsxo/Ur5XXw2UPSBDjq54hHkx8bka85dVADtSdu3OVC8EfpUqpx6fb2B66bNvyED1wkS/UCgoMK3r0i8LdPD71SVjpaxyq35XO2mhgsUPqS8JdNGr/avVbCtN69wqbZrelXv8s/OG7pWzvFX3seaFzguPjjAva/Q5+kWOfqFTWIlmE9HVr80WtwpeR47t17Gh3Ffz2txqdaoavTr93WenmnuzmhpTvcq/rzouUvTq8RvqdJEOze8xwXD9AmLqvKfljScmSHn1YkMX/SJm+OSF6iXQjXLBOaeaAPq27Ttk5eKxUiYj5yXFR59/J/rl1YyRndWLgbPjGptCQfhDBBBAAAEEEEAAAQQQQAABBBBAAAHbCBBAj3Go3/nwK9GByGUzB5jAaKToFb96pXUkFcf8ZS/LqKmPmxQneQOzeS938a2t5NZrL5UBXRrn24poU7joAPrS596QL16ZbdKJRIpezb1AtWO1WgGs03iEs8Mq2Oo3K3d1MFaXotqgj2nUfri4VPqMOeO7m3MGqZXXn6sg+tNz9k4jkrcTOkA7f+nLolPU/PnX3xIMhSQYDJnrr1w8Rq2QPzw3gL54Rn+1avuE3NM7D5wmOjj9/ILhuS8nxg14SKXLuTj3mI+/+E4adxiRcAC9Vfdx5kXDiyp9Sd6y7Pk3c1Z6qyD08cdUlDpqVfyvG/+Q+9TK+MtV0Pl8tRI7bxocnaanQ78pcnHV083XBDrNjP4ioLCy9Lk3ZeDYebLk4f5y1mn/9X/fc3R6GZ2259Wl49SXDIdJx/5T5Od1G80qdF0iQeuXFo0SvZmn7pNeya2d85bIPalXsgeCQRNA1wF3vbq7sBKpX7/o0CvL8yuRFzeRY/ftU8TzpUUj5e9/dkudVgNlVN9WcptKKZS36JRB5599iskJ36bXRFmzfuN+Y5P3eB1Ar3TUYSZYHik/q77f0aiX+bt4y7WXmBRKsY5NoSD8IQIIIIAAAggggAACCCCAAAIIIICAbQQIoMc41O36TpLX3vks37N0QPVNtVpWp0uJrJD++KUZZhX4vkWvwj372ibS+N6bTb7m/EosAfQXVYqP99Tq3kjZsGmLSuPRW6X5qGxW9B6jAqtut0utAF+o8lL/YgLo0bRB17fijY9EB7V1ihqdI/vquztI6yZ3St07r8u33breux/oK1v+2i69291vUrT4fB6TRkTnTt83gK7zeZ9ywtG5delr6ZzjeqX0e598LXpF+vQRHaX6pefmHvO/79fJvS0HJBxAb9BmqAns64Bv3vLS6x+a1DGRVfY6D7vO+b7yrU/k2x9/MWNa4/pLTQoSvQpdl09Wf2++BHj/k/+pIPEuk6db2+v0KPmVVR99JS27jd1rdX9+x2mP19/9XD5SK/f1Pab/WW9i+8zcoealjQ6Y6xX7j07OSY2j+6RTzOQN8Ouf6/zyOh2NfjGh71EdQNepc+rffX2hfwtiyYEeOVaPXd6vDyL57vXLkp3qq4JmXUbvN6a6ETUa9jQvVx4e1VkathumcvcHzMuqgkp+m4iuUal9blf1RNLVxDM2hYLwhwgggAACCCCAAAIIIIAAAggggAACthEggB7DUOuV1HrjxQcb1ZRrLq+615n+QFAaqYCf3uBQ585+7OnXRKet0IHq/DZm1Cfr1CS6noLyiK//7Q+VwqXbfjmydToPndYjsomoXoG+bwB97uKXTB7vyKrkSGMjedkjK9CLaoM+LxjKkuvVpql17rxWrZQ+Ua3mnWxyokc2hNyX8Ovv18p9LQeafNe1bque+8fT5j8jOmVKLAF0HQjWAeF9V6DrjVF1+plENxHVubl/2fB7gSvQdbBZ50fPW7ar1fwvvfGhyrW91GyOqdPb5C36BYLeEHX6/OWiU/LofN371qGP1yledDoWnfZn6rAO+d6JmSoNjt6wVq9Q10HlyHhcXau91FMvMOrffYPZiFSnQrmnxlXmz3Ve9R/VxqiR4/etWAeoI2lfUhVA33cF+uJnXjc54/VLGP1yQd8fBa1Av/Dc08zqcb3SXr8o2Xclfd7+RBtAj5wT7djE8J8F
DkUAAQQQQAABBBBAAAEEEEAAAQQQKMUCBNBjGNyZC1VO5rnL5e2nJ8nB5crud2brXhNMjmmd9uOrb9eYtB86OH7XLVfmHqtzkOuc2XrVuU4Fs+aXTSZvdmTzUb3iesz0JSZ3erkDy5og+75BTr3iWKdGKSyArvNHT5r9ZG6+cd0AHZDXK3OPKK9SuKh0ILoU1YZjKx9ljtMr6leonOB6VbXeELOgoL8+NpL/ffLQ9irAnPOiQa981mk1ftmw2fRXG0RyoBe2Av3vHbuMwf333Gg2+YyU0dMWyzy1CWWiAXSdR33avOX75UDvNXyW2Zj1neWTZfOWv+RzlQZn33Qj3Yc+LN+oAO9zKsiuj61c8fC9VtJHXoCM7f+Q3HzNf+ln8t44+iWHftmxb351fYzerLX/mLny5Atvy/yJPUUHliNFvzTRG5TWvet6GTF5kbknIy80dLoX/aJCj7FO1xMpOn+4/hLg4IPK5uZAT1UAvWOL2tKs3m2519aer77zqaxSnmHVL50DveZNe+dA119N3FS3q3RT49xIvYSa/fiL5iVF5H7Rlen7oWmnkWrj1tvMZqjRBNDjHZsY/tPAoQgggAACCCCAAAIIIIAAAggggAACpVSAAHqUA6uDmXojyZOOryTThnfM96wXX/tQug6eLoum9pHzzjzZrJDW6T76tG8oxx19lFklPm/JitzNDT/98geTX7zGDZeZ3No7VG7o4SoYetghB5k6dFBdBxR1EHTGyE6SkeGTZ1askikqOLpl6/ZCA+iRjRRbNLjdpOj4Sa1IHjn1MRPgXalyQi9X+csrqyC23gC1qDbozm7esk1t8JizAnqhShVS0Oah+s/1xp961bTeGHOAypv9j0p/Mmra43KMup7O+61XHuuV229/sFo6DZhqNlEtKIWLrk/nwv7w82+kX8dGcsYpx6kA/Q/y6BMrVX7sTUUG0PXGldNHdNpvvHxej8lXrr8q0JuknnRcJRWgr2dejOhA71gV2G7f7B4TBI68DNFB3Zpq40udsuXHtRuk36g5JjDeu/39Jo3Kul9/lx5t6pl7RKceWfjkSnnh1Q9MypSCvkLQXy606zNJdDqXm66+yLjoNuiNXpersf76u7XmxYF+gZC3RDau1S80TjyuoklXEik6/7zOya/zoXdoXtvkTf9O5ZTX99YZpxxrzCKbiMYSQO+q0tWcdHzlfO/9A8r4zKaekRQuejz1Jqc6fc9Hn39rcr3ffdtVagxzNg3VL3fmqAB5j7b1pbpKcfOHup+Hqw1mN/+5TZ5RL6B0kF/fRzerLzD0ink9Fvoa09VXDNpkuUpfc+Thh0QVQI93bKL8TwOHIYAAAggggAACCCCAAAIIIIAAAgiUYgEC6FEObiQXd35pJyJV6KBp9bvams0u9QrtHSoAOE7l/H5t1acmXYdO49Gq4R1yQ/ULc6/6xnufqxXQz8hP635TK84PMPmydRoYHUTXZbUKcOvUF2tVsPgg9ec6YKsDtHqDy09WzJQyGWo1eD4pXPS5eiXyoqdeVYH5XWbD0+4quHuAOr551zEmOLl4ej8TEC2qDZHG6k079XlPPjKoSDUdHNerxDeoQHAllTe95f23y83KpZGq49sf1qmUI43MiuloAug62Dto3HwTZNblovNOl1b33yF1HxpsVurn9czbsL4qwP3Ui2/n21YdfH1D5avXRW86qVc6641J/f6gedlR967r9srxrvOOP6I8dWoUvZq+whGHmYD3Qyqdj1cF47WLruPN97+Qbdv/kbKqb1VOOV6l+7lDbYp5aqFeOq3IsyvflWdeXiXf/bjebLR6qBp/veK8kcqRn3eD1bwV3VK/u9kAVadqqXbx2XtdQ294On7mMtGpbnZn+k17r69+gbRufJe5Z+IJoBfWiWMqHWnSs0QC6LPHdpO5S14yq+RdLpdcV+1886Ihki9ev5DSf77subdko7pH9M91Khu9cl3XFSk/q78Xo9UXGfplk8vllLPUZrNdWt1nvoTQJZoV6ImMTZE3OgcggAACCCCAAAIIIIAAAggggAACCJRqAQLopXp4k9c5vSL6tvt7yGCV1/z2Gy9PXsXUVKoEIgH0lxaNVCvgc9L/UBBAAAEEEEAAAQQQQAABBBBAAAEEECipAgTQS+rIpandOsWJTk0yeMIClUIjw6Rv0SuBKQjkJ0AAnfsCAQQQQAABBBBAAAEEEEAAAQQQQKA0CRBAL02jmYK+dOw/xWxYeun5Z8qALo332pQyBZejyhIuQAC9hA8gzUcAAQQQQAABBBBAAAEEEEAAAQQQ2EuAADo3BAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC+QgQQOe2QAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECAADr3AAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC0QmwAj06J45CAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQsJkAAXSbDTjdRQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEIhOgAB6dE4chQACCCCAAAIIIIAAAggggAACCCCAAAIIIGAzAQLoNhtwuosAAggggAACCCCAAAIIIIAAAggggAACCCAQnQAB9OicOAoBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEDAZgIE0G024HQXAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIDoBAujROXEUAggggAACCCCAAAIIIIAAAggggAACCCCAgM0ECKDbbMDpLgIIIIAAAggggAACCCCAAAIIIIAAAggggEB0AgTQo3PiKAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAGbCRBAt9mA010EEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB6AQIoEfnxFEIIIAAAggggAACCCCAAAIIIIAAAggggAACNhMggG6zAae7CCCAAAIIIIAAAggggAACCCCAAAIIIIAAAtEJEECPzomjEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBGwmQADdZgNOdxFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSiEyCAHp0TRyGAAAIIIIAAAggggAACCCCAAAIIIIAAAgjYTIAAus0GnO4igAACCCCAAAIIIIAAAggggAACCCCAAAIIRCdAAD06J45CAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQsJkAAXSbDTjdRQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEIhOgAB6dE4chQACCCCAAAIIIIAAAggggAACCCCAAAIIIGAzAQLoNhtwuosAAggggAACCCCAAAIIIIAAAggggAACCCAQnQAB9OicOAoBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEDAZgIE0G024HQXAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIDoBAujROXEUAggggAACCCCAAAIIIIAAAggggAACCCCAgM0ECKDbbMDpLgIIIIAAAggggAACCCCAAAIIIIAAAggggEB0AgTQo3PiKAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAGbCRBAt9mA010EEEA
jSLAN2SQqfYTQL0FMH2PD0dAXruPd+ZnyNz30ysb6LD8w7nxuWI6nEpmqVQUAfK896KyHsLEutjVDo4MUPey+xsvYnq2+8m7le2TGL992zOuNfrnb+oNnfc8KNaYkMtR9PlAsJzj38S+7083R+E9Dv63KQc+XpNRCpUVOvWqxC9ZAkzQnS9afAc9QuTihXi0reP2jmYAwGDBAgJDCoGTbFeIN1jo/WgACBQQAHGxgLCcRkCGRBgbMwAKrdMWoAAPWkqq04kQPdY7nQG6LopegPOGS9F5MuvErPR9VG9ekxq1xCpUzsupUv7Ew6u/sJxNzvdotabLqyWn2jRLCYnn5SecHnTJjUb/SVH1qhZ6fpo2CAmbVr690XBqtWOrFglsuzziOilQPRR5qC4XH1VTIqqDVE5MieQqQ9Ck6Y4snyFI+XKxaRXd7Umf5ZDdP13M2xUjuxSyx317hGVamqZGQ4ETBIgJDCpGrTFdoFMjY22u9J/BFIVYGxMVYzzEcicAGNj5my5c/4CBOj5G9l4BgG6x6qnO0DPbY4OsD/7XOSzZRHRS1XkHgerWba1asTUhoQiVdR64uk+flWbmr6qNjfVm5zq4+ijYtJerXWeibWbF6lNRmfNjrhrwevA8+w2MalbJ/190vf/bEVEVqwQWa3Cc72Ge+6hZy3XVp6NGoocpEJ0jswKZPKD0KTnVYi+3HH3Euh5Wcxdpz9bx79UoP+5CvSPV5vmtm+bni+estUXnhtOAUKCcNaVXgVTIJNjYzBFaDUC2RFgbMyOO09FYF8CjI28F9kUIEDPpr65zyZA91ibTAXouc2KqwxQb0T4uZotvVyFwD9t/jNML1lShek141KnlqilXmLu2uJejv8udmT264nAXt/73DYqXK6d2fBv2+86sI/Ip0sSM+6PPCImHdt5D+z1DOBly5Wb+p9vvo2461HrQ28SeqiaDVy7lnbzb0a/l7qE6dpMfhCKqhpPUhvB6l8YHKR+qXF5z+yE6Pr5EyY57jJM1/Yza132ML1L9MWbACGBNz+uRiCdApkcG9PZTu6FQNgFGBvDXmH6FyQBxsYgVSt8bSVAD19N09EjAnSPipkO0Pdsnl6re9lKNbt1uch33/0ZphdRS48cdYQOhkXNUE9tOZRNahPN6Wod8DUqaNabhJ7YSG8Squ7h43ImX33tuBup/qyWsClSWC0ZozYp1ZuV6vYke6xTHsuVi575qzc8zT30/Y5UM+nrqFn7qdok+2zOS04g0x+Esh2i6183DBuZI1u3RqRT+6gcVz97s+CTqwhn2SpASGBr5em3iQKZHhtN7DNtQsBEAcZGE6tCm2wVYGy0tfJm9JsA3Yw6mNYKAnSPFfE7QN+9uXq5Fb2Otw7TdQCd6izrmNrX8F210eFbb0dEB496s8OOalPPKpWzE/pFVXvmqbbozRd1X/Smped1jIletmZfh27/l6rfy1T/9ez83PXM9bl6Bn1N9UWCOzv/8JjkeJyd7/E14fL/CfjxQUi/y1NeSCznomei6+Vc/FqeZ85cR72/jlStGpcrerJxKC++uQKEBObWhpbZJ+DH2GifKj1GIHUBxsbUzbgCgUwJMDZmSpb7JiNAgJ6Mkn3nEKB7rHk2A/Tdm75jh6iNMfVSL3oJC5HtO/6cga2D8do11brpKkzODcfXqmVh9IzvDRsdd5PQZmfG5eTGUXeJk2wfuk3TZkRk7bqI255TTorKmU3jbjv1eubLVya+NFj9ZUR27NbPCuV1H3VfE/1MZfZ6tvtsy/P9+iCkv4CZ/L8Q/UC1FnrvHpkP0TducmTkmMQfUN8+MSmvNjTlQMBUAUICUytDu2wU8GtstNGWPiOQigBjYypanItAZgUYGzPry933L0CAzhuyLwECdI/vhSkB+p7d0JuQLl8p7gz13Wdmlygel8oqXNbrNOvjqCNj0u6cuG8zdFPh/u8iR16bm9hkVG8IWaaMyJpv/rqmy2FqPXP9xcAxaq12v2YZp9IHzv2rgN8fhKb8W2/G67i/SOit1kQvk8GNYp98Osddb7/JaVFp0Sw7v+LgfUMgWQFCgmSlOA+BzAv4PTZmvkc8AYFgCjA2BrNutDqcAoyN4axrUHpFgB6USvnbTgJ0j96mBui7d0vP5F6+Qq0NrjbU1LO79VFcbXDY9pyY1K1jdtCnl6mZNTsiS5Ym2l1IzUI/WoX+NY4WqVM77m7UyBEcgWx8EJryQo58tiyxMe7l3WNStmz635nFn+hfTSSWjLluAEu3BOeNtLelhAT21p6emyeQjbHRPAVahED2BRgbs18DWoBArgBjI+9CNgUI0LOpb+6zCdA91iYIAfruXdSbdK5cHZH6dVPbaNQjk+fL9Yx6vUZ6zRosi+EZM4s3yNYHoedViL5Uhej6FxiXq5no5dIYov+hfiExdIQj236PSLcuMTlabVjLgYDpAoQEpleI9tkkkK2x0SZj+opAMgKMjckocQ4C/ggwNvrjzFP2LUCAzpuxLwECdI/vRdACdI/d5XIEPAlk64OQXhP9hWkqRP9MzUQvoZZz6ZW+5VxmvJwj/10UUb+IiJe7dNsAABq6SURBVMlF5xOee3pBuNg3AUIC36h5EAL5CmRrbMy3YZyAgGUCjI2WFZzuGi3A2Gh0eULfOAL00Je4QB0kQC8Q258XEaB7BORyqwSy+UEorlZu+ffUP0P0nt3jnjf6XPddRJ54MsddWui6/lF3mRgOBIIgQEgQhCrRRlsEsjk22mJMPxFIRoCxMRklzkHAHwHGRn+cecq+BQjQeTP2JUCA7vG9IED3CMjlVglk+4PQ7iG63gfg8p4FD9H1rPbRT+TIjxsi0rplTE45mdnnVr3MAe8sIUHAC0jzQyWQ7bExVJh0BgEPAoyNHvC4FIE0CzA2phmU26UkQICeEpc1JxOgeyw1AbpHQC63SsCED0LpCtHfe9+R2XMdqVAxLtdcERUnsc8tBwKBECAkCESZaKQlAiaMjZZQ000E9ivA2MgLgoA5AoyN5tTCxpYQoNtY9fz7TICev9F+zyBA9wjI5VYJmPJBaM8QvddlMTcIT/bYsiUiw0blyK5dIlf1jkrlQ5K/NtlncB4CmRQgJMikLvdGIDUBU8bG1FrN2QiET4CxMXw1pUfBFWBsDG7twtByAvQwVDH9fSBA92hKgO4RkMutEjDpg5AO0afOcOSTTx0pVkxtLNo9+RD9X8878vlyRxodH5N257B0i1UvcUg6S0gQkkLSjVAImDQ2hgKUTiBQQAHGxgLCcRkCGRBgbMwAKrdMWoAAPWkqq04kQPdYbgJ0j4BcbpWAaR+E9gzRe6iZ6IccvP/Z5KtWOzJhkiMHqDXUr+0Xc8N3DgSCJkBIELSK0d4wC5g2NobZmr4hsD8BxkbeDwTMEWBsNKcWNraEAN3GquffZwL0/I32ewYBukdALrdKwMQPQruH6EWLxKVnj78P0XeqJVuGjcyRrVsj0qlDVI47lvDcqhc4RJ0lJAhRMelK4AVMHBsDj0oHECiAAGNjAdC4BIEMCTA2ZgiW2yYlQICeFJN1JxGgeyw5AbpH
QC63SsDUD0LJhuhz33TknfmOVD9MBe2XRa2qHZ0NlwAhQbjqSW+CLWDq2BhsVVqPQOoCjI2pm3EFApkSYGzMlCz3TUaAAD0ZJfvOIUD3WHMCdI+AXG6VgMkfhPYM0btfGpMqlf+cYb5xkyMjxzhuvQb0jUqZg5h9btXLG7LOEhKErKB0J9ACJo+NgYal8QikKMDYmCIYpyOQQQHGxgzicut8BQjQ8yWy8gQCdI9lJ0D3CMjlVgmY/kFIh+gzXnZk0WJHihQW6aFmmeeG6GOfypFv10bkjCYxad6UjUOtenFD2FlCghAWlS4FVsD0sTGwsDQcgRQFGBtTBON0BDIowNiYQVxuna8AAXq+RFaeQIDusewE6B4BudwqgaB8EJr+0l9D9A0bRKbOyJEypePSX80+z8mxqmx0NoQChAQhLCpdCqxAUMbGwALTcASSFGBsTBKK0xDwQYCx0QdkHvG3AgTovBz7EiBA9/heEKB7BORyqwSC9EEoN0TfvUA91bIu1asz+9yqlzaknSUkCGlh6VYgBYI0NgYSmEYjkKQAY2OSUJyGgA8CjI0+IPMIAnTegZQECNBT4tr7ZAJ0j4BcbpVA0D4IvTLLkYUfJtY9P6ZOXC7szMahVr2wIe4sIUGIi0vXAicQtLExcMA0GIEkBRgbk4TiNAR8EGBs9AGZRxCg8w6kJECAnhIXAbpHLi63XCCIH4T0TPSlSx0Z2C8qJUuycajlr3Bouk9IEJpS0pEQCARxbAwBO11AYC8BxkZeCgTMEWBsNKcWNraEJVxsrHr+fSZAz99ov2cwA90jIJdbJRDUD0Jr10WkahXCc6te1pB3lpAg5AWme4ESCOrYGChkGotAEgKMjUkgcQoCPgkwNvoEzWP2KUCAzouxLwECdI/vBQG6R0Aut0qAD0JWlZvOGixASGBwcWiadQKMjdaVnA4bKsDYaGhhaJaVAoyNVpbdmE4ToBtTCqMaQoDusRwE6B4BudwqAT4IWVVuOmuwACGBwcWhadYJMDZaV3I6bKgAY6OhhaFZVgowNlpZdmM6TYBuTCmMaggBusdyEKB7BORyqwT4IGRVuemswQKEBAYXh6ZZJ8DYaF3J6bChAoyNhhaGZlkpwNhoZdmN6TQBujGlMKohBOgey0GA7hGQy60S4IOQVeWmswYLEBIYXByaZp0AY6N1JafDhgowNhpaGJplpQBjo5VlN6bTBOjGlMKohhCgeywHAbpHQC63SoAPQlaVm84aLEBIYHBxaJp1AoyN1pWcDhsqwNhoaGFolpUCjI1Wlt2YThOgG1MKoxpCgO6xHAToHgG53CoBPghZVW46a7AAIYHBxaFp1gkwNlpXcjpsqABjo6GFoVlWCjA2Wll2YzpNgG5MKYxqCAG6x3IQoHsE5HKrBPggZFW56azBAoQEBheHplknwNhoXcnpsKECjI2GFoZmWSnA2Ghl2Y3pNAG6MaUwqiEE6EaVg8YggAACCCCAAAIIIIAAAggggAACCCCAAAIImCJAgG5KJWgHAggggAACCCCAAAIIIIAAAggggAACCCCAgFECBOhGlYPGIIAAAggggAACCCCAAAIIIIAAAggggAACCJgiQIBuSiVoBwIIIIAAAggggAACCCCAAAIIIIAAAggggIBRAgToRpWDxiAQHoFd0agMHfuCPD15lsyfMULKlD4wr3NPTnpFJk9/Q3bs3CUtmjSSW/p3lUI5OeHpPD1BwCCBX3/7Xe569BmZ/+ESKVK4sHTt1EJ6d23rtvDb736U2x96Slas/kYqVyovtw7oJg3rHW1Q62kKAuESePWNhTLiqany089bpU6Nw+TO63vIoVUOdjvJ2BiuWtOb4Ah0H/iAlCtTSoYMvpqxMThlo6UhEdixY6c0OKu3FC5cKK9HzU5tII/ecQ1jY0hqTDcQCIsAAXpYKkk/EDBMoN+tw6TWUYfK4xNmyjvThucF6P9ZvFwGP/KUTBhxqxQ/oKj0u224ND/teOnSsblhPaA5CIRD4J6hE9yw7r6be8tPm7fKxVff7YYEjerXlMsG3C/NTmsol3RqKe9/9JkK08fJ61OGSOFCfKEVjurTC5MEvv72e+lyzd0yfujNcuRhleWxsf+W5erLqycfuUEYG02qFG2xSWDarHdl9DMz5NjaR+QF6IyNNr0B9DXbAht/2iLte9wq780YuVdTGBuzXR2ejwACuwsQoPM+IIBARgR0KKAD9HrNevwlQL/7sWelUsWyeTNg572/WMZPeU2eGXZzRtrBTRGwXeDN+YukxpHVpOohFVyKvrcMk6anHCdnqtk9rbvcKAteHpX3C5DOvQfLjVdfLCc2qGU7G/1HIO0C677fKF+uWS+nN67n3vuTZV/IoDtGydznHxXGxrRzc0ME8hX4ecuv0rXvPdKt81ny4cfL3QB9k/qimbExXzpOQCBtAl99s1763PSYvDbpob3uydiYNmZuhAACaRAgQE8DIrdAAIG/F9gzQO816CG5qH0zaamWbtHHl+pDUw/109m3pw6DEQEEMiygl3M5p9tN8tSjN8qWX7a5S7tMf/qevKcOunO0NG5YRy44t2mGW8LtEbBbQP8t3j9iovtLLL10EmOj3e8Dvc+OwK0PPCnHH1tDShQ/QOa8/aEboC9asoqxMTvl4KmWCnyqvkzWv0g+4rBDZNWX66TmUdXk9oGXSvVqlRgbLX0n6DYCpgoQoJtaGdqFQEgE9gzQu15zj1x1aTs1A+9Yt4frf9gkHXreJgtfGROSHtMNBMwU+GP7Dhn4jxFyTM3DpV/PTmrJlqUy/MkXZfLjg/MafNuD46TGEVXl0vNbmdkJWoVACAQeHjPZ/eVVw3o1ZOS9A6R0qRLC2BiCwtKFQAnoGefDx02VZ4ffrMLzj/ICdMbGQJWRxoZA4Iuv18mEF16XSy9oJVXUfjyjx0+Xtxd84k7wYGwMQYHpAgIhEiBAD1Ex6QoCJgrsGaBffv3Dcn7bptKq6Qluc/WHJv3P5r0w1MTm0yYEQiGw9ddtcs3Nj8kJx9WS/r3Oc/u0eOkqtR/BeJk5/t68Pl6nlpM4pVFd6dz2jFD0m04gYKrA73/skCkz3pQZs+fL1HF3S+8bHmFsNLVYtCt0AjvVJvYXXHmHPPKPPnJk9Soy+60P8wJ0xsbQlZsOBUxg566onND6CpkzeYjc8sBYxsaA1Y/mIhBmAQL0MFeXviFggMCeAfp9w5+TUiVLSN+eHd3Wvfz6ApmuAgS9iRoHAgikX2D7jp3S67qHpPWZJ8ol57XMe8DmLb9IiwsGyXy1adMBxYq4/7xN1xvdzUYb1D06/Q3hjghYLqD3Bvl5669yklomSR+7olE5rsXl8taLQ+Wfz73E2Gj5+0H3/RNYsvwrNS4+KMWKJsa+HSpQ12Ol3kh06F19GRv9KwVPQkA2bPpZtv7ym/tllvv3qP4Wj1cB+jvThssYtcEv/38jLwkCCJgiQIBuSiVoBwIhFdgzQF+0ZKXcePfj8tyo26TEAcXcte0u7tBcOrY5PaQCdAuB7Aron8LqTdFuv/bSvRqig3U9K71317Yya95Cd0m
XWRMfkpwcJ7uN5ukIhFDg3YVL5B8Pj1ObZt8ih1apKNNfmy+PPvG8uweInvXK2BjCotOlQAjsPgNdN5ixMRBlo5EhEXh34ady55Dx8szwW6RShbIyavw0tczgZzJ5zD/UngT8/40hKTPdQCAUAgTooSgjnUDALIGft/wqTTsPdBulfyZbuHAh97/PnTJEypctLU9PniUTXpwj0WhMzm5+ktzQ5yJxnIhZnaA1CIREoMUF16nZPVskstvfmN7I96a+XWTd9xvllvvHyoovvpVqlSvKHYO6qzXSq4ek53QDAfMEnpr8qkya9oboTUSrHlLB/TtsVL+m21DGRvPqRYvsENgzQGdstKPu9NIcgbETX1Zj41z3lyB11V49g9XnUb0eOmOjOTWiJQggIEKAzluAAAIIIIAAAggggAACCCCAAAIIIIAAAggggMA+BAjQeS0QQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEECBA5x1AAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCA5AWagJ+fEWQgggAACCCCAAAIIIIAAAggggAACCCCAAAKWCRCgW1ZwuosAAggggAACCCCAAAIIIIAAAggggAACCCCQnAABenJOnIUAAggggAACCCCAAAIIIIAAAggggAACCCBgmQABumUFp7sIIIAAAggggAACCCCAAAIIIIAAAggggAACyQkQoCfnxFkIIIAAAggggAACCCCAAAIIIIAAAggggAAClgkQoFtWcLqLAAIIIIAAAggggAACCCCAAAIIIIAAAgggkJwAAXpyTpyFAAIIIIAAAggggAACCCCAAAIIIIAAAgggYJkAAbplBae7CCCAAAIIIIAAAggggAACCCCAAAIIIIAAAskJEKAn58RZCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApYJEKBbVnC6iwACCCCAAAIIIIAAAggggAACCCCAAAIIIJCcAAF6ck6chQACCCCAAAIIIIAAAggggAACCCCAAAIIIGCZAAG6ZQWnuwgggAACCCCAAAIIIIAAAggggAACCCCAAALJCRCgJ+fEWQgggAACCCCAAAIIIIAAAggggAACCCCAAAKWCRCgW1ZwuosAAggggAACCCCAAAIIIIAAAggggAACCCCQnAABenJOnIUAAggggAACCCCAQNIC23fslIZn9ZYpTwyWujUPT/o6TkQAAQQQQAABBBBAAAGzBAjQzaoHrUEAAQQQQAABBBBIk8CdQ8bL8y+9tc+7NTmpvox54No0PWnv2xCgZ4yWGyOAAAIIIIAAAggg4KsAAbqv3DwMAQQQQAABBBBAwC8BHaB/vfZ7ufP6Hns98oBiRaVCuYMy1hQC9IzRcmMEEEAAAQQQQAABBHwVIED3lZuHIYAAAggggAACCPgloAP07zds3u9M83uGTpBfft0mOlD/6NMVsnnLL9LzorOl18Vnu838Y/sOGfL4FHlz/mLZ9vsfUvOoQ2XQVRdKvVqJZVnWrP1B7nrsGfl46WopW6aUuraNXNyhueQG6EMGXy3jn39Nlq9aI4dWrSSP3XG1HFm9il8EPAcBBBBAAAEEEEAAAQQ8ChCgewTkcgQQQAABBBBAAAEzBZIJ0O8fMVGmzJwnw+7qJ2ecXF++WPOdnN97sDz+4CA5sUEt0QH7x5+tlhH39JcyBx0oQ8e+IK++8YHMmfyIFClcWNp3v0VOblRX+lzWTr76Zr1ceeMQGaru1ah+TXcN9OOPrSGDB3WXcgeVkkF3jZYSxYvJ8Lv7mwlGqxBAAAEEEEAAAQQQQGAvAQJ0XgoEEEAAAQQQQACBUAroAP2FV96WokUK79W/x+7sJ6c3ric6QP9g0TKZ8fS9eedcccMjcsRhleX/rrlYjm91hTx8ex9pfnpD99//tu0PObXdNTJarZ9e/IBicmn/++T9maOkZIkD3H//7sIlUrH8QVK9WiU3QH/kH32kTbPG7r/T67E/o2ajvzLhgVB60ykEEEAAAQQQQAABBMIoQIAexqrSJwQQQAABBBBAAAHZ3xroFcqVUcu2FHED9HXrN8rI+wbkid324Di1XMt2ublfF2l63kA38NaBeO5x1kXXS+9L2kqxIkXk4TGT5Z1pw/fSzl3CZfLjg/OWe3lpzvsybNyLMnfKEKqDAAIIIIAAAggggAACAREgQA9IoWgmAggggAACCCCAQGoCyS7hskZtNKqXbMk9br5vrOzYuVNu6vv3AfplF7SW0geWkAdGTpL5M0b8bYA+5YnBUrdmYr10AvTU6sfZCCCAAAIIIIAAAgiYIECAbkIVaAMCCCCAAAIIIIBA2gWSDdDf+eBTmTXxwbzn62VZ6tU6Qq7vc6E0an2lPHDrFdKySSP33+cu4TLi3oFS6sDi0vWae9wZ6GXV+uj6mP3Wh1KqZHFpqNY+10u4EKCnvazcEAEEEEAAAQQQQAABXwUI0H3l5mEIIIAAAggggAACfgnoAH3N2h/k7v/rtdcjI+qfVK5U3l3CZdqsd1VYfpF0bHO6LFTroV9982MyfuhN0rBeDblv+HOyeOlqGXXfQDlQBeNDHp8ib72/WAXuD0mhQjnSvsdtckyN6nLtFefL2vU/Sp+bHlPrnl/tbkBKgO5XpXkOAggggAACCCCAAAKZEyBAz5wtd0YAAQQQQAABBBDIooAO0PXGnfs6HCciS9582g3Qf9iw2d34c6Zao1wH61d2ayfdL2ztXrbt9z/k3mHPyXsfLpUdO3ZK/WOOklv6d5VqlSu6/379D5vk5vvHyifLvpDyZUtLd7W0S9dOLSR3DXRmoGfxBeDRCCCAAAIIIIAAAgikQYAAPQ2I3AIBBBBAAAEEEEAgmAK5AfrQu/oGswO0GgEEEEAAAQQQQAABBDIqQICeUV5ujgACCCCAAAIIIGCyAAG6ydWhbQgggAACCCCAAAIIZF+AAD37NaAFCCCAAAIIIIAAAlkSIEDPEjyPRQABBBBAAAEEEEAgIAIE6AEpFM1EAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQ8FeAAN1fb56GAAIIIIAAAggggAACCCCAAAIIIIAAAgggEBABAvSAFIpmIoAAAggggAACCCCAAAIIIIAAAggggAACCPgrQIDurzdPQwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAiIAAF6QApFMxFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQT8FSBA99ebpyGAAAIIIIAAAggggAACCCCAAAIIIIAAAggERIAAPSCFopkIIIAAAggggAACCCCAAAIIIIAAAggggAAC/goQoPvrzdMQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEAiJAgB6QQtFMBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAX8FCND99eZpCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgERIEAPSKFoJgIIIIAAAggggAACCCCAAAIIIIAAAggggIC/AgTo/nrzNAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGACBCgB6RQNBMBBBBAAAEEEEAAAQQQQAABBBBAAAEEEE
DAXwECdH+9eRoCCCCAAAIIIIAAAggggAACCCCAAAIIIIBAQAQI0ANSKJqJAAIIIIAAAggggAACCCCAAAIIIIAAAggg4K8AAbq/3jwNAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAICACBOgBKRTNRAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEPBXgADdX2+ehgACCCCAAAIIIIAAAggggAACCCCAAAIIIBAQAQL0gBSKZiKAAAIIIIAAAggggAACCCCAAAIIIIAAAgj4K0CA7q83T0MAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAIiAABekAKRTMRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE/BUgQPfXm6chgAACCCCAAAIIIIAAAggggAACCCCAAAIIBESAAD0ghaKZCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAv4KEKD7683TEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAIiQIAekELRTAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAF/BQjQ/fXmaQgggAACCCCAAAIIIIAAAggggAACCCCAAAIBESBAD0ihaCYCCCCAAAIIIIAAAggggAACCCCAAAIIIICAvwIE6P568zQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBgAgQoAekUDQTAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAwF8BAnR/vXkaAggggAACCCCAAAIIIIAAAggggAACCCCAQEAECNADUiiaiQACCCCAAAIIIIAAAggggAACCCCAAAIIIOCvAAG6v948DQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCAgAgToASkUzUQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDwV4AA3V9vnoYAAggggAACCCCAAAIIIIAAAggggAACCCAQEAEC9IAUimYigAACCCCAAAIIIIAAAggggAACCCCAAAII+CtAgO6vN09DAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQCIjA/wMkgjfw0EDiQAAAAABJRU5ErkJggg==", - "text/html": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import pandas as pd\n", - "import numpy as np\n", - "import plotly.express as px\n", - "\n", - "data = pd.DataFrame({\n", - " 'Epoch': np.arange(1, len(acc_track) + 1),\n", - " 'Accuracy': acc_track,\n", - " 'Loss': loss_track\n", - "})\n", - "\n", - "# Melting the DataFrame\n", - "data_melted = data.melt(id_vars='Epoch', var_name='Metric', value_name='Value')\n", - "\n", - "# Creating a line plot\n", - "fig = px.line(data_melted, x='Epoch', y='Value', color='Metric',\n", - " labels={'Epoch': 'Epoch', 'Value': 'Value', 'Metric': 'Metric'},\n", - " title='Accuracy and Loss Over Epochs')\n", - "fig.show()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.18" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/src/nnsight/toolbox/interventions/.ipynb_checkpoints/transformations-checkpoint.py b/src/nnsight/toolbox/interventions/.ipynb_checkpoints/transformations-checkpoint.py deleted file mode 100644 index b4cf2d08..00000000 --- a/src/nnsight/toolbox/interventions/.ipynb_checkpoints/transformations-checkpoint.py +++ /dev/null @@ -1,18 +0,0 @@ -import torch - -class RotateLayer(torch.nn.Module): - """A linear transformation with orthogonal initialization.""" - - def __init__(self, n, init_orth=True): - super().__init__() - weight = torch.empty(n, n) - # we don't need init if the saved checkpoint has a nice - # starting point already. - # you can also study this if you want, but it is our focus. 
- if init_orth: - torch.nn.init.orthogonal_(weight) - self.weight = torch.nn.Parameter(weight, requires_grad=True) - - def forward(self, x): - return torch.matmul(x.to(self.weight.dtype), self.weight) - diff --git a/src/nnsight/toolbox/interventions/interventions.py b/src/nnsight/toolbox/interventions/interventions.py deleted file mode 100644 index e60995f7..00000000 --- a/src/nnsight/toolbox/interventions/interventions.py +++ /dev/null @@ -1,96 +0,0 @@ -import torch -from abc import ABC, abstractmethod - -from transformations import RotateLayer -from interventions_utils import sigmoid_boundary - - -class Intervention(torch.nn.Module, ABC): - - """Intervention the original representations.""" - def __init__(self): - super().__init__() - self.trainble = False - - @abstractmethod - def set_interchange_dim(self, interchange_dim): - pass - - @abstractmethod - def forward(self, base, source): - pass - -class TrainbleIntervention(Intervention): - - """Intervention the original representations.""" - def __init__(self): - super().__init__() - self.trainble = True - - -class BoundlessRotatedSpaceIntervention(TrainbleIntervention): - - """Intervention in the rotated space with boundary mask.""" - def __init__(self, embed_dim, **kwargs): - super().__init__() - rotate_layer = RotateLayer(embed_dim) - - # Orthogonal parametrizations constrains the weights of the rotate layer to be - # orthogonal during training - self.rotate_layer = torch.nn.utils.parametrizations.orthogonal( - rotate_layer) - - self.intervention_boundaries = torch.nn.Parameter( - torch.tensor([0.5]), requires_grad=True) - self.temperature = torch.nn.Parameter(torch.tensor(50.0)) - self.embed_dim = embed_dim - self.intervention_population = torch.nn.Parameter( - torch.arange(0, self.embed_dim), requires_grad=False) - - def get_boundary_parameters(self): - return self.intervention_boundaries - - # temporarily in here - def zero_grad(self): - """Zero out the gradients of all parameters in this module.""" - for param in self.parameters(): - if param.grad is not None: - param.grad.zero_() - - def get_temperature(self): - return self.temperature - - def set_temperature(self, temp: torch.Tensor): - self.temperature.data = temp - - def set_interchange_dim(self, interchange_dim): - """interchange dim is learned and can not be set""" - assert False - - def forward(self, base, source): - batch_size = base.shape[0] - rotated_base = self.rotate_layer(base) - rotated_source = self.rotate_layer(source) - # get boundary - intervention_boundaries = torch.clamp( - self.intervention_boundaries, 1e-3, 1) - boundary_mask = sigmoid_boundary( - self.intervention_population.repeat(batch_size, 1), - 0., - intervention_boundaries[0] * int(self.embed_dim), - self.temperature - ) - - # print(boundary_mask.get_device()) - boundary_mask = torch.ones( - batch_size).unsqueeze(dim=-1).to(boundary_mask.device)*boundary_mask - # boundary_mask = boundary_mask.to(rotated_base.dtype) - # interchange - rotated_output = (1. 
- boundary_mask)*rotated_base + boundary_mask*rotated_source - # inverse output - output = torch.matmul(rotated_output, self.rotate_layer.weight.T) - # return output.to(base.dtype) - return output - - def __str__(self): - return f"BoundlessRotatedSpaceIntervention(embed_dim={self.embed_dim})" \ No newline at end of file diff --git a/src/nnsight/toolbox/interventions/interventions_utils.py b/src/nnsight/toolbox/interventions/interventions_utils.py deleted file mode 100644 index 84cee591..00000000 --- a/src/nnsight/toolbox/interventions/interventions_utils.py +++ /dev/null @@ -1,6 +0,0 @@ -import torch - -def sigmoid_boundary(_input, boundary_x, boundary_y, temperature): - """Generate sigmoid mask""" - return torch.sigmoid((_input - boundary_x) / temperature) * \ - torch.sigmoid((boundary_y - _input) / temperature) \ No newline at end of file diff --git a/src/nnsight/toolbox/interventions/transformations.py b/src/nnsight/toolbox/interventions/transformations.py deleted file mode 100644 index b4cf2d08..00000000 --- a/src/nnsight/toolbox/interventions/transformations.py +++ /dev/null @@ -1,18 +0,0 @@ -import torch - -class RotateLayer(torch.nn.Module): - """A linear transformation with orthogonal initialization.""" - - def __init__(self, n, init_orth=True): - super().__init__() - weight = torch.empty(n, n) - # we don't need init if the saved checkpoint has a nice - # starting point already. - # you can also study this if you want, but it is our focus. - if init_orth: - torch.nn.init.orthogonal_(weight) - self.weight = torch.nn.Parameter(weight, requires_grad=True) - - def forward(self, x): - return torch.matmul(x.to(self.weight.dtype), self.weight) - diff --git a/src/nnsight/toolbox/lens/__init__.txt b/src/nnsight/toolbox/lens/__init__.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/src/nnsight/toolbox/lens/lens.py b/src/nnsight/toolbox/lens/lens.py deleted file mode 100644 index 94dee0c5..00000000 --- a/src/nnsight/toolbox/lens/lens.py +++ /dev/null @@ -1,63 +0,0 @@ -import torch -from abc import ABC, abstractmethod - -from nnsight import LanguageModel, Module -from typing import Any, Callable, Dict, List, Union - -from functools import reduce - -class Lens(ABC): - - def __init__(self): - super().__init__() - self.tuned = False - - @abstractmethod - def __call__(self) -> Any: - pass - -class LogitLens(Lens): - """Returns the probability distribution over all tokens at specified points in the model. 
- - """ - - def __init__(self, - layers: List[Module], - decoding_modules: List[Module], - ) -> None: - super().__init__() - - self.tuned = False - - self.layers = layers - self.decoder = lambda x: reduce(lambda acc, func: func(acc), decoding_modules, x) - - def __call__( - self, - indices: Union[int, List] = None, - as_probs: bool = True, - ) -> List[Any]: - - observations = [] - - for layer in self.layers: - logits = self.decoder(layer.output[0]) # apply decoder to hidden state - - observations.append(logits.save()) - - # Return logits over a specific token - if type(indices) == List or type(indices == int): - observations = [logits[:,indices,:] for logits in observations] - - # Raw logits to probabilities - if as_probs: - observations = [logits.softmax(dim=-1) for logits in observations] - - self.observations = observations - -class TunedLens(Lens): - - def __init__(self): - super().__init__() - self.tuned = True - diff --git a/src/nnsight/toolbox/lens/utils.py b/src/nnsight/toolbox/lens/utils.py deleted file mode 100644 index 8887e1aa..00000000 --- a/src/nnsight/toolbox/lens/utils.py +++ /dev/null @@ -1,34 +0,0 @@ -import torch -from abc import ABC, abstractmethod - -from nnsight import LanguageModel, Module -from typing import Any, Callable, Dict, List, Union - -from functools import reduce - -import torch - -def shift_preds(x: torch.Tensor, shift: int): - """Shift predictions by a given amount. - - Args: - x: (batch x seq_len) predictions to shift. - shift: Amount to shift by. Positive values take from the end, negative values - from the start. - - Returns: - (batch x (seq_len - shift)) predictions shifted by the given amount. - """ - if shift > 0: - return x[:, :-shift] - if shift < 0: - return x[:, -shift:] - - return x - -# implement kl loss - - - - - \ No newline at end of file diff --git a/src/nnsight/toolbox/logit_lens.ipynb b/src/nnsight/toolbox/logit_lens.ipynb deleted file mode 100644 index 8d3517c4..00000000 --- a/src/nnsight/toolbox/logit_lens.ipynb +++ /dev/null @@ -1,1144 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "8d66d5e6-d3ba-42c8-9b07-04e6c635ef64", - "metadata": {}, - "outputs": [], - "source": [ - "from lens import LogitLens\n", - "from nnsight import LanguageModel" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "e4141eea-916f-4f67-9f26-0303636c0617", - "metadata": {}, - "outputs": [], - "source": [ - "model = LanguageModel('EleutherAI/pythia-1B', device_map=\"cuda\")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "511bb57b-8334-4c1e-aafa-c3d97247e07a", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "from abc import ABC, abstractmethod\n", - "\n", - "from nnsight import LanguageModel, Module\n", - "from typing import Any, Callable, Dict, List, Union\n", - "\n", - "from functools import reduce\n", - "\n", - "class Lens(ABC):\n", - "\n", - " def __init__(self):\n", - " super().__init__()\n", - " self.tuned = False\n", - " \n", - " @abstractmethod\n", - " def __call__(self) -> Any:\n", - " pass\n", - "\n", - "class LogitLens(Lens):\n", - " \"\"\"Returns the probability distribution over all tokens at specified points in the model.\n", - " \n", - " \"\"\"\n", - "\n", - " def __init__(self, \n", - " layers: List[Module],\n", - " decoding_modules: List[Module],\n", - " ) -> None:\n", - " super().__init__()\n", - " \n", - " self.tuned = False\n", - " \n", - " self.layers = layers\n", - " self.decoder = lambda x: reduce(lambda acc, func: func(acc), decoding_modules, 
x)\n", - "\n", - " def __call__(\n", - " self,\n", - " indices: Union[int, List] = None,\n", - " as_probs: bool = True,\n", - " ) -> List[Any]:\n", - " \n", - " observations = []\n", - " \n", - " for layer in self.layers:\n", - " logits = self.decoder(layer.output[0]) # apply decoder to hidden state\n", - "\n", - " observations.append(logits.save())\n", - "\n", - " # Return logits over a specific token\n", - " if type(indices) == List or type(indices == int):\n", - " observations = [logits[:,indices,:] for logits in observations]\n", - " \n", - " # Raw logits to probabilities\n", - " if as_probs:\n", - " observations = [logits.softmax(dim=-1) for logits in observations]\n", - "\n", - " self.observations = observations" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "7f6bad60-ebb1-4cf2-9d65-b4a76bbcc921", - "metadata": {}, - "outputs": [], - "source": [ - "layers = model.gpt_neox.layers\n", - "out = [model.gpt_neox.final_layer_norm, model.embed_out]\n", - "\n", - "lens = LogitLens(layers, out,)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "ee021f9c-fec1-46b7-ba6c-9a20b15dce78", - "metadata": {}, - "outputs": [], - "source": [ - "prompt = \"John and Mary went to the store. John handed the milk to\"\n", - "\n", - "with model.forward() as runner:\n", - " with runner.invoke(prompt) as invoker:\n", - " # should i call save here? rather than in the class.\n", - " lens(-1, True)\n", - "\n", - "probs = lens.observations" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "ae51870e-7dba-47c3-8809-3f3a882553d2", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - " \n", - " " - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.plotly.v1+json": { - "config": { - "plotlyServerURL": "https://plot.ly" - }, - "data": [ - { - "hovertemplate": "variable=0
Probability=%{x}
Layer=%{y}", - "legendgroup": "0", - "line": { - "color": "#636efa", - "dash": "solid" - }, - "marker": { - "symbol": "circle" - }, - "mode": "lines", - "name": "0", - "orientation": "v", - "showlegend": true, - "type": "scatter", - "x": [ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15 - ], - "xaxis": "x", - "y": [ - 1.632572639209684e-05, - 5.580651304626372e-06, - 3.4238528314745054e-06, - 2.03671970666619e-06, - 2.5014485345309367e-06, - 2.499897846064414e-06, - 8.576263098802883e-06, - 1.7609881979296915e-05, - 7.855775038478896e-06, - 5.198211511014961e-05, - 0.0011284518986940384, - 0.19210906326770782, - 0.20111367106437683, - 0.023693585768342018, - 0.09532259404659271, - 0.5439841151237488 - ], - "yaxis": "y" - } - ], - "layout": { - "autosize": true, - "legend": { - "title": { - "text": "variable" - }, - "tracegroupgap": 0 - }, - "template": { - "data": { - "bar": [ - { - "error_x": { - "color": "#2a3f5f" - }, - "error_y": { - "color": "#2a3f5f" - }, - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "bar" - } - ], - "barpolar": [ - { - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "barpolar" - } - ], - "carpet": [ - { - "aaxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "baxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "type": "carpet" - } - ], - "choropleth": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "choropleth" - } - ], - "contour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "contour" - } - ], - "contourcarpet": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "contourcarpet" - } - ], - "heatmap": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmap" - } - ], - "heatmapgl": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmapgl" - } - ], - "histogram": [ - 
{ - "marker": { - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "histogram" - } - ], - "histogram2d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2d" - } - ], - "histogram2dcontour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2dcontour" - } - ], - "mesh3d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "mesh3d" - } - ], - "parcoords": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "parcoords" - } - ], - "pie": [ - { - "automargin": true, - "type": "pie" - } - ], - "scatter": [ - { - "fillpattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - }, - "type": "scatter" - } - ], - "scatter3d": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatter3d" - } - ], - "scattercarpet": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattercarpet" - } - ], - "scattergeo": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergeo" - } - ], - "scattergl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergl" - } - ], - "scattermapbox": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattermapbox" - } - ], - "scatterpolar": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolar" - } - ], - "scatterpolargl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolargl" - } - ], - "scatterternary": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterternary" - } - ], - "surface": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "surface" - } - ], - "table": [ - { - "cells": { - "fill": { - "color": "#EBF0F8" - }, - "line": { - "color": "white" - } - }, - "header": { - "fill": { - "color": "#C8D4E3" - }, - "line": { - "color": "white" - } - }, - "type": "table" - } - ] - }, - "layout": { - "annotationdefaults": { - "arrowcolor": 
"#2a3f5f", - "arrowhead": 0, - "arrowwidth": 1 - }, - "autotypenumbers": "strict", - "coloraxis": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "colorscale": { - "diverging": [ - [ - 0, - "#8e0152" - ], - [ - 0.1, - "#c51b7d" - ], - [ - 0.2, - "#de77ae" - ], - [ - 0.3, - "#f1b6da" - ], - [ - 0.4, - "#fde0ef" - ], - [ - 0.5, - "#f7f7f7" - ], - [ - 0.6, - "#e6f5d0" - ], - [ - 0.7, - "#b8e186" - ], - [ - 0.8, - "#7fbc41" - ], - [ - 0.9, - "#4d9221" - ], - [ - 1, - "#276419" - ] - ], - "sequential": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "sequentialminus": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ] - }, - "colorway": [ - "#636efa", - "#EF553B", - "#00cc96", - "#ab63fa", - "#FFA15A", - "#19d3f3", - "#FF6692", - "#B6E880", - "#FF97FF", - "#FECB52" - ], - "font": { - "color": "#2a3f5f" - }, - "geo": { - "bgcolor": "white", - "lakecolor": "white", - "landcolor": "#E5ECF6", - "showlakes": true, - "showland": true, - "subunitcolor": "white" - }, - "hoverlabel": { - "align": "left" - }, - "hovermode": "closest", - "mapbox": { - "style": "light" - }, - "paper_bgcolor": "white", - "plot_bgcolor": "#E5ECF6", - "polar": { - "angularaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "radialaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "scene": { - "xaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "yaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "zaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - } - }, - "shapedefaults": { - "line": { - "color": "#2a3f5f" - } - }, - "ternary": { - "aaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "baxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "caxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "title": { - "x": 0.05 - }, - "xaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - }, - "yaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - } - } - }, - "title": { - "text": "Probability of Mary after each layer, according to logit lens" - }, - "xaxis": { - "anchor": "y", - "autorange": true, - "domain": [ - 0, 
- 1 - ], - "range": [ - 0, - 15 - ], - "title": { - "text": "Probability" - }, - "type": "linear" - }, - "yaxis": { - "anchor": "x", - "autorange": true, - "domain": [ - 0, - 1 - ], - "range": [ - -0.03021918985829567, - 0.5742053417017511 - ], - "title": { - "text": "Layer" - }, - "type": "linear" - } - } - }, - "image/png": "iVBORw0KGgoAAAANSUhEUgAABbkAAAFoCAYAAAB67/YgAAAgAElEQVR4XuzdCbxV0/vH8eec05xCyjz7GX7mMmQoQ5KiNMoUIqWkQuYMmUXSoFKJkClKEmXIrP6IDJnlZ0pIk+bh3PNfz772de/t3u4Z1tr7nHs/+//34ld7P2vv99rnynev8+xIwmzChgACCCCAAAIIIIAAAggggAACCCCAAAIIIIBADgpECLlzcNY4ZQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAFPgJCbGwEBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgZwUIuXN26jhxBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAUJu7gEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBnBUg5M7ZqePEEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAi5uQcQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEclaAkDtnp44TRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEECDk5h5AAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyFkBQu6cnTpOHAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQICQm3sAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIGcFCLlzduo4cQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAFCbu4BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZwVIOTO2anjxBFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQIubkHEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBHJWgJA7Z6eOE0cAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAg5OYeQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEMhZAULunJ06ThwBBBBAAAEEEEAAAQQQQAABBBBAAAEEEECAkJt7AAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBnBQi5c3bqOHEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABQm7uAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGcFSDkztmp48QRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEECLm5BxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQRyVoCQO2enjhNHAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQIOTmHkAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDIWQFC7pydOk4cAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAgJCbewABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgZwUIuXN26jhxBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAUJu7gEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBnBUg5M7ZqePEEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBAi5uQcQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEclaAkDtnp44TRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEECDk5h5AAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyFkBQu6cnTpOHAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQICQm3sAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIGcFCLlzduo4cQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAFCbu4BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZwVIOTO2anjxBFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQIuZO8B154ZaZcc8fojfbecvNacnj9faRn5zayx647JFmt9N1+X7hYTjjtcrn5ivOlQ8tjM6rnn/O0xwfIzjtsU2qt5mddJQ0O2FPuuLarFB/fr/HKUwNlh23rZnQ+6Rz8xTc/yrXG/ef5f8glF7STC886ZaMy/jlutWVtef3Z+6RSLLbRPp9/9YOc0eMW79c/nTG2xH3SOT9Xx9x636Py4oz/k/XrN8hHL29837kaN4i6S5Ytl0ate8l1vTvJ2e2apjSk3qsH7buHDLj+opSOY+f0BL7+/mdpf+GNMvTW3nJC4wZyw90PyayPvpDXnr43vYIhHWXz52rxS0jHJNmfzSFxMSwCCCCAAAIIIIAAAggggAACOSdAyJ3klPmhxHW9z5Y9d9vJO2r9hg3yv58XyMNPTZMVq1bLpLG3ZhwE2wxjkg1SNEytW2dzaVj/vxuF3D/9+of8nwm1Wp54lNSsUc277rYXXC99Luwgxx11cJJ66e922U33y4effCPDbu8tO22/tXeexTf/OqtVreKFn00bH7LRPjffO06ef/k9WbtufdaH3N//b760Pr+fnNOhmbRt0Vj23mMnee2dj2TUYy/IM6P7p4+ZJUcScmfJRCRxGsVD7g8/+Vr+WLjE/Dw4MomjM9/ljqGPS+VKMbny4jMyKmbz52rxEyluksxnNdmfzRldNAcjgAACCCCAAAIIIIAAAgggUIEECLmTnGw/lHhs2HVm1fNeRY76Zt4v0q7LDdLlzJPl8os6Jlmx5N1shjHpBClljb9q9VppeEp3GXZbn0BC7s6X3iV5eXny6NDrSnX1r7PR4QdIJBKRBwZcXmTfNWvXybHt+sh+e+0q78/5KutDbg3N9Lofuu9q78GDboNGTTAraL8k5GYld0Y/X1I9uHjInerxme6v3744xPy8zeaQu/g1JvNZTednc6aWHI8AAggggAACCCCAAAIIIIBAeRYg5E5ydjcVcmuJw1p0l2OPPEgG3thDpr46S66+fZQXzN408GGJx/NEW4bo9sbMOd6K3G9NMK7bXrvvKBeceYo0O/ZQ73/7IfP1l54j8378TV56/f9k9Zp1csA+u8mNl50n/9nt35Yo4ye+KhOmvCG62rpG9aqy9392lsu6nea1c9DNP+eH77tGHnrqJZn96dcSjUbl+KPqyw2XnSub1azu7Zdsu5L5C/6S8y+7q0CserUqsv8+u8vipctlyrjbi0i+8/5n0v3qQfLgwCvlyEP3K1F5UxYb4nE56IQuRY67+LzW0vP8thvVKrzK/q77nzCtFAbJNvW2LNhPf//6AWNNu5O2MnjMs0VC7nc/+Nybj6+++1ESCTFtXbaWzqe3kNYnHe0dr6tWm5x2mdx1XTeZPP1d+ejzb+Uu09al7y0jZVD/nnLScYcVOZ8zut8slSpVkvH39yvxmn/85XcZ8u
CzZnX8l2Ze18rWdbeUk084wmt3U7lyJfN7E2X0+BeKHHvoQXubufum4Neu6H66nH9GC2/e7xv9jHz21TxZsmyF7LbTtuZeOllaNv13lW2Ls6+SY444yJt3vVf69+0srZodVeK5JVNv1eo1xnCivPr2h7Joyd+i7Xo0iO9rzqmwuc7/8HGTvfu8dq2a0rjhgeYB0Gne/v5K7n59zpE//1oik156W/5evlL22XMXufWqC8w3JXYs9VNZvF1JWZ4jzDk88NgUmTFhkNTbaouCun+vWCXHtO0t55u57nNhe/Gv690PPpPf/ljkfWOgzUmN5KJzT/VWEut24z0Pydyv/yddTMucO80K4yaN6sstV15Q6rkW/o2yPqu673JzTjr/r749W1asXC2777K9dOvUUk48Jv9ng26bctXf1+u6zzwQef29OZ6zeuvPJf25oP+8qevQMbVNjn4uE+bDcESDfeW0VsdJj2vuK7VdSf+B4+TTL7+Xq3ueJYNGT/DuSf3Gx6nNjpZLu3YoOG9d3TzEfPZ++e1P2d60PdLP8azZX3iekx++rUTD/Y7rXOTXnx51k+y/926i33TQz/Hsz76RNeYztN02daVN80ZeK6NYLFpirZIe3iVTR/e5dfCj5jP2g9QyPy/bn3Ks9zNCf57MnDJcNq9ds0gLl/P63FniZ7X4SZUUcr/34Vzvs//dD7963xI68L97yGXmM6PXrNvCRUvluPaXyoB+F8mcud+ZOf7Yu0/2MT/39bOkf9dN533QqGdE7+Ul5mfz5rU38z5/V/Y4wztfNgQQQAABBBBAAAEEEEAAAQTKowAhd5KzuqmQ+6/Fy7yVwl3PbukFOy+/+YFc3n+E1N9/Tzn91ONlTxNkawDxzvufm+D3Xjmt5XGmFcWJYpYdy7inp3sh34g7L/PCKD+M2W6braTF8Q3llKZHmHBjmfQ3YXnUBDgvjR/ghW7PTXvHC1qu6nmmF1rrauVRJsybaYKSF80+dbaoVRBya69wHe/g/f4jn3zxvdw+ZLycdOxhBX2Nkw25NZD9yIStXfre7R3b6LADzMroL71rfXLEDXLgP+G6kmrI/+kX87xwX1dXF9+SsdCw5pLrhniH3n9HH6lWtaposF5aYKR9gluee603D91NOOlvGsxvVrOGOd/95RYT4vk9uTUgbd25n7Q4oaFcaB40aMisAaMGx344rwF+4za9vIcRGjZqWLSXaR9yWrf+st3WdWTU3X0Lxvl5/p+iobL2NvdD8sLnmpeXMA8UrpRam9XwwuYtNt/MhMC/er3eO7U/0Qtb9YHGrNlzpdf1Q81q+d5S3/RK1x7jarxoyTIZO+gqY1DVC8jbnH+9F1zeePl5Xij7kmk7M+yhSXLPDT1McN7QG/rU867zAu7dd9nOG2OXHbcV7V1efFPrZOpde8cYL1zToE0fuPz511ITjD5igs3qBcG+rpbvcvnd3nitTJsbrX3LoEe8QF/Dfz/k3n3n7UxQ3EBaNGko+hm66Z6HPZOJD+b3Ti9pKxxyJ+OpweAJHS+X3l3aF+nnPvHFt70HUC8/eY/XYkjvae3/rvNy4H93N5+TeXLzoHFysjm3m8yv6aYB8JszP/FC2ovOaWUst/Fa6JS1JfNZ1Rp6n/7y20ITWHYy99ZW8sKrM72fD6PvuUKONvduWa4aTJ/d8zYvpNd7Yh9zn+pK7JuNvd6rT4680fsslnYdV5gHN2+agPu2qy+UA4yBBqkPPDrFa8lUWk/u2wY/5p2n/mzpb94jsK15uDTppXe8BwJ6/+r86jddOnS90fvs6Dzot0HuGDpeVpoWT3ova5unkja9T048va/5LDXyHlDp50ZDW/3M7mbunavNz7462of/3Y9l4ANPy7mnnST6AKikrXjIrfdFWXW0tdHJZ18tVapU8q5Nf6ZqCD336x+9dwTMmjpCaptzKtyTWx9UFP+sahul0n5m+e9L0IdYOv/aaqmXMYqbh3z3P/SczDQ/C/TzoO9U8D83+jBJ/z3ToskRJuRe5X3WdF79z02fG4bJtz/8Yt7rcIG5V7eSX8zPJfXeYbu65lsu//68Kuu+5fcRQAABBBBAAAEEEEAAAQQQyCUBQu4kZ8sPuccNvsYLHnXbsCEuGpTeM+IpLxB67qHbvODr5Tc/NEHHcC+I0MDV33SVn664fd6sXPSDXw2mWpggRVcHapjlhzHaeqNwgKqriPvd9aCMvfcqOeKQfWWpqaP7+qv3dIxvzQpA7ZetgbAG3/456yrOwi9s1DovvjZL/u/FkSY4rpL0Sm4NA+d+8z85/aKbZfgdl3rtSnTFddOOfb2AXl+WqZsG7o3b9PaCwJJeFKn7JGPh76d/f2TItaXOlH+dGjrdbkI3nQsNL9X41wUL5aQzr/RCur8WLy0Scmug/OsCs7LUrAT1+43rIEe16umtDNUHCH6wpCGjzo+/PfHcDC84etUE6xog6qaB4CMTpssbEwd7rsU3DWU1HKtRvZoJfP9dVayh1Pzf/5Jnx9zsHfLBnK+9wEtX4OtLTXXT1bQaBPs9uXUsDbR1BX3hF57qQxQNSl98LH/F/akmEPzD3CfvPDfUhHWVSzVMtt6CPxfLWjO/u5pV4/72+KRXjcXj8r65n/TbARoYL1r8d5EVurpKdfL0d+TaXp302Y734km9Z/Thjr8Nf/g5GfHI8/LxK2OkainnWjzkTsZT+7p//f0vBd+m0PE0GKxkHhbpZ+xjszr/nF53eN9uOKN1k4LzGfvkS94Dj9efuc+bLw10n5w8Q54wD3T8b0uUClroN5L5rH78+XfmHG6XwbdcUmTltobF+m2JjmZFdVmuuupY75tB/S823zA4vOAMtBf9dXeOKTjvkq5Dw9mjW19irv8E80LQswuO1aB15KPPbzLkVhO93/x7Qn+m1W/WVTp3bO79DLx7+JPy+KTXvM+FBsW66YruU865Rv5jHsCVFnLrfoc272YeFDYpaFei94iuzNc5KbwyXx/46cPFmS+MKFh5X3huiofcydTRldD6kM0P+LWefitHH6SVFnKX9Fkt6R4pvpJb78effv3de0Dp3/v686np6ZdLM/NwTR+0+D+L9AGWPsjytzGPT/VWts8xnxv9jOuLi/XbM7dd/e83YX4zP1+WmW9L/Nd8W4INAQQQQAABBBBAAAEEEEAAgfIoQMid5Kz6oURJu+tqTv26eOOGB3i/7Yfc2q7kkAP/7d99yEndvJWt/a/oXKTMVbc+YFbsfSHvPj+sIOQuHkz7q4Q1gDq73YleuPz082/I9Dfel99+XySr166VhAlRtV2Bhhv6wkL/nIuvstZVrBqeaSivK5STXcldUsitF6KB0bgJL8tbk4Z4bVOmv/GBXH3bKHn92ftKXDWsxyRjoftpGK5bsiH3F6b9wYVX3FPwMGDo2Iny1POvm3MbKpNefKtIyK1135r1qTw95XX54acFstysitSA7u/lq0y7haO8Fdl+sKQrSXuc27pg3lauWuO1DrjgzBYFv66rpo84ZL8iIWHx+0UfRDz6zMtmlfv33gOPvESet7JVV2LrSnTdkgm5tRXMD
z/9Jq88NbDIEI+Y2hoqfvDSA/ltI0zIXdesdtX+3pvakq2nHo+Yuda2GRq6a1uF9es3eNeg56L3yOEndzcroI/Y6D73x/dNtX+99rH3N50nXWWs91FJLxjV/Yq3K0nF0/886nkf3+FSue/mS7yVs9rK594HJnj++g0Kf/vqu5/MCuSbCh7oaDg84YU35JNXx5rV8Rt/O6E032Q+q/68vWmC4MLhbeGaZbn611G8hrYQObnT1aItkM5sc4IX1he/js++nCdnXnzrRgG53x9+Uyu5daX6Ry+PLnL52grmBGN7k1lR3qvfEPnBrAb3H7z4O7a/8Ebv85ZKyK33qQbM+o2WwtszU98033YZt9FDH3+f4iF3MnX0wcwA81kqfj/qr+lnuKSV3Dpe8QdSJd0XxUNu/Xmo32goHEzrcRqy/2Fa+ujDLf9z07d7R7ngjH8/N0+bz41+Q8U/zzuHPe49VNBvkxx/dH3zoOy/3opzNgQQQAABBBBAAAEEEEAAAQTKswAhd5Kz64cS2oN37//s5B0VMf+n7RU02Cu8+SG39pr1+wv7Paa1l3Lxr9RrOwFdqa0r8fwwRlfu6epNf9P+xxoc+avDNQx82vRY1hYXump7s82qm7D7L29FavGQu/AqS62nIXTfm0cUrOzMNOTWlhW64lBXcmu4rq02tKWK9qwuaUvWQo9NNeTezLTNaHZGXznYtIq5+/ruXruDExo3MMFzJ/NQID8M8tuV+G1lWp54pHQyDw40WI2Y8FJXquvK7cIhtx5/drumRS5Ha2nY+4pZNa4tGTS0e/7h24v0TS98gK4qb3N+P9ljlx28edzJrN7X1cS6IvzLb39KKeTudMnt3op1bbFSeNMHHeo79dE7vZYOGnJrWxBdIbypLZl62upEX7C60KyI79f7HNlv712latXKXpsUDYk15NY2Gwc0Od9bxVvaywL9sK64afGwrqTzLRxyJ+updfQBxP6mr73O6WPPviIPPvGizHhmkNcKxu+DXtxSj9MAX4PajqbtkIbD2iNfezGnsiXzWfVXTH847QFvpX/xTb8FUJarfx2zp48u0tbHb6fkPzgr6Tq0LUbXKwbKyLsu83q4+5u2cOl4Uf9NruQuycQLuU2rEv05pivUN5gV0PqwrfDW87rBssC0Vkkl5Nb7VOdE+3MX3qa9/r5ou5XiD/T8fYqH3MnUeXPWJ16/fn+FtF9rnPm2hn57x1bIre1v9P0D+uAkZu7Hwpu2LamzRW0vwE72c6P3iraQmWJW8Os7BPTFvUcdur9cc8lZRb6Bkco9zL4IIIAAAggggAACCCCAAAIIZLsAIXeSM1TWiycLlykp5Nbf16/et2xa8kpu7berQYYfxmhAqEGhv+nX+zXg81djai1dKam9kf1N+7pqKFw85NZVgPvutWvBftoDXPvI+q0uMg25tfClN97vvTxwyK29pLEJuEaaNhSlvXAyWQvdL9WQW1csatinQdRgs1L34mvvkwmj+nuBbPGQW1dcfmd6177y1L0FK3M1ID7UrKrUl0GWFXJ/979fvT7WukL4LROIacuJ0l44qdfy8FPTvN7B0x6/22tP429+L+ZUVnLrdX1n2lMUbmlT+B7UBy/a9iDZkDuZenq9+gBAH/S0P+WYguG0xYiu5vdXcmvbC33wUnxVqn9AsmFdSR/NwiF3sp5aR1e2Dh7zjLz93DCvpceRZsW9PiDSTfte3zPyKW8et/ynnUbhsbcyIaO+sC/dkDuZz6q2v7l9yGMbrSYvfB5lufoepa3k3lRYrw9MNPgt3upEX5CqbVI2tZK7rJBbW3HonBcPs3U8feFnKiG3fma1rUdpK7n9hzvF753iIXcydfTnuLYE8sNsv6b/MMFWyK39tg9rcZH5JtBBXu/x4lvU9PfRVjDpfG60dZR+S0hD+fXr15s2TgNLfTlnSZ83fg0BBBBAAAEEEEAAAQQQQACBXBEg5E5ypmyE3BdcNkD+MiuyNVz2N//leXvtvpPXS9sPY7Tftfa99reppoe2tgDRIK6B6Qle/8QL5cy2Tb2Xr/mb9qXV1gHFQ+7igbm+cE+v5/9MD2vt4ZpOyO33/fbH9l+K1/2cU0XPtbQXTvr7J2Oh+6YTcms7A+1zvscu23sv9/S9i4fcGt5pv+TCLzr0+xef2uxoufO6f9uVlLSSW8/v3N53eCum3/6/T0VXyupxpW26KlTbp/i9q3U/bUPTyvT4rbeVaVcyYZB3aGntSvRleX7fbu3Dq8GyHlO4tYeuqtfV1ZvXqunVSjbkTqae3zd62O19pIlpg6CbPhTQVdLaEkP7oO+4XT3v5ara/sXvi6776UrhgSOf9laU6wsEtSd3piu5k/XU8VesXO21KDn/9BZe32//hZP6e/oyVn1hY+Hey/rrGsAu+3tlQQuTdEJurz91Ep/Vz7/6Qc7ocUvBZ9e/h7Rfu5rqZ7gsV31Brd6PxYNq/8WXeu9oT+aSrkOvU0P0czo0K/IzRcNRfWCUScjtf1PlbfMQT+deN+1Bry913GPX7csMuTu2Ot7rj6+b9gcfMW7yRj25tee4vhT0ncnDSgxxi4fcydR55a0PvdXh/os/dXztyd3afBtDX8a5qZC78Ge1pJ8HxduV6Cp6fbFs8cBffz7oyzz153QyIbe+C+At87NIXwqsD2b8zW9R9bbpzV/Si2dL+5nFryOAAAIIIIAAAggggAACCCCQKwKE3EnOlI2Q228JoC+3O/e0k8yLKzeIvtxOv1r+yJD88NoPY7YxwcY57ZvJicceKhpc9rtrjPdVdm2HEYtFvfB3vml/cb8JwnX18viJr3p9kZ81fae1vq5S1dDnmjtGy9577CQ9zmst+++9m2lx8b1ZxT1WTml6hLciV7dUQm5/Rbn2BW93cmMvSPbbPGhY+5MJmHt3aV/qCyd97mQsdN90Qm49zm+/Ubh/bfGQ23/53JBbesmB++7heT3/8ruybt0G0ys74bVuUOuSAln/OrRNwtW3j5KapsXEmybEK+2Fibq/H15369TKa32iLwocMPwJr6XNK2/PlsmmR/oOJtCcY1aEF3/xpM6jnt9o86LErUxbFe19rqvIdUX4pV1P815++bVpmaL9eP+7585eKKlbsiG3Bmhl1dOXE57Q8XKpb1rB9DdtKJab4PjuEU/KTuacJ7zwptx9Q3cTfjcwrVd+9OZN28Ccbtp8aI9zPS996eDjw6+XpX+vsBJyJ+uprXN007B1ovl86Cru4ivgtY/7vB/ne8G7fuvhT9MHWVuI/O8X7SU9wGv/UVI4rK1/9DN8V79u+Q9VStiS+axq+KsPXbQP+PV9zvVeYKsrpHWV+QMD+nr9/j/67NtNuuqLVrVd0fzfF3qtg/5j7qu5X//g9TnXcNu/5tLCeu3//P6cL+XGy87z9tcXcmprF+2nnUnI7X/DpGXTI6X7uad6Dxz0ftAXIernZVMrufUlitonXV8Kqi//9ENmtb6651lekPvaOx+ZdjlPm595HUr9uVM85NYWLhpWb6qOnqe2O9LxbzY/KzczPe71YdAX5v7Wz25pIXfxz2rxdlZ6
ixQPudVIP/PtTz5WzmjTxGtZ8+4Hn3vfMOhretd3an9iUiF3TdOuSc953712MdatRf89oveyPqxYaR7a6L8/2BBAAAEEEEAAAQQQQAABBBAojwKE3EnOqo2QW4fSoPIBsxrxG/MCQv0augZqPc9vI0c02Nc7E+0zfNKZV3qriD/+7DvR1YSrzVfO6+/3H+9FfvrVdt1+/OV30RXZc82LFjerWV3aNG8kvbq0kwH3PyHPTH3LvPivoRx28D6iq7ufeuAmGWpaeGhopT2gTzzmUO9FmRrc6ZZKyK3797vrQRPAve8d/5IJALUvuW66wnL0+BdMr+PSXzhZmLssC9033ZD7WWPQ/95x3opPDcd0Kx5y60pd74VtZk401NY+3Ooy66Mv5GZz7Jab1/J6/24q5Nb+wIeZFy3qgwXteVvWpiGZts7Q1i4691ebY2oYx65XDhQNkZ8aeaNZ0bl8o5BbXwx42U3DvdWenc1qZO3prQ8c7hv9jGhLiVVr1poVn3Wk6TGHSM/ObQvmNtmQW887mXq6Yl0Ds1/NStztTfh30TmtpPlxh8t5l94lX5nw7wYTkGorkzdmzjH3w/PyvQmO9SFM44YHir5oUoPuZFakJvviyWQ899h1B29a/JcoDrutt/bSqZMAACAASURBVDQx/aILb/qASFtTvGJaVGgAqm1LDjt4by841ZXUupUUDutcak/14i2BCtdO5rOqrXH0pbGDTG/zGe9+JPpiU/2GgIbC+nn1t0256j56Dw0aNUFef2+O56yOzY49zHvwpA9GSrsO/XXd/xbzIEDDVd3054d+M0NfSKkr8PU8tM2Rfj781jqlBeaFe3JrLX0Ioj8b1FZ7xOv9qz+ndMWz3vOlbRPMewe0xY/2ldYe/9ovfJ554apeo87n2rXrvQcCZ7Y9wXupZmlb8ZBb90umjn57Qef3e9OqRy3PMg+ntI+7vnxSX7apK6eLm5T0WS1+XsVDbv19/RwPNz9D9SGR9ufWn/X6kEj7wfvzk8w3IPRhzSDzc0Fb0KxcucY8FKstDevvK73Ng099GMaGAAIIIIAAAggggAACCCCAQHkUIOQuj7MawjVp24p2XW6Ug8yK6Fuvyl8hXhG2l2boSu4HZPoT92z0AtKKcP25dI364OL/Pv7KrMy+q6AHu43z1z7lQ27r5T1kYCtZQF+cW9u00PFX1ete2uZmn//s7H0DIFs3Xc2tK+RrmlXc/qYr42e8+7Fo73M2BBBAAAEEEEAAAQQQQAABBBDIDgFC7uyYh5w9C119uuCPRd7q9FlmJaJ+HV6/Il/eN11xr32UdbV4h5bHypU9zijvl5yT17du3Xr5zdyf2lZEV2qPMC9EPfbIg6xdy9ff/yw33vOQ93JTtpIFvjFtdNpfeKP3bRNt0xQzq5QnmXcHaCuWcYOv8VaMZ+Om904z862abc3qZ333gfay1rYitw5+TLp1aik9TDsQNgQQQAABBBBAAAEEEEAAAQQQyA4BQu7smIecPYsnnpshd93/uLci83rT6kN7W1eETV/SFzH/16rZUXK5eeGk35e8Ilx7Ll3jt6YtULsuN3ir7LX1R9sWjXPp9MvNub4161MZ9dgU+c70stZWHNqKRXvT+y8wzdYL1X7k95nWKB+ZVk9r1qzz7qO25l0EGtZr2xI2BBBAAAEEEEAAAQQQQAABBBDIDgFC7uyYB84CAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIA0BQu400DgEAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIDsECLmzYx44CwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIE0BAi500DjEAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHsECDkzo554CwQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE0hAg5E4DjUMQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEskOAkDs75oGzQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEhDgJA7DTQOQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEMgOAULu7JgHzgIBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgDQFC7jTQOAQBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgOwQIubNjHjgLBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgTQECLnTQOMQBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgewQIOTOjnngLBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTSECDkTgONQxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSyQ4CQOzvmgbNAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSEOAkDsNNA5BAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyA4BQu7smAfOAgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCANAULuNNA4BAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCA7BAi5s2MeOAsEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBNAQIudNA4xAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB7BAg5M6OeeAsEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBNIQIOROA41DEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBLJDgJA7O+aBs0AAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBIQ4CQOw00DkEAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDIDgFC7uyYB84CAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIA0BQu400DgEAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIDsECLmzYx44CwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIE0BAi500DjEAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHsECDkzo554CwQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE0hAg5E4DjUMQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEskOAkDs75oGzQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEhDgJA7DTQOQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEMgOAULu7JgHzgIBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgDQFC7jTQOAQBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgOwQIubNjHjgLBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgTQECLnTQOMQBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgewQIOTOjnngLBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTSECDkTgONQxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSyQ4CQOzvmgbNAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSEOAkDsNNA5BAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyA4BQu7smAfOAgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCANAULuNNA4BAEEEEAAAQQQQAABBBB
AAAEEEEAAAQQQQCA7BAi5s2MeOAsEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBNAQIudNA4xAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB7BAg5M6OeeAsEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBNIQIOROA41DEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBLJDgJA7O+aBs0AAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBIQ4CQOw204of8tmi1hSqUQMCtQI2qMalSOSZLV6xzOxDVEbAgUKVSVGrXrCx/LVtroRolEHArEI1GZOvNq8rvS9a4HYjqCFgS2H6r6sKfXy1hUsa5wNZbVJPFy9fKhnjC+VgMgECmAnVqVZFVazbImvV5mZbieAScC2xu/ntLf7auNPcsm10B/bMWW/AChNwWzPmPBAuIlHAuQMjtnJgBLAoQclvEpJRzAUJu58QMYFmAkNsyKOWcChByO+WluGUBQm7LoJRzKkDI7Y6XkNud7aYqE3JbcCfktoBICecChNzOiRnAogAht0VMSjkXIOR2TswAlgUIuS2DUs6pACG3U16KWxYg5LYMSjmnAoTc7ngJud3ZEnI7tiXkdgxMeSsChNxWGCkSkAAhd0DQDGNFgJDbCiNFAhQg5A4Qm6EyFiDkzpiQAgEKEHIHiM1QGQsQcmdMWGoBQm53toTcjm0JuR0DU96KACG3FUaKBCRAyB0QNMNYESDktsJIkQAFCLkDxGaojAUIuTMmpECAAoTcAWIzVMYChNwZExJyuyNMqzLtStJiK3oQIbcFREo4FyDkdk7MABYFCLktYlLKuQAht3NiBrAsQMhtGZRyTgUIuZ3yUtyyACG3ZVDKORUg5HbHy0pud7abqkzIbcGdkNsCIiWcCxByOydmAIsChNwWMSnlXICQ2zkxA1gWIOS2DEo5pwKE3E55KW5ZgJDbMijlnAoQcrvjJeR2Z0vI7diWkNsxMOWtCBByW2GkSEAChNwBQTOMFQFCbiuMFAlQgJA7QGyGyliAkDtjQgoEKEDIHSA2Q2UsQMidMWGpBXI95G7X5QZpf8qxcna7pmUibWrfv1eskiNbXiyTH75N9txtxzJrZboDK7kzFTTHE3JbQKSEcwFCbufEDGBRgJDbIialnAsQcjsnZgDLAoTclkEp51SAkNspL8UtCxByWwalnFMBQm43vN/Pi8oxh1d1UzygqnO/+Z/Uq7OFbFNvyzJHJOQukyi3diDkzq35qqhnS8hdUWc+N6+bkDs3562injUhd0Wd+dy9bkLu3J27injmhNwVcdZz95oJuXN37irimRNy25/1t96Oyow3o/LgkMr2i2dpRULuLJ2YdE+LkDtdOY4LUoCQO0htxspUgJA7U0GOD1KAkDtIbcayIUDIbUORGkEJEHIHJc04NgQIuW0oUiMoAUJuu9KLFkVkyPCYVzTIkFtbghzTppc8MKCvHHHIvgUXddbFt8phB+8jl3U7Taa88p6MGT9V5v/+l9TZsrZ07thcOrU/0dt3wPAn5e/lK72/Pv1ynrz93FApHFzH43kyeMyz8uKMWbJ02QrZdadt5aqeZ8oRDfLH0n2bHnOofPbl9/LBnK9l+23ryjWXnCWNDj9Aircr+XXBQrl9yHgzzveidZsde5hc17uTVK9Wxcpk0K7EAiMhtwVESjgXIOR2TswAFgUIuS1iUsq5ACG3c2IGsCxAyG0ZlHJOBQi5nfJS3LIAIbdlUMo5FSDktsebSIiMfigm8+dH5JjGeXJuh2DblfS45j7Zcbu60q/POd5FLVy0VI5rf6lMGnurVK5cSVqde63cf0cfaXTYAfLJF9/LhX3vkfHDr5cD9tlNBo2aIJOnvysXndNKmh/fULYyIXjhkHvClDdk2EOT5LFh/WS7bbaS8RNflbFPvChvTRri1dZ9Fy35W26/5kI5eL//yLinp8vDT0+T15+5TyLRSEFP7j122UHanN/PC+Iv69ZR1qxdK9fcPlrq1tncO9bGRshtQZGQ2wIiJZwLEHI7J2YAiwKE3BYxKeVcgJDbOTEDWBYg5LYMSjmnAoTcTnkpblmAkNsyKOWcChBy2+N9/8OovDgtKnXqJKRXj7jstHV1e8WTqPT8y+/J0AcnyoxnBnl7Pzl5hjwx6TV54dE7vRXTi5f+LfW22qKg0qmd+8nZbU+Q01s38ULuV96aLdOfuLvg9wuH3GvXrZdVq9fIlpvX8n5/ybLl0qh1L6/27jtv54Xc/91zl4Kges3adXKEednkPTd0l4Zmtbf/4sm/l6+SCy4bIO+/NFKqVc1fuf2ZWTl+bu875MNpo7zAPNONkDtTQXM8IbcFREo4FyDkdk7MABYFCLktYlLKuQAht3NiBrAsQMhtGZRyTgUIuZ3yUtyyACG3ZVDKORUg5LbD+/fyiAy9PyYmC5ZuXeKy4w4J0T9rBbmtWLlaGpmWJY/ff73st/eu0uXyu+XQg/eWHue29k7joadekudeescE1CtE/9tl6d8r5Irup8u5p53khdxfffezjBl4RcEpFw65V65aIwNHPiXvfPC5rFmzTiIRMaH5cpn44C2yz3929kLuFk0aStezWxYc37Tj5dL59BZy6klHF4Tcc7/+n1w/YGyJLC8/eY9ZiV4vYzJC7owJCbktEFIiAAFC7gCQGcKaACG3NUoKBSBAyB0AMkNYFSDktspJMccChNyOgSlvVYCQ2yonxRwLEHLbAX5kfFTm/RCVww7Jk1an5HlFgw65dcye1w2WvffYSc47rbk0bmtWWj9yp+yy4zYy4YU3ZfDoZ2TkgMvloH338M6v/YU3SmsTQPsh93f/my8j77qsAKRwyH3lrSPll98WyuBbLpFt69WR5aYHuK7ULhxyt2neyKvlbxpydzvnVNP+5PCCkPt7M8Ytgx6RWVNH2IEvoQohtwVaVnJbQKSEcwFCbufEDGBRgJDbIialnAsQcjsnZgDLAoTclkEp51SAkNspL8UtCxByWwalnFMBQu7MeT+bG5FnJ8WkVq2E9OkZlyr/vD8xjJD7hVdmer2wz+nQTJ54boY8M7q/d4HX3TlGtOXIvTdd7P1vfRnk8aZfd58L2ycVcjc9va90PesUr7WJbu99OFe6XTmwSMh98P57yo2Xnev9vo7V8JQeMvTWXqK/7rcrWbdug3S8qL+8NmGQbLd1HW/fVavXmt7c66TOFvmtUDLdCLkzFTTHE3JbQKSEcwFCbufEDGBRgJDbIialnAsQcjsnZgDLAoTclkEp51SAkNspL8UtCxByWwalnFMBQu7MeFetisiQ4VFZvToi53XKkz12z1/FrVsYIbe2FdGWJfvvvZs0Obq+nH9GC+9chj/8nLz85ofyxIgbvFD5rvufkG/m/SzHm320ZYm2K9nUSu7z+twp229TV267uot8+8MvMuKR5+XtWZ/KsNt7yzFHHOS1K9E+3UNv7e21Lxn75EsybsJ0eXPiYNPCZUNByL3nbjvK6RfdLFvX21JuvfICicWicuewx2XBn4vk4fuuyWwy/jmakNsCIyG3BURKOBcg5HZOzA
<... remainder of base64-encoded PNG image data truncated (rendered line plot: "Probability of Mary after each layer, according to logit lens") ...>",
-      "text/html": [
-       "<... truncated HTML plot output ...>"
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import plotly.express as px\n", - "\n", - "mary_token = model.tokenizer(\" Mary\").input_ids[0] # token id for Mary\n", - "\n", - "px.line(\n", - " [layer_probs.value.squeeze()[mary_token].item() for layer_probs in probs],\n", - " title=\"Probability of Mary after each layer, according to logit lens\",\n", - " labels={\"value\":\"Layer\", \"index\":\"Probability\"}\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.18" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/src/nnsight/toolbox/optim/__init__.py b/src/nnsight/toolbox/optim/__init__.py deleted file mode 100644 index d040e01a..00000000 --- a/src/nnsight/toolbox/optim/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ - - -from abc import abstractmethod -from typing import Any - - -class Optimization: - - @abstractmethod - def parameters(self): - pass - - @abstractmethod - def __call__(self) -> Any: - pass \ No newline at end of file diff --git a/src/nnsight/toolbox/optim/lora.py b/src/nnsight/toolbox/optim/lora.py deleted file mode 100644 index bb96322b..00000000 --- a/src/nnsight/toolbox/optim/lora.py +++ /dev/null @@ -1,23 +0,0 @@ -from typing import Any -import torch - -from ...module import Module -from . import Optimization - - -class LORA(Optimization): - def __init__(self, module: Module, r: int) -> None: - self.module = module - self.r = r - - self.WA = torch.nn.Parameter(torch.empty(self.module.input_shape[0][-1], self.r), requires_grad=True) - self.WB = torch.nn.Parameter(torch.empty(self.r, self.module.output_shape[-1]), requires_grad=True) - - def __call__(self, alpha:float=1.0) -> Any: - - inp = self.module.input[0] - - self.module.output = (torch.matmul(torch.matmul(inp, self.WA), self.WB) + self.module.output) * alpha - - def parameters(self): - return [self.WA, self.WB] diff --git a/src/nnsight/toolbox/optim/softprompt.py b/src/nnsight/toolbox/optim/softprompt.py deleted file mode 100644 index bacd779e..00000000 --- a/src/nnsight/toolbox/optim/softprompt.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Any -import torch - -from ...module import Module -from . import Optimization - - -class SoftPrompt(Optimization): - def __init__(self, module: Module, n: int) -> None: - self.module = module - self.n = n - - self.embedding = torch.nn.Parameter( - torch.zeros((self.n, self.module.embedding_dim)), requires_grad=True - ) - - def __call__(self) -> Any: - self.module.output = self.embedding[:] - - def parameters(self): - return [self.embedding] diff --git a/src/nnsight/tracing/Bridge.py b/src/nnsight/tracing/Bridge.py deleted file mode 100755 index 1923af9d..00000000 --- a/src/nnsight/tracing/Bridge.py +++ /dev/null @@ -1,99 +0,0 @@ -from collections import OrderedDict, defaultdict -from typing import TYPE_CHECKING, Dict, List, Optional - -from . import protocols - -if TYPE_CHECKING: - from ..intervention import InterventionProxy - from .Graph import Graph - from .Node import Node - - -class Bridge: - """A Bridge object collects and tracks multiple Graphs in order to facilitate interaction between them. 
diff --git a/src/nnsight/tracing/Bridge.py b/src/nnsight/tracing/Bridge.py
deleted file mode 100755
index 1923af9d..00000000
--- a/src/nnsight/tracing/Bridge.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from collections import OrderedDict, defaultdict
-from typing import TYPE_CHECKING, Dict, List, Optional
-
-from . import protocols
-
-if TYPE_CHECKING:
-    from ..intervention import InterventionProxy
-    from .Graph import Graph
-    from .Node import Node
-
-
-class Bridge:
-    """A Bridge object collects and tracks multiple Graphs in order to facilitate interaction between them.
-    The order in which Graphs are added matters, as Graphs can only get values from previous Graphs.
-
-    Attributes:
-        id_to_graph (Dict[int, Graph]): Mapping of graph id to Graph.
-        graph_stack (List[Graph]): Stack of visited Intervention Graphs.
-        bridged_nodes (defaultdict[Node, defaultdict[int, Optional[InterventionProxy]]]): Mapping of bridged Nodes
-            to the BridgeProtocol nodes representing them on different graphs.
-        locks (int): Count of how many entities are depending on ties between graphs not to be released.
-    """
-
-    def __init__(self) -> None:
-
-        # Mapping from Graph id to Graph.
-        self.id_to_graph: Dict[int, "Graph"] = OrderedDict()
-        # Stack to keep track of the innermost current graph.
-        self.graph_stack: List["Graph"] = list()
-        self.bridged_nodes: defaultdict["Node", defaultdict[int, "InterventionProxy"]] = defaultdict(lambda: defaultdict(lambda: None))
-
-        self.locks = 0
-
-    @property
-    def release(self) -> bool:
-
-        return not self.locks
-
-    def add(self, graph: "Graph") -> None:
-        """Adds Graph to Bridge.
-
-        Args:
-            graph (Graph): Graph to add.
-        """
-
-        protocols.BridgeProtocol.set_bridge(graph, self)
-
-        self.id_to_graph[graph.id] = graph
-
-        self.graph_stack.append(graph)
-
-    def peek_graph(self) -> "Graph":
-        """Gets the current hierarchical Graph in the Bridge.
-
-        Returns:
-            Graph: Graph of current context.
-        """
-        return self.graph_stack[-1]
-
-    def pop_graph(self) -> None:
-        """Pops the last Graph in the graph stack."""
-
-        self.graph_stack.pop()
-
-    def get_graph(self, id: int) -> "Graph":
-        """Returns graph from Bridge given the Graph's id.
-
-        Args:
-            id (int): Id of Graph to get.
-
-        Returns:
-            Graph: Graph.
-        """
-
-        return self.id_to_graph[id]
-
-    def add_bridge_proxy(self, node: "Node", bridge_proxy: "Node") -> None:
-        """Adds a BridgeProtocol Proxy to the bridged nodes attribute.
-
-        Args:
-            - node (Node): Bridged Node.
-            - bridge_proxy (Node): BridgeProtocol node proxy corresponding to the bridged node.
-        """
-
-        self.bridged_nodes[node][bridge_proxy.node.graph.id] = bridge_proxy
-
-    def get_bridge_proxy(self, node: "Node", graph_id: int) -> Optional["InterventionProxy"]:
-        """Checks if the argument Node is bridged within the specified graph and returns its corresponding BridgeProtocol node proxy.
-
-        Args:
-            - node (Node): Node.
-            - graph_id (int): Graph id.
-
-        Returns:
-            Optional[InterventionProxy]: BridgeProtocol node proxy if it exists.
-        """
-
-        return self.bridged_nodes[node][graph_id]
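Note: this Bridge machinery is what backed value sharing between nested tracing contexts in the 0.3 API. A rough sketch of the user-facing pattern it enabled (hypothetical prompts; session/trace per the pre-0.4 interface):

    from nnsight import LanguageModel

    model = LanguageModel("openai-community/gpt2")

    with model.session():
        with model.trace("Hello"):
            hidden = model.transformer.h[0].output[0].save()
        with model.trace("World"):
            # Reading `hidden` here crosses graphs, so a BridgeProtocol node is
            # created; the second graph may only consume values from the first,
            # matching the ordering the Bridge enforces.
            model.transformer.h[0].output[0][:] = hidden
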
diff --git a/src/nnsight/tracing/Graph.py b/src/nnsight/tracing/Graph.py
deleted file mode 100755
index 0fcf7a12..00000000
--- a/src/nnsight/tracing/Graph.py
+++ /dev/null
@@ -1,278 +0,0 @@
-from __future__ import annotations
-
-import inspect
-import tempfile
-from typing import Callable, Dict, Optional, Type
-
-from PIL import Image as PILImage
-
-from .. import util
-from ..util import apply
-from .Node import Node
-from .protocols import EarlyStopProtocol, Protocol
-from .Proxy import Proxy
-from .util import validate
-
-
-class Graph:
-    """Represents a computation graph composed of :class:`Nodes <nnsight.tracing.Node.Node>`.
-
-    Attributes:
-        nodes (Dict[str, :class:`Node <nnsight.tracing.Node.Node>`]): Mapping of `Node` name to node. Order is preserved and important when executing the graph sequentially.
-        attachments (Dict[str, Any]): Dictionary object used to add extra functionality to this Graph. Used by Protocols.
-        proxy_class (Type[:class:`Proxy <nnsight.tracing.Proxy.Proxy>`]): Proxy class to use. Defaults to :class:`Proxy <nnsight.tracing.Proxy.Proxy>`.
-        alive (bool): If this Graph should be considered alive (still tracing), and therefore added to. Used by `Node`s.
-        name_idx (Dict[str, int]): Mapping of node target_name to the number of previous names with the same target_name. Used so names are unique.
-        validate (bool): Whether to execute nodes as they are added, with their proxy values, in order to check if the executions are possible and to create a new proxy_value. Defaults to True.
-
-            When adding `Node`s to the `Graph`, if the `Graph`'s validate attribute is set to `True`, \
-            it will execute the `Node`'s target with its arguments' `.proxy_value` attributes (essentially executing the Node, with FakeTensors in FakeTensorMode).
-            This 1.) checks to see if the operation is valid on the tensor shapes within the `.proxy_value`s (this would catch an indexing error) and \
-            2.) populates this new `Node`'s `.proxy_value` attribute with the result.
-
-        sequential (bool): Whether to run nodes sequentially when executing this graph.
-
-            When this is set to `True`, `Node`s attempt to be executed in the order they were added to the `Graph` when calling `.execute()`. \
-            Otherwise, all nodes are checked to be fulfilled (they have no dependencies); these are root nodes, and they are then executed in the order they were added.
-    """
-
-    def __init__(
-        self,
-        proxy_class: Type[Proxy] = Proxy,
-        validate: bool = False,
-        sequential: bool = True,
-        graph_id: int = None,
-    ) -> None:
-
-        self.id = graph_id or id(self)
-
-        self.proxy_class = proxy_class
-        self.validate = validate
-        self.sequential = sequential
-
-        self.alive = True
-
-        self.nodes: Dict[str, Node] = dict()
-        self.name_idx: Dict[str, int] = dict()
-
-        self.attachments = dict()
-
-    def reset(self) -> None:
-        """Resets the Graph to prepare for a new execution of the Graph.
-        Calls `.reset()` on all Nodes.
-        """
-
-        # Reset Nodes individually.
-        for node in self.nodes.values():
-            node.reset()
-
-    def execute(self) -> None:
-        """Executes operations of `Graph`.
-
-        Executes all `Node`s sequentially if `Graph.sequential`. Otherwise executes only root `Node`s sequentially.
-        """
-
-        if self.sequential:
-            is_stopped_early: bool = False
-            early_stop_exception: Optional[EarlyStopProtocol.EarlyStopException] = None
-            for node in self.nodes.values():
-                if not is_stopped_early:
-                    if node.fulfilled():
-                        try:
-                            node.execute()
-                        except EarlyStopProtocol.EarlyStopException as e:
-                            is_stopped_early = True
-                            early_stop_exception = e
-                            continue
-                else:
-                    node.clean()
-            if is_stopped_early:
-                raise early_stop_exception
-        else:
-
-            root_nodes = [node for node in self.nodes.values() if node.fulfilled()]
-
-            for node in root_nodes:
-                node.execute()
- """ - - # If we're validating and the user did not provide a proxy_value, execute the given target with meta proxy values to compute new proxy_value. - if self.validate and node.proxy_value is inspect._empty: - - node.proxy_value = validate(node.target, *node.args, **node.kwargs) - - # Get name of target. - name = ( - node.target - if isinstance(node.target, str) - else node.target.__name__ - ) - - # Init name_idx tracker for this Node's name if not already added. - if name not in self.name_idx: - self.name_idx[name] = 0 - - # If Node's name is not set, set it to the name_idxed version. - if node.name is None: - node.name = f"{name}_{self.name_idx[name]}" - - # Increment name_idx for name. - self.name_idx[name] += 1 - - # Add Node. - self.nodes[node.name] = node - - def copy(self): - """Copy constructs a new Graph and then recursively - creates new Nodes on the graph. - """ - new_graph = Graph( - validate=self.validate, - sequential=self.sequential, - proxy_class=self.proxy_class, - ) - - def compile(graph: Graph, old_node: Node): - if old_node.name in graph.nodes: - return graph.nodes[old_node.name] - - node = graph.create( - target=old_node.target, - name=old_node.name, - proxy_value=None, - args=apply(old_node.args, lambda x: compile(graph, x), Node), - kwargs=apply( - old_node.kwargs, lambda x: compile(graph, x), Node - ), - ).node - - if isinstance(node.target, type) and issubclass( - node.target, Protocol - ): - node.target.compile(node) - - return node - - # To preserve order - nodes = {} - - for node in self.nodes.values(): - - compile(new_graph, node) - - # To preserve order - nodes[node.name] = new_graph.nodes[node.name] - - # To preserve order - new_graph.nodes = nodes - - return new_graph - - def vis( - self, - title: str = "graph", - path: str = ".", - display: bool = True, - save: bool = False, - recursive: bool = False, - ): - """Generates and saves a graphical visualization of the Intervention Graph using the pygraphviz library. - Args: - title (str): Name of the Intervention Graph. Defaults to "graph". - path (str): Directory path to save the graphic in. If None saves content to the current directory. - display (bool): If True, shows the graph image. - save (bool): If True, saves the graph to the specified path. - recursive (bool): If True, recursively visualize sub-graphs. - """ - - try: - - import pygraphviz as pgv - - except Exception as e: - - raise type(e)( - "Visualization of the Graph requires `pygraphviz` which requires `graphviz` to be installed on your machine." 
-
-    def vis(
-        self,
-        title: str = "graph",
-        path: str = ".",
-        display: bool = True,
-        save: bool = False,
-        recursive: bool = False,
-    ):
-        """Generates and saves a graphical visualization of the Intervention Graph using the pygraphviz library.
-
-        Args:
-            title (str): Name of the Intervention Graph. Defaults to "graph".
-            path (str): Directory path to save the graphic in. If None, saves content to the current directory.
-            display (bool): If True, shows the graph image.
-            save (bool): If True, saves the graph to the specified path.
-            recursive (bool): If True, recursively visualizes sub-graphs.
-        """
-
-        try:
-
-            import pygraphviz as pgv
-
-        except Exception as e:
-
-            raise type(e)(
-                "Visualization of the Graph requires `pygraphviz` which requires `graphviz` to be installed on your machine."
-            ) from e
-
-        from IPython.display import Image
-        from IPython.display import display as IDisplay
-
-        graph: pgv.AGraph = pgv.AGraph(strict=True, directed=True)
-
-        graph.graph_attr.update(
-            label=title, fontsize="20", labelloc="t", labeljust="c"
-        )
-
-        for node in self.nodes.values():
-            # draw bottom up
-            if len(node.listeners) == 0:
-                node.visualize(graph, recursive)
-
-        def display_graph(file_name):
-            in_notebook = True
-
-            # Credit: Till Hoffmann - https://stackoverflow.com/a/22424821
-            try:
-                from IPython import get_ipython
-
-                if "IPKernelApp" not in get_ipython().config:
-                    in_notebook = False
-            except ImportError:
-                in_notebook = False
-            except AttributeError:
-                in_notebook = False
-
-            if in_notebook:
-                IDisplay(Image(filename=file_name))
-            else:
-                img = PILImage.open(file_name)
-                img.show()
-                img.close()
-
-        if not save:
-            with tempfile.NamedTemporaryFile(suffix=".png") as temp_file:
-                graph.draw(temp_file.name, prog="dot")
-                if display:
-                    display_graph(temp_file.name)
-        else:
-            graph.draw(f"{path}/{title}.png", prog="dot")
-            if display:
-                display_graph(f"{path}/{title}.png")
-
-    def __str__(self) -> str:
-        result = ""
-
-        for name, node in self.nodes.items():
-            result += f" %{node}\n"
-
-        return result
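Note: two quick illustrations against the Graph API just removed. These are sketches only, under the pre-0.4 import paths; keeping a value alive normally goes through .save()/a lock protocol rather than the manual listener bump shown here:

    import operator
    from nnsight.tracing.Graph import Graph  # pre-0.4 import path

    graph = Graph(validate=False, sequential=True)

    a = graph.create(target=lambda: 1, name="one")           # root node: no dependencies
    b = graph.create(target=operator.add, args=[a.node, 1])  # depends on "one"

    graph.reset()                    # recompute remaining listener/dependency counts
    b.node.remaining_listeners += 1  # keep b's value from being destroyed after execution
    graph.execute()                  # runs fulfilled nodes in insertion order
    assert b.value == 2

    # And, given the vis() signature above, rendering the graph to ./trace.png
    # (requires graphviz plus pygraphviz, as the error message notes):
    graph.vis(title="trace", path=".", display=False, save=True, recursive=True)
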
diff --git a/src/nnsight/tracing/Node.py b/src/nnsight/tracing/Node.py
deleted file mode 100755
index 73b253e7..00000000
--- a/src/nnsight/tracing/Node.py
+++ /dev/null
@@ -1,570 +0,0 @@
-from __future__ import annotations
-
-import inspect
-import weakref
-from collections import defaultdict
-from collections.abc import Iterable
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
-
-import torch
-
-from .. import util
-from ..logger import logger
-from . import protocols
-from .Proxy import Proxy
-
-if TYPE_CHECKING:
-    from .Graph import Graph
-
-    try:
-        from pygraphviz import AGraph
-    except:
-        pass
-
-
-class Node:
-    """A Node represents some action that should be carried out during execution of a Graph.
-
-    The class represents both the operations being traced (and the resulting output of those operations) AND the nodes that actually execute the operations when executing the Graph. The Nodes you are tracing are the same objects as the ones that are executed.
-
-    * Nodes have a ``.proxy_value`` attribute that is a result of the tracing operation; these are FakeTensors allowing you to view the shape and datatypes of the actual resulting value that will be populated when the node's operation is executed.
-    * Nodes carry out their operation in ``.execute()``, where their arguments are pre-processed and their value is set in ``.set_value()``.
-    * Arguments passed to the node may be other nodes, in which case a bi-directional dependency graph is formed. During execution pre-processing, arguments that are nodes are converted to their values.
-    * Nodes are responsible for updating their listeners when one of their dependencies is completed, and, if all are completed, that they should execute. Similarly, nodes must inform their dependencies when one of their listeners has ceased "listening." If the node has no listeners, its value is destroyed by calling ``.destroy()`` in order to free memory. When re-executing the same graph and therefore the same nodes, the remaining listeners and dependencies are reset on each node.
-
-    Attributes:
-        name (str): Unique name of node.
-        graph (Graph): Weak reference to parent Graph object.
-        proxy (Proxy): Weak reference to Proxy created from this Node.
-        proxy_value (Any): Fake Tensor version of value. Used when graph has validate = True to check if Node actions are possible.
-        target (Union[Callable, str]): Function to execute or name of Protocol.
-        args (List[Any], optional): Positional arguments. Defaults to None.
-        kwargs (Dict[str, Any], optional): Keyword arguments. Defaults to None.
-        listeners (List[Node]): Nodes that depend on this node.
-        arg_dependencies (List[Node]): Nodes that this node depends on.
-        cond_dependency (Optional[Node]): ConditionalProtocol node if this node was defined within a Conditional context.
-        value (Any): Actual value to be populated during execution.
-    """
-
-    def __init__(
-        self,
-        target: Union[Callable, str],
-        graph: "Graph" = None,
-        proxy_value: Any = inspect._empty,
-        args: List[Any] = None,
-        kwargs: Dict[str, Any] = None,
-        name: str = None,
-    ) -> None:
-        super().__init__()
-
-        if args is None:
-            args = list()
-        if kwargs is None:
-            kwargs = dict()
-
-        args = list(args)
-
-        self.graph: "Graph" = graph
-        self.proxy_value = proxy_value
-        self.target = target
-        self.args, self.kwargs = args, kwargs
-
-        self.proxy: Optional[Proxy] = None
-
-        self._value: Any = inspect._empty
-
-        self.listeners: List[Node] = list()
-        self.arg_dependencies: List[Node] = list()
-        self.cond_dependency: Optional[Node] = None
-
-        self.remaining_listeners = 0
-        self.remaining_dependencies = 0
-
-        # Preprocess args.
-        self.preprocess()
-
-        # Node.graph is a weak reference to avoid reference loops.
-        self.graph = (
-            weakref.proxy(self.graph) if self.graph is not None else None
-        )
-
-        self.name: str = name
-
-        # If there's an alive Graph, add it.
-        if self.attached():
-
-            self.graph.add(self)
-
-    def preprocess(self) -> None:
-        """Preprocesses Node.args and Node.kwargs."""
-
-        # bridge graph redirection
-        if self.attached():
-            self.graph = (
-                protocols.BridgeProtocol.peek_graph(self.graph)
-                if (
-                    self.target.redirect
-                    if isinstance(self.target, type)
-                    and issubclass(self.target, protocols.Protocol)
-                    else True
-                )
-                else self.graph
-            )
-
-        def preprocess_node(node: Union[Node, Proxy]):
-
-            if isinstance(node, Proxy):
-
-                node = node.node
-
-            if node.done():
-
-                return node.value
-
-            if self.attached() and self.graph.id != node.graph.id:
-
-                node = protocols.BridgeProtocol.add(node).node
-
-            self.arg_dependencies.append(node)
-            # Weakref so no reference loop
-            node.listeners.append(weakref.proxy(self))
-
-            return node
-
-        self.args, self.kwargs = util.apply(
-            (self.args, self.kwargs), preprocess_node, (Node, Proxy)
-        )
-
-        # conditional context handling
-        if (
-            self.attached()
-            and protocols.ConditionalProtocol.has_conditional(self.graph)
-            and (
-                self.target.condition
-                if isinstance(self.target, type)
-                and issubclass(self.target, protocols.Protocol)
-                else True
-            )
-        ):
-
-            conditional_node = protocols.ConditionalProtocol.peek_conditional(
-                self.graph
-            )
-
-            # Only the top dependency needs to add the Conditional as a dependency:
-            # if none of the dependencies are dependent on the Conditional, then add it.
-            if conditional_node:
-                if all(
-                    [
-                        not protocols.ConditionalProtocol.is_node_conditioned(arg)
-                        for arg in self.arg_dependencies
-                    ]
-                ):
-                    self.cond_dependency = conditional_node
-                    conditional_node.listeners.append(weakref.proxy(self))
-
-                protocols.ConditionalProtocol.add_conditioned_node(self)
-
-    @property
-    def value(self) -> Any:
-        """Property to return the value of this node.
-
-        Returns:
-            Any: The stored value of the node, populated during execution of the model.
-
-        Raises:
-            ValueError: If the underlying ._value is inspect._empty (therefore never set or destroyed).
-        """
-
-        if not self.done():
-            raise ValueError("Accessing value before it's been set.")
-
-        return self._value
-
-    def attached(self) -> bool:
-        """Checks to see if the weakref to the Graph is alive or dead.
-
-        Returns:
-            bool: Is Node attached.
-        """
-
-        try:
-
-            return self.graph.alive
-
-        except:
-            return False
-
-    def create(
-        self,
-        target: Union[Callable, str],
-        proxy_value: Any = inspect._empty,
-        args: List[Any] = None,
-        kwargs: Dict[str, Any] = None,
-        name: str = None,
-    ) -> Union[Proxy, Any]:
-        """We use Node.create vs Graph.create in case the graph is dead.
-        If the graph is dead, we assume this node is ready to execute and therefore we try to execute it and then return its value.
-
-        Returns:
-            Union[Proxy, Any]: Proxy or value.
-        """
-
-        if not self.attached():
-
-            graph: "Graph" = None
-
-            def find_attached_graph(node: Union[Proxy, Node]):
-
-                if isinstance(node, Proxy):
-
-                    node = node.node
-
-                nonlocal graph
-
-                if node.attached():
-
-                    graph = node.graph
-
-            util.apply((args, kwargs), find_attached_graph, (Proxy, Node))
-
-            if graph is not None:
-
-                return graph.create(
-                    target=target,
-                    name=name,
-                    proxy_value=proxy_value,
-                    args=args,
-                    kwargs=kwargs,
-                )
-
-            # Create Node with no values or Graph.
-            node = Node(
-                target=target,
-                graph=None,
-                proxy_value=None,
-                args=args,
-                kwargs=kwargs,
-            )
-
-            # Reset it.
-            node.reset()
-
-            # So it doesn't get destroyed.
-            node.remaining_listeners = 1
-
-            # Execute Node.
-            node.execute()
-
-            # Get value.
-            value = node.value
-
-            # Destroy.
-            node.destroy()
-
-            return value
-
-        # Otherwise just create the Node on the Graph like normal.
-        return self.graph.create(
-            target=target,
-            name=name,
-            proxy_value=proxy_value,
-            args=args,
-            kwargs=kwargs,
-        )
- """ - - inputs = util.apply(inputs, lambda x: x, inspect._empty) - - def _value(node: Proxy | Node): - - if isinstance(node, Proxy): - node = node.node - - if proxy: - return node.proxy_value - - return node.value - - inputs = util.apply(inputs, _value, (Node, Proxy), inplace=not proxy) - - if device is None: - - def _device(value: torch.Tensor): - nonlocal device - - if device is None: - device = value.device - - util.apply(inputs, _device, torch.Tensor) - - def _to(value: torch.Tensor): - return value.to(device) - - inputs = util.apply(inputs, _to, torch.Tensor, inplace=not proxy) - - return inputs - - def execute(self) -> None: - """Actually executes this node. - Lets protocol execute if target is str. - Else prepares args and kwargs and passes them to target. Gets output of target and sets the Node's value to it. - """ - - try: - - if isinstance(self.target, type) and issubclass( - self.target, protocols.Protocol - ): - - self.target.execute(self) - - else: - - # Prepare arguments. - args, kwargs = Node.prepare_inputs((self.args, self.kwargs)) - - # Call the target to get value. - output = self.target(*args, **kwargs) - - # Set value. - self.set_value(output) - - except Exception as e: - - raise type(e)( - f"Above exception when execution Node: '{self.name}' in Graph: '{self.graph.id}'" - ) from e - - finally: - self.remaining_dependencies -= 1 - - def set_value(self, value: Any) -> None: - """Sets the value of this Node and logs the event. - Updates remaining_dependencies of listeners. If they are now fulfilled, execute them. - Updates remaining_listeners of dependencies. If they are now redundant, destroy them. - - Args: - value (Any): Value. - """ - self._value = value - - logger.info(f"=> SET({self.name})") - - for listener in self.listeners: - listener.remaining_dependencies -= 1 - - if listener.fulfilled() and not self.graph.sequential: - listener.execute() - - for dependency in self.arg_dependencies: - dependency.remaining_listeners -= 1 - - if dependency.redundant(): - dependency.destroy() - - if self.done() and self.redundant(): - self.destroy() - - def destroy(self) -> None: - """Removes the reference to the node's value and logs it's destruction.""" - - logger.info(f"=> DEL({self.name})") - - self._value = inspect._empty - - def clean(self) -> None: - """Clean up dependencies during early execution stop""" - - # BridgeProtocol nodes must clean up their corresponding external proxy - if isinstance(self.target, type) and issubclass( - self.target, protocols.BridgeProtocol - ): - bridge = protocols.BridgeProtocol.get_bridge(self.graph) - lock_node = bridge.get_graph(self.args[0]).nodes[self.args[1]] - lock_dependency = lock_node.args[0] - lock_dependency.remaining_listeners -= 1 - lock_node.destroy() - if lock_dependency.redundant(): - lock_dependency.destroy() - else: - for dependency in self.arg_dependencies: - dependency.remaining_listeners -= 1 - if dependency.redundant(): - dependency.destroy() - - def visualize( - self, viz_graph: "AGraph", recursive: bool, backend_name: str = "" - ) -> str: - """Adds this node to the visualization graph and recursively visualizes its arguments and adds edges between them. - - Args: - - viz_graph (AGraph): Visualization graph. - - recursive (bool): If True, recursively visualizes all sub-graphs. - - backend_name (str): Inherent parent graph name for unique differentiation in recursive visualization. - - Returns: - - str: name of this node. 
- """ - - styles = { - "node": {"color": "black", "shape": "ellipse"}, - "label": (self.target if isinstance(self.target, str) else self.target.__name__), - "arg": defaultdict(lambda: {"color": "gray", "shape": "box"}), - "arg_kname": defaultdict(lambda: None), - "edge": defaultdict(lambda: "solid"), - } - - node_name = backend_name + self.name - - if isinstance(self.target, type) and issubclass( - self.target, protocols.Protocol - ): - styles = self.target.style() - viz_graph.add_node(node_name, label=styles["label"], **styles["node"]) - if ( - recursive - and self.target == protocols.LocalBackendExecuteProtocol - ): - # recursively draw all sub-graphs - for sub_node in self.args[0].graph.nodes.values(): - # draw root nodes and attach them to their LocalBackendExecuteProtocol node - if ( - len(sub_node.arg_dependencies) - + int(not (sub_node.cond_dependency is None)) - ) == 0: - sub_node_name = sub_node.visualize( - viz_graph, recursive, node_name + "_" - ) - viz_graph.add_edge( - node_name, - sub_node_name, - style="dotted", - color="purple", - ) - # draw bottom up - elif len(sub_node.listeners) == 0: - sub_node_name = sub_node.visualize( - viz_graph, recursive, node_name + "_" - ) - else: - viz_graph.add_node(node_name, label=styles["label"], **styles["node"]) - - def visualize_args(arg_collection): - """Recursively visualizes the arguments of this node. - - Args: - - arg_collection (Union[List[Any], Dict[str, Any]]): Collection of Node arguments. - """ - - for key, arg in arg_collection: - if isinstance(arg, Node): - name = arg.visualize(viz_graph, recursive, backend_name) - else: - # show link between iterable values with Node dependencies - iter_val_dependencies = [] - if isinstance(arg, Iterable): - for element in arg: - if isinstance(element, Node): - dep_name = element.visualize(viz_graph, recursive, backend_name) - iter_val_dependencies.append(dep_name) - - name = node_name - if isinstance(arg, torch.Tensor): - name += f"_Tensor_{key}" - label = "Tensor" - elif isinstance(arg, str): - name += f"_{arg}_{key}" - label = f'"{arg}"' - else: - name += f"_{arg}_{key}" - label = str(arg) - - if isinstance(key, int): - if not styles["arg_kname"][key] is None: - label = f"{styles['arg_kname'][key]}={label}" - else: - label = f"{key}={label}" - - viz_graph.add_node(name, label=label, **styles["arg"][key]) - - for dep_name in iter_val_dependencies: - viz_graph.add_edge(dep_name, name, style="dashed", color="gray") - - viz_graph.add_edge(name, node_name, style=styles["edge"][key]) - - visualize_args(enumerate(self.args)) - - visualize_args(self.kwargs.items()) - - if isinstance(self.cond_dependency, Node): - name = self.cond_dependency.visualize( - viz_graph, recursive, backend_name - ) - viz_graph.add_edge( - name, node_name, style=styles["edge"][None], color="#FF8C00" - ) - - return node_name - - def __str__(self) -> str: - args = util.apply(self.args, lambda x: f"'{x}'", str) - args = util.apply(args, lambda x: x.name, Node) - args = [str(arg) for arg in args] - - return f"{self.name}:[args:({','.join(args)}) l:{len(self.listeners)} a_d:{len(self.arg_dependencies)} c_d{bool(self.cond_dependency)}]" - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} at {hex(id(self))}>" diff --git a/src/nnsight/tracing/Proxy.py b/src/nnsight/tracing/Proxy.py deleted file mode 100755 index a5fd0a1d..00000000 --- a/src/nnsight/tracing/Proxy.py +++ /dev/null @@ -1,319 +0,0 @@ -from __future__ import annotations - -import inspect -import operator -import weakref -from typing import 
TYPE_CHECKING, Any, Callable, Union - -from typing_extensions import Self - -from .. import util - -if TYPE_CHECKING: - from .Node import Node - - -class Proxy: - """Proxy objects are the actual objects that interact with operations in order to update the graph to create new Nodes. - - The operations that are traceable on base Proxy objects are many python built-in and magic methods, as well as implementing __torch_function__ to trace torch operations. - - Attributes: - node (Node): This proxy's node. - """ - - def __getstate__(self): - return self.__dict__ - - def __setstate__(self, d: dict): - self.__dict__ = d - - @staticmethod - def proxy_call(callable: Callable, *args, **kwargs) -> Self: - return callable(*args, **kwargs) - - def __init__(self, node: "Node") -> None: - - self.__dict__["node"] = node - - self.node: "Node" - self.node.proxy = weakref.proxy(self) - - @property - def value(self) -> Any: - """Property to return the value of this proxy's node. - - Returns: - Any: The stored value of the proxy, populated during execution of the model. - """ - - return self.node.value - - def __str__(self) -> str: - - if not self.node.attached(): - - return str(self.value) - - return f"{type(self).__name__} ({self.node.name}): {self.node.proxy_value if self.node.proxy_value is not inspect._empty else ''}" - - def __repr__(self) -> str: - - if not self.node.attached(): - - return repr(self.value) - - return str(self) - - def __call__(self, *args, **kwargs) -> Self: - """ - Calling a Proxy object just creates a Proxy.proxy_call operation. - - Returns: - Proxy: New call proxy. - """ - - return self.node.create( - target=Proxy.proxy_call, - args=[self.node] + list(args), - kwargs=kwargs, - ) - - def __getitem__(self, key: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.getitem, - args=[self.node, key], - ) - - def __setitem__(self, key: Union[Proxy, Any], value: Union[Self, Any]) -> None: - self.node.create( - target=operator.setitem, - args=[self.node, key, value], - ) - - def __getattr__(self, key: Union[Proxy, Any]) -> Self: - return self.node.create( - target=util.fetch_attr, - args=[self.node, key], - ) - - def __setattr__(self, key: Union[Proxy, Any], value: Union[Self, Any]) -> None: - - if key == "__dict__": - - super().__setattr__(key, value) - - return - - return self.node.create( - target=setattr, - args=[self.node, key, value], - ) - - def __len__(self) -> Self: - return self.node.create( - target=len, - args=[self.node], - ) - - def __abs__(self) -> Self: - return self.node.create( - target=operator.abs, - args=[self.node], - ) - - def __invert__(self) -> Self: - return self.node.create( - target=operator.invert, - args=[self.node], - ) - - def __neg__(self) -> Self: - return self.node.create( - target=operator.neg, - args=[self.node], - ) - - def __index__(self) -> Self: - return self.node.create(target=operator.index, args=[self.node]) - - def __add__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.add, - args=[self.node, other], - ) - - def __radd__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.add, - args=[other, self.node], - ) - - def __sub__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.sub, - args=[self.node, other], - ) - - def __rsub__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.sub, - args=[other, self.node], - ) - - def __pow__(self, other: Union[Proxy, Any]) -> Self: - return 
self.node.create( - target=operator.pow, - args=[self.node, other], - ) - - def __rpow__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.pow, - args=[other, self.node], - ) - - def __mul__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.mul, - args=[self.node, other], - ) - - def __rmul__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.mul, - args=[other, self.node], - ) - - def __mod__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.mod, - args=[self.node, other], - ) - - def __rmod__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.mod, - args=[other, self.node], - ) - - def __matmul__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.matmul, - args=[self.node, other], - ) - - def __rmatmul__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.matmul, - args=[other, self.node], - ) - - def __truediv__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.truediv, - args=[self.node, other], - ) - - def __rtruediv__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create( - target=operator.truediv, - args=[other, self.node], - ) - - def __floordiv__(self, other: Union[Proxy, Any]) -> Self: - return self.node.add( - target=operator.floordiv, - args=[self.node, other], - ) - - def __rfloordiv__(self, other: Union[Proxy, Any]) -> Self: - return self.node.add( - target=operator.floordiv, - args=[other, self.node], - ) - - def __eq__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create(target=operator.eq, args=[self.node, other]) - - def __ne__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create(target=operator.ne, args=[self.node, other]) - - def __lt__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create(target=operator.lt, args=[self.node, other]) - - def __gt__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create(target=operator.gt, args=[self.node, other]) - - def __le__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create(target=operator.le, args=[self.node, other]) - - def __ge__(self, other: Union[Proxy, Any]) -> Self: - return self.node.create(target=operator.ge, args=[self.node, other]) - - def __index__(self) -> Self: - return self.node.create(target=operator.index, args=[self.node]) - - def __bool__(self) -> bool: - return self.node.proxy_value.__bool__() - - def __instancecheck__(self, __instance: Any) -> bool: - return self.node.proxy_value.__instancecheck__(__instance) - - @classmethod - def __torch_function__(cls, orig_method, types, args=None, kwargs=None) -> Self: - if args is None: - args = list() - if kwargs is None: - kwargs = dict() - - proxy: Proxy = None - - def get_proxy(arg): - nonlocal proxy - - proxy = arg - - util.apply(args, get_proxy, Proxy) - - return proxy.node.create( - target=orig_method, - args=args, - kwargs=kwargs, - ) - - -from functools import wraps - - -def proxy_wrapper(fn) -> Callable: - """Wraps problematic functions (torch functions sometimes). - Checks if any of its args are proxies. If so we return a proxy of the function. - Otherwise just run the function. - - Args: - fn (function): Function to wrap. - - Returns: - function: Wrapped function. 
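The deleted `__torch_function__` above uses PyTorch's function-override protocol: any torch call that receives a "Tensor-like" argument is routed to that hook instead of being computed. A minimal standalone sketch of the interception mechanism (the class name and print are illustrative only; the real Proxy creates a graph `Node` here):

```python
import torch


class TracedTensorLike:
    """Any torch call receiving an instance is routed to __torch_function__."""

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        # A real tracer would record a graph Node here; we just log and return.
        print(f"intercepted: {func.__name__}")
        return cls()


t = TracedTensorLike()
torch.add(t, t)  # prints "intercepted: add" instead of computing anything
```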
-    """
-
-    @wraps(fn)
-    def patched(*args, **kwargs):
-
-        node = None
-
-        def get_node(proxy: Proxy):
-            nonlocal node
-
-            node = proxy.node
-
-        util.apply((args, kwargs), get_node, Proxy)
-
-        if node is not None:
-            return node.create(target=fn, args=args, kwargs=kwargs)
-
-        else:
-            return fn(*args, **kwargs)
-
-    return patched
diff --git a/src/nnsight/tracing/__init__.py b/src/nnsight/tracing/__init__.py
old mode 100644
new mode 100755
index d410ec0d..91a5ae2b
--- a/src/nnsight/tracing/__init__.py
+++ b/src/nnsight/tracing/__init__.py
@@ -1,8 +1,7 @@
-"""The `nnsight.tracing `module involves tracing operations in order to form a computation graph.
+"""The `tracing` module acts as a standalone library to trace and execute Python-based deferred computation graphs.
 
-The :class:`Graph <nnsight.tracing.Graph.Graph>` class adds and stores operations as `Node`s.
-
-The :class:`Node <nnsight.tracing.Node.Node>` class represents an individual operation in the :class:`Graph <nnsight.tracing.Graph.Graph>`.
-
-The :class:`Proxy <nnsight.tracing.Proxy.Proxy>` class handles interactions from the user in order to create new `Node`s. There is a `Proxy` for each `Node`.
-"""
+The `graph` sub-module defines the computation graph primitives.
+The `protocols` sub-module contains logic for adding custom operations to the computation graph.
+The `contexts` sub-module contains logic for defining scoped sub-graphs that handle execution of their piece of the computation graph.
+The `backends` sub-module contains logic for executing the traced computation graph.
+"""
\ No newline at end of file
diff --git a/src/nnsight/tracing/backends/__init__.py b/src/nnsight/tracing/backends/__init__.py
new file mode 100755
index 00000000..71642631
--- /dev/null
+++ b/src/nnsight/tracing/backends/__init__.py
@@ -0,0 +1 @@
+from .base import Backend, ExecutionBackend
\ No newline at end of file
diff --git a/src/nnsight/tracing/backends/base.py b/src/nnsight/tracing/backends/base.py
new file mode 100755
index 00000000..39c95846
--- /dev/null
+++ b/src/nnsight/tracing/backends/base.py
@@ -0,0 +1,69 @@
+import inspect
+import sys
+
+from ...util import NNsightError
+from ..graph import Graph, Proxy
+from ..protocols import StopProtocol
+from ...
import __IPYTHON__
+
+class Backend:
+
+    def __call__(self, graph: Graph) -> None:
+
+        raise NotImplementedError()
+
+
+class ExecutionBackend(Backend):
+
+    def __init__(self, injection: bool = True) -> None:
+        self.injection = injection
+
+    def __call__(self, graph: Graph) -> None:
+
+        try:
+
+            graph.nodes[-1].execute()
+
+            if self.injection:
+
+                from ..contexts import Context
+                import ctypes
+
+                frame = inspect.currentframe().f_back
+                while frame.f_back is not None and 'self' in frame.f_locals and isinstance(frame.f_locals['self'], Context):
+                    frame = frame.f_back
+
+                for key, value in frame.f_locals.items():
+                    if isinstance(value, Proxy) and value.node.done:
+                        frame.f_locals[key] = value.value
+                ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame), 0)
+
+        except StopProtocol.StopException:
+
+            pass
+
+        except NNsightError as e:
+            if graph.debug:
+                node_traceback = graph.nodes[e.node_id].meta_data['traceback']
+
+                if __IPYTHON__:  # in IPython the traceback content is rendered by the Error itself
+                    # add the error node traceback to the error's traceback
+                    e.traceback_content += "\nDuring handling of the above exception, another exception occurred:\n\n"
+                    e.traceback_content += node_traceback
+                else:  # else we print the traceback manually
+                    print(f"\n{e.traceback_content}")
+                    print(
+                        "During handling of the above exception, another exception occurred:\n"
+                    )
+                    print(f"{node_traceback}")
+
+                sys.tracebacklimit = 0
+                raise e from None
+            else:
+                raise e
+
+        finally:
+            if __IPYTHON__:
+                sys.tracebacklimit = None
+            graph.nodes.clear()
+            graph.stack.clear()
diff --git a/src/nnsight/tracing/contexts/__init__.py b/src/nnsight/tracing/contexts/__init__.py
new file mode 100755
index 00000000..1f81ac5b
--- /dev/null
+++ b/src/nnsight/tracing/contexts/__init__.py
@@ -0,0 +1,5 @@
+from .base import Context
+from .iterator import Iterator
+from .conditional import Condition
+from .tracer import Tracer
+from .globals import GlobalTracingContext
\ No newline at end of file
diff --git a/src/nnsight/tracing/contexts/base.py b/src/nnsight/tracing/contexts/base.py
new file mode 100755
index 00000000..58cc3c38
--- /dev/null
+++ b/src/nnsight/tracing/contexts/base.py
@@ -0,0 +1,96 @@
+from __future__ import annotations
+
+from contextlib import AbstractContextManager
+from typing import Generic, Optional, Type
+
+from typing_extensions import Self
+
+from ... import CONFIG
+from ...tracing.graph import Node, NodeType, Proxy, ProxyType
+from ..backends import Backend, ExecutionBackend
+from ..graph import Graph, GraphType, SubGraph, viz_graph
+from ..protocols import Protocol
+
+class Context(Protocol, AbstractContextManager, Generic[GraphType]):
+    """A `Context` represents a scope (or slice) of a computation graph with specific logic for adding and executing nodes defined within it.
+    It has a `SubGraph` which contains the nodes that make up the operations of the context.
+    As an `AbstractContextManager`, entering adds its sub-graph to the stack, so that new nodes created within this context are added to its sub-graph.
+    Exiting pops its sub-graph off the stack, allowing nodes to be added to its parent, and adds itself as a node to its parent `Context`/`SubGraph` (that is, "execute me").
+    If the `Context` has a backend, it pops its parent off the stack and passes it to the `Backend` object to execute.
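The `ExecutionBackend` above relies on a CPython-specific trick: mutating a caller frame's `f_locals` and flushing the change with `PyFrame_LocalsToFast` so that saved proxies are replaced by their concrete values after execution. A toy sketch of that mechanism, assuming CPython <= 3.12 (PEP 667 changes frame-locals semantics in 3.13):

```python
import ctypes
import inspect


def shout_locals():
    """Upper-case every string local in the caller's frame (CPython <= 3.12)."""
    frame = inspect.currentframe().f_back
    locs = frame.f_locals  # one snapshot of the fast locals
    for key, value in list(locs.items()):
        if isinstance(value, str):
            locs[key] = value.upper()
    # Flush the mutated dict back into the frame's fast-locals array.
    ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame), ctypes.c_int(0))


def demo():
    greeting = "hello"
    shout_locals()
    print(greeting)  # HELLO


demo()
```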
+ (This only happens if the context is the root-most context, and its parent is therefore the root `Graph`) + As a `Context` is itself a `Protocol`, it defines how to execute it's sub-graph in the `execute` method. + + + Attributes: + + backend (Backend): Backend to execute the deferred root computation graph + """ + + def __init__( + self, + *args, + backend: Optional[Backend] = None, + parent: Optional[GraphType] = None, + graph: Optional[GraphType] = None, + graph_class: Type[SubGraph] = SubGraph, + node_class: Type[NodeType] = Node, + proxy_class: Type[ProxyType] = Proxy, + debug: bool = False, + **kwargs, + ) -> None: + + # If this is the root graph, we want to execute it upon exit. + # Otherwise its a child context/graph and all we want to + if backend is None and parent is None: + backend = ExecutionBackend(injection=CONFIG.APP.FRAME_INJECTION) + + self.backend = backend + + if parent is None: + parent = Graph(node_class=node_class, proxy_class=proxy_class, debug=debug) + parent.stack.append(parent) + + self.graph = graph_class(*args, parent, **kwargs) + + self.graph.stack.append(self.graph) + + if graph is not None: + graph.copy(self.graph) + + self.args = [] + self.kwargs = {} + + def __enter__(self) -> Self: + + return self + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + + graph = self.graph.stack.pop() + + if isinstance(exc_val, BaseException): + raise exc_val + + self.add(graph.stack[-1], graph, *self.args, **self.kwargs) + + if self.backend is not None: + + graph = graph.stack.pop() + + graph.alive = False + + self.backend(graph) + + def vis(self, *args, **kwargs): + viz_graph(self.graph, *args, **kwargs) + + @classmethod + def execute(cls, node: NodeType): + + graph: GraphType = node.args[0] + + graph.reset() + graph.execute() + + node.set_value(None) + \ No newline at end of file diff --git a/src/nnsight/tracing/contexts/conditional.py b/src/nnsight/tracing/contexts/conditional.py new file mode 100755 index 00000000..50c17e09 --- /dev/null +++ b/src/nnsight/tracing/contexts/conditional.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +from typing import Any, Dict, Optional + +from ...tracing.graph import NodeType, SubGraph +from ..contexts import Context + + +class Condition(Context[SubGraph]): + + def __init__( + self, condition: Optional[NodeType], branch: Optional[NodeType] = None, *args, **kwargs + ) -> None: + super().__init__(*args, **kwargs) + + self.args = [condition, branch] + self.index = None + + def else_(self, condition: Optional[Any] = None): + + return Condition( + condition, + branch=self.graph.nodes[self.index], + parent=self.graph.stack[-1], + ) + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + super().__exit__(exc_type, exc_val, exc_tb) + + self.index = self.graph.nodes[-1].index + + @classmethod + def execute(cls, node: NodeType): + graph, condition, branch = node.args + + graph: SubGraph + + condition: Any + condition, branch = node.prepare_inputs((condition, branch)) + + # else case has a True condition + if condition is None and not branch: + condition = True + + if not branch and condition: + + graph.reset() + graph.execute() + + node.set_value(True) + else: + graph.clean() + node.set_value(branch) + + @classmethod + def style(cls) -> Dict[str, Any]: + """Visualization style for this protocol node. + + Returns: + - Dict: dictionary style. 
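A hedged usage sketch of `Condition`, going through the `Tracer.cond` helper added later in this patch: the body's nodes are deferred into the condition's sub-graph and only execute when the condition's value is truthy. Import path and execution behavior are inferred from this diff, so treat it as a sketch rather than canonical docs.

```python
from nnsight.tracing.contexts import Tracer

with Tracer() as tracer:

    x = tracer.apply(int, 5)  # deferred: int(5)

    with tracer.cond(x > 3):
        tracer.log("big")  # these nodes live in the Condition's sub-graph

# When the root tracer exits, the graph executes and "big" is printed, since
# 5 > 3. An `.else_()` call would chain a second Condition whose `branch`
# argument points back at this one, giving elif/else semantics.
```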
+ """ + + default_style = super().style() + + default_style["node"] = {"color": "#FF8C00", "shape": "polygon", "sides": 6} + default_style["edge"][2] = {"style": "solid", "label": "branch", "color": "#FF8C00", "fontsize": 10} + + return default_style + diff --git a/src/nnsight/tracing/contexts/globals.py b/src/nnsight/tracing/contexts/globals.py new file mode 100755 index 00000000..517ecc5f --- /dev/null +++ b/src/nnsight/tracing/contexts/globals.py @@ -0,0 +1,188 @@ +from __future__ import annotations + +import inspect +from contextlib import AbstractContextManager +from functools import wraps +from types import FunctionType, MethodType +from typing import Any, Type, Union + +from ... import util +from ..graph import Graph +from . import Tracer + + +def global_patch_class(cls: type) -> util.Patch: + + if cls.__new__ is object.__new__: + + def super_new(cls, *args, **kwargs): + + return object.__new__(cls) + + cls.__new__ = super_new + + fn = cls.__new__ + + @wraps(fn) + def inner(cls, *args, **kwargs): + + if not GlobalTracingContext.GLOBAL_TRACING_CONTEXT: + return cls(*args, **kwargs) + + return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.apply(cls, *args, **kwargs) + + return util.Patch(cls, inner, "__new__") + + +def global_patch_fn(fn: FunctionType) -> util.Patch: + + @wraps(fn) + def inner(*args, **kwargs): + + if not GlobalTracingContext.GLOBAL_TRACING_CONTEXT: + return fn(*args, **kwargs) + + return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.apply(fn, *args, **kwargs) + + return util.Patch(inspect.getmodule(fn), inner, fn.__name__) + +def global_patch_method(cls: type, fn: MethodType) -> None: + + @wraps(fn) + def inner(*args, **kwargs): + + if not GlobalTracingContext.GLOBAL_TRACING_CONTEXT: + return fn(*args, **kwargs) + + return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.apply(fn, *args, **kwargs) + + patch = util.Patch(cls, inner, fn.__name__) + + GlobalTracingContext.PATCHER.add(patch) + + +def global_patch(obj: Union[FunctionType, Type]): + + if isinstance(obj, type): + + patch = global_patch_class(obj) + + else: + + patch = global_patch_fn(obj) + + GlobalTracingContext.PATCHER.add(patch) + +class GlobalTracingContext(Tracer): + """The Global Tracing Context handles adding tracing operations globally without reference to a given `GraphBasedContext`. + There should only be one of these and that is `GlobalTracingContext.GLOBAL_TRACING_CONTEXT`. + `GlobalTracingContext.TORCH_HANDLER` handles adding torch functions without reference to a given `GraphBasedContext`. + + """ + + GLOBAL_TRACING_CONTEXT: GlobalTracingContext + PATCHER: util.Patcher = util.Patcher() + + class GlobalTracingExit(AbstractContextManager): + + def __enter__(self) -> Any: + + GlobalTracingContext.PATCHER.__exit__(None, None, None) + + return self + + def __exit__(self, exc_type, exc_val, traceback): + + GlobalTracingContext.PATCHER.__enter__() + + if isinstance(exc_val, BaseException): + + raise exc_val + + def __init__(self) -> None: + """We create an empty `GraphBasedContext` by default.""" + + self.graph: Graph = None + + @staticmethod + def exit_global_tracing_context(): + + return GlobalTracingContext.GlobalTracingExit() + + @staticmethod + def try_register(graph_based_context: Tracer) -> bool: + """Attempts to register a `Graph` globally.] + Will not if one is already registered. + + Args: + graph_based_context (GraphBasedContext): `GraphBasedContext` to register. + + Returns: + bool: True if registering ws successful, False otherwise. 
+ """ + + if GlobalTracingContext.GLOBAL_TRACING_CONTEXT: + + return False + + GlobalTracingContext.register(graph_based_context) + + return True + + @staticmethod + def try_deregister(graph_based_context: Tracer) -> bool: + """Attempts to deregister a `Graph` globally. + Will not if `graph_based_context` does not have the same `Graph` as the currently registered one. + + Args: + graph_based_context (GraphBasedContext): `GraphBasedContext` to deregister. + + Returns: + bool: True if deregistering ws successful, False otherwise. + """ + if ( + not GlobalTracingContext.GLOBAL_TRACING_CONTEXT + or graph_based_context.graph + is not GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph + ): + + return False + + GlobalTracingContext.deregister() + + return True + + @staticmethod + def register(graph_based_context: Tracer) -> None: + """Register `GraphBasedContext` globally. + + Args: + graph_based_context (GraphBasedContext): GraphBasedContext to register. + """ + + assert GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph is None + + GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph = graph_based_context.graph + + GlobalTracingContext.PATCHER.__enter__() + + @staticmethod + def deregister() -> None: + """Deregister `GraphBasedContext` globally. + + Args: + graph_based_context (GraphBasedContext): GraphBasedContext to deregister. + """ + + assert GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph is not None + + GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph = None + + GlobalTracingContext.PATCHER.__exit__(None, None, None) + + def __bool__(self) -> bool: + """True if there is a `GraphBasedContext` registered globally. False otherwise.""" + + return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph is not None + +GlobalTracingContext.GLOBAL_TRACING_CONTEXT = GlobalTracingContext() diff --git a/src/nnsight/tracing/contexts/iterator.py b/src/nnsight/tracing/contexts/iterator.py new file mode 100755 index 00000000..b8d4b5a1 --- /dev/null +++ b/src/nnsight/tracing/contexts/iterator.py @@ -0,0 +1,76 @@ +import copy +from typing import Collection, Dict, Any + +from ...tracing.graph import SubGraph +from ...tracing.graph import Node +from ...tracing.graph import Proxy +from . import Context +from ..protocols import VariableProtocol, StopProtocol + +class Iterator(Context[SubGraph]): + + def __init__(self, collection: Collection, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.args = [collection] + + def __enter__(self) -> Proxy: + + super().__enter__() + + return VariableProtocol.add(self.graph) + + + @classmethod + def execute(cls, node: Node): + + graph, collection = node.args + + graph: SubGraph + collection: Collection + + collection = node.prepare_inputs(collection) + + variable_node = next(iter(graph)) + + graph.defer_stack.append(variable_node.index) + + for idx, value in enumerate(copy.copy(collection)): + + VariableProtocol.set(variable_node, value) + + if idx == len(collection) - 1: + graph.defer_stack.pop() + + + graph.reset() + try: + graph.execute() + except Exception as e: + + if idx != len(collection) - 1: + + graph.defer_stack.pop() + + if not isinstance(e, StopProtocol.StopException): + + raise e + + else: + break + + node.set_value(None) + + @classmethod + def style(cls) -> Dict[str, Any]: + """Visualization style for this protocol node. + + Returns: + - Dict: dictionary style. 
+ """ + + default_style = super().style() + + default_style["node"] = {"color": "blue", "shape": "polygon", "sides": 6} + + return default_style diff --git a/src/nnsight/tracing/contexts/tracer.py b/src/nnsight/tracing/contexts/tracer.py new file mode 100755 index 00000000..07479197 --- /dev/null +++ b/src/nnsight/tracing/contexts/tracer.py @@ -0,0 +1,51 @@ +from typing import Callable, TypeVar, Union +from typing_extensions import Self + +from ..graph import ProxyType, SubGraph, NodeType, Proxy +from ..protocols import StopProtocol +from . import Condition, Context, Iterator + +class Tracer(Context[SubGraph[NodeType, ProxyType]]): + + + def __enter__(self) -> Self: + + from .globals import GlobalTracingContext + + GlobalTracingContext.try_register(self) + + return super().__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + + from .globals import GlobalTracingContext + + GlobalTracingContext.try_deregister(self) + + return super().__exit__(exc_type, exc_val, exc_tb) + + def iter(self, collection): + + return Iterator(collection, parent=self.graph) + + def cond(self, condition): + + return Condition(condition, parent=self.graph) + + def stop(self): + + StopProtocol.add(self.graph) + + def log(self, *args): + + self.apply(print, *args) + + R = TypeVar('R') + + def apply(self, target: Callable[..., R], *args, **kwargs) -> Union[Proxy, R]: + + return self.graph.create( + target, + *args, + **kwargs, + ) diff --git a/src/nnsight/tracing/graph/__init__.py b/src/nnsight/tracing/graph/__init__.py new file mode 100755 index 00000000..6a9c2169 --- /dev/null +++ b/src/nnsight/tracing/graph/__init__.py @@ -0,0 +1,4 @@ +from .proxy import Proxy, ProxyType +from .node import Node, NodeType +from .graph import Graph, SubGraph, GraphType +from .viz import viz_graph diff --git a/src/nnsight/tracing/graph/graph.py b/src/nnsight/tracing/graph/graph.py new file mode 100755 index 00000000..daa88f86 --- /dev/null +++ b/src/nnsight/tracing/graph/graph.py @@ -0,0 +1,342 @@ +from __future__ import annotations +from typing import (Callable, Dict, Generic, Iterator, List, Optional, + Tuple, Type, TypeVar, Union, overload) + +from typing_extensions import Self + +from ... import util +from ...util import NNsightError +from .. import protocols +from . import Node, NodeType, Proxy, ProxyType + + +class Graph(Generic[NodeType, ProxyType]): + """The `Graph` class represents a computation graph composed of individual `Node`s (operations). + It contains logic to both trace/build the computation graph, as well as how to execute it. + Sections of the graph can be divided into `SubGraphs`, but there will always be one root `Graph`. + The final `Node` of the graph (graph[-1]) should be the root `Node` which when executed, downstream executes the entire `Graph`. + + Attributes: + node_class (Type[NodeType]): Class used to create `Node`s. Can be changed to add additional functionality to `Node's. Defaults to `Node`. + proxy_class (Type[ProxyType]): Class used to create `Proxy`s for 'Node's. Can be changed to add additional functionality to `Proxy's. Defaults to `Proxy`. + nodes (List[Node]): Ordered list of all `Node`s. Used to access `Nodes` via their index. + stack (List[Graph]): List of `Graph`s as a stack. Used to move `Node`s onto the most recent graph, as opposed to the `Graph` used to create the `Node`. + Managed outside the `Graph` class by the `Context` objects. + defer_stack (List[int]): List of `Node` indexes as a stack. 
Used to prevent destruction/memory cleanup of `Node`s whose index is less than the most recent index on the stack. + This happens when you have `Node`s that will be executed more than once. In a loop for example, you only want to destroy a `Node`s dependencies on the final iteration. + Also managed outside the `Graph` object. + alive (bool): If the `Graph` is "alive". Alive meaning its still open for tracing (adding new `Node`s). Set to False before executing the `Graph`. + """ + + def __init__( + self, + node_class: Type[NodeType] = Node, + proxy_class: Type[ProxyType] = Proxy, + debug: bool = False, + ) -> None: + + self.node_class = node_class + self.proxy_class = proxy_class + self.debug = debug + + self._alive = [True] + + self.nodes: List[Node] = [] + self.stack: List[Graph] = [] + self.defer_stack: List[int] = [] + + @property + def alive(self) -> bool: + return self._alive[0] + + @alive.setter + def alive(self, value: bool): + + self._alive[0] = value + + def reset(self) -> None: + """Resets the `Graph` to prepare for execution. + Simply resets all `Node`s in the `Graph`. + """ + + for node in self: + node.reset() + + def execute(self) -> None: + """Executes all `Node`s (operations) in this `Graph`. + + Raises: + exception: If there is an exception during executing a `Node`. If so, we need to clean up the dependencies of `Node`s yet to be executed. + """ + + err: Tuple[int, NNsightError] = None + + for node in self: + try: + node.execute() + except NNsightError as e: + err = (node.index, e) + break + + if err is not None: + defer_stack = self.defer_stack.copy() + self.defer_stack.clear() + self.clean(err[0]) + self.defer_stack.extend(defer_stack) + raise err[1] + + def clean(self, start: Optional[int] = None): + """Cleans up dependencies of `Node`s so their values are appropriately memory managed. + Cleans all `Node`s from start to end regardless if they are on this `Graph`. + + Args: + start (Optional[int], optional): `Node` index to start cleaning up from. Defaults to None. + """ + + if len(self) == 0: + return + + if start is None: + start = self[0].index + + end = self[-1].index + 1 + + # Loop over ALL nodes within the span of this graph. + for index in range(start, end): + + node = self.nodes[index] + + node.update_dependencies() + + def create( + self, + target: Union[Callable, protocols.Protocol], + *args, + redirect: bool = True, + **kwargs, + ) -> ProxyType: + """Creates a new `Node` using this `Graph`'s node_class and returns a `Proxy` for it with this `Graph`'s proxy_class. + + Args: + target (Union[Callable, protocols.Protocol]): Target for the new `Node`. + redirect (bool, optional): If to move the newly created `Node` to the most recent `Graph` on the Graph.stack. Defaults to True. + + Returns: + ProxyType: `Proxy` for newly created `Node`. + """ + + # Redirection. + graph = self.stack[-1] if redirect and self.stack else self + + return self.proxy_class(self.node_class(target, *args, graph=graph, **kwargs)) + + def add(self, node: NodeType) -> None: + """Adds a `Node` to this `Graph`. + Sets the `Node`'s .index attribute so it knows its own index within the entire computation graph. + + Args: + node (NodeType): `Node` to add. + """ + + # Tag the Node with its own index. + node.index = len(self.nodes) + + # Add Node. + self.nodes.append(node) + + def copy(self, new_graph: Optional[Graph[NodeType, ProxyType]] = None) -> Graph: + """Creates a shallow copy of the root `Graph` object. 
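The `redirect` logic in `Graph.create` above is what lets nested contexts capture nodes: whichever graph sits on top of the shared stack receives them, regardless of which `Graph` object `.create()` was called on. A toy illustration of that rule (the classes here are stand-ins):

```python
class ToyGraph:
    """Stand-in showing only the stack-redirection rule."""

    def __init__(self, stack):
        self.stack = stack  # shared between the root and its sub-graphs
        self.nodes = []

    def create(self, op):
        target = self.stack[-1] if self.stack else self
        target.nodes.append(op)


stack = []
root, inner = ToyGraph(stack), ToyGraph(stack)

stack.append(root)
root.create("a")  # lands on root
stack.append(inner)
root.create("b")  # redirected to inner, the top of the stack
stack.pop()

print(root.nodes, inner.nodes)  # ['a'] ['b']
```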
+ + Args: + new_graph (Optional[Graph[NodeType, ProxyType]], optional): `Graph` to copy into. Defaults to None and creates a new `Graph`. + + Returns: + Graph: New `Graph`. + """ + + if new_graph is None: + new_graph = Graph(node_class=self.node_class, proxy_class=self.proxy_class, debug=self.debug) + + node = self[-1] + + def process(arg: Union[Node, SubGraph]): + + if isinstance(arg, SubGraph): + return arg.copy(parent=new_graph) + + if arg.done: + return arg.value + + new_graph.create( + node.target, + *util.apply(node.args, process, (Node, SubGraph)), + **util.apply(node.kwargs, process, (Node, SubGraph)), + ) + + return new_graph + + ### Magic Methods ###################################### + + def __str__(self) -> str: + result = f"{self.__class__.__name__}:\n" + + for node in self: + result += f" {str(node)}\n" + + return result + + @overload + def __getitem__(self, key: int) -> Node: ... + + @overload + def __getitem__(self, key: Union[slice, List[int]]) -> List[Node]: ... + + def __getitem__(self, key: Union[int, Union[slice, List[int]]]) -> Union[Node, List[Node]]: + return self.nodes[key] + + def __iter__(self) -> Iterator[Node]: + return iter(self.nodes) + + def __len__(self) -> int: + return len(self.nodes) + + +class SubGraph(Graph[NodeType, ProxyType]): + """Represents a slice of the greater computation graph. It has a reference to the same underlying list of nodes and simply maintains a subset of node indexes. + + Attributes: + subset (List[int]): Node indexes for `Node`s contained within this subgraph. + """ + + def __init__( + self, + parent: GraphType, + subset: Optional[List[int]] = None, + ): + """Init + + Args: + parent (GraphType): Graph to inherit attributes from. + subset (Optional[List[int]], optional): Subset to start from when loading a pre-defined `SubGraph` + """ + + self.__dict__.update(parent.__dict__) + + self.subset: List[int] = [] if subset is None else subset + + def __getstate__(self): + + return { + "nodes":self.nodes, + "subset":self.subset, + "defer_stack": self.defer_stack, + } + + def __setstate__(self, state: Dict) -> None: + + self.__dict__.update(state) + + def add(self, node: NodeType) -> None: + + super().add(node) + + # Also add the index to this SubGraph's subset upon adding. + self.subset.append(self.nodes[-1].index) + + @overload + def __getitem__(self, key: int) -> Node: ... + + @overload + def __getitem__(self, key: Union[slice, List[int]]) -> List[Node]: ... + + def __getitem__(self, key: Union[int, Union[slice, List[int]]]) -> Union[Node, List[Node]]: + + index = self.subset[key] + + # We iterate over indexes and get their Nodes. + node = ( + [self.nodes[idx] for idx in index] + if isinstance(index, list) + else self.nodes[index] + ) + + return node + + def __iter__(self) -> Iterator[Node]: + return self.Iterator(self) + + def __len__(self) -> int: + return len(self.subset) + + class Iterator(Iterator): + + def __init__(self, subgraph: SubGraph[GraphType]) -> None: + + self.subgraph = subgraph + self.start = 0 + self.end = len(self.subgraph) + + def __next__(self) -> NodeType: + + if self.start < self.end: + value = self.subgraph[self.start] + self.start += 1 + return value + + raise StopIteration + + def copy( + self, + new_graph: Optional[SubGraph[NodeType, ProxyType]] = None, + parent: Optional[Graph[NodeType, ProxyType]] = None, + memo: Optional[Dict[int, NodeType]] = None, + ) -> Self: + """Creates a shallow copy of this SubGraph. + + Args: + new_graph (Optional[SubGraph[NodeType, ProxyType]], optional): SubGraph to copy into. 
Defaults to None and creates a new SubGraph of the same type. + parent (Optional[Graph[NodeType, ProxyType]], optional): Parent graph. Defaults to None and will create a root `Graph` as the parent. + + Returns: + Self: New graph. + """ + if parent is None: + parent = Graph(node_class=self.node_class, proxy_class=self.proxy_class) + + if new_graph is None: + new_graph = type(self)(parent) + + if memo is None: + memo = {} + + def process(arg: Union[Node, SubGraph]): + + if isinstance(arg, SubGraph): + return arg.copy(parent=new_graph, memo=memo) + + if arg.done: + return arg.value + + return new_graph.nodes[memo[arg.index]] + + for node in self: + + new_node = new_graph.create( + node.target, + *util.apply(node.args, process, (Node, SubGraph)), + **util.apply(node.kwargs, process, (Node, SubGraph)), + ).node + + memo[node.index] = new_node.index + + return new_graph + + +# class MultiGraph(Graph): + + +# def __init__(self, *args, **kwargs) -> None: +# super().__init__(proxy_class, validate) + + +GraphType = TypeVar("GraphType", bound=SubGraph) diff --git a/src/nnsight/tracing/graph/node.py b/src/nnsight/tracing/graph/node.py new file mode 100755 index 00000000..4eeaacd3 --- /dev/null +++ b/src/nnsight/tracing/graph/node.py @@ -0,0 +1,423 @@ +from __future__ import annotations + +import inspect +import re +import traceback +from typing import (TYPE_CHECKING, Any, Callable, Dict, List, + Optional, Set, TypeVar, Union) + +from typing_extensions import Self + +from ... import util +from ..protocols import Protocol +from .proxy import Proxy, ProxyType + +from ...util import NNsightError + +if TYPE_CHECKING: + from .graph import Graph + + +class Node: + """A computation `Graph` is made up of individual `Node`s which represent a single operation. + It has a `target` which the operation this `Node` will execute. + It has `args` and `kwargs` to execute its `target` with. These may contain other `Node`s and are therefore `dependencies` of this `Node`. + Conversely this `Node` is a `listener` of its `dependencies`. + + During execution of the computation graph and therefore the `Node`s, each + + Attributes: + index (Optional[int]): Integer index of this `Node` within its greater computation graph. + graph (Graph): + target (Union[Callable, Protocol]): Callable to execute as this `Node`'s operation. Might be a `Protocol` which is handled differently in node execution. + """ + + def __init__( + self, + target: Union[Callable, Protocol], + *args, + graph: "Graph" = None, + **kwargs, + ) -> None: + + self.index: Optional[int] = None + + # No tuples. Only lists. + args = list(args) + + self.graph: "Graph" = graph + + self.target = target + + self.args = args + self.kwargs = kwargs + + self._listeners: Set[int] = set() + self._dependencies: Set[int] = set() + + self._value: Any = inspect._empty + self.remaining_listeners = 0 + self.remaining_dependencies = 0 + self.executed = False + + self.meta_data = self._meta_data() + + # If theres an alive Graph, add it. + if self.attached: + + self.graph.add(self) + + # Preprocess args. + self.preprocess() + + def __getstate__(self): + + state = self.__dict__.copy() + + return state + + def __setstate__(self, state: Dict) -> None: + + self.__dict__.update(state) + + @property + def listeners(self) -> List[Self]: + """Iterator from index to `Node`. + + Returns: + List[Self]: List of listener `Node`s. + """ + + return [self.graph.nodes[index] for index in self._listeners] + + @property + def dependencies(self) -> List[Self]: + """Iterator from index to `Node`. 
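Note the design choice in the `Node` class above: `_listeners` and `_dependencies` hold integer indexes into the shared node list rather than direct object references, which avoids reference cycles, keeps `Node`s serializable, and lets `SubGraph`s share one node store. A toy version of index-based adjacency:

```python
from dataclasses import dataclass, field
from typing import List, Set


@dataclass
class ToyNode:
    name: str
    index: int = -1
    listeners: Set[int] = field(default_factory=set)
    dependencies: Set[int] = field(default_factory=set)


nodes: List[ToyNode] = []


def add(node: ToyNode, deps: List[ToyNode]) -> ToyNode:
    node.index = len(nodes)
    nodes.append(node)
    for dep in deps:
        node.dependencies.add(dep.index)  # edges are integers, not objects
        dep.listeners.add(node.index)
    return node


a = add(ToyNode("a"), [])
b = add(ToyNode("b"), [a])
print([nodes[i].name for i in a.listeners])  # ['b']
```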
+ + Returns: + List[Self]: List of dependency `Node`s. + """ + + return [self.graph.nodes[index] for index in self._dependencies] + + def preprocess(self) -> None: + """Preprocess Node.args and Node.kwargs. + Converts Proxies to their Node. + Converts Nodes that are done to their value. + Adds Node arguments to self dependencies. + Add self to Node argument listeners. + """ + + def preprocess_node(node: Union[NodeType, ProxyType]): + + if isinstance(node, Proxy): + + node = node.node + + if node.done: + + return node.value + + self._dependencies.add(node.index) + node._listeners.add(self.index) + + return node + + self.args, self.kwargs = util.apply( + (self.args, self.kwargs), preprocess_node, (Node, Proxy) + ) + + ### Properties ######################## + @property + def value(self) -> Any: + """Property to return the value of this node. + + Returns: + Any: The stored value of the node, populated during execution. + + Raises: + ValueError: If the underlying ._value is inspect._empty (therefore never set or was destroyed). + """ + + if not self.done: + raise ValueError("Accessing value before it's been set.") + + return self._value + + @property + def attached(self) -> bool: + """Checks to see if the `Graph` this `Node` is a part of is alive.. + Alive meaning the Graph is still open to tracing new Nodes. + + Returns: + bool: Is Node attached. + """ + + try: + + return self.graph.alive + + except: + return False + + @property + def done(self) -> bool: + """Returns true if the value of this node has been set. + + Returns: + bool: If done. + """ + return self._value is not inspect._empty + + @property + def fulfilled(self) -> bool: + """Returns true if remaining_dependencies is 0. + + Returns: + bool: If fulfilled. + """ + return self.remaining_dependencies == 0 + + @property + def redundant(self) -> bool: + """Returns true if remaining_listeners is 0. + + Returns: + bool: If redundant. + """ + return self.remaining_listeners == 0 + + ### API ############################# + def reset(self) -> None: + """Resets this Nodes remaining_listeners and remaining_dependencies.""" + + self.executed = False + self._value = inspect._empty + + self.remaining_listeners = len(self._listeners) + self.remaining_dependencies = sum( + [not node.executed for node in self.dependencies] + ) + + def create( + self, + *args, + **kwargs, + ) -> Union[NodeType, Any]: + """We use Node.create vs Graph.create in case graph is dead. + If the graph is dead, we first check the GlobalTracing Context to add + assume this node is ready to execute and therefore we try and execute it and then return its value. + + Returns: + Union[NodeType, Any]: Proxy or value + """ + + if not self.attached: + + from ..contexts.globals import GlobalTracingContext + + if GlobalTracingContext.GLOBAL_TRACING_CONTEXT: + + return GlobalTracingContext.GLOBAL_TRACING_CONTEXT.graph.create( + *args, + **kwargs, + ) + + # Create dangling Node. + node = type(self)( + *args, + **kwargs, + ) + + # Reset it. + node.reset() + + # So it doesn't get destroyed. + node.remaining_listeners = 1 + + # Execute Node + node.execute() + + # Get value. + value = node.value + + # Destroy. + node.destroy() + + return value + + # Otherwise just create the Node on the Graph like normal. + return self.graph.create( + *args, + **kwargs, + ) + + @classmethod + def prepare_inputs(cls, inputs: Any) -> Any: + """Prepare arguments for executing this node's target. + Converts Nodes in args and kwargs to their value. + + Returns: + Any: Prepared inputs. 
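The fallback in `Node.create` above gives proxies dual semantics: operations are deferred while their graph is alive, but run eagerly once it is dead. A toy illustration of that duality (not nnsight API):

```python
from typing import Any, Callable, List


class ToyTrace:
    def __init__(self):
        self.alive = True
        self.recorded: List[str] = []

    def create(self, target: Callable, *args) -> Any:
        if self.alive:
            self.recorded.append(target.__name__)  # defer: record for later
            return None                            # (a real trace returns a Proxy)
        return target(*args)                       # dead: execute eagerly


trace = ToyTrace()
trace.create(len, "abc")         # recorded while tracing
trace.alive = False
print(trace.create(len, "abc"))  # 3, executed immediately after tracing ends
```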
+ """ + + inputs = util.apply(inputs, lambda x: x, inspect._empty) + + def _value(node: Union[ProxyType, NodeType]): + + if isinstance(node, Proxy): + node = node.node + + return node.value + + inputs = util.apply(inputs, _value, (Node, Proxy)) + + return inputs + + def execute(self) -> None: + """Actually executes this node. + Lets protocol execute if target is Protocol. + Else prepares args and kwargs and passes them to target. Gets output of target and sets the Node's value to it. + """ + + self.executed = True + + try: + + if isinstance(self.target, type) and issubclass(self.target, Protocol): + + self.target.execute(self) + + else: + + # Prepare arguments. + args, kwargs = self.prepare_inputs((self.args, self.kwargs)) + + # Call the target to get value. + output = self.target(*args, **kwargs) + + + # Set value. + self.set_value(output) + except NNsightError as e: + raise e + except Exception as e: + traceback_content = traceback.format_exc() + raise NNsightError(str(e), self.index, traceback_content) + + def set_value(self, value: Any) -> None: + """Sets the value of this Node and logs the event. + Updates remaining_dependencies of listeners. If they are now fulfilled, execute them. + Updates remaining_listeners of dependencies. If they are now redundant, destroy them. + + Args: + value (Any): Value. + """ + self._value = value + + if self.graph is not None: + + self.update_listeners() + + self.update_dependencies() + + if self.done and self.redundant: + self.destroy() + + def update_listeners(self): + """Updates remaining_dependencies of listeners.""" + + for listener in self.listeners: + listener.remaining_dependencies -= 1 + + def update_dependencies(self): + """Updates remaining_listeners of dependencies. If they are now redundant, destroy them.""" + + for dependency in self.dependencies: + if len(self.graph.defer_stack) > 0 and dependency.index < self.graph.defer_stack[-1]: + continue + + dependency.remaining_listeners -= 1 + + if dependency.redundant: + dependency.destroy() + + def destroy(self) -> None: + """Removes the reference to the node's value and logs it's destruction.""" + + self._value = inspect._empty + + def subgraph(self, subgraph: Optional[Set[int]] = None) -> Set[int]: + """Returns a Set of indexes starting from this node, and recursively iterating over all the Node's listeners. + + Args: + subgraph (Optional[Set[int]], optional): Current subgraph. Defaults to None. + + Returns: + Set[int]: Set of Node indexes. + """ + + if subgraph is None: + subgraph = set() + + if self.index in subgraph: + return subgraph + + subgraph.add(self.index) + + for listener in self.listeners: + listener.subgraph(subgraph) + + return subgraph + + def _meta_data(self) -> Dict[str, Any]: + """ Creates a dictionary of meta-data for this node. + Contains the following key-value pairs: + - traceback: Optional[str]: If the Graph is in debug mode, + a traceback string is compiled to be used if the execution of this Node raises an error. + + Returns: + Dict[str, Any]: Meta-Data dictionary. + """ + + meta_data = dict() + + def traceback_str() -> str: + """ Compiles a string of all the lines in the Traceback up until nnsight code is called. 
+ Returns: + Str: Call Stack + """ + traceback_str = "" + stack = traceback.extract_stack() + for frame in stack: + # exclude frames created by nnsight or from the python environment + if not bool(re.search((r'/lib/python3\.\d+/'), frame.filename)) and not ('/nnsight/src/nnsight/' in frame.filename): + traceback_str += f" File \"{frame.filename}\", line {frame.lineno}, in {frame.name}\n" + traceback_str += f" {frame.line}\n" + else: + if traceback_str == "": + continue + else: + break + + traceback_str = "Traceback (most recent call last):\n" + traceback_str + + return traceback_str + + if self.attached and self.graph.debug: + meta_data["traceback"] = traceback_str() + + return meta_data + + ### Magic Methods ##################################### + def __str__(self) -> str: + return f"{self.target.__name__} {self.index}" + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} at {hex(id(self))}>" + + def __hash__(self) -> int: + return id(self) + + +NodeType = TypeVar("NodeType", bound=Node) diff --git a/src/nnsight/tracing/graph/proxy.py b/src/nnsight/tracing/graph/proxy.py new file mode 100755 index 00000000..233fb296 --- /dev/null +++ b/src/nnsight/tracing/graph/proxy.py @@ -0,0 +1,338 @@ +from __future__ import annotations + +import inspect +import operator +from functools import wraps +from typing import TYPE_CHECKING, Any, Callable, Iterator, TypeVar, Union + +from typing_extensions import Self + +from ... import CONFIG, util +from .. import protocols + +if TYPE_CHECKING: + from .node import Node + + +class Proxy: + """Proxy objects are the actual objects that interact with operations in order to update the graph to create new Nodes. + + The operations that are traceable on base Proxy objects are many python built-in and magic methods. + + Attributes: + node (NodeType): This proxy's Node. + """ + + def __init__(self, node: "Node") -> None: + + self.__dict__["node"] = node + + self.node: "Node" + + ### API ############################## + + def save(self) -> Self: + """Adds a lock Node to prevent its value from being cleared where normally it would be cleared when its no longer needed to save memory. + Used to access values outside of the tracing context, after execution. + + Returns: + InterventionProxy: Proxy. + """ + + # Add a 'lock' node with the save proxy as an argument to ensure the values are never deleted. + # This is because 'lock' nodes never actually get set and therefore there will always be a + # dependency for the save proxy. + + protocols.LockProtocol.add(self.node) + + return self + + def stop(self) -> None: + protocols.StopProtocol.add( + self.node.graph, + self.node, + ) + + @property + def value(self) -> Any: + """Property to return the value of this proxy's node. + + Returns: + Any: The stored value of the proxy, populated during execution of the model. + """ + + return self.node.value + + def __str__(self) -> str: + + if not self.node.attached: + + return str(self.value) + + return f"{type(self).__name__} ({self.node.target.__name__})" + + def __repr__(self) -> str: + + if not self.node.attached: + + return repr(self.value) + + return str(self) + + ### Special ################ + + @staticmethod + def call(callable: Callable, *args, **kwargs) -> Self: + return callable(*args, **kwargs) + + def __call__(self, *args, **kwargs) -> Self: + """ + Calling a Proxy object just creates a Proxy.proxy_call operation. + + Returns: + Proxy: New call proxy. 
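In practice, `save()` above is what lets values survive a trace. A hedged end-to-end sketch in nnsight's usual idiom; the model name and module path are illustrative, and with `FRAME_INJECTION` enabled the saved local is swapped for the concrete tensor when the trace exits:

```python
from nnsight import LanguageModel

model = LanguageModel("openai-community/gpt2", device_map="auto")

with model.trace("Hello"):
    # Without .save(), this hidden state would be freed during execution
    # as soon as its last in-graph listener consumed it.
    hidden = model.transformer.h[5].output[0].save()

print(hidden.shape)  # concrete tensor shape, available after the trace exits
```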
+ """ + + return self.node.create( + Proxy.call, + *([self.node] + list(args)), + **kwargs, + ) + + def __getattr__(self, key: Union[Self, Any]) -> Self: + return self.node.create(util.fetch_attr, self.node, key) + + def __setattr__(self, key: Union[Proxy, Any], value: Union[Self, Any]) -> None: + + if key == "__dict__": + + super().__setattr__(key, value) + + return + + return self.node.create( + setattr, + self.node, + key, + value, + ) + + ### Regular Operators ######################### + + def __getitem__(self, key: Union[Self, Any]) -> Self: + return self.node.create(operator.getitem, self.node, key) + + def __setitem__(self, key: Union[Self, Any], value: Union[Self, Any]) -> None: + self.node.create( + operator.setitem, + self.node, + key, + value, + ) + + def __abs__(self) -> Self: + return self.node.create( + operator.abs, + self.node, + ) + + def __invert__(self) -> Self: + return self.node.create( + operator.invert, + self.node, + ) + + def __neg__(self) -> Self: + return self.node.create( + operator.neg, + self.node, + ) + + def __add__(self, other: Union[Self, Any]) -> Self: + return self.node.create(operator.add, self.node, other) + + def __radd__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.add, + other, + self.node, + ) + + def __sub__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.sub, + self.node, + other, + ) + + def __rsub__(self, other: Union[Self, Any]) -> Self: + return self.node.create(operator.sub, other, self.node) + + def __pow__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.pow, + self.node, + other, + ) + + def __rpow__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.pow, + other, + self.node, + ) + + def __mul__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.mul, + self.node, + other, + ) + + def __rmul__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.mul, + other, + self.node, + ) + + def __mod__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.mod, + self.node, + other, + ) + + def __rmod__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.mod, + other, + self.node, + ) + + def __matmul__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.matmul, + self.node, + other, + ) + + def __rmatmul__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.matmul, + other, + self.node, + ) + + def __truediv__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.truediv, + self.node, + other, + ) + + def __rtruediv__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.truediv, + other, + self.node, + ) + + def __floordiv__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.floordiv, + self.node, + other, + ) + + def __rfloordiv__(self, other: Union[Self, Any]) -> Self: + return self.node.create( + operator.floordiv, + other, + self.node, + ) + + def __eq__(self, other: Union[Self, Any]) -> Self: + return self.node.create(operator.eq, self.node, other) + + def __ne__(self, other: Union[Self, Any]) -> Self: + return self.node.create(operator.ne, self.node, other) + + def __lt__(self, other: Union[Self, Any]) -> Self: + return self.node.create(operator.lt, self.node, other) + + def __gt__(self, other: Union[Self, Any]) -> Self: + return self.node.create(operator.gt, self.node, other) + + def __le__(self, 
other: Union[Self, Any]) -> Self: + return self.node.create(operator.le, self.node, other) + + def __ge__(self, other: Union[Self, Any]) -> Self: + return self.node.create(operator.ge, self.node, other) + + def __index__(self) -> Self: + return self.node.create(operator.index, self.node) + + def __len__(self) -> Self: + return self.node.create( + len, + self.node, + ) + + ### Hacks ############################## + + def __iter__(self) -> Iterator[Self]: + + if not CONFIG.APP.CONTROL_FLOW_HACKS: + raise Exception( + 'Iteration control flow encountered but "CONFIG.APP.CONTROL_FLOW_HACKS" is set to False' + ) + + from ..hacks import iterator + + return iterator.handle_proxy(inspect.currentframe().f_back, self) + + def __bool__(self) -> Self: + + if not CONFIG.APP.CONTROL_FLOW_HACKS: + raise Exception( + 'Conditional control flow encountered but "CONFIG.APP.CONTROL_FLOW_HACKS" is set to False' + ) + + from ..hacks import conditional + + return conditional.handle_proxy(inspect.currentframe().f_back, self) + + def __instancecheck__(self, __instance: Any) -> bool: + return self.node.fake_value.__instancecheck__(__instance) + + +ProxyType = TypeVar("ProxyType", bound=Proxy) + + +def proxy_patch(fn: Callable): + + @wraps(fn) + def inner(*args, **kwargs): + + found: Proxy = None + + def find(proxy: Proxy): + + nonlocal found + + found = proxy + + util.apply((args, kwargs), find, Proxy) + + if found is not None: + + return found.node.graph.create( + fn, + *args, + **kwargs, + ) + + return fn(*args, **kwargs) + + return inner diff --git a/src/nnsight/tracing/graph/viz.py b/src/nnsight/tracing/graph/viz.py new file mode 100644 index 00000000..80b9127c --- /dev/null +++ b/src/nnsight/tracing/graph/viz.py @@ -0,0 +1,222 @@ +import tempfile +from collections import defaultdict +from collections.abc import Iterable +from typing import TYPE_CHECKING, Dict + + +import torch +from PIL import Image as PILImage + +from ..protocols import Protocol +from . import Node, SubGraph + +if TYPE_CHECKING: + from . import Graph + + +def viz_graph( + graph: "Graph", + title: str = "graph", + display: bool = True, + save: bool = False, + path: str = ".", + recursive: bool = False, + group: bool = False, + ) -> None: + """ + Utility funciton to visualize the NNsight Graph structures built during tracing. + + Args: + - graph (Graph): NNsight Graph to be visualized. + - title (str): Title given to the visualization. Default: "graph". + - display (bool): Displays the rendered graph visualization. Default: True. + - save (bool): Saves the rendered graph to a file with the title as the name. Default: False. + - path (str): Path to store the saved visualization. Default: ".". + - recursive (bool): Recursively visualize all the inner Subgraphs of a given Graph. Default: False. + - group (bool): Visually group all the nodes belonging to the same Subgraph together. Default: False. + """ + + from ..contexts.globals import GlobalTracingContext + + with GlobalTracingContext.exit_global_tracing_context(): + + try: + + import pygraphviz as pgv + + except Exception as e: + + raise type(e)( + "Visualization of the Graph requires `pygraphviz` which requires `graphviz` to be installed on your machine." 
diff --git a/src/nnsight/tracing/graph/viz.py b/src/nnsight/tracing/graph/viz.py
new file mode 100644
index 00000000..80b9127c
--- /dev/null
+++ b/src/nnsight/tracing/graph/viz.py
@@ -0,0 +1,222 @@
+import tempfile
+from collections import defaultdict
+from collections.abc import Iterable
+from typing import TYPE_CHECKING, Dict
+
+import torch
+from PIL import Image as PILImage
+
+from ..protocols import Protocol
+from . import Node, SubGraph
+
+if TYPE_CHECKING:
+    from . import Graph
+
+
+def viz_graph(
+    graph: "Graph",
+    title: str = "graph",
+    display: bool = True,
+    save: bool = False,
+    path: str = ".",
+    recursive: bool = False,
+    group: bool = False,
+) -> None:
+    """
+    Utility function to visualize the NNsight Graph structures built during tracing.
+
+    Args:
+        - graph (Graph): NNsight Graph to be visualized.
+        - title (str): Title given to the visualization. Default: "graph".
+        - display (bool): Displays the rendered graph visualization. Default: True.
+        - save (bool): Saves the rendered graph to a file with the title as the name. Default: False.
+        - path (str): Path to store the saved visualization. Default: ".".
+        - recursive (bool): Recursively visualize all the inner Subgraphs of a given Graph. Default: False.
+        - group (bool): Visually group all the nodes belonging to the same Subgraph together. Default: False.
+    """
+
+    from ..contexts.globals import GlobalTracingContext
+
+    with GlobalTracingContext.exit_global_tracing_context():
+
+        try:
+
+            import pygraphviz as pgv
+
+        except Exception as e:
+
+            raise type(e)(
+                "Visualization of the Graph requires `pygraphviz`, which requires `graphviz` to be installed on your machine."
+            ) from e
+
+        if group and not recursive:
+            print("Warning: set `recursive=True` to visualize all subgraphs and make use of the 'group' functionality.")
+            group = False
+
+        from IPython.display import Image
+        from IPython.display import display as IDisplay
+
+        graph_viz: pgv.AGraph = pgv.AGraph(strict=True, directed=True)
+
+        graph_viz.graph_attr.update(
+            label=title, fontsize="20", labelloc="t", labeljust="c"
+        )
+
+        def style_node(node: Node) -> Dict:
+            """Gets the style of the node based on its target.
+            If the target is a Protocol, then it gets the style directly from the protocol class.
+
+            Args:
+                - node (Node): node.
+
+            Returns:
+                - Dict: dictionary style.
+            """
+
+            if isinstance(node.target, type) and issubclass(node.target, Protocol):
+                return node.target.style()
+            else:
+                return {
+                    "node": {"color": "black", "shape": "ellipse"},
+                    "label": (node.target if isinstance(node.target, str) else node.target.__name__),
+                    "arg": defaultdict(lambda: {"color": "gray", "shape": "box"}),
+                    "arg_kname": defaultdict(lambda: None),
+                    "edge": defaultdict(lambda: {"style": "solid"}),
+                }
+
+        subgraphs: Dict[int, pgv.AGraph] = {}
+        subgraph_names_count: Dict[str, int] = defaultdict(lambda: 0)
+
+        def get_subgraph(node: Node) -> pgv.AGraph:
+            """Returns the Graph Visualization Object where this node should be rendered.
+
+            Args:
+                - node (Node): node to place.
+
+            Returns:
+                - pgv.AGraph: Graph Visualization Object.
+            """
+
+            nonlocal subgraphs
+            if group:
+                if id(node.graph) != id(graph):
+                    if id(node.graph) not in subgraphs:
+                        subgraph = graph_viz.subgraph(name=f"cluster_{id(node.graph)}")
+                        subgraph.graph_attr['penwidth'] = 0.25
+                        subgraphs[id(node.graph)] = subgraph
+
+                    return subgraphs[id(node.graph)]
+                else:
+                    return graph_viz
+            else:
+                return graph_viz
+
+        if recursive:
+            nodes = [node for node in graph.nodes if id(node.graph) >= id(graph)]
+        else:
+            nodes = graph
+
+        visualized_nodes = set()
+        for node in nodes:
+
+            styles: Dict = style_node(node)
+
+            subgraph: pgv.AGraph = get_subgraph(node)
+
+            subgraph.add_node(node.index, label=styles["label"], **styles["node"])
+            visualized_nodes.add(node.index)
+
+            for idx, arg in enumerate(node.args):
+                if isinstance(arg, SubGraph):
+                    name: str = f"{node.index}_{arg}_{idx}"
+                    label: str = "Subgraph"
+
+                    subgraph.add_node(name, label=label, **{"color": "purple", "shape": "box"})
+
+                    if recursive:
+                        for sub_node in arg:
+                            root_node: bool = True
+                            for dep_idx in sub_node._dependencies:
+                                root_node = root_node and (dep_idx not in arg.subset)
+
+                            if root_node:
+                                graph_viz.add_edge(node.index, sub_node.index, **{"style": "dashed", "color": styles["node"]["color"]})
+
+                    if group:
+                        subgraph_label: str = styles['label']
+                        subgraphs[id(arg)].graph_attr['label'] = f"{subgraph_label}_{subgraph_names_count[subgraph_label]}"
+                        subgraph_names_count[subgraph_label] += 1
+
+                elif isinstance(arg, Node):
+                    name = arg.index
+                    label = node.index
+
+                    if arg.index not in visualized_nodes:
+                        arg_label = (arg.target if isinstance(arg.target, str) else arg.target.__name__)
+
+                        subgraph.add_node(arg.index, label=arg_label, **{"color": "brown", "shape": "box"})
+
+                        visualized_nodes.add(arg.index)
+                else:
+                    name = str(node.index)
+                    if isinstance(arg, torch.Tensor):
+                        name += f"_Tensor_{idx}"
+                        label = "Tensor"
+                    elif isinstance(arg, str):
+                        name += f"_{arg}_{idx}"
+                        label = f'"{arg}"'
+                    else:
+                        name += f"_{arg}_{idx}"
+                        label = str(arg)
+
+                    if styles["arg_kname"][idx] is not None:
+                        label = f"{styles['arg_kname'][idx]}={label}"
+
+                    subgraph.add_node(name, 
label=label, **{"color": "gray", "shape": "box"})
+
+                # Note: the inner loop uses `element_idx` so it does not clobber
+                # the enclosing argument index `idx` used for edge styling below.
+                if isinstance(arg, Iterable):
+                    for element_idx, element in enumerate(arg):
+                        if isinstance(element, Node):
+                            if element.index not in visualized_nodes:
+
+                                element_label = (element.target if isinstance(element.target, str) else element.target.__name__)
+                                subgraph.add_node(element.index, label=element_label, color="brown", shape="box")
+                                visualized_nodes.add(element.index)
+
+                            graph_viz.add_edge(element.index, name, style="dashed", color="gray", label=f"{element_idx}", fontsize=10)
+
+                subgraph.add_edge(name, node.index, **styles["edge"][idx])
+
+        def display_graph(file_name):
+            in_notebook = True
+
+            # Credit: Till Hoffmann - https://stackoverflow.com/a/22424821
+            try:
+                from IPython import get_ipython
+
+                if "IPKernelApp" not in get_ipython().config:
+                    in_notebook = False
+            except ImportError:
+                in_notebook = False
+            except AttributeError:
+                in_notebook = False
+
+            if in_notebook:
+                IDisplay(Image(filename=file_name))
+            else:
+                img = PILImage.open(file_name)
+                img.show()
+                img.close()
+
+        if not save:
+            with tempfile.NamedTemporaryFile(suffix=".png") as temp_file:
+                graph_viz.draw(temp_file.name, prog="dot")
+                if display:
+                    display_graph(temp_file.name)
+        else:
+            graph_viz.draw(f"{path}/{title}.png", prog="dot")
+            if display:
+                display_graph(f"{path}/{title}.png")
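+
+# Example (illustrative sketch): rendering a traced graph. Assumes `graph` is a
+# populated Graph and that `pygraphviz`/`graphviz` are installed:
+#
+#     viz_graph(graph, title="my_trace", save=True, recursive=True, group=True)
+#
+# This writes `./my_trace.png` via the `dot` layout engine, and also displays
+# the image inline when running in a notebook.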
diff --git a/src/nnsight/tracing/hacks/__init__.py b/src/nnsight/tracing/hacks/__init__.py
new file mode 100755
index 00000000..e5fa9b63
--- /dev/null
+++ b/src/nnsight/tracing/hacks/__init__.py
@@ -0,0 +1,23 @@
+import ast
+from types import FrameType
+
+from ..graph import Graph
+from .conditional import handle as handle_conditional
+from .iterator import handle as handle_iterator
+
+
+def handle_inner(node: ast.stmt, frame: FrameType, graph: Graph):
+
+    if isinstance(node, ast.If):
+
+        handle_conditional(node, frame, graph)
+
+        return True
+
+    elif isinstance(node, ast.For):
+
+        handle_iterator(node, frame, graph)
+
+        return True
+
+    return False
\ No newline at end of file
diff --git a/src/nnsight/tracing/hacks/comprehension.py b/src/nnsight/tracing/hacks/comprehension.py
new file mode 100755
index 00000000..9bdeb1bd
--- /dev/null
+++ b/src/nnsight/tracing/hacks/comprehension.py
@@ -0,0 +1,67 @@
+import ast
+import ctypes
+import inspect
+import sys
+from types import FrameType
+from typing import TYPE_CHECKING
+
+from ..contexts import Iterator
+from ..graph import Graph
+from .util import execute, execute_body, execute_until, visit
+
+if TYPE_CHECKING:
+    from ..graph import Proxy
+
+COMPS = [ast.SetComp, ast.DictComp, ast.ListComp, ast.GeneratorExp]
+
+def handle(node: ast.For, frame: FrameType, graph: Graph):
+
+    iter_expr = ast.Expression(
+        body=node.iter, lineno=node.lineno, col_offset=node.col_offset
+    )
+
+    iter = execute(iter_expr, frame)
+
+    context = Iterator(iter, parent=graph)
+
+    target = node.target
+
+    with context as item:
+        if isinstance(target, ast.Name):
+            frame.f_locals[target.id] = item
+        elif isinstance(target, ast.Tuple):
+            for t, v in zip(target.elts, item):
+                if isinstance(t, ast.Name):
+                    frame.f_locals[t.id] = v
+
+        ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame), 0)
+
+        execute_body(node.body, frame, context.graph)
+
+
+def handle_proxy(node: ast.stmt, frame: FrameType, collection: "Proxy"):
+
+    graph = collection.node.graph
+
+    iterator = Iterator(collection, parent=graph)
+
+    item = iterator.__enter__()
+
+    def callback(new_frame: FrameType, list_proxy, iterator: Iterator):
+
+        key, result = next(iter(new_frame.f_locals.items()))
+
+        # Comprehension support is unfinished: the comprehension's result is not
+        # yet written back into the traced graph.
+        # list_proxy.append(result[0])
+
+        # new_frame.f_locals[key] = list_proxy
+        # ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(new_frame), 0)
+
+        iterator.__exit__(None, None, None)
+
+    execute_until(frame.f_lineno - 1, frame.f_lineno - 1, frame, callback=lambda new_frame: callback(new_frame, [], iterator))
+
+    return iter([item])
diff --git a/src/nnsight/tracing/hacks/conditional.py b/src/nnsight/tracing/hacks/conditional.py
new file mode 100755
index 00000000..87c12ad8
--- /dev/null
+++ b/src/nnsight/tracing/hacks/conditional.py
@@ -0,0 +1,74 @@
+import ast
+import inspect
+import sys
+from types import FrameType
+from typing import TYPE_CHECKING
+
+from ..contexts import Condition
+from .util import execute, execute_body, execute_until, visit
+from ..graph import Graph
+
+if TYPE_CHECKING:
+    from ..graph import Proxy
+
+def get_else(node: ast.If):
+
+    return (
+        node.orelse[0]
+        if isinstance(node.orelse[0], ast.If)
+        else ast.If(
+            test=ast.Constant(value=None),
+            body=node.orelse,
+            orelse=[],
+            lineno=node.lineno,
+            col_offset=node.col_offset,
+        )
+    )
+
+def handle(node: ast.If, frame: FrameType, graph: Graph, branch: Condition = None):
+
+    condition_expr = ast.Expression(
+        body=node.test, lineno=node.lineno, col_offset=node.col_offset
+    )
+
+    condition = execute(condition_expr, frame)
+
+    context = Condition(condition, parent=graph) if branch is None else branch.else_(condition)
+
+    with context as branch:
+        execute_body(node.body, frame, branch.graph)
+
+    if node.orelse:
+        return handle(get_else(node), frame, graph, branch)
+
+def handle_proxy(frame: FrameType, condition: "Proxy"):
+
+    class Visitor(ast.NodeVisitor):
+        def __init__(self, line_no):
+            self.target = None
+            self.line_no = line_no
+
+        def visit_If(self, node):
+            if node.lineno == self.line_no:
+                self.target = node
+            self.generic_visit(node)
+
+    visitor = visit(frame, Visitor)
+
+    if_node: ast.If = visitor.target
+
+    graph = condition.node.graph
+
+    branch = Condition(condition, parent=graph)
+
+    def callback(node: ast.If, frame: FrameType, graph: Graph, branch: Condition):
+
+        branch.__exit__(None, None, None)
+
+        if node.orelse:
+            handle(get_else(if_node), frame, graph, branch)
+
+    branch.__enter__()
+    end = frame.f_lineno + (if_node.end_lineno - if_node.lineno)
+    execute_until(frame.f_lineno, end, frame, callback=lambda _: callback(if_node, frame, graph, branch))
+
+    return True
\ No newline at end of file
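+
+# Note (sketch of the mechanism): with CONFIG.APP.CONTROL_FLOW_HACKS enabled, a
+# native `if` on a Proxy reaches Proxy.__bool__, which calls handle_proxy above.
+# The statement is then re-executed under a traced Condition context, roughly:
+#
+#     if proxy > 0:        # becomes: with Condition(proxy > 0) as branch: <body>
+#         ...
+#     else:                # becomes: branch.else_(None) around the else body
+#         ...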
diff --git a/src/nnsight/tracing/hacks/iterator.py b/src/nnsight/tracing/hacks/iterator.py
new file mode 100755
index 00000000..9fe350b4
--- /dev/null
+++ b/src/nnsight/tracing/hacks/iterator.py
@@ -0,0 +1,106 @@
+import ast
+import ctypes
+import inspect
+import sys
+from types import FrameType
+from typing import TYPE_CHECKING
+
+from ..contexts import Iterator
+from ..graph import Graph
+from .util import execute, execute_body, execute_until, visit
+from .comprehension import handle_proxy as handle_comprehension
+
+if TYPE_CHECKING:
+    from ..graph import Proxy
+
+COMPS = [ast.SetComp, ast.DictComp, ast.ListComp, ast.GeneratorExp]
+
+def handle(node: ast.For, frame: FrameType, graph: Graph):
+
+    iter_expr = ast.Expression(
+        body=node.iter, lineno=node.lineno, col_offset=node.col_offset
+    )
+
+    iter = execute(iter_expr, frame)
+
+    context = Iterator(iter, parent=graph)
+
+    target = node.target
+
+    with context as item:
+        if isinstance(target, ast.Name):
+            frame.f_locals[target.id] = item
+        elif isinstance(target, ast.Tuple):
+            for t, v in zip(target.elts, item):
+                if isinstance(t, ast.Name):
+                    frame.f_locals[t.id] = v
+
+        ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame), 0)
+
+        execute_body(node.body, frame, context.graph)
+
+
+def handle_proxy(frame: FrameType, collection: "Proxy"):
+
+    class Visitor(ast.NodeVisitor):
+        def __init__(self, line_no):
+            self.target = None
+            self.line_no = line_no
+            self.assign = None
+            self.nodes_on_line = []
+
+        def generic_visit(self, node):
+            if hasattr(node, 'lineno') and node.lineno == self.line_no:
+                self.nodes_on_line.append(node)
+            super().generic_visit(node)
+
+        def visit_Assign(self, node):
+            if node.lineno == self.line_no:
+                self.assign = node
+            self.generic_visit(node)
+
+        def visit_For(self, node):
+            if node.lineno == self.line_no:
+                self.target = node
+            self.generic_visit(node)
+
+        def visit_ListComp(self, node):
+            if self.target is None and node.lineno == self.line_no:
+                self.target = node
+            self.generic_visit(node)
+
+        def visit_DictComp(self, node):
+            if self.target is None and node.lineno == self.line_no:
+                self.target = node
+            self.generic_visit(node)
+
+        def visit_SetComp(self, node):
+            if self.target is None and node.lineno == self.line_no:
+                self.target = node
+            self.generic_visit(node)
+
+        def visit_GeneratorExp(self, node):
+            if self.target is None and node.lineno == self.line_no:
+                self.target = node
+            self.generic_visit(node)
+
+    visitor = visit(frame, Visitor)
+
+    # The target may be an ast.For or one of the comprehension node types.
+    for_node: ast.stmt = visitor.target
+
+    if type(for_node) in COMPS:
+        return handle_comprehension(for_node, frame, collection)
+
+    graph = collection.node.graph
+
+    iterator = Iterator(collection, parent=graph)
+
+    item = iterator.__enter__()
+
+    def callback(iterator: Iterator):
+
+        iterator.__exit__(None, None, None)
+
+    end = frame.f_lineno + (for_node.end_lineno - for_node.lineno)
+    execute_until(frame.f_lineno, end, frame, callback=lambda _: callback(iterator))
+
+    return iter([item])
diff --git a/src/nnsight/tracing/hacks/util.py b/src/nnsight/tracing/hacks/util.py
new file mode 100755
index 00000000..230a2958
--- /dev/null
+++ b/src/nnsight/tracing/hacks/util.py
@@ -0,0 +1,101 @@
+import ast
+import ctypes
+import inspect
+import sys
+from types import FrameType
+from typing import Any, Callable, List, Optional, Type
+
+from ..contexts import Context
+from ..graph import Graph
+
+
+def execute(expr: ast.expr, frame: FrameType) -> Any:
+    ast.fix_missing_locations(expr)
+    return eval(
+        compile(expr, "", "eval"),
+        frame.f_globals,
+        frame.f_locals,
+    )
+
+
+def execute_body(body: List[ast.stmt], frame: FrameType, graph: Graph) -> None:
+
+    from .
import handle_inner + + for stmt in body: + + if not handle_inner(stmt, frame, graph): + module = ast.Module(body=[stmt], type_ignores=[]) + ast.fix_missing_locations(module) + exec( + compile(module, "", "exec"), + frame.f_globals, + frame.f_locals, + ) + + +def execute_until( + first_line: int, + last_line: int, + frame: FrameType, + callback: Optional[Callable] = None, +): + + prev_trace = frame.f_trace + + def trace(new_frame: FrameType, *args): + + if new_frame.f_code.co_filename == frame.f_code.co_filename and ( + new_frame.f_lineno > last_line or new_frame.f_lineno < first_line + ): + + frame.f_trace = prev_trace + sys.settrace(prev_trace) + + if prev_trace is not None: + + prev_trace(new_frame, *args) + + if callback is not None: + + callback(new_frame) + + frame.f_trace = trace + sys.settrace(trace) + + +def is_ipython(): + return "_ih" in locals() + + +def visit(frame: FrameType, visitor_cls: Type[ast.NodeVisitor]) -> ast.stmt: + + line_no = frame.f_lineno + + if "_ih" in frame.f_locals: + import IPython + + ipython = IPython.get_ipython() + source_lines = ipython.user_global_ns["_ih"][-1] + inner_line_no = 0 + + else: + source_lines, inner_line_no = inspect.getsourcelines(frame) + + if inner_line_no > 0: + line_no = line_no - inner_line_no + 1 + + shift = len(source_lines[0]) - len(source_lines[0].lstrip()) + + if shift > 0: + + source_lines = [source_line[shift:] for source_line in source_lines] + + source = "".join(source_lines) + + tree = ast.parse(source) + + visitor = visitor_cls(line_no) + visitor.visit(tree) + + return visitor diff --git a/src/nnsight/tracing/protocols.py b/src/nnsight/tracing/protocols.py deleted file mode 100755 index 19a52a13..00000000 --- a/src/nnsight/tracing/protocols.py +++ /dev/null @@ -1,965 +0,0 @@ -import inspect -import weakref -from collections import defaultdict -from typing import TYPE_CHECKING, Any, Dict, Optional, Union - -import torch -from torch._subclasses.fake_tensor import FakeCopyMode, FakeTensorMode -from torch.fx.experimental.symbolic_shapes import ShapeEnv - -from nnsight.tracing.Node import Node - -from .. import util -from ..contexts.Conditional import ConditionalManager -from .util import validate - -if TYPE_CHECKING: - from ..contexts.backends.LocalBackend import LocalMixin - from ..contexts.Conditional import Conditional - from ..intervention import InterventionProxy - from .Bridge import Bridge - from .Graph import Graph - from .Node import Node - - -class Protocol: - """A `Protocol` represents some complex action a user might want to create a `Node` and `Proxy` for as well as add to a `Graph`. - Unlike normal `Node` target execution, these have access to the `Node` itself and therefore the `Graph`, enabling more powerful functionality than with just functions and methods. - """ - - redirect: bool = True - condition: bool = True - - @classmethod - def add(cls, *args, **kwargs) -> "InterventionProxy": - """Class method to be implemented in order to add a Node of this Protocol to a Graph.""" - - raise NotImplementedError() - - @classmethod - def execute(cls, node: "Node"): - """Class method to be implemented which contains the actual execution logic of the Protocol. By default, does nothing - - Args: - node (Node): Node to execute using this Protocols execution logic. - """ - pass - - @classmethod - def compile(cls, node: "Node") -> None: - pass - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. 
- """ - - return { - "node": {"color": "black", "shape": "ellipse"}, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument display - "arg_kname": defaultdict(lambda: None), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display - - -class ApplyModuleProtocol(Protocol): - """Protocol that references some root model, and calls its .forward() method given some input. - Using .forward() vs .__call__() means it wont trigger hooks. - Uses an attachment to the Graph to store the model. - """ - - attachment_name = "nnsight_root_module" - - @classmethod - def add( - cls, graph: "Graph", module_path: str, *args, hook=False, **kwargs - ) -> "InterventionProxy": - """Creates and adds an ApplyModuleProtocol to the Graph. - Assumes the attachment has already been added via ApplyModuleProtocol.set_module(). - - Args: - graph (Graph): Graph to add the Protocol to. - module_path (str): Module path (model.module1.module2 etc), of module to apply from the root module. - - Returns: - InterventionProxy: ApplyModule Proxy. - """ - - value = inspect._empty - - # If the Graph is validating, we need to compute the proxy_value for this node. - if graph.validate: - - from .Node import Node - - # If the module has parameters, get its device to move input tensors to. - module: torch.nn.Module = util.fetch_attr( - cls.get_module(graph), module_path - ) - - try: - device = next(module.parameters()).device - except: - device = None - - # Enter FakeMode for proxy_value computing. - value = validate(module.forward, *args, **kwargs) - - kwargs["hook"] = hook - - # Create and attach Node. - return graph.create( - target=cls, - proxy_value=value, - args=[module_path] + list(args), - kwargs=kwargs, - ) - - @classmethod - def execute(cls, node: "Node") -> None: - """Executes the ApplyModuleProtocol on Node. - - Args: - node (Node): ApplyModule Node. - """ - - module: torch.nn.Module = util.fetch_attr( - cls.get_module(node.graph), node.args[0] - ) - - try: - device = next(module.parameters()).device - except: - device = None - - args, kwargs = node.prepare_inputs( - (node.args, node.kwargs), device=device - ) - - module_path, *args = args - - hook = kwargs.pop("hook") - - if hook: - output = module(*args, **kwargs) - else: - output = module.forward(*args, **kwargs) - - node.set_value(output) - - @classmethod - def set_module(cls, graph: "Graph", module: torch.nn.Module) -> None: - """Sets the nnsight root module as an attachment on a Graph. - - Args: - graph (Graph): Graph. - module (torch.nn.Module): Root module. - """ - - graph.attachments[cls.attachment_name] = module - - @classmethod - def get_module(cls, graph: "Graph") -> torch.nn.Module: - """Returns the nnsight root module from an attachment on a Graph. - - Args: - graph (Graph): Graph - - Returns: - torch.nn.Module: Root Module. - """ - - return graph.attachments[cls.attachment_name] - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. 
- """ - - return { - "node": { - "color": "blue", - "shape": "polygon", - "sides": 6, - }, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument display - "arg_kname": defaultdict(lambda: None), # Argument label word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display - - -class LockProtocol(Protocol): - """Simple Protocol who's .execute() method does nothing. This means not calling .set_value() on the Node, therefore the Node won't be destroyed.""" - - redirect: bool = False - - @classmethod - def add(cls, node: "Node") -> "InterventionProxy": - - return node.create( - proxy_value=None, - target=cls, - args=[node], - ) - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. - """ - - return { - "node": {"color": "brown", "shape": "ellipse"}, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument display - "arg_kname": defaultdict(lambda: None), # Argument lable key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display - - -class GradProtocol(Protocol): - """Protocol which adds a backwards hook via .register_hook() to a Tensor. The hook injects the gradients into the node's value on hook execution. - Nodes created via this protocol are relative to the next time .backward() was called during tracing allowing separate .grads to reference separate backwards passes: - - .. code-block:: python - with model.trace(...): - - grad1 = model.module.output.grad.save() - - model.output.sum().backward(retain_graph=True) - - grad2 = model.module.output.grad.save() - - model.output.sum().backward() - - Uses an attachment to store number of times .backward() has been called during tracing so a given .grad hook is only value injected at the appropriate backwards pass. - """ - - attachment_name = "nnsight_backward_idx" - - @classmethod - def add(cls, node: "Node") -> "InterventionProxy": - - # Get number of times .backward() was called during tracing from an attachment. Use as Node argument. - backward_idx = node.graph.attachments.get(cls.attachment_name, 0) - - return node.create( - proxy_value=node.proxy_value, - target=cls, - args=[node, backward_idx], - ) - - @classmethod - def execute(cls, node: "Node") -> None: - - args, kwargs = node.prepare_inputs((node.args, node.kwargs)) - - # First arg is the Tensor to add hook to. - tensor: torch.Tensor = args[0] - # Second is which backward pass this Node refers to. - backward_idx: int = args[1] - - # Hook to remove when hook is executed at the appropriate backward pass. - hook = None - - def grad(value): - - nonlocal backward_idx - - # If backward_idx == 0, this is the correct backward pass and we should actually execute. - if backward_idx == 0: - - # Set the value of the Node. - node.set_value(value) - - if node.attached(): - - # There may be a swap Protocol executed during the resolution of this part of the graph. - # If so get it and replace value with it. - value = SwapProtocol.get_swap(node.graph, value) - - # Don't execute this hook again. - backward_idx = -1 - - # Remove hook (if this is not done memory issues occur) - hook.remove() - - return value - - # Otherwise decrement backward_idx - else: - - backward_idx -= 1 - - return None - - # Register hook. 
- hook = tensor.register_hook(grad) - - @classmethod - def increment(cls, graph: "Graph"): - """Increments the backward_idx attachment to track the number of times .backward() is called in tracing for this Graph. - - Args: - graph (Graph): Graph. - """ - - backward_idx = graph.attachments.get(cls.attachment_name, 0) - - graph.attachments[cls.attachment_name] = backward_idx + 1 - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. - """ - - return { - "node": {"color": "green4", "shape": "box"}, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument display - "arg_kname": defaultdict(lambda: None), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display - - -class SwapProtocol(Protocol): - """Protocol which adds an attachment to the Graph which can store some value. Used to replace ('swap') a value with another value.""" - - attachment_name = "nnsight_swap" - - @classmethod - def add(cls, node: "Node", value: Any) -> "InterventionProxy": - - return node.create(target=cls, args=[node, value], proxy_value=True) - - @classmethod - def execute(cls, node: "Node") -> None: - - # In case there is already a swap, get it from attachments. - swap: "Node" = node.graph.attachments.get(cls.attachment_name, None) - - # And set it to False to destroy it. - if swap is not None: - swap.set_value(False) - - # Set the swap to this Node. - node.graph.attachments[cls.attachment_name] = node - - @classmethod - def get_swap(cls, graph: "Graph", value: Any) -> Any: - """Checks if a swap exists on a Graph. If so get and return it, otherwise return the given value. - - Args: - graph (Graph): Graph - value (Any): Default value. - - Returns: - Any: Default value or swap value. - """ - - # Tries to get the swap. - swap: "Node" = graph.attachments.get(cls.attachment_name, None) - - # If there was one: - if swap is not None: - - device = None - - def _device(value: torch.Tensor): - nonlocal device - - device = value.device - - # Get device of default value. - util.apply(value, _device, torch.Tensor) - - # Get swap Node's value. - value = util.apply(swap.args[1], lambda x: x.value, type(swap)) - - if device is not None: - - def _to(value: torch.Tensor): - return value.to(device) - - # Move swap values to default value's device. - value = util.apply(value, _to, torch.Tensor) - - # Set value of 'swap' node so it destroys itself and listeners. - swap.set_value(True) - - # Un-set swap. - graph.attachments[cls.attachment_name] = None - - return value - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. - """ - - return { - "node": {"color": "green4", "shape": "ellipse"}, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument display - "arg_kname": defaultdict(lambda: None), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge key word - - -class BridgeProtocol(Protocol): - """Protocol to connect two Graphs by grabbing a value from one and injecting it into another. - Uses an attachment to store a Bridge object which references all relevant Graphs and their ordering. 
- """ - - attachment_name = "nnsight_bridge" - condition: bool = False - - class BridgeException(Exception): - def __init__(self): - super.__init__( - "Must define a Session context to make use of the Bridge" - ) - - @classmethod - def add(cls, node: "Node") -> "InterventionProxy": - - bridge = cls.get_bridge(node.graph) - curr_graph = bridge.peek_graph() - bridge_proxy = bridge.get_bridge_proxy( - node, curr_graph.id - ) # a bridged node has a unique bridge node proxy per graph reference - - # if the bridge node does not exist, create one - if bridge_proxy is None: - # Adds a Lock Node. One, so the value from_node isn't destroyed until the to_nodes are done with it, - # and two acts as an easy reference to the from_node to get its value from the lock Node args. - lock_node = LockProtocol.add(node).node - - # Args for a Bridge Node are the id of the Graph and node name of the Lock Node. - bridge_proxy = node.create( - target=cls, - proxy_value=node.proxy_value, - args=[node.graph.id, lock_node.name], - ) - bridge.add_bridge_proxy(node, bridge_proxy) - - return bridge_proxy - - @classmethod - def execute(cls, node: "Node") -> None: - - # Gets Bridge object from the Node's Graph. - bridge = cls.get_bridge(node.graph) - - # Args are Graph's id and name of the Lock Node on it. - from_graph_id, lock_node_name = node.args - - # Gets the from_node's Graph via its id with the Bridge and get the Lock Node. - lock_node = bridge.get_graph(from_graph_id).nodes[lock_node_name] - - # Value node is Lock Node's only arg - value_node: "Node" = lock_node.args[0] - - if value_node.done(): - - # Set value to that of the value Node. - node.set_value(value_node.value) - - # Bridge.release tells this Protocol when to release all Lock Nodes as we no longer need the data (useful when running a Graph in a loop, only release on last iteration) - if bridge.release: - - lock_node.set_value(None) - - @classmethod - def set_bridge(cls, graph: "Graph", bridge: "Bridge") -> None: - """Sets Bridge object as an attachment on a Graph. - - Args: - graph (Graph): Graph. - bridge (Bridge): Bridge. - """ - - graph.attachments[cls.attachment_name] = weakref.proxy(bridge) - - @classmethod - def get_bridge(cls, graph: "Graph") -> "Bridge": - """Gets Bridge object from a Graph. Assumes Bridge has been set as an attachment on this Graph via BridgeProtocol.set_bridge(). - - Args: - graph (Graph): Graph. - - Returns: - Bridge: Bridge. - """ - - if not cls.has_bridge(graph): - raise cls.BridgeException() - - return graph.attachments[cls.attachment_name] - - @classmethod - def has_bridge(cls, graph: "Graph") -> bool: - """Checks to see if a Bridge was added as an attachment on this Graph via BridgeProtocol.set_bridge(). - - Args: - graph (Graph): Graph - - Returns: - bool: If Graph has Bridge attachment. - """ - - return cls.attachment_name in graph.attachments - - @classmethod - def peek_graph(cls, graph: "Graph") -> "Graph": - """Returns current Intervention Graph. - - Args: - - graph (Graph): Graph. - - Returns: - Graph: Graph. - """ - - if not BridgeProtocol.has_bridge(graph): - return graph - else: - bridge = BridgeProtocol.get_bridge(graph) - return bridge.peek_graph() - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. 
- """ - - return { - "node": {"color": "brown", "shape": "box"}, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: { - "color": "gray", - "shape": "box", - }, # Non-node argument display - {0: {"color": "gray", "shape": "box", "style": "dashed"}}, - ), - "arg_kname": defaultdict( - lambda: None, {0: "graph_id"} - ), # Arugment label key word - "edge": defaultdict(lambda: "solid", {0: "dashed"}), - } # Argument edge display - - -class EarlyStopProtocol(Protocol): - """Protocol to stop the execution of a model early.""" - - class EarlyStopException(Exception): - pass - - @classmethod - def add( - cls, graph: "Graph", stop_point_node: Optional["Node"] = None - ) -> "InterventionProxy": - return graph.create( - target=cls, - proxy_value=None, - args=([stop_point_node] if stop_point_node is not None else []), - ) - - @classmethod - def execute(cls, node: "Node") -> None: - - node.set_value(True) - - raise cls.EarlyStopException() - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. - """ - - return { - "node": { - "color": "red", - "shape": "polygon", - "sides": 6, - }, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument display - "arg_kname": defaultdict(lambda: None), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display - - -class LocalBackendExecuteProtocol(Protocol): - - @classmethod - def add(cls, object: "LocalMixin", graph: "Graph") -> "InterventionProxy": - - return graph.create(target=cls, proxy_value=None, args=[object]) - - @classmethod - def execute(cls, node: Node) -> None: - - object: "LocalMixin" = node.args[0] - - object.local_backend_execute() - - node.set_value(None) - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. - """ - - return { - "node": { - "color": "purple", - "shape": "polygon", - "sides": 6, - }, # Node display - "label": "ExecuteProtocol", - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument display - "arg_kname": defaultdict(lambda: None), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display - - -class ValueProtocol(Protocol): - - @classmethod - def add(cls, graph: "Graph", default: Any = None) -> "InterventionProxy": - - return graph.create(target=cls, proxy_value=default, args=[default]) - - @classmethod - def execute(cls, node: Node) -> None: - - node.set_value(node.args[0]) - - @classmethod - def set(cls, node: Node, value: Any) -> None: - - node.args[0] = value - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. - """ - - return { - "node": {"color": "blue", "shape": "box"}, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument - "arg_kname": defaultdict(lambda: None), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display - - -class ConditionalProtocol(Protocol): - """Protocol operating as a conditional statement. - Uses the ConditionalManager attachment to handle all visited Conditional contexts within a single Intervention Graph. - Evaluates the condition value of the Conditional as a boolean. - - Example: - - Setup: - .. 
code-block:: python - import torch - from collections import OrderedDict - - input_size = 5 - hidden_dims = 10 - output_size = 2 - - model = nn.Sequential(OrderedDict([ - ('layer1', torch.nn.Linear(input_size, hidden_dims)), - ('layer2', torch.nn.Linear(hidden_dims, output_size)), - ])) - - input = torch.rand((1, input_size)) å - - Ex 1: The .save() on the model output will only be executed if the condition (x > 0) is evaluated to True. - - .. code-block:: python - with model.trace(input) as tracer: - num = 5 - with tracer.cond(x > 0): - out = model.output.save() - - Ex 2: The condition is a tensor boolean operation on the Envoy's output InterventionProxy. - - .. code-block:: python - with model.trace(input) as tracer: - l1_out = model.layer1.output - with tracer.cond(l1_out[:, 0] > 0): - out = model.output.save() - """ - - attachment_name = "nnsight_conditional_manager" - - @classmethod - def add( - cls, graph: "Graph", condition: Union["Node", Any] - ) -> "InterventionProxy": - - return graph.create(target=cls, proxy_value=True, args=[condition]) - - @classmethod - def execute(cls, node: "Node") -> None: - """Evaluate the node condition to a boolean. - - Args: - node (Node): ConditionalProtocol node. - """ - - cond_value = Node.prepare_inputs(node.args[0]) - if cond_value: - # cond_value is True - node.set_value(True) - return - - def update_conditioned_nodes(conditioned_node: "Node") -> None: - """Recursively decrement the remaining listeners count of all the dependencies of conditioned nodes. - - Args: - - conditioned_node (Node): Conditioned Node - """ - for listener in conditioned_node.listeners: - for listener_arg in listener.arg_dependencies: - listener_arg.remaining_listeners -= 1 - if listener_arg.done() and listener_arg.redundant(): - listener_arg.destroy() - update_conditioned_nodes(listener) - - # If the condition value is ignore or evaluated to False, update conditioned nodes - update_conditioned_nodes(node) - - @classmethod - def has_conditional(cls, graph: "Graph") -> bool: - """Checks if the Intervention Graph has a ConditionalManager attached to it. - - Args: - graph (Graph): Intervention Graph. - - Returns: - bool: If graph has a ConditionalManager attachement. - """ - return cls.attachment_name in graph.attachments.keys() - - @classmethod - def get_conditional( - cls, graph: "Graph", cond_node_name: str - ) -> "Conditional": - """Gets the ConditionalProtocol node by its name. - - Args: - graph (Graph): Intervention Graph. - cond_node_name (str): ConditionalProtocol name. - - Returns: - Node: ConditionalProtocol Node. - """ - return graph.attachments[cls.attachment_name].get(cond_node_name) - - @classmethod - def push_conditional(cls, node: "Node") -> None: - """Attaches a Conditional context to its graph. - - Args: - node (Node): ConditionalProtocol of the current Conditional context. - """ - - # All ConditionalProtocols associated with a graph are stored and managed by the ConditionalManager. - # Create a ConditionalManager attachement to the graph if this the first Conditional context to be entered. - if cls.attachment_name not in node.graph.attachments.keys(): - node.graph.attachments[cls.attachment_name] = ConditionalManager() - - # Push the ConditionalProtocol node to the ConditionalManager - node.graph.attachments[cls.attachment_name].push(node) - - @classmethod - def pop_conditional(cls, graph: "Graph") -> None: - """Pops latest ConditionalProtocol from the ConditionalManager attached to the graph. - - Args: - graph (Graph): Intervention Graph. 
- """ - graph.attachments[cls.attachment_name].pop() - - @classmethod - def peek_conditional(cls, graph: "Graph") -> "Node": - """Gets the ConditionalProtocol node of the current Conditional context. - - Args: - - graph (Graph): Graph. - - Returns: - Node: ConditionalProtocol of the current Conditional context. - """ - return graph.attachments[cls.attachment_name].peek() - - @classmethod - def add_conditioned_node(cls, node: "Node") -> None: - """Adds a conditioned Node the ConditionalManager attached to its graph. - - Args: - - node (Node): Conditioned Node. - """ - - node.graph.attachments[cls.attachment_name].add_conditioned_node(node) - - @classmethod - def is_node_conditioned(cls, node: "Node") -> bool: - """Checks if the Node is conditoned by the current Conditional context. - - Args: - - node (Node): Conditioned Node. - - Returns: - bool: Whether the Node is conditioned. - """ - - return node.graph.attachments[cls.attachment_name].is_node_conditioned( - node - ) - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. - """ - - return { - "node": { - "color": "#FF8C00", - "shape": "polygon", - "sides": 6, - }, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument - "arg_kname": defaultdict(lambda: None), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display - - -class UpdateProtocol(Protocol): - """Protocol to update the value of an InterventionProxy node. - - .. codeb-block:: python - with model.trace(input) as tracer: - num = tracer.apply(int, 0) - num.update(5) - """ - - @classmethod - def add( - cls, node: "Node", new_value: Union[Node, Any] - ) -> "InterventionProxy": - """Creates an UpdateProtocol node. - - Args: - node (Node): Original node. - new_value (Union[Node, Any]): The update value. - - Returns: - InterventionProxy: proxy. - """ - - return node.create( - target=cls, - proxy_value=node.proxy_value, - args=[ - node, - new_value, - ], - ) - - @classmethod - def execute(cls, node: "Node") -> None: - """Sets the value of the original node to the new value. - If the original is defined outside the context, it uses the bridge to get the node. - - Args: - node (Node): UpdateProtocol node. - """ - - value_node, new_value = node.args - new_value = Node.prepare_inputs(new_value) - - if value_node.target == BridgeProtocol: - value_node._value = new_value - bridge = BridgeProtocol.get_bridge(value_node.graph) - lock_node = bridge.id_to_graph[value_node.args[0]].nodes[ - value_node.args[1] - ] - value_node = lock_node.args[0] - - value_node._value = new_value - - node.set_value(new_value) - - @classmethod - def style(cls) -> Dict[str, Any]: - """Visualization style for this protocol node. - - Returns: - - Dict: dictionary style. 
- """ - - return { - "node": {"color": "blue", "shape": "ellipse"}, # Node display - "label": cls.__name__, - "arg": defaultdict( - lambda: {"color": "gray", "shape": "box"} - ), # Non-node argument - "arg_kname": defaultdict(lambda: None), # Argument label key word - "edge": defaultdict(lambda: "solid"), - } # Argument edge display diff --git a/src/nnsight/tracing/protocols/__init__.py b/src/nnsight/tracing/protocols/__init__.py new file mode 100755 index 00000000..7bfa0c95 --- /dev/null +++ b/src/nnsight/tracing/protocols/__init__.py @@ -0,0 +1,4 @@ +from .base import Protocol +from .variable import VariableProtocol +from .stop import StopProtocol +from .lock import LockProtocol \ No newline at end of file diff --git a/src/nnsight/tracing/protocols/base.py b/src/nnsight/tracing/protocols/base.py new file mode 100755 index 00000000..89710555 --- /dev/null +++ b/src/nnsight/tracing/protocols/base.py @@ -0,0 +1,45 @@ +from collections import defaultdict +from typing import TYPE_CHECKING, Any, Dict + +if TYPE_CHECKING: + from ..graph import GraphType, Node, Proxy + +class Protocol: + + @staticmethod + def is_protocol(thing:Any): + + return isinstance(thing, type) and issubclass(thing, Protocol) + + @classmethod + def add(cls, graph:"GraphType",*args, **kwargs) -> "Proxy": + + return graph.create( + cls, + *args, + **kwargs + + ) + + @classmethod + def execute(cls, node: "Node"): + + pass + + @classmethod + def style(cls) -> Dict[str, Any]: + """Visualization style for this protocol node. + + Returns: + - Dict: dictionary style. + """ + + return { + "node": {"color": "black", "shape": "ellipse"}, # Node display + "label": cls.__name__, + "arg": defaultdict( + lambda: {"color": "gray", "shape": "box"} + ), # Non-node argument display + "arg_kname": defaultdict(lambda: None), # Argument label key word + "edge": defaultdict(lambda: {"style": "solid"}), + } # Argument edge display diff --git a/src/nnsight/tracing/protocols/lock.py b/src/nnsight/tracing/protocols/lock.py new file mode 100755 index 00000000..6da4fbf3 --- /dev/null +++ b/src/nnsight/tracing/protocols/lock.py @@ -0,0 +1,31 @@ +from typing import TYPE_CHECKING, Any, Dict + +from . import Protocol + +if TYPE_CHECKING: + from ..graph import ProxyType, NodeType + + +class LockProtocol(Protocol): + + @classmethod + def add(cls, node: "NodeType") -> "ProxyType": + return node.create( + cls, + node, + fake_value=None, + ) + + @classmethod + def style(cls) -> Dict[str, Any]: + """Visualization style for this protocol node. + + Returns: + - Dict: dictionary style. + """ + + default_style = super().style() + + default_style["node"] = {"color": "brown", "shape": "ellipse"} + + return default_style diff --git a/src/nnsight/tracing/protocols/stop.py b/src/nnsight/tracing/protocols/stop.py new file mode 100755 index 00000000..40d94336 --- /dev/null +++ b/src/nnsight/tracing/protocols/stop.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING, Any, Dict + +from . import Protocol +from ...util import NNsightError + +if TYPE_CHECKING: + from ..graph import Node + + +class StopProtocol(Protocol): + + class StopException(NNsightError): + pass + + @classmethod + def execute(cls, node: "Node") -> None: + + raise cls.StopException("Early Stop Exception!", node.index) + + @classmethod + def style(cls) -> Dict[str, Any]: + """Visualization style for this protocol node. + + Returns: + - Dict: dictionary style. 
+ """ + + default_style = super().style() + + default_style["node"] = {"color": "red", "shape": "polygon", "sides": 6} + + return default_style + diff --git a/src/nnsight/tracing/protocols/variable.py b/src/nnsight/tracing/protocols/variable.py new file mode 100755 index 00000000..ce2dcc51 --- /dev/null +++ b/src/nnsight/tracing/protocols/variable.py @@ -0,0 +1,34 @@ +from typing import TYPE_CHECKING, Any, Dict + +from . import Protocol + +if TYPE_CHECKING: + from ..graph import Node + +class VariableProtocol(Protocol): + + @classmethod + def set(cls, node: "Node", value: Any): + + node.args = [value] + + @classmethod + def execute(cls, node: "Node"): + + value = node.prepare_inputs(node.args[0]) + + node.set_value(value) + + @classmethod + def style(cls) -> Dict[str, Any]: + """ Visualization style for this protocol node. + + Returns: + - Dict: dictionary style. + """ + + default_style = super().style() + + default_style["node"] = {"color": "blue", "shape": "box"} + + return default_style diff --git a/src/nnsight/tracing/util.py b/src/nnsight/tracing/util.py deleted file mode 100755 index cb875fd2..00000000 --- a/src/nnsight/tracing/util.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import Callable - -from torch._subclasses.fake_tensor import FakeCopyMode, FakeTensorMode -from torch.fx.experimental.symbolic_shapes import ShapeEnv - -from .Node import Node - - -def validate(target: Callable, *args, **kwargs): - - from ..contexts.GraphBasedContext import GlobalTracingContext - - # Enter FakeMode. - with FakeTensorMode( - allow_non_fake_inputs=True, - shape_env=ShapeEnv(assume_static_by_default=True), - ) as fake_mode: - with FakeCopyMode(fake_mode): - - with GlobalTracingContext.exit_global_tracing_context(): - - args, kwargs = Node.prepare_inputs((args, kwargs), proxy=True) - - return target( - *args, - **kwargs, - ) diff --git a/src/nnsight/util.py b/src/nnsight/util.py index 42d52956..8e6601ab 100755 --- a/src/nnsight/util.py +++ b/src/nnsight/util.py @@ -1,30 +1,21 @@ """Module for utility functions and classes used throughout the package.""" import importlib -import types -from functools import wraps -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Collection, - Dict, - Optional, - Tuple, - Type, - Union, -) +from contextlib import AbstractContextManager +from typing import Any, Callable, Collection, List, Optional, Type, TypeVar import torch - -if TYPE_CHECKING: - from .tracing.Node import Node +from typing_extensions import Self # TODO Have an Exception you can raise to stop apply early +T = TypeVar("T") +C = TypeVar("C", bound=Collection[T]) + + def apply( - data: Any, fn: Callable, cls: Type, inplace: bool = False -) -> Collection: + data: C, fn: Callable[[T], Any], cls: Type[T], inplace: bool = False +) -> C: """Applies some function to all members of a collection of a give type (or types) Args: @@ -71,7 +62,6 @@ def apply( return data - def fetch_attr(object: object, target: str) -> Any: """Retrieves an attribute from an object hierarchy given an attribute path. Levels are separated by '.' e.x (transformer.h.1) @@ -96,44 +86,85 @@ def fetch_attr(object: object, target: str) -> Any: return object +def to_import_path(type: type) -> str: -def wrap(object: object, wrapper: Type, *args, **kwargs) -> object: - """Wraps some object given some wrapper type. - Updates the __class__ attribute of the object and calls the wrapper type's __init__ method. + return f"{type.__module__}.{type.__name__}" - Args: - object (object): Object to wrap. 
-        wrapper (Type): Type to wrap the object in.
-
-    Returns:
-        object: Wrapped object.
+def from_import_path(import_path: str) -> type:
+
+    *import_path, classname = import_path.split(".")
+    import_path = ".".join(import_path)
+
+    return getattr(importlib.import_module(import_path), classname)
+
+
+class Patch:
+    """Class representing a replacement of an attribute on a module.
+
+    Attributes:
+        parent (Any): Module or class on which the attribute lives.
+        replacement (Any): Object that replaces the original attribute.
+        key (str): Name of the attribute to replace.
+        orig (Any): Original attribute value, kept so it can be restored.
+    """
-
-    if isinstance(object, wrapper):
-        return object
+    def __init__(self, parent: Any, replacement: Any, key: str) -> None:
+        self.parent = parent
+        self.replacement = replacement
+        self.key = key
+        self.orig = getattr(self.parent, key)
-    new_class = types.new_class(
-        object.__class__.__name__,
-        (object.__class__, wrapper),
-    )
+    def patch(self) -> None:
+        """Carries out the replacement of an object in a module/class."""
+        setattr(self.parent, self.key, self.replacement)
-    object.__class__ = new_class
+    def restore(self) -> None:
+        """Carries out the restoration of the original object on the object's module/class."""
-    wrapper.__init__(object, *args, **kwargs)
+        setattr(self.parent, self.key, self.orig)
-    return object
-def to_import_path(type: type) -> str:
+class Patcher(AbstractContextManager):
+    """Context manager that applies a list of Patches on __enter__ and restores them on __exit__.
-    return f"{type.__module__}.{type.__name__}"
+    Attributes:
+        patches (List[Patch]): Patches to apply.
+    """
+
+    def __init__(self, patches: Optional[List[Patch]] = None) -> None:
+        self.patches = patches or []
+
+        self.entered = False
-def from_import_path(import_path: str) -> type:
+    def add(self, patch: Patch) -> None:
+        """Adds a Patch to the patches. Also calls `.patch()` on the Patch.
-    *import_path, classname = import_path.split(".")
-    import_path = ".".join(import_path)
+        Args:
+            patch (Patch): Patch to add.
+        """
-    return getattr(importlib.import_module(import_path), classname)
+        self.patches.append(patch)
+
+        if self.entered:
+            patch.patch()
+
+    def __enter__(self) -> Self:
+        """Enters the patching context. Calls `.patch()` on all patches.
+
+        Returns:
+            Patcher: Patcher
+        """
+
+        self.entered = True
+
+        for patch in self.patches:
+            patch.patch()
+
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+        """Calls `.restore()` on all patches."""
+        self.entered = False
+        for patch in self.patches:
+            patch.restore()
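+
+# Example (illustrative sketch): temporarily replacing an attribute with
+# Patcher/Patch. `math.pi` is used purely for illustration:
+#
+#     import math
+#
+#     with Patcher([Patch(math, 3.0, "pi")]):
+#         assert math.pi == 3.0   # patched inside the context
+#     assert math.pi != 3.0       # restored on exit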
+ """ + traceback_list = self.traceback_content.split("\n") + traceback_list.append(f"{str(self.__class__.__name__)}: {self.message}") + + return traceback_list diff --git a/tests/test_lm.py b/tests/test_lm.py index 5d26f477..347e349b 100755 --- a/tests/test_lm.py +++ b/tests/test_lm.py @@ -1,13 +1,38 @@ import pytest import torch - +import msgspec +import zlib import nnsight -from nnsight.contexts.GraphBasedContext import GlobalTracingContext -from nnsight.contexts.Tracer import Tracer -from nnsight.schema.Request import RequestModel -from nnsight.tracing.Graph import Graph - - +from nnsight.intervention.contexts import InterventionTracer, Session +from nnsight.tracing.backends import Backend +from nnsight.tracing.graph import Graph +from nnsight.tracing.protocols import StopProtocol +from nnsight.tracing.contexts import GlobalTracingContext +from nnsight.schema.request import RequestModel +class AssertSavedLenBackend(Backend): + + def __init__(self, len:int) -> None: + self.len = len + + def __call__(self, graph: Graph) -> None: + + try: + + graph.nodes[-1].execute() + + except StopProtocol.StopException: + + pass + + finally: + + assert self.len == len([node for node in graph.nodes if node.done]) + + graph.nodes.clear() + graph.stack.clear() + + + @pytest.fixture(scope="module") def gpt2(device: str): return nnsight.LanguageModel( @@ -20,23 +45,20 @@ def MSG_prompt(): return "Madison Square Garden is located in the city of" -def _test_serialize(tracer: Tracer): +def _test_serialize(tracer: InterventionTracer): + with GlobalTracingContext.exit_global_tracing_context(): - request = RequestModel( - object=tracer, model_key=tracer.remote_backend_get_model_key() - ) - request_json = request.model_dump( - mode="json", exclude=["session_id", "received", "id"] - ) + request = RequestModel.serialize(tracer.graph.stack[0], 'json', True) - request2 = RequestModel(**request_json) - tracer = request2.deserialize(tracer.model) - assert isinstance(tracer.graph, Graph) + model = tracer.model if isinstance(tracer, Session) else tracer._model + + graph = RequestModel.deserialize(model, request, 'json', True) + assert isinstance(graph, Graph) @torch.no_grad() def test_generation(gpt2: nnsight.LanguageModel, MSG_prompt: str): - with gpt2.generate(max_new_tokens=3, validate=True) as generator: + with gpt2.generate(max_new_tokens=3, validate=True, backend=AssertSavedLenBackend(1)) as generator: with generator.invoke(MSG_prompt, scan=True) as invoker: output = gpt2.generator.output.save() @@ -52,13 +74,13 @@ def test_generation(gpt2: nnsight.LanguageModel, MSG_prompt: str): @torch.no_grad() def test_save(gpt2: nnsight.LanguageModel): - with gpt2.generate("Hello world", validate=True, scan=True) as tracer: + with gpt2.generate("Hello world", validate=True, scan=True, backend=AssertSavedLenBackend(2)) as tracer: hs = gpt2.transformer.h[-1].output[0].save() hs_input = gpt2.transformer.h[-1].input.save() _test_serialize(tracer) - + assert hs.value is not None assert isinstance(hs.value, torch.Tensor) assert hs.value.ndim == 3 @@ -70,7 +92,7 @@ def test_save(gpt2: nnsight.LanguageModel): @torch.no_grad() def test_set1(gpt2: nnsight.LanguageModel, MSG_prompt: str): - with gpt2.generate(validate=True) as tracer: + with gpt2.generate(validate=True, backend=AssertSavedLenBackend(3)) as tracer: with tracer.invoke(MSG_prompt, scan=True) as invoker: pre = gpt2.transformer.h[-1].output[0].clone().save() @@ -91,7 +113,7 @@ def test_set1(gpt2: nnsight.LanguageModel, MSG_prompt: str): @torch.no_grad() def test_set2(gpt2: 
nnsight.LanguageModel, MSG_prompt: str): - with gpt2.generate(validate=True) as generator: + with gpt2.generate(validate=True, backend=AssertSavedLenBackend(3)) as generator: with generator.invoke(MSG_prompt, scan=True) as invoker: pre = gpt2.transformer.wte.output.clone().save() @@ -112,7 +134,7 @@ def test_set2(gpt2: nnsight.LanguageModel, MSG_prompt: str): @torch.no_grad() def test_adhoc_module(gpt2: nnsight.LanguageModel): - with gpt2.generate(validate=True) as generator: + with gpt2.generate(validate=True, backend=AssertSavedLenBackend(1)) as generator: with generator.invoke( "The Eiffel Tower is in the city of", scan=True ) as invoker: @@ -129,7 +151,7 @@ def test_adhoc_module(gpt2: nnsight.LanguageModel): @torch.no_grad() def test_embeddings_set1(gpt2: nnsight.LanguageModel, MSG_prompt: str): - with gpt2.generate(max_new_tokens=3, validate=True) as generator: + with gpt2.generate(max_new_tokens=3, validate=True, backend=AssertSavedLenBackend(2)) as generator: with generator.invoke(MSG_prompt, scan=True) as invoker: embeddings = gpt2.transformer.wte.output @@ -149,12 +171,13 @@ def test_embeddings_set1(gpt2: nnsight.LanguageModel, MSG_prompt: str): output1 == "Madison Square Garden is located in the city of New York City" ) + assert output2 == "_ _ _ _ _ _ _ _ _ New York City" @torch.no_grad() def test_embeddings_set2(gpt2: nnsight.LanguageModel, MSG_prompt: str): - with gpt2.generate(max_new_tokens=3, validate=True) as generator: + with gpt2.generate(max_new_tokens=3, validate=True, backend=AssertSavedLenBackend(2)) as generator: with generator.invoke(MSG_prompt, scan=True) as invoker: embeddings = gpt2.transformer.wte.output.save() @@ -162,7 +185,7 @@ def test_embeddings_set2(gpt2: nnsight.LanguageModel, MSG_prompt: str): output1 = gpt2.tokenizer.decode(output.value[0]) - with gpt2.generate(max_new_tokens=3, validate=True) as generator: + with gpt2.generate(max_new_tokens=3, validate=True, backend=AssertSavedLenBackend(1)) as generator: with generator.invoke("_ _ _ _ _ _ _ _ _", scan=True) as invoker: gpt2.transformer.wte.output = embeddings.value @@ -180,7 +203,7 @@ def test_embeddings_set2(gpt2: nnsight.LanguageModel, MSG_prompt: str): def test_retain_grad(gpt2: nnsight.LanguageModel): - with gpt2.trace(validate=True) as tracer: + with gpt2.trace(validate=True, backend=AssertSavedLenBackend(1)) as tracer: with tracer.invoke("Hello World", scan=True) as invoker: hidden_states = gpt2.transformer.h[-1].output[0].save() hidden_states.retain_grad() @@ -195,7 +218,7 @@ def test_retain_grad(gpt2: nnsight.LanguageModel): def test_grad(gpt2: nnsight.LanguageModel): - with gpt2.trace(validate=True) as tracer: + with gpt2.trace(validate=True, backend=AssertSavedLenBackend(2)) as tracer: with tracer.invoke("Hello World", scan=True) as invoker: hidden_states = gpt2.transformer.h[-1].output[0].save() hidden_states_grad = hidden_states.grad.save() @@ -211,7 +234,7 @@ def test_grad(gpt2: nnsight.LanguageModel): assert (hidden_states_grad.value == 0).all().item() - with gpt2.trace(validate=True) as tracer: + with gpt2.trace(validate=True, backend=AssertSavedLenBackend(1)) as tracer: with tracer.invoke("Hello World", scan=True) as invoker: hidden_states = gpt2.transformer.h[-1].output[0].save() grad = hidden_states.grad.clone() @@ -238,7 +261,7 @@ def test_other_device_tensors(gpt2: nnsight.LanguageModel): def fun(x): return torch.nn.ReLU()(lin(x) - bias) - with gpt2.trace("fish", validate=True, scan=True) as tracer: + with gpt2.trace("fish", validate=True, scan=True, 
backend=AssertSavedLenBackend(1)) as tracer: x = gpt2.transformer.h[0].mlp.output y = fun(x) z = y.save() @@ -250,7 +273,7 @@ def fun(x): def test_multi_grad(gpt2: nnsight.LanguageModel): - with gpt2.trace(validate=True) as tracer: + with gpt2.trace(validate=True, backend=AssertSavedLenBackend(3)) as tracer: with tracer.invoke("Hello World", scan=True) as invoker: hidden_states = gpt2.transformer.h[-1].output[0].save() @@ -290,12 +313,12 @@ def forward(self, x): l0.output[0][:] = l0.attachment(acts, hook=True) # Get values pre editing - with gpt2.trace(MSG_prompt): + with gpt2.trace(MSG_prompt, backend=AssertSavedLenBackend(2)): original = l0.output[0].clone().save() l0.output[0][:] *= 0.0 original_output = gpt2.output.logits.save() - with gpt2_edited.trace(MSG_prompt): + with gpt2_edited.trace(MSG_prompt, backend=AssertSavedLenBackend(2)): one = l0.attachment.one.output.clone().save() l0.attachment.output *= 0.0 edited_output = gpt2.output.logits.save() @@ -315,10 +338,10 @@ def test_non_inplace_editing(gpt2: nnsight.LanguageModel, MSG_prompt: str): with gpt2.edit() as gpt2_edited: gpt2.transformer.h[1].output[0][:, 1] = 0 - with gpt2.trace(MSG_prompt): + with gpt2.trace(MSG_prompt, backend=AssertSavedLenBackend(1)): l1_out = gpt2.transformer.h[1].output[0].save() - with gpt2_edited.trace(MSG_prompt): + with gpt2_edited.trace(MSG_prompt, backend=AssertSavedLenBackend(1)): l1_out_edited = gpt2_edited.transformer.h[1].output[0].save() assert torch.all(l1_out[:, 0] == 0) and torch.all(l1_out[:, 1] != 0) @@ -329,12 +352,12 @@ def test_clear_edits(gpt2: nnsight.LanguageModel, MSG_prompt: str): with gpt2.edit(inplace=True): gpt2.transformer.h[1].output[0][:] = 0 - with gpt2.trace(MSG_prompt): + with gpt2.trace(MSG_prompt, backend=AssertSavedLenBackend(1)): l1_out = gpt2.transformer.h[1].output[0].save() gpt2.clear_edits() - with gpt2.trace(MSG_prompt): + with gpt2.trace(MSG_prompt, backend=AssertSavedLenBackend(1)): l1_out_unedited = gpt2.transformer.h[1].output[0].save() assert torch.all(l1_out == 0) @@ -362,7 +385,7 @@ def forward(self, x): acts = l0.output[0] l0.output[0][:] = l0.attachment(acts, hook=True) - with gpt2_edited.trace(batch): + with gpt2_edited.trace(batch, backend=AssertSavedLenBackend(1)): edited = l0.attachment.output.save() # Check that the batch size does not narrow @@ -370,7 +393,7 @@ def forward(self, x): def test_conditional_interventions(gpt2: nnsight.LanguageModel): - with gpt2.session() as session: + with gpt2.session(backend=AssertSavedLenBackend(1)) as session: with gpt2.trace("Hello World", validate=True, scan=True) as tracer: with tracer.cond( torch.all(gpt2.transformer.h[5].output[0] < 100000) @@ -385,7 +408,7 @@ def test_conditional_interventions(gpt2: nnsight.LanguageModel): def test_input_setting(gpt2: nnsight.LanguageModel, MSG_prompt: str): - with gpt2.session(): + with gpt2.session(backend=AssertSavedLenBackend(2)): with gpt2.trace(MSG_prompt): hs = gpt2.transformer.h[6].inputs tokens_out_1 = gpt2.lm_head.output.argmax(dim=-1).save() diff --git a/tests/test_tiny.py b/tests/test_tiny.py index 95db7800..784bf6ea 100755 --- a/tests/test_tiny.py +++ b/tests/test_tiny.py @@ -4,14 +4,36 @@ import pytest import torch -import nnsight +from nnsight.tracing.backends import Backend +from nnsight.tracing.graph import Graph +from nnsight.tracing.protocols import StopProtocol from nnsight import NNsight - +class AssertSavedLenBackend(Backend): + + def __init__(self, len:int) -> None: + self.len = len + + def __call__(self, graph: Graph) -> None: + + try: + + 
graph.nodes[-1].execute() + + except StopProtocol.StopException: + + pass + + finally: + + assert self.len == len([node for node in graph.nodes if node.done]) + + graph.nodes.clear() + graph.stack.clear() + input_size = 5 hidden_dims = 10 output_size = 2 - @pytest.fixture(scope="module") def tiny_model(device: str): @@ -40,7 +62,7 @@ def tiny_input(): @torch.no_grad() def test_tiny(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.trace(tiny_input): + with tiny_model.trace(tiny_input, backend=AssertSavedLenBackend(1)): hs = tiny_model.layer2.output.save() @@ -48,7 +70,7 @@ def test_tiny(tiny_model: NNsight, tiny_input: torch.Tensor): def test_grad_setting(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.trace(tiny_input, validate=True, scan=True): + with tiny_model.trace(tiny_input, validate=True, scan=True, backend=AssertSavedLenBackend(2)): l1_grad = tiny_model.layer1.output.grad.clone().save() tiny_model.layer1.output.grad = ( @@ -66,26 +88,22 @@ def test_grad_setting(tiny_model: NNsight, tiny_input: torch.Tensor): def test_external_proxy_intervention_executed_locally( tiny_model: NNsight, tiny_input: torch.Tensor ): - with tiny_model.session(validate=True) as sesh: + with tiny_model.session(validate=True, backend=AssertSavedLenBackend(1)) as sesh: with tiny_model.trace(tiny_input, validate=True, scan=True) as tracer_1: l1_out = tiny_model.layer1.output.save() with tiny_model.trace(tiny_input, validate=True, scan=True) as tracer_2: l1_out[:, 2] = 5 - assert list(tracer_2.graph.nodes.keys()) == [ - "BridgeProtocol_0", - "setitem_0", - ] - assert l1_out[:, 2] == 5 def test_early_stop_protocol(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.trace(tiny_input, validate=True, scan=True): + with tiny_model.trace(tiny_input, validate=True, scan=True, backend=AssertSavedLenBackend(1)) as tracer: l1_out = tiny_model.layer1.output.save() + tracer.stop() l2_out = tiny_model.layer2.output.save() - tiny_model.layer1.output.stop() + assert isinstance(l1_out.value, torch.Tensor) @@ -96,7 +114,7 @@ def test_early_stop_protocol(tiny_model: NNsight, tiny_input: torch.Tensor): def test_true_conditional_protocol( tiny_model: NNsight, tiny_input: torch.Tensor ): - with tiny_model.trace(tiny_input, validate=True, scan=True) as tracer: + with tiny_model.trace(tiny_input, validate=True, scan=True, backend=AssertSavedLenBackend(1)) as tracer: num = 5 with tracer.cond(num > 0): tiny_model.layer1.output[:] = 1 @@ -109,7 +127,7 @@ def test_true_conditional_protocol( def test_false_conditional_protocol( tiny_model: NNsight, tiny_input: torch.Tensor ): - with tiny_model.trace(tiny_input, validate=True, scan=True) as tracer: + with tiny_model.trace(tiny_input, validate=True, scan=True, backend=AssertSavedLenBackend(1)) as tracer: num = 5 with tracer.cond(num < 0): tiny_model.layer1.output[:] = 1 @@ -126,7 +144,7 @@ def test_false_conditional_protocol( def test_node_as_condition(tiny_model: NNsight, tiny_input: torch.Tensor): """Test a Tensor boolean value, produced by a boolean operation on an InterventionProxy, as a condition""" - with tiny_model.trace(tiny_input, validate=True, scan=True) as tracer: + with tiny_model.trace(tiny_input, validate=True, scan=True, backend=AssertSavedLenBackend(0)) as tracer: out = tiny_model.layer1.output out[:, 0] = 1 with tracer.cond(out[:, 0] != 1): @@ -142,7 +160,7 @@ def test_multiple_dependent_conditionals( ): """Test that interventions defined within different Intervention contexts can be referenced if their conditions evaluate to True."""
- with tiny_model.trace(tiny_input, validate=True, scan=True) as tracer: + with tiny_model.trace(tiny_input, validate=True, scan=True, backend=AssertSavedLenBackend(1)) as tracer: num = 5 l1_out = tiny_model.layer1.output l2_out = tiny_model.layer2.output.save() @@ -159,7 +177,7 @@ def test_multiple_dependent_conditionals( def test_nested_conditionals(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.trace(tiny_input, validate=True, scan=True) as tracer: + with tiny_model.trace(tiny_input, validate=True, scan=True, backend=AssertSavedLenBackend(1)) as tracer: num = 5 with tracer.cond(num > 0): # True l1_out = tiny_model.layer1.output.save() @@ -183,7 +201,7 @@ def test_nested_conditionals(tiny_model: NNsight, tiny_input: torch.Tensor): def test_conditional_trace(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.session(validate=True) as session: + with tiny_model.session(validate=True, backend=AssertSavedLenBackend(1)) as session: num = 5 with session.cond(num > 0): with tiny_model.trace(tiny_input, validate=True, scan=True): @@ -193,13 +211,10 @@ def test_conditional_trace(tiny_model: NNsight, tiny_input: torch.Tensor): def test_conditional_iteration(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.session(validate=True) as session: + with tiny_model.session(validate=False, backend=AssertSavedLenBackend(1)) as session: result = session.apply(list).save() - with session.iter([0, 1, 2], return_context=True, validate=True) as ( - item, - iterator, - ): - with iterator.cond(item % 2 == 0): + with session.iter([0, 1, 2]) as item: + with session.cond(item % 2 == 0): with tiny_model.trace(tiny_input, validate=True, scan=True): result.append(item) @@ -207,7 +222,7 @@ def test_conditional_iteration(tiny_model: NNsight, tiny_input: torch.Tensor): def test_bridge_protocol(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.session(validate=True) as session: + with tiny_model.session(validate=True, backend=AssertSavedLenBackend(1)) as session: val = session.apply(int, 0) with tiny_model.trace(tiny_input, validate=True, scan=True): tiny_model.layer1.output[:] = ( @@ -218,44 +233,26 @@ def test_bridge_protocol(tiny_model: NNsight, tiny_input: torch.Tensor): assert torch.all(l1_out.value == 0).item() -def test_update_protocol(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.session(validate=True) as session: - sum = session.apply(int, 0).save() - with session.iter([0, 1, 2], validate=True) as item: - sum.update(sum + item) - - sum.update(sum + 4) - - with tiny_model.trace(tiny_input, validate=True, scan=True): - sum.update(sum + 3) - double_sum = (sum * 2).save() - - assert double_sum.value == 20 - - def test_sequential_graph_based_context_exit(tiny_model: NNsight): - with tiny_model.session(validate=True) as session: + with tiny_model.session(validate=True, backend=AssertSavedLenBackend(1)) as session: l = session.apply(list).save() l.append(0) - with session.iter([1, 2, 3, 4], return_context=True, validate=True) as ( - item, - iterator, - ): - with iterator.cond(item == 3): - iterator.exit() + with session.iter([1, 2, 3, 4]) as item: + with session.cond(item == 3): + session.stop() l.append(item) l.append(5) - session.exit() + session.stop() l.append(6) assert l.value == [0, 1, 2, 5] def test_tracer_stop(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.trace(tiny_input, validate=True, scan=True): + with tiny_model.trace(tiny_input, validate=True, scan=True, backend=AssertSavedLenBackend(0)) as tracer: 
l1_out = tiny_model.layer1.output - tiny_model.layer1.output.stop() + tracer.stop() l1_out_double = l1_out * 2 with pytest.raises(ValueError): @@ -263,11 +260,11 @@ def test_tracer_stop(tiny_model: NNsight, tiny_input: torch.Tensor): def test_bridged_node_cleanup(tiny_model: NNsight): - with tiny_model.session(validate=True) as session: + with tiny_model.session(validate=True, backend=AssertSavedLenBackend(0)) as session: l = session.apply(list) - with session.iter([0, 1, 2], return_context=True, validate=True) as (item, iterator): - with iterator.cond(item == 2): - iterator.exit() + with session.iter([0, 1, 2]) as item: + with session.cond(item == 2): + session.stop() l.append(item) with pytest.raises(ValueError): @@ -276,34 +273,34 @@ def test_bridged_node_cleanup(tiny_model: NNsight): def test_nested_iterator(tiny_model: NNsight): - with tiny_model.session(validate=True) as session: + with tiny_model.session(validate=True, backend=AssertSavedLenBackend(1)) as session: l = session.apply(list) l.append([0]) l.append([1]) l.append([2]) l2 = session.apply(list).save() - with session.iter(l, validate=True) as item: - with session.iter(item, validate=True) as item_2: + with session.iter(l) as item: + with session.iter(item) as item_2: l2.append(item_2) assert l2.value == [0, 1, 2] def test_nnsight_builtins(tiny_model: NNsight): - with tiny_model.session() as session: - nn_list = nnsight.list().save() - sesh_list = session.list().save() + with tiny_model.session(backend=AssertSavedLenBackend(3)) as session: + nn_list = session.apply(list).save() + sesh_list = session.apply(list).save() apply_list = session.apply(list).save() - with session.iter([nn_list, sesh_list, apply_list], return_context=True) as (l, iterator): - l.append(nnsight.int(0)) - l.append(iterator.str("Hello World")) - l.append(nnsight.dict({"a": "1"})) + with session.iter([nn_list, sesh_list, apply_list]) as l: + l.append(session.apply(int)) + l.append(session.apply(str, "Hello World")) + l.append(session.apply(dict, {"a": "1"})) - assert nn_list == sesh_list - assert sesh_list == apply_list + assert nn_list.value == sesh_list.value + assert sesh_list.value == apply_list.value def test_torch_creation_operations_patch(tiny_model: NNsight, tiny_input: torch.Tensor): - with tiny_model.trace(tiny_input, scan=False, validate=False): + with tiny_model.trace(tiny_input, scan=False, validate=False, backend=AssertSavedLenBackend(0)): l1_output = tiny_model.layer1.output torch.arange(l1_output.shape[0], l1_output.shape[1]) torch.empty(l1_output.shape) diff --git a/tests/test_vllm.py b/tests/test_vllm.py new file mode 100644 index 00000000..7d339fd9 --- /dev/null +++ b/tests/test_vllm.py @@ -0,0 +1,264 @@ +import pytest +import nnsight +import torch +from typing import TYPE_CHECKING + +from nnsight.tracing.backends import Backend +from nnsight.tracing.protocols import StopProtocol + +if TYPE_CHECKING: + from nnsight.tracing.graph import Graph + +try: + from nnsight.modeling.vllm import VLLM +except Exception: + pytest.skip("Skipping VLLM tests", allow_module_level=True) + + +class AssertSavedLenBackend(Backend): + + def __init__(self, len:int) -> None: + self.len = len + + def __call__(self, graph: "Graph") -> None: + + try: + + graph.nodes[-1].execute() + + except StopProtocol.StopException: + + pass + + finally: + + assert self.len == len([node for node in graph.nodes if node.done]) + + graph.nodes.clear() + graph.stack.clear() + + +@pytest.fixture(scope="module") +def tp(request): + tp = request.config.getoption("--tp") + if tp >
torch.cuda.device_count() or tp < 1: + pytest.exit("--tp must be between 1 and the number of available GPUs.") + return tp + +@pytest.fixture(scope="module") +def vllm_gpt2(tp: int): + return VLLM("gpt2", tensor_parallel_size=tp, dispatch=True) + +@pytest.fixture +def ET_prompt(): + return "The Eiffel Tower is located in the city of" + +@pytest.fixture +def MSG_prompt(): + return "Madison Square Garden is located in the city of" + + +def test_single_logit(vllm_gpt2, ET_prompt: str): + with vllm_gpt2.trace(ET_prompt, temperature=0.0, top_p=1, backend=AssertSavedLenBackend(1)): + logits = vllm_gpt2.logits.output.save() + + next_token = vllm_gpt2.tokenizer.decode(logits.argmax(dim=-1)) + assert next_token == " Paris" + + +def test_multi_token_generation(vllm_gpt2, MSG_prompt: str): + with vllm_gpt2.trace(MSG_prompt, temperature=0.0, top_p=1.0, max_tokens=3): + logits = nnsight.list().save() + for ii in range(3): + logits.append(vllm_gpt2.logits.output) + vllm_gpt2.logits.next() + + assert vllm_gpt2.tokenizer.batch_decode([logit.argmax(dim=-1) for logit in logits]) == [" New", " York", " City"] + + +def test_sampling(vllm_gpt2, MSG_prompt: str): + with vllm_gpt2.trace(max_tokens=3) as tracer: + with tracer.invoke(MSG_prompt, temperature=0.0, top_p=1.0, max_tokens=3): + samples_1 = nnsight.list().save() + for ii in range(3): + samples_1.append(vllm_gpt2.samples.output) + vllm_gpt2.samples.next() + with tracer.invoke(MSG_prompt, temperature=0.8, top_p=0.95): + samples_2 = nnsight.list().save() + for ii in range(3): + samples_2.append(vllm_gpt2.samples.output) + vllm_gpt2.samples.next() + + assert vllm_gpt2.tokenizer.batch_decode(samples_1) == [" New", " York", " City"] + assert vllm_gpt2.tokenizer.batch_decode(samples_2) == [" Richmond", " on", " the"] + + +""" def test_max_token_generation(vllm_gpt2, ET_prompt: str): + with vllm_gpt2.trace(ET_prompt, max_tokens=10): + logits = nnsight.list().save() + with vllm_gpt2.logits.all(): + logits.append(vllm_gpt2.logits.output) + + assert len(logits) == 10 """ + + +""" def test_sampling(vllm_gpt2, ET_prompt:str): + with vllm_gpt2.trace(ET_prompt, temperature=0.8, top_p=0.95, max_tokens=3): + samples = nnsight.list().save() + with vllm_gpt2.sample.all(): + li.append(vllm_gpt2.sample.output) + + samples = vllm_gpt2.batch_decode([sample.argmax(dim=-1) for sample in samples]) + assert samples == [' Canary', ' Wh', 'arf'] """ + + +def test_intervention(vllm_gpt2, ET_prompt: str): + with vllm_gpt2.trace(ET_prompt, temperature=0.0, top_p=1, backend=AssertSavedLenBackend(2)) as tracer: + vllm_gpt2.transformer.h[-2].mlp.output[:] = 0 + hs = vllm_gpt2.transformer.h[-2].mlp.output.save() + logits = vllm_gpt2.logits.output.save() + + next_token = vllm_gpt2.tokenizer.decode(logits.argmax(dim=-1)) + assert next_token == " London" + assert torch.all(hs == 0) + + +def test_swap_intervention(vllm_gpt2, ET_prompt: str): + with vllm_gpt2.trace(ET_prompt, temperature=0.0, top_p=1, backend=AssertSavedLenBackend(2)) as tracer: + vllm_gpt2.transformer.h[-2].mlp.output = torch.zeros_like(vllm_gpt2.transformer.h[-2].mlp.output) + hs = vllm_gpt2.transformer.h[-2].mlp.output.save() + logits = vllm_gpt2.logits.output.save() + + next_token = vllm_gpt2.tokenizer.decode(logits.argmax(dim=-1)) + assert next_token == " London" + assert torch.all(hs == 0) + + +def test_batched_intervention(vllm_gpt2, ET_prompt: str,): + with vllm_gpt2.trace(temperature=0.0, top_p=1, backend=AssertSavedLenBackend(4)) as tracer: + + with tracer.invoke(ET_prompt): + clean_hs =
vllm_gpt2.transformer.h[-2].mlp.output.save() + clean_logits = vllm_gpt2.logits.output.save() + with tracer.invoke(ET_prompt): + vllm_gpt2.transformer.h[-2].mlp.output[:] = 0 + corrupted_hs = vllm_gpt2.transformer.h[-2].mlp.output.save() + corrupted_logits = vllm_gpt2.logits.output.save() + + clean_token = vllm_gpt2.tokenizer.decode(clean_logits.argmax(dim=-1)) + corrupted_token = vllm_gpt2.tokenizer.decode(corrupted_logits.argmax(dim=-1)) + + assert clean_token == " Paris" + assert corrupted_token == " London" + assert not torch.all(clean_hs == 0) + assert torch.all(corrupted_hs == 0) + + +def test_batched_multi_token_generation(vllm_gpt2, ET_prompt: str, MSG_prompt: str): + max_token_1: int = 3 + max_token_2: int = 5 + + num_prompts_1: int = 2 + num_prompts_2: int = 1 + + with vllm_gpt2.trace() as tracer: + with tracer.invoke([MSG_prompt, ET_prompt], max_tokens=max_token_1): + MSG_ET_hs = nnsight.list().save() + MSG_ET_logits = nnsight.list().save() + MSG_ET_samples = nnsight.list().save() + for ii in range(max_token_1): + MSG_ET_hs.append(vllm_gpt2.transformer.h[5].output) + vllm_gpt2.transformer.h[5].next() + MSG_ET_logits.append(vllm_gpt2.logits.output) + vllm_gpt2.logits.next() + MSG_ET_samples.append(vllm_gpt2.samples.output) + vllm_gpt2.samples.next() + with tracer.invoke(MSG_prompt, max_tokens=max_token_2): + MSG_hs = nnsight.list().save() + MSG_logits = nnsight.list().save() + MSG_samples = nnsight.list().save() + for ii in range(max_token_2): + MSG_hs.append(vllm_gpt2.transformer.h[5].output) + vllm_gpt2.transformer.h[5].next() + MSG_logits.append(vllm_gpt2.logits.output) + vllm_gpt2.logits.next() + MSG_samples.append(vllm_gpt2.samples.output) + vllm_gpt2.samples.next() + + assert len(MSG_ET_hs) == max_token_1 + assert all(hs.shape[0] == num_prompts_1 for hs in MSG_ET_hs[1:]) + + assert len(MSG_ET_logits) == max_token_1 + assert all(logit.shape[0] == num_prompts_1 for logit in MSG_ET_logits) + + assert len(MSG_ET_samples) == max_token_1 + assert all(sample.shape[0] == num_prompts_1 for sample in MSG_ET_samples) + + + assert len(MSG_hs) == max_token_2 + assert all(hs.shape[0] == num_prompts_2 for hs in MSG_hs[1:]) + + assert len(MSG_logits) == max_token_2 + assert all(logit.shape[0] == num_prompts_2 for logit in MSG_logits) + + assert len(MSG_samples) == max_token_2 + assert all(sample.shape[0] == num_prompts_2 for sample in MSG_samples) + + +""" def test_batched_multi_token_generation_with_iter(vllm_gpt2, ET_prompt: str, MSG_prompt: str): + with vllm_gpt2.trace(max_tokens=10) as tracer: + with tracer.invoke(ET_prompt): + ET_logits = nnsight.list().save() + with vllm_gpt2.logits.iter[:3]: + ET_logits.append(vllm_gpt2.logits.output) + #vllm_gpt2.output.save() + with tracer.invoke(MSG_prompt, max_tokens=5): + MSG_logits = nnsight.list().save() + with vllm_gpt2.logits.iter[:5]: + MSG_logits.append(vllm_gpt2.logits.output) + + assert len(ET_logits.value) == 3 + assert len(MSG_logits.value) == 5 """ + + +def test_multi_token_generation_with_intervention(tp, vllm_gpt2, MSG_prompt: str): + with vllm_gpt2.trace(MSG_prompt, temperature=0.0, top_p=1.0, max_tokens=5) as tracer: + logits = nnsight.list().save() + hs_list = nnsight.list().save() + for ii in range(5): + if ii == 2: + vllm_gpt2.transformer.h[-2].output[0][:] = 0 + hs_list.append(vllm_gpt2.transformer.h[-2].output[0]) + vllm_gpt2.transformer.h[-2].next() + logits.append(vllm_gpt2.logits.output) + vllm_gpt2.logits.next() + + assert [torch.all(hs == 0) for hs in hs_list] == [False, False, True, False, False] + + if tp == 1: +
assert vllm_gpt2.tokenizer.batch_decode([logit.argmax(dim=-1) for logit in logits]) == [' New', ' York', '\n', '\n', 'The'] + + +""" def test_multi_referenced_module(vllm_gpt2, ET_prompt: str): + with vllm_gpt2.trace(ET_prompt): + act_in = vllm_gpt2.transformer.h[0].mlp.act.input.save() + vllm_gpt2.transformer.h[0].mlp.act.next() + act_in_other = vllm_gpt2.transformer.h[1].mlp.act.input.save() + + assert not torch.equal(act_in, act_in_other) """ + + +def test_tensor_parallelism(tp, vllm_gpt2, ET_prompt: str): + if tp < 2: + pytest.skip("Test requires tensor parallelism (tp >= 2).") + + with vllm_gpt2.trace(ET_prompt, temperature=0.0, top_p=1.0): + vllm_gpt2.transformer.h[5].mlp.c_fc.output[0][:, 2000:] = 0 + hs = vllm_gpt2.transformer.h[5].mlp.c_fc.output[0].save() + logit = vllm_gpt2.logits.output.save() + + next_token = vllm_gpt2.tokenizer.decode(logit.argmax(dim=-1)) + + #assert next_token != " Paris" + assert hs.shape == torch.Size([11, 3072]) + assert torch.all(hs[:, 2000:] == 0) From b2a0c47e567d9b121170990d24dd4eb88b391a10 Mon Sep 17 00:00:00 2001 From: "jadenfk@outlook.com" Date: Fri, 31 Jan 2025 12:22:59 -0500 Subject: [PATCH 2/2] Rename control flow --- src/nnsight/config.yaml | 2 +- src/nnsight/schema/config.py | 2 +- src/nnsight/tracing/graph/proxy.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/nnsight/config.yaml b/src/nnsight/config.yaml index f76f0b42..813f74df 100755 --- a/src/nnsight/config.yaml +++ b/src/nnsight/config.yaml @@ -8,6 +8,6 @@ API: APP: DEBUG: true FRAME_INJECTION: true - CONTROL_FLOW_HACKS: true + CONTROL_FLOW_HANDLING: true LOGGING: false REMOTE_LOGGING: true diff --git a/src/nnsight/schema/config.py b/src/nnsight/schema/config.py index 2e5a7c48..0d73b8df 100755 --- a/src/nnsight/schema/config.py +++ b/src/nnsight/schema/config.py @@ -16,7 +16,7 @@ class AppConfigModel(BaseModel): LOGGING: bool = False REMOTE_LOGGING: bool = True DEBUG: bool = True - CONTROL_FLOW_HACKS:bool = True + CONTROL_FLOW_HANDLING:bool = True FRAME_INJECTION:bool = True class ConfigModel(BaseModel): diff --git a/src/nnsight/tracing/graph/proxy.py b/src/nnsight/tracing/graph/proxy.py index 233fb296..b0c5a5d5 100755 --- a/src/nnsight/tracing/graph/proxy.py +++ b/src/nnsight/tracing/graph/proxy.py @@ -283,7 +283,7 @@ def __len__(self) -> Self: def __iter__(self) -> Iterator[Self]: - if not CONFIG.APP.CONTROL_FLOW_HACKS: + if not CONFIG.APP.CONTROL_FLOW_HANDLING: raise Exception( - 'Iteration control flow encountered but "CONFIG.APP.CONTROL_FLOW_HACKS" is set to False' ) + 'Iteration control flow encountered but "CONFIG.APP.CONTROL_FLOW_HANDLING" is set to False' ) @@ -294,7 +294,7 @@ def __iter__(self) -> Iterator[Self]: def __bool__(self) -> Self: - if not CONFIG.APP.CONTROL_FLOW_HACKS: + if not CONFIG.APP.CONTROL_FLOW_HANDLING: raise Exception( - 'Conditional control flow encountered but "CONFIG.APP.CONTROL_FLOW_HACKS" is set to False' ) + 'Conditional control flow encountered but "CONFIG.APP.CONTROL_FLOW_HANDLING" is set to False' )
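
Note on the pattern these tests repeat: in 0.4 a backend is any callable that receives the traced Graph when the tracing context exits, selected via the backend= keyword on trace/generate/session. The following is a minimal sketch of a custom backend used outside the test suite, mirroring the AssertSavedLenBackend helper added above. Backend, Graph, and StopProtocol are the imports from this patch; CountSavedBackend, the "gpt2" model id, and the printed summary are illustrative assumptions, not part of the diff.

    import nnsight
    from nnsight.tracing.backends import Backend
    from nnsight.tracing.graph import Graph
    from nnsight.tracing.protocols import StopProtocol


    class CountSavedBackend(Backend):
        """Hypothetical backend: runs the deferred graph, then reports completed nodes."""

        def __call__(self, graph: Graph) -> None:
            try:
                # Executing the final node drives the whole intervention graph,
                # exactly as AssertSavedLenBackend does in the tests above.
                graph.nodes[-1].execute()
            except StopProtocol.StopException:
                # Raised by tracer.stop() / session.stop() to end execution early.
                pass
            finally:
                print(f"{sum(node.done for node in graph.nodes)} node(s) completed")
                graph.nodes.clear()
                graph.stack.clear()


    model = nnsight.LanguageModel("gpt2")
    with model.trace("Hello World", backend=CountSavedBackend()):
        hs = model.transformer.h[-1].output[0].save()

Because the backend only sees the finished Graph, swapping it (as these tests do with AssertSavedLenBackend) changes how a trace is executed and checked without touching the intervention code inside the context.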