From c424740b0f921b67fd8e28c020fe122d7780d9f9 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Wed, 8 Jan 2025 21:21:00 +0200
Subject: [PATCH 01/28] Update benchmark script for RVC2 using DepthAI v3

---
 .pre-commit-config.yaml                   |   2 +
 modelconverter/__main__.py                |   4 +-
 modelconverter/packages/base_benchmark.py |  11 +-
 modelconverter/packages/rvc2/benchmark.py | 216 +++++++++-------------
 modelconverter/utils/__init__.py          |   2 +
 modelconverter/utils/hubai_utils.py       |  36 ++++
 6 files changed, 138 insertions(+), 133 deletions(-)
 create mode 100644 modelconverter/utils/hubai_utils.py

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 232e7c4..aedcf16 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,3 +1,5 @@
+default_language_version:
+  python: python3
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.1.2
diff --git a/modelconverter/__main__.py b/modelconverter/__main__.py
index 5d73669..5e320fa 100644
--- a/modelconverter/__main__.py
+++ b/modelconverter/__main__.py
@@ -175,10 +175,12 @@ def benchmark(
 
     **RVC2**
 
-    - `--repetitions`: The number of repetitions to perform. Default: `1`
+    - `--repetitions`: The number of repetitions to perform. Default: `10`
 
     - `--num-threads`: The number of threads to use for inference. Default: `2`
 
+    - `--num-messages`: The number of messages to measure for each report. Default: `50`
+
     ---
 
     **RVC3**
diff --git a/modelconverter/packages/base_benchmark.py b/modelconverter/packages/base_benchmark.py
index e3eba9a..0934d0b 100644
--- a/modelconverter/packages/base_benchmark.py
+++ b/modelconverter/packages/base_benchmark.py
@@ -7,7 +7,7 @@
 import pandas as pd
 from typing_extensions import TypeAlias
 
-from modelconverter.utils import resolve_path
+from modelconverter.utils import is_hubai_available, resolve_path
 
 logger = getLogger(__name__)
 
@@ -28,9 +28,14 @@ def __init__(
         model_path: str,
         dataset_path: Optional[Path] = None,
     ):
-        self.model_path = resolve_path(model_path, Path.cwd())
+        if not is_hubai_available(model_path):
+            self.model_path = resolve_path(model_path, Path.cwd())
+            self.model_name = self.model_path.stem
+        else:
+            self.model_path = model_path
+            self.model_name = self.model_path.split("/", 1)[-1]
         self.dataset_path = dataset_path
-        self.model_name = self.model_path.stem
+
         self.header = [
             *self.default_configuration.keys(),
             "fps",
diff --git a/modelconverter/packages/rvc2/benchmark.py b/modelconverter/packages/rvc2/benchmark.py
index 808edd0..42a633d 100644
--- a/modelconverter/packages/rvc2/benchmark.py
+++ b/modelconverter/packages/rvc2/benchmark.py
@@ -1,11 +1,9 @@
 import logging
-import time
 from pathlib import Path
-from typing import Dict, List, cast
+from typing import List
 
 import depthai as dai
 import numpy as np
-from depthai import NNData
 from rich.progress import Progress
 
 from ..base_benchmark import Benchmark, BenchmarkResult, Configuration
@@ -20,14 +18,14 @@ def default_configuration(self) -> Configuration:
         repetitions: The number of repetitions to perform.
         num_threads: The number of threads to use for inference.
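+        num_messages: The number of messages to measure for each report.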
""" - return {"repetitions": 1, "num_threads": 2} + return {"repetitions": 10, "num_messages": 50, "num_threads": 2} @property def all_configurations(self) -> List[Configuration]: return [ - {"repetitions": 5, "num_threads": 1}, - {"repetitions": 5, "num_threads": 2}, - {"repetitions": 5, "num_threads": 3}, + {"repetitions": 10, "num_messages": 50, "num_threads": 1}, + {"repetitions": 10, "num_messages": 50, "num_threads": 2}, + {"repetitions": 10, "num_messages": 50, "num_threads": 3}, ] def benchmark(self, configuration: Configuration) -> BenchmarkResult: @@ -35,135 +33,95 @@ def benchmark(self, configuration: Configuration) -> BenchmarkResult: @staticmethod def _benchmark( - model_path: Path, repetitions: int, num_threads: int + model_path: Path | str, + repetitions: int, + num_messages: int, + num_threads: int, ) -> BenchmarkResult: - model = dai.OpenVINO.Blob(model_path) - input_name_shape: Dict[str, List[int]] = {} - input_name_type = {} - for i in list(model.networkInputs): - input_name_shape[i] = model.networkInputs[i].dims - input_name_type[i] = model.networkInputs[i].dataType.name - - output_name_shape = {} - output_name_type = {} - for i in list(model.networkOutputs): - output_name_shape[i] = model.networkOutputs[i].dims - output_name_type[i] = model.networkOutputs[i].dataType.name - - pipeline = dai.Pipeline() - - detection_nn = pipeline.createNeuralNetwork() - detection_nn.setBlobPath(model_path) - detection_nn.setNumInferenceThreads(num_threads) - detection_nn.input.setBlocking(True) - detection_nn.input.setQueueSize(1) - - nn_in = pipeline.createXLinkIn() - nn_in.setMaxDataSize(6291456) - nn_in.setStreamName("in_nn") - nn_in.out.link(detection_nn.input) - - xout_nn = pipeline.createXLinkOut() - xout_nn.setStreamName("nn") - xout_nn.input.setQueueSize(1) - xout_nn.input.setBlocking(True) - detection_nn.out.link(xout_nn.input) - - xlink_buffer_max_size = 5 * 1024 * 1024 - product_sum = sum( - map(lambda x: np.product(np.array(x)), output_name_shape.values()) - ) - - xlink_buffer_count = int(xlink_buffer_max_size / product_sum) - - logger.info(f"XLink buffer count: {xlink_buffer_count}") - if xlink_buffer_count > 1000: - logger.warning( - "XLink buffer count is too high! " - "The benchmarking will take more time and " - "the results may be overestimated." + device = dai.Device() + + if isinstance(model_path, str): + modelPath = dai.getModelFromZoo( + dai.NNModelDescription( + model_path, + platform=device.getPlatformAsString(), + ) + ) + elif str(model_path).endswith(".tar.xz"): + modelPath = str(model_path) + elif str(model_path).endswith(".blob"): + modelPath = model_path + else: + raise ValueError( + "Unsupported model format. Supported formats: .tar.xz, .blob, or HubAI model slug." 
) - with dai.Device(pipeline) as device, Progress() as progress: - device = cast(dai.Device, device) - detection_in_count = 100 + xlink_buffer_count - detection_in = device.getInputQueue( - "in_nn", maxSize=detection_in_count, blocking=True + inputSizes = [] + inputNames = [] + if isinstance(model_path, str) or str(model_path).endswith(".tar.xz"): + modelArhive = dai.NNArchive(modelPath) + for input in modelArhive.getConfig().model.inputs: + inputSizes.append(input.shape[::-1]) + inputNames.append(input.name) + elif str(model_path).endswith(".blob"): + blob_model = dai.OpenVINO.Blob(modelPath) + for input in blob_model.networkInputs: + inputSizes.append(blob_model.networkInputs[input].dims) + inputNames.append(input) + + inputData = dai.NNData() + for name, inputSize in zip(inputNames, inputSizes): + img = np.random.randint( + 0, 255, (inputSize[1], inputSize[0], 3), np.uint8 ) - q_nn = device.getOutputQueue(name="nn", maxSize=1, blocking=True) + inputData.addTensor(name, img) - fps_storage = [] - diffs = [] - time.sleep(1) + with dai.Pipeline(device) as pipeline, Progress() as progress: repet_task = progress.add_task( "[magenta]Repetition", total=repetitions ) - infer_task = progress.add_task( - "[magenta]Inference", total=300 + 2 * xlink_buffer_count - ) - for _ in range(repetitions): - progress.reset(infer_task, total=300 + 2 * xlink_buffer_count) - for _ in range(100 + xlink_buffer_count): - nn_data = dai.NNData() - for inp_name in input_name_shape: - if input_name_type[inp_name] in ["FLOAT16", "FLOAT32"]: - frame = cast( - np.ndarray, - np.random.rand(*input_name_shape[inp_name]), - ) - frame = frame.astype( - "float16" - if input_name_type[inp_name] == "FLOAT16" - else "float32" - ) - elif input_name_type[inp_name] in ["INT", "I8", "U8F"]: - frame = np.random.randint( - 256, - size=input_name_shape[inp_name], - dtype=( - np.int32 - if input_name_type[inp_name] == "INT" - else ( - np.uint8 - if input_name_type[inp_name] == "U8F" - else np.int8 - ) - ), - ) - else: - raise RuntimeError( - f"Unknown input type detected: {input_name_type[inp_name]}!" - ) - - nn_data.setLayer(inp_name, frame) - - if len(input_name_shape) == 0: - raise RuntimeError( - "Failed to create input data: missing required information for one or more input layers." 
- ) - detection_in.send(nn_data) - progress.update(infer_task, advance=1) - - for _ in range(100): - progress.update(infer_task, advance=1) - time.sleep(3 / 100) - - for _ in range(40 + xlink_buffer_count): - cast(NNData, q_nn.get()).getFirstLayerFp16() - progress.update(infer_task, advance=1) - - start = time.time() - for _ in range(50): - cast(NNData, q_nn.get()).getFirstLayerFp16() - progress.update(infer_task, advance=1) - diff = time.time() - start - diffs.append(diff / 50) - fps_storage.append(50 / diff) - - for _ in range(10): - cast(NNData, q_nn.get()).getFirstLayerFp16() - progress.update(infer_task, advance=1) + + benchmarkOut = pipeline.create(dai.node.BenchmarkOut) + benchmarkOut.setRunOnHost(False) + benchmarkOut.setFps(-1) + + neuralNetwork = pipeline.create(dai.node.NeuralNetwork) + if isinstance(model_path, str) or str(model_path).endswith( + ".tar.xz" + ): + neuralNetwork.setNNArchive(modelArhive) + elif str(model_path).endswith(".blob"): + neuralNetwork.setBlobPath(modelPath) + neuralNetwork.setNumInferenceThreads(num_threads) + + benchmarkIn = pipeline.create(dai.node.BenchmarkIn) + benchmarkIn.setRunOnHost(False) + benchmarkIn.sendReportEveryNMessages(num_messages) + benchmarkIn.logReportsAsWarnings(False) + + benchmarkOut.out.link(neuralNetwork.input) + neuralNetwork.out.link(benchmarkIn.input) + + outputQueue = benchmarkIn.report.createOutputQueue() + inputQueue = benchmarkOut.input.createInputQueue() + + pipeline.start() + inputQueue.send(inputData) + + rep = 0 + fps_list = [] + avg_latency_list = [] + while pipeline.isRunning() and rep < repetitions: + benchmarkReport = outputQueue.get() + assert isinstance(benchmarkReport, dai.BenchmarkReport) + fps = benchmarkReport.fps + avg_latency = benchmarkReport.averageLatency + + fps_list.append(fps) + avg_latency_list.append(avg_latency) progress.update(repet_task, advance=1) + rep += 1 - diffs = np.array(diffs) * 1000 - return BenchmarkResult(np.mean(fps_storage), np.mean(diffs)) + return BenchmarkResult( + np.mean(fps_list), np.mean(avg_latency_list) + ) diff --git a/modelconverter/utils/__init__.py b/modelconverter/utils/__init__.py index 9566125..7d7bf33 100644 --- a/modelconverter/utils/__init__.py +++ b/modelconverter/utils/__init__.py @@ -19,6 +19,7 @@ resolve_path, upload_file_to_remote, ) +from .hubai_utils import is_hubai_available from .image import read_calib_dir, read_image from .layout import guess_new_layout, make_default_layout from .metadata import Metadata, get_metadata @@ -45,6 +46,7 @@ "subprocess_run", "download_from_remote", "upload_file_to_remote", + "is_hubai_available", "get_protocol", "process_nn_archive", "modelconverter_config_to_nn", diff --git a/modelconverter/utils/hubai_utils.py b/modelconverter/utils/hubai_utils.py new file mode 100644 index 0000000..618c086 --- /dev/null +++ b/modelconverter/utils/hubai_utils.py @@ -0,0 +1,36 @@ +import requests + + +def is_hubai_available(model_slug: str) -> bool: + url = "https://easyml.cloud.luxonis.com/models/api/v1/models?is_public=true&limit=1000" + response = requests.get(url) + if response.status_code != 200: + raise ValueError( + f"Failed to get models. 
Status code: {response.status_code}" + ) + hub_ai_models = response.json() + for model in hub_ai_models: + slug = f"{model['team_slug']}/{model['slug']}" + if ( + slug in model_slug + or slug.removeprefix(f"{model['team_slug']}/") in model_slug + ): + model_id = model["id"] + + url = f"https://easyml.cloud.luxonis.com/models/api/v1/modelVersions?model_id={model_id}&is_public=true" + response = requests.get(url) + if response.status_code != 200: + raise ValueError( + f"Failed to get model versions. Status code: {response.status_code}" + ) + model_versions = response.json() + for version in model_versions: + if ( + f"{slug}:{version['variant_slug']}" == model_slug + or f"{slug}:{version['variant_slug']}".removeprefix( + f"{model['team_slug']}/" + ) + == model_slug + ): + return True + return False From 6cb5a2c89aa0569b85985ea91fe2acf6d1ceeff7 Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Thu, 9 Jan 2025 15:39:19 +0200 Subject: [PATCH 02/28] Add dai based benchmark execution for RVC4 device --- modelconverter/__main__.py | 17 +++ modelconverter/packages/base_benchmark.py | 8 +- modelconverter/packages/rvc2/benchmark.py | 11 +- modelconverter/packages/rvc4/benchmark.py | 174 ++++++++++++++++++++-- 4 files changed, 198 insertions(+), 12 deletions(-) diff --git a/modelconverter/__main__.py b/modelconverter/__main__.py index 5e320fa..6c2a7ab 100644 --- a/modelconverter/__main__.py +++ b/modelconverter/__main__.py @@ -193,8 +193,18 @@ def benchmark( - `--profile`: The SNPE profile to use for inference. Default: `"default"` + - `--runtime`: The SNPE runtime to use for inference (dsp or cpu). Default: `"dsp"` + - `--num-images`: The number of images to use for inference. Default: `1000` + - `--dai-benchmark`: Whether to run the benchmark using the DAI V3. If False the SNPE tools are used. Default: `True` + + - `--repetitions`: The number of repetitions to perform (dai-benchmark only). Default: `10` + + - `--num-threads`: The number of threads to use for inference (dai-benchmark only). Default: `1` + + - `--num-messages`: The number of messages to measure for each report (dai-benchmark only). 
Default: `50` + --- """ @@ -205,6 +215,13 @@ def benchmark( key = key[2:].replace("-", "_") else: raise typer.BadParameter(f"Unknown argument: {key}") + if key == "dai_benchmark": + value = value.capitalize() + if value not in ["True", "False"]: + raise typer.BadParameter( + "dai_benchmark must be either True or False" + ) + value = value == "True" kwargs[key] = value Benchmark = get_benchmark(target) benchmark = Benchmark(str(model_path)) diff --git a/modelconverter/packages/base_benchmark.py b/modelconverter/packages/base_benchmark.py index 0934d0b..1b367f3 100644 --- a/modelconverter/packages/base_benchmark.py +++ b/modelconverter/packages/base_benchmark.py @@ -69,7 +69,13 @@ def print_results( title=f"Benchmark Results for [yellow]{self.model_name}", box=box.ROUNDED, ) - for field in self.header: + + updated_header = [ + *results[0][0].keys(), + "fps", + "latency (ms)", + ] + for field in updated_header: table.add_column(f"[cyan]{field}") for configuration, result in results: fps_color = ( diff --git a/modelconverter/packages/rvc2/benchmark.py b/modelconverter/packages/rvc2/benchmark.py index 42a633d..c758996 100644 --- a/modelconverter/packages/rvc2/benchmark.py +++ b/modelconverter/packages/rvc2/benchmark.py @@ -39,6 +39,10 @@ def _benchmark( num_threads: int, ) -> BenchmarkResult: device = dai.Device() + if device.getPlatform() != dai.Platform.RVC2: + raise ValueError( + f"Found {device.getPlatformAsString()}, expected RVC2 platform." + ) if isinstance(model_path, str): modelPath = dai.getModelFromZoo( @@ -113,9 +117,12 @@ def _benchmark( avg_latency_list = [] while pipeline.isRunning() and rep < repetitions: benchmarkReport = outputQueue.get() - assert isinstance(benchmarkReport, dai.BenchmarkReport) + if not isinstance(benchmarkReport, dai.BenchmarkReport): + raise ValueError( + f"Expected BenchmarkReport, got {type(benchmarkReport)}" + ) fps = benchmarkReport.fps - avg_latency = benchmarkReport.averageLatency + avg_latency = benchmarkReport.averageLatency * 1000 fps_list.append(fps) avg_latency_list.append(avg_latency) diff --git a/modelconverter/packages/rvc4/benchmark.py b/modelconverter/packages/rvc4/benchmark.py index 4e71bd0..ce12394 100644 --- a/modelconverter/packages/rvc4/benchmark.py +++ b/modelconverter/packages/rvc4/benchmark.py @@ -6,8 +6,10 @@ from pathlib import Path from typing import Dict, Final, List, Optional, Tuple, cast +import depthai as dai import numpy as np import pandas as pd +from rich.progress import Progress from modelconverter.utils import subprocess_run @@ -29,6 +31,11 @@ "system_settings", ] +RUNTIMES: Dict[str, str] = { + "dsp": "use_dsp", + "cpu": "use_cpu", +} + class AdbHandler: def __init__(self, device_id: Optional[str] = None) -> None: @@ -76,9 +83,22 @@ class RVC4Benchmark(Benchmark): def default_configuration(self) -> Configuration: """ profile: The SNPE profile to use for inference. + runtime: The SNPE runtime to use for inference. num_images: The number of images to use for inference. + dai_benchmark: Whether to use the DepthAI for benchmarking. + repetitions: The number of repetitions to perform (dai-benchmark only). + num_threads: The number of threads to use for inference (dai-benchmark only). + num_messages: The number of messages to use for inference (dai-benchmark only). 
""" - return {"profile": "default", "num_images": 1000} + return { + "profile": "default", + "runtime": "dsp", + "num_images": 1000, + "dai_benchmark": True, + "repetitions": 10, + "num_threads": 1, + "num_messages": 50, + } @property def all_configurations(self) -> List[Configuration]: @@ -87,7 +107,13 @@ def all_configurations(self) -> List[Configuration]: def _get_input_sizes(self) -> Dict[str, List[int]]: csv_path = Path("info.csv") subprocess_run( - ["snpe-dlc-info", "-i", self.model_path, "-s", csv_path] + [ + "snpe-dlc-info", + "-i", + self.model_path, + "-s", + csv_path, + ] ) content = csv_path.read_text() csv_path.unlink() @@ -132,17 +158,36 @@ def _prepare_raw_inputs(self, num_images: int) -> None: ) def benchmark(self, configuration: Configuration) -> BenchmarkResult: + dai_benchmark = configuration.get("dai_benchmark") try: - return self._benchmark(self.model_path, **configuration) + if dai_benchmark: + for key in ["dai_benchmark", "num_images"]: + configuration.pop(key) + return self._benchmark_dai(self.model_path, **configuration) + else: + for key in [ + "dai_benchmark", + "repetitions", + "num_threads", + "num_messages", + ]: + configuration.pop(key) + return self._benchmark_snpe(self.model_path, **configuration) finally: - # so we don't delete the wrong directory - assert self.model_name + if not dai_benchmark: + # so we don't delete the wrong directory + assert self.model_name - self.adb.shell(f"rm -rf /data/local/tmp/{self.model_name}") + self.adb.shell(f"rm -rf /data/local/tmp/{self.model_name}") - def _benchmark( - self, model_path: Path, num_images: int, profile: str + def _benchmark_snpe( + self, + model_path: Path | str, + num_images: int, + profile: str, + runtime: str, ) -> BenchmarkResult: + runtime = RUNTIMES[runtime] if runtime in RUNTIMES else "use_dsp" self.adb.shell(f"mkdir /data/local/tmp/{self.model_name}") self.adb.push( str(model_path), f"/data/local/tmp/{self.model_name}/model.dlc" @@ -157,7 +202,7 @@ def _benchmark( f"--output_dir /data/local/tmp/{self.model_name}/outputs " f"--perf_profile {profile} " "--cpu_fallback false " - "--use_dsp" + f"--{runtime}" ) pattern = re.compile(r"(\d+\.\d+) infs/sec") match = pattern.search(stdout) @@ -168,3 +213,114 @@ def _benchmark( ) fps = float(match.group(1)) return BenchmarkResult(fps=fps, latency=0) + + def _benchmark_dai( + self, + model_path: Path | str, + profile: str, + runtime: str, + repetitions: int, + num_threads: int, + num_messages: int, + ) -> BenchmarkResult: + device = dai.Device() + + if device.getPlatform() != dai.Platform.RVC4: + raise ValueError( + f"Found {device.getPlatformAsString()}, expected RVC4 platform." + ) + + if isinstance(model_path, str): + modelPath = dai.getModelFromZoo( + dai.NNModelDescription( + model_path, + platform=device.getPlatformAsString(), + ) + ) + elif str(model_path).endswith(".tar.xz"): + modelPath = str(model_path) + elif str(model_path).endswith(".dlc"): + raise ValueError( + "DLC model format is not currently supported for dai-benchmark. Please use SNPE for DLC models." + ) + else: + raise ValueError( + "Unsupported model format. Supported formats: .tar.xz, or HubAI model slug." 
+ ) + + inputSizes = [] + inputNames = [] + if isinstance(model_path, str) or str(model_path).endswith(".tar.xz"): + modelArhive = dai.NNArchive(modelPath) + for input in modelArhive.getConfig().model.inputs: + inputSizes.append(input.shape) + inputNames.append(input.name) + + inputData = dai.NNData() + for name, inputSize in zip(inputNames, inputSizes): + img = np.random.randint( + 0, 255, (1, inputSize[1], inputSize[2], 3), np.uint8 + ) + inputData.addTensor(name, img) + + with dai.Pipeline(device) as pipeline, Progress() as progress: + repet_task = progress.add_task( + "[magenta]Repetition", total=repetitions + ) + + benchmarkOut = pipeline.create(dai.node.BenchmarkOut) + benchmarkOut.setRunOnHost(False) + benchmarkOut.setFps(-1) + + neuralNetwork = pipeline.create(dai.node.NeuralNetwork) + if isinstance(model_path, str) or str(model_path).endswith( + ".tar.xz" + ): + neuralNetwork.setNNArchive(modelArhive) + neuralNetwork.setBackendProperties( + { + "runtime": runtime, + "performance_profile": profile, + } + ) + if num_threads > 1: + logger.warning( + "num_threads > 1 is not supported for RVC4. Setting num_threads to 1." + ) + num_threads = 1 + neuralNetwork.setNumInferenceThreads(num_threads) + + benchmarkIn = pipeline.create(dai.node.BenchmarkIn) + benchmarkIn.setRunOnHost(False) + benchmarkIn.sendReportEveryNMessages(num_messages) + benchmarkIn.logReportsAsWarnings(False) + + benchmarkOut.out.link(neuralNetwork.input) + neuralNetwork.out.link(benchmarkIn.input) + + outputQueue = benchmarkIn.report.createOutputQueue() + inputQueue = benchmarkOut.input.createInputQueue() + + pipeline.start() + inputQueue.send(inputData) + + rep = 0 + fps_list = [] + avg_latency_list = [] + while pipeline.isRunning() and rep < repetitions: + benchmarkReport = outputQueue.get() + if not isinstance(benchmarkReport, dai.BenchmarkReport): + raise ValueError( + f"Expected BenchmarkReport, got {type(benchmarkReport)}" + ) + fps = benchmarkReport.fps + avg_latency = benchmarkReport.averageLatency * 1000 + + fps_list.append(fps) + avg_latency_list.append(avg_latency) + progress.update(repet_task, advance=1) + rep += 1 + + return BenchmarkResult( + np.mean(fps_list), np.mean(avg_latency_list) + ) From c4b1a5d51c2f78c5a326a74e0b8056a90762cdf4 Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Thu, 9 Jan 2025 16:01:50 +0200 Subject: [PATCH 03/28] Ignore latency measurements on dai based benchmark reports --- modelconverter/packages/base_benchmark.py | 21 +++++++++++++-------- modelconverter/packages/rvc2/benchmark.py | 5 ++--- modelconverter/packages/rvc4/benchmark.py | 5 ++--- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/modelconverter/packages/base_benchmark.py b/modelconverter/packages/base_benchmark.py index 1b367f3..a54f400 100644 --- a/modelconverter/packages/base_benchmark.py +++ b/modelconverter/packages/base_benchmark.py @@ -85,17 +85,22 @@ def print_results( if result.fps < 5 else "green" ) - latency_color = ( - "yellow" - if 50 < result.latency < 100 - else "red" - if result.latency > 100 - else "green" - ) + if isinstance(result.latency, str): + latency_color = "orange3" + else: + latency_color = ( + "yellow" + if 50 < result.latency < 100 + else "red" + if result.latency > 100 + else "green" + ) table.add_row( *map(lambda x: f"[magenta]{x}", configuration.values()), f"[{fps_color}]{result.fps:.2f}", - f"[{latency_color}]{result.latency:.5f}", + f"[{latency_color}]{result.latency}" + if isinstance(result.latency, str) + else f"[{latency_color}]{result.latency:.5f}", ) console = 
Console() console.print(table) diff --git a/modelconverter/packages/rvc2/benchmark.py b/modelconverter/packages/rvc2/benchmark.py index c758996..0065127 100644 --- a/modelconverter/packages/rvc2/benchmark.py +++ b/modelconverter/packages/rvc2/benchmark.py @@ -129,6 +129,5 @@ def _benchmark( progress.update(repet_task, advance=1) rep += 1 - return BenchmarkResult( - np.mean(fps_list), np.mean(avg_latency_list) - ) + # Currently, the latency measurement is not supported on RVC2 by the depthai library. + return BenchmarkResult(np.mean(fps_list), "N/A") diff --git a/modelconverter/packages/rvc4/benchmark.py b/modelconverter/packages/rvc4/benchmark.py index ce12394..1391335 100644 --- a/modelconverter/packages/rvc4/benchmark.py +++ b/modelconverter/packages/rvc4/benchmark.py @@ -321,6 +321,5 @@ def _benchmark_dai( progress.update(repet_task, advance=1) rep += 1 - return BenchmarkResult( - np.mean(fps_list), np.mean(avg_latency_list) - ) + # Currently, the latency measurement is only supported on RVC4 when using ImgFrame as the input to the BenchmarkOut which we don't do here. + return BenchmarkResult(np.mean(fps_list), "N/A") From e97a45348897c2c9bb206ce894a2fe36d468fe6f Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Fri, 10 Jan 2025 11:06:09 +0200 Subject: [PATCH 04/28] Update is_hubai_available to work with hubAI API calls --- modelconverter/utils/hubai_utils.py | 49 ++++++++++++----------------- 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/modelconverter/utils/hubai_utils.py b/modelconverter/utils/hubai_utils.py index 618c086..1a29985 100644 --- a/modelconverter/utils/hubai_utils.py +++ b/modelconverter/utils/hubai_utils.py @@ -1,36 +1,27 @@ -import requests - - def is_hubai_available(model_slug: str) -> bool: - url = "https://easyml.cloud.luxonis.com/models/api/v1/models?is_public=true&limit=1000" - response = requests.get(url) - if response.status_code != 200: + from modelconverter.cli import Request, slug_to_id + + model_name = model_slug.split(":")[0] + if len(model_slug.split(":")) < 2: raise ValueError( - f"Failed to get models. Status code: {response.status_code}" + f"Model variant not found in {model_slug}. Please specify it." ) - hub_ai_models = response.json() - for model in hub_ai_models: - slug = f"{model['team_slug']}/{model['slug']}" + + model_id = slug_to_id( + model_slug.removeprefix("luxonis/").split(":")[0], "models" + ) + model_variants = Request.get( + "modelVersions/", params={"model_id": model_id, "is_public": True} + ) + + for version in model_variants: if ( - slug in model_slug - or slug.removeprefix(f"{model['team_slug']}/") in model_slug + f"{model_name}:{version['variant_slug']}" == model_slug + or f"{model_name}:{version['variant_slug']}".removeprefix( + "luxonis/" + ) + == model_slug ): - model_id = model["id"] + return True - url = f"https://easyml.cloud.luxonis.com/models/api/v1/modelVersions?model_id={model_id}&is_public=true" - response = requests.get(url) - if response.status_code != 200: - raise ValueError( - f"Failed to get model versions. 
Status code: {response.status_code}" - ) - model_versions = response.json() - for version in model_versions: - if ( - f"{slug}:{version['variant_slug']}" == model_slug - or f"{slug}:{version['variant_slug']}".removeprefix( - f"{model['team_slug']}/" - ) - == model_slug - ): - return True return False From 7295d968a707a78131c398cf739a5bdf5f0e67c1 Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Fri, 10 Jan 2025 12:02:29 +0200 Subject: [PATCH 05/28] Update is_hubai_available to work with various teams from HubAI --- modelconverter/utils/hubai_utils.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/modelconverter/utils/hubai_utils.py b/modelconverter/utils/hubai_utils.py index 1a29985..04922d6 100644 --- a/modelconverter/utils/hubai_utils.py +++ b/modelconverter/utils/hubai_utils.py @@ -1,27 +1,24 @@ def is_hubai_available(model_slug: str) -> bool: from modelconverter.cli import Request, slug_to_id - model_name = model_slug.split(":")[0] - if len(model_slug.split(":")) < 2: + team_name = model_slug.split("/", 1)[0] + if len(model_slug.split(":", 1)) < 2: + team_name = "" + model_name = model_slug.split(":", 1)[0] + if len(model_slug.split(":", 1)) < 2: raise ValueError( f"Model variant not found in {model_slug}. Please specify it." ) model_id = slug_to_id( - model_slug.removeprefix("luxonis/").split(":")[0], "models" + model_slug.removeprefix(f"{team_name}/").split(":")[0], "models" ) model_variants = Request.get( "modelVersions/", params={"model_id": model_id, "is_public": True} ) for version in model_variants: - if ( - f"{model_name}:{version['variant_slug']}" == model_slug - or f"{model_name}:{version['variant_slug']}".removeprefix( - "luxonis/" - ) - == model_slug - ): + if f"{model_name}:{version['variant_slug']}" == model_slug: return True return False From 82a704412c5f5932d34c672129df2c4ae3c79cae Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Fri, 10 Jan 2025 12:49:54 +0200 Subject: [PATCH 06/28] Remove removeprefix to work with python version 3.8 [skip ci] --- modelconverter/utils/hubai_utils.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modelconverter/utils/hubai_utils.py b/modelconverter/utils/hubai_utils.py index 04922d6..5aa6e01 100644 --- a/modelconverter/utils/hubai_utils.py +++ b/modelconverter/utils/hubai_utils.py @@ -11,7 +11,12 @@ def is_hubai_available(model_slug: str) -> bool: ) model_id = slug_to_id( - model_slug.removeprefix(f"{team_name}/").split(":")[0], "models" + ( + model_slug[len(f"{team_name}/") :] + if model_slug.startswith(f"{team_name}/") + else model_slug + ).split(":")[0], + "models", ) model_variants = Request.get( "modelVersions/", params={"model_id": model_id, "is_public": True} From a34b9eda4404cedef9a72248aa6f951d911939dc Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Fri, 10 Jan 2025 16:37:55 +0200 Subject: [PATCH 07/28] Fix test_modifier test error with EfficientVIT model and change the API calls. 
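
Models listed in EXCEMPT_OPTIMISATION (currently only efficientvit-b1-224)
are now loaded with skip_optimisation=True, so the onnxsim/onnxoptimizer
passes are bypassed for them, and the test downloads now go through
modelconverter.cli.Request instead of raw requests.get calls against the
easyml URL, e.g.:

    hub_ai_models = Request.get(
        "models/", params={"is_public": True, "limit": 1000}
    )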
--- modelconverter/utils/onnx_tools.py | 19 ++++++++--- tests/test_utils/test_modifier.py | 54 ++++++++++++++++-------------- 2 files changed, 43 insertions(+), 30 deletions(-) diff --git a/modelconverter/utils/onnx_tools.py b/modelconverter/utils/onnx_tools.py index fe01ab5..a615fbf 100644 --- a/modelconverter/utils/onnx_tools.py +++ b/modelconverter/utils/onnx_tools.py @@ -191,11 +191,19 @@ class ONNXModifier: Path to the base ONNX model output_path : Path Path to save the modified ONNX model + skip_optimisation : bool + Flag to skip optimization of the ONNX model """ - def __init__(self, model_path: Path, output_path: Path) -> None: + def __init__( + self, + model_path: Path, + output_path: Path, + skip_optimisation: bool = False, + ) -> None: self.model_path = model_path self.output_path = output_path + self.skip_optimisation = skip_optimisation self.load_onnx() self.prev_onnx_model = self.onnx_model self.prev_onnx_gs = self.onnx_gs @@ -207,7 +215,8 @@ def load_onnx(self) -> None: logger.info(f"Loading model: {self.model_path.stem}") self.onnx_model, _ = simplify( - self.model_path.as_posix(), perform_optimization=True + self.model_path.as_posix(), + perform_optimization=True and not self.skip_optimisation, ) self.dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[ @@ -232,8 +241,10 @@ def optimize_onnx(self, passes: Optional[List[str]] = None) -> None: @type passes: Optional[List[str]] """ - optimised_onnx_model = onnxoptimizer.optimize( - self.onnx_model, passes=passes + optimised_onnx_model = ( + self.onnx_model + if self.skip_optimisation + else onnxoptimizer.optimize(self.onnx_model, passes=passes) ) optimised_onnx_model, _ = simplify( diff --git a/tests/test_utils/test_modifier.py b/tests/test_utils/test_modifier.py index 43a44d4..66ddeb0 100644 --- a/tests/test_utils/test_modifier.py +++ b/tests/test_utils/test_modifier.py @@ -4,11 +4,11 @@ from pathlib import Path from typing import Tuple -import requests import wget from luxonis_ml.nn_archive.config import Config as NNArchiveConfig from luxonis_ml.nn_archive.config_building_blocks import InputType +from modelconverter.cli import Request from modelconverter.utils import ONNXModifier from modelconverter.utils.config import Config from modelconverter.utils.onnx_tools import onnx_attach_normalization_to_inputs @@ -25,18 +25,18 @@ "mult_512x288", ] +EXCEMPT_OPTIMISATION = [ + "efficientvit-b1-224", +] + def download_onnx_models(): if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR) - url = "https://easyml.cloud.luxonis.com/models/api/v1/models?is_public=true&limit=1000" - response = requests.get(url, headers=HEADERS) - if response.status_code != 200: - raise ValueError( - f"Failed to get models. Status code: {response.status_code}" - ) - hub_ai_models = response.json() + hub_ai_models = Request.get( + "models/", params={"is_public": True, "limit": 1000} + ) for model in hub_ai_models: if "ONNX" in model["exportable_types"]: @@ -46,25 +46,22 @@ def download_onnx_models(): os.makedirs(model_dir) model_id = model["id"] - url = f"https://easyml.cloud.luxonis.com/models/api/v1/modelVersions?model_id={model_id}" - response = requests.get(url, headers=HEADERS) - if response.status_code != 200: - raise ValueError( - f"Failed to get model versions. 
Status code: {response.status_code}" - ) - model_versions = response.json() + model_variants = Request.get( + "modelVersions/", + params={ + "model_id": model_id, + "is_public": True, + "limit": 1000, + }, + ) - for version in model_versions: - if "ONNX" in version["exportable_types"]: - model_version_id = version["id"] + for variant in model_variants: + if "ONNX" in variant["exportable_types"]: + model_version_id = variant["id"] break - url = f"https://easyml.cloud.luxonis.com/models/api/v1/modelVersions/{model_version_id}/download" - response = requests.get(url, headers=HEADERS) - if response.status_code != 200: - raise ValueError( - f"Failed to download model. Status code: {response.status_code}" - ) - download_info = response.json() + download_info = Request.get( + f"modelVersions/{model_version_id}/download" + ) model_download_link = download_info[0]["download_link"] @@ -210,6 +207,9 @@ def pytest_generate_tests(metafunc): def test_onnx_model(onnx_file): + skip_optimisation = ( + True if onnx_file.stem in EXCEMPT_OPTIMISATION else False + ) nn_config = onnx_file.parent / f"{onnx_file.stem}_config.json" cfg, main_stage_key = get_config(nn_config) @@ -229,7 +229,9 @@ def test_onnx_model(onnx_file): onnx_file.parent / f"{onnx_file.stem}_modified_optimised.onnx" ) onnx_modifier = ONNXModifier( - model_path=modified_onnx, output_path=modified_optimised_onnx + model_path=modified_onnx, + output_path=modified_optimised_onnx, + skip_optimisation=skip_optimisation, ) if onnx_modifier.has_dynamic_shape: From 44a097ba6b633d418cc68a0941ce28d44b4d0c8c Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Fri, 10 Jan 2025 17:00:07 +0200 Subject: [PATCH 08/28] Update .pre-commit-config.yaml --- .pre-commit-config.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aedcf16..232e7c4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,3 @@ -default_language_version: - python: python3 repos: - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.1.2 From 7d4d22399a4e12328d877ed037d400fb7cbd6e33 Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Mon, 13 Jan 2025 12:29:41 +0200 Subject: [PATCH 09/28] Fix model path and HubAI model slug parsing [ci skip] --- .pre-commit-config.yaml | 2 ++ modelconverter/packages/base_benchmark.py | 24 ++++++++++++++++++++--- modelconverter/utils/hubai_utils.py | 17 +++------------- 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 232e7c4..aedcf16 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,5 @@ +default_language_version: + python: python3 repos: - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.1.2 diff --git a/modelconverter/packages/base_benchmark.py b/modelconverter/packages/base_benchmark.py index a54f400..fd30d26 100644 --- a/modelconverter/packages/base_benchmark.py +++ b/modelconverter/packages/base_benchmark.py @@ -1,3 +1,4 @@ +import re from abc import ABC, abstractmethod from collections import namedtuple from logging import getLogger @@ -23,17 +24,34 @@ class Benchmark(ABC): + VALID_EXTENSIONS = (".tar.xz", ".blob") + HUB_MODEL_PATTERN = re.compile(r"^(?:([^/]+)/)?([^:]+):(.+)$") + def __init__( self, model_path: str, dataset_path: Optional[Path] = None, ): - if not is_hubai_available(model_path): + if any(model_path.endswith(ext) for ext in self.VALID_EXTENSIONS): self.model_path = resolve_path(model_path, Path.cwd()) self.model_name = self.model_path.stem 
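+        # Otherwise, treat 'model_path' as a HubAI model slug ([team_name/]model_name:variant).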
else: - self.model_path = model_path - self.model_name = self.model_path.split("/", 1)[-1] + hub_match = self.HUB_MODEL_PATTERN.match(model_path) + if not hub_match: + raise ValueError( + "Invalid 'model-path' format. Expected either:\n" + "- Model file path: path/to/model.blob or path/to/model.tar.xz\n" + "- HubAI model slug: [team_name/]model_name:variant" + ) + team_name, model_name, model_variant = hub_match.groups() + if is_hubai_available(model_name, model_variant): + self.model_path = model_path + self.model_name = model_name + else: + raise ValueError( + f"Model {team_name+'/' if team_name else ''}{model_name}:{model_variant} not found in HubAI." + ) + self.dataset_path = dataset_path self.header = [ diff --git a/modelconverter/utils/hubai_utils.py b/modelconverter/utils/hubai_utils.py index 5aa6e01..4add73a 100644 --- a/modelconverter/utils/hubai_utils.py +++ b/modelconverter/utils/hubai_utils.py @@ -1,21 +1,10 @@ -def is_hubai_available(model_slug: str) -> bool: +def is_hubai_available(model_name: str, model_variant: str) -> bool: from modelconverter.cli import Request, slug_to_id - team_name = model_slug.split("/", 1)[0] - if len(model_slug.split(":", 1)) < 2: - team_name = "" - model_name = model_slug.split(":", 1)[0] - if len(model_slug.split(":", 1)) < 2: - raise ValueError( - f"Model variant not found in {model_slug}. Please specify it." - ) + model_slug = f"{model_name}:{model_variant}" model_id = slug_to_id( - ( - model_slug[len(f"{team_name}/") :] - if model_slug.startswith(f"{team_name}/") - else model_slug - ).split(":")[0], + model_name, "models", ) model_variants = Request.get( From 57b8982c76722303af34f655a8be92d41c93a312 Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Mon, 13 Jan 2025 12:47:09 +0200 Subject: [PATCH 10/28] Add HUBAI_API_KEY to getModelFromZoo calls [ci skip] --- modelconverter/packages/rvc2/benchmark.py | 5 ++++- modelconverter/packages/rvc4/benchmark.py | 5 +++-- modelconverter/utils/hubai_utils.py | 10 +++++++--- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/modelconverter/packages/rvc2/benchmark.py b/modelconverter/packages/rvc2/benchmark.py index 0065127..e57ff3d 100644 --- a/modelconverter/packages/rvc2/benchmark.py +++ b/modelconverter/packages/rvc2/benchmark.py @@ -6,6 +6,8 @@ import numpy as np from rich.progress import Progress +from modelconverter.utils import environ + from ..base_benchmark import Benchmark, BenchmarkResult, Configuration logger = logging.getLogger(__name__) @@ -49,7 +51,8 @@ def _benchmark( dai.NNModelDescription( model_path, platform=device.getPlatformAsString(), - ) + ), + apiKey=environ.HUBAI_API_KEY if environ.HUBAI_API_KEY else "", ) elif str(model_path).endswith(".tar.xz"): modelPath = str(model_path) diff --git a/modelconverter/packages/rvc4/benchmark.py b/modelconverter/packages/rvc4/benchmark.py index 1391335..8dd203e 100644 --- a/modelconverter/packages/rvc4/benchmark.py +++ b/modelconverter/packages/rvc4/benchmark.py @@ -11,7 +11,7 @@ import pandas as pd from rich.progress import Progress -from modelconverter.utils import subprocess_run +from modelconverter.utils import environ, subprocess_run from ..base_benchmark import Benchmark, BenchmarkResult, Configuration @@ -235,7 +235,8 @@ def _benchmark_dai( dai.NNModelDescription( model_path, platform=device.getPlatformAsString(), - ) + ), + apiKey=environ.HUBAI_API_KEY if environ.HUBAI_API_KEY else "", ) elif str(model_path).endswith(".tar.xz"): modelPath = str(model_path) diff --git a/modelconverter/utils/hubai_utils.py 
b/modelconverter/utils/hubai_utils.py
index 4add73a..92b310e 100644
--- a/modelconverter/utils/hubai_utils.py
+++ b/modelconverter/utils/hubai_utils.py
@@ -7,9 +7,13 @@ def is_hubai_available(model_name: str, model_variant: str) -> bool:
         model_name,
         "models",
     )
-    model_variants = Request.get(
-        "modelVersions/", params={"model_id": model_id, "is_public": True}
-    )
+
+    model_variants = []
+    for is_public in [True, False]:
+        model_variants += Request.get(
+            "modelVersions/",
+            params={"model_id": model_id, "is_public": is_public},
+        )
 
     for version in model_variants:
         if f"{model_name}:{version['variant_slug']}" == model_slug:

From d6e5da103694a294d97cb0f6d1c2a5f4773b08e4 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Mon, 13 Jan 2025 13:10:46 +0200
Subject: [PATCH 11/28] Update Benchmarking Section of README file [ci skip]

---
 README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/README.md b/README.md
index b24ee18..dc809d5 100644
--- a/README.md
+++ b/README.md
@@ -437,3 +437,6 @@ modelconverter benchmark rvc3 --model-path
 
 The command prints a table with the benchmark results to the console and
 optionally saves the results to a `.csv` file.
+
+> \[!NOTE\]
+> For **RVC2** and **RVC4**: The `--model-path` can be a path to a local .blob file, an NN Archive file (.tar.xz), or a model slug from [Luxonis HubAI](https://hub.luxonis.com/ai). To access models from different teams in Luxonis HubAI, remember to update the HUBAI_API_KEY environment variable accordingly.

From 4d3bc5b3945dc973662f972d9d80c9598ad21b24 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Mon, 13 Jan 2025 13:11:54 +0200
Subject: [PATCH 12/28] Update .pre-commit-config.yaml [ci skip]

---
 .pre-commit-config.yaml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index aedcf16..232e7c4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,5 +1,3 @@
-default_language_version:
-  python: python3
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.1.2

From e8bc9741ec9725f41ec9dbb2d41e12b7866d59d4 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Tue, 14 Jan 2025 14:01:12 +0200
Subject: [PATCH 13/28] Fix dlc parsing on Benchmark __init__

---
 .pre-commit-config.yaml                   |  2 ++
 modelconverter/packages/base_benchmark.py |  4 ++--
 modelconverter/utils/hubai_utils.py       | 11 +++++++----
 3 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 232e7c4..aedcf16 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,3 +1,5 @@
+default_language_version:
+  python: python3
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.1.2
diff --git a/modelconverter/packages/base_benchmark.py b/modelconverter/packages/base_benchmark.py
index fd30d26..0fb3d33 100644
--- a/modelconverter/packages/base_benchmark.py
+++ b/modelconverter/packages/base_benchmark.py
@@ -24,7 +24,7 @@
 
 class Benchmark(ABC):
-    VALID_EXTENSIONS = (".tar.xz", ".blob")
+    VALID_EXTENSIONS = (".tar.xz", ".blob", ".dlc")
     HUB_MODEL_PATTERN = re.compile(r"^(?:([^/]+)/)?([^:]+):(.+)$")
 
     def __init__(
@@ -40,7 +40,7 @@ def __init__(
             if not hub_match:
                 raise ValueError(
                     "Invalid 'model-path' format. 
Expected either:\n" - "- Model file path: path/to/model.blob or path/to/model.tar.xz\n" + "- Model file path: path/to/model.blob, path/to/model.dlc or path/to/model.tar.xz\n" "- HubAI model slug: [team_name/]model_name:variant" ) team_name, model_name, model_variant = hub_match.groups() diff --git a/modelconverter/utils/hubai_utils.py b/modelconverter/utils/hubai_utils.py index 92b310e..1229179 100644 --- a/modelconverter/utils/hubai_utils.py +++ b/modelconverter/utils/hubai_utils.py @@ -10,10 +10,13 @@ def is_hubai_available(model_name: str, model_variant: str) -> bool: model_variants = [] for is_public in [True, False]: - model_variants += Request.get( - "modelVersions/", - params={"model_id": model_id, "is_public": is_public}, - ) + try: + model_variants += Request.get( + "modelVersions/", + params={"model_id": model_id, "is_public": is_public}, + ) + except Exception: + pass for version in model_variants: if f"{model_name}:{version['variant_slug']}" == model_slug: From e2a7ed7afcc4fcd20a81ad53551e70339b15e758 Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Tue, 14 Jan 2025 17:47:43 +0200 Subject: [PATCH 14/28] Update the way modify_onnx optimisation runs are conducted in the ONNXModifier class --- modelconverter/utils/onnx_tools.py | 116 +++++++++++++++-------------- 1 file changed, 62 insertions(+), 54 deletions(-) diff --git a/modelconverter/utils/onnx_tools.py b/modelconverter/utils/onnx_tools.py index a615fbf..bbc61f6 100644 --- a/modelconverter/utils/onnx_tools.py +++ b/modelconverter/utils/onnx_tools.py @@ -1,6 +1,6 @@ import logging from pathlib import Path -from typing import Dict, List, Optional, Tuple +from typing import Callable, Dict, List, Optional, Tuple import numpy as np import onnx @@ -1054,6 +1054,33 @@ def fuse_split_concat_to_conv(self) -> None: self.optimize_onnx() + def revert_changes(self): + """Reverts ONNX model to previous state.""" + self.onnx_model = self.prev_onnx_model + self.onnx_gs = self.prev_onnx_gs + + def apply_optimization_step( + self, step_name: str, optimization_func: Callable + ): + """Applies a single optimization step to the ONNX model. + + @param step_name: Name of the optimization step + @type step_name: str + @param optimization_func: Optimization function to apply + @type optimization_func: Callable + """ + logger.debug(f"Attempting: {step_name}...") + try: + optimization_func() + if not self.compare_outputs(from_modelproto=True): + logger.warning(f"Failed: {step_name}, reverting changes...") + self.revert_changes() + except Exception as e: + logger.warning( + f"Failed: {step_name} with error: {e}, reverting changes..." + ) + self.revert_changes() + def modify_onnx(self) -> bool: """Modify the ONNX model by applying a series of optimizations. @@ -1066,65 +1093,46 @@ def modify_onnx(self) -> bool: ) return False - try: - logger.debug("Substituting Div -> Mul nodes...") - self.substitute_node_by_type(source_node="Div", target_node="Mul") - if not self.compare_outputs(from_modelproto=True): - logger.warning( - "Failed to substitute Div -> Mul nodes, reverting changes..." - ) - self.onnx_model = self.prev_onnx_model - self.onnx_gs = self.prev_onnx_gs - - logger.debug("Substituting Sub -> Add nodes...") - self.substitute_node_by_type(source_node="Sub", target_node="Add") - if not self.compare_outputs(from_modelproto=True): - logger.warning( - "Failed to substitute Sub -> Add nodes, reverting changes..." 
- ) - self.onnx_model = self.prev_onnx_model - self.onnx_gs = self.prev_onnx_gs - - logger.debug( - "Fusing Add and Mul nodes to BatchNormalization nodes and then into Conv nodes..." - ) - self.fuse_add_mul_to_bn() - if not self.compare_outputs(from_modelproto=True): - logger.warning( - "Failed to fuse Add and Mul nodes to BatchNormalization nodes, reverting changes..." - ) - self.onnx_model = self.prev_onnx_model - self.onnx_gs = self.prev_onnx_gs - - logger.debug("Fusing Add and Mul nodes to Conv nodes...") - self.fuse_comb_add_mul_to_conv() - if not self.compare_outputs(from_modelproto=True): - logger.warning( - "Failed to fuse Add and Mul nodes (combined) to Conv nodes, reverting changes..." - ) - self.onnx_model = self.prev_onnx_model - self.onnx_gs = self.prev_onnx_gs - self.fuse_single_add_mul_to_conv() - if not self.compare_outputs(from_modelproto=True): - logger.warning( - "Failed to fuse Add and Mul nodes (single) to Conv nodes, reverting changes..." - ) - self.onnx_model = self.prev_onnx_model - self.onnx_gs = self.prev_onnx_gs + optimization_steps = [ + ( + "Substitute Div -> Mul nodes", + lambda: self.substitute_node_by_type( + source_node="Div", target_node="Mul" + ), + ), + ( + "Substitute Sub -> Add nodes", + lambda: self.substitute_node_by_type( + source_node="Sub", target_node="Add" + ), + ), + ( + "Fuse Add and Mul nodes to BatchNormalization nodes", + self.fuse_add_mul_to_bn, + ), + ( + "Fuse Add and Mul nodes to Conv nodes (combined)", + self.fuse_comb_add_mul_to_conv, + ), + ( + "Fuse Add and Mul nodes to Conv nodes (single)", + self.fuse_single_add_mul_to_conv, + ), + ( + "Fuse Split and Concat nodes to Conv nodes", + self.fuse_split_concat_to_conv, + ), + ] - logger.debug("Fusing Split and Concat nodes to Conv nodes...") - self.fuse_split_concat_to_conv() - if not self.compare_outputs(from_modelproto=True): - logger.warning( - "Failed to fuse Split and Concat nodes to Conv nodes, reverting changes..." 
- ) - self.onnx_model = self.prev_onnx_model - self.onnx_gs = self.prev_onnx_gs + for step_name, optimization_func in optimization_steps: + self.apply_optimization_step(step_name, optimization_func) + try: self.export_onnx() except Exception as e: logger.error(f"Failed to modify the ONNX model: {e}") return False + return True def compare_outputs(self, from_modelproto: bool = False) -> bool: From cd2b088dd50b184584c594c475039a3a83593e78 Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Tue, 14 Jan 2025 18:46:52 +0200 Subject: [PATCH 15/28] Fix SNPE benchmark on RVC4 and added support for benchmark over model slugs from HubAI --- modelconverter/packages/rvc4/benchmark.py | 45 +++++++++++++++++++---- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/modelconverter/packages/rvc4/benchmark.py b/modelconverter/packages/rvc4/benchmark.py index 8dd203e..d64219a 100644 --- a/modelconverter/packages/rvc4/benchmark.py +++ b/modelconverter/packages/rvc4/benchmark.py @@ -1,6 +1,7 @@ import io import logging import re +import shutil import subprocess import tempfile from pathlib import Path @@ -119,7 +120,7 @@ def _get_input_sizes(self) -> Dict[str, List[int]]: csv_path.unlink() start_marker = "Input Name,Dimensions,Type,Encoding Info" - end_marker = "Total parameters:" + end_marker = "Output Name,Dimensions,Type,Encoding Info" start_index = content.find(start_marker) end_index = content.find(end_marker, start_index) @@ -143,7 +144,7 @@ def _prepare_raw_inputs(self, num_images: int) -> None: img = cast(np.ndarray, np.random.rand(*size)).astype( np.float32 ) - with tempfile.TemporaryFile() as f: + with tempfile.NamedTemporaryFile() as f: img.tofile(f) self.adb.push( f.name, @@ -151,11 +152,18 @@ def _prepare_raw_inputs(self, num_images: int) -> None: ) input_list += f"{name}:=/data/local/tmp/{self.model_name}/inputs/{name}_{i}.raw " - with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: + input_list += "\n" + + temp_path = tempfile.mktemp() + with open(temp_path, "w") as f: f.write(input_list) + f.flush() + try: self.adb.push( - f.name, f"/data/local/tmp/{self.model_name}/input_list.txt" + temp_path, f"/data/local/tmp/{self.model_name}/input_list.txt" ) + finally: + Path(temp_path).unlink() def benchmark(self, configuration: Configuration) -> BenchmarkResult: dai_benchmark = configuration.get("dai_benchmark") @@ -188,14 +196,37 @@ def _benchmark_snpe( runtime: str, ) -> BenchmarkResult: runtime = RUNTIMES[runtime] if runtime in RUNTIMES else "use_dsp" + + if isinstance(model_path, str): + model_archive = dai.getModelFromZoo( + dai.NNModelDescription( + model_path, + platform=dai.Platform.RVC4.name, + ), + apiKey=environ.HUBAI_API_KEY if environ.HUBAI_API_KEY else "", + ) + tmp_dir = Path(model_archive).parent / "tmp" + shutil.unpack_archive(model_archive, tmp_dir) + + dlc_path = next(tmp_dir.rglob("*.dlc"), None) + if not dlc_path: + raise ValueError("Could not find model.dlc in the archive.") + self.model_path = dlc_path + elif str(model_path).endswith(".dlc"): + dlc_path = model_path + else: + raise ValueError( + "Unsupported model format. Supported formats: .dlc, or HubAI model slug." 
+ ) + self.adb.shell(f"mkdir /data/local/tmp/{self.model_name}") self.adb.push( - str(model_path), f"/data/local/tmp/{self.model_name}/model.dlc" + str(dlc_path), f"/data/local/tmp/{self.model_name}/model.dlc" ) self._prepare_raw_inputs(num_images) _, stdout, _ = self.adb.shell( - "source /data/local/tmp/source_me.sh && " + # "source /data/local/tmp/source_me.sh && " "snpe-parallel-run " f"--container /data/local/tmp/{self.model_name}/model.dlc " f"--input_list /data/local/tmp/{self.model_name}/input_list.txt " @@ -212,7 +243,7 @@ def _benchmark_snpe( f"stdout:\n{stdout}" ) fps = float(match.group(1)) - return BenchmarkResult(fps=fps, latency=0) + return BenchmarkResult(fps=fps, latency="N/A") def _benchmark_dai( self, From addc5f1602db9cef82f64d6af3cfd931401a0218 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Kozlovsk=C3=BD?= Date: Wed, 15 Jan 2025 22:42:56 +0100 Subject: [PATCH 16/28] Updated ONNX version (#56) --- modelconverter/utils/environ.py | 8 +++++--- requirements.txt | 2 +- tests/test_utils/test_config.py | 34 ++++++++++++++++++++++++++----- tests/test_utils/test_modifier.py | 5 ++--- 4 files changed, 37 insertions(+), 12 deletions(-) diff --git a/modelconverter/utils/environ.py b/modelconverter/utils/environ.py index 60d3cae..1e39322 100644 --- a/modelconverter/utils/environ.py +++ b/modelconverter/utils/environ.py @@ -3,12 +3,14 @@ import keyring from luxonis_ml.utils import Environ as BaseEnviron -from pydantic import model_validator -from typing_extensions import Self +from pydantic import AliasChoices, Field, model_validator +from typing_extensions import Annotated, Self class Environ(BaseEnviron): - HUBAI_API_KEY: Optional[str] = None + HUBAI_API_KEY: Annotated[ + Optional[str], Field(validation_alias=AliasChoices("HUB_AI_API_KEY")) + ] = None HUBAI_URL: str = "https://easyml.cloud.luxonis.com/models/" @model_validator(mode="after") diff --git a/requirements.txt b/requirements.txt index 534d102..556b196 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ Pillow PyYAML gcsfs luxonis-ml[data,nn_archive] >= 0.5.0 -onnx<1.17.0 +onnx>=1.17.0 onnxruntime onnxsim s3fs diff --git a/tests/test_utils/test_config.py b/tests/test_utils/test_config.py index 00f4933..9e2ec9f 100644 --- a/tests/test_utils/test_config.py +++ b/tests/test_utils/test_config.py @@ -129,11 +129,39 @@ def setup(): output1 = helper.make_tensor_value_info( "output1", TensorProto.FLOAT, [1, 5, 5, 5] ) + + shape_tensor = helper.make_tensor( + name="shape_tensor", + data_type=TensorProto.INT64, + dims=[4], + vals=[1, 5, 5, 5], + ) + + node0 = helper.make_node( + "Add", inputs=["input0", "input0"], outputs=["intermediate0"] + ) + node1 = helper.make_node( + "Add", inputs=["input1", "input1"], outputs=["intermediate1"] + ) + node2 = helper.make_node( + "Flatten", inputs=["intermediate0"], outputs=["output0"] + ) + node3 = helper.make_node( + "Reshape", + inputs=["intermediate1", "shape_tensor"], + outputs=["output1"], + ) + graph = helper.make_graph( - [], "DummyModel", [input0, input1], [output0, output1] + [node0, node1, node2, node3], + "DummyModel", + [input0, input1], + [output0, output1], + initializer=[shape_tensor], ) model = helper.make_model(graph, producer_name="DummyModelProducer") + checker.check_model(model) onnx.save(model, str(DATA_DIR / "dummy_model.onnx")) yield @@ -147,16 +175,12 @@ def set_nested_config_value( keys = key.split(".") current_level = config["model"] - # Traverse through the keys except for the last one for k in keys[:-1]: - # Handle integer keys for list indexing 
if re.match(r"^\d+$", k): k = int(k) - # Move to the next level current_level = current_level[k] - # Set the final key to the value final_key = keys[-1] current_level[final_key] = value diff --git a/tests/test_utils/test_modifier.py b/tests/test_utils/test_modifier.py index 66ddeb0..2dcfc6b 100644 --- a/tests/test_utils/test_modifier.py +++ b/tests/test_utils/test_modifier.py @@ -9,14 +9,13 @@ from luxonis_ml.nn_archive.config_building_blocks import InputType from modelconverter.cli import Request -from modelconverter.utils import ONNXModifier +from modelconverter.utils import ONNXModifier, environ from modelconverter.utils.config import Config from modelconverter.utils.onnx_tools import onnx_attach_normalization_to_inputs DATA_DIR = Path("tests/data/test_utils/hub_ai_models") -API_KEY = os.getenv("HUB_AI_API_KEY", None) -HEADERS = {"Authorization": f"Bearer {API_KEY}"} +HEADERS = {"Authorization": f"Bearer {environ.HUBAI_API_KEY}"} EXCEMPTED_MODELS = [ "l2cs", From f0149cd38cb915b594d4a65aa77e0e67428dc889 Mon Sep 17 00:00:00 2001 From: Petros Toupas Date: Thu, 16 Jan 2025 11:18:19 +0200 Subject: [PATCH 17/28] Update the RVC4 benchmark to take into account the data type for each input when generating the parameters for the dai or the snpe --- modelconverter/packages/rvc4/benchmark.py | 111 ++++++++++++++++++++-- 1 file changed, 102 insertions(+), 9 deletions(-) diff --git a/modelconverter/packages/rvc4/benchmark.py b/modelconverter/packages/rvc4/benchmark.py index d64219a..0607d58 100644 --- a/modelconverter/packages/rvc4/benchmark.py +++ b/modelconverter/packages/rvc4/benchmark.py @@ -79,6 +79,7 @@ def push(self, src: str, dst: str) -> Tuple[int, str, str]: class RVC4Benchmark(Benchmark): adb = AdbHandler() + force_cpu: bool = False @property def default_configuration(self) -> Configuration: @@ -105,7 +106,7 @@ def default_configuration(self) -> Configuration: def all_configurations(self) -> List[Configuration]: return [{"profile": profile} for profile in PROFILES] - def _get_input_sizes(self) -> Dict[str, List[int]]: + def _get_input_sizes(self) -> Tuple[Dict[str, List[int]], Dict[str, str]]: csv_path = Path("info.csv") subprocess_run( [ @@ -133,16 +134,32 @@ def _get_input_sizes(self) -> Dict[str, List[int]]: ) for _, row in df.iterrows() } - return sizes + data_types = { + str(row["Input Name"]): str(row["Type"]) + for _, row in df.iterrows() + } + + return sizes, data_types def _prepare_raw_inputs(self, num_images: int) -> None: - input_sizes = self._get_input_sizes() + input_sizes, data_types = self._get_input_sizes() input_list = "" self.adb.shell(f"mkdir /data/local/tmp/{self.model_name}/inputs") for i in range(num_images): for name, size in input_sizes.items(): + if data_types[name] == "Float_32": + self.force_cpu = True + numpy_type = np.float32 + elif data_types[name] == "Float_16": + numpy_type = np.float16 + elif data_types[name] == "uFxp_8": + numpy_type = np.uint8 + else: + raise ValueError( + f"Unsupported data type {data_types[name]} for input {name}." + ) img = cast(np.ndarray, np.random.rand(*size)).astype( - np.float32 + numpy_type ) with tempfile.NamedTemporaryFile() as f: img.tofile(f) @@ -165,6 +182,72 @@ def _prepare_raw_inputs(self, num_images: int) -> None: finally: Path(temp_path).unlink() + def _get_data_type(self) -> dai.TensorInfo.DataType: + """Retrieve the data type of the model inputs. If the model is not a HubAI + model, it defaults to dai.TensorInfo.DataType.U8F (INT8). + + @return: The data type of the model inputs. 
+        @rtype: dai.TensorInfo.DataType
+        """
+        from modelconverter.cli import Request, slug_to_id
+
+        if not isinstance(
+            self.model_path, str
+        ) or not self.HUB_MODEL_PATTERN.match(self.model_path):
+            return dai.TensorInfo.DataType.U8F
+
+        model_id = slug_to_id(self.model_name, "models")
+        model_variant = self.model_path.split(":")[1]
+
+        model_variants = []
+        for is_public in [True, False]:
+            try:
+                model_variants += Request.get(
+                    "modelVersions/",
+                    params={"model_id": model_id, "is_public": is_public},
+                )
+            except Exception:
+                continue
+
+        model_version_id = None
+        for version in model_variants:
+            if version["variant_slug"] == model_variant:
+                model_version_id = version["id"]
+                break
+
+        if not model_version_id:
+            return dai.TensorInfo.DataType.U8F
+
+        model_instances = []
+        for is_public in [True, False]:
+            try:
+                model_instances += Request.get(
+                    "modelInstances/",
+                    params={
+                        "model_id": model_id,
+                        "model_version_id": model_version_id,
+                        "is_public": is_public,
+                    },
+                )
+            except Exception:
+                continue
+
+        model_precision_type = "INT8"
+        for instance in model_instances:
+            if instance["platforms"] == ["RVC4"]:
+                model_precision_type = instance.get(
+                    "model_precision_type", "INT8"
+                )
+                break
+
+        if model_precision_type == "FP16":
+            return dai.TensorInfo.DataType.FP16
+        elif model_precision_type == "FP32":
+            self.force_cpu = True
+            return dai.TensorInfo.DataType.FP32
+
+        return dai.TensorInfo.DataType.U8F
+
     def benchmark(self, configuration: Configuration) -> BenchmarkResult:
         dai_benchmark = configuration.get("dai_benchmark")
         try:
@@ -224,6 +307,11 @@ def _benchmark_snpe(
             str(dlc_path), f"/data/local/tmp/{self.model_name}/model.dlc"
         )
         self._prepare_raw_inputs(num_images)
+        if self.force_cpu:
+            logger.warning(
+                "Forcing CPU runtime due to Float_32 input data type."
+            )
+            runtime = "use_cpu"
 
         _, stdout, _ = self.adb.shell(
             # "source /data/local/tmp/source_me.sh && "
             "snpe-parallel-run "
             f"--container /data/local/tmp/{self.model_name}/model.dlc "
             f"--input_list /data/local/tmp/{self.model_name}/input_list.txt "
             f"--output_dir /data/local/tmp/{self.model_name}/outputs "
             f"--perf_profile {profile} "
-            "--cpu_fallback false "
+            "--cpu_fallback true "
             f"--{runtime}"
         )
         pattern = re.compile(r"(\d+\.\d+) infs/sec")
@@ -288,12 +376,11 @@ def _benchmark_dai(
                 inputSizes.append(input.shape)
                 inputNames.append(input.name)
 
+        data_type = self._get_data_type()
         inputData = dai.NNData()
         for name, inputSize in zip(inputNames, inputSizes):
-            img = np.random.randint(
-                0, 255, (1, inputSize[1], inputSize[2], 3), np.uint8
-            )
-            inputData.addTensor(name, img)
+            img = np.random.randint(0, 255, inputSize, np.uint8)
+            inputData.addTensor(name, img, dataType=data_type)
 
         with dai.Pipeline(device) as pipeline, Progress() as progress:
             repet_task = progress.add_task(
@@ -309,6 +396,12 @@ def _benchmark_dai(
                 ".tar.xz"
             ):
                 neuralNetwork.setNNArchive(modelArhive)
+
+            if self.force_cpu:
+                logger.warning(
+                    "Forcing CPU runtime due to Float_32 input data type."
+                )
+                runtime = "cpu"
             neuralNetwork.setBackendProperties(
                 {
                     "runtime": runtime,

From 8dfdb8468359918595fe4d9d1820aedf08a2fc91 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Thu, 16 Jan 2025 11:54:19 +0200
Subject: [PATCH 18/28] Update .pre-commit-config.yaml [ci skip]

---
 .pre-commit-config.yaml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index aedcf16..232e7c4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,5 +1,3 @@
-default_language_version:
-  python: python3
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.1.2

From b58782c66004fe0689257c2ce161eb39b5704583 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Mon, 27 Jan 2025 10:23:34 +0200
Subject: [PATCH 19/28] Fix issue when extracting the model from NNArchive in
 snpe benchmark [ci skip]

---
 modelconverter/packages/rvc4/benchmark.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/modelconverter/packages/rvc4/benchmark.py b/modelconverter/packages/rvc4/benchmark.py
index 0607d58..b449766 100644
--- a/modelconverter/packages/rvc4/benchmark.py
+++ b/modelconverter/packages/rvc4/benchmark.py
@@ -1,4 +1,5 @@
 import io
+import json
 import logging
 import re
 import shutil
@@ -291,7 +292,10 @@ def _benchmark_snpe(
             tmp_dir = Path(model_archive).parent / "tmp"
             shutil.unpack_archive(model_archive, tmp_dir)
 
-            dlc_path = next(tmp_dir.rglob("*.dlc"), None)
+            dlc_model_name = json.loads((tmp_dir / "config.json").read_text())[
+                "model"
+            ]["metadata"]["path"]
+            dlc_path = next(tmp_dir.rglob(dlc_model_name), None)
             if not dlc_path:
                 raise ValueError("Could not find model.dlc in the archive.")
             self.model_path = dlc_path

From 9b2a602e48e1ca211782f472cd2f68bdb796ab02 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Mon, 27 Jan 2025 11:02:44 +0200
Subject: [PATCH 20/28] Add bool tensor type during evaluation of onnx models
 on ONNXModifier [ci skip]

---
 modelconverter/utils/onnx_tools.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modelconverter/utils/onnx_tools.py b/modelconverter/utils/onnx_tools.py
index bbc61f6..2bf8638 100644
--- a/modelconverter/utils/onnx_tools.py
+++ b/modelconverter/utils/onnx_tools.py
@@ -1172,6 +1172,8 @@ def compare_outputs(self, from_modelproto: bool = False) -> bool:
                 input_type = np.int16
             elif input.type in ["tensor(int8)"]:
                 input_type = np.int8
+            elif input.type in ["tensor(bool)"]:
+                input_type = "bool"
 
             inputs[input.name] = np.random.rand(*input.shape).astype(
                 input_type

From e0811817d74d2e3f6cd336e9a480e25660e2f436 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Mon, 27 Jan 2025 11:04:12 +0200
Subject: [PATCH 21/28] Add a try-except block on onnx optimisation and
 validation.

---
 modelconverter/packages/rvc4/exporter.py | 22 +++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/modelconverter/packages/rvc4/exporter.py b/modelconverter/packages/rvc4/exporter.py
index fd5f94e..d0d1789 100644
--- a/modelconverter/packages/rvc4/exporter.py
+++ b/modelconverter/packages/rvc4/exporter.py
@@ -68,13 +68,21 @@ def __init__(self, config: SingleStageConfig, output_dir: Path):
                 ),
             )
 
-            if (
-                onnx_modifier.modify_onnx()
-                and onnx_modifier.compare_outputs()
-            ):
-                logger.info("ONNX model has been optimised for RVC4.")
-                shutil.move(onnx_modifier.output_path, self.input_model)
-            else:
+            try:
+                if (
+                    onnx_modifier.modify_onnx()
+                    and onnx_modifier.compare_outputs()
+                ):
+                    logger.info("ONNX model has been optimised for RVC4.")
+                    shutil.move(
+                        onnx_modifier.output_path, self.input_model
+                    )
+            except Exception as e:
+                logger.warning(
+                    f"Failed to optimise ONNX model: {e}. "
+                    "Proceeding with unoptimised model."
+                )
+            finally:
                 if os.path.exists(onnx_modifier.output_path):
                     os.remove(onnx_modifier.output_path)
         else:

From 565ae6e4c0a1c51704a85d6bed468c752e317e57 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Tue, 28 Jan 2025 16:38:24 +0200
Subject: [PATCH 22/28] Add the disable_onnx_optimisation flag to the example
 defaults.yaml file

---
 shared_with_container/configs/defaults.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/shared_with_container/configs/defaults.yaml b/shared_with_container/configs/defaults.yaml
index 095bfbe..395b175 100644
--- a/shared_with_container/configs/defaults.yaml
+++ b/shared_with_container/configs/defaults.yaml
@@ -85,6 +85,9 @@ stages:
     # Do not run ONNX simplifier on the provided model.
     disable_onnx_simplification: false
 
+    # Do not run ONNX graph optimisations on the provided model.
+    disable_onnx_optimisation: false
+
     # List of input names with shapes,
     # data types, values for freezing and input modifiers.
     # Overrides the top-level input modifiers.
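
Taken together, the two patches above give two layers of control over the RVC4 ONNX optimisation step: the new `disable_onnx_optimisation` flag skips the pass entirely, while the try/except/finally block turns an optimisation failure into a warning and cleans up the temporary file either way. Below is a minimal sketch of that guarded flow, assuming an `ONNXModifier`-like object exposing `modify_onnx()`, `compare_outputs()`, and an `output_path` attribute; the standalone helper name `optimise_onnx_in_place` is hypothetical.

```python
import logging
import shutil
from pathlib import Path

logger = logging.getLogger(__name__)


def optimise_onnx_in_place(onnx_modifier, input_model: Path) -> None:
    """Replace input_model with the optimised ONNX only when optimisation
    succeeds and the optimised model still produces matching outputs."""
    try:
        if onnx_modifier.modify_onnx() and onnx_modifier.compare_outputs():
            logger.info("ONNX model has been optimised for RVC4.")
            shutil.move(onnx_modifier.output_path, input_model)
    except Exception as e:
        logger.warning(
            f"Failed to optimise ONNX model: {e}. "
            "Proceeding with unoptimised model."
        )
    finally:
        # Whether the move happened, failed, or was skipped, drop any
        # leftover temporary file so later runs start from a clean state.
        if Path(onnx_modifier.output_path).exists():
            Path(onnx_modifier.output_path).unlink()
```

The `finally` clause is the important design choice here: the temporary optimised model is never left behind, and a failure anywhere in `modify_onnx()` or `compare_outputs()` degrades gracefully to exporting the unoptimised input instead of aborting the conversion.
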
From d37ec5e6a40ab862ee1f77677639e3083f21d5d7 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Wed, 29 Jan 2025 02:39:35 +0200
Subject: [PATCH 23/28] Update dai requirement to version 3.0.0a12 [ci skip]

---
 requirements-bench.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements-bench.txt b/requirements-bench.txt
index a2d1086..c3e1706 100644
--- a/requirements-bench.txt
+++ b/requirements-bench.txt
@@ -1,2 +1,3 @@
-depthai
+--extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-release-local/
+depthai==3.0.0a12
 pandas

From 0548541ec23a769a6adb888ace44b79386c009b3 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Wed, 29 Jan 2025 02:41:44 +0200
Subject: [PATCH 24/28] Add aiobotocore requirement

---
 requirements.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 556b196..3f8684f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,4 +12,5 @@ docker
 keyring
 onnx_graphsurgeon
 onnxoptimizer
-wget
\ No newline at end of file
+wget
+aiobotocore<2.18

From c2f91f2d6b3e13a83ce725063ab750751e1067ba Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Wed, 29 Jan 2025 02:47:57 +0200
Subject: [PATCH 25/28] Remove the extra-index-url from the
 requirements-bench.txt file

---
 requirements-bench.txt | 1 -
 1 file changed, 1 deletion(-)

diff --git a/requirements-bench.txt b/requirements-bench.txt
index c3e1706..bac8378 100644
--- a/requirements-bench.txt
+++ b/requirements-bench.txt
@@ -1,3 +1,2 @@
---extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-release-local/
 depthai==3.0.0a12
 pandas

From 8fd09f1d6b2013da3195f99e49bcfa82a986032e Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Wed, 29 Jan 2025 11:47:08 +0200
Subject: [PATCH 26/28] Update the README file regarding the depthai v3
 installation.

---
 .github/workflows/modelconverter_test.yaml |  2 +-
 .github/workflows/unittests.yaml           |  2 +-
 .pre-commit-config.yaml                    |  2 +
 README.md                                  | 44 +++++++++++++---------
 requirements-bench.txt                     |  2 +-
 5 files changed, 32 insertions(+), 20 deletions(-)

diff --git a/.github/workflows/modelconverter_test.yaml b/.github/workflows/modelconverter_test.yaml
index 50f9c73..e5bb82a 100644
--- a/.github/workflows/modelconverter_test.yaml
+++ b/.github/workflows/modelconverter_test.yaml
@@ -51,7 +51,7 @@ jobs:
           cache: pip
 
       - name: Install dependencies
-        run: pip install -e .[dev]
+        run: pip install -e .[dev] --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-release-local/
 
       - name: Authenticate to Google Cloud
         id: google-auth
diff --git a/.github/workflows/unittests.yaml b/.github/workflows/unittests.yaml
index 39fbfcb..a92ef85 100644
--- a/.github/workflows/unittests.yaml
+++ b/.github/workflows/unittests.yaml
@@ -25,7 +25,7 @@ jobs:
           cache: pip
 
       - name: Install package
-        run: python -m pip install -e .[dev]
+        run: python -m pip install -e .[dev] --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-release-local/
 
       - name: Run Unit Tests
         env:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 232e7c4..aedcf16 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,3 +1,5 @@
+default_language_version:
+  python: python3
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.1.2
diff --git a/README.md b/README.md
index dc809d5..40c7e98 100644
--- a/README.md
+++ b/README.md
@@ -23,23 +23,26 @@ Convert your **ONNX** models to a format compatible with any generation of Luxon
 
 ## Table of Contents
 
-- [Installation](#installation)
-- [Configuration](#configuration)
-  - [YAML Configuration File](#yaml-configuration-file)
-  - [NN Archive Configuration File](#nn-archive-configuration-file)
-- [Online Usage](#online-usage)
-- [Local Usage](#local-usage)
-  - [Prerequisites](#prerequisites)
-  - [GPU Support](#gpu-support)
-  - [Sharing Files](#sharing-files)
-  - [Running ModelConverter](#running-modelconverter)
-  - [Examples](#examples)
-- [Multi-Stage Conversion](#multi-stage-conversion)
-- [Interactive Mode](#interactive-mode)
-- [Calibration Data](#calibration-data)
-- [Inference](#inference)
-  - [Inference Example](#inference-example)
-- [Benchmarking](#benchmarking)
+- [ModelConverter - Compilation Library](#modelconverter---compilation-library)
+  - [Status](#status)
+  - [Table of Contents](#table-of-contents)
+  - [Installation](#installation)
+  - [Configuration](#configuration)
+    - [YAML Configuration File](#yaml-configuration-file)
+    - [NN Archive Configuration File](#nn-archive-configuration-file)
+  - [Online Usage](#online-usage)
+  - [Local Usage](#local-usage)
+    - [Prerequisites](#prerequisites)
+    - [GPU Support](#gpu-support)
+    - [Sharing Files](#sharing-files)
+    - [Running ModelConverter](#running-modelconverter)
+    - [Examples](#examples)
+  - [Multi-Stage Conversion](#multi-stage-conversion)
+  - [Interactive Mode](#interactive-mode)
+  - [Calibration Data](#calibration-data)
+  - [Inference](#inference)
+    - [Inference Example](#inference-example)
+  - [Benchmarking](#benchmarking)
 
 ## Installation
@@ -52,6 +55,13 @@ pip install modelconv
 
 Run `modelconverter --help` to see the available commands and options.
 
+> \[!NOTE\]
+> To use the [benchmarking feature](#benchmarking), the `depthai v3` package must be installed. While `depthai v3` is not yet released on PyPI, you can install it with the following command:
+>
+> ```bash
+> pip install -r requirements-bench.txt --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-release-local/
+> ```
+
 ## Configuration
 
 There are two main ways to configure the conversion process:
diff --git a/requirements-bench.txt b/requirements-bench.txt
index bac8378..9f3e337 100644
--- a/requirements-bench.txt
+++ b/requirements-bench.txt
@@ -1,2 +1,2 @@
-depthai==3.0.0a12
+depthai>=3.0.0a12
 pandas

From d409c6c45f70c0d116935355cf0b828ee2a12f30 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Wed, 29 Jan 2025 12:08:28 +0200
Subject: [PATCH 27/28] Update .pre-commit-config.yaml [ci skip]

---
 .pre-commit-config.yaml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index aedcf16..232e7c4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,5 +1,3 @@
-default_language_version:
-  python: python3
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.1.2

From 6f3e950781cadf818955d58578377bffefb6d270 Mon Sep 17 00:00:00 2001
From: Petros Toupas
Date: Wed, 29 Jan 2025 12:08:50 +0200
Subject: [PATCH 28/28] Update README.md [ci skip]

---
 README.md | 37 +++++++++++++++++--------------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/README.md b/README.md
index 40c7e98..5f7bd71 100644
--- a/README.md
+++ b/README.md
@@ -23,26 +23,23 @@ Convert your **ONNX** models to a format compatible with any generation of Luxon
 
 ## Table of Contents
 
-- [ModelConverter - Compilation Library](#modelconverter---compilation-library)
-  - [Status](#status)
-  - [Table of Contents](#table-of-contents)
-  - [Installation](#installation)
-  - [Configuration](#configuration)
-    - [YAML Configuration File](#yaml-configuration-file)
-    - [NN Archive Configuration File](#nn-archive-configuration-file)
-  - [Online Usage](#online-usage)
-  - [Local Usage](#local-usage)
-    - [Prerequisites](#prerequisites)
-    - [GPU Support](#gpu-support)
-    - [Sharing Files](#sharing-files)
-    - [Running ModelConverter](#running-modelconverter)
-    - [Examples](#examples)
-  - [Multi-Stage Conversion](#multi-stage-conversion)
-  - [Interactive Mode](#interactive-mode)
-  - [Calibration Data](#calibration-data)
-  - [Inference](#inference)
-    - [Inference Example](#inference-example)
-  - [Benchmarking](#benchmarking)
+- [Installation](#installation)
+- [Configuration](#configuration)
+  - [YAML Configuration File](#yaml-configuration-file)
+  - [NN Archive Configuration File](#nn-archive-configuration-file)
+- [Online Usage](#online-usage)
+- [Local Usage](#local-usage)
+  - [Prerequisites](#prerequisites)
+  - [GPU Support](#gpu-support)
+  - [Sharing Files](#sharing-files)
+  - [Running ModelConverter](#running-modelconverter)
+  - [Examples](#examples)
+- [Multi-Stage Conversion](#multi-stage-conversion)
+- [Interactive Mode](#interactive-mode)
+- [Calibration Data](#calibration-data)
+- [Inference](#inference)
+  - [Inference Example](#inference-example)
+- [Benchmarking](#benchmarking)
 
 ## Installation
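
For reference, the input-type handling that PATCH 17 threads through `_prepare_raw_inputs` and `_benchmark_snpe` reduces to a small mapping from the type strings reported for each input (`Float_32`, `Float_16`, `uFxp_8`) to NumPy dtypes, with `Float_32` additionally forcing the CPU runtime. A minimal sketch under those assumptions (the helper name `resolve_input_dtype` is hypothetical):

```python
from typing import Dict, Tuple

import numpy as np

# Type strings as they appear in the "Type" column of the model info dump.
SNPE_TO_NUMPY = {
    "Float_32": np.float32,
    "Float_16": np.float16,
    "uFxp_8": np.uint8,
}


def resolve_input_dtype(
    name: str, data_types: Dict[str, str]
) -> Tuple[type, bool]:
    """Return the NumPy dtype for an input, plus whether the CPU runtime
    has to be forced (PATCH 17 forces it whenever an input is Float_32)."""
    snpe_type = data_types[name]
    if snpe_type not in SNPE_TO_NUMPY:
        raise ValueError(
            f"Unsupported data type {snpe_type} for input {name}."
        )
    return SNPE_TO_NUMPY[snpe_type], snpe_type == "Float_32"
```

For example, `resolve_input_dtype("image", {"image": "uFxp_8"})` returns `(np.uint8, False)`, while a `Float_32` input returns `np.float32` together with the force-CPU flag.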