From 631b1037be3e28c8eb25b1ecd460fe28b65bb7f2 Mon Sep 17 00:00:00 2001 From: Alvaro Bartolome <36760800+alvarobartt@users.noreply.github.com> Date: Wed, 16 Oct 2024 19:16:22 +0200 Subject: [PATCH] Add reference to GitHub in documentation examples (#114) * Add final note with GitHub reference * Replace `WARN` with `WARNING` for correct rendering * Update GitHub URL note formatting * Update GitHub URL note formatting * Use a single line-break before/after `---` --- docs/scripts/auto-generate-examples.py | 10 ++++++++++ .../deploy-bert-on-vertex-ai/vertex-notebook.ipynb | 2 +- .../vertex-notebook.ipynb | 2 +- .../deploy-flux-on-vertex-ai/vertex-notebook.ipynb | 2 +- .../vertex-notebook.ipynb | 2 +- .../vertex-notebook.ipynb | 2 +- .../evaluate-llms-with-vertex-ai/vertex-notebook.ipynb | 2 +- 7 files changed, 16 insertions(+), 6 deletions(-) diff --git a/docs/scripts/auto-generate-examples.py b/docs/scripts/auto-generate-examples.py index 4f87a105..0732a216 100644 --- a/docs/scripts/auto-generate-examples.py +++ b/docs/scripts/auto-generate-examples.py @@ -93,6 +93,16 @@ def replacement(match): else: print("No relative paths found in the processed file.") + # Calculate the example URL + example_url = ( + f"https://github.com/huggingface/Google-Cloud-Containers/tree/main/{root}" + ) + if file.__contains__("ipynb"): + example_url += "/vertex-notebook.ipynb" + + # Add the final note + content += f"\n---\n\n\nšŸ“ Find the complete example on GitHub [here]({example_url})!\n\n" + with open(target, "w") as f: f.write(content) diff --git a/examples/vertex-ai/notebooks/deploy-bert-on-vertex-ai/vertex-notebook.ipynb b/examples/vertex-ai/notebooks/deploy-bert-on-vertex-ai/vertex-notebook.ipynb index a8e7b88b..bd15307a 100644 --- a/examples/vertex-ai/notebooks/deploy-bert-on-vertex-ai/vertex-notebook.ipynb +++ b/examples/vertex-ai/notebooks/deploy-bert-on-vertex-ai/vertex-notebook.ipynb @@ -262,7 +262,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "> [!WARN]\n", + "> 
[!WARNING]\n", "> The Vertex AI endpoint deployment via the `deploy` method may take from 15 to 25 minutes." ] }, diff --git a/examples/vertex-ai/notebooks/deploy-embedding-on-vertex-ai/vertex-notebook.ipynb b/examples/vertex-ai/notebooks/deploy-embedding-on-vertex-ai/vertex-notebook.ipynb index c915b1cb..39ab3bc4 100644 --- a/examples/vertex-ai/notebooks/deploy-embedding-on-vertex-ai/vertex-notebook.ipynb +++ b/examples/vertex-ai/notebooks/deploy-embedding-on-vertex-ai/vertex-notebook.ipynb @@ -260,7 +260,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "> [!WARN]\n", + "> [!WARNING]\n", "> The Vertex AI endpoint deployment via the `deploy` method may take from 15 to 25 minutes." ] }, diff --git a/examples/vertex-ai/notebooks/deploy-flux-on-vertex-ai/vertex-notebook.ipynb b/examples/vertex-ai/notebooks/deploy-flux-on-vertex-ai/vertex-notebook.ipynb index 6a6068bf..932c0d74 100644 --- a/examples/vertex-ai/notebooks/deploy-flux-on-vertex-ai/vertex-notebook.ipynb +++ b/examples/vertex-ai/notebooks/deploy-flux-on-vertex-ai/vertex-notebook.ipynb @@ -296,7 +296,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "> [!WARN]\n", + "> [!WARNING]\n", "> The Vertex AI endpoint deployment via the `deploy` method may take from 15 to 25 minutes."
] }, diff --git a/examples/vertex-ai/notebooks/deploy-llama-3-1-405b-on-vertex-ai/vertex-notebook.ipynb b/examples/vertex-ai/notebooks/deploy-llama-3-1-405b-on-vertex-ai/vertex-notebook.ipynb index bdf85f23..76dad24b 100644 --- a/examples/vertex-ai/notebooks/deploy-llama-3-1-405b-on-vertex-ai/vertex-notebook.ipynb +++ b/examples/vertex-ai/notebooks/deploy-llama-3-1-405b-on-vertex-ai/vertex-notebook.ipynb @@ -348,7 +348,7 @@ "id": "18dd3890-eeb1-42a5-86f1-471c06194147", "metadata": {}, "source": [ - "> [!WARN]\n", + "> [!WARNING]\n", "> [`meta-llama/Meta-Llama-3.1-405B-Instruct-FP8`](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct-FP8) deployment on Vertex AI will take \~30 minutes to deploy, as it needs to allocate the resources on Google Cloud, and then download the weights from the Hugging Face Hub (\~10 minutes) and load those for inference in TGI (\~3 minutes)." ] }, diff --git a/examples/vertex-ai/notebooks/deploy-llama-vision-on-vertex-ai/vertex-notebook.ipynb b/examples/vertex-ai/notebooks/deploy-llama-vision-on-vertex-ai/vertex-notebook.ipynb index 42c40b62..cbb4877f 100644 --- a/examples/vertex-ai/notebooks/deploy-llama-vision-on-vertex-ai/vertex-notebook.ipynb +++ b/examples/vertex-ai/notebooks/deploy-llama-vision-on-vertex-ai/vertex-notebook.ipynb @@ -217,7 +217,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "> [!WARN]\n", + "> [!WARNING]\n", "> Note that the `MESSAGES_API_ENABLED` flag will only work from the TGI 2.3 DLC i.e.
`us-docker.pkg.dev/deeplearning-platform-release/gcr.io/huggingface-text-generation-inference-cu124.2-3.ubuntu2204.py311`, onwards.\n", ">\n", "> For the previous releases the `MESSAGES_API_ENABLED` flag won't work as it was introduced [in the following TGI PR](https://github.com/huggingface/text-generation-inference/pull/2481), the uncompatible releases being:\n", diff --git a/examples/vertex-ai/notebooks/evaluate-llms-with-vertex-ai/vertex-notebook.ipynb b/examples/vertex-ai/notebooks/evaluate-llms-with-vertex-ai/vertex-notebook.ipynb index d40e3faf..09cc79d9 100644 --- a/examples/vertex-ai/notebooks/evaluate-llms-with-vertex-ai/vertex-notebook.ipynb +++ b/examples/vertex-ai/notebooks/evaluate-llms-with-vertex-ai/vertex-notebook.ipynb @@ -207,7 +207,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "> [!WARN]\n", + "> [!WARNING]\n", "> The Vertex AI endpoint deployment via the `deploy` method may take from 15 to 25 minutes.\n", "\n", "After the model is deployed, we can test our endpoint. We generate a helper `generate` function to send requests to the deployed model. This will be later used to send requests to the deployed model and collect the outputs for evaluation."