From a169611be7c46a13110d67ac6415bef14fbe18d4 Mon Sep 17 00:00:00 2001
From: Vaibhav Srivastav
Date: Tue, 9 Apr 2024 14:46:05 +0200
Subject: [PATCH] Created using Colaboratory

---
 CodeGemma_colab.ipynb | 23 +----------------------
 1 file changed, 1 insertion(+), 22 deletions(-)

diff --git a/CodeGemma_colab.ipynb b/CodeGemma_colab.ipynb
index 83a89e5..cbd0205 100644
--- a/CodeGemma_colab.ipynb
+++ b/CodeGemma_colab.ipynb
@@ -3858,27 +3858,6 @@
         "id": "k95vA7tEz3XO"
       }
     },
-    {
-      "cell_type": "code",
-      "source": [
-        "# pip install accelerate\n",
-        "from transformers import GemmaTokenizer, AutoModelForCausalLM\n",
-        "\n",
-        "tokenizer = GemmaTokenizer.from_pretrained(\"google/codegemma-7b-it\")\n",
-        "model = AutoModelForCausalLM.from_pretrained(\"google/codegemma-7b-it\")\n",
-        "\n",
-        "input_text = \"Write a Python function to calculate the nth fibonacci number.\\n\"\n",
-        "input_ids = tokenizer(input_text, return_tensors=\"pt\")\n",
-        "\n",
-        "outputs = model.generate(**input_ids)\n",
-        "print(tokenizer.decode(outputs[0]))"
-      ],
-      "metadata": {
-        "id": "m3u4T5PSfhuE"
-      },
-      "execution_count": null,
-      "outputs": []
-    },
     {
       "cell_type": "code",
       "execution_count": null,
@@ -4173,7 +4152,7 @@
         "from transformers import GemmaTokenizer, AutoModelForCausalLM\n",
         "import torch\n",
         "\n",
-        "model_id = \"gg-hf/codegemma-2b\"\n",
+        "model_id = \"google/codegemma-2b\"\n",
         "\n",
         "tokenizer = GemmaTokenizer.from_pretrained(model_id)\n",
         "model = AutoModelForCausalLM.from_pretrained(\n",
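
For reference, a minimal sketch of how the corrected `google/codegemma-2b` cell from the second hunk might look when run end to end. The hunk above is cut off after `AutoModelForCausalLM.from_pretrained(`, so the `torch_dtype` keyword, the prompt, and the generation settings below are assumptions for illustration, not taken from the patch.

```python
# Sketch of the corrected cell; dtype, prompt, and generation
# settings are illustrative and not part of the patch above.
from transformers import GemmaTokenizer, AutoModelForCausalLM
import torch

model_id = "google/codegemma-2b"  # fixed repo id (was "gg-hf/codegemma-2b")

tokenizer = GemmaTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # assumption: the truncated hunk does not show the kwargs
)

# CodeGemma 2B is a code-completion model, so a plain code prefix is a
# reasonable prompt; this prompt is illustrative only.
prompt = "def fibonacci(n):"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```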