Commit 593bd6a: Revise backend to use llama-cpp-python instead of transformers

dylanebert committed Dec 3, 2024 · 1 parent e6500f8

Showing 22 changed files with 184 additions and 149 deletions.
21 changes: 15 additions & 6 deletions .github/workflows/package-release.yml
@@ -10,12 +10,12 @@ jobs:
     strategy:
       matrix:
         platform:
-          - requirements: win-linux-cuda.txt
+          - filename: cpu
            os: windows-latest
-            filename: windows-cuda
-          - requirements: mac-mps-cpu.txt
-            os: macos-14
-            filename: macos-arm
+            requirements: cpu.txt
+          - filename: cuda
+            os: windows-latest
+            requirements: cuda.txt
     runs-on: ${{ matrix.platform.os }}
     steps:
       - name: Checkout repository
@@ -28,9 +28,18 @@
         with:
           python-version: '3.11'
 
+      - name: Upgrade pip
+        shell: bash
+        run: "python -m pip install --upgrade setuptools pip"
+
       - name: Install dependencies
         shell: bash
-        run: "python -m pip install -r requirements/${{ matrix.platform.requirements }} --no-cache-dir --target .python_dependencies"
+        run: "python -m pip install -r requirements/${{ matrix.platform.requirements }} --only-binary=llama_cpp_python --no-cache-dir --target .python_dependencies"
         working-directory: meshgen
 
+      - name: Bundle models
+        shell: bash
+        run: "python scripts/bundle_models.py"
+        working-directory: meshgen
+
       - name: Archive release
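Two notes on the workflow changes: the matrix now selects a requirements file per build flavor (`cpu.txt`/`cuda.txt`) rather than per OS, and the added `--only-binary=llama_cpp_python` flag makes pip refuse to compile llama-cpp-python from source, so the CI install fails fast unless a prebuilt wheel is available for the runner. The new `Bundle models` step runs `scripts/bundle_models.py`, which is not part of the diff shown on this page; below is a minimal sketch of what such a script plausibly does, assuming it mirrors the manifest-driven download logic in `operators/download_models.py` further down — the real script may differ.

```
# scripts/bundle_models.py -- hypothetical sketch; the actual script is not
# shown in this diff. Assumes it pre-downloads every model listed in
# .models/manifest.json into .models/ so the packaged release ships with them.
import json
import os

from huggingface_hub import hf_hub_download

models_dir = ".models"

with open(os.path.join(models_dir, "manifest.json"), "r") as f:
    manifest = json.load(f)

for model in manifest["required_models"]:
    # local_dir drops the .gguf next to manifest.json instead of the HF cache
    hf_hub_download(model["repo_id"], filename=model["filename"], local_dir=models_dir)
```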
24 changes: 24 additions & 0 deletions .github/workflows/stale.yml
@@ -0,0 +1,24 @@
+name: Close inactive issues
+on:
+  schedule:
+    - cron: "30 1 * * *"
+  workflow_dispatch:
+
+jobs:
+  close-issues:
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: actions/stale@v5
+        with:
+          days-before-issue-stale: 60
+          days-before-issue-close: 7
+          stale-issue-label: "stale"
+          stale-issue-message: "This issue is stale because it has been open for 60 days with no activity."
+          close-issue-message: "This issue was closed because it has been inactive for 7 days since being marked as stale."
+          days-before-pr-stale: -1
+          days-before-pr-close: -1
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          only-labels: bug
3 changes: 3 additions & 0 deletions .models/.gitignore
@@ -0,0 +1,3 @@
+*
+!.gitignore
+!manifest.json
8 changes: 8 additions & 0 deletions .models/manifest.json
@@ -0,0 +1,8 @@
+{
+  "required_models": [
+    {
+      "repo_id": "bartowski/LLaMA-Mesh-GGUF",
+      "filename": "LLaMA-Mesh-Q4_K_M.gguf"
+    }
+  ]
+}
13 changes: 0 additions & 13 deletions README.md
@@ -10,10 +10,6 @@ This initial release contains a minimal integration of [LLaMa-Mesh](https://gith
 
 Go to the [Latest Release](https://github.com/huggingface/meshgen/releases/latest) page for a download link and installation instructions.
 
-# Setup
-- In `MeshGen` addon preferences, click `Download Required Models`
-- If set up correctly, addon preferences should display the message `Ready to generate. Press 'N' -> MeshGen to get started.'
-
 # Usage
 
 - Press `N` -> `MeshGen` (or `View` -> `Sidebar` -> Select the `MeshGen` tab)
@@ -23,15 +23,6 @@ Go to the [Latest Release](https://github.com/huggingface/meshgen/releases/lates
 
 # Troubleshooting
 
-- ModuleNotFoundError when loading generator:
-  - Go to Scripting and give the following command in the Python Interactive Console to get the path to your modules folder
-  ```
-  >>> bpy.utils.user_resource("SCRIPTS", path="modules")
-  ```
-  - Install missing modules by specifying target using pip. For example installing PyTorch in Windows with cuda 12.1:
-  ```
-  pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121 --target=<path-to-your-modules-folder>
-  ```
 - Find errors in the console:
   - Windows: In Blender, go to `Window` -> `Toggle System Console`
   - Mac/Linux: Launch Blender from the terminal
2 changes: 1 addition & 1 deletion __init__.py
@@ -2,7 +2,7 @@
     "name": "MeshGen",
     "description": "A Blender addon for generating meshes with AI",
     "author": "Hugging Face",
-    "version": (0, 2, 3),
+    "version": (0, 3, 0),
     "blender": (4, 1, 0),
     "category": "Mesh",
     "support": "COMMUNITY",
81 changes: 26 additions & 55 deletions generator/generator.py
@@ -1,67 +1,52 @@
-import json
 import os
 import sys
 import traceback
-from threading import Lock
-
 
 from ..operators.install_dependencies import load_dependencies
 from ..utils import absolute_path
 
 
 class Generator:
     _instance = None
-    _lock = Lock()
 
     def __new__(cls):
-        if not cls._instance:
-            with cls._lock:
-                if not cls._instance:
-                    cls._instance = super(Generator, cls).__new__(cls)
-                    cls._instance.initialized = False
+        if not cls._instance:
+            cls._instance = super(Generator, cls).__new__(cls)
+            cls._instance.initialized = False
         return cls._instance
 
     def __init__(self):
        if self.initialized:
            return
-        self.required_models = ["Zhengyi/LLaMa-Mesh"]
+        import json
+        manifest_path = absolute_path(".models/manifest.json")
+        with open(manifest_path, "r") as f:
+            manifest = json.load(f)
+        self.required_models = manifest["required_models"]
         self.downloaded_models = []
         self.dependencies_installed = False
         self.dependencies_loaded = False
-        self.device = "cpu"
-        self.tokenizer = None
-        self.pipeline = None
-        self.terminators = []
+        self.llm = None
         self.initialized = True
 
-    def _process_model_dir(self, dir_name):
-        from huggingface_hub.constants import HF_HUB_CACHE
-
-        model_dir = os.path.join(HF_HUB_CACHE, dir_name)
-        if not os.path.isdir(model_dir):
-            return None
-        model_name = os.path.basename(model_dir).replace("models--", "").replace("--", "/")
-        return model_name
-
     def _list_downloaded_models(self):
-        from huggingface_hub.constants import HF_HUB_CACHE
-
         models = []
 
-        if not os.path.exists(HF_HUB_CACHE):
+        models_dir = absolute_path(".models")
+
+        if not os.path.exists(models_dir):
             self.downloaded_models = models
             return
 
-        for dir_name in os.listdir(HF_HUB_CACHE):
-            model_name = self._process_model_dir(dir_name)
-            if model_name:
-                models.append(model_name)
+        for filename in os.listdir(models_dir):
+            if filename.endswith(".gguf"):
+                models.append(filename)
 
         self.downloaded_models = models
 
     def _ensure_dependencies(self):
         if not self.dependencies_installed:
-            self.dependencies_installed = len(os.listdir(absolute_path(".python_dependencies"))) > 2
+            self.dependencies_installed = len(os.listdir(absolute_path(".python_dependencies"))) > 2
 
         if self.dependencies_installed and not self.dependencies_loaded:
             load_dependencies()
@@ -73,39 +73,25 @@ def has_dependencies(self):
 
     def has_required_models(self):
         self._list_downloaded_models()
-        return all(model in self.downloaded_models for model in self.required_models)
+        return all(model["filename"] in self.downloaded_models for model in self.required_models)
 
     def is_generator_loaded(self):
-        return self.pipeline is not None
+        return self.llm is not None
 
     def load_generator(self):
         print("Loading generator...")
 
         self._ensure_dependencies()
 
         try:
-            import torch
-            from transformers import AutoModelForCausalLM, AutoTokenizer
-
-            if torch.cuda.is_available():
-                self.device = "cuda"
-                print("Using CUDA")
-            elif torch.backends.mps.is_available():
-                self.device = "mps"
-                print("Using MPS")
-            else:
-                print("Running on CPU")
-
-            model_path = "Zhengyi/LLaMa-Mesh"
-            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
-            self.pipeline = AutoModelForCausalLM.from_pretrained(
-                model_path,
-                torch_dtype=torch.bfloat16,
-            ).to(self.device)
-            self.terminators = [
-                self.tokenizer.eos_token_id,
-                self.tokenizer.convert_tokens_to_ids("<|eot_id|>")
-            ]
+            import llama_cpp
+
+            self.llm = llama_cpp.Llama(
+                model_path=absolute_path(".models/LLaMA-Mesh-Q4_K_M.gguf"),
+                n_gpu_layers=-1,
+                seed=1337,
+                n_ctx=4096,
+            )
 
             print("Finished loading generator.")
 
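The generator now holds a single `llama_cpp.Llama` handle; `n_gpu_layers=-1` offloads all layers to the GPU when the installed wheel has GPU support, and the model runs on CPU otherwise. The code that actually prompts the model is outside the portion of the diff shown on this page; below is a minimal usage sketch, assuming llama-cpp-python's OpenAI-style chat API and an illustrative prompt — not the addon's actual generation operator.

```
# Hypothetical driver code -- the addon's real generation operator is not
# shown on this page.
generator = Generator()
generator.load_generator()  # loads .models/LLaMA-Mesh-Q4_K_M.gguf via llama_cpp.Llama

# llama-cpp-python exposes an OpenAI-style chat completion API.
response = generator.llm.create_chat_completion(
    messages=[{"role": "user", "content": "Create a 3D model of a table."}],
    max_tokens=2048,
)
# LLaMA-Mesh replies with OBJ-style vertex/face text for the addon to parse.
print(response["choices"][0]["message"]["content"])
```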
15 changes: 10 additions & 5 deletions operators/download_models.py
@@ -1,8 +1,9 @@
 import bpy
+import os
 import sys
 
-from ..generator.generator import Generator
-from ..utils import open_console
+from ..generator import Generator
+from ..utils import absolute_path, open_console
 
 
 class MESHGEN_OT_DownloadRequiredModels(bpy.types.Operator):
@@ -14,17 +14,21 @@ def execute(self, context):
         if sys.platform == "win32":
             open_console()
 
-        from huggingface_hub import snapshot_download
+        from huggingface_hub import hf_hub_download
 
         generator = Generator.instance()
         models_to_download = [model for model in generator.required_models if model not in generator.downloaded_models]
 
         if not models_to_download:
             print("All required models are already downloaded.")
             return
 
+        models_dir = absolute_path(".models")
+        if not os.path.exists(models_dir):
+            os.makedirs(models_dir)
+
         for model in models_to_download:
-            print(f"Downloading model: {model}")
-            snapshot_download(model)
+            print(f"Downloading model: {model['repo_id']}:{model['filename']}")
+            hf_hub_download(model["repo_id"], filename=model["filename"], local_dir=models_dir)
         generator._list_downloaded_models()
         return {"FINISHED"}
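Unlike the old `snapshot_download(model)` call, which pulled an entire repository into the Hugging Face cache, `hf_hub_download` fetches just the one GGUF file, and `local_dir` writes it directly into the addon's `.models/` directory — the same directory `Generator._list_downloaded_models()` scans. A standalone sketch of the equivalent call for the single manifest entry, assuming the addon root as the working directory:

```
from huggingface_hub import hf_hub_download

# Downloads one 4-bit-quantized GGUF file into .models/ (not the HF cache)
# and returns its local path, e.g. ".models/LLaMA-Mesh-Q4_K_M.gguf".
path = hf_hub_download(
    "bartowski/LLaMA-Mesh-GGUF",
    filename="LLaMA-Mesh-Q4_K_M.gguf",
    local_dir=".models",
)
print(path)
```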
(Diffs for the remaining 14 changed files are not shown.)
