
Commit

update versions
ProKil committed Jan 7, 2024
1 parent bffa389 commit 84a8ef1
Showing 3 changed files with 3 additions and 3 deletions.
2 changes: 1 addition & 1 deletion lmlib/serve/lm_inference.py
@@ -56,7 +56,7 @@ def get_gpu_memory(max_gpus: Union[int, None] = None) -> list[float]:
         with torch.cuda.device(gpu_id):
             device = torch.cuda.current_device()
             gpu_properties = torch.cuda.get_device_properties(device)
-            total_memory = gpu_properties.total_memory / (1024**3)
+            total_memory = gpu_properties.total_memory / (1024**3)  # type: ignore
             allocated_memory = torch.cuda.memory_allocated() / (1024**3)
             available_memory = total_memory - allocated_memory
             gpu_memory.append(available_memory)
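For context, the hunk above sits inside a per-GPU memory probe. Below is a minimal self-contained sketch of that pattern; the parts of the function body not shown in the diff are an assumption rather than the repository's exact code, and the `# type: ignore` mirrors the change above, presumably because the updated mypy no longer resolves attributes on the private `_CudaDeviceProperties` object that `torch.cuda.get_device_properties` returns.

from typing import Union

import torch


def get_gpu_memory(max_gpus: Union[int, None] = None) -> list[float]:
    """Return the available memory, in GiB, of each visible CUDA GPU."""
    num_gpus = torch.cuda.device_count()
    if max_gpus is not None:
        num_gpus = min(num_gpus, max_gpus)
    gpu_memory: list[float] = []
    for gpu_id in range(num_gpus):
        with torch.cuda.device(gpu_id):
            device = torch.cuda.current_device()
            gpu_properties = torch.cuda.get_device_properties(device)
            # Convert bytes to GiB; the ignore matches the diff above,
            # where this attribute access trips the newer mypy.
            total_memory = gpu_properties.total_memory / (1024**3)  # type: ignore
            allocated_memory = torch.cuda.memory_allocated() / (1024**3)
            available_memory = total_memory - allocated_memory
            gpu_memory.append(available_memory)
    return gpu_memory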
2 changes: 1 addition & 1 deletion poetry.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -28,7 +28,7 @@ absl-py = "^2.0.0"
 names = "^0.3.0"
 together = "^0.2.4"
 pydantic = "1.10.12"
-mypy = "^1.6.0"
+mypy = "^1.8.0"
 beartype = "^0.14.0"
 torch = "^2.1"
 transformers = "^4.34.0"
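Under Poetry's caret semantics, `^1.8.0` allows any mypy release at or above 1.8.0 and below 2.0.0, whereas the old `^1.6.0` floor also admitted 1.6.x and 1.7.x. Presumably the stricter checks in the newer mypy are what prompted the `# type: ignore` added above, and the matching one-line change in poetry.lock pins the concrete version resolved within the new range.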
