Initialize edge streamlit page #59

Merged: 12 commits, Jan 13, 2025
12 changes: 12 additions & 0 deletions deployment/edge/ansible/files/docker-compose.template.yml
@@ -39,3 +39,15 @@ services:
      dockerfile: Dockerfile
    ports:
      - 8080:80

  edge_streamlit:
    container_name: edge_streamlit
    build:
      context: edge_streamlit
      dockerfile: Dockerfile
    # devices:
    #   - /dev/video0:/dev/video0
    #   - /dev/video2:/dev/video2
    ports:
      - 8502:8501
    profiles: [edge]
2 changes: 1 addition & 1 deletion docs/edge_orchestrator.md
@@ -191,7 +191,7 @@ Here's a template of a config file.
}
},
"item_rule": {
"name": "min_threshold_KO_rule", #the item rule name
"name": "min_threshold_ko_rule", #the item rule name
"parameters": {
"threshold": 1
}
@@ -21,7 +21,7 @@
}
},
"item_rule": {
"name": "min_threshold_KO_rule",
"name": "min_threshold_ko_rule",
"parameters": {
"threshold": 1
}
@@ -0,0 +1,48 @@
{
  "name": "yolo_coco",
  "cameras": {
    "camera_id3": {
      "type": "fake",
      "source": "people_dataset",
      "position": "back",
      "exposition": 100,
      "models_graph": {
        "model_id4": {
          "name": "yolo_coco_nano",
          "depends_on": []
        }
      },
      "camera_rule": {
        "name": "expected_label_rule",
        "parameters": {
          "expected_label": ["person"]
        }
      }
    },
    "camera_id4": {
      "type": "fake",
      "source": "people_dataset",
      "position": "back",
      "exposition": 100,
      "models_graph": {
        "model_id4": {
          "name": "yolo_coco_nano",
          "depends_on": []
        }
      },
      "camera_rule": {
        "name": "expected_label_rule",
        "parameters": {
          "expected_label": ["person"]
        }
      }
    }
  },
  "item_rule": {
    "name": "min_threshold_ko_rule",
    "parameters": {
      "threshold": 1
    }
  }
}
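
For illustration only (not part of this PR's diff): a minimal Python sketch of activating a configuration such as "yolo_coco" through the orchestrator API that the new Streamlit page consumes (the configs/active endpoint used in app.py below). The base URL and the exact config name key are assumptions for a local setup.

import requests

BASE_URL = "http://localhost:8000/api/v1/"  # assumed local orchestrator address

# Activate the configuration by name, then read back the active config.
requests.post(BASE_URL + "configs/active", json={"config_name": "yolo_coco"})
active = requests.get(BASE_URL + "configs/active").json()
print("active config:", active.get("name"))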

29 changes: 29 additions & 0 deletions edge_streamlit/Dockerfile
@@ -0,0 +1,29 @@
FROM python:3.9-slim

# let print debug messages log on the console
ENV PYTHONUNBUFFERED=1
ENV PYTHONIOENCODING=UTF-8

ENV ORCHESTRATOR_URL=http://edge_orchestrator:8000/api/v1/

WORKDIR /app

RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    software-properties-common \
    git \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .

RUN pip install -r requirements.txt

COPY app.py .
COPY prediction_boxes.py .

EXPOSE 8501

HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health

ENTRYPOINT ["streamlit", "run", "app.py"]
27 changes: 27 additions & 0 deletions edge_streamlit/Makefile
@@ -0,0 +1,27 @@
SHELL := /bin/bash
.SHELLFLAGS = -ec
.ONESHELL:
.SILENT:

.PHONY: help
help:
	echo "❓ Use \`make <target>\`"
	grep -E '^\.PHONY: [a-zA-Z0-9_-]+ .*?## .*$$' $(MAKEFILE_LIST) | \
	awk 'BEGIN {FS = "(: |##)"}; {printf "\033[36m%-30s\033[0m %s\n", $$2, $$3}'

.PHONY: env ## Create a virtual environment
env:
	python3 -m venv env
	. env/bin/activate
	pip install --upgrade pip
	pip install -r requirements.txt

.PHONY: run ## Run the streamlit app
run:
	streamlit run app.py

.PHONY: linting ## 🐍 Lint Python files to conform to the PEP 8 style guide
linting:
	black .
	isort . --gitignore
	autoflake -i --remove-all-unused-imports -r --ignore-init-module-imports . --exclude .venv
110 changes: 110 additions & 0 deletions edge_streamlit/app.py
@@ -0,0 +1,110 @@
import json
import os
import time
import requests
import streamlit as st
from prediction_boxes import filtering_items_that_have_predictions, plot_predictions
from PIL import Image
from io import BytesIO

# Page configuration
st.set_page_config(page_title="VIO-edge", page_icon="🔦", layout="wide")

URL_ORCH = os.getenv("ORCHESTRATOR_URL", "http://localhost:8000/api/v1/")

url_config = URL_ORCH + "configs"
url_active_config = URL_ORCH + "configs/active"
url_trigger = URL_ORCH + "trigger"


def main():
    active_config = json.loads(requests.get(url_active_config).text)
    if active_config:
        st.session_state.active_config = active_config

    if "active_config" not in st.session_state:
        st.session_state.active_config = None
    if "trigger" not in st.session_state:
        st.session_state.trigger = False
    if "item_id" not in st.session_state:
        st.session_state.item_id = None

    col1, col2, col3 = st.columns(3)

    configs = json.loads(requests.get(url_config).text)

    active_config_index = 0
    if st.session_state.active_config:
        active_config_name = st.session_state.active_config.get("name")
        active_config_index = next(
            (
                index
                for (index, config) in enumerate(configs.values())
                if config["name"] == active_config_name
            ),
            0,
        )
    option = col1.selectbox(
        "Select an option",
        tuple(configs),
        index=active_config_index,
        label_visibility="collapsed",
    )

    if col2.button("Active", use_container_width=True):
        st.session_state.item_id = None
        body = {"config_name": option}
        requests.post(url=url_active_config, json=body)
        st.session_state.active_config = json.loads(
            requests.get(url_active_config).text
        )

    if st.session_state.active_config:
        active_config_name = st.session_state.active_config.get("name")
        col2.write(f"active config: {active_config_name}")

    if st.session_state.active_config:
        if col3.button("Trigger", use_container_width=True):
            st.session_state.trigger = True
            response = requests.post(url_trigger)
            item_id = response.json().get("item_id")
            st.session_state.item_id = item_id
            col3.write(f"item id: {item_id}")

    columns = st.columns(2)

    if st.session_state.item_id and (st.session_state.active_config is not None):
        time.sleep(5)

        url_metadata = URL_ORCH + f"items/{st.session_state.item_id}"
        response = requests.get(url_metadata)
        metadata = response.json()
        decision = metadata["decision"]
        inferences = metadata["inferences"]

        print("decision", decision)
        print("inferences", inferences)

        cameras = st.session_state.active_config["cameras"]
        for i, camera in enumerate(cameras):
            url_binaries = (
                URL_ORCH + f"items/{st.session_state.item_id}/binaries/{camera}"
            )
            response = requests.get(url_binaries)
            image = response.content
            # If metadata is not empty, we plot the predictions
            if filtering_items_that_have_predictions(metadata, camera):
                image = Image.open(BytesIO(image))
                image = plot_predictions(image, camera, metadata)
            columns[i].image(image, channels="BGR", width=450)
            if inferences.get(camera):
                columns[i].markdown(inferences[camera])

        st.markdown(
            f"<h1 style='text-align: center; color: #e67e22;'>{decision}</h1>",
            unsafe_allow_html=True,
        )


if __name__ == "__main__":
    main()
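
For illustration only (not part of this PR's diff): a headless sketch of the trigger / metadata / binary round-trip that app.py performs, useful for exercising the orchestrator API without Streamlit. The base URL, the fixed wait, and the JPEG assumption for the binaries mirror app.py but are not guaranteed by it.

import time

import requests

ORCH = "http://localhost:8000/api/v1/"  # assumed local orchestrator address

# Fire one trigger; the orchestrator returns the id of the captured item.
item_id = requests.post(ORCH + "trigger").json().get("item_id")
time.sleep(5)  # same crude wait app.py uses before polling the item

# Fetch the item's metadata (decision + per-camera inferences).
metadata = requests.get(ORCH + f"items/{item_id}").json()
print("decision:", metadata.get("decision"))

# Download one binary per camera that produced inferences (assumed JPEG).
for camera_id in metadata.get("inferences", {}):
    binary = requests.get(ORCH + f"items/{item_id}/binaries/{camera_id}").content
    with open(f"{camera_id}.jpg", "wb") as f:
        f.write(binary)
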
64 changes: 64 additions & 0 deletions edge_streamlit/prediction_boxes.py
@@ -0,0 +1,64 @@
from PIL import Image, ImageDraw, ImageFont


def filtering_items_that_have_predictions(metadata: dict, camera_id: str):
    if metadata == {} or metadata is None:
        return False
    elif metadata["inferences"] == {}:
        return False
    for model_results in metadata["inferences"][camera_id].values():
        if model_results == "NO_DECISION" or model_results == {}:
            return False
        for prediction in model_results.values():
            if "location" not in prediction:
                return False
    return True


def plot_predictions(img: Image, camera_id: str, metadata: dict):
    if metadata["inferences"] == {}:
        return img
    camera_prediction_metadata = metadata["inferences"][camera_id]
    models = camera_prediction_metadata.keys()
    for model in models:
        detected_objects = camera_prediction_metadata[model].values()
        for detected_object in detected_objects:
            bbox = detected_object["location"]
            label = detected_object["label"]
            img = draw_bbox(img, bbox, label)

    return img


def draw_bbox(img, bbox, label):
    draw = ImageDraw.Draw(img)
    width, height = img.size

    # Convert normalized coordinates to pixel values
    top_left_x = int(bbox[0] * width)
    top_left_y = int(bbox[1] * height)
    bottom_right_x = int(bbox[2] * width)
    bottom_right_y = int(bbox[3] * height)

    # Draw the bounding box
    draw.rectangle(
        [top_left_x, top_left_y, bottom_right_x, bottom_right_y], outline="red", width=2
    )

    # Load a font
    font = ImageFont.load_default()

    # Calculate text size and position
    text_size = draw.textbbox((0, 0), label, font=font)[2:]
    text_x = top_left_x
    text_y = top_left_y - text_size[1] if top_left_y - text_size[1] > 0 else top_left_y

    # Draw the label background
    draw.rectangle(
        [text_x, text_y, text_x + text_size[0], text_y + text_size[1]], fill="red"
    )

    # Draw the label text
    draw.text((text_x, text_y), label, fill="white", font=font)

    return img
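
For illustration only (not part of this PR's diff): a small usage sketch of these helpers with a hypothetical metadata payload. The shape mirrors what the functions expect: inferences keyed by camera id, then model id, then detected object, each object carrying a "label" and a normalized [x_min, y_min, x_max, y_max] "location"; the ids and values here are made up.

from PIL import Image

fake_metadata = {
    "inferences": {
        "camera_id3": {
            "model_id4": {
                "object_1": {"label": "person", "location": [0.1, 0.2, 0.45, 0.9]},
            }
        }
    }
}

img = Image.new("RGB", (640, 480), color="gray")  # stand-in for a camera frame
if filtering_items_that_have_predictions(fake_metadata, "camera_id3"):
    img = plot_predictions(img, "camera_id3", fake_metadata)
img.save("annotated_example.jpg")
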
3 changes: 3 additions & 0 deletions edge_streamlit/requirements.txt
@@ -0,0 +1,3 @@
opencv-python-headless==4.7.0.72
streamlit==1.41.0
numpy==1.24.1