diff --git a/edge_streamlit/Dockerfile b/edge_streamlit/Dockerfile index bcca18a7..4397d86a 100644 --- a/edge_streamlit/Dockerfile +++ b/edge_streamlit/Dockerfile @@ -27,4 +27,4 @@ EXPOSE 8501 HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health -ENTRYPOINT ["streamlit", "run", "app-basic.py"] +ENTRYPOINT ["streamlit", "run", "app.py"] diff --git a/edge_streamlit/Makefile b/edge_streamlit/Makefile index a12a93c7..0c03e73f 100644 --- a/edge_streamlit/Makefile +++ b/edge_streamlit/Makefile @@ -18,7 +18,7 @@ env: .PHONY: run ## Run the streamlit app run: - streamlit run app-basic.py + streamlit run app.py .PHONY: linting ## 🐍 Lint Python files to conform to the PEP 8 style guide linting: diff --git a/edge_streamlit/app-basic.py b/edge_streamlit/app-basic.py deleted file mode 100644 index 5b3656a6..00000000 --- a/edge_streamlit/app-basic.py +++ /dev/null @@ -1,106 +0,0 @@ -import json -import os -import time -import requests -import streamlit as st -from prediction_boxes import filtering_items_that_have_predictions, plot_predictions -from PIL import Image -from io import BytesIO - -# Page configuration -st.set_page_config(page_title="VIO-edge", page_icon="🔦", layout="wide") - -URL_ORCH = os.getenv("ORCHESTRATOR_URL", "http://localhost:8000/api/v1/") - -url_config = URL_ORCH + "configs" -url_active_config = URL_ORCH + "configs/active" -url_trigger = URL_ORCH + "trigger" - - -def main(): - """ - Fonction principale de l'application Streamlit. 
- """ - # Init variables - active_config = json.loads(requests.get(url_active_config).text) - if active_config: - st.session_state.active_config = active_config - - if "active_config" not in st.session_state: - st.session_state.active_config = None - if "trigger" not in st.session_state: - st.session_state.trigger = False - if "item_id" not in st.session_state: - st.session_state.item_id = None - - col1, col2, col3 = st.columns(3) - - configs = json.loads(requests.get(url_config).text) - - active_config_index = 0 - if st.session_state.active_config: - active_config_name = st.session_state.active_config.get('name') - active_config_index = next((index for (index, config) in enumerate(configs.values()) if config["name"] == active_config_name), 0) - option = col1.selectbox("Select an option", tuple(configs), index=active_config_index, label_visibility="collapsed") - - if col2.button("Active", use_container_width=True): - st.session_state.item_id = None - body = { - "config_name": option - } - requests.post(url=url_active_config, json=body) - st.session_state.active_config = json.loads(requests.get(url_active_config).text) - - if st.session_state.active_config: - active_config_name = st.session_state.active_config.get('name') - col2.write(f"active config: {active_config_name}") - - - if st.session_state.active_config: - if col3.button("Trigger", use_container_width=True): - st.session_state.trigger = True - response = requests.post(url_trigger) - item_id = response.json().get("item_id") - st.session_state.item_id = item_id - col3.write(f"item id: {item_id}") - - # TODO: Add camera inputs - - columns = st.columns(2) - - if st.session_state.item_id and (st.session_state.active_config is not None): - - time.sleep(5) - - url_metadata = URL_ORCH + f"items/{st.session_state.item_id}" - response = requests.get(url_metadata) - metadata = response.json() - decision = metadata["decision"] - inferences = metadata["inferences"] - - print("decision", decision) - print("inferences", 
inferences) - - cameras = st.session_state.active_config["cameras"] - for i, camera in enumerate(cameras): - url_binaries = URL_ORCH + f"items/{st.session_state.item_id}/binaries/{camera}" - response = requests.get(url_binaries) - image = response.content - # If metadata is not empty, we plot the predictions - if filtering_items_that_have_predictions(metadata, camera): - image = Image.open(BytesIO(image)) - image = plot_predictions(image, camera, metadata) - columns[i].image(image, channels="BGR", width=450) - if inferences.get(camera): - columns[i].markdown(inferences[camera]) - - st.markdown( - f"
-<h1 style='text-align: center;'>{decision}</h1>
", - unsafe_allow_html=True, - ) - - # TODO: add prediction on picture - -# Exécution du script principal -if __name__ == "__main__": - main() diff --git a/edge_streamlit/app.py b/edge_streamlit/app.py index 548925ba..fd69e7ab 100644 --- a/edge_streamlit/app.py +++ b/edge_streamlit/app.py @@ -1,100 +1,117 @@ -from utils import display_camera_checkboxes, list_cameras -import streamlit as st -import requests import json -import cv2 +import os +import time +import requests +import streamlit as st +from prediction_boxes import filtering_items_that_have_predictions, plot_predictions +from PIL import Image +from io import BytesIO # Page configuration -st.set_page_config( - page_title="Mon Application Streamlit", - page_icon="🔦", - layout="wide" -) +st.set_page_config(page_title="VIO-edge", page_icon="🔦", layout="wide") -# Page content -st.title("VIO Edge") +URL_ORCH = os.getenv("ORCHESTRATOR_URL", "http://localhost:8000/api/v1/") -URL_ORCH = "http://localhost:8000/api/v1/" url_config = URL_ORCH + "configs" url_active_config = URL_ORCH + "configs/active" url_trigger = URL_ORCH + "trigger" + def main(): """ Fonction principale de l'application Streamlit. 
""" # Init variables - if "recording" not in st.session_state: - st.session_state.recording = False - if "selected_cameras" not in st.session_state: - st.session_state.selected_cameras = [] + active_config = json.loads(requests.get(url_active_config).text) + if active_config: + st.session_state.active_config = active_config + + if "active_config" not in st.session_state: + st.session_state.active_config = None if "trigger" not in st.session_state: st.session_state.trigger = False - if "image" not in st.session_state: - st.session_state.image = None - - col_1, col_2 = st.columns(2) + if "item_id" not in st.session_state: + st.session_state.item_id = None - configs = json.loads(requests.get(url_config).text) + col1, col2, col3 = st.columns(3) - option = col_1.selectbox("Select an option", tuple(configs), label_visibility="collapsed") + configs = json.loads(requests.get(url_config).text) - if col_2.button("Active Config"): - body = { - "config_name": option - } + active_config_index = 0 + if st.session_state.active_config: + active_config_name = st.session_state.active_config.get("name") + active_config_index = next( + ( + index + for (index, config) in enumerate(configs.values()) + if config["name"] == active_config_name + ), + 0, + ) + option = col1.selectbox( + "Select an option", + tuple(configs), + index=active_config_index, + label_visibility="collapsed", + ) + + if col2.button("Active", use_container_width=True): + st.session_state.item_id = None + body = {"config_name": option} requests.post(url=url_active_config, json=body) - active_config = None - active_config = json.loads(requests.get(url_active_config).text).get("name") - col_2.write(f"active config: {active_config}" ) - - # Sidebar parameters - st.sidebar.title("Configuration") - available_cameras = list_cameras() - selected_cameras = display_camera_checkboxes(available_cameras) - st.session_state.selected_cameras = selected_cameras - - if st.sidebar.button("Start/Stop Recording"): - 
st.session_state.recording = not st.session_state.recording - - if st.button("Trigger"): - st.session_state.trigger = True - response = requests.post(url_trigger) - item_id = response.json().get("item_id") - st.subheader(item_id) - - # Video capture logic - if st.session_state.recording and selected_cameras: - caps = {index: cv2.VideoCapture(index) for index in selected_cameras} - columns = st.columns(len(selected_cameras)) - frames_video = {index: columns[i].empty() for i, index in enumerate(selected_cameras)} - - while st.session_state.recording: - for index in selected_cameras: - ret, frame = caps[index].read() - if st.session_state.trigger: - st.session_state.image = frame - - if ret: - frames_video[index].image(frame, channels="BGR") - else: - frames_video[index].text(f"Camera {index} - Failed to capture video") - - if st.session_state.image is not None and st.session_state.trigger: - columns[index].image(st.session_state.image , channels="BGR") - # Convertir le tableau NumPy en bytes - # _, buffer = cv2.imencode('.jpg', st.session_state.image) - # img_byte_arr = BytesIO(buffer) - # response = requests.post(url_trigger, files={"image": ("filename", img_byte_arr, "image/jpeg")}) - # print(response.text) - # columns[index].write(response.text) - - st.session_state.trigger = False - - for cap in caps.values(): - cap.release() - - + st.session_state.active_config = json.loads( + requests.get(url_active_config).text + ) + + if st.session_state.active_config: + active_config_name = st.session_state.active_config.get("name") + col2.write(f"active config: {active_config_name}") + + if st.session_state.active_config: + if col3.button("Trigger", use_container_width=True): + st.session_state.trigger = True + response = requests.post(url_trigger) + item_id = response.json().get("item_id") + st.session_state.item_id = item_id + col3.write(f"item id: {item_id}") + + # TODO: Add camera inputs + + columns = st.columns(2) + + if st.session_state.item_id and 
(st.session_state.active_config is not None): + time.sleep(5) + + url_metadata = URL_ORCH + f"items/{st.session_state.item_id}" + response = requests.get(url_metadata) + metadata = response.json() + decision = metadata["decision"] + inferences = metadata["inferences"] + + print("decision", decision) + print("inferences", inferences) + + cameras = st.session_state.active_config["cameras"] + for i, camera in enumerate(cameras): + url_binaries = ( + URL_ORCH + f"items/{st.session_state.item_id}/binaries/{camera}" + ) + response = requests.get(url_binaries) + image = response.content + # If metadata is not empty, we plot the predictions + if filtering_items_that_have_predictions(metadata, camera): + image = Image.open(BytesIO(image)) + image = plot_predictions(image, camera, metadata) + columns[i].image(image, channels="BGR", width=450) + if inferences.get(camera): + columns[i].markdown(inferences[camera]) + + st.markdown( + f"
+<h1 style='text-align: center;'>{decision}</h1>
", + unsafe_allow_html=True, + ) + + # TODO: add prediction on picture # Exécution du script principal diff --git a/edge_streamlit/utils.py b/edge_streamlit/utils.py deleted file mode 100644 index a5398ce7..00000000 --- a/edge_streamlit/utils.py +++ /dev/null @@ -1,44 +0,0 @@ -import logging -from typing import List - -import cv2 -import streamlit as st - -logger = logging.getLogger(__name__) -logger.addHandler(logging.StreamHandler()) -logger.setLevel(logging.DEBUG) - - -def list_cameras() -> List[str]: - """ - Liste toutes les caméras disponibles sur le système. - """ - logger.info("Listing available cameras") - index = 0 - available_cameras = [] - while True: - cap = cv2.VideoCapture(index) - if cap.isOpened(): - available_cameras.append(f"Cam {index}") - else: - break - cap.release() - index += 1 - logger.debug(f"{len(available_cameras)} cameras found") - return available_cameras - - -def display_camera_checkboxes(available_cameras: List[str]) -> List[int]: - """ - Affiche les caméras disponibles sous forme de cases à cocher. - Retourne les indices des caméras sélectionnées. - """ - selected_cameras = [] - if len(available_cameras) == 0: - st.warning("No camera detected.") - else: - for i, camera_name in enumerate(available_cameras): - if st.sidebar.checkbox(camera_name, key=f"camera_{i}"): - selected_cameras.append(i) - logger.debug(f"Selected cameras are {selected_cameras}") - return selected_cameras