Upload 8 files
Dockerfile
ADDED
@@ -0,0 +1,13 @@
+FROM python:3.11-slim-bullseye
+
+WORKDIR /backend
+
+COPY backend/requirements.txt .
+
+RUN apt-get -y update && pip install --upgrade pip && pip install --no-cache-dir -r requirements.txt && apt-get -y install libgl1 libglib2.0-0
+
+COPY backend .
+
+# ENTRYPOINT ["uvicorn", "main:app", "--port", "5000", "--host", "0.0.0.0", "--reload", "--reload-dir", "/backend"]
+ENTRYPOINT ["python", "main.py", "-f"]
+CMD ["false"]
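Note: libgl1 and libglib2.0-0 are the shared libraries opencv-python typically needs at runtime on a slim Debian image. The ENTRYPOINT/CMD pair makes the default container command `python main.py -f false`, so the argument passed after the image name at `docker run` time overrides the CMD value without rebuilding. A minimal sketch of how that value could be consumed on the Python side, mirroring the argparse setup in the main.py diff below (the lowercase string comparison is an illustrative assumption, not part of this commit):

import argparse

# Hypothetical standalone sketch: the value injected by CMD ("false" by default,
# or whatever trailing argument is given to `docker run`) arrives as a string here.
parser = argparse.ArgumentParser(description='Smart Vision')
parser.add_argument('-f', '--fetch', required=False, default='false',
                    help='Fetch the latest model from the registry? [true, false]')
args = parser.parse_args()

# Compare as a lowercase string, since CMD supplies "true"/"false" as text.
should_fetch = str(args.fetch).lower() == 'true'
print(f"fetch model from registry: {should_fetch}")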
main.py
CHANGED
@@ -7,16 +7,27 @@ from PIL import Image
 from io import BytesIO
 from model import fetch_model
 from ultralytics import YOLO
+import argparse
+
+parser = argparse.ArgumentParser(description='Smart Vision')
+parser.add_argument('-f','--fetch', help='Do you want to fetch the latest model from the registry? [true, false]', required=False, default=False)
+args = parser.parse_args()
+
+if args.fetch == 'true' or not os.path.isfile('best.pt'):
+    fetch_model()
+model = YOLO('best.pt')
+
+# # Load the YOLO model
+# if not os.path.exists(os.path.join('app_temp', 'model', 'best.pt')):
+#     model = fetch_model()  # Load YOLO model
+# else:
+#     model = YOLO(os.path.join('app_temp', 'model', 'best.pt'))
+
 
 # Initialize FastAPI app
 app = FastAPI()
 
-# Load the YOLO model
-if not os.path.exists(os.path.join('app_temp', 'model', 'best.pt')):
-    model = fetch_model()  # Load YOLO model
-else:
-    model = YOLO(os.path.join('app_temp', 'model', 'best.pt'))
-
 def preprocess_image(image_bytes):
     """Convert image bytes to a PIL Image."""
     image = Image.open(BytesIO(image_bytes)).convert("RGB")

@@ -45,68 +56,68 @@ async def predict_image(file: UploadFile = File(...)):
     # Return the image file
     return StreamingResponse(io_buf, media_type="image/jpeg")
 
-@app.post("/process/video")
-async def predict_video(file: UploadFile = File(...)):
+# @app.post("/process/video")
+# async def predict_video(file: UploadFile = File(...)):
+#     temp_input = "temp_input.mp4"
+#     temp_output = "temp_output.mp4"

+#     with open(temp_input, "wb") as f:
+#         f.write(await file.read())

+#     # Open the video
+#     cap = cv2.VideoCapture(temp_input)

+#     # Get video properties
+#     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+#     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+#     fps = cap.get(cv2.CAP_PROP_FPS)

+#     # Create VideoWriter object
+#     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+#     out = cv2.VideoWriter(temp_output, fourcc, fps, (width, height))

+#     # Process each frame
+#     while cap.isOpened():
+#         ret, frame = cap.read()
+#         if not ret:
+#             break

+#         # Convert to RGB for YOLO
+#         rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

+#         # Run YOLO prediction
+#         results = model(rgb_frame)[0]

+#         # Get frame with detection boxes
+#         result_frame = results.plot()

+#         # Convert back to BGR for OpenCV
+#         result_frame_bgr = cv2.cvtColor(result_frame, cv2.COLOR_RGB2BGR)

+#         # Write to output video
+#         out.write(result_frame_bgr)

+#     # Release resources
+#     cap.release()
+#     out.release()

+#     # Return the video file as response
+#     return FileResponse(temp_output, media_type="video/mp4", filename="result.mp4")

+@app.post("/process/video")
+async def predict_video(file: UploadFile = File(...)):
+    """Predict objects in a video file."""
+    video_bytes = await file.read()
+    with open("video.mp4", "wb") as f:
+        f.write(video_bytes)

+    # Perform inference on the video
+    model.predict('video.mp4', save = True, device = 'cuda', project = 'results', name = 'avi', verbose = False, exist_ok = True)
+    # model.predict("video.mp4", device = 'cuda', verbose = False)

+    # Return the video file
+    return FileResponse("results/avi/video.avi", filename="result.avi", media_type="video/avi")


+if __name__ == "__main__":
+    uvicorn.run("main:app", port=5000, reload=True, host='0.0.0.0')
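A hedged usage sketch for exercising the service defined in main.py: the host and port follow the uvicorn.run call above and the /process/video route comes from the decorator, while the /process/image route name and the sample file names are assumptions for illustration, not taken from the commit.

import requests

BASE = "http://localhost:5000"  # matches uvicorn.run(..., port=5000, host='0.0.0.0')

# Annotated image: POST a file to the image endpoint (route name assumed)
# and save the JPEG streamed back by StreamingResponse.
with open("sample.jpg", "rb") as f:
    resp = requests.post(f"{BASE}/process/image", files={"file": f})
resp.raise_for_status()
with open("result.jpg", "wb") as out:
    out.write(resp.content)

# Annotated video: POST a file to /process/video and save the AVI
# returned by FileResponse.
with open("sample.mp4", "rb") as f:
    resp = requests.post(f"{BASE}/process/video", files={"file": f})
resp.raise_for_status()
with open("result.avi", "wb") as out:
    out.write(resp.content)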
model.py
CHANGED
@@ -1,8 +1,8 @@
 import mlflow
 from utils import read_yaml, mkdirs
-from os.path import join, exists, dirname
+# from os.path import join, exists, dirname
 from os import environ
-from ultralytics import YOLO
+# from ultralytics import YOLO
 from dotenv import load_dotenv
 import dagshub
 

@@ -12,24 +12,24 @@ dagshub.init(repo_owner='3bdullah3yad', repo_name='SmartVision', mlflow=True)
 
 # model_path = join('app_temp', 'model', 'best.pt')
 
-def fetch_model(
-    experiment = dict(mlflow.get_experiment_by_name(params.inference.exp_name))
-    return YOLO('best.pt')
+def fetch_model():
+    # mkdirs([model_path])
+    params = read_yaml("params.yaml")
+    cfg = read_yaml("config.yaml")
+
+    if params.inference.exp_name == 'from_cfg_train':
+        experiment = dict(mlflow.get_experiment_by_name(cfg.train.exp_name))
+    else:
+        experiment = dict(mlflow.get_experiment_by_name(params.inference.exp_name))
+
+    run_id = params.inference.run_id
+    if run_id == 'latest':
+        experiment_id = experiment['experiment_id']
+        df = mlflow.search_runs([experiment_id], order_by=["end_time DESC"])
+        run_id = df['run_id'][0]
+
+    # mlflow.artifacts.download_artifacts(f'runs:/{run_id}/weights/best.pt', dst_path = dirname(model_path))
+    mlflow.artifacts.download_artifacts(f'runs:/{run_id}/weights/best.pt', dst_path = '.')
+    print(f"\n\nModel downloaded from MLFlow, Run ID: {run_id}\n\n")
+
+    # return YOLO('best.pt')