ayad33 committed on
Commit
14cec2f
·
verified ·
1 Parent(s): bb672f7

Upload 8 files

Browse files
Files changed (3) hide show
  1. back +13 -0
  2. main.py +65 -54
  3. model.py +20 -20
back ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
FROM python:3.11-slim-bullseye

WORKDIR /backend

# System libraries required by OpenCV (cv2) at runtime. Installed in their
# own layer, before the Python deps, and with the apt cache removed so the
# image stays small and a requirements.txt change does not re-run apt.
RUN apt-get update -y \
    && apt-get install -y --no-install-recommends libgl1 libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

COPY backend/requirements.txt .

RUN pip install --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

COPY backend .

# ENTRYPOINT ["uvicorn", "main:app", "--port", "5000", "--host", "0.0.0.0", "--reload", "--reload-dir", "/backend"]
# CMD supplies the default value for main.py's -f/--fetch flag; override it
# at `docker run` time with `true` to force a fresh model download.
ENTRYPOINT ["python", "main.py", "-f"]
CMD ["false"]
main.py CHANGED
@@ -7,16 +7,27 @@ from PIL import Image
7
  from io import BytesIO
8
  from model import fetch_model
9
  from ultralytics import YOLO
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  # Initialize FastAPI app
12
  app = FastAPI()
13
 
14
- # Load the YOLO model
15
- if not os.path.exists(os.path.join('app_temp', 'model', 'best.pt')):
16
- model = fetch_model() # Load YOLO model
17
- else:
18
- model = YOLO(os.path.join('app_temp', 'model', 'best.pt'))
19
-
20
  def preprocess_image(image_bytes):
21
  """Convert image bytes to a PIL Image."""
22
  image = Image.open(BytesIO(image_bytes)).convert("RGB")
@@ -45,68 +56,68 @@ async def predict_image(file: UploadFile = File(...)):
45
  # Return the image file
46
  return StreamingResponse(io_buf, media_type="image/jpeg")
47
 
48
- @app.post("/process/video")
49
- async def predict_video(file: UploadFile = File(...)):
50
- temp_input = "temp_input.mp4"
51
- temp_output = "temp_output.mp4"
52
 
53
- with open(temp_input, "wb") as f:
54
- f.write(await file.read())
55
 
56
- # Open the video
57
- cap = cv2.VideoCapture(temp_input)
58
 
59
- # Get video properties
60
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
61
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
62
- fps = cap.get(cv2.CAP_PROP_FPS)
63
 
64
- # Create VideoWriter object
65
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
66
- out = cv2.VideoWriter(temp_output, fourcc, fps, (width, height))
67
 
68
- # Process each frame
69
- while cap.isOpened():
70
- ret, frame = cap.read()
71
- if not ret:
72
- break
73
 
74
- # Convert to RGB for YOLO
75
- rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
76
 
77
- # Run YOLO prediction
78
- results = model(rgb_frame)[0]
79
 
80
- # Get frame with detection boxes
81
- result_frame = results.plot()
82
 
83
- # Convert back to BGR for OpenCV
84
- result_frame_bgr = cv2.cvtColor(result_frame, cv2.COLOR_RGB2BGR)
85
 
86
- # Write to output video
87
- out.write(result_frame_bgr)
88
 
89
- # Release resources
90
- cap.release()
91
- out.release()
92
 
93
- # Return the video file as response
94
- return FileResponse(temp_output, media_type="video/mp4", filename="result.mp4")
95
 
96
- # @app.post("/process/video")
97
- # async def predict_video(file: UploadFile = File(...)):
98
- # """Predict objects in a video file."""
99
- # video_bytes = await file.read()
100
- # with open("video.mp4", "wb") as f:
101
- # f.write(video_bytes)
102
 
103
- # # Perform inference on the video
104
- # # model.predict(video_path, save = True, device = 'cuda', project = os.path.join('app_temp', 'results'), name = 'avi', verbose = False, exist_ok = True)
105
- # model.predict("video.mp4", device = 'cuda', verbose = False)
106
 
107
- # # Return the video file
108
- # return FileResponse(os.path.join('app_temp', 'result', "vid.avi"), filename="result.mp4")
109
 
110
 
111
- # if __name__ == "__main__":
112
- # uvicorn.run("main:app", port=5000, reload=True)
 
7
  from io import BytesIO
8
  from model import fetch_model
9
  from ultralytics import YOLO
10
import argparse

# CLI flag so the Docker ENTRYPOINT can decide whether to re-pull the model.
# The value arrives from the command line as a *string* ('true'/'false');
# the original default was the Python boolean False, which only worked by
# accident against the string comparison below. Use a string default and
# `choices` so typos fail fast at startup.
parser = argparse.ArgumentParser(description='Smart Vision')
parser.add_argument(
    '-f', '--fetch',
    choices=['true', 'false'],
    default='false',
    required=False,
    help='Do you want to fetch the latest model from the registry? [true, false]',
)
args = parser.parse_args()

# Download the weights when explicitly requested, or when no local copy exists.
if args.fetch == 'true' or not os.path.isfile('best.pt'):
    fetch_model()
model = YOLO('best.pt')


# Initialize FastAPI app
app = FastAPI()
30
 
 
 
 
 
 
 
31
  def preprocess_image(image_bytes):
32
  """Convert image bytes to a PIL Image."""
33
  image = Image.open(BytesIO(image_bytes)).convert("RGB")
 
56
  # Return the image file
57
  return StreamingResponse(io_buf, media_type="image/jpeg")
58
 
59
+ # @app.post("/process/video")
60
+ # async def predict_video(file: UploadFile = File(...)):
61
+ # temp_input = "temp_input.mp4"
62
+ # temp_output = "temp_output.mp4"
63
 
64
+ # with open(temp_input, "wb") as f:
65
+ # f.write(await file.read())
66
 
67
+ # # Open the video
68
+ # cap = cv2.VideoCapture(temp_input)
69
 
70
+ # # Get video properties
71
+ # width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
72
+ # height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
73
+ # fps = cap.get(cv2.CAP_PROP_FPS)
74
 
75
+ # # Create VideoWriter object
76
+ # fourcc = cv2.VideoWriter_fourcc(*'mp4v')
77
+ # out = cv2.VideoWriter(temp_output, fourcc, fps, (width, height))
78
 
79
+ # # Process each frame
80
+ # while cap.isOpened():
81
+ # ret, frame = cap.read()
82
+ # if not ret:
83
+ # break
84
 
85
+ # # Convert to RGB for YOLO
86
+ # rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
87
 
88
+ # # Run YOLO prediction
89
+ # results = model(rgb_frame)[0]
90
 
91
+ # # Get frame with detection boxes
92
+ # result_frame = results.plot()
93
 
94
+ # # Convert back to BGR for OpenCV
95
+ # result_frame_bgr = cv2.cvtColor(result_frame, cv2.COLOR_RGB2BGR)
96
 
97
+ # # Write to output video
98
+ # out.write(result_frame_bgr)
99
 
100
+ # # Release resources
101
+ # cap.release()
102
+ # out.release()
103
 
104
+ # # Return the video file as response
105
+ # return FileResponse(temp_output, media_type="video/mp4", filename="result.mp4")
106
 
107
+ @app.post("/process/video")
108
+ async def predict_video(file: UploadFile = File(...)):
109
+ """Predict objects in a video file."""
110
+ video_bytes = await file.read()
111
+ with open("video.mp4", "wb") as f:
112
+ f.write(video_bytes)
113
 
114
+ # Perform inference on the video
115
+ model.predict('video.mp4', save = True, device = 'cuda', project = 'results', name = 'avi', verbose = False, exist_ok = True)
116
+ # model.predict("video.mp4", device = 'cuda', verbose = False)
117
 
118
+ # Return the video file
119
+ return FileResponse("results/avi/video.avi", filename="result.avi", media_type="video/avi")
120
 
121
 
122
+ if __name__ == "__main__":
123
+ uvicorn.run("main:app", port=5000, reload=True, host='0.0.0.0')
model.py CHANGED
@@ -1,8 +1,8 @@
1
  import mlflow
2
  from utils import read_yaml, mkdirs
3
- from os.path import join, exists, dirname
4
  from os import environ
5
- from ultralytics import YOLO
6
  from dotenv import load_dotenv
7
  import dagshub
8
 
@@ -12,24 +12,24 @@ dagshub.init(repo_owner='3bdullah3yad', repo_name='SmartVision', mlflow=True)
12
 
13
  # model_path = join('app_temp', 'model', 'best.pt')
14
 
15
- def fetch_model(repull=False):
16
- if not exists('best.pt') or repull:
17
- # mkdirs([model_path])
18
- params = read_yaml("params.yaml")
19
- cfg = read_yaml("config.yaml")
20
-
21
- if params.inference.exp_name == 'from_cfg_train':
22
- experiment = dict(mlflow.get_experiment_by_name(cfg.train.exp_name))
23
- else:
24
- experiment = dict(mlflow.get_experiment_by_name(params.inference.exp_name))
25
 
26
- run_id = params.inference.run_id
27
- if run_id == 'latest':
28
- experiment_id = experiment['experiment_id']
29
- df = mlflow.search_runs([experiment_id], order_by=["end_time DESC"])
30
- run_id = df['run_id'][0]
31
 
32
- # mlflow.artifacts.download_artifacts(f'runs:/{run_id}/weights/best.pt', dst_path = dirname(model_path))
33
- mlflow.artifacts.download_artifacts(f'runs:/{run_id}/weights/best.pt', dst_path = dirname('best.pt'))
 
34
 
35
- return YOLO('best.pt')
 
1
  import mlflow
2
  from utils import read_yaml, mkdirs
3
+ # from os.path import join, exists, dirname
4
  from os import environ
5
+ # from ultralytics import YOLO
6
  from dotenv import load_dotenv
7
  import dagshub
8
 
 
12
 
13
  # model_path = join('app_temp', 'model', 'best.pt')
14
 
15
def fetch_model():
    """Download the latest best.pt weights from the MLflow registry into CWD.

    Resolves the experiment name from params.yaml (or from config.yaml when
    the inference experiment is 'from_cfg_train'), picks the run — either the
    one pinned in params or the most recently finished one — and downloads
    its weights/best.pt artifact into the current directory.

    Raises:
        ValueError: if the experiment does not exist or contains no runs.
    """
    # mkdirs([model_path])
    params = read_yaml("params.yaml")
    cfg = read_yaml("config.yaml")

    # 'from_cfg_train' means: reuse the experiment name the training config used.
    if params.inference.exp_name == 'from_cfg_train':
        exp_name = cfg.train.exp_name
    else:
        exp_name = params.inference.exp_name

    experiment = mlflow.get_experiment_by_name(exp_name)
    if experiment is None:
        # dict(None) in the original raised an opaque TypeError here.
        raise ValueError(f"MLflow experiment not found: {exp_name!r}")
    experiment = dict(experiment)

    run_id = params.inference.run_id
    if run_id == 'latest':
        experiment_id = experiment['experiment_id']
        # Most recently ended run comes first.
        df = mlflow.search_runs([experiment_id], order_by=["end_time DESC"])
        if df.empty:
            raise ValueError(f"No runs found in experiment {exp_name!r}")
        run_id = df.loc[0, 'run_id']

    # mlflow.artifacts.download_artifacts(f'runs:/{run_id}/weights/best.pt', dst_path = dirname(model_path))
    mlflow.artifacts.download_artifacts(f'runs:/{run_id}/weights/best.pt', dst_path='.')
    print(f"\n\nModel downloaded from MLFlow, Run ID: {run_id}\n\n")