diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/detection_custom.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/detection_custom.py
index d0b0931b7d092797e28ae5094e21e1e2c3eba1a5..85cb50e7f38a5f4520422f199e900e484fa09459 100644
--- a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/detection_custom.py	
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/detection_custom.py	
@@ -20,9 +20,9 @@ image_path   = "./Calibration_cam/2m.jpg"
 video_path   = "./IMAGES/test.mp4"
 
 yolo = Load_Yolo_model()
-_, boxes = detect_image(yolo, image_path, "./Calibration_cam/0.5_detect.jpg", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
+#_, boxes = detect_image(yolo, image_path, "./Calibration_cam/0.5_detect.jpg", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
 #detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
-#detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
+detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
 
 #detect_video_realtime_mp(video_path, "Output.mp4", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0), realtime=False)
 
diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py
index afdd82e4044601404d014daddd222e8023be8109..aa5ab489d11dc81604a19143c9abea30c2471f0a 100644
--- a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py	
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py	
@@ -210,7 +210,7 @@ def draw_bbox(image, bboxes,liste_pos_car=liste_pos((0,0,0),1), CLASSES=YOLO_COC
     """x_car, y_car"""
     x_car, y_car = pos_car[0], pos_car[1]
     print(f"x: {x_car}, y: {y_car}")
-    x_car, y_car = (x_car-1.23855)/2.4+1, (y_car-0.889)/2.4+0.5
+    x_car, y_car = (x_car-278/225)/2.4+1, (y_car-200/225)/2.4+0.5
     print(f"recalé x: {x_car}, y: {y_car}")
     
     """loading trajectory"""#TODO: load it only once
@@ -219,7 +219,7 @@ def draw_bbox(image, bboxes,liste_pos_car=liste_pos((0,0,0),1), CLASSES=YOLO_COC
     
     plt.plot(x_car, y_car,'+')
     for x,y in zip(cone_x, cone_y):
-        x, y= (x-1.23855)/2.4+1, (y-0.889)/2.4+0.5
+        x, y= (x-278/225)/2.4+1, (y-200/225)/2.4+0.5
         plt.plot(x,y,'*')
     plt.plot([i for i,j in track_points], [j for i,j in track_points])
     plt.show()
@@ -589,7 +589,7 @@ def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_
         codec = cv2.VideoWriter_fourcc(*'XVID')
         out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
 
-    liste_pos_car = liste_pos((1, 0.5,3.1415/2), 50)
+    liste_pos_car = liste_pos((278/225, 200/225,3.1415/2), 50)
 
     ser = get_ser()
     start_ser(ser)
@@ -598,7 +598,8 @@ def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_
     
     while True:
         ret, frame = vid.read()
-
+        frame = np.rot90(frame, k=3, axes=(0, 1))
+        
         try:
             original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)