diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/detection_custom.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/detection_custom.py
index 567c150b6e59ce6cbd50973c54aec9d1e705deb1..1339cdf2c1f3b899fc910d2cbd5f177fc1b0cf13 100644
--- a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/detection_custom.py	
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/detection_custom.py	
@@ -20,8 +20,8 @@ image_path   = "./IMAGES/cone.jpg"
 video_path   = "./IMAGES/test.mp4"
 
 yolo = Load_Yolo_model()
-detect_image(yolo, image_path, "./IMAGES/cone_detect.jpg", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
+#detect_image(yolo, image_path, "./IMAGES/cone_detect.jpg", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
 #detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
-#detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
+detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
 
 #detect_video_realtime_mp(video_path, "Output.mp4", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0), realtime=False)
diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/MCL.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/MCL.py
index 0e645aa97daf33a47aee163c1c86853b4b136833..0cd3bc3a4e2776fd76c441aefe94d9b0699d4831 100644
--- a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/MCL.py	
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/MCL.py	
@@ -40,7 +40,7 @@ h_reel = 0.234  #Hauteur d'un plot
 y_0 = 1500      #Milieu de l'image
 
 dist_roue = 0.25    #Distance entre les roues avant et les roues arrieres
-dt = 0.1            #Frequence de calcul
+dt = 1/3.3            # Computation update period in seconds (~3.3 updates/s)
 
 
 # class position:
diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/car_interface.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/car_interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b099a6efae1482c78490fbb784abc2a0628f809
--- /dev/null
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/car_interface.py	
@@ -0,0 +1,23 @@
+import serial  # pySerial library
+import time
+
+def get_ser():
+    ser = serial.Serial(port="COM3", baudrate=115200, timeout=0.1, write_timeout=0.2)  # Open the port on COM3 at 115200 baud, 0.1 s read timeout, 0.2 s write timeout
+    #time.sleep(3)
+    return ser
+
+def start_ser(ser):
+    ser.close() # Close the port first, in case it is already open elsewhere
+    ser.open()  # Open the port
+
+def close_ser(ser):
+    ser.close()
+
+def send_infos(ser, a, b, c):
+    n = str(a)+' '+str(b)+' '+str(c)  # the three values are sent as one space-separated line
+    #print("input: "+str(n))
+    ser.write((str(n)+'\n').encode("utf-8"))
+    cc = str(ser.readline())
+    #print("output: "+cc[2:][:-5])
+    #print("Success")
+    return cc[2:][:-5]  # strip the "b'" prefix and trailing "\r\n'" of the bytes repr to recover the echoed text
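+
+# Minimal usage sketch (the COM port, baud rate and the meaning of the three
+# fields are firmware-specific assumptions; the values below are illustrative only):
+#   ser = get_ser()
+#   start_ser(ser)
+#   echo = send_infos(ser, 10, 0, 0)   # e.g. throttle, steering command, spare field
+#   close_ser(ser)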
\ No newline at end of file
diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py
index a66669f29c215a553f39d734b5f00105bd307538..7363448fa6c152822aa5d716f65386d1130c0924 100644
--- a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py	
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py	
@@ -18,9 +18,13 @@ import tensorflow as tf
 from yolov3.configs import *
 from yolov3.yolov4 import *
 from tensorflow.python.saved_model import tag_constants
-from track_2_generator import create_track
-from pid_controller import PidController
+
+import matplotlib.pyplot as plt
+
+from yolov3.track_2_generator import create_track
+from yolov3.pid_controller import PidController
 from shapely.geometry import Polygon, Point, LineString
+from yolov3.car_interface import get_ser, start_ser, close_ser, send_infos
 
 from MCL import get_position
 
@@ -34,9 +38,6 @@ class liste_pos:
     def update_liste(self,new_liste):
         self.liste = new_liste
 
-liste_pos_car = liste_pos((278/225, 200/225,0), 50)
-
-
 def load_yolo_weights(model, weights_file):
     tf.keras.backend.clear_session() # used to reset layer names
     # load Darknet original weights to TensorFlow model
@@ -145,7 +146,7 @@ def image_preprocess(image, target_size, gt_boxes=None):
         return image_paded, gt_boxes
 
 
-def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False):   
+def draw_bbox(image, bboxes, liste_pos_car, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False, ser=None, previous_command=(0,0)):
     NUM_CLASS = read_class_names(CLASSES)
     num_classes = len(NUM_CLASS)
     image_h, image_w, _ = image.shape
@@ -171,7 +172,7 @@ def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_co
         # put object rectangle
         cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick*2)
         
-        print((x1, y1),(x2, y2))
+        #print((x1, y1),(x2, y2))
 
         """Determinate position"""
         
@@ -201,28 +202,43 @@ def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_co
             
     
     #Determination de la position de la voiture
-    pos_car, liste_pos_car_temp = get_position(bboxes, commande, liste_pos_car.liste)
+    # speed calibration note: with throttle 10, speed ≈ 3.8/40 m/s; the factors below presumably
+    # convert the raw commands to m/s and to a steering angle in radians (35° full scale)
+    pos_car, liste_pos_car_temp = get_position(bboxes, (previous_command[0]*3.7/170, previous_command[1]/200*35/360*2*3.1415), liste_pos_car.liste)
     liste_pos_car.update_liste(liste_pos_car_temp)
 
     """Algorithme de conduite"""
     """x_car, y_car"""
+    x_car, y_car = pos_car[0], pos_car[1]
+    print(f"x: {x_car}, y: {y_car}")
+    # shift and scale the localized position into the track reference frame
+    # (the start pose ≈ (278/225, 200/225) maps onto the track point (1, 0.5))
+    x_car, y_car = (x_car-1.23855)/2.4+1, (y_car-0.889)/2.4+0.5
+    print(f"rescaled x: {x_car}, y: {y_car}")
     
     """loading trajectory"""#TODO: load it only once
     track_points = create_track(0.5,0,0, (0,0))[:-1]+[(1,0.5)]
     track = Polygon(track_points)
     
+    plt.plot(x_car, y_car,'+')
+    plt.plot([i for i,j in track_points], [j for i,j in track_points])
+    plt.show()
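+    # (debug plot: plt.show() blocks until the window is closed, which pauses the detection loop)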
+
     """calculate distance"""
     front_center = Point(np.array([x_car,y_car]))
     error = track.distance(front_center)
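+    # note: shapely's Polygon.distance() is 0 for points inside the polygon, so this error is only nonzero once the car is outside the track outline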
     
     """calculate PID response"""
-    pid = PidController(1,1,1) #P, I, D
+    pid = PidController(50,0.005,5) #P, I, D
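+    # note: a new PidController is instantiated on every frame, so integral/derivative state presumably does not persist between frames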
     commande = pid.get_control(error)
+    commande -= 30
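+    # constant offset on the command, presumably a steering trim / centering correction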
+    
+    print(f"command: {commande}")
     
     """sending info to car"""
+    try:
+        print(f"poten: {send_infos(ser, 10, commande, 0)}")
+    except Exception:
+        pass  # keep the detection loop running even if the serial link fails
     
-
-    return image
+    return image, commande, liste_pos_car
 
 
 def bboxes_iou(boxes1, boxes2):
@@ -545,7 +561,7 @@ def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLAS
         image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
         # CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
         
-        print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
+        #print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
         if output_path != '': out.write(image)
         if show:
             cv2.imshow('output', image)
@@ -567,7 +583,12 @@ def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_
         fps = int(vid.get(cv2.CAP_PROP_FPS))
         codec = cv2.VideoWriter_fourcc(*'XVID')
         out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
-    
+
+    liste_pos_car = liste_pos((278/225, 200/225,3.1415/2), 50)
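+    # initial pose seed for the localization: (x, y) ≈ (278/225, 200/225) with heading π/2; the 50 is presumably the size of the stored pose/particle list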
+
+    ser = get_ser()
+    start_ser(ser)
+    command = 0  # last steering command sent; fed back into draw_bbox as previous_command
     
     while True:
         ret, frame = vid.read()
@@ -612,7 +633,7 @@ def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_
         
         print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
 
-        frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
+        frame, command, liste_pos_car = draw_bbox(original_frame, bboxes, liste_pos_car, CLASSES=CLASSES, rectangle_colors=rectangle_colors, ser=ser, previous_command=(10, command))
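+        # the steering command returned by draw_bbox is fed back as previous_command on the next frame so the motion model can propagate the pose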
         # CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
         image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30),
                           cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
diff --git a/PAR 152/carte_cones.py b/PAR 152/carte_cones.py
index b3bc1512939e17d085cca5f1421d6dc37e4b786a..34a4efc75a7e8cdb232dc44c991494a2f77f9bae 100644
--- a/PAR 152/carte_cones.py	
+++ b/PAR 152/carte_cones.py	
@@ -82,7 +82,7 @@ for pt in coord_ext:
 plt.figure(1)
 plt.plot(coord_x_ext, coord_y_ext,'+')
 plt.plot(coord_x_int, coord_y_int,'+')
-plt.arrow(x,y,0.1*cos(theta),0.1*sin(theta), width=0.01)
+#plt.arrow(x,y,0.1*cos(theta),0.1*sin(theta), width=0.01)
 
 tab = motion_update((0.1,-3.14/6), (x,y,theta))
 x2,y2,theta2 = tab[0]