diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/IMAGES/cone_1.jpg b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/IMAGES/cone_1.jpg
deleted file mode 100644
index a240c9eeff112a5221e50166a21713154d02c8fc..0000000000000000000000000000000000000000
Binary files a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/IMAGES/cone_1.jpg and /dev/null differ
diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/IMAGES/product-image-1178993187_480x480.webp b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/IMAGES/product-image-1178993187_480x480.webp
deleted file mode 100644
index 407747b3a4273db5a60a9f8f40fd7b1141f33fa3..0000000000000000000000000000000000000000
Binary files a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/IMAGES/product-image-1178993187_480x480.webp and /dev/null differ
diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/MCL.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/MCL.py
index 4838cc52ca89390ea7c9f9e00d1e35aa4414baf2..0e645aa97daf33a47aee163c1c86853b4b136833 100644
--- a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/MCL.py	
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/MCL.py	
@@ -1,17 +1,17 @@
-import os
-os.environ['CUDA_VISIBLE_DEVICES'] = '0'
-import cv2
-import numpy as np
-import tensorflow as tf
-from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
-from yolov3.configs import *
-import matplotlib.pyplot as plt
+#import os
+#os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+#import cv2
+#import tensorflow as tf
+#from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
+#from yolov3.configs import *
+#import matplotlib.pyplot as plt
 
+import numpy as np
 import random as rd
-from math import cos, sin, tan, pi
+from math import cos, sin, tan
 
 
-yolo = Load_Yolo_model()
+#yolo = Load_Yolo_model()
 
 ### Carte ###
 
@@ -28,6 +28,7 @@ coord_int = [(i/diametre , j/diametre) for i,j in zip(pixel_x_int, pixel_y_int)]
 ### Paramètres ###
 sigma_position = 0.02
 sigma_direction = 0.05
+seuil_cone = 0.6    # minimum detection confidence for a box to be kept as a cone
 
 
 dep_x, dep_y, dep_theta = 1.314, 1.162, 3.14/3.6
@@ -38,8 +39,8 @@ F = 3423        #Focale camera
 h_reel = 0.234  #Hauteur d'un plot
 y_0 = 1500      #Milieu de l'image
 
-dist_roue = 0.2    #Distance entre les roues avant et les roues arrieres
-dt = 1            #Frequence de calcul
+dist_roue = 0.25    # Wheelbase: distance between the front and rear wheels (m)
+dt = 0.1            # Time step between two filter updates (s)
 
 
 # class position:
@@ -156,7 +157,7 @@ def sensor_update(observation, position):
         obs_x.append(i[0])
         obs_y.append(i[1])
             
-    return distance_Chamfer(cones_vu_x, cones_vu_y, obs_x, obs_y)
+    return 1/distance_Chamfer(cones_vu_x, cones_vu_y, obs_x, obs_y)    # particle weight: inverse Chamfer distance (better match => larger weight)
 
 
 
@@ -194,19 +195,7 @@ def low_variance_resampling(X,W):
     return X_t
 
 
-def get_position(u_t,pos):
-    
-    
-    ### Récupération de la photo
-    
-    image_path   = "./IMAGES/IMG_2345.JPEG"
-    video_path   = "./IMAGES/test.mp4"
-    
-
-    boxes = detect_image(yolo, image_path, "./IMAGES/111.jpg", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
-    #detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
-    #detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
-    #detect_video_realtime_mp(video_path, "Output.mp4", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0), realtime=False)
+def get_position(boxes,commande,pos):
     
     
     # Positionnement des plots à partir des boites
@@ -214,9 +203,10 @@ def get_position(u_t,pos):
     liste_x =[]
     liste_y =[]
     for i in boxes:
-        x,y = boite2coord(i[0],i[1],i[2],i[3])
-        liste_x.append(x)
-        liste_y.append(y)
+        if i[4] >= seuil_cone:    # keep only confident detections
+            x,y = boite2coord(i[0],i[1],i[2],i[3])
+            liste_x.append(x)
+            liste_y.append(y)
         
     z_t = []
     for i in range(len(liste_x)):
@@ -224,7 +214,7 @@ def get_position(u_t,pos):
     
     
     #Positionnement
-    pos, W = particle_filter(pos, u_t, z_t)
+    pos, W = particle_filter(pos, commande, z_t)
     
     pos_calc = [0,0,0]
     for i in range(len(pos)):
@@ -235,7 +225,7 @@ def get_position(u_t,pos):
     return pos_calc, pos
     
 
-
+'''
 if __name__ == "__main__":
     # pos = [(1.314, 1.162, 3.14/3.6) for i in range(10)]
     # #pos = [(2.5*rd.random(), 1.5*rd.random(), 6.28*rd.random()) for i in range(50)]
@@ -305,4 +295,6 @@ if __name__ == "__main__":
     
     
     plt.legend()
-    plt.show()
\ No newline at end of file
+    plt.show()
+    
+    '''
\ No newline at end of file
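
For reference, a short worked example of the box-to-coordinates conversion boite2coord used by get_position; the pixel values are illustrative and only the constants F = 3423, h_reel = 0.234 and y_0 = 1500 defined in MCL.py are assumed (MCL.py must be importable from the working directory):

# Worked example of boite2coord (illustrative pixel values only)
from MCL import boite2coord

# A cone whose box is 150 px tall and centred 100 px to the right of the image
# centre y_0 = 1500: distance ~ F*h_reel/150 ~ 5.34, lateral offset ~ -0.16
# (negative = to the right of the camera axis).
x_r, y_r = boite2coord(1550, 800, 1650, 950)
print(round(x_r, 2), round(y_r, 2))    # ~ 5.34 -0.16
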
diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/MCL.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/MCL.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e645aa97daf33a47aee163c1c86853b4b136833
--- /dev/null
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/MCL.py	
@@ -0,0 +1,300 @@
+#import os
+#os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+#import cv2
+#import tensorflow as tf
+#from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
+#from yolov3.configs import *
+#import matplotlib.pyplot as plt
+
+import numpy as np
+import random as rd
+from math import cos, sin, tan
+
+
+#yolo = Load_Yolo_model()
+
+### Map: pixel positions of the outer and inner rings of cones ###
+
+pixel_x_ext, pixel_y_ext = [242, 220, 165, 110, 63, 33, 22, 34, 63, 110, 165, 220, 243, 310, 334, 388, 443, 490, 521, 531, 520, 489, 443, 388, 333, 310], [76, 64, 52, 64, 95, 141, 196, 252, 298, 330, 340, 328, 318, 316, 328, 339, 329, 298, 251, 196, 142, 95, 64, 53, 64, 77]
+pixel_x_int, pixel_y_int = [245, 238, 222, 196, 166, 134, 108, 91, 85, 90, 109, 134, 165, 196, 222, 239, 308, 314, 332, 358, 388, 419, 445, 462, 468, 462, 445, 419, 388, 359, 332, 314], [201, 167, 140, 123, 116, 123, 140, 165, 195, 228, 253, 270, 277, 270, 253, 227, 200, 226, 253, 270, 277, 270, 253, 228, 197, 166, 140, 122, 117, 123, 140, 166]
+diametre = 225                   # pixel-to-map scale factor: pixel coordinates are divided by it
+centre_x, centre_y = 278, 200    # centre of the map, in pixels
+coord_x_ext, coord_y_ext = [i/diametre for i in pixel_x_ext], [i/diametre for i in pixel_y_ext]
+coord_x_int, coord_y_int = [i/diametre for i in pixel_x_int], [i/diametre for i in pixel_y_int]
+
+coord_ext = [(i/diametre , j/diametre) for i,j in zip(pixel_x_ext, pixel_y_ext)]
+coord_int = [(i/diametre , j/diametre) for i,j in zip(pixel_x_int, pixel_y_int)]
+
+### Parameters ###
+sigma_position = 0.02    # std. dev. of the position noise added to each particle
+sigma_direction = 0.05   # std. dev. of the heading noise added to each particle (rad)
+seuil_cone = 0.6         # minimum detection confidence for a box to be kept as a cone
+
+
+dep_x, dep_y, dep_theta = 1.314, 1.162, 3.14/3.6    # initial pose (x, y, heading)
+nb_particule = 30                                   # number of particles
+pos = [[dep_x, dep_y, dep_theta] for i in range(nb_particule)]    # initial particle set
+
+F = 3423        # Camera focal length (pixels)
+h_reel = 0.234  # Real height of a cone (m)
+y_0 = 1500      # Horizontal centre of the image (pixels)
+
+dist_roue = 0.25    # Wheelbase: distance between the front and rear wheels (m)
+dt = 0.1            # Time step between two filter updates (s)
+
+
+# class position:
+    
+#     def __init__(self, pos_x, pos_y, nb_particule):
+#         self.__pos = [pos_x, pos_y]*nb_particule
+
+
+def normalisation(W):
+    # Normalise the particle weights so that they sum to 1
+    a = sum(W)
+    return [w/a for w in W]
+
+def distance(x_1, y_1, x_2, y_2):
+    # Euclidean distance between two points
+    return np.sqrt((x_1-x_2)**2 + (y_1-y_2)**2)
+
+def boite2coord(x1,y1,x2,y2):
+    # Pinhole-camera conversion of a bounding box into the cone position (x_r, y_r)
+    # in the camera frame: the apparent pixel height gives the distance d, the
+    # horizontal offset from the image centre gives the lateral position y_r.
+    d = F*h_reel/abs(y1-y2)
+    y_r = -((x1+x2)/2 - y_0)*d/F
+    sin_theta= y_r/d
+    x_r = d*(1-sin_theta**2)**0.5
+    
+    return x_r,y_r
+
+def rotation(point, centre, angle):
+    # Rotate `point` about `centre` by `angle` radians (counter-clockwise)
+    
+    x1,y1 = point
+    x2,y2 = centre
+    
+    new_x = (x1-x2)*cos(angle) - (y1-y2)*sin(angle) + x2
+    new_y = (x1-x2)*sin(angle) + (y1-y2)*cos(angle) + y2
+    
+    return new_x, new_y
+
+def distance_Chamfer(A_x, A_y, B_x, B_y):
+    # Symmetric Chamfer distance between two 2-D point sets: each point of A is matched
+    # to its nearest neighbour in B and vice versa, and all the distances are summed.
+    m = len(A_x)
+    n = len(B_x)
+    
+    if m==0 or n==0:
+        return 0.0001    # small non-zero value so that 1/distance stays finite when a set is empty
+    
+    res = 0
+    
+    tab = [[distance(A_x[i], A_y[i], B_x[j], B_y[j]) for i in range(m)] for j in range(n)]
+    tab = np.array(tab)
+    for i in range(m):
+        res += np.min(tab[:,i])
+    for j in range(n):
+        res += np.min(tab[j,:])
+    
+    return res
+
+
+def motion_update(commande, position):
+    # Propagate one particle over one time step dt with a bicycle (Ackermann)
+    # motion model, then add Gaussian noise on the position and the heading.
+    vitesse, direction = commande
+    x,y,theta = position
+    
+    if direction == 0:    # no steering: straight-line motion
+        new_x = x + dt*vitesse*cos(theta)
+        new_y = y + dt*vitesse*sin(theta)
+        new_theta = theta
+        
+    else:
+        R = dist_roue/tan(direction)
+        angle_rotation = dt*vitesse/R
+        
+        # R is signed (negative when direction < 0), which places the instantaneous
+        # centre of rotation on the correct side of the car for both turn directions.
+        centre_rotation_x = x - R*sin(theta)
+        centre_rotation_y = y + R*cos(theta)
+        
+        new_x, new_y = rotation((x,y),(centre_rotation_x, centre_rotation_y), angle_rotation)
+        new_theta = theta + angle_rotation
+    
+    new_x += rd.gauss(0, sigma_position)
+    new_y += rd.gauss(0, sigma_position)
+    new_theta += rd.gauss(0, sigma_direction)
+        
+    return (new_x, new_y, new_theta)
+
+
+
+def sensor_update(observation, position):
+    # Weight one particle: project the map cones into the particle's frame, keep
+    # those inside the forward field of view, and compare them to the observed
+    # cones through the Chamfer distance.
+    x,y,theta = position
+    vision_x = []
+    vision_y = []
+    vision_x_ext = []
+    vision_y_ext = []
+    for pt in coord_int:
+        vision_x.append((pt[0]-x)*cos(theta) + (pt[1]-y)*sin(theta))
+        vision_y.append(-(pt[0]-x)*sin(theta) + (pt[1]-y)*cos(theta))
+    
+    for pt in coord_ext:
+        vision_x_ext.append((pt[0]-x)*cos(theta) + (pt[1]-y)*sin(theta))
+        vision_y_ext.append(-(pt[0]-x)*sin(theta) + (pt[1]-y)*cos(theta))
+        
+    cones_vu_x = []
+    cones_vu_y = []
+    for i in range(len(vision_x)):
+        if vision_x[i]>0 and abs(vision_y[i])<vision_x[i]:    # cone in front of the car, within a ~90-degree field of view
+            cones_vu_x.append(vision_x[i])
+            cones_vu_y.append(vision_y[i])
+
+    for i in range(len(vision_x_ext)):
+        if vision_x_ext[i]>0 and abs(vision_y_ext[i])<vision_x_ext[i]:
+            cones_vu_x.append(vision_x_ext[i])
+            cones_vu_y.append(vision_y_ext[i])
+            
+    obs_x = []
+    obs_y = []
+    for i in observation:
+        obs_x.append(i[0])
+        obs_y.append(i[1])
+            
+    return 1/distance_Chamfer(cones_vu_x, cones_vu_y, obs_x, obs_y)    # particle weight: inverse Chamfer distance (better match => larger weight)
+
+
+
+
+
+
+def particle_filter(pos, u_t, z_t):    # particles, command u_t, observation z_t
+    # One filter step: propagate every particle, weight it against the observation,
+    # normalise the weights and resample with low-variance resampling.
+    X_t_barre, X_t = [], []
+    M = len(pos)
+    for m in range(M):
+        x = motion_update(u_t, pos[m])
+        w = sensor_update(z_t, x)
+        X_t_barre.append((x,w))
+    
+    X = [X_t_barre[i][0] for i in range(M)]
+    W = [X_t_barre[i][1] for i in range(M)]
+    W = normalisation(W)
+    X_t = low_variance_resampling(X, W)
+    
+    return X_t,W
+
+
+def low_variance_resampling(X,W):
+    # Low-variance (systematic) resampling: one random offset r and J evenly spaced
+    # pointers select the new particle set according to the cumulative weights.
+    X_t = []
+    J = len(X)
+    r = rd.random()/J
+    c = W[0]
+    i=0
+    for j in range(J):
+        U = r + j/J    # j-th evenly spaced pointer in [0, 1)
+        while U > c:
+            i += 1
+            c += W[i]
+        X_t.append(X[i])
+    return X_t
+
+
+def get_position(boxes, commande, pos):
+    # Estimate the car pose for one frame. boxes: YOLO detections
+    # (x1, y1, x2, y2, confidence, ...); commande: (speed, steering angle);
+    # pos: current particle list.
+    
+    # Cone positions in the robot frame, computed from the detection boxes
+    
+    liste_x =[]
+    liste_y =[]
+    for i in boxes:
+        if i[4] >= seuil_cone:    # keep only confident detections
+            x,y = boite2coord(i[0],i[1],i[2],i[3])
+            liste_x.append(x)
+            liste_y.append(y)
+        
+    z_t = []
+    for i in range(len(liste_x)):
+        z_t.append((liste_x[i], liste_y[i]))
+    
+    
+    # Localisation: one particle-filter step with the command and the observed cones
+    pos, W = particle_filter(pos, commande, z_t)
+    
+    # Point estimate: weighted combination of the particles (x, y, theta)
+    pos_calc = [0,0,0]
+    for i in range(len(pos)):
+        pos_calc[0] += pos[i][0]*W[i]
+        pos_calc[1] += pos[i][1]*W[i]
+        pos_calc[2] += pos[i][2]*W[i]
+    
+    return pos_calc, pos
+    
+
+'''
+if __name__ == "__main__":
+    # pos = [(1.314, 1.162, 3.14/3.6) for i in range(10)]
+    # #pos = [(2.5*rd.random(), 1.5*rd.random(), 6.28*rd.random()) for i in range(50)]
+    
+    # liste_x = [0.37409758380473157,
+    #              0.6517064494114153,
+    #              0.23060853761333963,
+    #              0.5278583908503303,
+    #              0.14161368355256793,
+    #              0.5134652832573952]
+    
+    # liste_y = [0.14021924676581576,
+    #              -0.3119493901540909,
+    #              -0.3464004029844368,
+    #              0.01390277627039628,
+    #              -0.2754514724880131,
+    #              -0.5902545559074325]
+    
+    # observation = []
+    # for i in range(len(liste_x)):
+    #     observation.append((liste_x[i], liste_y[i]))
+    
+    commande = (0.138841, -pi/6)
+    
+    # a,W = particle_filter(pos, commande, observation)
+    #print(a,W)
+    
+    
+    
+    
+    
+    
+    pos_initiale = (1.314, 1.162, 3.14/3.6)
+    pos_finale = (1.4, 1.271, 3.14/5.5)
+    
+    pos = [pos_initiale for i in range(30)]
+    
+    pos_calc,pos = get_position(commande,pos)
+    
+    
+    plt.figure(1)
+    plt.plot(pos_initiale[0], pos_initiale[1], 'o', label='Initial position')
+    plt.arrow(pos_initiale[0], pos_initiale[1], 0.07*cos(pos_initiale[2]), 0.07*sin(pos_initiale[2]))
+    plt.plot(pos_finale[0], pos_finale[1], 'o', label='Final position')
+    plt.arrow(pos_finale[0], pos_finale[1], 0.07*cos(pos_finale[2]), 0.07*sin(pos_finale[2]))
+    
+    plt.plot(coord_x_ext, coord_y_ext,'+')
+    plt.plot(coord_x_int, coord_y_int,'+')
+    
+    pos_x = []
+    pos_y = []
+    for k in range(len(pos)):
+        i = pos[k]
+        pos_x.append(i[0])
+        pos_y.append(i[1])
+    
+    plt.plot(pos_x,pos_y,'x', label='Possible positions')
+    
+    # pos_calc = [0,0,0]
+    # for i in range(len(a)):
+    #     pos_calc[0] += a[i][0]*W[i]
+    #     pos_calc[1] += a[i][1]*W[i]
+    #     pos_calc[2] += a[i][2]*W[i]
+    
+    plt.plot(pos_calc[0], pos_calc[1], 'o', label='Weighted mean of the computed positions')
+    plt.arrow(pos_calc[0], pos_calc[1], 0.07*cos(pos_calc[2]), 0.07*sin(pos_calc[2]))
+    
+    
+    plt.legend()
+    plt.show()
+    
+    '''
\ No newline at end of file
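
A minimal usage sketch of the new module, assuming detection boxes in (x1, y1, x2, y2, confidence, ...) pixel format (the layout indexed by get_position) and a command given as (speed, steering angle); the numeric values are illustrative only:

from math import pi
from MCL import get_position, dep_x, dep_y, dep_theta, nb_particule

# One confident cone detection and one below the 0.6 confidence threshold (ignored)
boxes = [(1550, 800, 1650, 950, 0.87), (400, 700, 460, 790, 0.41)]
commande = (0.14, -pi/6)    # speed, steering angle
particules = [[dep_x, dep_y, dep_theta] for _ in range(nb_particule)]

for _ in range(5):    # a few filter steps on the same (illustrative) frame
    pos_estimee, particules = get_position(boxes, commande, particules)
print(pos_estimee)    # estimated pose (x, y, theta)
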
diff --git a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py
index 95874aa4c256f3abc6ab7041c6aac1f36d744d45..a66669f29c215a553f39d734b5f00105bd307538 100644
--- a/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py	
+++ b/PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/yolov3/utils.py	
@@ -22,6 +22,21 @@ from track_2_generator import create_track
 from pid_controller import PidController
 from shapely.geometry import Polygon, Point, LineString
 
+from MCL import get_position
+
+
+
+class liste_pos:
+    # Mutable holder for the particle set, so that it persists between successive frames.
+    
+    def __init__(self, pos_depart, nb_particle):
+        self.liste = [pos_depart for i in range(nb_particle)]
+        
+    def update_liste(self, new_liste):
+        self.liste = new_liste
+
+# Particle set of the car, initialised at the map centre (centre_x/diametre, centre_y/diametre)
+liste_pos_car = liste_pos((278/225, 200/225, 0), 50)
+
+
 def load_yolo_weights(model, weights_file):
     tf.keras.backend.clear_session() # used to reset layer names
     # load Darknet original weights to TensorFlow model
@@ -184,7 +199,11 @@ def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_co
             cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                         fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)
             
-            
+    
+    # Estimate the car's position from the detected cones and the current driving command `commande`
+    pos_car, liste_pos_car_temp = get_position(bboxes, commande, liste_pos_car.liste)
+    liste_pos_car.update_liste(liste_pos_car_temp)
+
     """Algorithme de conduite"""
     """x_car, y_car"""
     
@@ -548,7 +567,8 @@ def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_
         fps = int(vid.get(cv2.CAP_PROP_FPS))
         codec = cv2.VideoWriter_fourcc(*'XVID')
         out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
-
+    
+    
     while True:
         ret, frame = vid.read()
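
The get_position call added to draw_bbox needs the current driving command in scope. Purely as a sketch (the helper below and its name are hypothetical, not part of this patch), the pose update can live in a small function that the detection loop calls with whichever command source it has:

# Hypothetical helper, not part of the patch: one pose update per processed frame
def update_car_pose(bboxes, commande):
    # bboxes: YOLO cone detections; commande: (speed, steering angle)
    pos_car, nouvelles_particules = get_position(bboxes, commande, liste_pos_car.liste)
    liste_pos_car.update_liste(nouvelles_particules)
    return pos_car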