Commit c9905b12 authored by Ouhouuhu

avancement (progress)

parent 5b706328
@@ -29,7 +29,7 @@ coord_int = [(i/diametre , j/diametre) for i,j in zip(pixel_x_int, pixel_y_int)]
### Paramètres ###
sigma_position = 0.02
sigma_direction = 15*3.1415/180
sigma_direction = 1*3.1415/180
seuil_cone = 0.6
@@ -37,9 +37,9 @@ dep_x, dep_y, dep_theta = 1.314, 1.162, 3.14/3.6
nb_particule = 30
pos = [[dep_x, dep_y, dep_theta] for i in range(nb_particule)]
F = 3951/3 #Focale camera
h_reel = 0.27 #Hauteur d'un plot
y_0 = 1500 #Milieu de l'image
F = 156.25 #3957/3 #Focale camera
h_reel = 1 #Hauteur d'un plot
y_0 = 240 #Milieu de l'image
dist_roue = 0.25 #Distance entre les roues avant et les roues arrieres
@@ -60,7 +60,10 @@ def distance(x_1, y_1, x_2, y_2):
def boite2coord(x1,y1,x2,y2):
d = F*h_reel/abs(y1-y2)
y_r = -((x1+x2)/2 - y_0)*d/F
#y_r = -((x1+x2)/2 - y_0)*d/F
ypmax=480/2
ymax=0.54
y_r=((x1+x2)/2-ypmax)/ypmax*ymax
sin_theta= y_r/d
x_r = d*(1-sin_theta**2)**0.5
@@ -146,7 +149,7 @@ def sensor_update(observation, position):
cones_vu_x = []
cones_vu_y = []
for i in range(len(vision_x)):
if vision_x[i]>0 and abs(vision_y[i])<0.67*vision_x[i]:
if vision_x[i]>0 and abs(vision_y[i])<0.54*vision_x[i]:
cones_vu_x.append(vision_x[i])
cones_vu_y.append(vision_y[i])
@@ -20,10 +20,8 @@ image_path = "./Calibration_cam/0.5m.jpg"
video_path = "./IMAGES/test.mp4"
yolo = Load_Yolo_model()
_, boxes = detect_image(yolo, image_path, "./images test/test2_pred.jpg", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#_, boxes = detect_image(yolo, image_path, "./images test/test2_pred.jpg", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
#detect_video_realtime_mp(video_path, "Output.mp4", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0), realtime=False)
print(boxes)
\ No newline at end of file
#import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '0'
#import cv2
#import tensorflow as tf
#from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
#from yolov3.configs import *
#import matplotlib.pyplot as plt
import numpy as np
import random as rd
from math import cos, sin, tan
#yolo = Load_Yolo_model()
### Map ###
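# Cone positions read off a map image, in pixels; 'diametre' appears to be the scale
# used to convert those pixels into track coordinates (see get_cone_map in track_2_generator).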
pixel_x_ext, pixel_y_ext = [242, 220, 165, 110, 63, 33, 22, 34, 63, 110, 165, 220, 243, 310, 334, 388, 443, 490, 521, 531, 520, 489, 443, 388, 333, 310], [76, 64, 52, 64, 95, 141, 196, 252, 298, 330, 340, 328, 318, 316, 328, 339, 329, 298, 251, 196, 142, 95, 64, 53, 64, 77]
pixel_x_int, pixel_y_int = [245, 238, 222, 196, 166, 134, 108, 91, 85, 90, 109, 134, 165, 196, 222, 239, 308, 314, 332, 358, 388, 419, 445, 462, 468, 462, 445, 419, 388, 359, 332, 314], [201, 167, 140, 123, 116, 123, 140, 165, 195, 228, 253, 270, 277, 270, 253, 227, 200, 226, 253, 270, 277, 270, 253, 228, 197, 166, 140, 122, 117, 123, 140, 166]
diametre = 225
centre_x, centre_y = 278, 200
coord_x_ext, coord_y_ext = [i/diametre for i in pixel_x_ext], [i/diametre for i in pixel_y_ext]
coord_x_int, coord_y_int = [i/diametre for i in pixel_x_int], [i/diametre for i in pixel_y_int]
coord_ext = [(i/diametre , j/diametre) for i,j in zip(pixel_x_ext, pixel_y_ext)]
coord_int = [(i/diametre , j/diametre) for i,j in zip(pixel_x_int, pixel_y_int)]
### Parameters ###
sigma_position = 0.02  # std. dev. of the position noise added to each particle per step
sigma_direction = 0.05  # std. dev. of the heading noise (radians)
seuil_cone = 0.6  # minimum detection confidence for a box to count as a cone
dep_x, dep_y, dep_theta = 1.314, 1.162, 3.14/3.6  # starting pose (x, y, heading)
nb_particule = 30
pos = [[dep_x, dep_y, dep_theta] for i in range(nb_particule)]  # all particles start at the initial pose
F = 3423  # Camera focal length (in pixel units)
h_reel = 0.234  # Real height of a cone
y_0 = 1500  # Middle of the image (pixel coordinate)
dist_roue = 0.25  # Wheelbase: distance between the front and rear wheels
dt = 1/3.3  # Time step; the update loop runs at roughly 3.3 Hz
# class position:
# def __init__(self, pos_x, pos_y, nb_particule):
# self.__pos = [pos_x, pos_y]*nb_particule
def normalisation(W):
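    """Normalize a list of weights so that they sum to 1."""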
a = sum(W)
return [w/a for w in W]
def distance(x_1, y_1, x_2, y_2):
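    """Euclidean distance between (x_1, y_1) and (x_2, y_2)."""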
return np.sqrt((x_1-x_2)**2 + (y_1-y_2)**2)
def boite2coord(x1,y1,x2,y2):
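    """Convert a detection box (x1, y1, x2, y2, in image pixels) to the cone position
    (x_r, y_r) in the car frame: the range d follows from the pinhole relation
    d = F * h_reel / (apparent size |y1 - y2|), the lateral offset y_r from the
    offset of the box centre relative to the image centre y_0."""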
d = F*h_reel/abs(y1-y2)
y_r = -((x1+x2)/2 - y_0)*d/F
sin_theta= y_r/d
x_r = d*(1-sin_theta**2)**0.5
return x_r,y_r
def rotation(point, centre, angle):
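    """Rotate 'point' around 'centre' by 'angle' radians."""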
x1,y1 = point
x2,y2 = centre
new_x = (x1-x2)*cos(angle) - (y1-y2)*sin(angle) + x2
new_y = (x1-x2)*sin(angle) + (y1-y2)*cos(angle) + y2
return new_x, new_y
def distance_Chamfer(A_x, A_y, B_x, B_y):
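    """Symmetric Chamfer distance between point sets A and B: each point contributes its
    distance to the nearest point of the other set. Returns a small constant when either
    set is empty so that the weight 1/distance stays finite."""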
m = len(A_x)
n = len(B_x)
if m==0 or n==0:
return 0.0001
res = 0
tab = [[distance(A_x[i], A_y[i], B_x[j], B_y[j]) for i in range(m)] for j in range(n)]
tab = np.array(tab)
for i in range(m):
res += np.min(tab[:,i])
for j in range(n):
res += np.min(tab[j,:])
return res
def motion_update(commande, position):
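    """Propagate one particle over one time step dt with the command (speed, steering angle):
    straight-line motion when the steering is zero, otherwise a circular arc of radius
    R = dist_roue / tan(direction) (bicycle model), then add Gaussian noise on the pose."""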
vitesse, direction = commande
x,y,theta = position
if direction ==0:
new_x = x + dt*vitesse*cos(theta)
new_y = y + dt*vitesse*sin(theta)
new_theta = theta
else:
R = dist_roue/tan(direction)
angle_rotation = dt*vitesse/R
if direction>0:
centre_rotation_x = x + R*sin(theta)
centre_rotation_y = y - R*cos(theta)
else:
centre_rotation_x = x - R*sin(theta)
centre_rotation_y = y + R*cos(theta)
new_x, new_y = rotation((x,y),(centre_rotation_x, centre_rotation_y), angle_rotation)
new_theta = theta + angle_rotation
new_x += rd.gauss(0, sigma_position)
new_y += rd.gauss(0, sigma_position)
new_theta += rd.gauss(0, sigma_direction)
return (new_x, new_y, new_theta)
def sensor_update(observation, position):
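    """Particle weight: transform the map cones into the particle's frame, keep those in
    front of the car within roughly +/-45 degrees (|y| < x), and return the inverse of the
    Chamfer distance between these expected cones and the observed ones."""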
x,y,theta = position
vision_x = []
vision_y = []
vision_x_ext = []
vision_y_ext = []
for pt in coord_int:
vision_x.append((pt[0]-x)*cos(theta) + (pt[1]-y)*sin(theta))
vision_y.append(-(pt[0]-x)*sin(theta) + (pt[1]-y)*cos(theta))
for pt in coord_ext:
vision_x_ext.append((pt[0]-x)*cos(theta) + (pt[1]-y)*sin(theta))
vision_y_ext.append(-(pt[0]-x)*sin(theta) + (pt[1]-y)*cos(theta))
cones_vu_x = []
cones_vu_y = []
for i in range(len(vision_x)):
if vision_x[i]>0 and abs(vision_y[i])<vision_x[i]:
cones_vu_x.append(vision_x[i])
cones_vu_y.append(vision_y[i])
for i in range(len(vision_x_ext)):
if vision_x_ext[i]>0 and abs(vision_y_ext[i])<vision_x_ext[i]:
cones_vu_x.append(vision_x_ext[i])
cones_vu_y.append(vision_y_ext[i])
obs_x = []
obs_y = []
for i in observation:
obs_x.append(i[0])
obs_y.append(i[1])
return 1/distance_Chamfer(cones_vu_x, cones_vu_y, obs_x, obs_y)
def particle_filter(pos,u_t,z_t): # particles, control input, observation
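    """One particle-filter step: move every particle with the motion model, weight it with
    the sensor model, normalize the weights, then resample. Returns the resampled particles
    together with the normalized (pre-resampling) weights."""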
X_t_barre, X_t = [], []
M = len(pos)
for m in range(M):
x = motion_update(u_t, pos[m])
w = sensor_update(z_t, x)
X_t_barre.append((x,w))
X = [X_t_barre[i][0] for i in range(M)]
W = [X_t_barre[i][1] for i in range(M)]
W = normalisation(W)
X_t = low_variance_resampling(X, W)
return X_t,W
def low_variance_resampling(X,W):
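    """Low-variance (systematic) resampling: draw J particles using a single random offset
    r in [0, 1/J), so a particle of weight w is picked about w*J times."""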
X_t = []
J = len(X)
r = rd.random()/J
c = W[0]
i=0
for j in range(J):
        U = r + j/J  # j starts at 0 here, so the sample points are r, r + 1/J, ..., r + (J-1)/J
while U > c:
i += 1
c += W[i]
X_t.append(X[i])
return X_t
def get_position(boxes,commande,pos):
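    """Estimate the car pose from the YOLO boxes and the last command: convert each confident
    box into cone coordinates in the car frame, run one particle-filter step, and return
    the weighted mean pose together with the updated particle set."""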
    # Locate the cones from the detection boxes
liste_x =[]
liste_y =[]
for i in boxes:
if i[4] >= seuil_cone:
x,y = boite2coord(i[0],i[1],i[2],i[3])
liste_x.append(x)
liste_y.append(y)
z_t = []
for i in range(len(liste_x)):
z_t.append((liste_x[i], liste_y[i]))
    # Positioning: one particle-filter step
pos, W = particle_filter(pos, commande, z_t)
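    # Note: W are the weights computed before resampling in particle_filter; the weighted
    # mean below reuses them for the resampled particle set.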
pos_calc = [0,0,0]
for i in range(len(pos)):
pos_calc[0] += pos[i][0]*W[i]
pos_calc[1] += pos[i][1]*W[i]
pos_calc[2] += pos[i][2]*W[i]
return pos_calc, pos
'''
if __name__ == "__main__":
# pos = [(1.314, 1.162, 3.14/3.6) for i in range(10)]
# #pos = [(2.5*rd.random(), 1.5*rd.random(), 6.28*rd.random()) for i in range(50)]
# liste_x = [0.37409758380473157,
# 0.6517064494114153,
# 0.23060853761333963,
# 0.5278583908503303,
# 0.14161368355256793,
# 0.5134652832573952]
# liste_y = [0.14021924676581576,
# -0.3119493901540909,
# -0.3464004029844368,
# 0.01390277627039628,
# -0.2754514724880131,
# -0.5902545559074325]
# observation = []
# for i in range(len(liste_x)):
# observation.append((liste_x[i], liste_y[i]))
commande = (0.138841, -pi/6)
# a,W = particle_filter(pos, commande, observation)
#print(a,W)
pos_initiale = (1.314, 1.162, 3.14/3.6)
pos_finale = (1.4, 1.271, 3.14/5.5)
pos = [pos_initiale for i in range(30)]
pos_calc,pos = get_position(commande,pos)
plt.figure(1)
plt.plot(pos_initiale[0], pos_initiale[1], 'o', label='Position initiale')
plt.arrow(pos_initiale[0], pos_initiale[1], 0.07*cos(pos_initiale[2]), 0.07*sin(pos_initiale[2]))
plt.plot(pos_finale[0], pos_finale[1], 'o', label='Position finale')
plt.arrow(pos_finale[0], pos_finale[1], 0.07*cos(pos_finale[2]), 0.07*sin(pos_finale[2]))
plt.plot(coord_x_ext, coord_y_ext,'+')
plt.plot(coord_x_int, coord_y_int,'+')
pos_x = []
pos_y = []
for k in range(len(pos)):
i = pos[k]
pos_x.append(i[0])
pos_y.append(i[1])
plt.plot(pos_x,pos_y,'x', label='Positions possibles')
# pos_calc = [0,0,0]
# for i in range(len(a)):
# pos_calc[0] += a[i][0]*W[i]
# pos_calc[1] += a[i][1]*W[i]
# pos_calc[2] += a[i][2]*W[i]
plt.plot(pos_calc[0], pos_calc[1], 'o', label='Moyenne pondérée des positions calculées')
plt.arrow(pos_calc[0], pos_calc[1], 0.07*cos(pos_calc[2]), 0.07*sin(pos_calc[2]))
plt.legend()
plt.show()
'''
\ No newline at end of file
@@ -58,6 +58,17 @@ def create_double_circles(r, left_center, right_center):
return l_l+l_r
def get_cone_map():
pixel_x_ext, pixel_y_ext = [242, 220, 165, 110, 63, 33, 22, 34, 63, 110, 165, 220, 243, 310, 334, 388, 443, 490, 521, 531, 520, 489, 443, 388, 333, 310], [76, 64, 52, 64, 95, 141, 196, 252, 298, 330, 340, 328, 318, 316, 328, 339, 329, 298, 251, 196, 142, 95, 64, 53, 64, 77]
pixel_x_int, pixel_y_int = [245, 238, 222, 196, 166, 134, 108, 91, 85, 90, 109, 134, 165, 196, 222, 239, 308, 314, 332, 358, 388, 419, 445, 462, 468, 462, 445, 419, 388, 359, 332, 314], [201, 167, 140, 123, 116, 123, 140, 165, 195, 228, 253, 270, 277, 270, 253, 227, 200, 226, 253, 270, 277, 270, 253, 228, 197, 166, 140, 122, 117, 123, 140, 166]
diametre = 225
centre_x, centre_y = 278,200
coord_ext = [((i-centre_x)/diametre+1 , (j-centre_y)/diametre+0.5) for i,j in zip(pixel_x_ext, pixel_y_ext)]
coord_int = [((i-centre_x)/diametre+1 , (j-centre_y)/diametre+0.5) for i,j in zip(pixel_x_int, pixel_y_int)]
return coord_int+coord_ext
if __name__ =="__main__":
points = create_track(1, -20, 20)
#construire le svg
@@ -21,7 +21,7 @@ from tensorflow.python.saved_model import tag_constants
import matplotlib.pyplot as plt
from yolov3.track_2_generator import create_track
from yolov3.track_2_generator import create_track, get_cone_map
from yolov3.pid_controller import PidController
from shapely.geometry import Polygon, Point, LineString
from yolov3.car_interface import get_ser, start_ser, close_ser, send_infos
@@ -150,6 +150,7 @@ def draw_bbox(image, bboxes,liste_pos_car=liste_pos((0,0,0),1), CLASSES=YOLO_COC
NUM_CLASS = read_class_names(CLASSES)
num_classes = len(NUM_CLASS)
image_h, image_w, _ = image.shape
print(image_h, image_w)
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
#print("hsv_tuples", hsv_tuples)
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
@@ -217,9 +218,13 @@ def draw_bbox(image, bboxes,liste_pos_car=liste_pos((0,0,0),1), CLASSES=YOLO_COC
track_points = create_track(0.5,0,0, (0,0))[:-1]+[(1,0.5)]
track = Polygon(track_points)
"""loading cones"""
cone_map = get_cone_map()
plt.plot([x for x,y in cone_map], [y for x,y in cone_map], 'o')
plt.plot(x_car, y_car,'+')
for x,y in zip(cone_x, cone_y):
x, y= (x--278/225)/2.4+1, (y-200/225)/2.4+0.5
x, y= (x-278/225)/2.4+1, (y-200/225)/2.4+0.5
plt.plot(x,y,'*')
plt.plot([i for i,j in track_points], [j for i,j in track_points])
plt.show()
@@ -239,7 +244,8 @@ def draw_bbox(image, bboxes,liste_pos_car=liste_pos((0,0,0),1), CLASSES=YOLO_COC
"""sending info to car"""
try:
print(f"poten: {send_infos(ser,speed,commande,0)}")
a = send_infos(ser,speed,commande,0)
#print(f"poten: {a}")
except:
pass
@@ -649,7 +655,8 @@ def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_
if show:
cv2.imshow('output', frame)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
pass
#cv2.destroyAllWindows()
#break
cv2.destroyAllWindows()