Commit 629a753d, authored 2 years ago by Chauchat Eymeric

Merge branch 'main' of gitlab.ec-lyon.fr:epsa-personnal-git/epsa-pae-ai-self-driving

Parents: 76a94216, ef22b4f8
Changes: 1 changed file, 91 additions and 14 deletions.

PAR 152/MCL.py → PAR 152/Yolo V3/TensorFlow-2.x-YOLOv3-master/MCL.py (+91, −14)
...
@@ -3,6 +3,8 @@ from math import cos, sin
import numpy as np
import matplotlib.pyplot as plt
from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp

### Map ###
pixel_x_ext, pixel_y_ext = [242, 220, 165, 110, 63, 33, 22, 34, 63, 110, 165, 220, 243, 310, 334, 388, 443, 490, 521, 531, 520, 489, 443, 388, 333, 310], [76, 64, 52, 64, 95, 141, 196, 252, 298, 330, 340, 328, 318, 316, 328, 339, 329, 298, 251, 196, 142, 95, 64, 53, 64, 77]
...
@@ -20,6 +22,20 @@ sigma_position = 0.02
sigma_direction = 0.05
dep_x, dep_y, dep_theta = 0, 0, 0
nb_particule = 30
pos = [[dep_x, dep_y, dep_theta] for i in range(nb_particule)]

F = 1000        # Camera focal length
h_reel = 0.2    # Height of a cone (plot)
y_0 = 2048/2    # Middle of the image

# class position:
#     def __init__(self, pos_x, pos_y, nb_particule):
#         self.__pos = [pos_x, pos_y]*nb_particule

def normalisation(W):
    a = sum(W)
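These constants drive the pinhole-camera relation used by boite2coord in the next hunk: a cone of real height h_reel that spans h_px pixels vertically in the image lies at distance d ≈ F * h_reel / h_px in front of the camera (note that boite2coord refers to the image-centre constant as y0, while it is defined here as y_0). A hedged worked example; the 40-pixel box height is illustrative and not taken from the file:

F = 1000            # camera focal length, in pixels (value from the hunk above)
h_reel = 0.2        # real cone height, in metres (value from the hunk above)
box_height_px = 40  # illustrative detection height, not from MCL.py

d = F * h_reel / box_height_px  # pinhole model: d = F * h / h_px
print(d)  # 5.0 -> the cone is roughly 5 m away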
...
@@ -28,10 +44,23 @@ def normalisation(W):
def distance(x_1, y_1, x_2, y_2):
    return np.sqrt((x_1 - x_2)**2 + (y_1 - y_2)**2)

def boite2coord(x1, y1, x2, y2):
    d = F*h_reel/abs(y1 - y2)
    y_r = -((x1 + x2)/2 - y0)*d/F
    sin_theta = y_r/d
    x_r = d*(1 - sin_theta**2)**0.5
    return x_r, y_r

def distance_Chamfer(A_x, A_y, B_x, B_y):
    m = len(A_x)
    n = len(B_x)
    if m == 0 or n == 0:
        return 0.0001
    res = 0
    tab = [[distance(A_x[i], A_y[i], B_x[j], B_y[j]) for i in range(m)] for j in range(n)]
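distance_Chamfer is cut off by the hunk boundary right after the pairwise distance table is built, so the reduction step is not visible here. For orientation, a minimal self-contained sketch of one common symmetric Chamfer distance between two 2-D point sets; this is a standard formulation, not necessarily the exact reduction used in MCL.py:

import numpy as np

def chamfer_distance(A_x, A_y, B_x, B_y):
    """Symmetric Chamfer distance between point sets A and B (one common definition)."""
    if len(A_x) == 0 or len(B_x) == 0:
        return 0.0001  # same empty-set fallback as distance_Chamfer above
    A = np.column_stack((A_x, A_y))
    B = np.column_stack((B_x, B_y))
    # Pairwise Euclidean distances, shape (len(A), len(B)).
    d = np.linalg.norm(A[:, None, :] - B[None, :, :], axis=-1)
    # Mean nearest-neighbour distance in both directions.
    return d.min(axis=1).mean() + d.min(axis=0).mean()

In a particle filter of this kind, the particle weight is typically a decreasing function of such a distance between the observed cone coordinates and the map points.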
...
@@ -47,9 +76,9 @@ def distance_Chamfer(A_x, A_y, B_x, B_y):
 def motion_update(commande, position):
     vitesse, direction = commande
     x, y, theta = position
-    new_x = x + vitesse*cos(direction) + rd.gauss(0, sigma_position)
-    new_y = y + vitesse*sin(direction) + rd.gauss(0, sigma_position)
-    new_theta = theta - direction + rd.gauss(0, sigma_direction)
+    new_x = x + vitesse*cos(direction + theta) + rd.gauss(0, sigma_position)
+    new_y = y + vitesse*sin(direction + theta) + rd.gauss(0, sigma_position)
+    new_theta = theta + direction + rd.gauss(0, sigma_direction)
     return (new_x, new_y, new_theta)
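The change in this hunk is that the velocity command is now rotated by the particle's current heading (direction + theta), so the displacement is expressed in the world frame rather than the command frame, and the heading is integrated as theta + direction instead of theta - direction. A self-contained usage sketch of the new version; the only assumption is that rd is Python's random module, as the rd.gauss calls suggest:

import random as rd
from math import cos, sin

sigma_position = 0.02   # noise parameters from the file
sigma_direction = 0.05

def motion_update(commande, position):
    vitesse, direction = commande
    x, y, theta = position
    new_x = x + vitesse * cos(direction + theta) + rd.gauss(0, sigma_position)
    new_y = y + vitesse * sin(direction + theta) + rd.gauss(0, sigma_position)
    new_theta = theta + direction + rd.gauss(0, sigma_direction)
    return (new_x, new_y, new_theta)

# Propagate one particle a single step: 0.1 m forward with a slight left turn.
print(motion_update((0.1, 0.05), (1.314, 1.162, 0.87)))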
...
@@ -123,8 +152,46 @@ def low_variance_resampling(X,W):
        X_t.append(X[i])
    return X_t

def get_position(pos, u_t):
    image_path = "./IMAGES/IMG_2345.JPEG"
    video_path = "./IMAGES/test.mp4"
    yolo = Load_Yolo_model()
    boxes = detect_image(yolo, image_path, "./IMAGES/111.jpg", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
    #detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
    #detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
    #detect_video_realtime_mp(video_path, "Output.mp4", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0), realtime=False)
    liste_x = []
    liste_y = []
    for i in boxes:
        x, y = boite2coord(i[0], i[1], i[2], i[3])
        liste_x.append(x)
        liste_y.append(y)
    z_t = []
    for i in range(len(liste_x)):
        z_t.append((liste_x[i], liste_y[i]))
    pos, W = particle_filter(pos, u_t, z_t)
    pos_calc = [0, 0, 0]
    for i in range(len(a)):
        pos_calc[0] += a[i][0]*W[i]
        pos_calc[1] += a[i][1]*W[i]
        pos_calc[2] += a[i][2]*W[i]
    return pos_calc

if __name__ == "__main__":
    pos = [(1.314, 1.162, 3.14/3.6) for i in range(10)]
    #pos = [(2.5*rd.random(), 1.5*rd.random(), 6.28*rd.random()) for i in range(50)]
    liste_x = [0.37409758380473157, 0.6517064494114153,
...
@@ -144,7 +211,7 @@ if __name__ == "__main__":
     for i in range(len(liste_x)):
         observation.append((liste_x[i], liste_y[i]))
-    commande = (0.138841, 0.3)
+    commande = (0.138841, -0.)
     a, W = particle_filter(pos, commande, observation)
     print(a, W)
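Here a is the particle set and W the normalised weights returned by particle_filter. Both get_position above and the plotting code below reduce them to a single pose by a weight-weighted average of x, y and theta. The same reduction in vectorised form, as a hedged numpy sketch; note that averaging the angle component linearly is only safe while the headings stay clear of the ±π wrap-around:

import numpy as np

def weighted_mean_pose(particles, weights):
    """Weighted average of poses [(x, y, theta), ...]; weights are assumed to sum to 1."""
    P = np.asarray(particles, dtype=float)   # shape (N, 3)
    w = np.asarray(weights, dtype=float)     # shape (N,)
    return (w[:, None] * P).sum(axis=0)      # [mean_x, mean_y, mean_theta]

# Example with two particles.
print(weighted_mean_pose([(1.0, 2.0, 0.1), (1.2, 2.2, 0.3)], [0.25, 0.75]))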
...
@@ -154,9 +221,9 @@ if __name__ == "__main__":
     plt.figure(1)
-    plt.plot(pos_initiale[0], pos_initiale[1], 'o')
+    plt.plot(pos_initiale[0], pos_initiale[1], 'o', label='Position initale')
     plt.arrow(pos_initiale[0], pos_initiale[1], 0.07*cos(pos_initiale[2]), 0.07*sin(pos_initiale[2]))
-    plt.plot(pos_finale[0], pos_finale[1], 'o')
+    plt.plot(pos_finale[0], pos_finale[1], 'o', label='Position finale')
     plt.arrow(pos_finale[0], pos_finale[1], 0.07*cos(pos_finale[2]), 0.07*sin(pos_finale[2]))
     plt.plot(coord_x_ext, coord_y_ext, '+')
...
@@ -164,14 +231,24 @@ if __name__ == "__main__":
    pos_x = []
    pos_y = []
    for k in range(len(a)):
        i = a[k]
        pos_x.append(i[0])
        pos_y.append(i[1])
        plt.arrow(i[0], i[1], 0.07*cos(i[2]), 0.07*sin(i[2]))
        plt.annotate(W[k], [i[0], i[1]])
    #for k in range(len(a)):
        #i = a[k]
        #pos_x.append(i[0])
        #pos_y.append(i[1])
        #plt.arrow(i[0], i[1], 0.07*cos(i[2]), 0.07*sin(i[2]))
        #plt.annotate(W[k], [i[0], i[1]])
    plt.plot(pos_x, pos_y, 'x', label='Positions possibles')
    pos_calc = [0, 0, 0]
    for i in range(len(a)):
        pos_calc[0] += a[i][0]*W[i]
        pos_calc[1] += a[i][1]*W[i]
        pos_calc[2] += a[i][2]*W[i]
    plt.plot(pos_x, pos_y, '.')
    plt.plot(pos_calc[0], pos_calc[1], 'o', label='Moyenne pondérée des positions calculées')
    plt.arrow(pos_calc[0], pos_calc[1], 0.07*cos(pos_calc[2]), 0.07*sin(pos_calc[2]))
    plt.legend()
    plt.show()
\ No newline at end of file
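particle_filter and low_variance_resampling are called throughout this diff, but their bodies fall outside the visible hunks. For orientation, a minimal self-contained sketch of the standard predict, weight, resample loop that one Monte Carlo localisation step performs; it illustrates the general algorithm under assumed parameters (motion_update and weight_fn are passed in), not the exact code in MCL.py:

import random as rd

def particle_filter_step(particles, u_t, z_t, motion_update, weight_fn):
    """One MCL step: predict with command u_t, weight against observation z_t, resample."""
    # 1. Prediction: apply the noisy motion model to every particle.
    predicted = [motion_update(u_t, p) for p in particles]
    # 2. Correction: weight each particle by how well it explains the observation,
    #    then normalise so the weights sum to 1.
    weights = [weight_fn(p, z_t) for p in predicted]
    total = sum(weights)
    weights = [w / total for w in weights]
    # 3. Low-variance (systematic) resampling.
    n = len(predicted)
    r = rd.uniform(0, 1.0 / n)
    c, i, resampled = weights[0], 0, []
    for m in range(n):
        u = r + m / n
        while u > c:
            i += 1
            c += weights[i]
        resampled.append(predicted[i])
    return resampled, weights

In MCL.py the weighting is presumably based on the Chamfer distance between the cones detected by YOLO (converted to ground coordinates by boite2coord) and the map arrays defined at the top of the file.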