Sensors and Actuators A 319 (2021) 112566

Contents lists available at ScienceDirect

Sensors and Actuators A: Physical

journal homepage: www.elsevier.com/locate/sna

Sensing system of environmental perception technologies for driverless vehicle: A review of state of the art and challenges

Qiping Chen a, Yinfei Xie a, Shifeng Guo b,∗, Jie Bai c, Qiang Shu d

a Key Laboratory of Conveyance and Equipment, Ministry of Education, East China Jiaotong University, Nanchang, 330013, China
b Shenzhen Key Laboratory of Smart Sensing and Intelligent Systems, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, Shenzhen, 518055, China
c School of Automotive Studies, Tongji University, Shanghai, 201804, China
d Shanghai Tongyu Automotive Technology Co., Ltd., Shanghai, 201806, China

ARTICLE INFO

Article history:
Received 23 August 2020
Received in revised form 8 January 2021
Accepted 12 January 2021
Available online 15 January 2021

Keywords:
Driverless vehicle
Environmental perception
Selection guidance
New challenges

ABSTRACT
Environmental perception technology is the guarantee of the safety of driverless vehicles. At present, there is a great deal of research and many reviews on environmental perception, aiming to realize unmanned driving while ensuring the safety of human life. However, the technology is facing new challenges in the new era. This review paper attempts to systematically summarize environment perception technology and discuss the new challenges currently faced. To this end, we first summarize the advantages, disadvantages and applicable occasions of several commonly used sensing methods to provide a clear selection guide. The new challenges faced by environmental perception technology are then discussed from three aspects: technology, external environment and applications. Finally, the article also points out the future development trends and efforts of environmental perception technology.

© 2021 Elsevier B.V. All rights reserved.
Contents

1. Introduction
2. Technology overview
   2.1. Vision sensing
      2.1.1. Image denoising
      2.1.2. Image segmentation
      2.1.3. Image recognition
      2.1.4. Image restoration
      2.1.5. Image compression
      2.1.6. Application of visual sensing
      2.1.7. Summary about visual sensing
   2.2. Laser sensing
      2.2.1. Classification by function
      2.2.2. Classification by line number
      2.2.3. Classification by emission waveform
      2.2.4. Summary of laser sensor
   2.3. Ultrasonic sensor
   2.4. Microwave radar sensing
      2.4.1. Interference radar
      2.4.2. PWU-WB radar
      2.4.3. SFCW radar
      2.4.4. Summary of microwave radar sensing
   2.5. MMW radar sensing
   2.6. Fusion sensing
   2.7. Performance comparison
   2.8. Guidelines for selecting sensors
      2.8.1. Sensor selection status
      2.8.2. Sensor selection
3. Challenges
   3.1. Technical challenges
      3.1.1. Poor stability of visual sensors
      3.1.2. The detection distance of Lidar is limited
      3.1.3. Prone to omissions of MMW radar
      3.1.4. Immature fusion sensing technology
   3.2. External environment challenges
   3.3. Application challenges
      3.3.1. Appearance and modeling
      3.3.2. Size and weight limits
      3.3.3. Production cost
      3.3.4. Ethical decision
4. Future development trend
   4.1. Sensor material
   4.2. Sensor sensitivity
   4.3. Lidar solid state
   4.4. Dynamic and static detection combined
   4.5. Multisensor fusion
5. Conclusions
Acknowledgments
References
Biography

∗ Corresponding author. E-mail address: Sf.guo@siat.ac.cn (S. Guo).
https://doi.org/10.1016/j.sna.2021.112566
0924-4247/© 2021 Elsevier B.V. All rights reserved.
1. Introduction

The driverless vehicle is a kind of intelligent vehicle that mainly relies on the smart driving instruments installed in the vehicle to realize driverless control. It helps to eliminate car accidents and traffic jams and greatly reduces carbon dioxide emissions. The technical model of the driverless vehicle is shown in Fig. 1(a). It is generally divided into four basic technologies, namely environment perception and modeling, positioning and map construction, path planning and decision making, and motion control [1]. A driverless vehicle can reach its destination safely and normatively by perceiving the surrounding environment and planning the corresponding driving path without human interference. Therefore, environmental perception technology is the core of the driverless vehicle. The main functions of environmental perception for driverless vehicles are lane and road detection, traffic sign recognition, vehicle tracking, behavior analysis and scene understanding [2]. The perception system of a driverless vehicle should have the following characteristics: (1) accurate, to provide the vehicle with accurate driving environment information; (2) robust, to work normally and not malfunction easily; and (3) real-time, to avoid obstacles in time. To achieve the above objectives, a variety of sensors and robust fusion algorithms are required [1].

The field of environment perception has become a major research topic worldwide. A wealth of research has been dedicated to the development of environment perception to enhance the safety of driverless driving [3–6]. However, there are still many technical and non-technical challenges before truly driverless driving is realized. A comprehensive literature review on the environmental perception of driverless vehicles was provided in [1]. It gave the latest algorithms and modeling methods for driverless vehicles, but it did not describe the non-technical challenges that driverless vehicles may face in the future. The structure of a driverless vehicle combining navigation and positioning was introduced in [7], which also predicted future technologies for environmental perception. However, that paper lacked a comparison of various on-board sensors and did not discuss the question of "how to choose". From the research mentioned above, two major questions about environment perception technology remain open and challenging. Question 1: the classification and summary of existing sensor technology are not detailed enough, so the answer to the question of how to select a sensor is not specific enough. Question 2: the overview of the new challenges faced by the technology is not comprehensive, and an overview from multiple perspectives is lacking.

This review article attempts to systematically summarize driverless vehicle environment perception technology, provide clearer sensor selection guidance, and discuss the new challenges facing the technology from multiple directions. The contributions of this paper are as follows. For Question 1, the several conventional sensing methods used in environmental perception technology are outlined. The advantages and disadvantages of the various perception methods, as well as their applicable occasions, are summarized, and clearer selection guidance for perception methods is provided in Sec. 2. For Question 2, the new challenges and currently unresolved problems faced by perception technology are summarized in Sec. 3. The future development trend and direction of the technology are summarized in Sec. 4 and Sec. 5, respectively.
2. Technology overview

The environment perception technologies of driverless vehicles provide the necessary driving environment information for the vehicle. The schematic diagram of the environment sensing system [8] is shown in Fig. 1(b). It is composed of vision sensing, radar sensing, ultrasonic sensing, laser sensing, etc.

Fig. 1. Overall diagram of the driverless vehicle. (a) Four basic technologies of the driverless vehicle [1]. (b) Sensing system of environmental perception [8].
2.1. Vision sensing

Vision sensing adopts image analysis and recognition technology to detect the driving environment. Image processing is an important part of vision sensing technology: it uses an automatic machine system to realize the understanding of complex environmental images. Image processing technology is composed of five modules: image denoising, image segmentation, image recognition, image restoration and image compression.
2.1.1. Image denoising

Noise in images disrupts the useful information sequence in the image, blurring the image. Image denoising aims to improve the visual effect of the image [9]. Ojha et al. used the K-SVD algorithm to obtain a suitable dictionary from the interference data and to extract the phase image content effectively [10]. The block diagram of the algorithm is shown in Fig. 2. In addition, there are many other noise reduction methods, such as Block Matching and 3D filtering (BM3D) [11], non-local means noise reduction [12,13] and the discrete cosine transform (DCT) [14].

In this algorithm, the image is first analyzed by unsupervised principal component analysis (PCA) [15]. Then, each small patch, denoted X_k, is rearranged into a one-dimensional array together with its neighbors, and an initial dictionary D_k^(0) is selected. The K-SVD algorithm is applied to obtain the denoised patch X̂_k and the denoised version of the related dictionary D_k. Finally, all the denoised patches are recombined to form the denoised interferogram.
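As a rough illustration of the patch-dictionary idea behind K-SVD, the Python sketch below denoises a grayscale image with scikit-learn's mini-batch dictionary learning and orthogonal matching pursuit. It is not the exact K-SVD implementation of [10]; the patch size, dictionary size and sparsity level are assumptions chosen only for illustration, and [10] additionally applies a PCA analysis before dictionary learning, which the sketch omits.

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d

def dictionary_denoise(noisy, patch_size=(7, 7), n_atoms=100, sparsity=2):
    # Collect overlapping patches and remove each patch's DC component.
    patches = extract_patches_2d(noisy, patch_size)
    X = patches.reshape(len(patches), -1)
    dc = X.mean(axis=1, keepdims=True)
    X = X - dc
    # Learn an over-complete dictionary and sparse-code the patches with OMP.
    dico = MiniBatchDictionaryLearning(n_components=n_atoms, alpha=1.0,
                                       transform_algorithm="omp",
                                       transform_n_nonzero_coefs=sparsity,
                                       random_state=0)
    code = dico.fit(X).transform(X)
    recon = code @ dico.components_ + dc
    # Average the overlapping reconstructed patches back into an image.
    return reconstruct_from_patches_2d(recon.reshape(patches.shape), noisy.shape)

Because every overlapping patch is processed, the sketch is best tried on a small image or a cropped region.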
2.1.2. Image segmentation

Image segmentation refers to the extraction of meaningful features from the image. With the development of deep learning technology, the image segmentation effect has been significantly improved [16–18]. According to the segmentation principle, image segmentation can be subdivided into the following three methods: graph theory (GT), pixel clustering (PC) and depth semantics (DS).
2.1.2.1. Image segmentation based on graph theory. Image segmentation based on graph theory treats the image as a weighted graph G = (V, E), where V is a set of nodes and E is a finite set of edges [19]. The optimal cut set is then found to complete the image segmentation. This approach is widely recognized because of its insensitivity to data shape and its robustness. It can be subdivided into NormalizedCut [20], GraphCut [21] and GrabCut [22].
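As a concrete example of the graph-theoretic family, the snippet below runs OpenCV's GrabCut [22] on a single frame. The rectangle enclosing the object of interest is an assumed user input, and the image path is hypothetical.

import numpy as np
import cv2

img = cv2.imread("frame.png")                       # hypothetical input image
mask = np.zeros(img.shape[:2], np.uint8)            # per-pixel GrabCut labels
bgd_model = np.zeros((1, 65), np.float64)           # internal background model buffer
fgd_model = np.zeros((1, 65), np.float64)           # internal foreground model buffer
rect = (50, 50, 400, 300)                           # assumed box around the object

cv2.grabCut(img, mask, rect, bgd_model, fgd_model, 5, cv2.GC_INIT_WITH_RECT)

# Keep definite and probable foreground pixels; everything else is background.
fg = np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)
segmented = img * fg[:, :, None]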
Fig. 2. The K-SVD algorithm block diagram [10].

2.1.2.2. Image segmentation based on pixel clustering. Image segmentation based on pixel clustering partitions the feature space according to the aggregation of pixels in that space, and then maps the clusters back to the original image space to obtain the segmentation result [23]. An interactive image segmentation method based on diffusion mapping was presented in [24,25]. This method was combined with the K-means clustering algorithm to obtain a better segmentation effect. Besides, there are other methods such as spectral clustering [26], the simple linear iterative clustering algorithm (SLIC) [27], the Turbopixels algorithm [28], etc.
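A minimal pixel-clustering sketch in the spirit of this subsection: cluster the RGB values with K-means and map each pixel back to its cluster label. The number of clusters and the image path are assumptions; [24,25] additionally embed the pixels with diffusion maps before clustering, which is omitted here.

import numpy as np
import cv2
from sklearn.cluster import KMeans

img = cv2.imread("frame.png")                      # hypothetical input image
pixels = img.reshape(-1, 3).astype(np.float32)     # one row per pixel in colour space

kmeans = KMeans(n_clusters=4, n_init=10, random_state=0).fit(pixels)
label_map = kmeans.labels_.reshape(img.shape[:2])  # cluster index per pixel

# Replace every pixel by its cluster centre to visualise the segmentation.
segmented = kmeans.cluster_centers_[kmeans.labels_].reshape(img.shape).astype(np.uint8)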
2.1.2.3. Image segmentation based on depth semantics. This method means that the deeper content information provided by the image is combined to assist in image segmentation. A method for extracting dense feature vectors using a multi-scale convolutional network was introduced in [29], as shown in Fig. 3(a). A multimodal network incorporating digital surface models (DSMs) into the digital film cloud network structure was proposed in [30], as shown in Fig. 3(b). Moreover, there are other methods such as the Pyramid Scene Parsing Network [31], the Segmentation Network [32], the U-Net method [33], etc.
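To make the depth-semantic idea concrete, the following sketch defines a deliberately tiny encoder-decoder that outputs a class score for every pixel. It only illustrates the dense-prediction structure shared by the networks cited above ([29–33]) and is far smaller than any of them; the class count and input size are arbitrary assumptions.

import torch
import torch.nn as nn

class TinySegNet(nn.Module):
    """Minimal encoder-decoder producing per-pixel class logits (illustrative only)."""
    def __init__(self, n_classes=3):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),                               # 1/2 resolution
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),                               # 1/4 resolution
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(32, 16, 2, stride=2), nn.ReLU(),
            nn.ConvTranspose2d(16, n_classes, 2, stride=2),
        )

    def forward(self, x):
        return self.decoder(self.encoder(x))               # (N, n_classes, H, W)

net = TinySegNet()
logits = net(torch.rand(1, 3, 128, 128))                   # dummy RGB frame
labels = logits.argmax(dim=1)                              # per-pixel class map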
2.1.3. Image recognition

Image recognition refers to extracting intrinsic features from images, classifying them and analyzing their structure [34–37]. A residual convolutional recurrent neural network was used to solve the problem of scene image recognition in [38], as shown in Fig. 4(a). Three deep learning methods based on the YOLO model were proposed in [39], as shown in Fig. 4(b–e). The results showed that the third method (Fig. 4(e)) was more effective in detecting pedestrians in cloudy weather, and it was superior to the other methods in both accuracy and speed.
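For readers who want to try object-level recognition quickly, the sketch below runs a pretrained Faster R-CNN detector from torchvision on a single frame. This is a generic substitute for, not a reproduction of, the YOLO-based pedestrian detectors of [39]; the 0.5 score threshold is an arbitrary assumption, and torchvision >= 0.13 is assumed for the weights argument.

import torch
from torchvision.models.detection import fasterrcnn_resnet50_fpn

model = fasterrcnn_resnet50_fpn(weights="DEFAULT")   # detector pretrained on COCO
model.eval()

frame = torch.rand(3, 480, 640)                      # stand-in for a camera frame in [0, 1]
with torch.no_grad():
    pred = model([frame])[0]                         # dict with 'boxes', 'labels', 'scores'

keep = pred["scores"] > 0.5                          # assumed confidence threshold
boxes, labels = pred["boxes"][keep], pred["labels"][keep]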
2.1.4. Image restoration

Image restoration aims to preserve the original image content throughout image acquisition, transmission, storage and presentation [42–44]. A new method of multi-frame image restoration using variational Bayes (VB) was proposed in [45]. This method uses Bayesian inference to automatically adjust parameters and ensure image quality.
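The variational Bayes method of [45] is beyond a short listing, but a classical single-frame baseline conveys the restoration task: blur a test image with a known point-spread function, add noise, and invert with a Wiener filter. The uniform 5x5 PSF, noise level and regularisation weight are assumptions.

import numpy as np
from scipy.signal import convolve2d
from skimage import data, restoration

image = data.camera() / 255.0                        # sample grayscale image in [0, 1]
psf = np.ones((5, 5)) / 25.0                         # assumed uniform blur kernel
blurred = convolve2d(image, psf, mode="same")        # simulated degraded observation
blurred += 0.01 * np.random.default_rng(0).standard_normal(blurred.shape)

restored = restoration.wiener(blurred, psf, balance=0.1)   # regularised deconvolution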
2.1.5. Image compression

The basic goal of image compression is to achieve the best image quality at a given compression ratio [46,47]. Two memorability-based image compression algorithms, built on regions of interest and hybrid image scaling technology, were proposed in [48,49], respectively. They used CNN and restricted Boltzmann machine features [50] to calculate a memorability score for each patch in the image and generated a memorability map that is used to optimize image compression.
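The memorability-map methods of [48,49] steer bits towards salient patches; the plain sketch below only demonstrates the underlying rate-distortion trade-off by re-encoding one frame at several JPEG quality settings and reporting size versus mean squared error. The input path is hypothetical.

import cv2
import numpy as np

img = cv2.imread("frame.png")                        # hypothetical input frame

for quality in (90, 50, 10):                         # decreasing bit budget
    ok, buf = cv2.imencode(".jpg", img, [cv2.IMWRITE_JPEG_QUALITY, quality])
    decoded = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    mse = np.mean((img.astype(np.float64) - decoded.astype(np.float64)) ** 2)
    print(f"quality={quality:3d}  size={len(buf):7d} bytes  MSE={mse:.2f}")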
2.1.6. Application of visual sensing

2.1.6.1. Monocular stereo vision. Monocular stereo vision is a method of measuring the environment by taking pictures with one camera. The advantages of this method are its simple structure and easy camera calibration [154]. It reduces the complexity of existing multi-body motion estimation based on a tight coupling between tracking and detection strategies [51,52].
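Calibration is the step that makes single-camera measurement possible, so a standard chessboard-based calibration sketch is given below. The board geometry (9x6 inner corners) and the image file pattern are assumptions.

import glob
import numpy as np
import cv2

pattern = (9, 6)                                     # assumed inner-corner layout of the board
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

obj_points, img_points = [], []
for fname in glob.glob("calib_*.png"):               # hypothetical calibration images
    gray = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
    found, corners = cv2.findChessboardCorners(gray, pattern)
    if found:
        obj_points.append(objp)
        img_points.append(corners)

# Intrinsic matrix K and distortion coefficients for the single camera.
ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
    obj_points, img_points, gray.shape[::-1], None, None)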
2.1.6.2. Binocular stereo vision. Binocular stereo vision uses two cameras to perceive the same object in front of the vehicle from different angles, as shown in Fig. 5(a). The position relationship between the two cameras is then considered. Finally, a three-dimensional image of the object in front is formed [53–55].
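A minimal block-matching sketch for a rectified binocular pair follows, assuming hypothetical image files and an assumed focal length and baseline; depth follows from the usual relation Z = f·B / d.

import cv2
import numpy as np

left = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)    # hypothetical rectified pair
right = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)

stereo = cv2.StereoBM_create(numDisparities=64, blockSize=15)
disparity = stereo.compute(left, right).astype(np.float32) / 16.0  # StereoBM output is fixed-point (x16)

f_px, baseline_m = 700.0, 0.12                         # assumed focal length (pixels) and baseline (metres)
valid = disparity > 0
depth = np.zeros_like(disparity)
depth[valid] = f_px * baseline_m / disparity[valid]    # depth map in metres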
2.1.6.3. Multi-view stereo vision. Multi-view stereo vision uses multiple cameras to perceive the same object in front of the vehicle from different angles, as shown in Fig. 5(b). The position relationships among the multiple cameras are then considered. Finally, a 3D image of the object in front is formed [56–58].
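With more than two calibrated views, each matched point can be triangulated against any camera pair. The sketch below triangulates one correspondence with OpenCV, using assumed intrinsics and an assumed 12 cm baseline.

import numpy as np
import cv2

K = np.array([[700.0, 0.0, 320.0],                   # assumed pinhole intrinsics
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])
P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])                    # reference camera
P2 = K @ np.hstack([np.eye(3), np.array([[-0.12], [0.0], [0.0]])])   # second camera, 12 cm to the side

pts1 = np.array([[320.0], [240.0]])                  # matched pixel in view 1 (2 x N)
pts2 = np.array([[300.0], [240.0]])                  # the same point seen in view 2

X_h = cv2.triangulatePoints(P1, P2, pts1, pts2)      # homogeneous 4 x N result
X = (X_h[:3] / X_h[3]).T                             # Euclidean 3-D coordinates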
2.1.6.4. Performance comparison. The structure of monocular stereo vision is simple, the algorithm is mature, and the amount of calculation is small. However, the measurement range and the measurement distance of the camera are a pair of contradictory characteristics, which must be balanced and adjusted. Binocular stereo vision has been widely used in mobile robot positioning and navigation, obstacle avoidance and map building; however, its difficulty lies in the matching of corresponding points. Multi-view stereo vision makes full use of the information from multiple cameras and reduces mismatching, which improves the positioning accuracy, but its matching algorithm is more complex and needs more time. The performance of the three kinds of stereo vision in visual sensing is summarized in Table 1.