-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex.html
More file actions
458 lines (374 loc) · 22.1 KB
/
index.html
File metadata and controls
458 lines (374 loc) · 22.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
<!DOCTYPE html>
<html lang="en">
<!-- Head -->
<head> <!-- Metadata, OpenGraph and Schema.org -->
<!-- Standard metadata -->
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>The UrbanV2X Datasets</title>
<meta name="author" content="The UrbanV2X Datasets" />
<meta name="description" content="Multisensory Vehicle-Infrastructure Dataset for Cooperative Navigation in Urban Areas." />
<meta name="keywords" content="Multi-sensor Fusion, RoadSide Infrastructure, SLAM, Autonomous Driving, Dataset" />
<!-- OpenGraph -->
<meta property="og:site_name" content="The UrbanV2X Datasets" />
<meta property="og:type" content="website" />
<meta property="og:title" content="The UrbanV2X Datasets | Home" />
<!-- FIX: og:url pointed at http://localhost:4000/ (Jekyll dev baseurl leaked into the build); use the production URL -->
<meta property="og:url" content="https://polyu-taslab.github.io/UrbanV2X/" />
<meta property="og:description" content="Multisensory Vehicle-Infrastructure Dataset for Cooperative Navigation in Urban Areas." />
<meta property="og:locale" content="en" />
<!-- Twitter card -->
<meta name="twitter:card" content="summary" />
<meta name="twitter:title" content="Home" />
<meta name="twitter:description" content="Multisensory Vehicle-Infrastructure Dataset for Cooperative Navigation in Urban Areas." />
<!-- Schema.org structured data.
     FIX: the description previously read "An Event-Centric Multisensory Driving Dataset for SLAM"
     (leftover from the template this site was forked from) and the url was localhost;
     both now match the page's own metadata. -->
<script type="application/ld+json">
{
  "@context": "https://schema.org",
  "@type": "WebSite",
  "name": "The UrbanV2X Datasets",
  "headline": "Home",
  "url": "https://polyu-taslab.github.io/UrbanV2X/",
  "description": "Multisensory Vehicle-Infrastructure Dataset for Cooperative Navigation in Urban Areas.",
  "author": {
    "@type": "Person",
    "name": "The UrbanV2X Datasets"
  },
  "sameAs": ["https://github.com/arclab-hku/Event_based_VO-VIO-SLAM", "https://arclab.hku.hk/"]
}
</script>
<!-- NOTE(review): the sameAs entries above still point to the HKU ARC Lab (the upstream template) — confirm whether they should reference PolyU TAS Lab instead. -->
<!-- Bootstrap & MDB -->
<link href="https://cdn.jsdelivr.net/npm/bootstrap@4.6.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha256-DF7Zhf293AJxJNTmh5zhoYYIMs2oXitRfBjY+9L//AY=" crossorigin="anonymous">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/mdbootstrap@4.20.0/css/mdb.min.css" integrity="sha256-jpjYvU3G3N6nrrBwXJoVEYI/0zw8htfFnhT9ljN3JJw=" crossorigin="anonymous" />
<!-- Fonts & Icons -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fortawesome/fontawesome-free@5.15.4/css/all.min.css" integrity="sha256-mUZM63G8m73Mcidfrv5E+Y61y7a12O5mW4ezU3bxqW4=" crossorigin="anonymous">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/academicons@1.9.1/css/academicons.min.css" integrity="sha256-i1+4qU2G2860dGGIOJscdC30s9beBXjFfzjWLjBRsBg=" crossorigin="anonymous">
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Barriecito&family=Poppins:ital,wght@0,400;0,500;0,600;0,700;1,400;1,500;1,600;1,700">
<!-- Code Syntax Highlighting (media="none" keeps it non-render-blocking; presumably toggled by theme JS — confirm) -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jwarby/jekyll-pygments-themes@master/PASTIE.css" media="none" id="highlight_theme_light" />
<!-- Styles -->
<link rel="shortcut icon" href="https://polyu-taslab.github.io/UrbanV2X/assets/img/icon.png"/>
<link rel="stylesheet" href="https://polyu-taslab.github.io/UrbanV2X/assets/css/main.css">
<!-- FIX: canonical pointed at http://localhost:4000/ — use the production URL -->
<link rel="canonical" href="https://polyu-taslab.github.io/UrbanV2X/">
<!-- FIX: fonts.css was linked twice (once absolute, once root-relative); a single link suffices -->
<link rel="stylesheet" href="https://polyu-taslab.github.io/UrbanV2X/assets/css/fonts.css">
<!-- Dark Mode -->
</head>
<!-- Body -->
<!-- NOTE(review): the trailing space inside class="fixed-top-nav " is harmless but untidy — confirm no selector depends on it before cleaning up. -->
<body class="fixed-top-nav ">
<!-- Header -->
<header>
<!-- Nav Bar: Bootstrap 4 collapse plugin drives the mobile toggler via the data-toggle/data-target attributes below. -->
<nav id="navbar" class="navbar navbar-light navbar-expand-sm fixed-top">
<div class="container">
<!-- Navbar Toggle (hamburger shown below the sm breakpoint) -->
<button class="navbar-toggler collapsed ml-auto" type="button" data-toggle="collapse" data-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar top-bar"></span>
<span class="icon-bar middle-bar"></span>
<span class="icon-bar bottom-bar"></span>
</button>
<div class="collapse navbar-collapse text-right" id="navbarNav">
<ul class="navbar-nav ml-auto flex-nowrap">
<!-- Home -->
<li class="nav-item active">
<a class="nav-link" href="/UrbanV2X/index.html">Home<span class="sr-only">(current)</span></a>
</li>
<!-- Other pages -->
<!-- NOTE(review): "Home"/"Sensors" link to an explicit index.html while "Calibration"/"Download" use directory URLs — consider unifying the style. -->
<!-- <li class="nav-item dropdown ">
<a class="nav-link dropdown-toggle" href="#" id="navbarDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Sensors</a>
<div class="dropdown-menu dropdown-menu-right" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="/UrbanV2X/about/sensor/">Sensor Suite</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="/UrbanV2X/about/synchronization/">Synchronization</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="/UrbanV2X/about/ground_truth/">Ground Truth</a>
</div>
</li> -->
<li class="nav-item ">
<a class="nav-link" href="/UrbanV2X/sensors/index.html">Sensors</a>
</li>
<li class="nav-item ">
<a class="nav-link" href="/UrbanV2X/calibration/">Calibration</a>
</li>
<li class="nav-item ">
<a class="nav-link" href="/UrbanV2X/download/">Download</a>
</li>
<!-- <li class="nav-item dropdown ">
<a class="nav-link dropdown-toggle" href="#" id="navbarDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Contact Us</a>
<div class="dropdown-menu dropdown-menu-right" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="https://github.com/mgaoling/mpl_calibration_toolbox/issues" target="_blank" rel="noopener noreferrer">Calibration Issue</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="https://github.com/mgaoling/mpl_dataset_toolbox/issues" target="_blank" rel="noopener noreferrer">Dataset Issue</a>
</div>
</li> -->
</ul>
</div>
</div>
</nav>
</header>
<!-- Content -->
<div class="container mt-5">
<!-- home.html -->
<div class="post">
<header class="post-header">
<h1 class="post-title">
<span style="font-weight: 600;">The UrbanV2X Datasets</span>
</h1>
<p class="desc">
Multisensory Vehicle-Infrastructure Dataset for Cooperative Navigation in Urban Areas.
</p>
</header>
<article>
<div class="clearfix">
<p><br></p>
<!-- <picture style="text-align: center;">
<img class="img-fluid rounded z-depth-1" src="https://polyu-taslab.github.io/UrbanV2X/assets/gif/total_homepage.gif" title="total_homepage">
</picture> -->
<div style="text-align: center;">
<picture>
<!-- FIX: added alt text — the image had no alt attribute (accessibility failure) -->
<img style="width: 90%;" class="img-fluid rounded z-depth-1" src="assets/gif/dataset_overview_15secs.gif" alt="Animated overview of the UrbanV2X dataset collection" title="sensor_platform">
</picture>
</div>
<p><br></p>
<div style="text-align: center;">
<picture>
<!-- FIX: added alt text (was missing) -->
<img style="width: 90%;" class="img-fluid rounded z-depth-1" src="https://polyu-taslab.github.io/UrbanV2X/assets/gif/sensorview_camera.gif" alt="Synchronized multi-camera sensor views recorded from the vehicle platform" title="homepage_vis">
</picture>
</div>
<p><br></p>
<p style="text-align: justify;">
Due to the limitations of single autonomous vehicles, Cellular Vehicle-to-Everything (C-V2X) technology opens a new window for achieving fully autonomous driving through sensor information sharing. However, real-world datasets supporting vehicle–infrastructure cooperative navigation in complex urban environments remain rare.
</p>
<p style="text-align: justify;">
To address this gap, we present UrbanV2X, a comprehensive multisensory dataset collected from both vehicles and roadside infrastructure in the Hong Kong C-V2X testbed, designed to support research on smart mobility applications in dense urban areas.
</p>
<p style="text-align: justify;">
Our onboard platform provides time-synchronized data from multiple industrial cameras, LiDARs, 4D radar, UWB, IMU, and high-precision GNSS-RTK/INS navigation systems. Meanwhile, our roadside infrastructure provides LiDAR, GNSS, and UWB measurements.
</p>
<p style="text-align: justify;">
The entire vehicle–infrastructure platform is synchronized using the Precision Time Protocol (PTP), with sensor calibration data provided. We also benchmark various navigation algorithms to evaluate the collected cooperative data.
</p>
<!-- After the paper is accepted, we will release all the datasets and keep the dataset maintenance. -->
<!-- FIX: this sentence was bare text preceded by a lone <br> and followed by a stray </p> with no matching open tag; wrapped it in a proper paragraph -->
<p style="text-align: justify;">
The contributions of our work can be summarized as follows:
</p>
<ul>
<!-- FIX: the sensor-platform link pointed at /sensors, missing the /UrbanV2X/ base path used by every other internal link; also fixed the "cameara" typo. -->
<!-- NOTE(review): "multiple event cameras" conflicts with the onboard-platform description above (industrial cameras, LiDARs, 4D radar, UWB, IMU, GNSS) — confirm whether event cameras belong in this list. -->
<li style="text-align: justify;"> UrbanV2X Dataset and Sensor Integration: Our <a href="/UrbanV2X/sensors/">sensor platform</a> integrating various sensors like multiple event cameras, GNSS/INS, 4D Radar, LiDAR and sky-pointing camera with roadside infrastructure, including GNSS, LiDAR, and UWB.
</li>
<!-- <li style="text-align: justify;">
ALl sensors are <a href="/UrbanV2X/calibration">well-calibrated</a> and temporally synchronized at the hardware level, with recording data simultaneously.
</li> -->
<li style="text-align: justify;"> UrbanV2X collects comprehensive data from both urban canyons and open areas. These <a href="/UrbanV2X/download">sequences</a> are recorded under various conditions. Our dataset and benchmark results are publicly available on our website.
</li>
<!-- <li style="text-align: justify;"> We present a comprehensive <a href="/UrbanV2X/benchmark">benchmark</a> that evaluates existing state-of-the-art SLAM algorithms of various
sensor modalities and analyzes their limitations.
</li> -->
</ul>
<!-- <p style="text-align: justify;">
We hope that we can make some contributions for the development of event-based vision, especially event-based multi-sensor fusioin for autonomous driving.
<br>
The visualization of each sequence is available in <a href="/UrbanV2X/download">Download section</a> and <a href="https://www.bilibili.com/video/BV1Km4y157KC/?spm_id_from=333.999.0.0&vd_source=a88e426798937812a8ffc1a9be5a3cb7">Bilibili</a>.
<br>
If you have any suggestions or questions, do not hesitate to propose an issue to our <a href="https://polyu-taslab.github.io/UrbanV2X">Github Repository</a>.
</p> -->
<p><br></p>
</div>
<!-- News -->
<!-- <div class="news">
<h2>News</h2>
<div class="table-responsive">
<table class="table table-sm table-borderless">
<tr>
<th scope="row">December 2, 2023</th>
<td>
We release our GNSS-RTK/INS gt and M8T/F9P GNSS at <a href="/UrbanV2X/download">Download section</a>.
</td>
</tr>
<tr>
<th scope="row">November 28, 2023</th>
<td>
Our work has been accepted by IEEE Transactions on Intelligent Vehicles!
</td>
</tr>
<tr>
<th scope="row">November 21, 2023</th>
<td>
Calibration results and rosbag are avaliable at <a href="/UrbanV2X/calibration">Calibration page</a>.
</td>
</tr>
<tr>
<th scope="row">November 19, 2023</th>
<td>
We release our sequences at <a href="/UrbanV2X/download">Download section</a>.
</td>
</tr>
<tr>
<th scope="row">November 07, 2023</th>
<td>
The preprint version is available at <a href="https://arxiv.org/abs/2311.02327">arXiv</a>.
</td>
</tr>
<tr>
<th scope="row">October 31, 2023</th>
<td>
Watch our video presentation on <a href="https://www.bilibili.com/video/BV1pN411s79g/?spm_id_from=333.999.list.card_archive.click&vd_source=a88e426798937812a8ffc1a9be5a3cb7">Bilibili</a> or <a href="https://youtu.be/Q1F9M_DZLws">Youtube</a>.
</td>
</tr>
<tr>
<th scope="row">August 28, 2023</th>
<td>
We finish the evaluation of UrbanV2X using various LiDAR SLAM (<a href="https://space.bilibili.com/499377825/channel/collectiondetail?sid
</td>
</tr>
<tr>
<th scope="row">August 22, 2023</th>
<td>
We complete the collection of all sequences (<a href="https://www.bilibili.com/video/BV1Km4y157KC/?share_source=copy_web&vd_source=f5ac2a23210d1b4b7b4aa5e374feae10">bilibili visulization</a>).
</td>
</tr>
<tr>
<th scope="row">June 2, 2023</th>
<td>
Driver code and time synchronization of event cameras are now available (<a href="https://github.com/arclab-hku/Event_based_VO-VIO-SLAM/tree/main/driver_code/dv-ros-master/script">Code</a>, <a href="https://www.bilibili.com/video/BV168411o7BJ/?spm_id_from=333.999.0.0&vd_source=a88e426798937812a8ffc1a9be5a3cb7">Bilibili</a>).
</td>
</tr>
<tr>
<th scope="row">June 1, 2023</th>
<td>
UrbanV2X Datasets goes live!
</td>
</tr>
</table>
</div>
</div> -->
<!-- Selected papers -->
<div class="publications">
<h2>BibTeX</h2>
Please cite the following publication when using this benchmark in an academic context:
<pre style="background-color: #f8f9fa; padding: 10px; border: 1px solid #ddd; border-radius: 5px; overflow-x: auto;">
@INPROCEEDINGS{11423762,
author={Qin, Qijun and Zhang, Ziqi and Zhong, Yihan and Huang, Feng and Liu, Xikun and Hu, Runzhi and Chen, Hang and Hu, Wei and Su, Dongzhe and Zhang, Jun and Ng, Hoi-Fung and Wen, Weisong},
booktitle={2025 IEEE 28th International Conference on Intelligent Transportation Systems (ITSC)},
title={UrbanV2X: A Multisensory Vehicle-Infrastructure Dataset for Cooperative Navigation in Urban Areas},
year={2025},
volume={},
number={},
pages={815-822},
keywords={Global navigation satellite system;Simultaneous localization and mapping;Laser radar;Navigation;Benchmark testing;Cameras;Synchronization;Ultra wideband radar;Autonomous vehicles;Vehicle-to-everything;Multi-sensor Fusion;Road Side;SLAM;Autonomous Driving;Dataset},
doi={10.1109/ITSC60802.2025.11423762}}
</pre>
<!-- NOTE(review): the empty <h2></h2> below renders a blank heading (accessibility smell) — consider removing it or giving it text. -->
<h2></h2>
<ol class="bibliography"><li>
<!-- _layouts/bib.html -->
<div class="row">
<!-- <div class="col-sm-2 abbr"><abbr class="badge">RA-L</abbr></div> -->
<!-- The RA-L badge above appears to render incorrectly, so it is commented out. -->
<!-- Entry bib key -->
<div id="UrbanV2X2023" class="col-sm-8">
<!-- Author -->
<div class="paper" style="text-align: justify;">Qin, Q., Zhang, Z., Zhong, Y., Huang, F., Liu, X., Hu, R., Chen, H., Hu, W., Su, D., Zhang, J., Ng, H.-F., & Wen, W. (2025).
UrbanV2X: A Multisensory Vehicle-Infrastructure
Dataset for Cooperative Navigation in Urban Areas
</div>
<!-- Title -->
<!-- <div class="title" style="text-align: justify;">UrbanV2X: An Event-Centric Multisensory Driving Dataset for SLAM.</div> -->
<!-- Journal/Book title and date -->
<!-- <div class="periodical" style="text-align: justify;">
IEEE Transactions on Intelligent Vehicles, vol. 9, no. 1, pp. 407-416, 2023.
</div> -->
<!-- Links/Buttons -->
<div class="links">
<!-- <a class="abstract btn btn-sm z-depth-0" role="button">Abs</a>
<a href="https://arxiv.org/abs/2311.02327" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">arXiv</a>
<a href="https://polyu-taslab.github.io/UrbanV2X/assets/pdf/2311.02327.pdf" class="btn btn-sm z-depth-0" role="button">PDF</a> -->
<!-- <a href="/UrbanV2X/assets/pdf/supplementary_material.pdf" class="btn btn-sm z-depth-0" role="button">Supp</a> -->
<!-- Fill in the three items above in order: the arXiv link, our paper's PDF, and the supplementary material if there is one (omit otherwise). -->
</div>
<!-- Hidden abstract block -->
<!-- NOTE(review): the hidden abstract below describes an event-centric SLAM dataset (81 sequences, stereo event cameras) — it appears copied from a different project and does not match UrbanV2X; confirm and replace before un-hiding. -->
<div class="abstract hidden">
<p style="text-align: justify;">
Leveraging multiple sensors enhances complex environmental perception and increases resilience to varying luminance conditions and high-speed motion patterns, achieving precise localization and mapping.
This paper proposes, UrbanV2X, an event-centric multisensory dataset containing 81 sequences and covering over 200 km of various challenging driving scenarios including high-speed motion, repetitive scenarios, dynamic objects, etc.
UrbanV2X provides data from two sets of stereo event cameras with different resolutions (640×480, 346×260), stereo industrial cameras, an infrared camera, a top-installed mechanical LiDAR with two slanted LiDARs,
two consumer-level GNSS receivers, and an onboard IMU.
Meanwhile, the ground-truth of the vehicle was obtained using a centimeter-level high-accuracy GNSS-RTK/INS navigation system.
All sensors are well-calibrated and temporally synchronized at the hardware level, with recording data simultaneously.
We additionally evaluate several state-of-the-art SLAM algorithms for benchmarking visual and LiDAR SLAM and identifying their limitations.
<!-- The full dataset can be found at \url{https://polyu-taslab.github.io/UrbanV2X/}. -->
</p>
</div>
</div>
</div>
</li></ol>
</div>
<br>
<!-- Other resources -->
<!-- FIX: class="Other resources" contained a space, so it parsed as two class names ("Other" and "resources"); replaced with a single hyphenated class. -->
<div class="other-resources">
<h2>Other resources</h2>
<!-- FIX: escaped the bare ampersand and fixed the "available at here" grammar -->
<p style="text-align: justify;">
Some tools for pre-processing the dataset
and the HKU event-based handheld &amp; drone dataset are available <a href="https://github.com/arclab-hku/Event_based_VO-VIO-SLAM" target="_blank" rel="noopener noreferrer">here</a>.
</p>
</div>
<!-- License -->
<div class="License">
<h2>License</h2>
<!-- FIX: typo "inquires" -> "inquiries"; added the missing space before the email address and made it a mailto link; linked directly to the GPL-3.0 text -->
<p>
This work is released under the <a href="https://www.gnu.org/licenses/gpl-3.0.html" target="_blank" rel="noopener noreferrer">GPLv3</a> license.
For commercial inquiries, please contact Dr. Wen Weisong (<a href="mailto:welson.wen@polyu.edu.hk">welson.wen@polyu.edu.hk</a>).
</p>
</div>
<br>
<br>
<!-- Acknowledgements -->
<div class="Acknowledgement">
<h2>Acknowledgement</h2>
<!-- NOTE(review): "Siqiao" looks like an incomplete name — confirm the full name with the authors. -->
<p style="text-align: justify;">
The authors thank Yuteng Wang, Shaoting Qiu from PolyU, Jiashi Feng, Alpamys Urtay and Siqiao from ASTRI for their kind support in this data evaluation.
</p>
</div>
<br>
<br>
</article>
</div>
</div>
<!-- Footer -->
<footer class="nofixed-bottom">
<div class="container mt-0" style="width:100%;text-align:center;">
© 2025 Tas Lab, The Hong Kong Polytechnic University. All rights reserved.
</div>
</footer>
<!-- JavaScripts -->
<!-- jQuery -->
<script src="https://cdn.jsdelivr.net/npm/jquery@3.6.0/dist/jquery.min.js" integrity="sha256-/xUj+3OJU5yExlq6GSYGSHk7tPXikynS7ogEvDej/m4=" crossorigin="anonymous"></script>
<!-- Bootstrap & MDB scripts -->
<script src="https://cdn.jsdelivr.net/npm/@popperjs/core@2.11.2/dist/umd/popper.min.js" integrity="sha256-l/1pMF/+J4TThfgARS6KwWrk/egwuVvhRzfLAMQ6Ds4=" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.6.1/dist/js/bootstrap.min.js" integrity="sha256-SyTu6CwrfOhaznYZPoolVw2rxoY7lKYKQvqbtqN93HI=" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/mdbootstrap@4.20.0/js/mdb.min.js" integrity="sha256-NdbiivsvWt7VYCt6hYNT3h/th9vSTL4EDWeGs5SN3DA=" crossorigin="anonymous"></script>
<!-- Masonry & imagesLoaded -->
<script defer src="https://cdn.jsdelivr.net/npm/masonry-layout@4.2.2/dist/masonry.pkgd.min.js" integrity="sha256-Nn1q/fx0H7SNLZMQ5Hw5JLaTRZp0yILA/FRexe19VdI=" crossorigin="anonymous"></script>
<script defer src="https://cdn.jsdelivr.net/npm/imagesloaded@4/imagesloaded.pkgd.min.js"></script>
<script defer src="/UrbanV2X/assets/js/masonry.js"></script>
<!-- Medium Zoom JS -->
<script src="https://cdn.jsdelivr.net/npm/medium-zoom@1.0.6/dist/medium-zoom.min.js" integrity="sha256-EdPgYcPk/IIrw7FYeuJQexva49pVRZNmt3LculEr7zM=" crossorigin="anonymous"></script>
<script src="/UrbanV2X/assets/js/zoom.js"></script>
<!-- Common JS (was jammed onto the same line as the zoom script above) -->
<script src="/UrbanV2X/assets/js/common.js"></script>
<!-- MathJax: configuration must run before the library loads; the library itself is deferred -->
<script>
window.MathJax = {
  tex: {
    tags: 'ams'
  }
};
</script>
<script defer id="MathJax-script" src="https://cdn.jsdelivr.net/npm/mathjax@3.2.0/es5/tex-mml-chtml.js"></script>
<!-- FIX(security): removed the https://polyfill.io/v3/polyfill.min.js?features=es6 script.
     The polyfill.io domain changed ownership and was caught serving malicious code to visitors
     (June 2024 supply-chain attack); the ES6 features it shimmed are native in every browser
     this Bootstrap 4 site otherwise supports, so no replacement is needed. -->
</body>
</html>