-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathindex.html
400 lines (367 loc) · 31.1 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
<!DOCTYPE html>
<html lang="en">
<head>
<title>[2024 ICME Grand Challenge] Multi-Modal Video Reasoning and Analyzing Competition (MMVRAC)</title>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://www.w3schools.com/w3css/4/w3.css">
<link rel="stylesheet" href="https://www.w3schools.com/lib/w3-theme-blue-grey.css">
<link rel='stylesheet' href='https://fonts.googleapis.com/css?family=Open+Sans'>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<style>
html, body, h1, h2, h3, h4, h5, h6, h7, h8 {font-family: "Open Sans", sans-serif}
iframe {aspect-ratio: 16/9; height: 400px; width: 100%;}
</style>
<!--iframe {aspect-ratio: 16/9; height: 720px; width: 100%;}-->
</head>
<body class="w3-theme-l5">
<!-- Navbar -->
<div class="w3-top">
<div class="w3-bar w3-theme-d2 w3-left-align w3-large">
<div class="w3-dropdown-hover w3-hide-small w3-left">
<button class="w3-button w3-padding-large" title="Notifications"><i class="fa fa-bell"></i><span class="w3-badge w3-right w3-small w3-red">0</span></button>
<div class="w3-dropdown-content w3-card-4 w3-bar-block" style="width:800px">
<!--
<a href="https://forms.office.com/r/uX4V7XDMD8" class="w3-bar-item w3-button w3-teal">
<h8>06 Feb 2024: <i class="fa fa-trophy" aria-hidden="true"></i> Registration for MMVRAC Challenge opens and Challenge starts</h8>
</a>
-->
<!--<a href="https://forms.office.com/r/DAULcDhhYY" class="w3-bar-item w3-button w3-pink">
<h8>31 Mar 2024: <i class="fa fa-upload" aria-hidden="true"></i> MMVRAC Challenge ends and submit your codes here</h8>
</a>
-->
</div>
</div>
<a class="w3-bar-item w3-button w3-hide-medium w3-hide-large w3-center w3-right w3-padding-large w3-hover-white w3-large w3-theme-d2"><i class="fa fa-bars"></i></a>
<a href="https://sutdcv.github.io/MMVRAC" class="w3-bar-item w3-button w3-padding-large w3-theme-d4 " style="width:90%"><i class="fa fa-home w3-margin-right"></i><b>[2024 ICME Grand Challenge] Multi-Modal Video Reasoning and Analyzing Competition (MMVRAC)</b></a>
</div>
</div>
<!-- Page Container -->
<div class="w3-container w3-content" style="max-width:1500px;margin-top:80px">
<!-- The Grid -->
<div class="w3-row">
<!-- Left Column -->
<div class="w3-col m3">
<!-- Alert Box -->
<div class="w3-container w3-display-container w3-round w3-theme-l4 w3-border w3-theme-border w3-margin-bottom w3-hide-small">
<span onclick="this.parentElement.style.display='none'" class="w3-button w3-theme-l3 w3-display-topright">
<i class="fa fa-remove"></i>
</span>
<p><strong>Hi!</strong></p>
<p>Will you be our next Grand Challenge winner?</p>
</div>
<!-- Profile -->
<div class="w3-card w3-round w3-white">
<div class="w3-container">
<h5 class="w3-center"><b>2024 MMVRAC Winners</b></h5>
<p class="w3-center">
<img src="https://www.w3schools.com/w3images/avatar3.png" class="w3-circle" style="height:80px;width:80px" alt="Avatar">
<img src="https://www.w3schools.com/w3images/avatar4.png" class="w3-circle" style="height:80px;width:80px" alt="Avatar">
</p>
<hr>
<p><i class="fa fa-pencil fa-fw w3-margin-right w3-text-theme"></i><b>Check out the leaderboard posted on 15 July 2024 under MMVRAC Workshop</b></p>
<p><i class="fa fa-birthday-cake fa-fw w3-margin-right w3-text-theme"></i>ICME 2024</p>
</div>
<!--<div class="w3-white">
<button onclick="accordion('Number_of_Teams')" class="w3-button w3-block w3-theme-l1 w3-center"><i class="fa fa-users fa-fw w3-margin-right"></i><b># Participating Teams <br> up till now</b></button>
<div id="Number_of_Teams" class="w3-show w3-container">
<p> <b>10</b> teams for <br><b> Track #1: Spatiotemporal Action Localization (Chaotic World dataset) </b>
<p> <b>5</b> teams for <br><b> Track #2: Behavioral Graph Analysis (Scene Graph Generation) (Chaotic World dataset) </b>
<p> <b>7</b> teams for <br><b> Track #3: Spatiotemporal Event Grounding (Chaotic World dataset) </b>
<p> <b>7</b> teams for <br><b> Track #4: Sound Source Localization (Chaotic World dataset) </b>
<p> <b>10</b> teams for <br><b> Track #5: Video Grounding (Animal Kingdom dataset) </b>
<p> <b>20</b> teams for <br><b> Track #6: Animal Action Recognition (Animal Kingdom dataset) </b>
<p> <b>13</b> teams for <br><b> Track #7: Animal Pose Estimation (Animal Kingdom dataset) </b>
<p> <b>18</b> teams for <br><b> Track #8: Video Question-Answering (SUTD Traffic-QA dataset) </b>
<p> <b>21</b> teams for <br><b> Track #9: Pose Estimation (UAV-Human dataset) </b>
<p> <b>26</b> teams for <br><b> Track #10: Skeleton-based Action Recognition (UAV-Human dataset) </b>
<p> <b>20</b> teams for <br><b> Track #11: Attribute Recognition (UAV-Human dataset) </b>
<p> <b>23</b> teams for <br><b> Track #12: Person Reidentification (UAV-Human dataset) </b>
<br>Updated as of 25 Mar 2024
</div>
</div>-->
</div>
<br>
<!-- Accordion -->
<div class="w3-card w3-round">
<div class="w3-white">
<button onclick="accordion('Organizers')" class="w3-button w3-block w3-theme-l1 w3-center"><i class="fa fa-users fa-fw w3-margin-right"></i><b>Organizers</b></button>
<div id="Organizers" class="w3-show w3-container">
<p><b>Jun LIU</b><br>Singapore University of Technology and Design</p>
<p><b>Bingquan SHEN</b><br>DSO National Laboratories and National University of Singapore</p>
<p><b>Ping HU</b><br>Boston University</p>
<p><b>Kian Eng ONG</b><br>Singapore University of Technology and Design</p>
<p><b>Haoxuan QU</b><br>Singapore University of Technology and Design</p>
<p><b>Duo PENG</b><br>Singapore University of Technology and Design</p>
<p><b>Lanyun ZHU</b><br>Singapore University of Technology and Design</p>
<p><b>Lin Geng FOO</b><br>Singapore University of Technology and Design</p>
<p><b>Xun Long NG</b><br>Singapore University of Technology and Design</p>
</div>
</div>
<button onclick="accordion('Contact')" class="w3-button w3-block w3-theme-l1 w3-center"><i class="fa fa-envelope w3-margin-right"></i><b>Contact</b></button>
<div id="Contact" class="w3-hide w3-container">
<div class="w3-row w3-opacity">
<div class="w3">
<button class="w3-button w3-block w3-green w3-section w3-round-xxlarge" title="Email">
<a href="mailto:[email protected]">
<i class="fa fa-envelope" aria-hidden="true">
<h8>Email Kian Eng ONG</h8>
</i>
</a>
</button>
</div>
</div>
</div>
</div>
<!-- End Left Column -->
</div>
<!-- Middle Column -->
<div class="w3-col m7">
<div class="w3-row-padding">
<div class="w3-col m12">
<div class="w3-card w3-round w3-white">
<div class="w3-container w3-padding w3-center">
The MMVRAC Grand Challenge has concluded.
<br><i class="fa fa-trophy fa-fw"></i> We thank all participants for their effort and time. <i class="fa fa-trophy fa-fw"></i>
<!--<p><b>Participants are required to register their interest to participate in the Challenge.</b><a href="https://forms.office.com/r/uX4V7XDMD8"><button class="w3-button w3-block w3-teal w3-round-xxlarge"><i class="fa fa-trophy" aria-hidden="true"></i> <b>Register for Challenge here</b></button></a></p>-->
<!--<p><b>Deadline has been extended to 31 March 2024 UTC 23:59:00</b><a href="https://forms.office.com/r/DAULcDhhYY"><button class="w3-button w3-block w3-pink w3-round-xxlarge"><i class="fa fa-upload" aria-hidden="true"></i> <b>Submit your Challenge solution here</b></button></a></p>
-->
</div>
</div>
</div>
</div>
<div class="w3-container w3-card w3-white w3-round w3-margin">
<br>
<img src="https://www.w3schools.com/w3images/avatar2.png" alt="Avatar" class="w3-left w3-circle w3-margin-right" style="width:60px">
<span class="w3-right w3-opacity">15 July 2024</span>
<h4><b>MMVRAC Workshop</b></h4>
<hr class="w3-clear">
<div class="w3-card w3-round">
<b>Programme Schedule</b>
<br>
2pm to 5pm, Hennepin Ballroom A &amp; B 1F
<ul>
<li> Keynote Address: Physics-driven AI for multi-modal imaging and restoration by Prof. Bihan Wen, Nanyang Assistant Professor, Nanyang Technological University
<li> Certificate presentation to winners, followed by oral presentation by 1st place winner of each track
<br><b><i class="fa fa-download fa-fw"></i> <a href="https://drive.google.com/drive/folders/1dvw7qC0ccZrv0bnYcB4jaP5ZZDvYbZ5M?usp=sharing"> Download Certificate of Achievement for Winner of each track</a>
</b>
<li> Poster presentation and Networking session
<br><b><i class="fa fa-download fa-fw"></i> <a href="https://drive.google.com/drive/folders/1cghmHe01DjCr7SlSHW1g-9EnBFIfNPSO?usp=sharing"> Download posters</a>
</b>
</ul>
</div>
<div class="w3-card w3-round">
<b>Keynote Address: Physics-driven AI for multi-modal imaging and restoration</b> <br>
by <b>Prof. Bihan Wen, Nanyang Assistant Professor, Nanyang Technological University</b>
<p>
Machine learning techniques have received increasing attention in recent years for solving various imaging problems, creating impacts in important applications, such as remote sensing, medical imaging, smart manufacture, etc. This talk will first review some recent advances in machine learning from modeling, algorithmic, and mathematical perspectives, for imaging and reconstruction tasks. In particular, he will share some of their recent works on the physics-driven deep learning and how it can be applied to the imaging applications for different modalities. He will show how the underlying image model evolves from signal processing to building deep neural networks, while the "old" ways can also join the new. It is critical to incorporate both the flexibility of deep learning and the underlying imaging physics to achieve the state-of-the-art results.
</p>
</div>
<b>Competition Ranking</b>
<iframe title="MMVRAC competition ranking leaderboard" src="https://docs.google.com/spreadsheets/d/e/2PACX-1vSK1q-8yhHIGStS1KmvABN6TNKVNh7aulmfIdLgt4zS8sZGiGWcD2MGrsqF6pvJLm-IwTVOfLgOOjo5/pubhtml?gid=909476870&single=true&widget=true&headers=false"></iframe>
<hr class="w3-clear">
</div>
<div class="w3-container w3-card w3-white w3-round w3-margin">
<br>
<img src="https://www.w3schools.com/w3images/avatar2.png" alt="Avatar" class="w3-left w3-circle w3-margin-right" style="width:60px">
<span class="w3-right w3-opacity">21 April 2024</span>
<h4><b>Certificate of Participation</b></h4>
<br>
<hr class="w3-clear">
<div class="w3-card w3-round">
</div>
<p><b><i class="fa fa-download fa-fw"></i> <a href="https://drive.google.com/drive/folders/10A87CkdPaXFqQ86H-WY79c26JeGxn5SJ?usp=sharing"> Download Certificate of Participation</a>
</b></p>
<p>Participants who have successfully completed the Challenge (submitted code, models etc) should have received their Certificate of Participation through email.</p>
The Winning Teams of each track will be presented with their Winner's Certificate at the ICME workshop.
<!--
<iframe src="https://docs.google.com/spreadsheets/d/e/2PACX-1vSK1q-8yhHIGStS1KmvABN6TNKVNh7aulmfIdLgt4zS8sZGiGWcD2MGrsqF6pvJLm-IwTVOfLgOOjo5/pubhtml?gid=626725591&single=true&widget=true&headers=false"></iframe>-->
<hr class="w3-clear">
</div>
<div class="w3-container w3-card w3-white w3-round w3-margin">
<br>
<img src="https://www.w3schools.com/w3images/avatar2.png" alt="Avatar" class="w3-left w3-circle w3-margin-right" style="width:60px">
<span class="w3-right w3-opacity">25 March 2024</span>
<h4><b>Submission Guidelines</b></h4>
<br>
<hr class="w3-clear">
<div class="w3-card w3-round">
<div class="w3-white">
<button onclick="accordion('Code')" class="w3-button w3-block w3-theme-l1 w3-center"><i class="fa fa-file-code-o w3-margin-right" aria-hidden="true"></i><b>Code Submission Terms and Conditions</b></button>
<div id="Code" class="w3-show w3-container">
<p><strong>Code Submission Terms and Conditions</strong></p>
<p>Participants are to adhere to the following terms and conditions. Failure to do so will result in disqualification from the Challenge.</p>
<ul>
<li>Participants can utilize one or more modalities provided in the dataset for the task.</li>
<li>Participants can also use other datasets or pre-trained models etc.</li>
<li>The model should not be trained on any data from the test set.</li>
<li>Participants are required to submit their results and the link to their training data, code, and model repositories by the Challenge submission deadline.</li>
<li>The codes need to be <strong><u>open-source</u></strong> and the same results can be <strong><u>reproduced</u></strong> by others. Hence, the submission should include:</li>
<ul>
<li>list of Python libraries / dependencies for the codes</li>
<li>pre-training / training data (if different from what we provided) and pre-processing or pre-training codes</li>
<li>training and inference codes</li>
<li>trained model</li>
<li>documentation / instruction / other relevant information to ensure the seamless execution of the codes (e.g., easy means to read in test data directly by simply replacing the test data directory <em>DIR_DATA_TEST</em>)
</ul>
<li>Participants can upload their project (codes and models) to GitHub, Google Drive or any other online storage providers.</li>
</ul>
<a href="https://forms.office.com/r/DAULcDhhYY"><button class="w3-button w3-block w3-pink w3-round-xxlarge"><i class="fa fa-upload" aria-hidden="true"></i> <b>Submit your codes for Challenge here</b></button></a></p>
</div>
</div>
<div class="w3-white">
<button onclick="accordion('Paper')" class="w3-button w3-block w3-theme-l1 w3-center"><i class="fa fa-paperclip w3-margin-right" aria-hidden="true"></i><b>Paper Submission</b></button>
<div id="Paper" class="w3-show w3-container">
<p><strong>Paper Submission</strong></p>
<ul>
<li>We will contact the top teams of each track to submit the paper.</li>
<li>The paper submission will follow the ICME 2024 guidelines (<a href="https://2024.ieeeicme.org/author-information-and-submission-instructions">https://2024.ieeeicme.org/author-information-and-submission-instructions</a>)</li>
<li>Length: Papers must be no longer than 6 pages, including all text, figures, and references. Submissions may be accompanied by up to 20 MB of supplemental material following the same guidelines as regular and special session papers.</li>
<li>Format: Workshop papers have the same format as regular papers.</li>
</ul>
</div>
</div>
</div>
<hr class="w3-clear">
</div>
<div class="w3-container w3-card w3-white w3-round w3-margin">
<br>
<img src="https://www.w3schools.com/w3images/avatar2.png" alt="Avatar" class="w3-left w3-circle w3-margin-right" style="width:60px">
<span class="w3-right w3-opacity">06 February 2024</span>
<h4><b>Challenge Details</b></h4>
<br>
<hr class="w3-clear">
<div class="w3-card w3-round">
<div class="w3-center">Click on the respective headers to view / collapse details</div>
<div class="w3-white">
<button onclick="accordion('Challenge_Synopsis')" class="w3-button w3-block w3-theme-l1 w3-center"><i class="fa fa-info w3-margin-right" aria-hidden="true"></i><b>Challenge Synopsis</b></button>
<div id="Challenge_Synopsis" class="w3-show w3-container">
<p>Given the enormous amount of multi-modal multi-media information that we encounter in our daily lives (including visuals, sounds, texts, and interactions with their surroundings), we humans process such information with great ease – we understand and analyze, think rationally and reason logically, and then predict and make sound judgments and informed decision based on various modalities of information available. </p>
<p>For machines to assist us in holistic understanding and analysis of events, or even to achieve such sophistication of human intelligence (e.g., Artificial General Intelligence (AGI) or even Artificial Super Intelligence (ASI)), they need to process visual information from real-world videos, alongside complementary audio and textual data, about the events, scenes, objects, their attributes, actions, and interactions.</p>
<p>Hence, we hope to further advance such developments in multi-modal video reasoning and analyzing for different scenarios and real-world applications through this Grand Challenge using various challenging multi-modal datasets with different types of computer vision tasks (i.e., video grounding, spatiotemporal event grounding, video question answering, sound source localization, person reidentification, attribute recognition, pose estimation, skeleton-based action recognition, spatiotemporal action localization, behavioral graph analysis, animal pose estimation and action recognition) and multiple types of annotations (i.e., audio, visual, text). This Grand Challenge will culminate in the 2nd Workshop on Multi-Modal Video Reasoning and Analyzing Competition (MMVRAC). </p>
</div>
</div>
<div class="w3-white">
<button onclick="accordion('Challenge_Tracks')" class="w3-button w3-block w3-theme-l1 w3-center"><i class="fa fa-list-ol w3-margin-right" aria-hidden="true"></i><b>Challenge Tracks</b></button>
<div id="Challenge_Tracks" class="w3-show w3-container">
<p>The Challenge Tracks are based on the following video datasets:</p>
<p><b><a href="https://github.com/sutdcv/Chaotic-World">Chaotic World</a></b> (<a href="https://github.com/sutdcv/Chaotic-World">https://github.com/sutdcv/Chaotic-World</a>)
<br>This challenging multi-modal (video, audio, and text) video dataset that focuses on chaotic situations around the world comprises complex and dynamic scenes with severe occlusions and contains over 200,000 annotated instances for various tasks such as spatiotemporal action localization (i.e., spatially and temporally locate the action), behavioral graph analysis (i.e., analyze interactions between people), spatiotemporal event grounding (i.e., identifying relevant segments in long videos and localizing people, scene, and behavior-of-interest), and sound source localization (i.e., spatially locate the source of sound). This will promote deeper research into more robust models that capitalize various modalities and handle such complex human behaviors / interactions in dynamic and complex environments.
<b><br>Track: Spatiotemporal Action Localization
<!--<br>Track #2: Behavioral Graph Analysis (Scene Graph Generation)
<br>Track #3: Spatiotemporal Event Grounding-->
<br>Track: Sound Source Localization</b>
<br><img src="https://www.researchgate.net/profile/Kian-Eng-Ong/publication/373692522/figure/fig1/AS:11431281186670455@1694003491574/Our-Chaotic-World-dataset-contains-complicated-graphs-of-actions-interactions-and-sound_W640.jpg" width="400px"/>
</p>
<p><b><a href="https://github.com/sutdcv/Animal-Kingdom">Animal Kingdom</a></b> (<a href="https://github.com/sutdcv/Animal-Kingdom">https://github.com/sutdcv/Animal-Kingdom</a>)
<br>This animal behavioral analysis dataset comprises 50 hours of long videos with corresponding text descriptions of the scene, animal, and behaviors to localize the relevant temporal segments in long videos for video grounding (i.e., identifying relevant segments in long videos), 50 hours of annotated video sequences for animal action recognition (i.e. identifying the action of the animal), and 33,000 frames for animal pose estimation (i.e., predicting the keypoints of the animal) for a diversity of over 850 species. This will facilitate animal behavior analysis, which is especially important for wildlife surveillance and conservation.
<b><br>Track: Video Grounding
<br>Track: Animal Action Recognition
<br>Track: Animal Pose Estimation for Protocol 1 (All animals)</b>
<br><img src="https://raw.githubusercontent.com/sutdcv/Animal-Kingdom/master/image/eg_ar.png" width="400px"/>
</p>
<!--<p><b><a href="https://github.com/sutdcv/SUTD-TrafficQA">SUTD-TrafficQA</a></b> (<a href="https://github.com/sutdcv/SUTD-TrafficQA">https://github.com/sutdcv/SUTD-TrafficQA</a>)
<br>This traffic event-based multi-modal video reasoning dataset about the traffic scenes contains over 10,000 RGB video samples and 60,000 questions and answers about the traffic scene for video question-answering (i.e., answer questions according to the given videos) task. This will encourage deeper research into more robust models with low latency that can analyze human and vehicle behaviors in dynamic split-seconds scenarios, and will be useful for development of safer autonomous vehicles.
<b><br>Track #8: Video Question-Answering for Setting 1/4</b>
<br><img src="https://raw.githubusercontent.com/sutdcv/SUTD-TrafficQA/master/imgs/featured.png" width="400px"/>
</p>-->
<p><b><a href="https://github.com/sutdcv/UAV-Human">UAV-Human</a></b> (<a href="https://github.com/sutdcv/UAV-Human">https://github.com/sutdcv/UAV-Human</a>)
<br>This human behavior analysis dataset contains more than 60,000 video samples (bird-eye views of people and actions taken from a UAV) for pose estimation (i.e., predicting the keypoints of the individual), skeleton-based action recognition (i.e. identifying the action of the individual based on keypoints), attribute recognition (i.e. identifying the features / characteristics of the individual), as well as person reidentification (i.e. matching a person's identity across various videos) tasks. This will facilitate human behavior analysis from different vantage points, and will be useful for the development and applications of UAVs.
<b><!--<br>Track: Pose Estimation-->
<br>Track: Skeleton-based Action Recognition
<br>Track: Attribute Recognition
<br>Track: Person Reidentification </b>
<br><img src="https://raw.githubusercontent.com/sutdcv/UAV-Human/master/imgs/samples.png" width="400px"/>
</p>
</div>
</div>
<div class="w3-white">
<button onclick="accordion('Challenge_Details')" class="w3-button w3-block w3-theme-l1 w3-center"><i class="fa fa-book w3-margin-right" aria-hidden="true"></i><b>Challenge Details</b></button>
<div id="Challenge_Details" class="w3-show w3-container">
<p><b>Participants are required to register their interest to participate in the Challenge.</b>
<!--<a href="https://forms.office.com/r/uX4V7XDMD8"><button class="w3-button w3-block w3-teal w3-round-xxlarge"><i class="fa fa-trophy" aria-hidden="true"></i> <b>Register for Challenge here</b></button></a></p>-->
<p>The Grand Challenge is open to individuals from institutions of higher education, research institutes, enterprises, or other organizations.</p>
<ul>
<li>Each team can consist of a maximum of 6 members. The team can comprise individuals from different institutions or organizations.</li>
<li>Participants can also participate as individuals.</li>
<li>Participants will take on any one of the tracks or more than one track in the Challenge Tracks.</li>
</ul>
<ul>
<li>All participants who successfully complete the Challenge by the Challenge deadline and adhere to the Submission Guidelines will receive a Certificate of Participation.</li>
<li>
The top three teams of each track are required to submit a paper and present at the MMVRAC workshop (oral or poster session).
<ul>
<li>They will be notified by the Organizers and are required to submit their paper for review by 06 April 2024.</li>
<li>They will need to submit their camera-ready paper, register for the conference by 01 May 2024, and present at the conference; otherwise their paper will not be included in IEEE Xplore.</li>
<li>A Grand Challenge paper is covered by a full-conference registration only.</li>
<li>They will receive the Winners' Certificate and their work (link to their training data, code, and model repositories) will be featured on this website.</li>
</ul>
</li>
</ul>
<p><strong>Evaluation Metric and Judging Criteria</strong></p>
<p>The evaluation metric of each task follows the metric indicated in the original paper.</p>
<ul>
<li>In cases of task whereby there are sub-tasks, the sub-task is specified in the Challenge track.</li>
<li>In cases whereby there are a few evaluation metrics for the task, the combination of all metrics will be taken into consideration to evaluate the robustness of the model.</li>
<li>In cases whereby there are multiple submissions for the same task, only the last submission will be considered.</li>
<li>In the event of same results for the same task, the team with the earlier upload date and time will be considered as the winner.</li>
</ul>
</div>
</div>
</div>
<hr class="w3-clear">
</div>
<!-- End Middle Column -->
</div>
<!-- Right Column -->
<div class="w3-col m2">
<div class="w3-card w3-round w3-white">
<button onclick="accordion('Timeline')" class="w3-button w3-block w3-theme-l1 w3-left-align"><i class="fa fa-calendar-check-o fa-fw w3-margin-right"></i><b>Timeline</b></button>
<div id="Timeline" class="w3-hide w3-container">
<!--<p class="w3-block w3-round w3-padding-small w3-teal"><b>06 February 2024</b><br><i class="fa fa-trophy" aria-hidden="true"></i> <b>Registration opens and Challenge starts</b></p>
<p class="w3-block w3-round w3-padding-small w3-pink"><b>31 Mar 2024<br>UTC 23:59:00</b><br><i class="fa fa-upload" aria-hidden="true"></i> <b><a href="https://forms.office.com/r/DAULcDhhYY">Deadline for Challenge submission and submit your codes here</a> </b></p>
<p class="w3-block w3-round w3-padding-small w3-white"><b>07 April 2024<br>UTC 23:59:00</b><br><i class="fa fa-upload" aria-hidden="true"></i> Deadline for submitting invited papers</p>
<p class="w3-block w3-round w3-padding-small w3-white"><b>11 April 2024</b><br><i class="fa fa-envelope" aria-hidden="true"></i> Notification of paper acceptance</p>-->
<p class="w3-block w3-round w3-padding-small w3-white"><b>01 May 2024</b><br><i class="fa fa-upload" aria-hidden="true"></i> Deadline for camera-ready submission of accepted paper</p>
<p class="w3-block w3-round w3-padding-small w3-white"><b>01 May 2024</b><br><i class="fa fa-user-plus" aria-hidden="true"></i> Author Full Conference Registration</p>
</div>
</div>
<div class="w3-card w3-round w3-white">
<button onclick="accordion('Other_Events')" class="w3-button w3-block w3-theme-l1 w3-left-align"><i class="fa fa-calendar fa-fw w3-margin-right"></i><b>Other Events</b></button>
<div id="Other_Events" class="w3-show w3-container">
<p class="w3-block w3-round w3-padding-small w3-blue"><i class="fa fa-paperclip" aria-hidden="true"></i> <a href="https://sutdcv.github.io/VLMetaverse"><b>Might you also be interested in our Metaverse workshop?</b></a></p>
</div>
</div>
<!-- End Right Column -->
</div>
<!-- End Grid -->
</div>
<!-- End Page Container -->
</div>
<br>
<!-- Footer -->
<footer class="w3-container w3-theme-d3 w3-padding-16 w3-center">
<h5><a href="https://2024.ieeeicme.org/">[2024 ICME Grand Challenge]</a> Multi-Modal Video Reasoning and Analyzing Competition (MMVRAC)</h5>
</footer>
<footer class="w3-container w3-theme-d5 w3-center">
<p>Powered by <a href="https://www.w3schools.com/w3css/default.asp" target="_blank">w3.css</a></p>
</footer>
<script>
// Accordion
function accordion(id) {
var x = document.getElementById(id);
if (x.className.indexOf("w3-show") == -1) {
x.className += " w3-show";
x.previousElementSibling.className += " w3-theme-d1";
} else {
x.className = x.className.replace("w3-show", "");
x.previousElementSibling.className =
x.previousElementSibling.className.replace(" w3-theme-d1", "");
}
if (x.className.indexOf("w3-hide") == -1) {
x.className = x.className.replace("w3-show", "w3-hide");
}
}
</script>
</body>
</html>