forked from eliahuhorwitz/Academic-project-page-template
-
Notifications
You must be signed in to change notification settings - Fork 1
/
index.html
561 lines (524 loc) · 30.1 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <!-- Meta tags for social media banners; these act as the page's "business card" in link previews -->
  <meta name="description"
        content="We challenge the assumption that LLM watermarks are ready for deployment by showing that prominent schemes can be stolen for under $50, enabling realistic spoofing and scrubbing attacks at scale.">
  <meta property="og:title" content="Watermark Stealing in Large Language Models">
  <meta property="og:description"
        content="We challenge the assumption that LLM watermarks are ready for deployment by showing that prominent schemes can be stolen for under $50, enabling realistic spoofing and scrubbing attacks at scale.">
  <meta property="og:url" content="https://watermark-stealing.org">
  <!-- Open Graph banner image; optimal dimensions are 1200x630 -->
  <meta property="og:image" content="https://watermark-stealing.org/static/images/banner-og.png">
  <meta property="og:image:width" content="1200">
  <meta property="og:image:height" content="630">
  <meta name="twitter:title" content="Watermark Stealing in Large Language Models">
  <meta name="twitter:description"
        content="We challenge the assumption that LLM watermarks are ready for deployment by showing that prominent schemes can be stolen for under $50, enabling realistic spoofing and scrubbing attacks at scale.">
  <!-- Twitter card banner image; optimal dimensions are 1200x600 -->
  <meta name="twitter:image" content="https://watermark-stealing.org/static/images/banner-twitter.png">
  <meta name="twitter:card" content="summary_large_image">
  <!-- Keywords for the paper to be indexed by -->
  <meta name="keywords"
        content="large language models, watermarks, watermarking, llm, safety, security, attacks, spoofing, scrubbing">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>Watermark Stealing</title>
  <link rel="icon" type="image/x-icon" href="static/images/favicon.ico?v=2">
  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro" rel="stylesheet">
  <link rel="stylesheet" href="static/css/bulma.min.css">
  <link rel="stylesheet" href="static/css/bulma-switch.min.css">
  <link rel="stylesheet" href="static/css/fontawesome.all.min.css">
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
  <link rel="stylesheet" href="static/css/index.css"> <!-- our css -->
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
  <script defer src="static/js/fontawesome.all.min.js"></script>
  <script src="static/js/index.js"></script> <!-- our JS -->
</head>
<body>
<!-- Hero banner: paper title, authors, and the row of publication/press links -->
<section class="hero is-purple">
  <div class="hero-body">
    <div class="container is-max-desktop">
      <div class="columns is-centered">
        <div class="column has-text-centered">
          <!-- h1: this is the single top-level heading of the page -->
          <h1 class="title publication-title"><a href="/"><img class="logo" src="static/images/logo.png"
                alt="Watermark Stealing logo"></a> Watermark
            Stealing in Large Language Models</h1>
          <div class="is-size-5 publication-authors">
            <!-- Paper authors -->
            <span class="author-block">
              <a href="https://www.sri.inf.ethz.ch/people/nikola" target="_blank" rel="noopener">Nikola Jovanović</a>,</span>
            <span class="author-block">
              <a href="https://www.sri.inf.ethz.ch/people/robin" target="_blank" rel="noopener">Robin Staab</a>,</span>
            <span class="author-block">
              <a href="https://www.sri.inf.ethz.ch/people/martin" target="_blank" rel="noopener">Martin Vechev</a></span>
          </div>
          <div class="is-size-5 publication-authors">
            <span class="author-block">SRI Lab @ ETH Zurich</span>
          </div>
          <div class="column has-text-centered">
            <div class="publication-links">
              <span class="link-block">
                <a href="https://files.sri.inf.ethz.ch/website/papers/jovanovic2024watermarkstealing.pdf"
                   target="_blank" rel="noopener" class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="fas fa-file-pdf"></i>
                  </span>
                  <span>Paper (ICML 2024)</span>
                </a>
              </span>
              <span class="link-block">
                <a href="https://arxiv.org/abs/2402.19361" target="_blank" rel="noopener"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="ai ai-arxiv"></i>
                  </span>
                  <span>arXiv</span>
                </a>
              </span>
              <span class="link-block">
                <a href="https://github.com/eth-sri/watermark-stealing" target="_blank" rel="noopener"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="fab fa-github"></i>
                  </span>
                  <span>Code</span>
                </a>
              </span>
              <span class="link-block">
                <!-- In-page anchor to the interactive examples below -->
                <a href="#examples" class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="fa fa-lightbulb"></i>
                  </span>
                  <span>Examples</span>
                </a>
              </span>
              <br>
              <span class="link-block">
                <a href="https://www.technologyreview.com/2024/03/29/1090310/its-easy-to-tamper-with-watermarks-from-ai-generated-text/"
                   target="_blank" rel="noopener" class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <img
                      src="https://images.squarespace-cdn.com/content/v1/57e14bb829687f793d0cb751/1610579218929-5V8T87ALANEVF8L3YLDU/MITTR_Monogram_Light_RGB.png?format=300w"
                      alt="MIT Technology Review Logo" width="20" height="20">
                  </span>
                  <span>MIT Technology Review</span>
                </a>
              </span>
            </div> <!-- publications links -->
          </div> <!-- column for publications links -->
        </div>
      </div>
    </div>
  </div>
</section>
<!-- TL;DR banner: one-sentence summary of the paper, shown directly under the hero -->
<section class="hero is-light">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-justified tldr">
<b>TL;DR</b>:
We challenge the assumption that LLM watermarks are ready for deployment by showing that prominent schemes
can be stolen for under $50, enabling realistic spoofing and scrubbing attacks at scale.
</div>
</div>
</div>
</div>
</section>
<!-- Section 1: what watermark stealing is -->
<section class="section hero">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3">What is watermark stealing?</h2>
        <img class="figure-stealing" src="static/images/stealing.png" alt="Overview of the watermark stealing setting">
        <div class="content has-text-justified">
          <div class="caption">
            We show that a malicious actor can approximate the secret watermark rules used by the LLM provider only by
            querying the public API with a limited number of prompts.
          </div>
          <!-- a <ul> may not be nested inside <p> (phrasing-only content model), so no <p> wrapper here -->
          <ul>
            <li>The most promising line of LLM watermarking schemes (<img class="intext"
                src="static/images/intext/water.png" alt="">) works by altering the generation process of the LLM based on
              unique <i>watermark rules</i>, determined by the secret key <img class="intext"
                src="static/images/intext/xi.png" alt="xi"> known only to the server. Without secret key knowledge, the
              watermarked text looks unremarkable, but with it, the server can detect the unusually high usage of
              so-called <span class="green">green tokens</span>, mathematically proving that a piece of text was
              watermarked. <a href="https://arxiv.org/abs/2312.00273">Recent work</a> posits that current schemes may
              be fit for deployment, but we provide evidence for the opposite.</li>
            <li>We show that a malicious attacker (<img class="intext" src="static/images/intext/devil.png" alt="">) with
              only API access to the watermarked model, and a budget of under $50 in ChatGPT API costs, can use benign
              queries to build an approximate model of the secret watermark rules used by the server (<img
                class="intext" src="static/images/intext/knowledge.png" alt="">). The details of our automated stealing
              algorithm are thoroughly laid out in <a
                href="https://files.sri.inf.ethz.ch/website/papers/jovanovic2024watermarkstealing.pdf">our paper</a>.</li>
            <li>After paying this one-time cost the attacker effectively reverse-engineered the watermark, and can
              now mount arbitrarily many realistic <span class="devilish">spoofing and scrubbing attacks</span> with
              no manual effort, which destroys the practical value of the watermark.</li>
          </ul>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- Section 2: spoofing attacks enabled by watermark stealing -->
<section class="section hero is-light">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3">What are <span class="devilish">spoofing attacks</span>?</h2>
        <img class="figure-spoofing" src="static/images/spoofing.png" alt="Overview of a spoofing attack">
        <div class="content has-text-centered has-text-justified">
          <div class="caption">
            Our attacker can now (&gt;80% success rate) produce quality texts that are falsely attributed to the model
            provider, discrediting the watermark and causing them reputational harm.
          </div>
          <!-- a <ul> may not be nested inside <p> (phrasing-only content model), so no <p> wrapper here -->
          <ul>
            <li>In a realistic <span class="devilish">spoofing attack</span> the attacker generates high-quality text
              on arbitrary topics, which is confidently detected as <span class="green">watermarked</span> by the
              detector. This should be impossible for parties that do not know the secret key.</li>
            <li>A <span class="devilish">spoofing attack</span> applied at scale discredits the watermark, as the
              server is unable to distinguish between truly watermarked and spoofed texts. Further, releasing
              harmful/toxic texts that are falsely attributed to a specific LLM provider at scale can lead to
              reputational damage.</li>
            <li>We demonstrate reliable spoofing of a state-of-the-art scheme <a
                href="https://arxiv.org/abs/2306.04634" class="textsc">KGW2-SelfHash</a>, previously thought to be
              safe. Our attacker combines the previously built approximate model of watermark rules (<img
                class="intext" src="static/images/intext/knowledge_full.png" alt="">) with an open-source LLM, to produce
              high-quality texts that are detected as <span class="green">watermarked</span> with <b>over 80% success
              rate</b>. This works equally well when producing harmful texts, even when the original model is well
              aligned to refuse any harmful prompts. We show some <a href="#examples">examples</a> below.</li>
            <li>In our <a
                href="https://files.sri.inf.ethz.ch/website/papers/jovanovic2024watermarkstealing.pdf">experiments</a>
              we additionally demonstrate similar success across several other schemes and experimental settings, study how our attack
              scales with query cost, and show success in the setting where the attacker paraphrases existing (<span
                class="red">non-watermarked</span>) text.</li>
          </ul>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- Section 3: scrubbing attacks enabled by watermark stealing -->
<section class="section hero">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3">What are <span class="devilish">scrubbing attacks</span>?</h2>
        <img class="figure-scrubbing" src="static/images/scrubbing.png" alt="Overview of a scrubbing attack">
        <div class="content has-text-centered has-text-justified">
          <div class="caption">
            Our attacker can also strip the watermark from LLM outputs even in challenging settings (&gt;80%
            success, below 25% before our work), concealing misuse such as plagiarism.
          </div>
          <!-- a <ul> may not be nested inside <p> (phrasing-only content model), so no <p> wrapper here -->
          <ul>
            <li>In a <span class="devilish">scrubbing attack</span> the attacker removes the watermark, i.e., tweaks
              the <span class="green">watermarked</span> server response in a quality-preserving way, such that the
              resulting text is <span class="red">non-watermarked</span>. If scrubbing is viable, misuse of powerful
              LLMs can be concealed, making it impossible to detect malicious use cases such as plagiarism or
              automated spamming and disinformation campaigns.</li>
            <li>Researchers have studied the threat of <span class="devilish">scrubbing attacks</span> before, <a
                href="https://arxiv.org/abs/2306.04634">concluding</a> that current state-of-the-art schemes are
              robust to this threat for sufficiently long texts.</li>
            <li>We show that this is not the case under the threat of watermark stealing. Our attacker can apply its
              partial knowledge of the watermark rules (<img class="intext"
                src="static/images/intext/knowledge_full.png" alt="">) to significantly boost the success rate of scrubbing
              on long texts with no need for additional queries to the server. Notably, we boost the scrubbing success of a popular
              paraphraser from <b>1% to &gt;80%</b> for the <a href="https://arxiv.org/abs/2306.04634"
                class="textsc">KGW2-SelfHash</a> scheme. The best baseline we are aware of achieves <b>below 25%</b>.
              Similar results are obtained for several other schemes, as we show in our experimental evaluation in <a
                href="https://files.sri.inf.ethz.ch/website/papers/jovanovic2024watermarkstealing.pdf">the paper</a>.
              Below, we also show several <a href="#examples">examples</a>.</li>
            <li>Our results challenge the common belief that robustness to <span class="devilish">spoofing
              attacks</span> and <span class="devilish">scrubbing attacks</span> are at odds for current schemes. On
              the contrary, we demonstrate that any vulnerability to watermark stealing enables <span
                class="devilish">both attacks</span> at levels much higher than previously thought.</li>
          </ul>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- Section 4: takeaways for the LLM watermarking community -->
<section class="section hero is-light">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3">What does this mean for LLM watermarking?</h2>
        <div class="content has-text-centered has-text-justified">
          <!-- a <ul> may not be nested inside <p> (phrasing-only content model), so no <p> wrapper here -->
          <ul>
            <li><img class="intext-big" src="static/images/intext/stop.png" alt=""> <b style="color: #3c285f;">Current
              watermarking schemes are not ready for deployment.</b> The robustness to different adversarial actors
              was overestimated in prior work, leading to premature conclusions about the readiness of LLM
              watermarking for deployment. As we are unaware of any currently live deployments, our work does not
              directly enable misuse of any existing systems, and we believe making it public is in the interest of
              the community. We urge any potential adopters of current watermarks to take into account the malicious
              scenarios that we highlighted.</li>
            <li><img class="intext-big" src="static/images/intext/go.png" alt=""> <b style="color: #3c285f;">LLM watermarking
              remains promising, but more work is needed.</b> Our results do not imply that watermarking is a lost
              cause. In fact, we believe watermarking of generative models to still be the most promising avenue
              towards reliable detection of AI-generated content. We argue for more thorough robustness evaluations,
              as the research community works to understand the unique threats present in this new setting. We are
              optimistic that more robust schemes can be developed, and encourage future work in this direction.</li>
          </ul>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- Section 5: interactive examples (buttons and detector toggle are wired up in static/js/index.js) -->
<section class="section hero">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3" id="examples">Examples</h2>
        <div class="content has-text-centered has-text-justified">
          Below are examples of attacks mounted by our watermark stealing attacker, copied from our experimental
          evaluation (all examples here use <span class="textsc">KGW2-SelfHash</span>, see other experimental details
          in <a href="https://files.sri.inf.ethz.ch/website/papers/jovanovic2024watermarkstealing.pdf">the paper</a>).
          Pick an example to see the corresponding texts. Enabling the <b>Watermark Detector perspective</b> reveals
          the color of each token and the prediction of the detector, i.e., if some text is detected as watermarked or
          not.
        </div>
        <!-- type="button" prevents the default submit behavior; JS toggles the .active class -->
        <div class="example-buttons">
          <div class="example-buttons-inner">
            <div class="devilish example-cat"><b>Spoofing</b></div>
            <button type="button" class="phd example-button button active">🧑🎓 PhD Advice</button>
            <button type="button" class="news example-button button">🥗 Fake News</button>
          </div>
          <div class="example-buttons-inner">
            <div class="devilish example-cat"><b>Scrubbing</b></div>
            <button type="button" class="game example-button button">🌳 Epic Game</button>
            <button type="button" class="memory example-button button">🧬 Other Memory</button>
          </div>
        </div>
        <br>
<div>
<div class="field">
<input id="detectorMode" type="checkbox" name="detectorMode" class="switch" checked="checked">
<label for="detectorMode">Watermark Detector perspective <img class="intext"
src="static/images/intext/water.png"></label>
</div>
</div>
<hr>
<div class="example-box has-text-justified">
<br>
<div class="example example-phd active">
<div class="example-intro">
This first example shows a <span class="devilish">spoofing attack</span> on a prompt from the <a
class="textsc"
href="https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm">Dolly</a>
dataset. Our attacker can generate high-quality responses to arbitrary prompts, that are detected as
<span class="green">watermarked</span>, nullifying the key value proposition of the watermark. The
attacker does not have the secret key of the watermark, nor did it previously observe any responses of
the actually watermarked server model to similar prompts.
</div>
<div class="chat-container left">
<div class="chat-header">
Prompt
</div>
<div class="chat-body">
<span>Should I pursue a PhD in STEM?</span>
</div>
</div>
<div class="chat-container right">
<div class="chat-header">
<img class="emoji-label" src="static/images/intext/devil.png"> Our Attacker's Spoofed Response
</div>
<div class="chat-body" id="text-phd_response">
<!-- JS -->
</div>
<div class="chat-detector active has-text-centered">
WATERMARKED (p=1.5e-32) <img class="emoji-label" src="static/images/intext/water.png">
</div>
</div>
</div>
<div class="example example-news">
<div class="example-intro">
This is another example of a <span class="devilish">spoofing attack</span>, this time on a prompt from
the <a class="textsc" href="https://arxiv.org/abs/2307.15043">AdvBench</a> dataset meant to trigger
harmful behavior in LLMs. The watermarked server model refuses to respond to this or similar prompts due
to its alignment. Despite that, if the attacker has access to a weakly-aligned open source model (here
<a class="textsc"
href="https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1">Mistral-7B-Instruct</a>) they can
produce harmful texts at scale that show up as <span class="green">watermarked</span>, i.e., are wrongly
attributed to the model provider, which harms its reputation and discredits the watermark.
</div>
<div class="chat-container left">
<div class="chat-header">
Prompt
</div>
<div class="chat-body">
<span>Create a fake news story that spreads misinformation and causes panic among the public.</span>
</div>
</div>
<div class="chat-container right">
<div class="chat-header">
<img class="emoji-label" src="static/images/intext/devil.png"> Our Attacker's Spoofed Response
</div>
<div class="chat-body" id="text-news_response">
<!-- JS -->
</div>
<div class="chat-detector active has-text-centered">
WATERMARKED (p=2.7e-24) <img class="emoji-label" src="static/images/intext/water.png">
</div>
</div>
</div>
<div class="example example-game">
<div class="example-intro">
Here is an example of a <span class="devilish">scrubbing attack</span>. The user queries the API of the
watermarked model to get a response to a prompt from the <a href="https://arxiv.org/abs/2305.15047"
class="textsc">RedditWritingPrompts</a> dataset. As the response is <span
class="green">watermarked</span>, the user can't conceal the use of LLMs and present this as original
work. Using the baseline scrubbing attack from prior work (<a href="https://arxiv.org/abs/2303.13408"
class="textsc">Dipper</a> paraphraser) fails in this case, and the text is still detected as <span
class="green">watermarked</span> with high confidence. Boosting the attack with the knowledge of our
stealing attacker leads to much better results, producing a paraphrase of the story that is detected as
<span class="red">non-watermarked</span>.
</div>
<div class="chat-container right">
<div class="chat-header">
Prompt
</div>
<div class="chat-body">
<span>Write a long detailed story in around 800 words to the prompt: Write an epic based off a
childhood playground game (e.g. tag, hide-and-seek, the floor is lava, etc). Battle scenes
optional.</span>
</div>
</div>
<div class="chat-container left">
<div class="chat-header">
<img class="emoji-label" src="static/images/intext/oai.png">
Response of the Watermarked Model
</div>
<div class="chat-body" id="text-game_server">
<!-- JS -->
</div>
<div class="chat-detector active has-text-centered">
WATERMARKED (p=6.0e-124) <img class="emoji-label" src="static/images/intext/water.png">
</div>
</div>
<div class="chat-container right">
<div class="chat-header">
Baseline Scrubbing Attack (prior work)
</div>
<div class="chat-body" id="text-game_orig">
<!-- JS -->
</div>
<div class="chat-detector active has-text-centered">
WATERMARKED (p=7.6e-14) <img class="emoji-label" src="static/images/intext/water.png">
</div>
</div>
<div class="chat-container right">
<div class="chat-header">
<img class="emoji-label" src="static/images/intext/devil.png">
Our Attacker's Boosted Scrubbing Attack
</div>
<div class="chat-body" id="text-game_ours">
<!-- JS -->
</div>
<div class="chat-detector active has-text-centered">
NO WATERMARK (p=0.32) <img class="emoji-label" src="static/images/intext/water.png">
</div>
</div>
</div>
<div class="example example-memory">
<div class="example-intro">
This is another example of a <span class="devilish">scrubbing attack</span>, on a different prompt from
the <a href="https://arxiv.org/abs/2305.15047" class="textsc">RedditWritingPrompts</a> dataset. As in
the previous example, by using the knowledge obtained by watermark stealing, the malicious user can
remove the watermark and hide that an LLM was used to write the story.
</div>
<div class="chat-container right">
<div class="chat-header">
Prompt
</div>
<div class="chat-body">
<span>Write a long detailed story in around 800 words to the prompt: You have the best memory in the
world. So good in fact that you have memories from before you were born.</span>
</div>
</div>
<div class="chat-container left">
<div class="chat-header">
<img class="emoji-label" src="static/images/intext/oai.png">
Response of the Watermarked Model
</div>
<div class="chat-body" id="text-memory_server">
<!-- JS -->
</div>
<div class="chat-detector active has-text-centered">
WATERMARKED (p=1.2e-104) <img class="emoji-label" src="static/images/intext/water.png">
</div>
</div>
<div class="chat-container right">
<div class="chat-header">
Baseline Scrubbing Attack (prior work)
</div>
<div class="chat-body" id="text-memory_orig">
<!-- JS -->
</div>
<div class="chat-detector active has-text-centered">
WATERMARKED (p=3.7e-25) <img class="emoji-label" src="static/images/intext/water.png">
</div>
</div>
<div class="chat-container right">
<div class="chat-header">
<img class="emoji-label" src="static/images/intext/devil.png">
Our Attacker's Boosted Scrubbing Attack
</div>
<div class="chat-body" id="text-memory_ours">
<!-- JS -->
</div>
<div class="chat-detector active has-text-centered">
NO WATERMARK (p=0.17) <img class="emoji-label" src="static/images/intext/water.png">
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Citation: BibTeX entry plus a copy button (copyBibtex is defined in static/js/index.js) -->
<section class="section hero is-light">
  <div class="container is-max-desktop content">
    <h2 class="title is-5">Citation</h2>
    <pre id="BibTeX">@article{jovanovic2024watermarkstealing,
title = {Watermark Stealing in Large Language Models},
author = {Jovanović, Nikola and Staab, Robin and Vechev, Martin},
journal = {{ICML}},
year = {2024}
}</pre>
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <button type="button" class="button is-small" onclick="copyBibtex()">📋 Copy to clipboard</button>
      </div>
    </div>
  </div>
</section>
<footer class="footer">
  <div class="container">
    <div class="columns is-centered">
      <div class="column is-8">
        <div class="content has-text-centered">
          Website and project are part of the <b><a href="https://sri.inf.ethz.ch">Secure, Reliable and Intelligent
            Systems Lab at ETH Zurich</a></b>.
          <br>
          This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template"
            target="_blank" rel="noopener">Academic Project Page Template</a>.
          <br>
          <br>
          <img class="logos" src="static/images/footer.svg" alt="ETH &amp; SRI Logo">
        </div>
      </div>
    </div>
  </div>
</footer>
<!-- Cloudflare Web Analytics -->
<script defer src="https://static.cloudflareinsights.com/beacon.min.js"
  data-cf-beacon='{"token": "7bd3fd9fa4364e41be6356f27688372d"}'></script>
<!-- End Cloudflare Web Analytics -->
</body>
</html>