Skip to content

Commit 6bd66c0

Browse files
committed
more pictures & news
2 parents bbfe9fe + a506df1 commit 6bd66c0

33 files changed

+138
-45
lines changed

_bibliography/preprints.bib

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ @misc{jacobs2025hamhyperbolicstepregulate
88
archivePrefix={arXiv},
99
primaryClass={cs.LG},
1010
url={https://arxiv.org/abs/2506.02630},
11+
img={ham-hyperbolic-step.png},
1112
}
1213
@misc{reddy2025shifthappensconfounding,
1314
title={When Shift Happens - Confounding Is to Blame},
@@ -17,6 +18,7 @@ @misc{reddy2025shifthappensconfounding
1718
archivePrefix={arXiv},
1819
primaryClass={cs.LG},
1920
url={https://arxiv.org/abs/2505.21422},
21+
img={when-shift-happens.png},
2022
}
2123
@misc{gadhikar2024cyclicsparsetrainingenough,
2224
title={Cyclic Sparse Training: Is it Enough?},
@@ -27,6 +29,7 @@ @misc{gadhikar2024cyclicsparsetrainingenough
2729
primaryClass={cs.LG},
2830
url={https://arxiv.org/abs/2406.02773},
2931
code={https://github.com/RelationalML/TurboPrune},
32+
img={cyclic-train.png},
3033
}
3134
@misc{fischer2022lotteryticketsnonzerobiases,
3235
title={Lottery Tickets with Nonzero Biases},

_bibliography/references.bib

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ @inproceedings{ zhou2025payattentionsmallweights
88
url={https://openreview.net/forum?id=XKnOA7MhCz},
99
pdf={https://openreview.net/pdf?id=XKnOA7MhCz},
1010
abstract={Finetuning large pretrained neural networks is known to be resource-intensive, both in terms of memory and computational cost. To mitigate this, a common approach is to restrict training to a subset of the model parameters. By analyzing the relationship between gradients and weights during finetuning, we observe a notable pattern: large gradients are often associated with small-magnitude weights. This correlation is more pronounced in finetuning settings than in training from scratch. Motivated by this observation, we propose NANOADAM, which dynamically updates only the small-magnitude weights during finetuning and offers several practical advantages: first, this criterion is gradient-free -- the parameter subset can be determined without gradient computation; second, it preserves large-magnitude weights, which are likely to encode critical features learned during pretraining, thereby reducing the risk of catastrophic forgetting; thirdly, it permits the use of larger learning rates and consistently leads to better generalization performance in experiments. We demonstrate this for both NLP and vision tasks.},
11+
img={smallweights.png}
1112
}
1213

1314
@inproceedings{ Gadhikar2025SignInTT,
@@ -29,6 +30,7 @@ @inproceedings{ pham2025the
2930
url={https://openreview.net/forum?id=EEZLBhyer1},
3031
pdf={https://openreview.net/pdf?id=EEZLBhyer1},
3132
abstract={Sparse neural networks promise efficiency, yet training them effectively remains a fundamental challenge. Despite advances in pruning methods that create sparse architectures, understanding why some sparse structures are better trainable than others with the same level of sparsity remains poorly understood. Aiming to develop a systematic approach to this fundamental problem, we propose a novel theoretical framework based on the theory of graph limits, particularly graphons, that characterizes sparse neural networks in the infinite-width regime. Our key insight is that connectivity patterns of sparse neural networks induced by pruning methods converge to specific graphons as networks' width tends to infinity, which encodes implicit structural biases of different pruning methods. We postulate the Graphon Limit Hypothesis and provide empirical evidence to support it. Leveraging this graphon representation, we derive a Graphon Neural Tangent Kernel (Graphon NTK) to study the training dynamics of sparse networks in the infinite width limit. Graphon NTK provides a general framework for the theoretical analysis of sparse networks. We empirically show that the spectral analysis of Graphon NTK correlates with observed training dynamics of sparse networks, explaining the varying convergence behaviours of different pruning methods. Our framework provides theoretical insights into the impact of connectivity patterns on the trainability of various sparse network architectures.},
33+
img={graphons.png}
3234
}
3335

3436
@inproceedings{jacobs2025mirror,
@@ -62,7 +64,7 @@ @inproceedings{
6264
year={2025},
6365
url={https://openreview.net/forum?id=g6v09VxgFw},
6466
pdf={https://openreview.net/pdf?id=g6v09VxgFw},
65-
img={gnns-getting-comfy.png},
67+
img={small-comfy.png},
6668
abstract={Maximizing the spectral gap through graph rewiring has been proposed to enhance the performance of message-passing graph neural networks (GNNs) by addressing over-squashing. However, as we show, minimizing the spectral gap can also improve generalization. To explain this, we analyze how rewiring can benefit GNNs within the context of stochastic block models. Since spectral gap optimization primarily influences community strength, it improves performance when the community structure aligns with node labels. Building on this insight, we propose three distinct rewiring strategies that explicitly target community structure, node labels, and their alignment: (a) community structure-based rewiring (ComMa), a more computationally efficient alternative to spectral gap optimization that achieves similar goals; (b) feature similarity-based rewiring (FeaSt), which focuses on maximizing global homophily; and (c) a hybrid approach (ComFy), which enhances local feature similarity while preserving community structure to optimize label-community alignment. Extensive experiments confirm the effectiveness of these strategies and support our theoretical insights.},
6769
code={https://github.com/RelationalML/ComFy}
6870
}
@@ -75,7 +77,7 @@ @inproceedings{
7577
year={2024},
7678
url={https://openreview.net/forum?id=EMkrwJY2de},
7779
pdf={https://openreview.net/pdf?id=EMkrwJY2de},
78-
img={spectral-graph-pruning.png},
80+
img={small-spectral.png},
7981
abstract={Message Passing Graph Neural Networks are known to suffer from two problems that are sometimes believed to be diametrically opposed: over-squashing and over-smoothing. The former results from topological bottlenecks that hamper the information flow from distant nodes and are mitigated by spectral gap maximization, primarily, by means of edge additions. However, such additions often promote over-smoothing that renders nodes of different classes less distinguishable. Inspired by the Braess phenomenon, we argue that deleting edges can address over-squashing and over-smoothing simultaneously. This insight explains how edge deletions can improve generalization, thus connecting spectral gap optimization to a seemingly disconnected objective of reducing computational resources by pruning graphs for lottery tickets. To this end, we propose a computationally effective spectral gap optimization framework to add or delete edges and demonstrate its effectiveness on the long range graph benchmark and on larger heterophilous datasets.},
8082
code={https://github.com/RelationalML/SpectralPruningBraess}
8183
}

_data/alumni_members.yml

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
start_date: Aug 23
2323
end_date: Jul 24
2424
url: https://nelaturuharsha.github.io/
25-
next: PhD candidate at Zuse Institute Berlin
25+
next: Applied Scientist Intern at Amazon
2626

2727
- name: Advait Athreya
2828
last_name: Athreya
@@ -61,8 +61,14 @@
6161
start_date: Jun 24
6262
end_date: Aug 24
6363

64-
- role: Master's thesis
64+
- role: Past Master's theses
6565
members:
66+
- name: Sree Harsha Nelaturu
67+
thesis: "Bridging the Gap: Improving Random Sparse Masks with Distillation-Guided Optimization"
68+
date_thesis: 2025
69+
- name: Nirav Shenoy
70+
thesis: "Efficient Sparse Training: Combining Continuous Sparsification with LRR and Rewiring"
71+
date_thesis: 2025
6672
- name: Adarsh Jamadandi
6773
thesis: On the Importance of Graph-Task Alignment for Graph Neural Networks
6874
url_thesis: https://adarshmj.github.io/assets/Graph-TaskAlignment.pdf

_data/news.yml

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,12 @@
11
- date: 10. November 2025
22
headline: "Celia and Tom are presenting at the Workshop on Geometry, Topology, and Machine Learning ([GTML](https://www.mis.mpg.de/events/series/workshop-on-geometry-topology-and-machine-learning-gtml-2025)) in Leipzig."
33

4+
- date: 8. October 2025
5+
headline: "Baraah is presenting her work on game-aware optimization at the [Shocklab](https://shocklab.net/seminars/) online seminar."
6+
7+
- date: 6. October 2025
8+
headline: "Welcome to Adnan (University of Calgary) and Levy (University of Campinas) for research stays. Furthermore, Prof. [Yani Ioannou](https://yani.ai/) (University of Calgary) is visiting us for a talk on sparse training."
9+
410
- date: 18. September 2025
511
headline: "Three papers
612
[(1)](https://openreview.net/forum?id=XKnOA7MhCz)
@@ -12,7 +18,7 @@
1218
headline: "Rebekka and Celia are presenting at the Workshop on Mining and Learning with Graphs ([MLG](https://mlg-europe.github.io/2025/)) in Porto with a keynote and two posters, respectively."
1319

1420
- date: 14. August 2025
15-
headline: "Tom is [presenting](https://cohere.com/events/Cohere-Labs-Tom-Jacobs-2025) his work on implicit regularization at Cohere Labs: Open Science Community ([video](/outreach#tom-jacobs--cohere-labs-aug-14-2025))."
21+
headline: "Tom is presenting his work on implicit regularization at [Cohere Labs](https://cohere.com/events/Cohere-Labs-Tom-Jacobs-2025): Open Science Community ([video](/outreach#tom-jacobs--cohere-labs-aug-14-2025))."
1622

1723
- date: 12. June 2025
1824
headline: "Tom is attending the AI & Mathematics Workshop ([AIM](https://aimath.nl/index.php/2025/03/13/4th-aim-cluster-event-tilburg/)) at Tilburg University."
@@ -36,7 +42,7 @@
3642
headline: "Rebekka is at [CPAL](https://cpal.cc/spotlight_track/) presenting three [papers](/publications) as recent spotlights."
3743

3844
- date: 13. February 2025
39-
headline: "Celia is presenting her work on graph rewiring at Cohere Labs: Open Science Community ([video](/outreach#celia-rubio-madrigal--cohere-labs-feb-13-2025))."
45+
headline: "Celia is presenting her work on graph rewiring at [Cohere Labs](https://cohere.com/events/cohere-for-ai-Celia-Rubio-Madrigal-2025): Open Science Community ([video](/outreach#celia-rubio-madrigal--cohere-labs-feb-13-2025))."
4046

4147
- date: 22. January 2025
4248
headline: "Two papers

_data/outreach.yml

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -137,10 +137,17 @@ podcasts:
137137
height: 364
138138

139139
other_links:
140+
141+
- title: "Dialogforum beim Tag der Deutschen Einheit 2025"
142+
url: ["https://www.tag-der-deutschen-einheit.de/programm/paneldiskussion-mit-rebekka-burkholz", "https://www.linkedin.com/posts/cispa_tde25-apollonfestival-cybersicherheit-activity-7379617165827534848-hfja"]
143+
description: "„KI-Kompass für Krisenzeiten“ – Echte Hilfe oder Black-Box-Gefahr? Dr. Rebekka Burkholz, CISPA Helmholtz-Zentrum für Informationssicherheit im Gespräch mit Christian Seel, Landesbeauftragter für zivil-militärische Zusammenarbeit und Bevölkerungsschutz."
144+
date: "2025-10-02"
145+
140146
- title: "NetBioMed 2025 Keynote"
141-
url: "https://bsky.app/profile/netbiomed2025.bsky.social/post/3lqmzjkcks22l"
142-
description: "After coffee break we had Rebekka Burkholz discussing current challenges when modelling gene regulation and how to fix them. Her approach is innovative and allows us to infer biological processes with both scalability and interpretability."
147+
# url: "https://bsky.app/profile/netbiomed2025.bsky.social/post/3lqmzjkcks22l"
148+
# description: "After coffee break we had Rebekka Burkholz discussing current challenges when modelling gene regulation and how to fix them. Her approach is innovative and allows us to infer biological processes with both scalability and interpretability."
143149
date: "2025-06-02"
150+
embeds: '<blockquote class="bluesky-embed" data-bluesky-uri="at://did:plc:6azaynaddykjpnn6a2gj7rms/app.bsky.feed.post/3lqmzjkcks22l" data-bluesky-cid="bafyreid4sijbp7ftpkc4bam7e4mvxem4nyi7idji3vvuh2zc5mnexkzhpi" data-bluesky-embed-color-mode="system"><p lang="en">After coffee break we had Rebekka Burkholz discussing current challenges when modelling gene regulation and how to fix them. Her approach is innovative and allows us to infer biological processes with both scalability and interpretability. #NetBioMed2025 #NetSci2025<br><br><a href="https://bsky.app/profile/did:plc:6azaynaddykjpnn6a2gj7rms/post/3lqmzjkcks22l?ref_src=embed">[image or embed]</a></p>&mdash; NetBioMed 2025 (<a href="https://bsky.app/profile/did:plc:6azaynaddykjpnn6a2gj7rms?ref_src=embed">@netbiomed2025.bsky.social</a>) <a href="https://bsky.app/profile/did:plc:6azaynaddykjpnn6a2gj7rms/post/3lqmzjkcks22l?ref_src=embed">June 2, 2025 at 4:49 PM</a></blockquote>'
144151

145152
- title: "Die Abenteuer der KI in der Genomik"
146153
url: "https://www.mdc-berlin.de/de/news/news/die-abenteuer-der-ki-der-genomik"

_includes/header.html

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
<li><a href="{{ site.url }}{{ site.baseurl }}/">Home</a></li>
1717
<li><a href="{{ site.url }}{{ site.baseurl }}/team">Team</a></li>
1818
<li><a href="{{ site.url }}{{ site.baseurl }}/openings" style="font-weight: bold; color: #d9534f;">Openings</a></li>
19-
<li><a href="{{ site.url }}{{ site.baseurl }}/research">Research</a></li>
19+
<!-- <li><a href="{{ site.url }}{{ site.baseurl }}/research">Research</a></li> -->
2020
<li><a href="{{ site.url }}{{ site.baseurl }}/outreach">Outreach</a></li>
2121
<li><a href="{{ site.url }}{{ site.baseurl }}/publications">Publications</a></li>
2222
<li><a href="https://www.linkedin.com/company/relationalml/">

_pages/home.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,6 @@ permalink: /
1414
Welcome! We are the Relational ML research group.
1515
We are part of the [CISPA Helmholtz Center for Information Security](https://cispa.de) in Saarbrücken and St. Ingbert, Germany, and are grateful to [Saarland University (UdS)](https://www.uni-saarland.de) for granting us supervision rights.
1616

17-
Our research is supported by an [ERC starting grant](https://cispa.de/en/research/grants/sparse-ml) and Apple Research to improve the **efficiency of deep learning**. The aim is to design smaller-scale neural networks, which excel in noisy and potentially changing environments and require minimal sample sizes for learning. This is of particular interest in the sciences and application domains where data is scarce.
17+
Our research is supported by an [ERC starting grant](https://cispa.de/en/research/grants/sparse-ml) to improve the **efficiency of deep learning**. The aim is to design smaller-scale neural networks, which excel in noisy and potentially changing environments and require minimal sample sizes for learning. This is of particular interest in the sciences and application domains where data is scarce.
1818
We care deeply about solving real world problems in collaboration with domain experts. Of special interest to us are problems related to gene regulation and its alterations during cancer progression, drug design, and international food trade.
1919
From a methodological point of view, we combine robust algorithm design with complex network science to advance deep learning theory and efficiency in general and in various applications ranging from biomedicine to pharmacy, physics, and economics.

_pages/outreach.md

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,5 +92,22 @@ title="video player" frameborder="0" allow="accelerometer; clipboard-write; encr
9292
### {{ link.title }} ({{ link.date | date: "%b %-d, %Y" }})
9393
{{ link.description }}
9494

95-
Link: <a href="{{ link.url }}" target="_blank" rel="noopener">{{ link.url }}</a>
95+
{% if link.url %}
96+
Link: {% for url in link.url %}
97+
<a href="{{ url }}" target="_blank" rel="noopener">{{ url }}</a>
98+
{% if forloop.last == false %}<br>{% endif %}
99+
{% endfor %}
100+
{% endif %}
101+
102+
{% if link.images %}
103+
{% for image in link.images %}
104+
<img src="{{ image }}" alt="{{ link.title }}" referrerpolicy="no-referrer" style="max-width: 100%; height: 200px; margin-top: 10px;">
105+
{%- endfor %}
106+
{% endif %}
107+
108+
{% if link.embeds %}
109+
{% for embed in link.embeds %}
110+
{{ embed | markdownify }}
111+
{%- endfor %}
112+
{% endif %}
96113
{% endfor %}

_pages/team.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ permalink: /team/
8989
{% for member in group.members %}
9090
{% if member.url %}<a href="{{ member.url }}">{{ member.name }}</a>{% else %}{{ member.name }}{% endif -%}
9191
{%- if member.from %} ({{ member.from }}){%- endif -%}
92-
{%- if member.date_thesis -%}: <i>{% if member.url_thesis -%}<a href="{{ member.url_thesis }}">{{ member.thesis }}</a>{% else %}{{ member.thesis }}{% endif %}</i>, {{ member.date_thesis }}.{%- else -%}:
92+
{%- if member.thesis -%}: <i>{% if member.url_thesis -%}<a href="{{ member.url_thesis }}">{{ member.thesis }}</a>{% else %}{{ member.thesis }}{% endif %}</i>{%- if member.date_thesis -%}, {{ member.date_thesis }}{%- endif -%}.{%- else -%}:
9393
{{ member.start_date }}-{{ member.end_date }}{%- if member.next -%}. Next {%- endif -%}
9494
{% for next in member.next %} ⇢ {{ next }}{% endfor %}.{%- endif %}
9595
{% endfor %}

_site/404.html

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@
5050
<li><a href="/">Home</a></li>
5151
<li><a href="/team">Team</a></li>
5252
<li><a href="/openings" style="font-weight: bold; color: #d9534f;">Openings</a></li>
53-
<li><a href="/research">Research</a></li>
53+
<!-- <li><a href="/research">Research</a></li> -->
5454
<li><a href="/outreach">Outreach</a></li>
5555
<li><a href="/publications">Publications</a></li>
5656
<li><a href="https://www.linkedin.com/company/relationalml/">

0 commit comments

Comments (0)