@misc{bill2025focus,
  title={Optimal Control Meets Flow Matching: A Principled Route to Multi-Subject Fidelity},
  author={Eric Tillmann Bill and Enis Simsar and Thomas Hofmann},
  year={2025},
  eprint={2510.02315},
  archivePrefix={arXiv},
  primaryClass={cs.CV},
  url={https://arxiv.org/abs/2510.02315},
}
Exploring Magnitude Preservation and Rotation Modulation in Diffusion Transformers
Eric Tillmann Bill, Cristian Perez Jensen, Sotiris Anagnostidis, Dimitri von Rütte
NeurIPS 2025, Optimization for Machine Learning Workshop
@inproceedings{bill2025jedi,
  title={{JEDI}: The Force of Jensen-Shannon Divergence in Disentangling Diffusion Models},
  author={Eric Tillmann Bill and Enis Simsar and Thomas Hofmann},
  booktitle={Second Workshop on Test-Time Adaptation: Putting Updates to the Test! at ICML 2025},
  year={2025},
  url={https://openreview.net/forum?id=HVQ3wL2jPI}
}
2023
Performance Analysis of Different Reward Functions in Reinforcement Learning for the Scheduling of Modular Automotive Production Systems
Jan Markus Gelfgren, Eric Tillmann Bill, Tim Luther, Simon Hagemann, Sigrid Wenzel
17th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME '23)
@article{GELFGREN202481,
  title = {Performance Analysis of Different Reward Functions in Reinforcement Learning for the Scheduling of Modular Automotive Production Systems},
  journal = {Procedia CIRP},
  volume = {126},
  pages = {81-86},
  year = {2024},
  note = {17th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME '23)},
  issn = {2212-8271},
  doi = {10.1016/j.procir.2024.08.299},
  url = {https://www.sciencedirect.com/science/article/pii/S2212827124008709},
  author = {Jan Markus Gelfgren and Eric Tillmann Bill and Tim Luther and Simon Hagemann and Sigrid Wenzel},
  keywords = {Reinforcement Learning, Reward Functions, Production Scheduling, Modular Production},
  abstract = {Conventional, linear production lines struggle with the new flexibility requirements of the automotive market. Modular production has the potential to radically improve production flexibility. However, scheduling modular production systems remains an open research question. Reinforcement learning (RL) is a form of artificial intelligence that shows great potential for scheduling complex modular production systems. Nonetheless, the performance of RL agents depends heavily on their reward function, and designing an optimal reward function is highly complex. This paper addresses this research gap and systematically compares six different reward functions across a variety of modular production scenarios. In addition, a new learning method using resets is proposed and its performance is compared with the standard learning approach. The results suggest that dense reward functions perform better than a sparse one, although there are major case-to-case discrepancies. The proposed learning method outperforms the standard learning method by 7% on average; nonetheless, the performance difference between scenarios is larger than with the standard approach.}
}
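The dense-versus-sparse distinction in the abstract above is easy to make concrete. The following is a minimal sketch, not the paper's implementation: the Job class, reward scales, and makespan bound are illustrative assumptions for a toy sequential-scheduling episode.

# Sketch (illustrative only): sparse vs. dense reward signals for a toy
# job-scheduling episode. A sparse reward pays out only at episode end;
# a dense reward shapes every step.
from dataclasses import dataclass

@dataclass
class Job:
    duration: int

def sparse_reward(done: bool, makespan: int, bound: int) -> float:
    # Zero during the episode; terminal reward scaled by how close the
    # final makespan is to a (hypothetical) lower bound.
    if not done:
        return 0.0
    return bound / makespan  # in (0, 1]; 1.0 means the bound was met

def dense_reward(job: Job, idle_time: int) -> float:
    # Per-step shaping: reward productive time, penalize station idle time.
    return job.duration - 0.5 * idle_time

# Toy episode: three jobs processed sequentially with some idle gaps.
jobs = [Job(4), Job(2), Job(3)]
idle = [0, 1, 2]
makespan = sum(j.duration for j in jobs) + sum(idle)

dense_total = sum(dense_reward(j, i) for j, i in zip(jobs, idle))
sparse_total = sparse_reward(done=True, makespan=makespan, bound=9)
print(f"dense return: {dense_total:.1f}, sparse return: {sparse_total:.2f}")

The dense signal gives the agent a gradient toward good behavior at every step, whereas the sparse signal forces it to discover a full successful episode before receiving any feedback, which is one intuition for the paper's finding that dense rewards tend to perform better.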
A Theoretical and Empirical Investigation into the Equivalence of Graph Neural Networks and the Weisfeiler-Leman Algorithm