Maximilian Zorn, M.Sc.
Lehrstuhl für Mobile und Verteilte Systeme
Ludwig-Maximilians-Universität München, Institut für Informatik
Oettingenstraße 67
Room: E105
Zoom Personal Meeting Room
Phone: +49 89 / 2180-9259 (currently unattended)
🔬 Research Interests
- (Quantum/Hybrid) Reinforcement Learning
- Quantum Circuit-Construction
- Self-Replication (Properties) in Neural Networks
- Cooperation in Multi-Agent Systems
🎓 Teaching (Assistance)
- Natural Computing: SoSe22, SoSe23
- Computational Intelligence: WiSe22/23, WiSe23/24
- TIMS: SoSe23, WiSe23/24 & VTIMS: SoSe23, WiSe23/24
- Autonome Systeme Praktikum (ASP): WiSe23/24
📚 Publications
2023
- T. Phan, F. Ritz, P. Altmann, M. Zorn, J. Nüßlein, M. Kölle, T. Gabor, and C. Linnhoff-Popien, "Attention-Based Recurrence for Multi-Agent Reinforcement Learning under Stochastic Partial Observability," in Proceedings of the 40th International Conference on Machine Learning (ICML), 2023.
[BibTeX] [Download PDF]

    @inproceedings{phanICML23,
      author = {Thomy Phan and Fabian Ritz and Philipp Altmann and Maximilian Zorn and Jonas N{\"u}{\ss}lein and Michael K{\"o}lle and Thomas Gabor and Claudia Linnhoff-Popien},
      title = {Attention-Based Recurrence for Multi-Agent Reinforcement Learning under Stochastic Partial Observability},
      year = {2023},
      publisher = {PMLR},
      booktitle = {Proceedings of the 40th International Conference on Machine Learning (ICML)},
      location = {Hawaii, USA},
      url = {https://thomyphan.github.io/publication/2023-07-01-icml-phan},
      eprint = {https://thomyphan.github.io/files/2023-icml-preprint.pdf},
    }
- J. Stein, F. Chamanian, M. Zorn, J. Nüßlein, S. Zielinski, M. Kölle, and C. Linnhoff-Popien, "Evidence that PUBO outperforms QUBO when solving continuous optimization problems with the QAOA," arXiv preprint arXiv:2305.03390, 2023.
[BibTeX]

    @article{stein2023evidence,
      title = {Evidence that PUBO outperforms QUBO when solving continuous optimization problems with the QAOA},
      author = {Stein, Jonas and Chamanian, Farbod and Zorn, Maximilian and N{\"u}{\ss}lein, Jonas and Zielinski, Sebastian and K{\"o}lle, Michael and Linnhoff-Popien, Claudia},
      journal = {arXiv preprint arXiv:2305.03390},
      year = {2023},
    }
- M. Kölle, S. Illium, M. Zorn, J. Nüßlein, P. Suchostawski, and C. Linnhoff-Popien, "Improving Primate Sounds Classification using Binary Presorting for Deep Learning," in Int. Conference on Deep Learning Theory and Application (DeLTA), 2023.
[BibTeX]

    @inproceedings{koelle23primate,
      title = {Improving Primate Sounds Classification using Binary Presorting for Deep Learning},
      author = {K{\"o}lle, Michael and Illium, Steffen and Zorn, Maximilian and N{\"u}{\ss}lein, Jonas and Suchostawski, Patrick and Linnhoff-Popien, Claudia},
      year = {2023},
      organization = {Int. Conference on Deep Learning Theory and Application - DeLTA 2023},
      publisher = {Springer CCIS Series},
    }
- M. Zorn, S. Illium, T. Phan, T. K. Kaiser, C. Linnhoff-Popien, and T. Gabor, "Social Neural Network Soups with Surprise Minimization," in Conference on Artificial Life (ALIFE), 2023.
[BibTeX]

    @inproceedings{zorn23surprise,
      author = {Zorn, Maximilian and Illium, Steffen and Phan, Thomy and Kaiser, Tanja Katharina and Linnhoff-Popien, Claudia and Gabor, Thomas},
      title = {Social Neural Network Soups with Surprise Minimization},
      year = {2023},
      publisher = {MIT Press Direct},
      organization = {Conference on Artificial Life - Alife 2023},
    }
2022
- S. Illium, G. Griffin, M. Kölle, M. Zorn, J. Nüßlein, and C. Linnhoff-Popien, "VoronoiPatches: Evaluating A New Data Augmentation Method," arXiv, 2022. doi:10.48550/ARXIV.2212.10054
[BibTeX] [Download PDF]

    @misc{https://doi.org/10.48550/arxiv.2212.10054,
      doi = {10.48550/ARXIV.2212.10054},
      url = {https://arxiv.org/abs/2212.10054},
      author = {Illium, Steffen and Griffin, Gretchen and Kölle, Michael and Zorn, Maximilian and Nüßlein, Jonas and Linnhoff-Popien, Claudia},
      keywords = {Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), FOS: Computer and information sciences},
      title = {VoronoiPatches: Evaluating A New Data Augmentation Method},
      publisher = {arXiv},
      year = {2022},
      copyright = {Creative Commons Attribution 4.0 International}
    }
- S. Illium, M. Zorn, C. Lenta, M. Kölle, C. Linnhoff-Popien, and T. Gabor, "Constructing Organism Networks from Collaborative Self-Replicators," arXiv, 2022. doi:10.48550/ARXIV.2212.10078
[BibTeX] [Download PDF]

    @misc{https://doi.org/10.48550/arxiv.2212.10078,
      doi = {10.48550/ARXIV.2212.10078},
      url = {https://arxiv.org/abs/2212.10078},
      author = {Illium, Steffen and Zorn, Maximilian and Lenta, Cristian and Kölle, Michael and Linnhoff-Popien, Claudia and Gabor, Thomas},
      keywords = {Neural and Evolutionary Computing (cs.NE), Machine Learning (cs.LG), FOS: Computer and information sciences},
      title = {Constructing Organism Networks from Collaborative Self-Replicators},
      publisher = {arXiv},
      year = {2022},
      copyright = {Creative Commons Attribution 4.0 International}
    }
- T. Gabor, M. Zorn, and C. Linnhoff-Popien, "The Applicability of Reinforcement Learning for the Automatic Generation of State Preparation Circuits," in Proceedings of the Genetic and Evolutionary Computation Conference Companion (GECCO '22), New York, NY, USA, 2022, pp. 2196–2204. doi:10.1145/3520304.3534039
[BibTeX] [Abstract] [Download PDF]
State preparation is currently the only means to provide input data for quantum algorithms, but finding the shortest possible sequence of gates to prepare a given state is not trivial. We approach this problem using reinforcement learning (RL), first on an agent that is trained to only prepare a single fixed quantum state. Despite the overhead of training a whole network to just produce one single data point, gradient-based backpropagation appears competitive to genetic algorithms in this scenario and single state preparation thus seems a worthwhile task. In a second case we then train a single network to prepare arbitrary quantum states to some degree of success, despite a complete lack of structure in the training data set. In both cases we find that training is severely improved by using QR decomposition to automatically map the agents' outputs to unitary operators to solve the problem of sparse rewards that usually makes this task challenging.
    @inproceedings{10.1145/3520304.3534039,
      author = {Gabor, Thomas and Zorn, Maximilian and Linnhoff-Popien, Claudia},
      title = {The Applicability of Reinforcement Learning for the Automatic Generation of State Preparation Circuits},
      year = {2022},
      isbn = {9781450392686},
      publisher = {Association for Computing Machinery},
      address = {New York, NY, USA},
      url = {https://doi.org/10.1145/3520304.3534039},
      doi = {10.1145/3520304.3534039},
      abstract = {State preparation is currently the only means to provide input data for quantum algorithm, but finding the shortest possible sequence of gates to prepare a given state is not trivial. We approach this problem using reinforcement learning (RL), first on an agent that is trained to only prepare a single fixed quantum state. Despite the overhead of training a whole network to just produce one single data point, gradient-based backpropagation appears competitive to genetic algorithms in this scenario and single state preparation thus seems a worthwhile task. In a second case we then train a single network to prepare arbitrary quantum states to some degree of success, despite a complete lack of structure in the training data set. In both cases we find that training is severely improved by using QR decomposition to automatically map the agents' outputs to unitary operators to solve the problem of sparse rewards that usually makes this task challenging.},
      booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference Companion},
      pages = {2196–2204},
      numpages = {9},
      keywords = {state preparation, actor/critic, quantum computing, circuit design, neural network, reinforcement learning},
      location = {Boston, Massachusetts},
      series = {GECCO '22}
    }
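As an illustration of the QR-decomposition step described in the abstract above, here is a minimal sketch assuming NumPy; the function name, the phase normalization detail, and the 2-qubit dimensions are illustrative choices, not taken from the paper:

    import numpy as np

    def output_to_unitary(m: np.ndarray) -> np.ndarray:
        # QR-decompose the raw matrix; Q is unitary by construction.
        q, r = np.linalg.qr(m)
        # Absorb the phases of R's diagonal into Q so the mapping is unique
        # (Q alone is only determined up to a diagonal phase matrix).
        phases = np.diag(r) / np.abs(np.diag(r))
        return q * phases  # multiplies column j of q by phases[j]

    rng = np.random.default_rng(0)
    raw = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))  # raw "agent output"
    u = output_to_unitary(raw)
    assert np.allclose(u.conj().T @ u, np.eye(4))  # u is unitary
    prepared = u[:, 0]  # u applied to the basis state |00> is u's first column

The upshot, as the abstract notes, is that the agent may emit an unconstrained square matrix while the environment still receives a valid unitary, so no training step is wasted on non-physical outputs.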
- T. Gabor, S. Illium, M. Zorn, C. Lenta, A. Mattausch, L. Belzner, and C. Linnhoff-Popien, "Self-Replication in Neural Networks," Artificial Life, pp. 205-223, 2022. doi:10.1162/artl_a_00359
[BibTeX] [Abstract] [Download PDF]
A key element of biological structures is self-replication. Neural networks are the prime structure used for the emergent construction of complex behavior in computers. We analyze how various network types lend themselves to self-replication. Backpropagation turns out to be the natural way to navigate the space of network weights and allows non-trivial self-replicators to arise naturally. We perform an in-depth analysis to show the self-replicators' robustness to noise. We then introduce artificial chemistry environments consisting of several neural networks and examine their emergent behavior. In extension to this work's previous version (Gabor et al., 2019), we provide an extensive analysis of the occurrence of fixpoint weight configurations within the weight space and an approximation of their respective attractor basins.
    @article{10.1162/artl_a_00359,
      author = {Gabor, Thomas and Illium, Steffen and Zorn, Maximilian and Lenta, Cristian and Mattausch, Andy and Belzner, Lenz and Linnhoff-Popien, Claudia},
      title = {{Self-Replication in Neural Networks}},
      journal = {Artificial Life},
      pages = {205-223},
      year = {2022},
      month = {06},
      abstract = {{A key element of biological structures is self-replication. Neural networks are the prime structure used for the emergent construction of complex behavior in computers. We analyze how various network types lend themselves to self-replication. Backpropagation turns out to be the natural way to navigate the space of network weights and allows non-trivial self-replicators to arise naturally. We perform an in-depth analysis to show the self-replicators' robustness to noise. We then introduce artificial chemistry environments consisting of several neural networks and examine their emergent behavior. In extension to this works previous version (Gabor et al., 2019), we provide an extensive analysis of the occurrence of fixpoint weight configurations within the weight space and an approximation of their respective attractor basins.}},
      issn = {1064-5462},
      doi = {10.1162/artl_a_00359},
      url = {https://doi.org/10.1162/artl\_a\_00359},
      eprint = {https://direct.mit.edu/artl/article-pdf/doi/10.1162/artl\_a\_00359/2030914/artl\_a\_00359.pdf}
    }
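To make the self-application idea behind these fixpoint experiments concrete, the following toy sketch in NumPy shows a network that rewrites each of its own weights from the weight's value and position; a self-replicator is a fixpoint of this map. The 2-4-1 architecture, the positional encoding, and all names here are assumptions for illustration, not the paper's exact setup:

    import numpy as np

    def forward(w: np.ndarray, x: np.ndarray) -> float:
        # A tiny 2-4-1 MLP whose 17 parameters live in the flat vector w.
        W1, b1 = w[:8].reshape(4, 2), w[8:12]
        W2, b2 = w[12:16].reshape(1, 4), w[16:]
        return (W2 @ np.tanh(W1 @ x + b1) + b2)[0]

    def self_apply(w: np.ndarray) -> np.ndarray:
        # One self-application pass: the network predicts every one of its
        # own weights from the pair (current value, normalized position).
        positions = np.linspace(-1.0, 1.0, w.size)
        return np.array([forward(w, np.array([wi, pi]))
                         for wi, pi in zip(w, positions)])

    w = np.zeros(17)
    assert np.allclose(self_apply(w), w)  # the zero network is a trivial fixpoint

    # Iterating self-application from many perturbed starting points and
    # recording which fixpoint each run settles on is one way to estimate the
    # attractor basins mentioned in the abstract.
    w = 0.1 * np.random.default_rng(1).normal(size=17)
    for _ in range(100):
        w = self_apply(w)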
2021
- T. Gabor, S. Illium, M. Zorn, and C. Linnhoff-Popien, "Goals for Self-Replicating Neural Networks," in ALIFE 2021: The 2021 Conference on Artificial Life, 2021. doi:10.1162/isal_a_00439
[BibTeX] [Download PDF]

    @proceedings{10.1162/isal_a_00439,
      author = {Gabor, Thomas and Illium, Steffen and Zorn, Maximilian and Linnhoff-Popien, Claudia},
      title = {{Goals for Self-Replicating Neural Networks}},
      volume = {ALIFE 2021: The 2021 Conference on Artificial Life},
      series = {ALIFE 2021: The 2021 Conference on Artificial Life},
      year = {2021},
      month = {07},
      doi = {10.1162/isal_a_00439},
      url = {https://doi.org/10.1162/isal\_a\_00439},
      note = {101}
    }
Find me on Google Scholar, ResearchGate, and LinkedIn.