Maximilian Zorn, M.Sc.
Lehrstuhl für Mobile und Verteilte Systeme (Chair for Mobile and Distributed Systems)
Ludwig-Maximilians-Universität München, Institut für Informatik
Oettingenstraße 67, Room E105
Zoom: Personal Meeting Room
Phone: +49 89 / 2180-9259 (currently unattended)
🔬 Research Interests
- (Quantum) Reinforcement Learning
- Self-Replication (Properties) in Neural Networks
🎓 Teaching (Assistance)
📚 Publications
2022
- S. Illium, G. Griffin, M. Kölle, M. Zorn, J. Nüßlein, and C. Linnhoff-Popien, "VoronoiPatches: Evaluating A New Data Augmentation Method," arXiv, 2022. doi:10.48550/ARXIV.2212.10054

      @misc{https://doi.org/10.48550/arxiv.2212.10054,
        doi       = {10.48550/ARXIV.2212.10054},
        url       = {https://arxiv.org/abs/2212.10054},
        author    = {Illium, Steffen and Griffin, Gretchen and Kölle, Michael and Zorn, Maximilian and Nüßlein, Jonas and Linnhoff-Popien, Claudia},
        keywords  = {Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), FOS: Computer and information sciences},
        title     = {VoronoiPatches: Evaluating A New Data Augmentation Method},
        publisher = {arXiv},
        year      = {2022},
        copyright = {Creative Commons Attribution 4.0 International}
      }
- S. Illium, M. Zorn, C. Lenta, M. Kölle, C. Linnhoff-Popien, and T. Gabor, "Constructing Organism Networks from Collaborative Self-Replicators," arXiv, 2022. doi:10.48550/ARXIV.2212.10078

      @misc{https://doi.org/10.48550/arxiv.2212.10078,
        doi       = {10.48550/ARXIV.2212.10078},
        url       = {https://arxiv.org/abs/2212.10078},
        author    = {Illium, Steffen and Zorn, Maximilian and Lenta, Cristian and Kölle, Michael and Linnhoff-Popien, Claudia and Gabor, Thomas},
        keywords  = {Neural and Evolutionary Computing (cs.NE), Machine Learning (cs.LG), FOS: Computer and information sciences},
        title     = {Constructing Organism Networks from Collaborative Self-Replicators},
        publisher = {arXiv},
        year      = {2022},
        copyright = {Creative Commons Attribution 4.0 International}
      }
- T. Gabor, M. Zorn, and C. Linnhoff-Popien, "The Applicability of Reinforcement Learning for the Automatic Generation of State Preparation Circuits," in Proceedings of the Genetic and Evolutionary Computation Conference Companion (GECCO '22), New York, NY, USA, 2022, pp. 2196–2204. doi:10.1145/3520304.3534039
  Abstract: State preparation is currently the only means to provide input data for quantum algorithms, but finding the shortest possible sequence of gates to prepare a given state is not trivial. We approach this problem using reinforcement learning (RL), first on an agent that is trained to only prepare a single fixed quantum state. Despite the overhead of training a whole network to just produce one single data point, gradient-based backpropagation appears competitive to genetic algorithms in this scenario, and single state preparation thus seems a worthwhile task. In a second case we then train a single network to prepare arbitrary quantum states to some degree of success, despite a complete lack of structure in the training data set. In both cases we find that training is severely improved by using QR decomposition to automatically map the agents' outputs to unitary operators, solving the problem of sparse rewards that usually makes this task challenging.

  (A toy illustration of this QR-to-unitary mapping follows the 2022 list below.)

      @inproceedings{10.1145/3520304.3534039,
        author    = {Gabor, Thomas and Zorn, Maximilian and Linnhoff-Popien, Claudia},
        title     = {The Applicability of Reinforcement Learning for the Automatic Generation of State Preparation Circuits},
        year      = {2022},
        isbn      = {9781450392686},
        publisher = {Association for Computing Machinery},
        address   = {New York, NY, USA},
        url       = {https://doi.org/10.1145/3520304.3534039},
        doi       = {10.1145/3520304.3534039},
        booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference Companion},
        pages     = {2196--2204},
        numpages  = {9},
        keywords  = {state preparation, actor/critic, quantum computing, circuit design, neural network, reinforcement learning},
        location  = {Boston, Massachusetts},
        series    = {GECCO '22}
      }
- T. Gabor, S. Illium, M. Zorn, C. Lenta, A. Mattausch, L. Belzner, and C. Linnhoff-Popien, "Self-Replication in Neural Networks," Artificial Life, pp. 205–223, 2022. doi:10.1162/artl_a_00359
  Abstract: A key element of biological structures is self-replication. Neural networks are the prime structure used for the emergent construction of complex behavior in computers. We analyze how various network types lend themselves to self-replication. Backpropagation turns out to be the natural way to navigate the space of network weights and allows non-trivial self-replicators to arise naturally. We perform an in-depth analysis to show the self-replicators' robustness to noise. We then introduce artificial chemistry environments consisting of several neural networks and examine their emergent behavior. In extension to this work's previous version (Gabor et al., 2019), we provide an extensive analysis of the occurrence of fixpoint weight configurations within the weight space and an approximation of their respective attractor basins.

  (A toy self-replication training loop is sketched at the end of this page.)

      @article{10.1162/artl_a_00359,
        author  = {Gabor, Thomas and Illium, Steffen and Zorn, Maximilian and Lenta, Cristian and Mattausch, Andy and Belzner, Lenz and Linnhoff-Popien, Claudia},
        title   = {{Self-Replication in Neural Networks}},
        journal = {Artificial Life},
        pages   = {205--223},
        year    = {2022},
        month   = {06},
        issn    = {1064-5462},
        doi     = {10.1162/artl_a_00359},
        url     = {https://doi.org/10.1162/artl_a_00359},
        eprint  = {https://direct.mit.edu/artl/article-pdf/doi/10.1162/artl_a_00359/2030914/artl_a_00359.pdf}
      }
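The GECCO '22 paper above reports that training improves when the agent's raw network outputs are mapped to unitary operators via QR decomposition. As a reader's aid, here is a minimal NumPy sketch of that mapping; it is not the paper's implementation, and the single-qubit (2x2) setting, the random seed, and all names are assumptions made purely for illustration.

```python
# Hypothetical sketch (not the paper's code): map an agent's raw,
# generally non-unitary output matrix to a valid quantum operator
# via QR decomposition, as described in the GECCO '22 abstract.
import numpy as np

rng = np.random.default_rng(0)

# Stand-in for an agent's raw network output: an arbitrary complex matrix.
raw_output = rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2))

# QR decomposition: Q is unitary by construction, so it is a legal gate.
q, _ = np.linalg.qr(raw_output)
assert np.allclose(q.conj().T @ q, np.eye(2))  # unitarity check

# Applying Q to |0> prepares a state; an RL reward could then compare it
# to the target state, e.g. via the fidelity |<target|psi>|^2.
ket0 = np.array([1.0, 0.0], dtype=complex)
psi = q @ ket0
print("prepared state:", psi, "norm:", np.linalg.norm(psi))
```

Since the Q factor of a QR decomposition always has orthonormal columns, every network output yields a physically valid operation, which plausibly addresses the sparse-reward problem the abstract mentions: no exploration step is wasted on invalid actions.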
2021
- T. Gabor, S. Illium, M. Zorn, and C. Linnhoff-Popien, "Goals for Self-Replicating Neural Networks," in ALIFE 2021: The 2021 Conference on Artificial Life, 2021. doi:10.1162/isal_a_00439

      @proceedings{10.1162/isal_a_00439,
        author = {Gabor, Thomas and Illium, Steffen and Zorn, Maximilian and Linnhoff-Popien, Claudia},
        title  = {{Goals for Self-Replicating Neural Networks}},
        volume = {ALIFE 2021: The 2021 Conference on Artificial Life},
        series = {ALIFE 2021: The 2021 Conference on Artificial Life},
        year   = {2021},
        month  = {07},
        doi    = {10.1162/isal_a_00439},
        url    = {https://doi.org/10.1162/isal_a_00439},
        note   = {101}
      }
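The self-replication papers above (Artificial Life 2022 and ALIFE 2021) study networks trained via backpropagation to output their own weights, so that fixpoints of training act as self-replicators. Below is a minimal, hypothetical PyTorch sketch of that idea; it is not the authors' code, and the architecture, the normalized-index input encoding, and all hyperparameters are illustrative assumptions.

```python
# Hypothetical sketch (not the authors' code): a network that is trained
# to predict its own weights. A weight's input is its normalized index in
# the flattened weight vector; the target is the weight's current value.
import torch
import torch.nn as nn

torch.manual_seed(0)

net = nn.Sequential(nn.Linear(1, 8), nn.Tanh(), nn.Linear(8, 1))
opt = torch.optim.SGD(net.parameters(), lr=0.01)

def flat_weights(model):
    # All parameters of the model as one flat vector.
    return torch.cat([p.reshape(-1) for p in model.parameters()])

n = flat_weights(net).numel()
# One input per weight: its normalized position in the flat weight vector.
positions = torch.linspace(-1.0, 1.0, n).unsqueeze(1)

for step in range(1000):
    target = flat_weights(net).detach()   # the network's own current weights
    pred = net(positions).squeeze(1)      # the network's guess of its weights
    loss = ((pred - target) ** 2).mean()  # self-replication error
    opt.zero_grad()
    loss.backward()
    opt.step()

# The target moves with the weights, so training seeks a fixpoint: a network
# that, fed its own weight positions, returns (approximately) those weights.
print("final self-replication MSE:", loss.item())
```

A network for which this loss reaches zero is a non-trivial self-replicator in the sense of the abstracts above: applying it to a description of itself reproduces itself, and the robustness and attractor-basin analyses in the 2022 article concern exactly such fixpoints.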