Dataset archive: 10.35097-VDPCEFSThBWlDPFL\10.35097-VDPCEFSThBWlDPFL\data\dataset\MinT.tar.zst
Exploring the intricate dynamics between muscular and skeletal structures is pivotal to understanding human motion. However, acquiring ground-truth muscle activation data is resource-intensive, resulting in a scarcity of datasets. The Muscles in Time (MinT) dataset addresses this scarcity by providing large-scale synthetic muscle activation data. MinT is created by enriching existing motion capture datasets with muscle activation simulations from biomechanical models using the OpenSim platform, a widely accepted tool in biomechanics and human motion research.
Neural networks designed for human motion understanding have historically relied on indirect data such as video or motion capture, similar to prisoners in Plato's cave who see only shadows of the true objects. Despite advances in capturing human motion, current systems do not account for the complex inner mechanics, particularly the muscle activations driving human movement. These activations are key to understanding physical exertion and motion difficulty, but they are often overlooked due to the limitations of traditional data collection methods such as electromyography (EMG).
To overcome these challenges, our dataset, MinT, incorporates simulations that provide detailed muscle activation information. Starting from simple pose sequences, we extract fine-grained muscle activation timings and interactions within the human musculoskeletal system. MinT contains over nine hours of simulation data, spanning 227 subjects and 402 muscle strands, providing a comprehensive and scalable resource for further research into human motion.
By bridging computer vision and biomechanical research, we expand the set of tools available for understanding human motion. MinT facilitates the study of muscle activations with unprecedented scope, enabling researchers to investigate the complex interplay between human pose sequences and the underlying biomechanics. This dataset marks a step forward in modeling human motion using machine learning methods, with potential applications in robotics, physical rehabilitation, and sports science.
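The dataset itself ships as a Zstandard-compressed tar archive (MinT.tar.zst, see the path above). A minimal Python sketch for unpacking it could look as follows; the third-party zstandard package and the relative archive path are assumptions of this sketch, not part of any official MinT tooling:

# Minimal sketch for unpacking the MinT archive.
# Assumes: pip install zstandard; the path below mirrors the archive
# location inside this download and may need adjusting.
import tarfile
import zstandard as zstd

archive_path = "data/dataset/MinT.tar.zst"

with open(archive_path, "rb") as compressed:
    # Stream-decode the Zstandard layer, then let tarfile unpack the
    # tar layer without writing the decompressed archive to disk first.
    reader = zstd.ZstdDecompressor().stream_reader(compressed)
    with tarfile.open(fileobj=reader, mode="r|") as tar:
        tar.extractall(path="MinT")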
All BibTeX keys below share the prefix mint_, so Overleaf will automatically offer all of them as autocompletion.
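For example, typing \cite{mint_ in a document that includes this bibliography should bring up mint_schneider2024muscles together with the source-dataset keys listed below: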
% MinT
@inproceedings{mint_schneider2024muscles,
title={Muscles in Time: Learning to Understand Human Motion In-Depth by Simulating Muscle Activations},
author={Schneider, David and Rei{\ss}, Simon and Kugler, Marco and Jaus, Alexander and Peng, Kunyu and Sutschet, Susanne and Sarfraz, M Saquib and Matthiesen, Sven and Stiefelhagen, Rainer},
booktitle={The Thirty-eighth Conference on Neural Information Processing Systems Datasets and Benchmarks Track},
year={2024}
}
% KIT Whole-Body Human Motion Database
@inproceedings{mint_kit_human_motion_db_1,
author = {Christian Mandery and {\"O}mer Terlemez and Martin Do and Nikolaus Vahrenkamp and Tamim Asfour},
title = {The {KIT} Whole-Body Human Motion Database},
booktitle = {International Conference on Advanced Robotics (ICAR)},
pages = {329--336},
year = {2015},
}
@article{mint_kit_human_motion_db_2,
author = {Christian Mandery and {\"O}mer Terlemez and Martin Do and Nikolaus Vahrenkamp and Tamim Asfour},
title = {Unifying Representations and Large-Scale Whole-Body Motion Databases for Studying Human Motion},
pages = {796--809},
volume = {32},
number = {4},
journal = {IEEE Transactions on Robotics},
year = {2016},
}
@inproceedings{mint_kit_human_motion_db_3,
author = {Franziska Krebs and Andre Meixner and Isabel Patzer and Tamim Asfour},
title = {The {KIT} Bimanual Manipulation Dataset},
booktitle = {IEEE/RAS International Conference on Humanoid Robots (Humanoids)},
pages = {499--506},
year = {2021},
}
% Total Capture
@inproceedings{mint_TotalCapture,
author = {Trumble, Matt and Gilbert, Andrew and Malleson, Charles and Hilton, Adrian and Collomosse, John},
title = {{Total Capture}: 3D Human Pose Estimation Fusing Video and Inertial Sensors},
booktitle = {British Machine Vision Conference (BMVC)},
year = {2017}
}
% Eyes Japan
@misc{mint_EyesJapanDataset,
title = {{Eyes Japan MoCap Dataset}},
author = {{Eyes JAPAN Co. Ltd.}},
url = {http://mocapdata.com}
}
% BML
@article{mint_ghorbani2021movi,
title={MoVi: A large multi-purpose human motion and video dataset},
author={Ghorbani, Saeed and Mahdaviani, Kimia and Thaler, Anne and Kording, Konrad and Cook, Douglas James and Blohm, Gunnar and Troje, Nikolaus F},
journal={PLOS ONE},
volume={16},
number={6},
pages={e0253157},
year={2021},
publisher={Public Library of Science}
}
% AMASS
@inproceedings{mint_AMASS:2019,
title={AMASS: Archive of Motion Capture as Surface Shapes},
author={Mahmood, Naureen and Ghorbani, Nima and Troje, Nikolaus F. and Pons-Moll, Gerard and Black, Michael J.},
booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
year={2019},
month = {Oct},
url = {https://amass.is.tue.mpg.de}
}