From e63cf7476f47d378d7adae0ca4b68882c9ca1fcb Mon Sep 17 00:00:00 2001
From: Michail Kaseris
Date: Wed, 17 Jan 2024 15:16:18 +0200
Subject: [PATCH 1/4] Update requirements for amass dataset

---
 requirements.txt | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index e44534f..461590d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,4 +7,7 @@ matplotlib
 randomname
 pyyaml
 tensorboard
-discord
\ No newline at end of file
+discord
+git+https://github.com/nghorbani/configer
+git+https://github.com/nghorbani/human_body_prior
+git+https://github.com/nghorbani/amass
\ No newline at end of file

From 9b10a5614542ad06698fd6c55886ba424f79963c Mon Sep 17 00:00:00 2001
From: Michail Kaseris
Date: Wed, 17 Jan 2024 15:52:23 +0200
Subject: [PATCH 2/4] AMASS dataset comprehension notebook

---
 notebooks/amass_visualization.ipynb | 175 ++++++++++++++++++++++++++++
 1 file changed, 175 insertions(+)
 create mode 100644 notebooks/amass_visualization.ipynb

diff --git a/notebooks/amass_visualization.ipynb b/notebooks/amass_visualization.ipynb
new file mode 100644
index 0000000..da6732c
--- /dev/null
+++ b/notebooks/amass_visualization.ipynb
@@ -0,0 +1,175 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Experimenting with the AMASS dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os.path as osp\n",
    "\n",
    "import torch\n",
    "import numpy as np\n",
    "\n",
    "from human_body_prior.tools.omni_tools import copy2cpu as c2c\n",
    "\n",
    "support_dir = '/home/kaseris/Documents/amass/support_data'\n",
    "comp_device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "amass_npz_fname = osp.join(support_dir, 'github_data/dmpl_sample.npz')\n",
    "bdata = np.load(amass_npz_fname)\n",
    "\n",
    "# tostring() is deprecated in recent numpy; tobytes() is the drop-in replacement.\n",
    "subject_gender = bdata['gender'].tobytes().decode('utf-8')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's see what's inside `bdata`.\n",
    "\n",
    "- What are the `dmpls`? DMPL parameters that capture soft-tissue dynamics.\n",
    "- What are the `betas`? Body shape parameters."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The available keys from the read file are: ['poses', 'gender', 'mocap_framerate', 'betas', 'marker_data', 'dmpls', 'marker_labels', 'trans']\n",
      "betas: (16,)\n",
      "dmpls: (235, 8)\n",
      "The subject gender is: female\n"
     ]
    }
   ],
   "source": [
    "print(f'The available keys from the read file are: {list(bdata.keys())}')\n",
    "print(f'betas: {bdata[\"betas\"].shape}')\n",
    "print(f'dmpls: {bdata[\"dmpls\"].shape}')\n",
    "print(f'The subject gender is: {subject_gender}')"
   ]
  },
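  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check on the pose layout (a sketch; the 3/63/90 split is the same one the slicing further below assumes): each frame of `poses` is an axis-angle vector stacking the root orientation, 21 body joints, and 30 hand joints (15 per hand), 3 values each."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: verify that the per-frame pose vector decomposes into\n",
    "# root (3) + body (21 joints x 3 = 63) + hands (30 joints x 3 = 90) = 156.\n",
    "n_frames, pose_dim = bdata['poses'].shape\n",
    "root_dim, body_dim, hand_dim = 3, 63, 90\n",
    "assert pose_dim == root_dim + body_dim + hand_dim\n",
    "print(f'{n_frames} frames, {body_dim // 3} body joints, {hand_dim // 3} hand joints')"
   ]
  },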
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "from human_body_prior.body_model.body_model import BodyModel\n",
    "\n",
    "dmpl_fname = osp.join(support_dir, f'body_models/dmpls/{subject_gender}/model.npz')\n",
    "bm_fname = osp.join(support_dir, f'body_models/smplh/{subject_gender}/model.npz')\n",
    "\n",
    "num_betas = 16 # number of body shape parameters\n",
    "num_dmpls = 8 # number of DMPL parameters\n",
    "\n",
    "bm = BodyModel(bm_fname=bm_fname, num_betas=num_betas, num_dmpls=num_dmpls, dmpl_fname=dmpl_fname).to(comp_device)\n",
    "faces = c2c(bm.f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The provided sample data includes the original mocap marker data.\n",
    "\n",
    "In the released AMASS dataset, marker data is included only for the test set.\n",
    "\n",
    "For the remaining subsets, the marker data can be obtained from the respective mocap websites.\n",
    "\n",
    "In the following we build PyTorch tensors for the parameters controlling different parts of the body model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Body parameter vector shapes: \n",
      "root_orient: torch.Size([235, 3]) \n",
      "pose_body: torch.Size([235, 63]) \n",
      "pose_hand: torch.Size([235, 90]) \n",
      "trans: torch.Size([235, 3]) \n",
      "betas: torch.Size([235, 16]) \n",
      "dmpls: torch.Size([235, 8])\n",
      "time_length = 235\n"
     ]
    }
   ],
   "source": [
    "time_length = len(bdata['trans'])\n",
    "body_parms = {\n",
    "    'root_orient': torch.Tensor(bdata['poses'][:, :3]).to(comp_device), # controls the global root orientation\n",
    "    'pose_body': torch.Tensor(bdata['poses'][:, 3:66]).to(comp_device), # controls the body\n",
    "    'pose_hand': torch.Tensor(bdata['poses'][:, 66:]).to(comp_device), # controls the finger articulation\n",
    "    'trans': torch.Tensor(bdata['trans']).to(comp_device), # controls the global body position\n",
    "    'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)).to(comp_device), # controls the body shape; the shape is static, so it is repeated per frame\n",
    "    'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]).to(comp_device) # controls soft tissue dynamics\n",
    "}\n",
    "print('Body parameter vector shapes: \\n{}'.format(' \\n'.join(['{}: {}'.format(k,v.shape) for k,v in body_parms.items()])))\n",
    "print('time_length = {}'.format(time_length))"
   ]
  },
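  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a sketch of how these parameters are consumed (assuming the cells above have run): passing any subset of them through the body model yields posed vertices and joint locations for every frame."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: a forward pass through the body model for the whole sequence.\n",
    "body = bm(**{k: v for k, v in body_parms.items() if k in ['pose_body', 'betas']})\n",
    "print(body.v.shape)   # per-frame SMPL-H mesh vertices: (time_length, 6890, 3)\n",
    "print(body.Jtr.shape) # per-frame joint locations"
   ]
  },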
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "scraping",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}

From e109b5bb40d3929b7330fecd27d4d25da987b227 Mon Sep 17 00:00:00 2001
From: Michail Kaseris
Date: Wed, 17 Jan 2024 15:54:27 +0200
Subject: [PATCH 3/4] More requirements for the amass visualizations

---
 requirements.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 461590d..0d5033f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,4 +10,5 @@ tensorboard
 discord
 git+https://github.com/nghorbani/configer
 git+https://github.com/nghorbani/human_body_prior
-git+https://github.com/nghorbani/amass
\ No newline at end of file
+git+https://github.com/nghorbani/amass
+git+https://github.com/nghorbani/body_visualizer
\ No newline at end of file

From 078127f73eb9456c62e8853630e3c0a4341271f6 Mon Sep 17 00:00:00 2001
From: Michail Kaseris
Date: Wed, 17 Jan 2024 16:01:53 +0200
Subject: [PATCH 4/4] Amass visualizations

---
 notebooks/amass_visualization.ipynb | 163 ++++++++++++++++++++--------
 1 file changed, 116 insertions(+), 47 deletions(-)

diff --git a/notebooks/amass_visualization.ipynb b/notebooks/amass_visualization.ipynb
index da6732c..1b6283d 100644
--- a/notebooks/amass_visualization.ipynb
+++ b/notebooks/amass_visualization.ipynb
@@ -9,7 +9,19 @@
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os.path as osp\n",
    "\n",
    "import torch\n",
    "import numpy as np\n",
    "\n",
    "from human_body_prior.tools.omni_tools import copy2cpu as c2c\n",
    "\n",
    "support_dir = '/home/kaseris/Documents/amass/support_data'\n",
    "comp_device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "amass_npz_fname = osp.join(support_dir, 'github_data/dmpl_sample.npz')\n",
    "bdata = np.load(amass_npz_fname)\n",
    "\n",
    "subject_gender = bdata['gender'].tobytes().decode('utf-8')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(f'The available keys from the read file are: {list(bdata.keys())}')\n",
    "print(f'betas: {bdata[\"betas\"].shape}')\n",
    "print(f'dmpls: {bdata[\"dmpls\"].shape}')\n",
    "print(f'The subject gender is: {subject_gender}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from human_body_prior.body_model.body_model import BodyModel\n",
    "\n",
    "dmpl_fname = osp.join(support_dir, f'body_models/dmpls/{subject_gender}/model.npz')\n",
    "bm_fname = osp.join(support_dir, f'body_models/smplh/{subject_gender}/model.npz')\n",
    "\n",
    "num_betas = 16 # number of body shape parameters\n",
    "num_dmpls = 8 # number of DMPL parameters\n",
    "\n",
    "bm = BodyModel(bm_fname=bm_fname, num_betas=num_betas, num_dmpls=num_dmpls, dmpl_fname=dmpl_fname).to(comp_device)\n",
    "faces = c2c(bm.f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The provided sample data includes the original mocap marker data.\n",
    "\n",
    "In the released AMASS dataset, marker data is included only for the test set.\n",
    "\n",
    "For the remaining subsets, the marker data can be obtained from the respective mocap websites.\n",
    "\n",
    "In the following we build PyTorch tensors for the parameters controlling different parts of the body model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "time_length = len(bdata['trans'])\n",
    "body_parms = {\n",
    "    'root_orient': torch.Tensor(bdata['poses'][:, :3]).to(comp_device), # controls the global root orientation\n",
    "    'pose_body': torch.Tensor(bdata['poses'][:, 3:66]).to(comp_device), # controls the body\n",
    "    'pose_hand': torch.Tensor(bdata['poses'][:, 66:]).to(comp_device), # controls the finger articulation\n",
    "    'trans': torch.Tensor(bdata['trans']).to(comp_device), # controls the global body position\n",
    "    'betas': torch.Tensor(np.repeat(bdata['betas'][:num_betas][np.newaxis], repeats=time_length, axis=0)).to(comp_device), # controls the body shape; the shape is static, so it is repeated per frame\n",
    "    'dmpls': torch.Tensor(bdata['dmpls'][:, :num_dmpls]).to(comp_device) # controls soft tissue dynamics\n",
    "}\n",
    "print('Body parameter vector shapes: \\n{}'.format(' \\n'.join(['{}: {}'.format(k,v.shape) for k,v in body_parms.items()])))\n",
    "print('time_length = {}'.format(time_length))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import trimesh\n",
    "from body_visualizer.tools.vis_tools import colors, show_image\n",
    "from body_visualizer.mesh.mesh_viewer import MeshViewer\n",
    "from body_visualizer.mesh.sphere import points_to_spheres\n",
    "\n",
    "imw, imh = 1600, 1600\n",
    "mv = MeshViewer(width=imw, height=imh, use_offscreen=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "body_pose_beta = bm(**{k:v for k,v in body_parms.items() if k in ['pose_body', 'betas']})\n",
    "\n",
    "def vis_body_pose_beta(fId = 0):\n",
    "    # Render the posed body mesh for frame fId offscreen and display it.\n",
    "    body_mesh = trimesh.Trimesh(vertices=c2c(body_pose_beta.v[fId]), faces=faces, vertex_colors=np.tile(colors['grey'], (6890, 1)))\n",
    "    mv.set_static_meshes([body_mesh])\n",
    "    body_image = mv.render(render_wireframe=False)\n",
    "    show_image(body_image)\n",
    "\n",
    "vis_body_pose_beta(fId=0)"
   ]
  },
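  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Since the viewer renders offscreen, a frame can also be written straight to disk. This is only a sketch: it assumes `mv.render` returns an H x W x 3 `uint8` array (as the `show_image` calls above suggest) and that PIL is available; the output filename is made up."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: save the currently staged frame-0 render as a PNG.\n",
    "from PIL import Image\n",
    "\n",
    "body_image = mv.render(render_wireframe=False)\n",
    "Image.fromarray(body_image).save('body_pose_beta_frame0.png')  # hypothetical path"
   ]
  },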
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Adding `pose_hand`, the model now has a more realistic hand posture."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "body_pose_hand = bm(**{k:v for k,v in body_parms.items() if k in ['pose_body', 'betas', 'pose_hand']})\n",
    "\n",
    "def vis_body_pose_hand(fId = 0):\n",
    "    body_mesh = trimesh.Trimesh(vertices=c2c(body_pose_hand.v[fId]), faces=faces, vertex_colors=np.tile(colors['grey'], (6890, 1)))\n",
    "    mv.set_static_meshes([body_mesh])\n",
    "    body_image = mv.render(render_wireframe=False)\n",
    "    show_image(body_image)\n",
    "\n",
    "vis_body_pose_hand(fId=0)"
   ]
  },
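  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The same helper renders any frame of the sequence, so stepping through `time_length` gives a coarse preview of the whole motion (a sketch; the stride of 60 frames is arbitrary)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: preview the motion by rendering every 60th frame.\n",
    "for fId in range(0, time_length, 60):\n",
    "    vis_body_pose_hand(fId=fId)"
   ]
  },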
"metadata": {}, + "outputs": [], + "source": [ + "def vis_body_joints(fId = 0):\n", + " joints = c2c(body_pose_hand.Jtr[fId])\n", + " joints_mesh = points_to_spheres(joints, point_color = colors['red'], radius=0.005)\n", + "\n", + " mv.set_static_meshes([joints_mesh])\n", + " body_image = mv.render(render_wireframe=False)\n", + " show_image(body_image)\n", + "\n", + "vis_body_joints(fId=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "body_dmpls = bm(**{k:v for k,v in body_parms.items() if k in ['pose_body', 'betas', 'pose_hand', 'dmpls']})\n", + "\n", + "def vis_body_dmpls(fId = 0):\n", + " body_mesh = trimesh.Trimesh(vertices=c2c(body_dmpls.v[fId]), faces=faces, vertex_colors=np.tile(colors['grey'], (6890, 1)))\n", + " mv.set_static_meshes([body_mesh])\n", + " body_image = mv.render(render_wireframe=False)\n", + " show_image(body_image)\n", + "\n", + "vis_body_dmpls(fId=0)" + ] } ], "metadata": {