diff --git a/Human Dataset Visualization/Dataset/README.md b/Human Dataset Visualization/Dataset/README.md
new file mode 100644
index 000000000..28dc5983e
--- /dev/null
+++ b/Human Dataset Visualization/Dataset/README.md
@@ -0,0 +1,25 @@
+# Human3.6M Dataset
+
+### 🎯 Goal
+The Human3.6M dataset aims to provide a comprehensive resource for understanding human poses and movements. It includes a large collection of 3D human motion data captured in a controlled environment.
+
+### 🧵 Dataset Description
+The Human3.6M dataset consists of high-quality 3D human motion sequences. It captures various human activities and provides extensive data for each recorded session, including RGB videos, depth maps, 3D joint positions, and more.
+
+### 📁 Dataset Contents
+- **RGB Videos:** High-resolution video recordings of human activities.
+- **Depth Maps:** Depth information corresponding to the video frames.
+- **3D Joint Positions:** 3D coordinates of body joints for each frame.
+- **Annotations:** Detailed annotations for the recorded actions.
+
+### 🌐 Accessing the Dataset
+The Human3.6M dataset can be accessed [here](http://vision.imar.ro/human3.6m/description.php).
+
+### 🧾 Usage
+This dataset is ideal for researchers and developers working on:
+- 3D Human Pose Estimation
+- Human Activity Recognition
+- Motion Analysis
+- Biomechanics Research
+- Human-Computer Interaction
+
diff --git a/Human Dataset Visualization/Dataset/dataset.txt b/Human Dataset Visualization/Dataset/dataset.txt
new file mode 100644
index 000000000..a671cd48c
--- /dev/null
+++ b/Human Dataset Visualization/Dataset/dataset.txt
@@ -0,0 +1 @@
+The dataset can be accessed from the following link: http://vision.imar.ro/human3.6m/description.php
\ No newline at end of file
diff --git a/Human Dataset Visualization/Images/directions0.png b/Human Dataset Visualization/Images/directions0.png
new file mode 100644
index 000000000..3f504ebbd
Binary files /dev/null and b/Human Dataset Visualization/Images/directions0.png differ
diff --git a/Human Dataset Visualization/Models/Data_generator_visualization.py b/Human Dataset Visualization/Models/Data_generator_visualization.py
new file mode 100644
index 000000000..422b7f607
--- /dev/null
+++ b/Human Dataset Visualization/Models/Data_generator_visualization.py
@@ -0,0 +1,110 @@
+# Step 1: import the libraries
+import glob
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+
+# Check for a GPU (kept for downstream training scripts)
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+# Step 2: load the dataset
+# Read the sorted image and label file lists so they pair up index by index
+data = sorted(glob.glob('data/h3.6m/Images/S1/*'))
+label = sorted(glob.glob('data/h3.6m/labels/S1/*'))
+print(len(data))
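+
+# Sanity check (an addition, not part of the original pipeline): pairing
+# images and labels by sorted order assumes the two folders contain
+# identically named files, so it is worth asserting that before converting.
+import os
+assert len(data) == len(label), 'image/label counts differ'
+for img_path, lab_path in zip(data, label):
+    assert os.path.basename(img_path) == os.path.basename(lab_path), \
+        f'mismatched pair: {img_path} / {lab_path}'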
+
+# Convert the data into arrays.
+dataset = []
+labelset = []
+for i in range(len(data)):
+    inp = cv2.imread(data[i])
+    dataset.append(inp)
+    lab = cv2.imread(label[i])
+    labelset.append(lab)
+
+print('data converted into arrays')
+print(len(dataset))
+print(len(labelset))
+
+# Split the data into train, validation, and test sets
+# (70% train, 20% validation, 10% test).
+train_size = np.floor(len(dataset) * 0.7).astype(int)
+val_size = np.floor(len(dataset) * 0.2).astype(int)
+test_size = np.floor(len(dataset) * 0.1).astype(int)
+
+train_data = dataset[:train_size]
+val_data = dataset[train_size : train_size + val_size]
+test_data = dataset[train_size + val_size : train_size + val_size + test_size]
+
+train_label = labelset[:train_size]
+val_label = labelset[train_size : train_size + val_size]
+test_label = labelset[train_size + val_size : train_size + val_size + test_size]
+
+# Inspect the shape of one sample.
+img1 = train_data[0]
+print(img1.shape)  # (50, 99, 3): fifty poses per sequence, 99 attributes per pose, three channels
+
+lab1 = train_label[0]
+print(lab1.shape)
+
+
+# Step 3: build the train and validation loaders from a batch generator.
+def generator(input_data, label_data, batch_size=16):
+    num_samples = len(input_data)
+
+    # Loop forever so the generator can feed training indefinitely.
+    while True:
+        for offset in range(0, num_samples, batch_size):
+            # Take the slice of samples and labels used in this batch.
+            batch_samples = input_data[offset:offset + batch_size]
+            label_samples = label_data[offset:offset + batch_size]
+
+            # Stack the batch into X and y arrays.
+            X_train = np.array(batch_samples)
+            y_train = np.array(label_samples)
+
+            yield X_train, y_train
+
+
+train_loader = generator(train_data, train_label, batch_size=8)
+val_loader = generator(val_data, val_label, batch_size=8)
+
+x, y = next(train_loader)
+
+print('x shape: ', x.shape)
+print('labels shape: ', y.shape)
+
+
+# Plot one batch (OpenCV loads images as BGR, so reverse the channels for display).
+plt.figure(1, figsize=(12, 12))
+for i in range(8):
+    plt.subplot(4, 4, i + 1)
+    plt.tight_layout()
+    plt.imshow(x[i][:, :, ::-1], interpolation='none')
+    plt.xticks([])
+    plt.yticks([])
+plt.show()
diff --git a/Human Dataset Visualization/Models/README.md b/Human Dataset Visualization/Models/README.md
new file mode 100644
index 000000000..9735ae0ed
--- /dev/null
+++ b/Human Dataset Visualization/Models/README.md
@@ -0,0 +1,44 @@
+# Human3.6M Dataset Visualization
+
+### 🎯 Goal
+The main goal of this project is to visualize the Human3.6M dataset to better understand human poses and movements captured in various scenarios.
+
+### 🧵 Dataset
+The dataset used for this visualization can be accessed [here](http://vision.imar.ro/human3.6m/description.php). It includes extensive data of human activities captured in a controlled environment.
+
+### 🧾 Description
+This project involves an in-depth visualization of the Human3.6M dataset, which consists of 3D human poses and motions. The visualizations help in understanding the complex movements and postures of the human body across different actions.
+
+### 🧮 What I had done!
+1. Collected and pre-processed the dataset.
+2. Performed exploratory data analysis to uncover patterns and trends in human movements.
+3. Implemented various visualization techniques to represent the 3D poses and motions.
+4. Visualized the data using charts and 3D plots to better understand the insights.
+5. Analyzed the visualizations to gain insights into human motion dynamics.
+
+### 🚀 Visualizations Implemented
+- 3D Scatter Plots: To visualize the spatial positions of body joints (see the sketch below).
+- Motion Trajectories: For visualizing the paths of body joints over time.
+- Heatmaps: To represent the frequency of joint positions.
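+
+As a rough illustration of the 3D scatter-plot idea, the minimal sketch below plots a single pose. The `joints` array is a random stand-in for one frame of 3D joint positions (Human3.6M uses a 32-joint skeleton); the exact loading code depends on how the dataset was exported.
+
+```python
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Placeholder: one pose as an (n_joints, 3) array of x, y, z coordinates.
+# In practice this would come from the dataset's 3D joint positions.
+joints = np.random.rand(32, 3)
+
+fig = plt.figure()
+ax = fig.add_subplot(projection='3d')
+ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2])
+ax.set_xlabel('x')
+ax.set_ylabel('y')
+ax.set_zlabel('z')
+ax.set_title('Spatial positions of body joints (one frame)')
+plt.show()
+```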
+
+### 📚 Libraries Needed
+- Pandas
+- NumPy
+- Matplotlib
+- Seaborn
+- Plotly
+
+### 📊 Exploratory Data Analysis Results
+![EDA Results](../Images/directions0.png)
+
+### 📈 Insights from the Visualizations
+- The 3D scatter plots effectively depict the spatial relationships between different body joints.
+- Motion trajectories provide a clear view of joint movements over time.
+- Heatmaps highlight the most frequent joint positions, indicating common postures.
+
+### 📢 Conclusion
+The visualizations provided significant insights into human poses and movements. These insights can be useful for various applications, including motion-capture analysis, human-computer interaction, and biomechanics research.
+
+### ✒️ Your Signature
+Somnath Shaw
+[GitHub](https://github.com/somnathshaw)
diff --git a/Human Dataset Visualization/requirements.txt b/Human Dataset Visualization/requirements.txt
new file mode 100644
index 000000000..23e445c0b
--- /dev/null
+++ b/Human Dataset Visualization/requirements.txt
@@ -0,0 +1,9 @@
+numpy
+pandas
+matplotlib
+seaborn
+plotly
+scikit-learn
+opencv-python
+h5py
+torch