import math

import cv2
import mediapipe as mp
import matplotlib.pyplot as plt
import numpy as np

# Load the front- and side-view photographs.
front_image = cv2.imread("/home/aparna/body_measurements_mp/assests/aparna.jpg")
side_image = cv2.imread(
    "/home/aparna/body_measurements_mp/body_measurement_using_cv/Images/final3.jpg"
)

# Resize both images to a fixed height of 256 px, using the front image's aspect ratio.
resized_height = 256
resized_width = int(front_image.shape[1] * (resized_height / front_image.shape[0]))
front_image_resized = cv2.resize(front_image, (resized_width, resized_height))
side_image_resized = cv2.resize(side_image, (resized_width, resized_height))

# Grayscale, normalized copies are used here only for display.
front_image_gray = cv2.cvtColor(front_image_resized, cv2.COLOR_BGR2GRAY)
side_image_gray = cv2.cvtColor(side_image_resized, cv2.COLOR_BGR2GRAY)
front_image_normalized = front_image_gray.astype("float32") / 255.0
side_image_normalized = side_image_gray.astype("float32") / 255.0

cv2.imshow("front_img", front_image_normalized)
cv2.imshow("side_image", side_image_normalized)

# Set up MediaPipe Pose for static images.
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(
    static_image_mode=True, min_detection_confidence=0.5, min_tracking_confidence=0.5
)

# Landmarks of interest: shoulders, elbows, wrists, hips, knees, ankles.
landmark_indices = [11, 12, 13, 14, 15, 16, 23, 24, 25, 26, 27, 28]

# MediaPipe expects RGB input; OpenCV loads images as BGR.
front_results = pose.process(cv2.cvtColor(front_image_resized, cv2.COLOR_BGR2RGB))
side_results = pose.process(cv2.cvtColor(side_image_resized, cv2.COLOR_BGR2RGB))

front_image_keypoints = front_image_resized.copy()
side_image_keypoints = side_image_resized.copy()


def calculate_distance(landmark1, landmark2, pixel_to_metric_ratio):
    """Euclidean distance between two landmarks, converted from pixels to cm."""
    x1 = int(landmark1.x * resized_width)
    y1 = int(landmark1.y * resized_height)
    x2 = int(landmark2.x * resized_width)
    y2 = int(landmark2.y * resized_height)
    print(
        "x1:", landmark1.x,
        "y1:", landmark1.y,
        "x2:", landmark2.x,
        "y2:", landmark2.y,
    )
    pixel_distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    return pixel_distance * pixel_to_metric_ratio


def draw_landmarks(image, landmarks, indices):
    """Draw the selected pose landmarks as small circles."""
    h, w, _ = image.shape
    for idx in indices:
        landmark = landmarks.landmark[idx]
        cx, cy = int(landmark.x * w), int(landmark.y * h)
        cv2.circle(image, (cx, cy), 2, (255, 0, 0), -1)


if front_results.pose_landmarks:
    draw_landmarks(
        front_image_keypoints, front_results.pose_landmarks, landmark_indices
    )
if side_results.pose_landmarks:
    draw_landmarks(side_image_keypoints, side_results.pose_landmarks, landmark_indices)

# Calibration: the subject's real height (cm) and the corresponding pixel height,
# measured manually in the resized front image.
height_cm = 150
pixel_distance_height = 126.32 * 2
pixel_to_metric_ratio = height_cm / pixel_distance_height

# Left-arm and left-leg segment lengths from the front view.
if front_results.pose_landmarks:
    shoulder_left = front_results.pose_landmarks.landmark[11]
    elbow_left = front_results.pose_landmarks.landmark[13]
    distance_left_hand_up = calculate_distance(
        shoulder_left, elbow_left, pixel_to_metric_ratio
    )
    print("Distance between left shoulder and left elbow:", distance_left_hand_up)

    wrist_left = front_results.pose_landmarks.landmark[15]
    distance_left_hand_down = calculate_distance(
        elbow_left, wrist_left, pixel_to_metric_ratio
    )
    print("Distance between left elbow and left wrist:", distance_left_hand_down)

    hip_left = front_results.pose_landmarks.landmark[23]
    knee_left = front_results.pose_landmarks.landmark[25]
    distance_left_leg_up = calculate_distance(
        hip_left, knee_left, pixel_to_metric_ratio
    )
    print("Distance between left hip and left knee:", distance_left_leg_up)

    ankle_left = front_results.pose_landmarks.landmark[27]
    distance_left_leg_down = calculate_distance(
        knee_left, ankle_left, pixel_to_metric_ratio
    )
    print("Distance between left knee and left ankle:", distance_left_leg_down)

# Locate the top of the head: edge detection on the upper half of the front image,
# then take the topmost point of the largest outer contour.
gray_image = cv2.cvtColor(front_image_keypoints, cv2.COLOR_BGR2GRAY)
blurred_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
roi = blurred_image[0 : int(resized_height / 2), :]
edges = cv2.Canny(roi, 50, 150)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
outermost_contours = [
    contours[i] for i in range(len(contours)) if hierarchy[0][i][3] == -1
]

topmost_point = None
if outermost_contours:
    largest_contour = max(outermost_contours, key=cv2.contourArea)
    topmost_point = tuple(largest_contour[largest_contour[:, :, 1].argmin()][0])
    topmost_point = (int(topmost_point[0]), int(topmost_point[1]))
    cv2.circle(front_image_keypoints, topmost_point, 2, (255, 0, 0), -1)

# Mark the midpoint between the two hips.
left_hip_idx = 23
right_hip_idx = 24
center_point = None
if front_results.pose_landmarks:
    left_hip = front_results.pose_landmarks.landmark[left_hip_idx]
    right_hip = front_results.pose_landmarks.landmark[right_hip_idx]
    center_point = (
        int((left_hip.x + right_hip.x) / 2 * resized_width),
        int((left_hip.y + right_hip.y) / 2 * resized_height),
    )
    cv2.circle(front_image_keypoints, center_point, 2, (255, 0, 0), -1)

print("Pixel distance for height:", pixel_distance_height)
print("Pixel-to-metric ratio:", pixel_to_metric_ratio)

cv2.imshow("front_img_keypoints", front_image_keypoints)
cv2.imshow("side_img_keypoints", side_image_keypoints)
cv2.waitKey(0)
cv2.destroyAllWindows()
pose.close()
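

# Optional sketch, not part of the measurement flow above: the manually measured
# pixel_distance_height could in principle be estimated from the detected head top
# (topmost_point) and the left ankle landmark. estimate_pixel_height is a hypothetical
# helper introduced only for illustration; it assumes the subject stands upright and
# fully visible in the front image, and that the ankle roughly marks the body's bottom.
def estimate_pixel_height(head_point, ankle_landmark, image_height):
    """Vertical pixel distance from the head-top point to an ankle landmark."""
    ankle_y = int(ankle_landmark.y * image_height)
    return abs(ankle_y - head_point[1])


if front_results.pose_landmarks and topmost_point is not None:
    estimated_pixel_height = estimate_pixel_height(
        topmost_point,
        front_results.pose_landmarks.landmark[27],  # left ankle
        resized_height,
    )
    print("Estimated pixel height (head top to left ankle):", estimated_pixel_height)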