Skip to content

Instantly share code, notes, and snippets.

@chawasit
Created October 30, 2018 17:46
Show Gist options
  • Select an option

  • Save chawasit/55419c1f9b69267cf385fc34fcfadd66 to your computer and use it in GitHub Desktop.

Select an option

Save chawasit/55419c1f9b69267cf385fc34fcfadd66 to your computer and use it in GitHub Desktop.
# coding: utf-8
import os
import sys
from sys import platform
import numpy
import cv2 as cv2
import pandas
import codecs
import matplotlib.pyplot as plt
import util
# Directory of this script, so the OpenPose build path below works
# regardless of the current working directory.
dir_path = os.path.dirname(os.path.realpath(__file__))
# Make the locally-built OpenPose Python bindings importable.
sys.path.append(dir_path + '/openpose-master/build/python/openpose/')
# Parameters for OpenPose. Take a look at C++ OpenPose example for meaning of components. Ensure all below are filled
try:
    from openpose import *
except ImportError as err:
    # Catch only import failures: a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit. Chain the cause so the real import
    # error (missing .so, wrong Python ABI, ...) stays visible.
    raise Exception(
        'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?') from err
# OpenPose runtime configuration; see the C++ examples for the meaning of
# each flag. Values are identical to the reference tutorial defaults.
params = {
    "logging_level": 3,
    "output_resolution": "-1x-1",
    "net_resolution": "-1x368",
    "model_pose": "BODY_25",
    "alpha_pose": 0.6,
    "scale_gap": 0.3,
    "scale_number": 1,
    "render_threshold": 0.05,
    # If GPU version is built, and multiple GPUs are available, set the ID here
    "num_gpu_start": 0,
    "disable_blending": False,
    # Ensure you point to the correct path where models are located
    # dir_path + "/../../../models/"
    "default_model_folder": "openpose-master/models/",
}
# Construct OpenPose object allocates GPU memory
openpose = OpenPose(params)
# Ground-truth punch counts, one row per example clip.
df = pandas.read_csv('punch-counting-dataset.csv', encoding='utf8')
# Running evaluation totals across all processed videos.
sum_error, count = 0, 0
# Per-class frame-dump counters (used only by the commented-out imwrite code).
left_count, right_count, none_count = 0, 0, 0
# Sentinel: -1 marks rows that were never evaluated.
df['error'] = -1
# Smoothing weight for the exponential moving averages below (loop-invariant,
# so defined once instead of per video).
WEIGHT_RATIO = 0.2
# Normalized elbow-below-shoulder distance under which an arm counts as punching.
# (Replaces the magic 0.6 repeated in the original; the unused `threshold = 0.85`
# local was removed.)
PUNCH_THRESHOLD = 0.6

# Evaluate the punch counter on videos 1..144 against the ground-truth CSV.
for i in range(1, 145):
    ans = df.iloc[i - 1]
    # Skip rows without a valid ground-truth entry (column 4 is NaN).
    if numpy.isnan(ans.iloc[4]):
        continue
    video_no = i
    cap = cv2.VideoCapture('Examples/%s.mp4' % (video_no))
    left_punch_count = 0
    right_punch_count = 0
    # Edge-trigger latches: count a punch only on the below-threshold transition.
    is_left_punching = False
    is_right_punching = False
    frame_count = 1
    # Kept for the commented-out matplotlib analysis below.
    x_data = []
    left_data = []
    right_data = []
    # EMA state; None until the first frame with a detected person.
    shoulder_distance = None
    left_distance = None
    right_distance = None
    while True:
        # Read new image
        grabbed, img = cap.read()
        if not grabbed:
            break
        h, w, _ = img.shape
        # Output keypoints and the image with the human skeleton blended on it
        keypoints, output_image = openpose.forward(img, True)
        # keypoints is a [#people x #keypoints x 3] numpy array; use person 0 only.
        if keypoints.shape[0] >= 1:
            keypoint = keypoints[0]
            # BODY_25 indices: 1=neck, 2/3=right shoulder/elbow, 5/6=left shoulder/elbow.
            neck = keypoint[1]
            right_shoulder = keypoint[2]
            right_elbow = keypoint[3]
            left_shoulder = keypoint[5]
            left_elbow = keypoint[6]
            # Scale reference: summed shoulder-to-neck distances (person size proxy).
            new_shoulder_distance = (util.euclidean_distance(left_shoulder[:2], neck[:2]) + util.euclidean_distance(right_shoulder[:2], neck[:2]))
            # Vertical elbow-vs-shoulder offset, normalized by body scale.
            # (Image y grows downward, so raised elbow -> small/negative value.)
            new_left_distance = (-left_shoulder[1] + left_elbow[1]) / new_shoulder_distance
            new_right_distance = (-right_shoulder[1] + right_elbow[1]) / new_shoulder_distance
            if shoulder_distance is None:
                # First detection: seed the moving averages.
                shoulder_distance = new_shoulder_distance
                left_distance = new_left_distance
                right_distance = new_right_distance
            else:
                # NOTE(review): the EMA weights are asymmetric — shoulder_distance
                # gives the NEW sample weight (1 - WEIGHT_RATIO) = 0.8, while
                # left/right give the new sample weight WEIGHT_RATIO = 0.2.
                # Preserved as-is; confirm whether this was intentional.
                shoulder_distance = shoulder_distance * WEIGHT_RATIO + new_shoulder_distance * (1 - WEIGHT_RATIO)
                left_distance = new_left_distance * WEIGHT_RATIO + left_distance * (1 - WEIGHT_RATIO)
                right_distance = new_right_distance * WEIGHT_RATIO + right_distance * (1 - WEIGHT_RATIO)
            # Rising-edge detection: one count per below-threshold excursion.
            if left_distance < PUNCH_THRESHOLD and not is_left_punching:
                left_punch_count += 1
                is_left_punching = True
            elif left_distance > PUNCH_THRESHOLD:
                is_left_punching = False
            if right_distance < PUNCH_THRESHOLD and not is_right_punching:
                right_punch_count += 1
                is_right_punching = True
            elif right_distance > PUNCH_THRESHOLD:
                is_right_punching = False
            # if is_left_punching:
            #     cv2.imwrite('images/left/%06d.png' % (left_count), img)
            #     left_count += 1
            # if is_right_punching:
            #     cv2.imwrite('images/right/%06d.png' % (right_count), img)
            #     right_count += 1
            # if not (is_left_punching and is_right_punching):
            #     cv2.imwrite('images/none/%06d.png' % (none_count), img)
            #     none_count += 1
            # stat for analysis
            # x_data.append(frame_count)
            # left_data.append((left_shoulder[1] - left_elbow[1]) / shoulder_distance)
            # right_data.append((right_shoulder[1] - right_elbow[1]) / shoulder_distance)
            # left_data.append(left_distance)
            # right_data.append(right_distance)
        # Display the image with the live counts overlaid (right count on the
        # left half, left count on the right half — the view is mirrored).
        cv2.putText(img, str(right_punch_count), (int(w / 4) - 40, int(h / 2)), cv2.FONT_HERSHEY_PLAIN, 8, (220, 200, 0, 0.5), 4)
        cv2.putText(img, str(left_punch_count), (int(w / 4 * 3) - 40, int(h / 2)), cv2.FONT_HERSHEY_PLAIN, 8, (20, 0, 200, 0.5), 4)
        cv2.imshow("output", img)
        frame_count += 1
        cv2.waitKey(1)
    # Release the capture handle — the original leaked one per video.
    cap.release()
    # plt.plot(x_data, left_data, 'r', x_data, right_data, 'b')
    # plt.show(False)
    # plt.draw()
    # Compare against ground truth: columns 2/3 hold the left/right counts.
    ans = df.iloc[int(video_no) - 1]
    left_error = numpy.abs(left_punch_count - ans.iloc[2])
    right_error = numpy.abs(right_punch_count - ans.iloc[3])
    error = (left_error + right_error) / 2
    df.at[int(video_no) - 1, 'error'] = error
    print('no.', i, 'e', error, 'le', left_error, 're', right_error)
    print(' left', left_punch_count, 'right', right_punch_count)
    count += 1
    sum_error += error
# Mean absolute error over the evaluated videos; guard against an empty run
# (the original divided by zero when every row was NaN).
if count:
    print(sum_error / count)
df.to_csv('save.csv')
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment