From bee3623a571743019eceae74addbe682e3104ba8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20BUSSIERE?=
Date: Sun, 19 Feb 2023 20:07:12 +0100
Subject: [PATCH] =?UTF-8?q?Transf=C3=A9rer=20les=20fichiers=20vers=20'pyth?=
 =?UTF-8?q?on/mediapipe.py'?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 python/mediapipe.py/test3.py | 92 ++++++++++++++++++++++++++++++++++++
 1 file changed, 92 insertions(+)
 create mode 100644 python/mediapipe.py/test3.py

diff --git a/python/mediapipe.py/test3.py b/python/mediapipe.py/test3.py
new file mode 100644
index 0000000..1fcb5d5
--- /dev/null
+++ b/python/mediapipe.py/test3.py
@@ -0,0 +1,92 @@
+import cv2
+import mediapipe as mp
+
+mp_drawing = mp.solutions.drawing_utils
+mp_drawing_styles = mp.solutions.drawing_styles
+mp_face_mesh = mp.solutions.face_mesh
+
+drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
+cap = cv2.VideoCapture('X:/video.mp4')
+
+R = 0
+G = 0
+B = 0
+red = green = blue = 0
+n = 0
+i = 0
+
+with mp_face_mesh.FaceMesh(
+        max_num_faces=1,
+        refine_landmarks=True,
+        min_detection_confidence=0.5,
+        min_tracking_confidence=0.5) as face_mesh:
+    while cap.isOpened():
+        success, image = cap.read()
+        if not success:
+            print("Reached the end of the video (or failed to read a frame).")
+            # The input is a video file, so stop instead of skipping the frame.
+            break
+        i += 1
+        # Resize, convert to RGB, then mark the frame as not writeable so
+        # MediaPipe can pass it by reference (performance optimisation).
+        img = cv2.resize(image, (640, 480))
+        image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        image.flags.writeable = False
+        results = face_mesh.process(image)
+
+        # Draw the face mesh annotations on the image.
+        image.flags.writeable = True
+        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+        if results.multi_face_landmarks:
+            for face_landmarks in results.multi_face_landmarks:
+                shape = image.shape
+                for landmark in face_landmarks.landmark:
+                    n += 1
+                    x = landmark.x
+                    y = landmark.y
+                    relative_x = int(x * shape[1])
+                    relative_y = int(y * shape[0])
+                    #cv2.circle()
+
+                    if relative_x >= shape[1]:
+                        relative_x = shape[1] - 1  # clamp to image width
+                    if relative_y >= shape[0]:
+                        relative_y = shape[0] - 1  # clamp to image height
+                    B, G, R = img[relative_y, relative_x]  # numpy indexing is [row, col]
+                    #cv2.circle(image, [relative_x, relative_y], 3, [int(B), int(G), int(R)], 3)
+                    red += R
+                    blue += B
+                    green += G
+                color = [int(blue / n), int(green / n), int(red / n)]  # mean face colour (BGR)
+                print(color)
+                red = green = blue = n = 0
+                #cv2.rectangle(image, [400, 450], [450, 450], [255,255,255], 10)
+                #color_hex = '#'+str(hex(color[2])[2:])+str(hex(color[1])[2:])+str(hex(color[0])[2:])
+                #cv2.putText(img, str(i), [400, 450], cv2.FONT_HERSHEY_PLAIN, 1, color, 2)
+                #cv2.text(image, [600, 400], )
+                mp_drawing.draw_landmarks(
+                    image=image,
+                    landmark_list=face_landmarks,
+                    connections=mp_face_mesh.FACEMESH_TESSELATION,
+                    landmark_drawing_spec=None,
+                    connection_drawing_spec=mp_drawing_styles
+                    .get_default_face_mesh_tesselation_style())
+                '''mp_drawing.draw_landmarks(
+                    image=image,
+                    landmark_list=face_landmarks,
+                    connections=mp_face_mesh.FACEMESH_CONTOURS,
+                    landmark_drawing_spec=None,
+                    connection_drawing_spec=mp_drawing_styles
+                    .get_default_face_mesh_contours_style())
+                mp_drawing.draw_landmarks(
+                    image=image,
+                    landmark_list=face_landmarks,
+                    connections=mp_face_mesh.FACEMESH_IRISES,
+                    landmark_drawing_spec=None,
+                    connection_drawing_spec=mp_drawing_styles
+                    .get_default_face_mesh_iris_connections_style())'''
+        # Flip the image horizontally for a selfie-view display.
+        cv2.imshow('MediaPipe Face Mesh', cv2.flip(image, 1))
+        if cv2.waitKey(5) & 0xFF == 27:
+            break
+cap.release()
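Side note on the commented-out color_hex line in the patch: slicing hex() output drops leading zeros (hex(5)[2:] gives '5', not '05'), so the resulting colour string can come out malformed. If a hex string is ever needed, a zero-padded format string is a safer sketch, assuming color still holds the averaged [B, G, R] values computed in the loop:

    color_hex = '#{:02x}{:02x}{:02x}'.format(color[2], color[1], color[0])  # BGR -> '#RRGGBB'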