"""Render a Wavefront OBJ head model with pyglet, snapshot the framebuffer,
then run MediaPipe FaceMesh on the rendered image and display the detected
landmarks with OpenCV.

Pipeline:
    1. Load ``Inputs/XiaoTian/ALl.obj`` and print, per material, the XYZ
       extent and center of its vertex positions (sanity check of the model).
    2. Open a 512x512 pyglet window; ``on_draw`` renders the model and saves
       the color buffer to ``test.png`` each frame.
    3. After the pyglet loop exits, feed ``test.png`` to MediaPipe FaceMesh
       and show tesselation / contour / iris landmarks until Esc is pressed.
"""

import pywavefront.material
import pywavefront
from pywavefront import visualization
import pyglet
from pyglet.gl import *
import ctypes
import numpy as np
import copy
import mediapipe as mp
import cv2
from cv2 import VideoCapture
from cv2 import waitKey

obj = pywavefront.Wavefront('Inputs/XiaoTian/ALl.obj')

# Per-material (N, 3) arrays of vertex positions, collected for later use.
v_v = []
for material_name in obj.materials:
    material = obj.materials[material_name]
    # pywavefront interleaves vertex data in groups of 8 floats per vertex;
    # elements 5..7 of each group are the XYZ position (0..4 presumably
    # texture/normal data -- confirm against the material's vertex_format).
    # A reshape + slice replaces the original element-by-element copy loop
    # (which also contained a no-op "+ 0.0" self-assignment, now removed).
    interleaved = np.asarray(material.vertices, dtype=np.float64).reshape(-1, 8)
    vx = interleaved[:, 5:8].copy()
    lo = np.min(vx, axis=0)
    hi = np.max(vx, axis=0)
    print('xyz', hi - lo)            # bounding-box extent per axis
    print('cccxyz', (hi + lo) * 0.5)  # bounding-box center per axis
    v_v.append(vx)

window = pyglet.window.Window(width=512, height=512)
# Helper ctypes array type for glLightfv parameters (4 floats: RGBA / XYZW).
lightfv = ctypes.c_float * 4


@window.event
def on_resize(width, height):
    """Set an orthographic projection scaled to the window size.

    NOTE(review): ``height`` is used for the horizontal (left/right) bounds
    and ``width`` for the vertical (bottom/top) ones, which looks swapped.
    It has no visible effect here because the window is square (512x512),
    so the original mapping is preserved -- confirm before reusing with a
    non-square window.
    """
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(-height / 4000, height / 4000, -width / 4000, width / 4000, -100, 100.0)
    glMatrixMode(GL_MODELVIEW)
    return True  # tell pyglet the default resize handler should be skipped


# Output path for the rendered frame; read back by the MediaPipe stage below.
# (The original had a module-level ``global outName`` statement, which is a
# no-op at module scope and has been dropped.)
outName = "test.png"

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh


@window.event
def on_draw():
    """Render the OBJ model with basic lighting and save the frame to disk."""
    global obj
    lightfv = ctypes.c_float * 4
    glViewport(0, 0, 512, 512)
    window.clear()
    glLoadIdentity()
    glLightfv(GL_LIGHT0, GL_AMBIENT, lightfv(0.2, 0.2, 0.2, 0.3))
    # BUG (fixed): the original issued two consecutive
    # glLightfv(GL_LIGHT0, GL_DIFFUSE, ...) calls; only the second takes
    # effect, so the first -- lightfv(0.5, 1.0, 0.5, 1.0) -- was dead code
    # and has been removed.
    glLightfv(GL_LIGHT0, GL_DIFFUSE, lightfv(0.5, 1.0, -0.5, 1.0))
    glLightfv(GL_LIGHT1, GL_DIFFUSE, lightfv(0.5, 0.5, -0.5, 1.0))
    glEnable(GL_LIGHT0)
    glEnable(GL_LIGHT1)
    glEnable(GL_LIGHTING)
    glEnable(GL_COLOR_MATERIAL)
    glEnable(GL_DEPTH_TEST)
    glShadeModel(GL_SMOOTH)
    # Position the head in view: shift down, then face the camera.
    glTranslated(0, -1.6, -0.0)
    glRotatef(180, 0, 1, 0)
    visualization.draw(obj, 'dynamic')
    print("One")
    # Capture the just-rendered color buffer; overwritten every frame.
    pyglet.image.get_buffer_manager().get_color_buffer().save(outName)
    # exit()


pyglet.app.run()

# ---- MediaPipe FaceMesh on the rendered snapshot -------------------------
image = cv2.imread(outName)
with mp_face_mesh.FaceMesh(
        max_num_faces=1,
        refine_landmarks=True,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as face_mesh:
    # Mark the image read-only while MediaPipe processes it (lets MediaPipe
    # avoid a copy), and convert BGR (OpenCV) -> RGB (MediaPipe).
    image.flags.writeable = False
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = face_mesh.process(image)

    # Draw the face mesh annotations on the image.
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_tesselation_style())
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_contours_style())
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_IRISES,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_iris_connections_style())
    # Mirror horizontally for a selfie-style view.
    cv2.imshow('MediaPipe Face Mesh', cv2.flip(image, 1))
    # Block until the Esc key (27) is pressed in the OpenCV window.
    while True:
        if cv2.waitKey(0) & 0xFF == 27:
            break