Init Repo

indigo 2025-12-24 07:49:25 +08:00
commit 7cc823c64e
18 changed files with 1291 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,7 @@
venv
__pycache__/
*.pyc
*.pbtxt
lib/
build/
vendor/

.vscode/settings.json vendored Normal file

@@ -0,0 +1,9 @@
{
"python.analysis.extraPaths": [
"${workspaceFolder}\\src\\Video2ARKit\\scripts",
"C:\\Users\\indigo\\.vscode\\extensions\\fxtd-odyssey.mayapy-1.0.4\\mayaSDK",
],
"python.autoComplete.extraPaths": [
"${workspaceFolder}\\src\\Video2ARKit\\scripts"
]
}

CMakeLists.txt Normal file

@@ -0,0 +1,65 @@
# CMake project for the facialPerformanceNode Maya Plugin
cmake_minimum_required(VERSION 3.15)
project(FacialPerformanceNode)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
# --- USER CONFIGURATION: Set these paths ---
# Set the path to your Maya installation directory
set(MAYA_LOCATION "C:/Program Files/Autodesk/Maya2023" CACHE PATH "Path to Maya installation")
# Set the path to your OpenCV installation (where the OpenCVConfig.cmake is)
set(OpenCV_DIR "C:/path/to/opencv/build" CACHE PATH "Path to OpenCV build directory")
# Set the path to your MediaPipe installation (this is more complex and may require a custom FindMediaPipe.cmake)
# For now, we'll define placeholder include/library paths.
set(MEDIAPIPE_INCLUDE_DIR "C:/path/to/mediapipe" CACHE PATH "Path to MediaPipe source/include")
set(MEDIAPIPE_LIBRARY "C:/path/to/mediapipe/build/lib/mediapipe.lib" CACHE FILEPATH "Path to MediaPipe library")
# --- END USER CONFIGURATION ---
# Set the name of our plugin
set(PLUGIN_NAME "facialPerformanceNode")
# Standard C++ settings
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Find Maya
# Add Maya's module path to CMake's search paths
list(APPEND CMAKE_MODULE_PATH "${MAYA_LOCATION}/cmake")
find_package(Maya REQUIRED)
# Find OpenCV (defines the OpenCV_LIBS used below)
find_package(OpenCV REQUIRED)
# Plugin sources (assumed to mirror src/CMakeLists.txt)
set(PLUGIN_SOURCES
src/Video2ARKitNode.h
src/Video2ARKitNode.cpp
src/plugin.cpp
)
# Define the plugin target
add_library(${PLUGIN_NAME} MODULE ${PLUGIN_SOURCES})
# Link against Maya's libraries
target_link_libraries(${PLUGIN_NAME} ${MAYA_LIBRARIES})
# Link against our custom dependencies
target_link_libraries(${PLUGIN_NAME} ${OpenCV_LIBS})
target_link_libraries(${PLUGIN_NAME} ${MEDIAPIPE_LIBRARY})
# Set the output extension for Maya plugins (.mll on Windows)
if(WIN32)
set_target_properties(${PLUGIN_NAME} PROPERTIES SUFFIX ".mll")
endif()
# Define preprocessor definitions required by Maya
target_compile_definitions(${PLUGIN_NAME} PRIVATE
NT_PLUGIN # For Windows
REQUIRE_IOSTREAM
)
# Instructions for the user:
# 1. Install CMake (https://cmake.org/download/)
# 2. Set the three paths at the top of this file (MAYA_LOCATION, OpenCV_DIR, MEDIAPIPE paths).
# 3. Create a 'build' directory inside this project folder.
# 4. Open a command prompt or terminal in the 'build' directory.
# 5. Run CMake to generate the project files for your compiler:
# For Visual Studio: cmake -G "Visual Studio 16 2019" ..
# For Makefiles: cmake ..
# 6. Compile the project:
# For Visual Studio: cmake --build . --config Release
# For Makefiles: make
# 7. The compiled facialPerformanceNode.mll will be in the 'build/Release' directory.
# 8. Copy the .mll file to your Maya plug-ins directory (e.g., C:/Users/YourUser/Documents/maya/2023/plug-ins).

CMakePresets.json Normal file

@@ -0,0 +1,169 @@
{
"version": 3,
"configurePresets": [
{
"name": "Maya2020",
"displayName": "Maya 2020",
"description": "Visual Studio 17 2022 Win64 generator with v141 toolset",
"generator": "Visual Studio 17 2022",
"toolset": "v141",
"binaryDir": "${sourceDir}/build/${presetName}",
"cacheVariables": {
"CMAKE_CXX_FLAGS_RELWITHDEBINFO": "/Od /Zi /W0",
"CMAKE_CXX_FLAGS_RELEASE": "/O2 /W0"
},
"environment": {
"MAYA_VERSION": "2020",
"USD_LOCATION":"D:/library/build/usd_21.11_release"
},
"installDir": "${sourceDir}/install"
},
{
"name": "Maya2022",
"displayName": "Maya 2022",
"description": "Visual Studio 17 2022 Win64 generator with v142 toolset",
"generator": "Visual Studio 17 2022",
"toolset": "v142",
"binaryDir": "${sourceDir}/build/${presetName}",
"cacheVariables": {
"CMAKE_CXX_FLAGS_RELWITHDEBINFO": "/Od /Zi /W0",
"CMAKE_CXX_FLAGS_RELEASE": "/O2 /W0"
},
"environment": {
"MAYA_VERSION": "2022",
"OPENCV_LOCATION":"${sourceDir}/vendor/opencv"
},
"installDir": "${sourceDir}/install"
},
{
"name": "Maya2023",
"displayName": "Maya 2023",
"description": "Visual Studio 17 2022 Win64 generator with v142 toolset",
"generator": "Visual Studio 17 2022",
"toolset": "v142",
"binaryDir": "${sourceDir}/build/${presetName}",
"cacheVariables": {
"CMAKE_CXX_FLAGS_RELWITHDEBINFO": "/Od /Zi /W0",
"CMAKE_CXX_FLAGS_RELEASE": "/O2 /W0"
},
"environment": {
"MAYA_VERSION": "2023"
},
"installDir": "${sourceDir}/install"
},
{
"name": "Maya2024",
"displayName": "Maya 2024",
"description": "Visual Studio 17 2022 Win64 generator with v143 toolset",
"generator": "Visual Studio 17 2022",
"toolset": "v143",
"binaryDir": "${sourceDir}/build/${presetName}",
"cacheVariables": {
"CMAKE_CXX_FLAGS_RELWITHDEBINFO": "/Od /Zi /W0",
"CMAKE_CXX_FLAGS_RELEASE": "/O2 /W0"
},
"environment": {
"MAYA_VERSION": "2024"
},
"installDir": "${sourceDir}/install"
}
],
"buildPresets": [
{
"name": "Maya2020_Release",
"description": "Release Build",
"displayName": "Maya 2020 Release",
"configurePreset": "Maya2020",
"configuration": "Release",
"targets": "install"
},
{
"name":"Maya2020_RelWithDebInfo",
"description": "RelWithDebInfo Build",
"displayName": "Maya 2020 RelWithDebInfo",
"configurePreset": "Maya2020",
"configuration": "RelWithDebInfo",
"targets": "install"
},
{
"name":"Maya2020_Debug",
"description": "Debug Build",
"displayName": "Maya 2020 Debug",
"configurePreset": "Maya2020",
"configuration": "Debug",
"targets": "install"
},
{
"name": "Maya2022_Release",
"description": "Release Build",
"displayName": "Maya 2022 Release",
"configurePreset": "Maya2022",
"configuration": "Release",
"targets": "install"
},
{
"name":"Maya2022_RelWithDebInfo",
"description": "RelWithDebInfo Build",
"displayName": "Maya 2022 RelWithDebInfo",
"configurePreset": "Maya2022",
"configuration": "RelWithDebInfo",
"targets": "install"
},
{
"name":"Maya2022_Debug",
"description": "Debug Build",
"displayName": "Maya 2022 Debug",
"configurePreset": "Maya2022",
"configuration": "Debug",
"targets": "install"
},
{
"name": "Maya2023_Release",
"description": "Release Build",
"displayName": "Maya 2023 Release",
"configurePreset": "Maya2023",
"configuration": "Release",
"targets": "install"
},
{
"name":"Maya2023_RelWithDebInfo",
"description": "RelWithDebInfo Build",
"displayName": "Maya 2023 RelWithDebInfo",
"configurePreset": "Maya2023",
"configuration": "RelWithDebInfo",
"targets": "install"
},
{
"name":"Maya2023_Debug",
"description": "Debug Build",
"displayName": "Maya 2023 Debug",
"configurePreset": "Maya2023",
"configuration": "Debug",
"targets": "install"
},
{
"name": "Maya2024_Release",
"description": "Release Build",
"displayName": "Maya 2024 Release",
"configurePreset": "Maya2024",
"configuration": "Release",
"targets": "install"
},
{
"name":"Maya2024_RelWithDebInfo",
"description": "RelWithDebInfo Build",
"displayName": "Maya 2024 RelWithDebInfo",
"configurePreset": "Maya2024",
"configuration": "RelWithDebInfo",
"targets": "install"
},
{
"name":"Maya2024_Debug",
"description": "Debug Build",
"displayName": "Maya 2024 Debug",
"configurePreset": "Maya2024",
"configuration": "Debug",
"targets": "install"
}
]
}
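These presets can be driven straight from the command line (a sketch; preset schema version 3 requires CMake 3.21 or newer):
```powershell
cmake --preset Maya2023
cmake --build --preset Maya2023_Release
```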

README.md Normal file

@@ -0,0 +1,33 @@
# Video2ARKit for Maya
Uses Google MediaPipe to convert video into ARKit blendshape animation in Maya.
## Requirements
* Maya 2023+
* Python 3.9+
## Build and Install
```powershell
mayapy -m pip install mediapipe opencv-python av
```
Set `MAYA_MODULE_PATH` to the directory containing `Video2ARKit.mod` in `Maya.env`:
```powershell
# Maya.env
MAYA_MODULE_PATH=[plugin_install_dir]/Video2ARKit
```
## Usage
The plugin expects the model file at `Video2ARKit/models/face_landmarker.task`.
If it is missing, download `face_landmarker.task` from the [Face Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker?hl=zh-tw) page and place it in the `Video2ARKit/models` folder.
**Load Plugin**
```python
from maya import cmds
cmds.loadPlugin("Video2ARKit.py")
```
**Convert Video to ARKit blendshape `.json`**
```python
from maya import cmds
cmds.v2aCmds(videoPath=r'C:\MyVideo.mp4', outputPath=r'C:\Output.json')
```
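**Drive a Rig with the Node** (a minimal sketch; the node and attribute names follow `V2A/Video2ARKitNode.py`, while `myBlendShape` is a hypothetical target):
```python
from maya import cmds
cmds.loadPlugin("Video2ARKit.py")
node = cmds.createNode("Video2ARKit")
cmds.setAttr(node + ".videoPath", r"C:\MyVideo.mp4", type="string")
cmds.connectAttr("time1.outTime", node + ".inTime")
cmds.setAttr(node + ".processTrigger", True)  # kicks off the video analysis
# Wire each of the 52 outputs into your rig, e.g.:
cmds.connectAttr(node + ".jawOpen", "myBlendShape.jawOpen")
```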

cmake/modules/FindMaya.cmake Normal file

@@ -0,0 +1,210 @@
include(FindPackageHandleStandardArgs)
if(NOT DEFINED MAYA_VERSION)
set(MAYA_VERSION 2018)
endif()
if(DEFINED ENV{MAYA_VERSION})
set(MAYA_VERSION $ENV{MAYA_VERSION})
endif()
set(MAYA_BIN_SUFFIX "bin")
set(MAYA_INC_SUFFIX "include")
set(MAYA_LIB_SUFFIX "lib")
if(WIN32)
set(MAYA_LOCATION "$ENV{ProgramFiles}/Autodesk/Maya${MAYA_VERSION}")
set(MAYA_PLUGIN_EXT ".mll")
set(MAYA_COMPILE_FLAGS "/MD /D \"NT_PLUGIN\" /D \"REQUIRE_IOSTREAM\" /D \"_BOOL\"" )
set(MAYA_LINK_FLAGS " /export:initializePlugin /export:uninitializePlugin " )
elseif(APPLE)
set(MAYA_LOCATION "/Applications/Autodesk/maya${MAYA_VERSION}")
set(MAYA_PLUGIN_EXT ".bundle")
set(MAYA_COMPILE_FLAGS "-DAW_NEW_IOSTREAMS -DCC_GNU_ -DOSMac_ -DOSMacOSX_ -DOSMac_MachO_ -DREQUIRE_IOSTREAM -fno-gnu-keywords -D_LANGUAGE_C_PLUS_PLUS")
set(MAYA_LINK_FLAGS "-fno-gnu-keywords -framework System -framework SystemConfiguration -framework CoreServices -framework Carbon -framework Cocoa -framework ApplicationServices -framework Quicktime -framework IOKit -bundle -fPIC -L${ALEMBIC_MAYA_LIB_ROOT} -Wl,-executable_path,${ALEMBIC_MAYA_LIB_ROOT}")
else()
set(MAYA_LOCATION "/usr/autodesk/maya${MAYA_VERSION}")
set(MAYA_PLUGIN_EXT ".so")
set(MAYA_COMPILE_FLAGS "-m64 -g -pthread -pipe -D_BOOL -DLINUX -DLINUX_64 -DREQUIRE_IOSTREAM -fPIC -Wno-deprecated -fno-gnu-keywords")
set(MAYA_LINK_FLAGS "-shared -m64 -g -pthread -pipe -D_BOOL -DLINUX -DLINUX_64 -DREQUIRE_IOSTREAM -fPIC -Wno-deprecated -fno-gnu-keywords -Wl,-Bsymbolic")
endif()
set(MAYA_ROOT ${MAYA_LOCATION})
string(REPLACE "\\" "/" MAYA_ROOT ${MAYA_ROOT})
if(DEFINED ENV{MAYA_LOCATION})
set(MAYA_ROOT $ENV{MAYA_LOCATION})
endif()
set(MAYA_EXE MAYA_EXE-NOTFOUND)
find_program(MAYA_EXE maya
PATHS
${MAYA_ROOT}
PATH_SUFFIXES
"${MAYA_BIN_SUFFIX}"
NO_DEFAULT_PATH
DOC "Maya Executable Path")
set(MAYA_PY_EXE MAYA_PY_EXE-NOTFOUND)
find_program(MAYA_PY_EXE mayapy
PATHS
${MAYA_ROOT}
PATH_SUFFIXES
"${MAYA_BIN_SUFFIX}"
NO_DEFAULT_PATH
DOC "Maya Python Executable Path")
set(MAYA_INCLUDE_PATH MAYA_INCLUDE_PATH-NOTFOUND)
find_path(MAYA_INCLUDE_PATH "maya/MFn.h"
PATHS
${MAYA_ROOT}
PATH_SUFFIXES
"${MAYA_INC_SUFFIX}"
NO_DEFAULT_PATH
DOC "Maya Include Dir")
set(MAYA_OPENMAYA_LIBRARY MAYA_OPENMAYA_LIBRARY-NOTFOUND)
find_library(MAYA_OPENMAYA_LIBRARY OpenMaya
PATHS
"${MAYA_ROOT}/lib"
DOC "OpenMaya Libray Path")
if(MAYA_OPENMAYA_LIBRARY)
set(MAYA_LIBRARY_PATH "${MAYA_ROOT}/lib")
endif()
set(MAYA_OPENMAYAFX_LIBRARY MAYA_OPENMAYAFX_LIBRARY-NOTFOUND)
find_library(MAYA_OPENMAYAFX_LIBRARY OpenMayaFX
PATHS
"${MAYA_ROOT}/lib"
DOC "OpenMayaFX Libray Path")
set(MAYA_OPENMAYAANIM_LIBRARY MAYA_OPENMAYAANIM_LIBRARY-NOTFOUND)
find_library(MAYA_OPENMAYAANIM_LIBRARY OpenMayaAnim
PATHS
"${MAYA_ROOT}/lib"
DOC "OpenMayaAnim Libray Path")
set(MAYA_OPENMAYAUI_LIBRARY MAYA_OPENMAYAUI_LIBRARY-NOTFOUND)
find_library(MAYA_OPENMAYAUI_LIBRARY OpenMayaUI
PATHS
"${MAYA_ROOT}/lib"
DOC "OpenMayaUI Libray Path")
set(MAYA_OPENMAYARENDER_LIBRARY MAYA_OPENMAYARENDER_LIBRARY-NOTFOUND)
find_library(MAYA_OPENMAYARENDER_LIBRARY OpenMayaRender
PATHS
"${MAYA_ROOT}/lib"
DOC "OpenMayaRender Libray Path")
set(MAYA_FOUNDATION_LIBRARY MAYA_FOUNDATION_LIBRARY-NOTFOUND)
find_library(MAYA_FOUNDATION_LIBRARY Foundation
PATHS
"${MAYA_ROOT}/lib"
DOC "Foundation Libray Path")
set(XGEN_PLUGIN "plug-ins/xgen")
set(MAYA_XGEN_ROOT MAYA_XGEN_ROOT-NOTFOUND)
if(EXISTS "${MAYA_ROOT}/plug-ins/xgen")
set(MAYA_XGEN_ROOT "${MAYA_ROOT}/plug-ins/xgen")
endif()
set(MAYA_XGEN_INCLUDE_PATH MAYA_XGEN_INCLUDE_PATH-NOTFOUND)
find_path(MAYA_XGEN_INCLUDE_PATH "XGen/XgWinExport.h"
PATHS
${MAYA_ROOT}
PATH_SUFFIXES
"${XGEN_PLUGIN}/${MAYA_INC_SUFFIX}"
NO_DEFAULT_PATH
DOC "Maya XGen Include Dir")
if(MAYA_VERSION GREATER_EQUAL 2020)
find_path(MAYA_XGEN_INCLUDE_PATH "xgen/src/xgcore/XgConfig.h"
PATHS
${MAYA_ROOT}
PATH_SUFFIXES
"${XGEN_PLUGIN}/${MAYA_INC_SUFFIX}"
NO_DEFAULT_PATH
DOC "Maya XGen Include Dir")
endif()
set(MAYA_ADSKXGEN_LIBRARY MAYA_ADSKXGEN_LIBRARY-NOTFOUND)
find_library(MAYA_ADSKXGEN_LIBRARY
NAMES
AdskXGen
libAdskXGen
PATHS
"${MAYA_ROOT}/plug-ins/xgen/lib"
DOC "libAdskXGen Library Path")
set(MAYA_XGEN_LIBRARY_PATH MAYA_XGEN_LIBRARY_PATH-NOTFOUND)
if(MAYA_ADSKXGEN_LIBRARY)
set(MAYA_XGEN_LIBRARY_PATH "${MAYA_ROOT}/plug-ins/xgen/lib")
endif()
set(MAYA_ADSKXPD_LIBRARY MAYA_ADSKXPD_LIBRARY-NOTFOUND)
find_library(MAYA_ADSKXPD_LIBRARY
NAMES
AdskXpd
libAdskXpd
PATHS
"${MAYA_ROOT}/plug-ins/xgen/lib"
DOC "libAdskXpd Library Path")
set(MAYA_ADSKSEEXPR_LIBRARY MAYA_ADSKSEEXPR_LIBRARY-NOTFOUND)
find_library(MAYA_ADSKSEEXPR_LIBRARY
NAMES
AdskSeExpr
libAdskSeExpr
PATHS
"${MAYA_ROOT}/plug-ins/xgen/lib"
DOC "libAdskSeExpr Library Path")
set(MAYA_LIBRARIES
${MAYA_FOUNDATION_LIBRARY}
${MAYA_OPENMAYA_LIBRARY}
${MAYA_OPENMAYAANIM_LIBRARY}
${MAYA_OPENMAYAFX_LIBRARY}
${MAYA_OPENMAYARENDER_LIBRARY}
${MAYA_OPENMAYAUI_LIBRARY})
set(MAYA_XGEN_LIBRARIES
${MAYA_ADSKXGEN_LIBRARY}
${MAYA_ADSKXPD_LIBRARY}
${MAYA_ADSKSEEXPR_LIBRARY})
message(STATUS "MAYA_ROOT : ${MAYA_ROOT}")
message(STATUS "MAYA_EXE : ${MAYA_EXE}")
message(STATUS "MAYA_PY_EXE : ${MAYA_PY_EXE}")
message(STATUS "MAYA_INCLUDE_PATH : ${MAYA_INCLUDE_PATH}")
message(STATUS "MAYA_LIBRARY_PATH : ${MAYA_LIBRARY_PATH}")
message(STATUS "MAYA_FOUNDATION_LIBRARY : ${MAYA_FOUNDATION_LIBRARY}")
message(STATUS "MAYA_OPENMAYA_LIBRARY : ${MAYA_OPENMAYA_LIBRARY}")
message(STATUS "MAYA_OPENMAYAANIM_LIBRARY : ${MAYA_OPENMAYAANIM_LIBRARY}")
message(STATUS "MAYA_OPENMAYAFX_LIBRARY : ${MAYA_OPENMAYAFX_LIBRARY}")
message(STATUS "MAYA_OPENMAYARENDER_LIBRARY : ${MAYA_OPENMAYARENDER_LIBRARY}")
message(STATUS "MAYA_OPENMAYAUI_LIBRARY : ${MAYA_OPENMAYAUI_LIBRARY}")
message(STATUS "----------------------------------------------")
message(STATUS "Maya XGen SDK")
message(STATUS "----------------------------------------------")
message(STATUS "MAYA_XGEN_ROOT : ${MAYA_XGEN_ROOT}")
message(STATUS "MAYA_XGEN_INCLUDE_PATH : ${MAYA_XGEN_INCLUDE_PATH}")
message(STATUS "MAYA_XGEN_LIBRARY_PATH : ${MAYA_XGEN_LIBRARY_PATH}")
message(STATUS "MAYA_ADSKXGEN_LIBRARY : ${MAYA_ADSKXGEN_LIBRARY}")
message(STATUS "MAYA_ADSKXPD_LIBRARY : ${MAYA_ADSKXPD_LIBRARY}")
message(STATUS "MAYA_ADSKSEEXPR_LIBRARY : ${MAYA_ADSKSEEXPR_LIBRARY}")
find_package_handle_standard_args(Maya
REQUIRED_VARS
MAYA_ROOT
MAYA_INCLUDE_PATH
MAYA_LIBRARY_PATH
MAYA_FOUNDATION_LIBRARY
MAYA_OPENMAYA_LIBRARY
MAYA_OPENMAYAANIM_LIBRARY
MAYA_OPENMAYAFX_LIBRARY
MAYA_OPENMAYARENDER_LIBRARY
MAYA_OPENMAYAUI_LIBRARY
VERSION_VAR
MAYA_VERSION
)

cmake/modules/FindOpenCV.cmake Normal file

@@ -0,0 +1,5 @@
if(DEFINED ENV{OPENCV_LOCATION})
set(OPENCV_LOCATION $ENV{OPENCV_LOCATION})
endif()
message(STATUS "OPENCV_LOCATION: ${OPENCV_LOCATION}")

src/CMakeLists.txt Normal file

@@ -0,0 +1,38 @@
project(Video2ARKit)
set(SOURCE_FILES
Video2ARKitNode.h
Video2ARKitNode.cpp
plugin.cpp
)
find_package(Maya REQUIRED)
# Find OpenCV
find_package(OpenCV REQUIRED)
# Add Maya's include directories
include_directories(${MAYA_INCLUDE_PATH})
# Add our custom dependency include directories
include_directories(${OpenCV_INCLUDE_DIRS})
include_directories(${MEDIAPIPE_INCLUDE_DIR})
add_library(${PROJECT_NAME} SHARED ${SOURCE_FILES})
target_include_directories(${PROJECT_NAME} PRIVATE
${MAYA_INCLUDE_PATH}
${OpenCV_INCLUDE_DIRS}
${MEDIAPIPE_INCLUDE_DIR}
)
target_link_libraries(${PROJECT_NAME} ${MAYA_LIBRARIES})
target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS})
target_link_libraries(${PROJECT_NAME} ${MEDIAPIPE_LIBRARY})
if(WIN32)
set_target_properties(${PROJECT_NAME} PROPERTIES SUFFIX ".mll")
endif()
target_compile_definitions(${PROJECT_NAME} PRIVATE
NT_PLUGIN
REQUIRE_IOSTREAM
)

Video2ARKit.mod Normal file

@@ -0,0 +1,7 @@
+ MAYAVERSION:2022 Video2ARKit 1.0 .
V2A_MODEL_PATH:=models/face_landmarker.task
PYTHONPATH+:=lib/python3.7/site-packages
+ MAYAVERSION:2023 Video2ARKit 1.0 .
V2A_MODEL_PATH:=models/face_landmarker.task
PYTHONPATH+:=lib/python3.9/site-packages

models/face_landmarker.task Normal file
Binary file not shown.

Video2ARKit.py Normal file

@@ -0,0 +1,34 @@
import maya.api.OpenMaya as om

from V2A.Video2ARKitNode import Video2ARKitNode
from V2A.Video2ARKitCommand import Video2ARKitCommand


def maya_useNewAPI():
    pass


def initializePlugin(mobject):
    mplugin = om.MFnPlugin(mobject, "YourName", "1.0", "Any")
    try:
        mplugin.registerNode(Video2ARKitNode.NODE_NAME, Video2ARKitNode.NODE_ID,
                             Video2ARKitNode.creator, Video2ARKitNode.initialize)
    except Exception:
        om.MGlobal.displayError("Failed to register node")
    try:
        mplugin.registerCommand(Video2ARKitCommand.COMMAND_NAME,
                                Video2ARKitCommand.creator,
                                Video2ARKitCommand.createSyntax)
    except Exception:
        om.MGlobal.displayError("Failed to register command")


def uninitializePlugin(mobject):
    mplugin = om.MFnPlugin(mobject)
    try:
        mplugin.deregisterNode(Video2ARKitNode.NODE_ID)
    except Exception:
        om.MGlobal.displayError("Failed to deregister node")
    try:
        mplugin.deregisterCommand(Video2ARKitCommand.COMMAND_NAME)
    except Exception:
        om.MGlobal.displayError("Failed to deregister command")

V2A/Video2ARKitCommand.py Normal file

@@ -0,0 +1,83 @@
import maya.api.OpenMaya as om
import os
import json

from V2A.core import process


def maya_useNewAPI():
    pass


class Video2ARKitCommand(om.MPxCommand):
    COMMAND_NAME = "v2aCmds"
    kVideoPath = 'v'
    kVideoPathLong = 'videoPath'
    kModelPath = 'm'
    kModelPathLong = 'modelPath'
    kOutputPath = 'o'
    kOutputPathLong = 'outputPath'
    kReload = 'r'
    kReloadLong = 'reload'

    def __init__(self):
        om.MPxCommand.__init__(self)

    @staticmethod
    def createSyntax():
        syntax = om.MSyntax()
        syntax.addFlag(Video2ARKitCommand.kVideoPath, Video2ARKitCommand.kVideoPathLong, om.MSyntax.kString)
        syntax.addFlag(Video2ARKitCommand.kModelPath, Video2ARKitCommand.kModelPathLong, om.MSyntax.kString)
        syntax.addFlag(Video2ARKitCommand.kOutputPath, Video2ARKitCommand.kOutputPathLong, om.MSyntax.kString)
        syntax.addFlag(Video2ARKitCommand.kReload, Video2ARKitCommand.kReloadLong, om.MSyntax.kString)
        return syntax

    def doIt(self, args):
        argData = om.MArgParser(self.syntax(), args)
        if argData.isFlagSet(self.kReload):
            # Reload the cache
            node_name = argData.flagArgumentString(self.kReload, 0)
            s_list = om.MGlobal.getSelectionListByName(node_name) if node_name else om.MGlobal.getActiveSelectionList()
            s_iter = om.MItSelectionList(s_list, om.MFn.kDependencyNode)
            if not s_iter.isDone():
                depend_node = s_iter.getDependNode()
                node_fn = om.MFnDependencyNode(depend_node)
                # If it is a Video2ARKit node, trigger a cache reload
                if node_fn.typeName == "Video2ARKit":
                    trigger = node_fn.findPlug("processTrigger", True)
                    trigger.setBool(trigger.asBool())
        else:
            # Process the video and return the cached result
            video_path = None
            if argData.isFlagSet(self.kVideoPath):
                video_path = argData.flagArgumentString(self.kVideoPath, 0)
            model_path = os.getenv('V2A_MODEL_PATH')
            if argData.isFlagSet(self.kModelPath):
                model_path = argData.flagArgumentString(self.kModelPath, 0)
            output_path = None
            if argData.isFlagSet(self.kOutputPath):
                output_path = argData.flagArgumentString(self.kOutputPath, 0)
            if not video_path:
                om.MGlobal.displayError("Video path is required.")
                return
            if not model_path:
                om.MGlobal.displayError("Model path is required.")
                return
            if not output_path:
                om.MGlobal.displayInfo("No output path specified. Results will not be saved to file.")
            else:
                om.MGlobal.displayInfo(f"Output path set to: {output_path}")
            om.MGlobal.displayInfo(f"Processing video: {video_path} with model: {model_path}")
            cache = process.process_video(video_path, model_path, output_path)
            if not output_path:
                super(Video2ARKitCommand, self).setResult(json.dumps(cache))

    @staticmethod
    def creator():
        return Video2ARKitCommand()
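A usage sketch for the command's `reload` flag (the node name is hypothetical; it re-triggers the analysis cached on an existing node):
```python
from maya import cmds
cmds.v2aCmds(reload="Video2ARKit1")
```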

V2A/Video2ARKitNode.py Normal file

@@ -0,0 +1,122 @@
import maya.api.OpenMaya as om
import os

from V2A.core import process

# Tell Maya this module uses the Python API 2.0
maya_useNewAPI = True

# The 52 standard ARKit blendshape names
ARKIT_SHAPES = [
    "browDownLeft", "browDownRight", "browInnerUp", "browOuterUpLeft", "browOuterUpRight",
    "cheekPuff", "cheekSquintLeft", "cheekSquintRight", "eyeBlinkLeft", "eyeBlinkRight",
    "eyeLookDownLeft", "eyeLookDownRight", "eyeLookInLeft", "eyeLookInRight", "eyeLookOutLeft",
    "eyeLookOutRight", "eyeLookUpLeft", "eyeLookUpRight", "eyeSquintLeft", "eyeSquintRight",
    "eyeWideLeft", "eyeWideRight", "jawForward", "jawLeft", "jawOpen", "jawRight",
    "mouthClose", "mouthDimpleLeft", "mouthDimpleRight", "mouthFrownLeft", "mouthFrownRight",
    "mouthFunnel", "mouthLeft", "mouthLowerDownLeft", "mouthLowerDownRight", "mouthPressLeft",
    "mouthPressRight", "mouthPucker", "mouthRight", "mouthRollLower", "mouthRollUpper",
    "mouthShrugLower", "mouthShrugUpper", "mouthSmileLeft", "mouthSmileRight", "mouthStretchLeft",
    "mouthStretchRight", "mouthUpperUpLeft", "mouthUpperUpRight", "noseSneerLeft", "noseSneerRight", "tongueOut"
]


class Video2ARKitNode(om.MPxNode):
    NODE_NAME = "Video2ARKit"
    NODE_ID = om.MTypeId(0x00123456)  # Test ID

    # Attribute definitions
    aInTime = om.MObject()
    aVideoPath = om.MObject()
    aModelPath = om.MObject()
    aProcessTrigger = om.MObject()
    # Output attribute dictionary
    output_attrs = {}

    def __init__(self):
        om.MPxNode.__init__(self)
        self._cache = {}  # Format: {frame_index: {shape_name: value}}

    def compute(self, plug, data_block):
        # Only handle requests for one of the 52 output attributes
        if plug.attribute() in self.output_attrs.values():
            # Fetch the input values
            current_time = data_block.inputValue(self.aInTime).asTime().asUnits(om.MTime.kFilm)  # assumes 24 fps
            trigger = data_block.inputValue(self.aProcessTrigger).asBool()
            video_path = data_block.inputValue(self.aVideoPath).asString()
            model_path = data_block.inputValue(self.aModelPath).asString()
            # Re-analyze the video when the trigger is set and the cache is empty
            if trigger and not self._cache:
                self._cache = process.process_video(video_path, model_path)
            # Look up the weights for the current frame
            frame_idx = int(current_time)
            frame_data = self._cache.get(frame_idx, {})
            # Write the value belonging to the requested plug
            for name, attr_obj in self.output_attrs.items():
                if plug.attribute() == attr_obj:
                    val = frame_data.get(name, 0.0)
                    handle = data_block.outputValue(attr_obj)
                    handle.setFloat(val)
                    data_block.setClean(plug)
                    return

    def setDependentsDirty(self, plug, affectedPlugs):
        # Dirtying the trigger invalidates the cache
        if plug.attribute() == self.aProcessTrigger:
            self._cache = {}
        super().setDependentsDirty(plug, affectedPlugs)

    def _reload_cache(self):
        # Re-set the trigger plug to dirty the outputs and repopulate the cache
        trigger = om.MFnDependencyNode(self.thisMObject()).findPlug("processTrigger", True)
        trigger.setBool(trigger.asBool())
        om.MGlobal.displayInfo("Reload cache complete. Cache populated.")

    @staticmethod
    def creator():
        return Video2ARKitNode()

    @staticmethod
    def initialize():
        nAttr = om.MFnNumericAttribute()
        tAttr = om.MFnTypedAttribute()
        uAttr = om.MFnUnitAttribute()
        # Input: time
        Video2ARKitNode.aInTime = uAttr.create("inTime", "it", om.MFnUnitAttribute.kTime, 0.0)
        om.MPxNode.addAttribute(Video2ARKitNode.aInTime)
        # Input: video path
        Video2ARKitNode.aVideoPath = tAttr.create("videoPath", "vp", om.MFnData.kString)
        om.MPxNode.addAttribute(Video2ARKitNode.aVideoPath)
        # Input: model path (defaults to the V2A_MODEL_PATH set by the .mod file)
        data = om.MFnStringData()
        default_model_path = data.create(os.getenv('V2A_MODEL_PATH') or '')
        Video2ARKitNode.aModelPath = tAttr.create("modelPath", "mp", om.MFnData.kString, default_model_path)
        om.MPxNode.addAttribute(Video2ARKitNode.aModelPath)
        # Input: trigger (set it to start the analysis)
        Video2ARKitNode.aProcessTrigger = nAttr.create("processTrigger", "pt", om.MFnNumericData.kBoolean, False)
        nAttr.keyable = True
        nAttr.hidden = True
        om.MPxNode.addAttribute(Video2ARKitNode.aProcessTrigger)
        # Outputs: the 52 blendshape coefficients
        for shape_name in ARKIT_SHAPES:
            attr = nAttr.create(shape_name, shape_name, om.MFnNumericData.kFloat, 0.0)
            nAttr.writable = False
            nAttr.storable = False
            om.MPxNode.addAttribute(attr)
            Video2ARKitNode.output_attrs[shape_name] = attr
            # Declare the dependencies
            om.MPxNode.attributeAffects(Video2ARKitNode.aVideoPath, attr)
            om.MPxNode.attributeAffects(Video2ARKitNode.aInTime, attr)
            om.MPxNode.attributeAffects(Video2ARKitNode.aProcessTrigger, attr)

V2A/core/__init__.py Normal file

V2A/core/process.py Normal file

@@ -0,0 +1,116 @@
import os
import cv2
import av
import logging
import json
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision

logging.basicConfig(format='%(asctime)s - %(levelname)s : %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

DEFAULT_MODEL_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'models', 'face_landmarker.task'))


def process_video(video_path, model_path=DEFAULT_MODEL_PATH, output_json=None, method='av'):
    if method == 'cv2':
        return process_video_cv2(video_path, model_path, output_json)
    else:
        return process_video_av(video_path, model_path, output_json)


def process_video_cv2(video_path, model_path, output_json=None):
    """Core MediaPipe processing logic using OpenCV."""
    # Initialize MediaPipe
    cache = {}
    base_options = python.BaseOptions(model_asset_path=model_path)
    options = vision.FaceLandmarkerOptions(
        base_options=base_options,
        output_face_blendshapes=True,
        running_mode=vision.RunningMode.VIDEO
    )
    with vision.FaceLandmarker.create_from_options(options) as detector:
        cap = cv2.VideoCapture(video_path)
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_frame)
            ts = int((frame_count / fps) * 1000)
            result = detector.detect_for_video(mp_image, ts)
            if result.face_blendshapes:
                # Capture the first face's blendshapes
                shapes = {c.category_name: c.score for c in result.face_blendshapes[0]}
                cache[frame_count] = shapes
            frame_count += 1
        cap.release()
    return cache


def process_video_av(video_path, model_path, output_json=None):
    """Core MediaPipe processing logic using PyAV."""
    cache = {}
    if not os.path.exists(video_path) or not os.path.exists(model_path):
        logger.error(f"File not found: {video_path} or {model_path}")
        return cache
    logger.info(f'Start Analyzing Video: {video_path}')
    # Initialize MediaPipe
    base_options = python.BaseOptions(model_asset_path=model_path)
    options = vision.FaceLandmarkerOptions(
        base_options=base_options,
        output_face_blendshapes=True,
        running_mode=vision.RunningMode.VIDEO
    )
    with vision.FaceLandmarker.create_from_options(options) as detector:
        with av.open(video_path) as container:
            video_stream = container.streams.video[0]
            video_stream.thread_type = "AUTO"
            frame_count = 0
            for frame in container.decode(video_stream):
                rgb_array = frame.to_ndarray(format='rgb24')
                mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_array)
                ts_ms = int(frame.time * 1000)
                result = detector.detect_for_video(mp_image, ts_ms)
                if result.face_blendshapes:
                    # Grab the first face's blendshapes
                    shapes = {c.category_name: c.score for c in result.face_blendshapes[0]}
                    cache[frame.index] = shapes
                frame_count += 1
    if output_json:
        if output_json.endswith('.json'):
            output_json_dir = os.path.dirname(output_json)
            if output_json_dir and not os.path.exists(output_json_dir):
                os.makedirs(output_json_dir)
            with open(output_json, 'w') as f:
                json.dump(cache, f, indent=4)
            logger.info(f"Output JSON saved to {output_json}")
    return cache


if __name__ == "__main__":
    # Test entry point
    test_video_path = "test_videos/sample_video.mp4"
    test_model_path = "models/face_landmarker.task"
    test_output_json = "output/blendshapes_output.json"
    process_video(test_video_path, test_model_path, test_output_json)
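Note that `json.dump` turns the integer frame indices into string keys, so consumers of the output file index with strings. A small reading sketch (assumes a face was detected on frame 0):
```python
import json

with open("output/blendshapes_output.json") as f:
    cache = json.load(f)

# Keys are frame indices (stringified by JSON); values map blendshape name -> score
print(cache["0"]["jawOpen"])
```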

src/Video2ARKitNode.cpp Normal file

@@ -0,0 +1,297 @@
#include "Video2ARKitNode.h"
#include <maya/MPlug.h>
#include <maya/MDataBlock.h>
#include <maya/MDataHandle.h>
#include <maya/MGlobal.h>
#include <maya/MTime.h>
#include <maya/MFnUnitAttribute.h>
#include <maya/MFnTypedAttribute.h>
#include <maya/MFnNumericAttribute.h>
#include <maya/MFnCompoundAttribute.h>
#include <fstream>
#include <streambuf>
// --- Dependencies (Must be installed on your system) ---
#include <opencv2/opencv.hpp>
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/classification.pb.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"
// --- Helper Function for Video Processing ---
BlendshapeCache processVideo_impl(const MString& videoPath)
{
BlendshapeCache cache;
MGlobal::displayInfo("Processing video: " + videoPath);
std::string graph_path = "D:/workspace/Video2ARkit/src/face_blendshapes_graph.pbtxt";
std::ifstream graph_file(graph_path);
if (!graph_file.is_open()) {
MGlobal::displayError("Failed to open MediaPipe graph file: " + MString(graph_path.c_str()));
return cache;
}
std::string graph_config_string((std::istreambuf_iterator<char>(graph_file)), std::istreambuf_iterator<char>());
mediapipe::CalculatorGraphConfig config;
if (!google::protobuf::TextFormat::ParseFromString(graph_config_string, &config)) {
MGlobal::displayError("Failed to parse MediaPipe graph config.");
return cache;
}
mediapipe::CalculatorGraph graph;
auto status = graph.Initialize(config);
if (!status.ok()) {
MGlobal::displayError("Failed to initialize MediaPipe graph.");
return cache;
}
auto poller_status = graph.AddOutputStreamPoller("face_blendshapes");
if (!poller_status.ok()) {
MGlobal::displayError("Failed to add output stream poller.");
return cache;
}
mediapipe::OutputStreamPoller poller = std::move(poller_status.value());
status = graph.StartRun({});
if (!status.ok()) {
MGlobal::displayError("Failed to start MediaPipe graph.");
return cache;
}
cv::VideoCapture cap(videoPath.asChar());
if (!cap.isOpened()) {
MGlobal::displayError("Failed to open video file with OpenCV: " + videoPath);
return cache;
}
long frame_count = 0;
while (true) {
cv::Mat frame;
cap >> frame;
if (frame.empty()) {
break;
}
auto input_frame = std::make_unique<mediapipe::ImageFrame>(
mediapipe::ImageFormat::SRGB, frame.cols, frame.rows, mediapipe::ImageFrame::kDefaultAlignmentBoundary);
cv::cvtColor(frame, input_frame->MatView(), cv::COLOR_BGR2RGB);
mediapipe::Timestamp timestamp(frame_count);
status = graph.AddPacketToInputStream("input_video", mediapipe::Adopt(input_frame.release()).At(timestamp));
if (!status.ok()) {
MGlobal::displayError("Failed to add packet to input stream.");
break;
}
mediapipe::Packet packet;
if (poller.Next(&packet)) {
auto& classification_list_vec = packet.Get<std::vector<mediapipe::ClassificationList>>();
if (!classification_list_vec.empty()) {
BlendshapeFrame frameData;
const auto& blendshapes = classification_list_vec[0];
for (int i = 0; i < blendshapes.classification_size(); ++i) {
const auto& blendshape = blendshapes.classification(i);
frameData[blendshape.label()] = blendshape.score();
}
cache.push_back(frameData);
}
}
frame_count++;
if (frame_count % 100 == 0) {
MGlobal::displayInfo("Processed " + MString() + frame_count + " frames...");
}
}
graph.CloseInputStream("input_video");
graph.WaitUntilDone();
cap.release();
MGlobal::displayInfo("Finished processing video. Cached " + MString() + (long)cache.size() + " frames.");
return cache;
}
// --- Static Member Initialization ---
MTypeId Video2ARKitNode::id(0x00081031);
BlendshapeCache Video2ARKitNode::m_blendshapeCache;
MString Video2ARKitNode::m_cachedVideoPath;
MObject Video2ARKitNode::videoPath, Video2ARKitNode::currentTime, Video2ARKitNode::enable, Video2ARKitNode::outputBlendshapes;
MObject Video2ARKitNode::browDownLeft, Video2ARKitNode::browDownRight, Video2ARKitNode::browInnerUp,
Video2ARKitNode::browOuterUpLeft, Video2ARKitNode::browOuterUpRight, Video2ARKitNode::cheekPuff,
Video2ARKitNode::cheekSquintLeft, Video2ARKitNode::cheekSquintRight, Video2ARKitNode::eyeBlinkLeft,
Video2ARKitNode::eyeBlinkRight, Video2ARKitNode::eyeLookDownLeft, Video2ARKitNode::eyeLookDownRight,
Video2ARKitNode::eyeLookInLeft, Video2ARKitNode::eyeLookInRight, Video2ARKitNode::eyeLookOutLeft,
Video2ARKitNode::eyeLookOutRight, Video2ARKitNode::eyeLookUpLeft, Video2ARKitNode::eyeLookUpRight,
Video2ARKitNode::eyeSquintLeft, Video2ARKitNode::eyeSquintRight, Video2ARKitNode::eyeWideLeft,
Video2ARKitNode::eyeWideRight, Video2ARKitNode::jawForward, Video2ARKitNode::jawLeft,
Video2ARKitNode::jawOpen, Video2ARKitNode::jawRight, Video2ARKitNode::mouthClose,
Video2ARKitNode::mouthDimpleLeft, Video2ARKitNode::mouthDimpleRight, Video2ARKitNode::mouthFrownLeft,
Video2ARKitNode::mouthFrownRight, Video2ARKitNode::mouthFunnel, Video2ARKitNode::mouthLowerDownLeft,
Video2ARKitNode::mouthLowerDownRight, Video2ARKitNode::mouthPressLeft, Video2ARKitNode::mouthPressRight,
Video2ARKitNode::mouthPucker, Video2ARKitNode::mouthRight, Video2ARKitNode::mouthRollLower,
Video2ARKitNode::mouthRollUpper, Video2ARKitNode::mouthShrugLower, Video2ARKitNode::mouthShrugUpper,
Video2ARKitNode::mouthSmileLeft, Video2ARKitNode::mouthSmileRight, Video2ARKitNode::mouthStretchLeft,
Video2ARKitNode::mouthStretchRight, Video2ARKitNode::mouthUpperUpLeft, Video2ARKitNode::mouthUpperUpRight,
Video2ARKitNode::noseSneerLeft, Video2ARKitNode::noseSneerRight;
// --- Node Method Implementations ---
Video2ARKitNode::Video2ARKitNode() {}
Video2ARKitNode::~Video2ARKitNode() {}
void* Video2ARKitNode::creator() {
return new Video2ARKitNode();
}
MStatus Video2ARKitNode::compute(const MPlug& plug, MDataBlock& data) {
if (plug.parent() != outputBlendshapes) {
return MS::kUnknownParameter;
}
MDataHandle pathHandle = data.inputValue(videoPath);
MString path = pathHandle.asString();
if (path != m_cachedVideoPath && path.length() > 0) {
m_blendshapeCache = processVideo_impl(path);
m_cachedVideoPath = path;
}
MDataHandle outputCompoundHandle = data.outputValue(outputBlendshapes);
MDataHandle enableHandle = data.inputValue(enable);
bool isEnabled = enableHandle.asBool();
if (isEnabled && !m_blendshapeCache.empty()) {
MDataHandle timeHandle = data.inputValue(currentTime);
MTime time = timeHandle.asTime();
int frame = static_cast<int>(time.as(MTime::kFilm));
if (frame >= 0 && frame < m_blendshapeCache.size()) {
const auto& frameData = m_blendshapeCache[frame];
#define SET_OUTPUT_VALUE(attr, name) \
if (frameData.count(name)) { \
outputCompoundHandle.child(attr).setFloat(frameData.at(name)); \
} else { \
outputCompoundHandle.child(attr).setFloat(0.0f); \
}
SET_OUTPUT_VALUE(browDownLeft, "browDown_L"); SET_OUTPUT_VALUE(browDownRight, "browDown_R");
SET_OUTPUT_VALUE(browInnerUp, "browInnerUp"); SET_OUTPUT_VALUE(browOuterUpLeft, "browOuterUp_L");
SET_OUTPUT_VALUE(browOuterUpRight, "browOuterUp_R"); SET_OUTPUT_VALUE(cheekPuff, "cheekPuff");
SET_OUTPUT_VALUE(cheekSquintLeft, "cheekSquint_L"); SET_OUTPUT_VALUE(cheekSquintRight, "cheekSquint_R");
SET_OUTPUT_VALUE(eyeBlinkLeft, "eyeBlink_L"); SET_OUTPUT_VALUE(eyeBlinkRight, "eyeBlink_R");
SET_OUTPUT_VALUE(eyeLookDownLeft, "eyeLookDown_L"); SET_OUTPUT_VALUE(eyeLookDownRight, "eyeLookDown_R");
SET_OUTPUT_VALUE(eyeLookInLeft, "eyeLookIn_L"); SET_OUTPUT_VALUE(eyeLookInRight, "eyeLookIn_R");
SET_OUTPUT_VALUE(eyeLookOutLeft, "eyeLookOut_L"); SET_OUTPUT_VALUE(eyeLookOutRight, "eyeLookOut_R");
SET_OUTPUT_VALUE(eyeLookUpLeft, "eyeLookUp_L"); SET_OUTPUT_VALUE(eyeLookUpRight, "eyeLookUp_R");
SET_OUTPUT_VALUE(eyeSquintLeft, "eyeSquint_L"); SET_OUTPUT_VALUE(eyeSquintRight, "eyeSquint_R");
SET_OUTPUT_VALUE(eyeWideLeft, "eyeWide_L"); SET_OUTPUT_VALUE(eyeWideRight, "eyeWide_R");
SET_OUTPUT_VALUE(jawForward, "jawForward"); SET_OUTPUT_VALUE(jawLeft, "jawLeft");
SET_OUTPUT_VALUE(jawOpen, "jawOpen"); SET_OUTPUT_VALUE(jawRight, "jawRight");
SET_OUTPUT_VALUE(mouthClose, "mouthClose"); SET_OUTPUT_VALUE(mouthDimpleLeft, "mouthDimple_L");
SET_OUTPUT_VALUE(mouthDimpleRight, "mouthDimple_R"); SET_OUTPUT_VALUE(mouthFrownLeft, "mouthFrown_L");
SET_OUTPUT_VALUE(mouthFrownRight, "mouthFrown_R"); SET_OUTPUT_VALUE(mouthFunnel, "mouthFunnel");
SET_OUTPUT_VALUE(mouthLowerDownLeft, "mouthLowerDown_L"); SET_OUTPUT_VALUE(mouthLowerDownRight, "mouthLowerDown_R");
SET_OUTPUT_VALUE(mouthPressLeft, "mouthPress_L"); SET_OUTPUT_VALUE(mouthPressRight, "mouthPress_R");
SET_OUTPUT_VALUE(mouthPucker, "mouthPucker"); SET_OUTPUT_VALUE(mouthRight, "mouthRight");
SET_OUTPUT_VALUE(mouthRollLower, "mouthRollLower"); SET_OUTPUT_VALUE(mouthRollUpper, "mouthRollUpper");
SET_OUTPUT_VALUE(mouthShrugLower, "mouthShrugLower"); SET_OUTPUT_VALUE(mouthShrugUpper, "mouthShrugUpper");
SET_OUTPUT_VALUE(mouthSmileLeft, "mouthSmile_L"); SET_OUTPUT_VALUE(mouthSmileRight, "mouthSmile_R");
SET_OUTPUT_VALUE(mouthStretchLeft, "mouthStretch_L"); SET_OUTPUT_VALUE(mouthStretchRight, "mouthStretch_R");
SET_OUTPUT_VALUE(mouthUpperUpLeft, "mouthUpperUp_L"); SET_OUTPUT_VALUE(mouthUpperUpRight, "mouthUpperUp_R");
SET_OUTPUT_VALUE(noseSneerLeft, "noseSneer_L"); SET_OUTPUT_VALUE(noseSneerRight, "noseSneer_R");
}
} else {
#define ZERO_OUTPUT_VALUE(attr) outputCompoundHandle.child(attr).setFloat(0.0f)
// ... Zero out all 52 attributes ...
}
outputCompoundHandle.setClean();
data.setClean(plug);
return MS::kSuccess;
}
MStatus Video2ARKitNode::initialize() {
MFnNumericAttribute nAttr;
MFnTypedAttribute tAttr;
MFnUnitAttribute uAttr;
MFnCompoundAttribute cAttr;
videoPath = tAttr.create("videoPath", "vp", MFnData::kString); tAttr.setStorable(true); addAttribute(videoPath);
currentTime = uAttr.create("currentTime", "ct", MFnUnitAttribute::kTime, 0.0); uAttr.setStorable(true); addAttribute(currentTime);
enable = nAttr.create("enable", "en", MFnNumericData::kBoolean, 1); nAttr.setKeyable(true); addAttribute(enable);
#define CREATE_BLENDSHAPE_ATTR(longName, shortName, attrObject) \
attrObject = nAttr.create(longName, shortName, MFnNumericData::kFloat, 0.0); \
nAttr.setWritable(false); nAttr.setStorable(false);
CREATE_BLENDSHAPE_ATTR("browDownLeft", "bdl", browDownLeft); CREATE_BLENDSHAPE_ATTR("browDownRight", "bdr", browDownRight);
CREATE_BLENDSHAPE_ATTR("browInnerUp", "biu", browInnerUp); CREATE_BLENDSHAPE_ATTR("browOuterUpLeft", "boul", browOuterUpLeft);
CREATE_BLENDSHAPE_ATTR("browOuterUpRight", "bour", browOuterUpRight); CREATE_BLENDSHAPE_ATTR("cheekPuff", "cp", cheekPuff);
CREATE_BLENDSHAPE_ATTR("cheekSquintLeft", "csl", cheekSquintLeft); CREATE_BLENDSHAPE_ATTR("cheekSquintRight", "csr", cheekSquintRight);
CREATE_BLENDSHAPE_ATTR("eyeBlinkLeft", "ebl", eyeBlinkLeft); CREATE_BLENDSHAPE_ATTR("eyeBlinkRight", "ebr", eyeBlinkRight);
CREATE_BLENDSHAPE_ATTR("eyeLookDownLeft", "eldl", eyeLookDownLeft); CREATE_BLENDSHAPE_ATTR("eyeLookDownRight", "eldr", eyeLookDownRight);
CREATE_BLENDSHAPE_ATTR("eyeLookInLeft", "elil", eyeLookInLeft); CREATE_BLENDSHAPE_ATTR("eyeLookInRight", "elir", eyeLookInRight);
CREATE_BLENDSHAPE_ATTR("eyeLookOutLeft", "elol", eyeLookOutLeft); CREATE_BLENDSHAPE_ATTR("eyeLookOutRight", "elor", eyeLookOutRight);
CREATE_BLENDSHAPE_ATTR("eyeLookUpLeft", "elul", eyeLookUpLeft); CREATE_BLENDSHAPE_ATTR("eyeLookUpRight", "elur", eyeLookUpRight);
CREATE_BLENDSHAPE_ATTR("eyeSquintLeft", "esl", eyeSquintLeft); CREATE_BLENDSHAPE_ATTR("eyeSquintRight", "esr", eyeSquintRight);
CREATE_BLENDSHAPE_ATTR("eyeWideLeft", "ewl", eyeWideLeft); CREATE_BLENDSHAPE_ATTR("eyeWideRight", "ewr", eyeWideRight);
CREATE_BLENDSHAPE_ATTR("jawForward", "jf", jawForward); CREATE_BLENDSHAPE_ATTR("jawLeft", "jl", jawLeft);
CREATE_BLENDSHAPE_ATTR("jawOpen", "jo", jawOpen); CREATE_BLENDSHAPE_ATTR("jawRight", "jr", jawRight);
CREATE_BLENDSHAPE_ATTR("mouthClose", "mc", mouthClose); CREATE_BLENDSHAPE_ATTR("mouthDimpleLeft", "mdl", mouthDimpleLeft);
CREATE_BLENDSHAPE_ATTR("mouthDimpleRight", "mdr", mouthDimpleRight); CREATE_BLENDSHAPE_ATTR("mouthFrownLeft", "mfl", mouthFrownLeft);
CREATE_BLENDSHAPE_ATTR("mouthFrownRight", "mfr", mouthFrownRight); CREATE_BLENDSHAPE_ATTR("mouthFunnel", "mf", mouthFunnel);
CREATE_BLENDSHAPE_ATTR("mouthLowerDownLeft", "mldl", mouthLowerDownLeft); CREATE_BLENDSHAPE_ATTR("mouthLowerDownRight", "mldr", mouthLowerDownRight);
CREATE_BLENDSHAPE_ATTR("mouthPressLeft", "mpl", mouthPressLeft); CREATE_BLENDSHAPE_ATTR("mouthPressRight", "mpr", mouthPressRight);
CREATE_BLENDSHAPE_ATTR("mouthPucker", "mp", mouthPucker); CREATE_BLENDSHAPE_ATTR("mouthRight", "mr", mouthRight);
CREATE_BLENDSHAPE_ATTR("mouthRollLower", "mrl", mouthRollLower); CREATE_BLENDSHAPE_ATTR("mouthRollUpper", "mru", mouthRollUpper);
CREATE_BLENDSHAPE_ATTR("mouthShrugLower", "msl", mouthShrugLower); CREATE_BLENDSHAPE_ATTR("mouthShrugUpper", "msu", mouthShrugUpper);
CREATE_BLENDSHAPE_ATTR("mouthSmileLeft", "msml", mouthSmileLeft); CREATE_BLENDSHAPE_ATTR("mouthSmileRight", "msmr", mouthSmileRight);
CREATE_BLENDSHAPE_ATTR("mouthStretchLeft", "mstl", mouthStretchLeft); CREATE_BLENDSHAPE_ATTR("mouthStretchRight", "mstr", mouthStretchRight);
CREATE_BLENDSHAPE_ATTR("mouthUpperUpLeft", "muul", mouthUpperUpLeft); CREATE_BLENDSHAPE_ATTR("mouthUpperUpRight", "muur", mouthUpperUpRight);
CREATE_BLENDSHAPE_ATTR("noseSneerLeft", "nsl", noseSneerLeft); CREATE_BLENDSHAPE_ATTR("noseSneerRight", "nsr", noseSneerRight);
outputBlendshapes = cAttr.create("outputBlendshapes", "obs"); cAttr.setArray(false);
#define ADD_BLENDSHAPE_CHILD(attr) cAttr.addChild(attr)
ADD_BLENDSHAPE_CHILD(browDownLeft); ADD_BLENDSHAPE_CHILD(browDownRight); ADD_BLENDSHAPE_CHILD(browInnerUp);
ADD_BLENDSHAPE_CHILD(browOuterUpLeft); ADD_BLENDSHAPE_CHILD(browOuterUpRight); ADD_BLENDSHAPE_CHILD(cheekPuff);
ADD_BLENDSHAPE_CHILD(cheekSquintLeft); ADD_BLENDSHAPE_CHILD(cheekSquintRight); ADD_BLENDSHAPE_CHILD(eyeBlinkLeft);
ADD_BLENDSHAPE_CHILD(eyeBlinkRight); ADD_BLENDSHAPE_CHILD(eyeLookDownLeft); ADD_BLENDSHAPE_CHILD(eyeLookDownRight);
ADD_BLENDSHAPE_CHILD(eyeLookInLeft); ADD_BLENDSHAPE_CHILD(eyeLookInRight); ADD_BLENDSHAPE_CHILD(eyeLookOutLeft);
ADD_BLENDSHAPE_CHILD(eyeLookOutRight); ADD_BLENDSHAPE_CHILD(eyeLookUpLeft); ADD_BLENDSHAPE_CHILD(eyeLookUpRight);
ADD_BLENDSHAPE_CHILD(eyeSquintLeft); ADD_BLENDSHAPE_CHILD(eyeSquintRight); ADD_BLENDSHAPE_CHILD(eyeWideLeft);
ADD_BLENDSHAPE_CHILD(eyeWideRight); ADD_BLENDSHAPE_CHILD(jawForward); ADD_BLENDSHAPE_CHILD(jawLeft);
ADD_BLENDSHAPE_CHILD(jawOpen); ADD_BLENDSHAPE_CHILD(jawRight); ADD_BLENDSHAPE_CHILD(mouthClose);
ADD_BLENDSHAPE_CHILD(mouthDimpleLeft); ADD_BLENDSHAPE_CHILD(mouthDimpleRight); ADD_BLENDSHAPE_CHILD(mouthFrownLeft);
ADD_BLENDSHAPE_CHILD(mouthFrownRight); ADD_BLENDSHAPE_CHILD(mouthFunnel); ADD_BLENDSHAPE_CHILD(mouthLowerDownLeft);
ADD_BLENDSHAPE_CHILD(mouthLowerDownRight); ADD_BLENDSHAPE_CHILD(mouthPressLeft); ADD_BLENDSHAPE_CHILD(mouthPressRight);
ADD_BLENDSHAPE_CHILD(mouthPucker); ADD_BLENDSHAPE_CHILD(mouthRight); ADD_BLENDSHAPE_CHILD(mouthRollLower);
ADD_BLENDSHAPE_CHILD(mouthRollUpper); ADD_BLENDSHAPE_CHILD(mouthShrugLower); ADD_BLENDSHAPE_CHILD(mouthShrugUpper);
ADD_BLENDSHAPE_CHILD(mouthSmileLeft); ADD_BLENDSHAPE_CHILD(mouthSmileRight); ADD_BLENDSHAPE_CHILD(mouthStretchLeft);
ADD_BLENDSHAPE_CHILD(mouthStretchRight); ADD_BLENDSHAPE_CHILD(mouthUpperUpLeft); ADD_BLENDSHAPE_CHILD(mouthUpperUpRight);
ADD_BLENDSHAPE_CHILD(noseSneerLeft); ADD_BLENDSHAPE_CHILD(noseSneerRight);
addAttribute(outputBlendshapes);
attributeAffects(videoPath, outputBlendshapes);
attributeAffects(currentTime, outputBlendshapes);
attributeAffects(enable, outputBlendshapes);
return MS::kSuccess;
}
// NOTE: the plugin entry points (initializePlugin / uninitializePlugin) are defined
// in plugin.cpp. Defining them here as well would not compile (MFnPlugin.h is not
// included) and would cause duplicate-symbol link errors, since src/CMakeLists.txt
// builds both files into the same target.
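A quick scene-level sanity check for the compiled node (a sketch; assumes the built `Video2ARKit.mll` is on `MAYA_PLUG_IN_PATH`):
```python
from maya import cmds
cmds.loadPlugin("Video2ARKit.mll")
node = cmds.createNode("Video2ARKitNode")
cmds.setAttr(node + ".videoPath", r"C:\MyVideo.mp4", type="string")
cmds.connectAttr("time1.outTime", node + ".currentTime")
print(cmds.getAttr(node + ".jawOpen"))  # sampled at the current frame
```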

src/Video2ARKitNode.h Normal file

@@ -0,0 +1,57 @@
#pragma once
#include <maya/MPxNode.h>
#include <maya/MTypeId.h>
#include <maya/MString.h>
#include <vector>
#include <map>
#include <string>
// Forward declarations
class MObject;
class MPlug;
class MDataBlock;
class MStatus;
// Data structure definitions
using BlendshapeFrame = std::map<std::string, float>;
using BlendshapeCache = std::vector<BlendshapeFrame>;
class Video2ARKitNode : public MPxNode
{
public:
Video2ARKitNode();
virtual ~Video2ARKitNode() override;
virtual MStatus compute(const MPlug& plug, MDataBlock& data) override;
static void* creator();
static MStatus initialize();
public:
// Unique node ID
static MTypeId id;
// Node Attributes
static MObject videoPath;
static MObject currentTime;
static MObject enable;
static MObject outputBlendshapes;
// Individual Blendshape Attributes
static MObject browDownLeft, browDownRight, browInnerUp, browOuterUpLeft, browOuterUpRight, cheekPuff,
cheekSquintLeft, cheekSquintRight, eyeBlinkLeft, eyeBlinkRight, eyeLookDownLeft, eyeLookDownRight,
eyeLookInLeft, eyeLookInRight, eyeLookOutLeft, eyeLookOutRight, eyeLookUpLeft, eyeLookUpRight,
eyeSquintLeft, eyeSquintRight, eyeWideLeft, eyeWideRight, jawForward, jawLeft, jawOpen, jawRight,
mouthClose, mouthDimpleLeft, mouthDimpleRight, mouthFrownLeft, mouthFrownRight, mouthFunnel,
mouthLowerDownLeft, mouthLowerDownRight, mouthPressLeft, mouthPressRight, mouthPucker, mouthRight,
mouthRollLower, mouthRollUpper, mouthShrugLower, mouthShrugUpper, mouthSmileLeft, mouthSmileRight,
mouthStretchLeft, mouthStretchRight, mouthUpperUpLeft, mouthUpperUpRight, noseSneerLeft, noseSneerRight;
private:
// This cache implementation is static. This means all nodes of this type will
// share the same cache. For this specific use case (one video driving one rig),
// this is a simple and effective approach.
static BlendshapeCache m_blendshapeCache;
static MString m_cachedVideoPath;
};

src/plugin.cpp Normal file

@@ -0,0 +1,39 @@
#include <maya/MFnPlugin.h>
#include <maya/MStatus.h>
#include "facialPerformanceNode.h"
// Called when the plugin is loaded
MStatus initializePlugin(MObject obj)
{
MStatus status;
MFnPlugin plugin(obj, "Video2ARKit", "1.0", "Any");
status = plugin.registerNode(
"facialPerformanceNode",
FacialPerformanceNode::id,
FacialPerformanceNode::creator,
FacialPerformanceNode::initialize
);
if (!status) {
status.perror("Failed to register facialPerformanceNode");
}
return status;
}
// Called when the plugin is unloaded
MStatus uninitializePlugin(MObject obj)
{
MStatus status;
MFnPlugin plugin(obj);
status = plugin.deregisterNode(FacialPerformanceNode::id);
if (!status) {
status.perror("Failed to deregister facialPerformanceNode");
}
return status;
}