Update source code and add resources
This commit is contained in:
parent 7cc823c64e
commit 531726bd70
@@ -2,6 +2,8 @@ venv
__pycache__/
*.pyc
*.pbtxt
*.mp4
example/
lib/
build/
vendor/
@@ -7,7 +7,7 @@ Use Google MediaPipe to convert video into ARKit blendshapes in Maya
## Build and Install
```powershell
mayapy -m pip install mediapipe opencv-python
mayapy -m pip install -r requirements.txt
```
Set `MAYA_MODULE_PATH` to the Video2ARKit.mod directory in Maya.env
```powershell

Binary file not shown.
@@ -1,12 +1,14 @@
import maya.api.OpenMaya as om
from V2A.Video2ARKitNode import Video2ARKitNode
from V2A.Video2ARKitCommand import Video2ARKitCommand
from V2A.Video2ARKitCommand import V2ACommand
from V2A.Video2ARKitCommand import V2AConvertCommnad


def maya_useNewAPI():
    pass


def initializePlugin(mobject):
    mplugin = om.MFnPlugin(mobject, "YourName", "1.0", "Any")
    mplugin = om.MFnPlugin(mobject, "Video2ARKit", "1.0", "Any")
    try:
        mplugin.registerNode(Video2ARKitNode.NODE_NAME, Video2ARKitNode.NODE_ID,
                             Video2ARKitNode.creator, Video2ARKitNode.initialize)
@@ -15,12 +17,20 @@ def initializePlugin(mobject):
        om.MGlobal.displayError("Failed to register node")

    try:
        mplugin.registerCommand(Video2ARKitCommand.COMMAND_NAME,
                                Video2ARKitCommand.creator,
                                Video2ARKitCommand.createSyntax)
        mplugin.registerCommand(V2ACommand.COMMAND_NAME,
                                V2ACommand.creator,
                                V2ACommand.createSyntax)

        mplugin.registerCommand(V2AConvertCommnad.COMMAND_NAME,
                                V2AConvertCommnad.creator,
                                V2AConvertCommnad.createSyntax)
    except:
        import traceback
        om.MGlobal.displayError(traceback.format_exc())
        om.MGlobal.displayError("Failed to register command")


def uninitializePlugin(mobject):
    mplugin = om.MFnPlugin(mobject)
    try:
@@ -29,6 +39,7 @@ def uninitializePlugin(mobject):
        om.MGlobal.displayError("Failed to deregister node")

    try:
        mplugin.deregisterCommand(Video2ARKitCommand.COMMAND_NAME)
        mplugin.deregisterCommand(V2ACommand.COMMAND_NAME)
        mplugin.deregisterCommand(V2AConvertCommnad.COMMAND_NAME)
    except:
        om.MGlobal.displayError("Failed to deregister command")

File diff suppressed because it is too large
File diff suppressed because it is too large
Binary file not shown.
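For orientation, here is a small, hedged sketch of loading the plug-in from the Maya Script Editor once the module path is set. The plug-in file name `Video2ARKit.py` is an assumption, since the entry file name is not visible in this diff.

```python
from maya import cmds

# Assumption: the plug-in entry file registered by the module is named "Video2ARKit.py".
if not cmds.pluginInfo("Video2ARKit.py", query=True, loaded=True):
    cmds.loadPlugin("Video2ARKit.py")

# Commands registered by initializePlugin() above should now be listed,
# e.g. 'v2aConvert' and 'v2aCmds'.
print(cmds.pluginInfo("Video2ARKit.py", query=True, command=True))
```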
@@ -0,0 +1,134 @@
global proc AEInputDeviceNew(string $attr)
{
    setUITemplate -pst attributeEditorTemplate;
    optionMenuGrp -label "Input Device" a2vAEInputDevice;
    string $deviceNames[] = `v2aCmds -d`;
    for ($device in $deviceNames)
    {
        menuItem -label $device;
    }
    // AEInputDeviceReplace($attr);
    setUITemplate -ppt;
}

global proc AEInputDeviceReplace(string $attr)
{
    string $value = `getAttr $attr`;
    optionMenuGrp -e -v $value -changeCommand ("AEadjustInputDevice "+$attr) a2vAEInputDevice;
}

global proc AEadjustInputDevice(string $attr)
{
    string $value = `optionMenuGrp -q -v a2vAEInputDevice`;
    setAttr -type "string" $attr $value;
}

global proc AEInputTypeNew(string $attr)
{
    setUITemplate -pst attributeEditorTemplate;
    optionMenuGrp -label "Input Type" a2vAEInputType;
    menuItem -l "Video File";
    menuItem -l "Camera Stream";
    menuItem -l "Network Stream";
    menuItem -l "VMC Protocol";
    setUITemplate -ppt;
}

global proc AEInputTypeReplace(string $attr)
{
    string $value = `getAttr $attr`;
    optionMenuGrp -e -v $value -changeCommand ("AEadjustInputType "+$attr) a2vAEInputType;
}

global proc AEadjustInputType(string $attr)
{
    int $value = `optionMenuGrp -q -sl a2vAEInputType` - 1;
    setAttr $attr $value;
}
global proc AEVideoPathNew(string $attr)
{
    setUITemplate -pst attributeEditorTemplate;
    textFieldButtonGrp -l "Video Path" a2VAEVideoPath;

    setUITemplate -ppt;
    textFieldButtonGrp -e
        -buttonCommand ("AEVideoPathBrowse "+$attr)
        -changeCommand ("AEAdjustVideoPath "+$attr)
        -forceChangeCommand
        a2VAEVideoPath;
}

global proc AEVideoPathReplace(string $attr)
{
    string $value = `getAttr $attr`;
    textFieldButtonGrp -e -tx $value a2VAEVideoPath;
}

global proc AEVideoPathBrowse(string $attr)
{
    string $value = `getAttr $attr`;
    string $currentDir = `dirname $value`;
    string $videoFilters = "MP4 Files (*.mp4);;MOV Files (*.mov);;AVI Files (*.avi);;MPEG Files (*.mpg);;All Files (*.*)";
    string $result[] = `fileDialog2 -caption "Select Video File" -fileMode 1 -fileFilter $videoFilters -selectFileFilter "MP4 Files" -dialogStyle 2`;

    if (size($result) > 0)
    {
        textFieldButtonGrp -e -tx $result[0] a2VAEVideoPath;
        setAttr -type "string" $attr $result[0];
    }
}

global proc AEAdjustVideoPath(string $attr)
{
    string $value = `textFieldButtonGrp -q -tx a2VAEVideoPath`;
    setAttr -type "string" $attr $value;
}
global proc AEVideo2ARKitTemplate(string $nodeName)
{
    string $shapeNames[] = {
        "browDownLeft", "browDownRight", "browInnerUp", "browOuterUpLeft", "browOuterUpRight",
        "cheekPuff", "cheekSquintLeft", "cheekSquintRight", "eyeBlinkLeft", "eyeBlinkRight",
        "eyeLookDownLeft", "eyeLookDownRight", "eyeLookInLeft", "eyeLookInRight", "eyeLookOutLeft",
        "eyeLookOutRight", "eyeLookUpLeft", "eyeLookUpRight", "eyeSquintLeft", "eyeSquintRight",
        "eyeWideLeft", "eyeWideRight", "jawForward", "jawLeft", "jawOpen", "jawRight",
        "mouthClose", "mouthDimpleLeft", "mouthDimpleRight", "mouthFrownLeft", "mouthFrownRight",
        "mouthFunnel", "mouthLeft", "mouthLowerDownLeft", "mouthLowerDownRight", "mouthPressLeft",
        "mouthPressRight", "mouthPucker", "mouthRight", "mouthRollLower", "mouthRollUpper",
        "mouthShrugLower", "mouthShrugUpper", "mouthSmileLeft", "mouthSmileRight", "mouthStretchLeft",
        "mouthStretchRight", "mouthUpperUpLeft", "mouthUpperUpRight", "noseSneerLeft", "noseSneerRight", "tongueOut"
    };

    // Placeholder for future AE template customization
    editorTemplate -beginScrollLayout;
    editorTemplate -beginLayout "General" -collapse false;
    editorTemplate -addControl "inTime";
    editorTemplate -addControl "inputType";
    // editorTemplate -addControl "inputDevice";
    editorTemplate -callCustom "AEInputDeviceNew"
                               "AEInputDeviceReplace"
                               $nodeName;
    editorTemplate -addControl "videoPath";
    editorTemplate -addControl "networkUrl";
    editorTemplate -addControl "networkPort";
    editorTemplate -addControl "modelPath";
    editorTemplate -addControl "processTrigger";
    editorTemplate -suppress "attribute";
    editorTemplate -endLayout;
    editorTemplate -beginLayout "Weights" -collapse false;
    for ($shape in $shapeNames)
    {
        editorTemplate -addControl ($shape+"Weight");
    }
    editorTemplate -endLayout;
    editorTemplate -suppress "parentList";
    editorTemplate -suppress "previous";
    editorTemplate -suppress "selfEnabled";
    editorTemplate -suppress "parentEnabled";
    editorTemplate -suppress "parentNumIsolatedChildren";
    editorTemplate -suppress "localRender";
    editorTemplate -addExtraControls;
    editorTemplate -endScrollLayout;
}
@@ -3,42 +3,114 @@ import os
import json

from V2A.core import process
from V2A.core.device import CameraDeviceManager


def maya_useNewAPI():
    pass


class V2AConvertCommnad(om.MPxCommand):
    COMMAND_NAME = "v2aConvert"
    kInputVideo = 'i'
    kInputVideoLong = 'inputVideo'
    kModelPath = 'm'
    kModelPathLong = 'modelPath'
    kOutputPath = 'o'
    kOutputPathLong = 'outputPath'
    kVerb = 'v'
    kVerbLong = 'verbose'


class Video2ARKitCommand(om.MPxCommand):

    def __init__(self):
        om.MPxCommand.__init__(self)

    @staticmethod
    def createSyntax():
        syntax = om.MSyntax()
        syntax.addFlag(V2AConvertCommnad.kInputVideo, V2AConvertCommnad.kInputVideoLong, om.MSyntax.kString)
        syntax.addFlag(V2AConvertCommnad.kModelPath, V2AConvertCommnad.kModelPathLong, om.MSyntax.kString)
        syntax.addFlag(V2AConvertCommnad.kOutputPath, V2AConvertCommnad.kOutputPathLong, om.MSyntax.kString)
        syntax.addFlag(V2AConvertCommnad.kVerb, V2AConvertCommnad.kVerbLong, om.MSyntax.kBoolean)

        return syntax
    def doIt(self, args):
        argData = om.MArgParser(self.syntax(), args)

        # Process the video and return the cached result
        if not argData.isFlagSet(self.kInputVideo):
            om.MGlobal.displayError("Video path is required.")
            return

        video_path = argData.flagArgumentString(self.kInputVideo, 0)

        if not video_path:
            om.MGlobal.displayError(f"Invalid video path: {video_path}")

        if video_path and not os.path.exists(video_path):
            om.MGlobal.displayError(f"Video path does not exist: {video_path}")

        model_path = os.getenv('V2A_MODEL_PATH')
        if argData.isFlagSet(self.kModelPath):
            model_path = argData.flagArgumentString(self.kModelPath, 0)
        else:
            if argData.isFlagSet(self.kVerb):
                om.MGlobal.displayInfo(f'Using default model: {model_path}')

        if not model_path:
            om.MGlobal.displayError(f'Invalid model path: {model_path}')

        if model_path and not os.path.exists(model_path):
            om.MGlobal.displayError(f'Model path does not exist: {model_path}')

        output_path = None
        if argData.isFlagSet(self.kOutputPath):
            output_path = argData.flagArgumentString(self.kOutputPath, 0)

        if not output_path:
            om.MGlobal.displayError('Please specify an output path.')

        if video_path and output_path and model_path:
            if argData.isFlagSet(self.kVerbLong):
                om.MGlobal.displayInfo(f'Processing video: {video_path} with model: {model_path}')
                om.MGlobal.displayInfo(f'Model: {model_path}')
                om.MGlobal.displayInfo(f'Output path: {output_path}')

            result = process.process_video(video_path, model_path=model_path, output_json=output_path)

            if not result:
                om.MGlobal.displayWarning('Output result is invalid')

    @staticmethod
    def creator():
        return V2AConvertCommnad()
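As a usage sketch for the command defined above (the file paths are hypothetical), `v2aConvert` can be called from Python with its long flag names:

```python
from maya import cmds

# Hypothetical paths; -modelPath falls back to the V2A_MODEL_PATH environment
# variable when the flag is omitted.
cmds.v2aConvert(
    inputVideo="D:/capture/face_take01.mp4",
    modelPath="D:/models/face_landmarker.task",
    outputPath="D:/cache/face_take01.json",
    verbose=True,
)
```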
class V2ACommand(om.MPxCommand):
    COMMAND_NAME = "v2aCmds"
    kVideoPath = 'v'
    kVideoPathLong = 'videoPath'
    kModelPath = 'm'
    kModelPathLong = 'modelPath'
    kOutputPath = 'o'
    kOutputPathLong = 'outputPath'
    kReload = 'r'
    kReloadLong = 'reload'
    kDeviceList = 'd'
    kDeviceListLong = 'deviceList'
    kDecodeMethod = 'm'
    kDecodeMethodLong = 'decodeMethod'
    kReload = 'r'
    kReloadLong = 'reload'

    def __init__(self):
        om.MPxCommand.__init__(self)

    @staticmethod
    def createSyntax():
        syntax = om.MSyntax()
        syntax.addFlag("v", "videoPath", om.MSyntax.kString)
        syntax.addFlag("m", "modelPath", om.MSyntax.kString)
        syntax.addFlag("o", "outputPath", om.MSyntax.kString)
        syntax.addFlag("r", "reload", om.MSyntax.kString)

        syntax.useSelectionAsDefault(True)
        syntax.addFlag(V2ACommand.kDeviceList, V2ACommand.kDeviceListLong)
        syntax.addFlag(V2ACommand.kReload, V2ACommand.kReloadLong)
        return syntax

    def doIt(self, args):
        argData = om.MArgParser(self.syntax(), args)

        reload_object = None

        if argData.isFlagSet(self.kReload):
            # Reload the cache
            node_name = argData.flagArgumentString("reload", 0)
            s_list = om.MGlobal.getSelectionListByName(node_name) if node_name else om.MGlobal.getActiveSelectionList()
            s_iter = om.MItSelectionList(s_list, om.MFn.kDependencyNode)
            selection = argData.getObjectList() if not argData.getObjectList().isEmpty() else om.MGlobal.getActiveSelectionList()

            s_iter = om.MItSelectionList(selection, om.MFn.kDependencyNode)
            if not s_iter.isDone():
                depend_node = s_iter.getDependNode()
                node_fn = om.MFnDependencyNode(depend_node)
@@ -47,37 +119,18 @@ class Video2ARKitCommand(om.MPxCommand):
                trigger = node_fn.findPlug("processTrigger", True)
                value = trigger.getValue()
                trigger.setValue(value)
            else:
                # Process the video and return the cached result
                if argData.isFlagSet("videoPath"):
                    video_path = argData.flagArgumentString("videoPath", 0)
                    model_path = os.getenv('V2A_MODEL_PATH')
                    if argData.isFlagSet("modelPath"):
                        model_path = argData.flagArgumentString("modelPath", 0)
                    output_path = None
                    if argData.isFlagSet("outputPath"):
                        output_path = argData.flagArgumentString("outputPath", 0)
                    if not video_path:
                        om.MGlobal.displayError("Video path is required.")
                        return
                    if not model_path:
                        om.MGlobal.displayError("Model path is required.")
                        return
                    if not output_path:
                        om.MGlobal.displayInfo("No output path specified. Results will not be saved to file.")
                    else:
                        om.MGlobal.displayInfo(f"Output path set to: {output_path}")

                    om.MGlobal.displayInfo(f"Processing video: {video_path} with model: {model_path}")
                    cache = process.process_video(video_path, model_path, output_path)
                    if not output_path:
                        super(Video2ARKitCommand, self).setResult(json.dumps(cache))
                    else:
                        om.MGlobal.displayInfo(f"Output path set to: {output_path}")
                        # super(Video2ARKitCommand, self).setResult(json.dumps(cache))

            # om.MGlobal.displayInfo("Video2ARKit processing command executed.")
            s_iter.next()
            return

        if argData.isFlagSet(self.kDecodeMethod):
            self.setResult(process.support_decode_methods())
            return

        if argData.isFlagSet(self.kDeviceList):
            device_manager = CameraDeviceManager()
            self.setResult(device_manager.list_available_devices())
            return

    @staticmethod
    def creator():
        return Video2ARKitCommand()
        return V2ACommand()
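A couple of hedged examples of the `v2aCmds` query flags handled above; the node name used in the reload case is hypothetical:

```python
from maya import cmds

# List the camera devices found by CameraDeviceManager (-d / -deviceList).
print(cmds.v2aCmds(deviceList=True))

# Re-trigger the cache reload on a node (node name is hypothetical);
# with no explicit object the command falls back to the active selection.
cmds.select("video2ARKitNode1")
cmds.v2aCmds(reload=True)
```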
@@ -1,4 +1,5 @@
import maya.api.OpenMaya as om
from maya import cmds, mel
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
@@ -30,12 +31,17 @@ class Video2ARKitNode(om.MPxNode):

    # Attribute definitions
    aInTime = om.MObject()
    aInputType = om.MObject()
    aInputDevice = om.MObject()
    aNetworkUrl = om.MObject()
    aNetworkPort = om.MObject()
    aVideoPath = om.MObject()
    aModelPath = om.MObject()
    aProcessTrigger = om.MObject()

    # Output attribute dictionaries
    output_attrs = {}
    output_weights = {}

    def __init__(self):
        om.MPxNode.__init__(self)
@@ -50,6 +56,7 @@ class Video2ARKitNode(om.MPxNode):
        trigger = data_block.inputValue(self.aProcessTrigger).asBool()
        video_path = data_block.inputValue(self.aVideoPath).asString()
        model_path = data_block.inputValue(self.aModelPath).asString()
        input_type = data_block.inputValue(self.aInputType).asInt()

        # Check whether the video needs to be re-analyzed
        if trigger and not self._cache:
@@ -78,12 +85,19 @@ class Video2ARKitNode(om.MPxNode):
        self.aProcessTrigger.setBool(current_value)
        om.MGlobal.displayInfo("Reload cache complete. Cache populated.")

    def postConstructor(self):
        device = om.MGlobal.executeCommandStringResult('v2aCmds -d')
        device_plug = om.MPlug(self.thisMObject(), self.aInputDevice)
        device_plug.setString(device)
        om.MGlobal.displayInfo(device)

    @staticmethod
    def creator():
        return Video2ARKitNode()

    @staticmethod
    def initialize():
        eAttr = om.MFnEnumAttribute()
        nAttr = om.MFnNumericAttribute()
        tAttr = om.MFnTypedAttribute()
        uAttr = om.MFnUnitAttribute()
@@ -92,6 +106,23 @@ class Video2ARKitNode(om.MPxNode):
        Video2ARKitNode.aInTime = uAttr.create("inTime", "it", om.MFnUnitAttribute.kTime, 0.0)
        om.MPxNode.addAttribute(Video2ARKitNode.aInTime)

        # Input: input type (reserved for future expansion)
        Video2ARKitNode.aInputType = eAttr.create("inputType", "ity", 0)
        eAttr.addField("Video File", 0)
        eAttr.addField("Camera Stream", 1)
        eAttr.addField("Network Stream", 2)
        eAttr.addField("VMC Protocol", 3)
        om.MPxNode.addAttribute(Video2ARKitNode.aInputType)

        Video2ARKitNode.aInputDevice = tAttr.create("inputDevice", "idev", om.MFnData.kString)
        om.MPxNode.addAttribute(Video2ARKitNode.aInputDevice)

        Video2ARKitNode.aNetworkUrl = tAttr.create("networkUrl", "nu", om.MFnData.kString)
        om.MPxNode.addAttribute(Video2ARKitNode.aNetworkUrl)

        Video2ARKitNode.aNetworkPort = nAttr.create("networkPort", "np", om.MFnNumericData.kInt, 8080)
        om.MPxNode.addAttribute(Video2ARKitNode.aNetworkPort)

        # Input: video path
        Video2ARKitNode.aVideoPath = tAttr.create("videoPath", "vp", om.MFnData.kString)
        om.MPxNode.addAttribute(Video2ARKitNode.aVideoPath)
@@ -115,6 +146,14 @@ class Video2ARKitNode(om.MPxNode):
            nAttr.storable = False
            om.MPxNode.addAttribute(attr)
            Video2ARKitNode.output_attrs[shape_name] = attr

            weight_attr = nAttr.create(f'{shape_name}Weight', f'{shape_name}_w', om.MFnNumericData.kFloat, 1.0)
            nAttr.writable = True
            nAttr.storable = True
            nAttr.keyable = True
            om.MPxNode.addAttribute(weight_attr)
            om.MPxNode.attributeAffects(weight_attr, attr)

            # Set up dependencies
            om.MPxNode.attributeAffects(Video2ARKitNode.aVideoPath, attr)
            om.MPxNode.attributeAffects(Video2ARKitNode.aInTime, attr)
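A hedged sketch of wiring the node into a scene once the plug-in is loaded. The node type string, the exact output plug names, and the blendShape name are assumptions; the `*Weight` attribute names come from the loop above.

```python
from maya import cmds

# Assumption: the node type registers as "Video2ARKit" (see Video2ARKitNode.NODE_NAME).
node = cmds.createNode("Video2ARKit")

# Hypothetical paths.
cmds.setAttr(node + ".videoPath", "D:/capture/face_take01.mp4", type="string")
cmds.setAttr(node + ".modelPath", "D:/models/face_landmarker.task", type="string")

# Drive evaluation from scene time and kick off processing.
cmds.connectAttr("time1.outTime", node + ".inTime")
cmds.setAttr(node + ".processTrigger", True)

# Each blendshape output can be scaled by its weight attribute and wired to a
# blendShape target of the same ARKit name (blendShape1 is hypothetical).
cmds.setAttr(node + ".jawOpenWeight", 0.8)
cmds.connectAttr(node + ".jawOpen", "blendShape1.jawOpen")
```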
@@ -0,0 +1,13 @@
from maya import cmds, mel

def Video2ARKitTemplate(node):
    cmds.editorTemplate(beginScrollLayout=True)
    cmds.editorTemplate(beginLayout=True, label='General Settings')
    cmds.editorTemplate(addControl='inTime', label='Current Time')
    cmds.editorTemplate(addControl='inputType', label='Input Type')
    cmds.editorTemplate(addControl='inputDevice', label='Input Device')
    cmds.editorTemplate(addControl='videoPath', label='Video Path')
    cmds.editorTemplate(addControl='modelPath', label='Model Path')
    cmds.editorTemplate(addControl='processTrigger', label='Reload Cache')
    cmds.editorTemplate(endLayout=True)
    cmds.editorTemplate(endScrollLayout=True)
@@ -0,0 +1,88 @@
import av
import platform
import subprocess
import os
import logging

# Quiet the comtypes logger
logging.getLogger("comtypes").setLevel(logging.INFO)


class CameraDeviceManager:
    @staticmethod
    def list_available_devices():
        """List the camera device names or paths available on the current system."""
        os_name = platform.system()
        devices = []

        if os_name == "Windows":
            try:
                from pygrabber.dshow_graph import FilterGraph
                devices = FilterGraph().get_input_devices()
            except ImportError:
                print("Install pygrabber to retrieve the Windows device list")

        elif os_name == "Linux":
            # Scan for /dev/video* device files
            devices = [f"/dev/{d}" for d in os.listdir('/dev') if d.startswith('video')]

        elif os_name == "Darwin":  # macOS
            # macOS typically uses indices "0", "1", ...
            devices = ["0", "1", "2"]

        return devices

    @staticmethod
    def get_pyav_config(device_identifier):
        """Build the PyAV open() parameters for the given device and operating system."""
        os_name = platform.system()

        if os_name == "Windows":
            return {
                "file": f"video={device_identifier}",
                "format": "dshow",
                "options": {"rtbufsize": "64M", "framerate": "30"}
            }
        elif os_name == "Darwin":
            return {
                "file": str(device_identifier),
                "format": "avfoundation",
                "options": {"framerate": "30", "pixel_format": "uyvy422"}
            }
        else:  # Linux
            return {
                "file": device_identifier,
                "format": "v4l2",
                "options": {"framerate": "30"}
            }


# --- Example capture loop for use inside the Maya plug-in ---

def run_camera_capture(device_name, model_path, result_callback):
    config = CameraDeviceManager.get_pyav_config(device_name)

    try:
        # Open the camera container
        container = av.open(
            config["file"],
            format=config["format"],
            options=config["options"]
        )

        stream = container.streams.video[0]
        # Low-latency mode: drop stale frames and keep only the latest one
        stream.thread_type = "AUTO"

        for frame in container.decode(stream):
            # Convert the PyAV frame to RGB
            rgb_image = frame.to_ndarray(format='rgb24')

            # Hook the MediaPipe processing logic in here
            # process_mediapipe(rgb_image)

            # Key to keeping the Maya UI responsive: avoid heavy work inside this loop
            # if some_stop_condition:  # add a stop mechanism
            #     break

        container.close()
    except Exception as e:
        print(f"PyAV Camera Error: {e}")
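Outside of the capture loop above, a minimal sketch of using the helper directly (results depend on the cameras attached to the machine):

```python
import av
from V2A.core.device import CameraDeviceManager

devices = CameraDeviceManager.list_available_devices()
print(devices)

if devices:
    cfg = CameraDeviceManager.get_pyav_config(devices[0])
    container = av.open(cfg["file"], format=cfg["format"], options=cfg["options"])
    # Grab a single frame as a smoke test.
    frame = next(container.decode(video=0))
    print(frame.width, frame.height)
    container.close()
```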
@@ -13,14 +13,35 @@ logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

DEFAULT_MODEL_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'models', 'face_landmarker.task'))
SUPPORT_DECODE_METHODS = ['cv2', 'av']


def process_video(video_path, model_path=DEFAULT_MODEL_PATH, output_json=None, method='av'):
    if method == 'cv2':
        return process_video_cv2(video_path, model_path, output_json)
    else:
        return process_video_av(video_path, model_path, output_json)


def support_decode_methods():
    return SUPPORT_DECODE_METHODS


def process_video(video_path, model_path=DEFAULT_MODEL_PATH, output_json=None, method='av', callback=None):
    cache = {}

    if method not in SUPPORT_DECODE_METHODS:
        return {}

def process_video_cv2(video_path, model_path, output_json=None):
    if method in ['cv2']:
        cache = process_video_cv2(video_path, model_path, output_json=output_json, callback=callback)
    if method in ['av']:
        cache = process_video_av(video_path, model_path, output_json=output_json, callback=callback)

    if output_json:
        if output_json.endswith('.json'):
            output_json_dir = os.path.dirname(output_json)
            if not os.path.exists(output_json_dir):
                os.makedirs(output_json_dir)

            with open(output_json, 'w') as f:
                json.dump(cache, f, indent=4)
            logger.info(f"Output JSON saved to {output_json}")

    return output_json


def process_video_cv2(video_path, model_path, output_json=None, callback=None):
    """Core MediaPipe processing logic using OpenCV"""
    # Initialize MediaPipe
    cache = {}
@@ -56,7 +77,7 @@ def process_video_cv2(video_path, model_path, output_json=None):

    return cache


def process_video_av(video_path, model_path, output_json=None):
def process_video_av(video_path, model_path, output_json=None, callback=None):
    # Video-processing logic lives here
    """Core MediaPipe processing logic using PyAV"""
    cache = {}
@@ -95,17 +116,10 @@ def process_video_av(video_path, model_path, output_json=None):

        frame_count += 1

    if output_json:
        if output_json.endswith('.json'):
            output_json_dir = os.path.dirname(output_json)
            if not os.path.exists(output_json_dir):
                os.makedirs(output_json_dir)

        with open(output_json, 'w') as f:
            json.dump(cache, f, indent=4)
        logger.info(f"Output JSON saved to {output_json}")

    return cache
    return output_json
    # return cache


if __name__ == "__main__":
    # Test entry point
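For completeness, a hedged example of calling the processing entry point directly with mayapy or a plain Python interpreter; the paths are hypothetical and the default model resolves to models/face_landmarker.task as defined above:

```python
from V2A.core import process

# Hypothetical paths for illustration only.
out = process.process_video(
    "D:/capture/face_take01.mp4",
    model_path="D:/models/face_landmarker.task",  # or rely on DEFAULT_MODEL_PATH
    output_json="D:/cache/face_take01.json",
    method="av",  # one of SUPPORT_DECODE_METHODS: 'cv2' or 'av'
)
print(out)  # the new process_video() returns the output JSON path
```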
@@ -0,0 +1,5 @@
from maya import cmds

def init_ae_templetes():
    pass