unity-application/Assets/MediaPipeUnity/Wesign_extractor.cs

// Copyright (c) 2021 homuler
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
// ATTENTION!: This code is for a tutorial.
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using UnityEngine;
using UnityEngine.UI;
using Mediapipe.Unity.CoordinateSystem;

namespace Mediapipe.Unity.Tutorial
{
  public class Wesign_extractor : MonoBehaviour
  {
    /// <summary>
    /// Config file to set up the graph
    /// </summary>
    [SerializeField] private TextAsset _configAsset;

    /// <summary>
    /// The screen object on which the video is displayed
    /// </summary>
    [SerializeField] private RawImage _screen;

    /// <summary>
    /// Width of the screen
    /// </summary>
    [SerializeField] private int _width;

    /// <summary>
    /// Height of the screen
    /// </summary>
    [SerializeField] private int _height;

    /// <summary>
    /// FPS of the screen
    /// </summary>
    [SerializeField] private int _fps;

    /// <summary>
    /// Landmark annotation controller that draws the landmarks on the screen
    /// </summary>
    [SerializeField] private PoseLandmarkListAnnotationController _poseLandmarkListAnnotationController;

    /// <summary>
    /// MediaPipe graph
    /// </summary>
    private CalculatorGraph _graph;

    /// <summary>
    /// Resource manager for graph resources (model files etc.)
    /// </summary>
    private ResourceManager _resourceManager;

    /// <summary>
    /// Webcam texture
    /// </summary>
    private WebCamTexture _webCamTexture;

    /// <summary>
    /// Input texture the webcam pixels are copied into
    /// </summary>
    private Texture2D _inputTexture;

    /// <summary>
    /// Screen pixel data buffer
    /// </summary>
    private Color32[] _pixelData;

    /// <summary>
    /// Stopwatch used to give a timestamp to video frames
    /// </summary>
    private Stopwatch _stopwatch;

    /// <summary>
    /// Google MediaPipe setup and run
    /// </summary>
    /// <returns> IEnumerator </returns>
    /// <exception cref="System.Exception"> Thrown when no webcam device is found </exception>
    private IEnumerator Start()
    {
      // Webcam setup
      if (WebCamTexture.devices.Length == 0)
      {
        throw new System.Exception("No web camera device was found");
      }
      var webCamDevice = WebCamTexture.devices[0];
      _webCamTexture = new WebCamTexture(webCamDevice.name, _width, _height, _fps);
      _webCamTexture.Play();
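      // WebCamTexture reports a 16x16 placeholder size until the first real
      // frame arrives, so wait for the width to grow past that before reading.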
      yield return new WaitUntil(() => _webCamTexture.width > 16);

      _screen.rectTransform.sizeDelta = new Vector2(_width, _height);
      _screen.texture = _webCamTexture;

      // TODO: copying pixels through a Texture2D every frame is inefficient;
      // a GPU-based path (e.g. the plugin's TextureFrame) would avoid the CPU copy.
      _inputTexture = new Texture2D(_width, _height, TextureFormat.RGBA32, false);
      _pixelData = new Color32[_width * _height];
      //_resourceManager = new LocalResourceManager();
      _resourceManager = new StreamingAssetsResourceManager();
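      // Prepare the model assets the graph config refers to, so the calculators
      // can load them by name at runtime (served from Assets/StreamingAssets).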
      yield return _resourceManager.PrepareAssetAsync("pose_detection.bytes");
      yield return _resourceManager.PrepareAssetAsync("pose_landmark_full.bytes");
      yield return _resourceManager.PrepareAssetAsync("face_landmark.bytes");
      yield return _resourceManager.PrepareAssetAsync("hand_landmark_full.bytes");
      yield return _resourceManager.PrepareAssetAsync("face_detection_short_range.bytes");
      yield return _resourceManager.PrepareAssetAsync("hand_recrop.bytes");
      yield return _resourceManager.PrepareAssetAsync("handedness.txt");

      _stopwatch = new Stopwatch();

      // Setting up the graph
      _graph = new CalculatorGraph(_configAsset.text);
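      // The graph config is expected to expose these three output streams
      // (pose plus left/right hand landmarks, as in the holistic tracking graph);
      // each OutputStream polls its stream for new packets.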
      var posestream = new OutputStream<NormalizedLandmarkListPacket, NormalizedLandmarkList>(_graph, "pose_landmarks");
      var leftstream = new OutputStream<NormalizedLandmarkListPacket, NormalizedLandmarkList>(_graph, "left_hand_landmarks");
      var rightstream = new OutputStream<NormalizedLandmarkListPacket, NormalizedLandmarkList>(_graph, "right_hand_landmarks");
      posestream.StartPolling().AssertOk();
      leftstream.StartPolling().AssertOk();
      rightstream.StartPolling().AssertOk();
      _graph.StartRun().AssertOk();
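      // From here on, webcam frames are pushed into "input_video" and the
      // output streams are polled once per rendered frame.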
      _stopwatch.Start();

      while (true)
      {
        _inputTexture.SetPixels32(_webCamTexture.GetPixels32(_pixelData));
        var imageFrame = new ImageFrame(ImageFormat.Types.Format.Srgba, _width, _height, _width * 4, _inputTexture.GetRawTextureData<byte>());
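        // MediaPipe timestamps are in microseconds; ElapsedTicks is in 100 ns
        // units, so dividing by (TicksPerMillisecond / 1000) == 10 yields µs.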
        var currentTimestamp = _stopwatch.ElapsedTicks / (System.TimeSpan.TicksPerMillisecond / 1000);
        _graph.AddPacketToInputStream("input_video", new ImageFramePacket(imageFrame, new Timestamp(currentTimestamp))).AssertOk();

        yield return new WaitForEndOfFrame();

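        // TryGetNext is non-blocking: it returns false when no new packet has
        // arrived yet, so frames without results are simply skipped.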
        if (posestream.TryGetNext(out var poseLandmarks))
        {
          if (poseLandmarks != null)
          {
            // Draw the pose landmarks on the screen
            _poseLandmarkListAnnotationController.DrawNow(poseLandmarks);
            var nose = poseLandmarks.Landmark[0]; // landmark 0 is the nose
            UnityEngine.Debug.Log($"Pose coordinates: {nose}");
          }
        }

        if (leftstream.TryGetNext(out var leftLandmarks))
        {
          if (leftLandmarks != null)
          {
            var leftWrist = leftLandmarks.Landmark[0]; // landmark 0 is the wrist
            UnityEngine.Debug.Log($"Left hand coordinates: {leftWrist}");
          }
        }

        if (rightstream.TryGetNext(out var rightLandmarks))
        {
          if (rightLandmarks != null)
          {
            var rightWrist = rightLandmarks.Landmark[0]; // landmark 0 is the wrist
            UnityEngine.Debug.Log($"Right hand coordinates: {rightWrist}");
          }
        }
      }
    }

    /// <summary>
    /// Proper destruction of the MediaPipe graph
    /// </summary>
    private void OnDestroy()
    {
      if (_webCamTexture != null)
      {
        _webCamTexture.Stop();
      }
      if (_graph != null)
      {
        try
        {
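          // Closing the input stream signals that no more packets will arrive,
          // which allows WaitUntilDone to return once the graph has flushed.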
          _graph.CloseInputStream("input_video").AssertOk();
          _graph.WaitUntilDone().AssertOk();
        }
        finally
        {
          _graph.Dispose();
        }
      }
    }
  }
}