我正在运行下面的C#代码,它计算光流图,并在游戏期间将它们保存为PNG,使用的是VR头显,帧率上限为90 FPS。如果没有这段代码,该项目可以在90 FPS下流畅运行。要在同一项目中运行此代码并始终保持在80 FPS以上,我必须在协程中使用WaitForSeconds(0.2f),但理想的情况是为游戏的每一帧都计算并保存光流图,或者至少以较低的延迟(大约0.01秒)进行。我已经在使用AsyncGPUReadback和WriteAsync了。
using System.Collections;
using UnityEngine;
using System.IO;
using UnityEngine.Rendering;
namespace OpticalFlowAlternative
{
/// <summary>
/// Computes a per-frame optical-flow map from screen captures and writes it to
/// disk as numbered PNG files. Flow is computed at 1/16 screen resolution via
/// <see cref="flowMaterial"/> passes, read back asynchronously with
/// <see cref="AsyncGPUReadback"/>, and written with async file I/O.
/// </summary>
public class OpticalFlow : MonoBehaviour {

    /// <summary>Shader pass indices expected by <see cref="flowMaterial"/>.</summary>
    protected enum Pass {
        Flow = 0,
        DownSample = 1,
        BlurH = 2,
        BlurV = 3,
        Visualize = 4
    };

    /// <summary>Exposes the result buffer to callers.
    /// NOTE(review): resultBuffer is never assigned anywhere in this class,
    /// so this currently always returns null — confirm against other code.</summary>
    public RenderTexture Flow { get { return resultBuffer; } }

    // Material that implements the flow/blur/visualize passes above.
    [SerializeField] protected Material flowMaterial;

    protected RenderTexture prevFrame, flowBuffer, resultBuffer, renderTexture, rt;

    // Destination folder for the numbered PNGs; expected to end with a path separator.
    public string customOutputFolderPath = "";
    private string filepathforflow;
    private int imageCount = 0;

    // Flow is computed at 1/16 of the screen resolution to keep GPU cost low.
    int targetTextureWidth, targetTextureHeight;

    private EyeTrackingV2 eyeTracking;

    protected void Start () {
        eyeTracking = GameObject.Find("XR Rig").GetComponent<EyeTrackingV2>();

        targetTextureWidth = Screen.width / 16;
        targetTextureHeight = Screen.height / 16;
        flowMaterial.SetFloat("_Ratio", 1f * Screen.height / Screen.width);

        // Small target for the downsampled flow input; full-size target for the raw capture.
        renderTexture = new RenderTexture(targetTextureWidth, targetTextureHeight, 0);
        rt = new RenderTexture(Screen.width, Screen.height, 0);

        // Method-call form instead of the string form: survives renames and is checked at compile time.
        StartCoroutine(StartCapture());
    }

    protected void LateUpdate()
    {
        // Publish how many flow images have been written so far.
        eyeTracking.flowCount = imageCount;
    }

    protected void OnDestroy ()
    {
        // Release each texture independently. The original only released
        // rt/renderTexture when prevFrame was non-null, which leaked both
        // whenever Setup() had not yet run.
        if (prevFrame != null)
        {
            prevFrame.Release();
            prevFrame = null;
        }
        if (flowBuffer != null)
        {
            flowBuffer.Release();
            flowBuffer = null;
        }
        if (rt != null)
        {
            rt.Release();
            rt = null;
        }
        if (renderTexture != null)
        {
            renderTexture.Release();
            renderTexture = null;
        }
    }

    /// <summary>
    /// Capture loop: grabs the screen, runs the flow pass against the previous
    /// frame, and kicks off an async GPU readback. Throttled to one capture
    /// every 0.2 s (deliberate — full-rate capture drops the game below 80 FPS).
    /// </summary>
    IEnumerator StartCapture()
    {
        while (true)
        {
            yield return new WaitForSeconds(0.2f);

            ScreenCapture.CaptureScreenshotIntoRenderTexture(rt);
            // Compensate for the vertical flip introduced by the screenshot capture.
            Graphics.Blit(rt, renderTexture, new Vector2(1, -1), new Vector2(0, 1));

            if (prevFrame == null)
            {
                // Lazy init so buffers match the (possibly late-initialized) screen size.
                Setup(targetTextureWidth, targetTextureHeight);
                Graphics.Blit(renderTexture, prevFrame);
            }

            flowMaterial.SetTexture("_PrevTex", prevFrame);
            // Compute the motion-flow frame from (current, previous).
            Graphics.Blit(renderTexture, flowBuffer, flowMaterial, (int)Pass.Flow);
            // Current frame becomes the previous frame for the next iteration.
            Graphics.Blit(renderTexture, prevFrame);

            AsyncGPUReadback.Request(flowBuffer, 0, TextureFormat.ARGB32, OnCompleteReadback);
        }
    }

    /// <summary>Readback callback: wraps the pixel data in a Texture2D and hands it off for saving.</summary>
    void OnCompleteReadback(AsyncGPUReadbackRequest request)
    {
        if (request.hasError)
            return;

        var tex = new Texture2D(targetTextureWidth, targetTextureHeight, TextureFormat.ARGB32, false);
        tex.LoadRawTextureData(request.GetData<uint>());
        tex.Apply();
        WriteTextureAsync(tex);
    }

    /// <summary>
    /// Encodes <paramref name="tex"/> to PNG and writes it asynchronously.
    /// Takes ownership of <paramref name="tex"/> and destroys it after encoding —
    /// the original leaked one Texture2D per captured frame.
    /// async void is acceptable here only because this is a top-level fire-and-forget
    /// callback; exceptions are caught and logged rather than left unobserved.
    /// </summary>
    async void WriteTextureAsync(Texture2D tex)
    {
        imageCount++;
        filepathforflow = customOutputFolderPath + imageCount + ".png";

        // EncodeToPNG and Destroy must run on the main thread (Unity API restriction),
        // so encode first, then only the file write is awaited.
        var bytes = tex.EncodeToPNG();
        Destroy(tex);

        try
        {
            // FileMode.Create truncates an existing file; OpenOrCreate (the original)
            // left stale trailing bytes when overwriting a larger old file,
            // producing corrupt PNGs. useAsync:true makes WriteAsync truly async
            // instead of sync-over-async on a thread-pool thread.
            using (var stream = new FileStream(filepathforflow, FileMode.Create,
                                               FileAccess.Write, FileShare.None,
                                               4096, useAsync: true))
            {
                await stream.WriteAsync(bytes, 0, bytes.Length);
            }
        }
        catch (IOException e)
        {
            Debug.LogException(e);
        }
    }

    /// <summary>Creates the previous-frame and flow buffers at the given resolution.</summary>
    protected void Setup(int width, int height)
    {
        prevFrame = new RenderTexture(width, height, 0);
        prevFrame.format = RenderTextureFormat.ARGBFloat;
        prevFrame.wrapMode = TextureWrapMode.Repeat;
        prevFrame.Create();

        flowBuffer = new RenderTexture(width, height, 0);
        flowBuffer.format = RenderTextureFormat.ARGBFloat;
        flowBuffer.wrapMode = TextureWrapMode.Repeat;
        flowBuffer.Create();
    }
}
}发布于 2022-11-13 08:15:51
首先,使用CommandBuffer,您可以对屏幕执行零拷贝读取,应用您的计算并将结果存储在单独的缓冲区(纹理)中。然后,您可以按帧请求回读部分纹理/多个纹理,而不会阻塞对当前正在计算的纹理的访问。完成回读后,最好的做法是在单独的线程中将数据编码为PNG/JPG,从而不阻塞主线程。
另外,如果您在DX11/桌面平台上,也可以将D3D缓冲区配置为支持快速CPU读取;如果您希望避免异步回读带来的几帧延迟,则可以在每一帧对该缓冲区进行映射。
从回读缓冲区再创建Texture2D是这里的另一处性能浪费:既然回读已经提供了像素值,您可以直接使用通用的PNG编码器在多个线程中保存数据(而Texture2D只能在主线程中创建)。
如果延迟对您来说可以接受,但您希望帧号与图像精确对应,那么可以把帧号编码到目标图像中,这样在保存为PNG之前始终能读到它。
关于附带的问题:CSV可能比默认的PNG编码更快,因为PNG内部使用类似zip的压缩,而CSV只是一堆序列化为字符串的数字。
https://stackoverflow.com/questions/74415944
复制相似问题