我想要读取RTSP视频源,在画面上添加覆盖文本,并将其推送到RTMP端点。我使用OpenCV读取视频流,并通过Python子进程(FFmpeg)将处理后的帧写回RTMP端点。标签:FFmpeg、RTMP推流、OpenCV、Python
import sys
import subprocess
import cv2
import ffmpeg

rtmp_url = "rtmp://127.0.0.1:1935/live/test"

cap = cv2.VideoCapture("rtsp://10.0.1.7/media.sdp")

# Gather video info to pass to FFmpeg.
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# The frames written to stdin are raw BGR pixels, so FFmpeg must be told to
# read "rawvideo" input with an explicit size and pixel format, and the output
# must be re-encoded (e.g. libx264).  "-c copy" cannot copy raw frames into an
# FLV container -- FFmpeg exits immediately, which is what produced the
# "Errno 32 Broken pipe" on the next stdin write.
command = ['ffmpeg',
           '-f', 'rawvideo',            # input is raw video frames
           '-s', f'{width}x{height}',   # must match the NumPy frame size
           '-pixel_format', 'bgr24',    # OpenCV frames are BGR, 8 bits/channel
           '-r', f'{fps}',
           '-i', '-',                   # read input from stdin
           '-pix_fmt', 'yuv420p',       # widely supported output pixel format
           '-c:v', 'libx264',           # re-encode (cannot stream-copy raw input)
           '-f', 'flv',
           rtmp_url]

# Do not capture stdout/stderr with PIPE unless they are actively drained:
# FFmpeg can fill the pipe buffer and deadlock.
p = subprocess.Popen(command, stdin=subprocess.PIPE)

font = cv2.FONT_HERSHEY_SIMPLEX

while cap.isOpened():
    ret, frame = cap.read()

    # Check the read result BEFORE using the frame: on a failed read,
    # frame is None and cv2.putText would raise.
    if not ret:
        print("frame read failed")
        break

    cv2.putText(frame, 'TEXT ON VIDEO', (50, 50), font, 1, (0, 255, 255), 2, cv2.LINE_4)
    cv2.imshow('video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    try:
        p.stdin.write(frame.tobytes())  # write raw BGR frame to FFmpeg's stdin
    except BrokenPipeError as e:
        # FFmpeg exited (bad arguments, lost connection, ...) -- stop cleanly.
        print(e)
        break

cap.release()
p.stdin.close()  # signals EOF so FFmpeg can flush and finish
p.wait()
运行该Python脚本时返回 "Errno 32 Broken pipe"(管道断开)。而在终端中直接运行ffmpeg命令则没有问题:
ffmpeg -i rtsp://10.0.1.7/media.sdp -c copy -f flv rtmp://127.0.0.1:1935/live/test
上面的命令工作得很好,我可以将输入流推到RTMP端点。但是我不能把处理过的帧写到运行ffmpeg的子进程。
如果我错过了什么,请告诉我。
发布于 2021-09-16 09:51:30
当将原始帧写入stdin管道时,不能使用 "-c", "copy"。
由 ret, frame = cap.read() 返回的 frame 是一个BGR颜色格式的uint8 NumPy数组(cap.read() 会对视频解码并转换颜色格式)。
在FFmpeg术语中,frame格式是"rawvideo“。
command应该告诉FFmpeg使用特定大小和像素格式的原始视频作为输入:
command 应该告诉FFmpeg:输入是具有特定大小和像素格式的原始视频,并且输出需要重新编码:
command = ['ffmpeg', '-f', 'rawvideo', '-s', f'{width}x{height}', '-pixel_format', 'bgr24', '-r', f'{fps}', '-i', '-', '-pix_fmt', 'yuv420p', '-c:v', 'libx264', '-bufsize', '64M', '-f', 'flv', rtmp_url]
备注:在执行发送端脚本之前,应先启动侦听器(接收端)应用程序。
推流合成的视频帧:
先从一个简单的代码示例开始,它只推流合成的帧(不捕获RTSP视频)。
下面的"自包含"代码示例在灰色背景上写入黄色文本,并将帧传递给FFmpeg进行RTMP推流:
import cv2
import numpy as np
import subprocess as sp

# Stream parameters for the synthetic test video.
width = 320
height = 240
fps = 5
rtmp_url = "rtmp://127.0.0.1:1935/live/test"

# Start the TCP server (listener) first, before the sending client.
# FFplay acts as the RTMP receiver for testing.
ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])

# FFmpeg reads raw BGR frames from stdin and encodes them to H.264 in FLV.
command = ['ffmpeg',
           '-re',                       # pace input reading to the frame rate
           '-f', 'rawvideo',            # input is raw video frames
           '-s', f'{width}x{height}',   # must match the NumPy frame size
           '-pixel_format', 'bgr24',    # OpenCV frames are BGR, 8 bits/channel
           '-r', f'{fps}',
           '-i', '-',                   # read input from stdin
           '-pix_fmt', 'yuv420p',       # widely supported output pixel format
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-f', 'flv',
           rtmp_url]

process = sp.Popen(command, stdin=sp.PIPE)  # FFmpeg sub-process for RTMP streaming

frame_counter = 0

while True:
    # Build a synthetic frame in BGR color format (3D NumPy array): gray background.
    frame = np.full((height, width, 3), 60, np.uint8)

    # Draw a frame counter so progress is visible in the player.
    cv2.putText(frame, 'TEXT ON VIDEO ' + str(frame_counter), (20, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)

    try:
        process.stdin.write(frame.tobytes())  # write raw frame to the stdin pipe
    except BrokenPipeError:
        # FFmpeg exited (e.g. the receiver closed) -- stop streaming cleanly.
        print('FFmpeg pipe closed')
        break

    cv2.imshow('frame', frame)  # show frame for testing

    # cv2.waitKey must be called after cv2.imshow; wait about one frame period.
    key = cv2.waitKey(int(round(1000 / fps)))
    if key == ord('q'):  # press 'q' to exit
        break

    frame_counter += 1

process.stdin.close()   # close stdin pipe (signals EOF to FFmpeg)
process.wait()          # wait for the FFmpeg sub-process to finish
ffplay_process.kill()   # forcefully close the FFplay sub-process
cv2.destroyAllWindows()  # Close OpenCV window
输出样本:

从RTSP流中捕获视频帧。
下面的代码示例从公共RTSP流中捕获视频帧,写入文本,并将这些帧传递给FFmpeg以进行RTMP流:
import cv2
import numpy as np
import subprocess as sp

# Use a public RTSP stream for testing.
rtsp_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
rtmp_url = "rtmp://127.0.0.1:1935/live/test"

cap = cv2.VideoCapture(rtsp_stream)

# Gather video info to pass to FFmpeg.
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Start the TCP server (listener) first, before the sending client.
# FFplay acts as the RTMP receiver for testing.
ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])

# FFmpeg reads raw BGR frames from stdin and encodes them to H.264 in FLV.
command = ['ffmpeg',
           '-re',                       # pace input reading to the frame rate
           '-f', 'rawvideo',            # input is raw video frames
           '-s', f'{width}x{height}',   # must match the captured frame size
           '-pixel_format', 'bgr24',    # OpenCV frames are BGR, 8 bits/channel
           '-r', f'{fps}',
           '-i', '-',                   # read input from stdin
           '-pix_fmt', 'yuv420p',       # widely supported output pixel format
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-f', 'flv',
           rtmp_url]

process = sp.Popen(command, stdin=sp.PIPE)  # FFmpeg sub-process for RTMP streaming

frame_counter = 0

while cap.isOpened():
    # Read one frame from the RTSP stream; stop on read failure (end of stream
    # or connection loss) BEFORE touching the frame.
    ret, frame = cap.read()
    if not ret:
        print("frame read failed")
        break

    # Overlay text plus a frame counter for showing progress.
    cv2.putText(frame, 'TEXT ON VIDEO', (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)
    cv2.putText(frame, str(frame_counter), (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)

    try:
        process.stdin.write(frame.tobytes())  # write raw frame to the stdin pipe
    except BrokenPipeError:
        # FFmpeg exited (e.g. the receiver closed) -- stop streaming cleanly.
        print('FFmpeg pipe closed')
        break

    cv2.imshow('frame', frame)  # show frame for testing

    # cv2.waitKey must be called after cv2.imshow.
    key = cv2.waitKey(1)
    if key == ord('q'):  # press 'q' to exit
        break

    frame_counter += 1

cap.release()
process.stdin.close()   # close stdin pipe (signals EOF to FFmpeg)
process.wait()          # wait for the FFmpeg sub-process to finish
ffplay_process.kill()   # forcefully close the FFplay sub-process
cv2.destroyAllWindows()  # Close OpenCV window
输出样本:

https://stackoverflow.com/questions/69188430
复制相似问题