
Screen recording with sound using Python + opencv + pyaudio

Out of personal interest and practical need, I decided to write a screen-recording script in Python. I need to watch some encrypted videos that require logging in every single time, which is a hassle, so I decided to record the whole playback with my own script and just rewatch my own recordings afterwards. It also ties in with what I have been studying recently, so it is a good chance to practice Python and consolidate what I have learned.
After a fair amount of searching, I settled on Python + opencv + pyaudio for the job. Almost every screen-recording example I found online records without sound, whereas what I want is screen recording with sound. Let's build it step by step.

Audio recording

To get comfortable with PyAudio first, here is its basic blocking-mode example, which simply plays back an existing WAV file; the actual recording code follows in the next section.

import pyaudio
import wave
import sys

CHUNK = 1024
if len(sys.argv) < 2:
    print("Plays a wave file.\n\nUsage: %s filename.wav" % sys.argv[0])
    sys.exit(-1)

wf = wave.open(sys.argv[1], 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True)

# Read the file chunk by chunk and write it to the stream (blocking playback)
data = wf.readframes(CHUNK)
while len(data) > 0:  # readframes() returns bytes, so check the length rather than comparing with ''
    stream.write(data)
    data = wf.readframes(CHUNK)

stream.stop_stream()
stream.close()

p.terminate()
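The script takes the WAV file name on the command line, so once something has been recorded you can play it back with, for example, python play.py output.wav (play.py being whatever name you saved the script under); it just streams the file to the default output device.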

Audio recording with a concise callback function

import pyaudio
import wave
import time
import sys

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 10
WAVE_OUTPUT_FILENAME = "output.wav"

p = pyaudio.PyAudio()

wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)

time_count = 0

def callback(in_data, frame_count, time_info, status):
    # Called from PyAudio's internal thread for every buffer of captured audio
    wf.writeframes(in_data)
    if time_count < RECORD_SECONDS:
        return (in_data, pyaudio.paContinue)
    else:
        return (in_data, pyaudio.paComplete)

stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                input=True,
                stream_callback=callback)

stream.start_stream()
print("* recording")

# The callback runs in the background; the main thread just counts off the seconds
while stream.is_active():
    time.sleep(1)
    time_count += 1

stream.stop_stream()
stream.close()
wf.close()
p.terminate()
print("* recording done!")
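For comparison, the more familiar blocking-read version of the same recording looks like this. It is a minimal sketch based on PyAudio's standard stream.read pattern; the constants mirror the ones above and the output name output_blocking.wav is just an arbitrary choice to avoid clobbering output.wav.

import pyaudio
import wave

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 10

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                input=True, frames_per_buffer=CHUNK)

print("* recording")
frames = []
# Read CHUNK frames at a time until RECORD_SECONDS worth of audio has been collected
for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
    frames.append(stream.read(CHUNK))
print("* done")

stream.stop_stream()
stream.close()
sample_width = p.get_sample_size(FORMAT)
p.terminate()

wf = wave.open("output_blocking.wav", 'wb')  # arbitrary output name for this sketch
wf.setnchannels(CHANNELS)
wf.setsampwidth(sample_width)
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()

The callback version above stays "concise" because the main thread only sleeps and counts seconds while PyAudio pushes the data to the WAV file in the background; the blocking version has to sit in the read loop itself.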

Video recording (no sound)

from PIL import ImageGrab
import numpy as np
import cv2

image = ImageGrab.grab()  # grab the current screen
width = image.size[0]
height = image.size[1]
print("width:", width, "height:", height)
print("image mode:",image.mode)
k = np.zeros((width, height), np.uint8)  # not actually used below
fourcc = cv2.VideoWriter_fourcc(*'XVID')  # codec / fourcc for the encoder
video = cv2.VideoWriter('test.avi', fourcc, 25, (width, height))
# Output file is test.avi with a frame rate of 25; both can be changed as needed
while True:
    img_rgb = ImageGrab.grab()
    img_bgr = cv2.cvtColor(np.array(img_rgb), cv2.COLOR_RGB2BGR)  # convert to OpenCV's BGR format
    video.write(img_bgr)
    cv2.imshow('imm', img_bgr)              # live preview window
    if cv2.waitKey(1) & 0xFF == ord('q'):   # press q in the preview window to stop
        break
video.release()
cv2.destroyAllWindows()
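A note on speed: before adding audio it is worth checking how fast ImageGrab plus the colour conversion actually run on your machine, since the combined script below assumes roughly 10 fps. A minimal timing sketch (the frame count of 50 is an arbitrary choice, and it ignores the extra cost of video.write and imshow, so the real loop will be a bit slower):

from PIL import ImageGrab
import numpy as np
import cv2
import time

N_FRAMES = 50  # arbitrary sample size for the measurement
start = time.time()
for _ in range(N_FRAMES):
    img = ImageGrab.grab()
    frame = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
elapsed = time.time() - start
print("captured %d frames in %.2f s -> %.2f fps" % (N_FRAMES, elapsed, N_FRAMES / elapsed))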

Merging the recorded audio and video into a video with sound

Recording 200 frames to an MP4 with audio, single-threaded

import wave
import pyaudio
from PIL import ImageGrab
import numpy as np
import cv2
from moviepy.editor import *
from moviepy.audio.fx import all
import time

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
WAVE_OUTPUT_FILENAME = "output.wav"

p = pyaudio.PyAudio()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
audio_record_flag = True
def callback(in_data, frame_count, time_info, status):
    wf.writeframes(in_data)
    if audio_record_flag:
        return (in_data, pyaudio.paContinue)
    else:
        return (in_data, pyaudio.paComplete)
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                input=True,
                stream_callback=callback)
image = ImageGrab.grab()  # grab the current screen
width = image.size[0]
height = image.size[1]
print("width:", width, "height:", height)
print("image mode:",image.mode)
k = np.zeros((width, height), np.uint8)  # not actually used below

fourcc = cv2.VideoWriter_fourcc(*'XVID')  # codec / fourcc for the encoder
video = cv2.VideoWriter('test.mp4', fourcc, 9.5, (width, height))
# Measured in practice, the single-threaded loop tops out at about 10 frames/second and fluctuates, so 9.5 fps is used here.
# If the declared frame rate does not match the actual capture rate, the video and audio lengths will drift apart.

print("video recording!!!!!")
stream.start_stream()
print("audio recording!!!!!")
record_count = 0
while True:
    img_rgb = ImageGrab.grab()
    img_bgr = cv2.cvtColor(np.array(img_rgb), cv2.COLOR_RGB2BGR)  # convert to OpenCV's BGR format
    video.write(img_bgr)
    record_count += 1
    if(record_count > 200):
        break
    print(record_count, time.time())

audio_record_flag = False
while stream.is_active():
    time.sleep(1)

stream.stop_stream()
stream.close()
wf.close()
p.terminate()
print("audio recording done!!!!!")

video.release()
cv2.destroyAllWindows()
print("video recording done!!!!!")

print("video audio merge!!!!!")
audioclip = AudioFileClip("output.wav")
videoclip = VideoFileClip("test.mp4")
videoclip2 = videoclip.set_audio(audioclip)
video = CompositeVideoClip([videoclip2])
video.write_videofile("test2.mp4",codec='mpeg4')
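As an aside, if pulling in moviepy feels heavy just for muxing, the same merge can be done by calling the ffmpeg command-line tool directly. This is only a sketch, assuming ffmpeg is installed and on the PATH; the output name test2_ffmpeg.mp4 is arbitrary.

import subprocess

# Copy the video stream as-is and encode the WAV to AAC into a new MP4 container
subprocess.run([
    "ffmpeg", "-y",
    "-i", "test.mp4",     # video recorded by OpenCV
    "-i", "output.wav",   # audio recorded by PyAudio
    "-c:v", "copy",
    "-c:a", "aac",
    "test2_ffmpeg.mp4",   # arbitrary output name for this sketch
], check=True)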

It looks like raising the frame rate will require a queue plus multithreading; a rough sketch of that idea follows below, and a proper version can wait for a later update. Still, using OpenCV for screen recording feels a bit odd, since OpenCV is really meant for image and video analysis, so I should get back to tinkering with what OpenCV is actually for.
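For reference, the queue-plus-thread idea could look roughly like this. It is an untested sketch: one thread grabs screenshots into a queue.Queue, the main thread pops them and writes the video; the file name test_threaded.avi, the 15 fps value, and the 10-second stop condition are all made up for illustration, and whether it actually helps depends on where the bottleneck is (the grab itself may be the slow part).

import queue
import threading
import time

from PIL import ImageGrab
import numpy as np
import cv2

frame_queue = queue.Queue(maxsize=32)  # bounded so the grabber cannot outrun memory
stop_flag = False

def grab_frames():
    # Producer: capture the screen as fast as possible and push BGR frames
    while not stop_flag:
        img = ImageGrab.grab()
        frame_queue.put(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))

size = ImageGrab.grab().size  # (width, height), matching what VideoWriter expects
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter('test_threaded.avi', fourcc, 15, size)  # fps is a guess; measure it as above

t = threading.Thread(target=grab_frames, daemon=True)
t.start()

start = time.time()
written = 0
while time.time() - start < 10:   # record for roughly 10 seconds
    video.write(frame_queue.get())
    written += 1

stop_flag = True
t.join(timeout=2)  # daemon thread, so it is fine if it is still blocked on put()
video.release()
print("wrote %d frames, ~%.1f fps" % (written, written / (time.time() - start)))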