通過python的opencv獲取rtsp視訊流,然後用websocket實時傳送base64圖片到前端html顯示。缺點是無法播放聲音。

1.安裝opencv庫

pip install opencv-python

2.安裝websocket_server庫

pip install websocket_server

websocket_server庫使python作為websocket服務端,向客戶端傳送資料。

3.websocket_server使用

# Server Port
PORT=8124
# Create the websocket server, bound to localhost only
server = WebsocketServer(PORT,'127.0.0.1')
# Start the capture/sender worker threads (defined later in the article)
from_vedio()
# A client connected
server.set_fn_new_client(new_client)
# A client disconnected
server.set_fn_client_left(client_left)
# A message was received from a client
server.set_fn_message_received(message_received)
# Start listening (blocks forever)
server.run_forever()

4.opencv讀取視訊

在這裡用了一個執行緒實時讀取視訊幀並複製到全域變數frame

def vedio_thread2(n):
	"""Capture loop: keep the global `frame` updated with the newest image
	from the video source, reconnecting whenever a read fails.

	`n` is unused; it only satisfies the Thread args signature.
	"""
	global camera1
	global frame
	camera1 = cv2.VideoCapture(rtsp_path)
	while True:
		_, img_bgr = camera1.read()
		if img_bgr is None:
			# Release the dead capture before reopening it, otherwise the
			# old connection/file descriptor leaks on every retry.
			camera1.release()
			camera1 = cv2.VideoCapture(rtsp_path)
			print('丟失幀')
		else:
			frame = img_bgr

5.websocket實時傳送到前端HTML顯示

這裡把圖片轉換成base64傳送

def vedio_thread1(n):
	"""Sender loop (~20 fps): JPEG-encode the latest captured frame and
	broadcast it to all websocket clients as a base64 data URI.

	`n` is unused; it only satisfies the Thread args signature.
	"""
	print('send')
	while True:
		# Only do the (relatively expensive) encode when someone is
		# listening AND a frame has actually been captured; `frame` is
		# None if "1.jpg" is missing and no frame has arrived yet.
		if len(server.clients) > 0 and frame is not None:
			ok, image = cv2.imencode('.jpg', frame)
			if ok:
				s = base64.b64encode(image).decode()
				server.send_message_to_all('data:image/jpeg;base64,%s' % s)
		time.sleep(0.05)

6.HTML顯示畫面

這裡把websocket接收到的base64資料賦值給img的src達到類似視訊畫面顯示

var ws;
var pyip = '127.0.0.1';

// Open the websocket to the python backend and wire up the handlers.
// Each message is a base64 data URI; assigning it to the <img> src
// renders the stream as video.
function startWS() {
    console.log('start once again');
    ws = new WebSocket("ws://" + pyip + ":8124");
    ws.onopen = function (msg) {
        console.log('webSocket opened');
    };
    ws.onmessage = function (message) {
        $("#img").attr("src", message.data);
    };
    ws.onerror = function (error) {
        // onerror receives an Event, not an Error: it has no
        // name/number properties, so log the event object itself.
        console.log('webSocket error:', error);
    };
    ws.onclose = function () {
        console.log('webSocket closed');
    };
}
startWS();

後端程式碼

from websocket_server import WebsocketServer
import threading
import cv2
import base64
import time
# Shared state between the capture thread and the sender thread.
camera1=None  # cv2.VideoCapture handle; replaced on frame loss or on client request
frame=cv2.imread("1.jpg", cv2.IMREAD_COLOR)  # latest frame; seeded with a placeholder (None if "1.jpg" is missing)
rtsp_path=0  # video source: 0 = default local camera, or an RTSP URL string

# Called for every client connecting (after handshake)
# Invoked once per client, after the websocket handshake completes.
def new_client(client, server):
	client_id = client['id']
	print("New client connected and was given id %d" % client_id)
	# Announce the newcomer to every connected client.
	server.send_message_to_all("Hey all, a new client has joined us")

# Called for every client disconnecting
# Invoked once per client when its connection closes.
def client_left(client, server):
	# Just log which client dropped; nothing to clean up server-side.
	gone_id = client['id']
	print("Client(%d) disconnected" % gone_id)

# Called when a client sends a message
# Called when a client sends a message; the message is treated as a new
# video source (device index or RTSP URL) to switch capture to.
def message_received(client, server, message):
	# Truncate only the logged copy. The original code truncated `message`
	# itself and then passed the mangled string (…'..') to VideoCapture,
	# corrupting any source path longer than 200 characters.
	shown = message
	if len(shown) > 200:
		shown = shown[:200]+'..'
	print("Client(%d) said: %s" % (client['id'], shown))
	global camera1
	# Reopen the capture on the client-supplied source, verbatim.
	camera1 = cv2.VideoCapture(message)
	# Broadcasting the message back is intentionally disabled.
	#server.send_message_to_all(message)
def from_vedio():
	"""Spawn the sender (vedio_thread1) and capture (vedio_thread2) workers."""
	for worker in (vedio_thread1, vedio_thread2):
		threading.Thread(target=worker, args=(1,)).start()
	print('start')
def vedio_thread1(n):
	"""Sender loop (~20 fps): JPEG-encode the latest captured frame and
	broadcast it to all websocket clients as a base64 data URI.

	`n` is unused; it only satisfies the Thread args signature.
	"""
	print('send')
	while True:
		# Only do the (relatively expensive) encode when someone is
		# listening AND a frame has actually been captured; `frame` is
		# None if "1.jpg" is missing and no frame has arrived yet, and
		# imencode would raise on a None input.
		if len(server.clients) > 0 and frame is not None:
			ok, image = cv2.imencode('.jpg', frame)
			if ok:
				s = base64.b64encode(image).decode()
				server.send_message_to_all('data:image/jpeg;base64,%s' % s)
		time.sleep(0.05)
def vedio_thread2(n):
	"""Capture loop: keep the global `frame` updated with the newest image
	from the video source, reconnecting whenever a read fails.

	`n` is unused; it only satisfies the Thread args signature.
	"""
	global camera1
	global frame
	camera1 = cv2.VideoCapture(rtsp_path)
	while True:
		_, img_bgr = camera1.read()
		if img_bgr is None:
			# Release the dead capture before opening a new one, otherwise
			# the old connection/file descriptor leaks on every retry.
			camera1.release()
			camera1 = cv2.VideoCapture(rtsp_path)
			print('丟失幀')
		else:
			frame = img_bgr

# Server Port
PORT=8124
# Create the websocket server, bound to localhost only.
server = WebsocketServer(PORT,'127.0.0.1')
# Start the worker threads; they reference the `server` created just above,
# so this must run after the WebsocketServer is constructed.
from_vedio()
# A client connected
server.set_fn_new_client(new_client)
# A client disconnected
server.set_fn_client_left(client_left)
# A message was received from a client
server.set_fn_message_received(message_received)
# Start listening (blocks forever)
server.run_forever()

此方法能簡單實現html播放rtsp