
ROS + iFlytek (科大訊飛) Voice = Make Your Robot Listen and Speak --- (2) Speaking


Basic environment setup

Create a workspace

$ mkdir -p ~/catkin_ws/src
$ cd ~/catkin_ws/src
$ catkin_init_workspace
  • First build
$ cd ~/catkin_ws/
$ catkin_make
  • Create the voice package
$ cd src/
$ catkin_create_pkg xf_voice std_msgs rospy roscpp

Install the voice library

  • Create a /Robot folder and change its owner to the current user (the chown command below uses the username zhouge; substitute your own)
$ sudo mkdir /Robot
$ sudo chown zhouge /Robot/
  • Then put libmsc.so from the voice SDK you downloaded into the /Robot/voice/lib/ folder
  • Also put the SDK's bin directory and its lib and inc directories under /Robot/voice/
  • Install the mplayer player
  • Create the /Robot/voice/wav and /Robot/cmd folders; the former stores temporary audio files, the latter stores the named pipe used for inter-process communication.
$ sudo apt-get install mplayer
$ mkdir /Robot/voice/wav
$ mkdir /Robot/cmd
  • At this point your /Robot folder should contain the two folders cmd and voice, and voice should contain the four folders bin, inc, lib, and wav
  • Then copy the voice library into the system library folder /usr/lib/ (a small login smoke test for this setup is sketched after this list)
$ sudo cp /Robot/voice/lib/libmsc.so /usr/lib/
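  • Before building the ROS node, you can optionally verify that the headers and libmsc.so are wired up correctly. The following stand-alone check is a minimal sketch that is not part of the original tutorial: it assumes you replace the YOUR_APPID placeholder with the appid bound to your own SDK, and it can be compiled with something like g++ msp_login_check.cpp -lmsc -ldl -lpthread -lrt -o msp_login_check.

// msp_login_check.cpp -- minimal smoke test for the installed iFlytek MSC library.
#include <stdio.h>
#include "/Robot/voice/inc/msp_cmn.h"
#include "/Robot/voice/inc/msp_errors.h"

int main()
{
    // Replace YOUR_APPID with the appid that came with your downloaded SDK.
    const char* login_params = "appid = YOUR_APPID, work_dir = .";
    int ret = MSPLogin(NULL, NULL, login_params);
    if (MSP_SUCCESS != ret)
    {
        printf("MSPLogin failed, error code: %d\n", ret);
        return ret;
    }
    printf("MSPLogin OK, libmsc.so and the headers are installed correctly\n");
    MSPLogout();
    return 0;
}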

Write the speech synthesis node

Source code

  • Go back to the workspace directory, ~/catkin_ws
  • Create a new file xf_tts.cpp in the ~/catkin_ws/src/xf_voice/src directory and copy the following code into it
  • Note: change the appid in the code to your own.
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include "/Robot/voice/inc/qtts.h"
#include "/Robot/voice/inc/msp_cmn.h"
#include "/Robot/voice/inc/msp_errors.h"
#include "ros/ros.h"
#include "std_msgs/String.h" #include <sstream> #include <sys/types.h> #include <sys/stat.h> #define SAYIT system("cp /Robot/voice/wav/say.wav /Robot/voice/wav/temp.wav>/Robot/cmd/Mplayer_cmd");system("echo loadfile /Robot/voice/wav/temp.wav>/Robot/cmd/Mplayer_cmd") typedef int SR_DWORD; typedef short int SR_WORD ; /* wav音訊頭部格式 */ typedef struct _wave_pcm_hdr { char riff[4]; // = "RIFF" int size_8; // = FileSize - 8 char wave[4]; // = "WAVE" char fmt[4]; // = "fmt " int fmt_size; // = 下一個結構體的大小 : 16 short int format_tag; // = PCM : 1 short int channels; // = 通道數 : 1 int samples_per_sec; // = 取樣率 : 8000 | 6000 | 11025 | 16000 int avg_bytes_per_sec; // = 每秒位元組數 : samples_per_sec * bits_per_sample / 8 short int block_align; // = 每取樣點位元組數 : wBitsPerSample / 8 short int bits_per_sample; // = 量化位元數: 8 | 16 char data[4]; // = "data"; int data_size; // = 純資料長度 : FileSize - 44 } wave_pcm_hdr; /* 預設wav音訊頭部資料 */ wave_pcm_hdr default_wav_hdr = { { 'R', 'I', 'F', 'F' }, 0, {'W', 'A', 'V', 'E'}, {'f', 'm', 't', ' '}, 16, 1, 1, 16000, 32000, 2, 16, {'d', 'a', 't', 'a'}, 0 }; /* 文字合成 */ int text_to_speech(const char* src_text, const char* des_path, const char* params) { int ret = -1; FILE* fp = NULL; const char* sessionID = NULL; unsigned int audio_len = 0; wave_pcm_hdr wav_hdr = default_wav_hdr; int synth_status = MSP_TTS_FLAG_STILL_HAVE_DATA; if (NULL == src_text || NULL == des_path) { printf("params is error!\n"); return ret; } fp = fopen(des_path, "wb"); if (NULL == fp) { printf("open %s error.\n", des_path); return ret; } /* 開始合成 */ sessionID = QTTSSessionBegin(params, &ret); if (MSP_SUCCESS != ret) { printf("QTTSSessionBegin failed, error code: %d.\n", ret); fclose(fp); return ret; } ret = QTTSTextPut(sessionID, src_text, (unsigned int)strlen(src_text), NULL); if (MSP_SUCCESS != ret) { printf("QTTSTextPut failed, error code: %d.\n",ret); QTTSSessionEnd(sessionID, "TextPutError"); fclose(fp); return ret; } printf("正在合成 ...\n"); fwrite(&wav_hdr, sizeof(wav_hdr) ,1, fp); //新增wav音訊頭,使用取樣率為16000 while (1) { /* 獲取合成音訊 */ const void* data = QTTSAudioGet(sessionID, &audio_len, &synth_status, &ret); if (MSP_SUCCESS != ret) break; if (NULL != data) { fwrite(data, audio_len, 1, fp); wav_hdr.data_size += audio_len; //計算data_size大小 } if (MSP_TTS_FLAG_DATA_END == synth_status) break; }//合成狀態synth_status取值請參閱《訊飛語音雲API文件》 printf("\n"); if (MSP_SUCCESS != ret) { printf("QTTSAudioGet failed, error code: %d.\n",ret); QTTSSessionEnd(sessionID, "AudioGetError"); fclose(fp); return ret; } /* 修正wav檔案頭資料的大小 */ wav_hdr.size_8 += wav_hdr.data_size + (sizeof(wav_hdr) - 8); /* 將修正過的資料寫回檔案頭部,音訊檔案為wav格式 */ fseek(fp, 4, 0); fwrite(&wav_hdr.size_8,sizeof(wav_hdr.size_8), 1, fp); //寫入size_8的值 fseek(fp, 40, 0); //將檔案指標偏移到儲存data_size值的位置 fwrite(&wav_hdr.data_size,sizeof(wav_hdr.data_size), 1, fp); //寫入data_size的值 fclose(fp); fp = NULL; /* 合成完畢 */ ret = QTTSSessionEnd(sessionID, "Normal"); if (MSP_SUCCESS != ret) { printf("QTTSSessionEnd failed, error code: %d.\n",ret); } return ret; } int xf_tts(const char* text,const char *filename) { int ret = MSP_SUCCESS; const char* login_params = "appid = 573bdbff, work_dir = .";//登入引數,appid與msc庫繫結,請勿隨意改動 const char* session_begin_params = "engine_type =local, text_encoding = UTF8, tts_res_path = fo|/Robot/voice/bin/msc/res/tts/xiaoyan.jet;fo|/Robot/voice/bin/msc/res/tts/common.jet, sample_rate = 16000, speed = 50, volume = 50, pitch = 50, rdn = 2"; /* 使用者登入 */ ret = MSPLogin(NULL, NULL, login_params); //第一個引數是使用者名稱,第二個引數是密碼,第三個引數是登入引數,使用者名稱和密碼可在http://open.voicecloud.cn註冊獲取 if (MSP_SUCCESS != 
ret) { printf("MSPLogin failed, error code: %d.\n", ret); goto exit ;//登入失敗,退出登入 } /* 文字合成 */ printf("開始合成 ...\n"); ret = text_to_speech(text, filename, session_begin_params); if (MSP_SUCCESS != ret) { printf("text_to_speech failed, error code: %d.\n", ret); } printf("合成完畢\n"); exit: MSPLogout(); //退出登入 return 0; } void xfcallback(const std_msgs::String::ConstPtr& msg) { char cmd[2000]; std::cout<<"I heard,I will say:"<<msg->data.c_str()<<std::endl; xf_tts(msg->data.c_str(),"/Robot/voice/wav/say.wav"); sprintf(cmd,"echo %s>/Robot/cmd/saywords",msg->data.c_str()); popen(cmd,"r"); SAYIT; } int main(int argc,char **argv) { unlink("/Robot/cmd/Mplayer_cmd"); mkfifo("/Robot/cmd/Mplayer_cmd", 0777); popen("mplayer -quiet -slave -input file=/Robot/cmd/Mplayer_cmd -idle","r"); printf("Mplayer Run Success"); const char* filename = "/Robot/voice/wav/say.wav"; //合成的語音檔名稱 const char* text = "語音合成模組啟動成功!"; //合成文字 xf_tts(text,filename); SAYIT; ros::init(argc,argv,"xf_tts"); ros::NodeHandle n; ros::Subscriber sub =n.subscribe("xfsaywords",1000,xfcallback); ros::spin(); return 0; }
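  • How playback works: main() starts mplayer in slave mode with its command input bound to the FIFO /Robot/cmd/Mplayer_cmd, and the SAYIT macro simply echoes a loadfile command into that pipe. The fragment below is only an illustrative sketch of the same mechanism (the helper name say_file is mine, not part of the tutorial), writing the command to the FIFO directly instead of going through system().

// fifo_say.cpp -- sketch of the mplayer slave-mode control used by xf_tts.
// Assumes mplayer is already running with:
//   mplayer -quiet -slave -input file=/Robot/cmd/Mplayer_cmd -idle
#include <stdio.h>

// Illustrative helper (not in the tutorial): ask the running mplayer to play a wav file
// by writing a "loadfile" command into the FIFO it reads commands from.
static int say_file(const char* wav_path)
{
    FILE* fifo = fopen("/Robot/cmd/Mplayer_cmd", "w");
    if (NULL == fifo)
    {
        perror("open /Robot/cmd/Mplayer_cmd");
        return -1;
    }
    fprintf(fifo, "loadfile %s\n", wav_path);
    fclose(fifo);  // flush the command; mplayer keeps running because of -idle
    return 0;
}

int main()
{
    return say_file("/Robot/voice/wav/say.wav");
}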

CMakeLists.txt code

  • Add the following lines to the CMakeLists.txt of the xf_voice package

add_executable(xf_tts src/xf_tts.cpp)
target_link_libraries(xf_tts ${catkin_LIBRARIES} -lmsc  -ldl -lpthread -lm -lrt)
add_dependencies(xf_tts xf_voice_generate_messages_cpp)
  • At this point, my CMakeLists.txt looks like this:
cmake_minimum_required(VERSION 2.8.3)
project(xf_voice)
find_package(catkin REQUIRED COMPONENTS
  roscpp
  rospy
  std_msgs
)
catkin_package()
include_directories(include ${catkin_INCLUDE_DIRS})

add_executable(xf_tts src/xf_tts.cpp)
target_link_libraries(xf_tts ${catkin_LIBRARIES} -lmsc  -ldl -lpthread -lm -lrt)
add_dependencies(xf_tts xf_voice_generate_messages_cpp)

Build

  • Go back to the catkin_ws directory
  • Then run catkin_make
$ cd ~/catkin_ws/
$ catkin_make

Debugging and running

Open three terminals

  • Terminal 1: start the ROS master
$ roscore
  • Terminal 2: run the speech synthesis node
$ cd ~/catkin_ws/
$ source devel/setup.sh
$ rosrun xf_voice xf_tts
  • Terminal 3: publish a message to be spoken (a small C++ publisher that does the same is sketched after these commands)
$ cd ~/catkin_ws/
$ source devel/setup.sh
$ rostopic list
$ rostopic pub /xfsaywords std_msgs/String "語音合成節點測試"
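  • If you prefer to publish from code instead of rostopic, the following is a minimal sketch of a publisher for the xfsaywords topic (the node name xf_talker_demo is mine, not part of the original tutorial); you would add it to CMakeLists.txt the same way as xf_tts.

// xf_talker_demo.cpp -- minimal publisher that sends one sentence to the xfsaywords topic.
#include "ros/ros.h"
#include "std_msgs/String.h"

int main(int argc, char** argv)
{
    ros::init(argc, argv, "xf_talker_demo");  // demo node name (an assumption; pick any)
    ros::NodeHandle n;
    ros::Publisher pub = n.advertise<std_msgs::String>("xfsaywords", 10);

    ros::Duration(1.0).sleep();  // give the subscriber a moment to connect

    std_msgs::String msg;
    msg.data = "語音合成節點測試";  // the same test sentence as the rostopic example above
    pub.publish(msg);

    ros::spinOnce();
    ros::Duration(1.0).sleep();  // let the message go out before the node exits
    return 0;
}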

Results

  • After the test above, you should already be able to hear the synthesized speech.
    (image)
  • Run rqt_graph; the current node graph should look similar to the figure below.
    (image)