我正在寻找一种方法来知道espeak什么时候结束它的演讲。有人告诉我使用espeakEVENT_MSG_TERMINATED。但是,当我试图将这个部分放到我的代码中时,它会给出以下错误:
#include <espeak/speak_lib.h>
espeak_EVENT_TYPE;
if( espeak_EVENT_TYPE == espeakEVENT_MSG_TERMINATED)
{
do something;
}
application.cpp:31:1: error: declaration does not declare anything [-fpermissive]
 espeak_EVENT_TYPE;
 ^
application.cpp: In function 'void (char*)':
application.cpp:116:27: error: expected primary-expression before '==' token
 if( espeak_EVENT_TYPE == espeakEVENT_MSG_TERMINATED)
编辑:我使用这个简单的代码来使用espeak
#include <string.h>
#include <malloc.h>
#include <espeak/speak_lib.h>
// --- Global eSpeak state used by main() below ---
espeak_POSITION_TYPE position_type;   // position unit passed to espeak_Synth
espeak_AUDIO_OUTPUT output;           // audio output mode, set in main()
char *path=NULL;                      // espeak-data directory; NULL = default location
int Buflength = 1000, Options=0;      // synth buffer length (ms) and init option flags
void* user_data;                      // opaque pointer forwarded to the synth callback
t_espeak_callback *SynthCallback;     // unused in this listing
espeak_PARAMETER Parm;                // unused in this listing
char Voice[] = {"English"};           // voice name for espeak_SetVoiceByName
char text[30] = {"this is an english text"};
// unique_identifier has file scope, so it is zero-initialized (NULL) when
// passed to espeak_Synth below.
unsigned int Size,position=0, end_position=0, flags=espeakCHARS_AUTO, *unique_identifier;
// Minimal example: initialise eSpeak, select a voice, and speak one string.
int main(int argc, char* argv[] )
{
output = AUDIO_OUTPUT_PLAYBACK;
// NOTE(review): the 4th parameter of espeak_Initialize is the Options
// bit-field; AUDIO_OUTPUT_SYNCHRONOUS is an espeak_AUDIO_OUTPUT enum
// value, so passing it here looks unintended — the commented-out
// argument suggests Options was meant. Confirm against speak_lib.h.
espeak_Initialize(output, Buflength, path, AUDIO_OUTPUT_SYNCHRONOUS ); //Options );
espeak_SetVoiceByName(Voice);
const char *langNativeString = "en";
espeak_VOICE voice = {0};   // zero-initialise every field of the struct
// memset(&voice, 0, sizeof(espeak_VOICE));
voice.languages = langNativeString;
voice.name = "US";
voice.variant = 2;
voice.gender = 1;
espeak_SetVoiceByProperties(&voice);
Size = strlen(text)+1;      // include the terminating NUL
espeak_Synth( text, Size, position, position_type, end_position, flags,unique_identifier, user_data );
espeak_Synchronize( );      // block until playback has finished
return 0;
}Edit2:
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <sphinxbase/err.h>
#include <sphinxbase/ad.h>
#include <espeak/speak_lib.h>
#include <string>
#include <iostream>
using namespace std;
#include "pocketsphinx.h"
// --- Decoder / audio-device globals shared by the functions below ---
static ps_decoder_t *ps;     // pocketsphinx decoder handle, created in main()
static cmd_ln_t *config;     // pocketsphinx configuration, created in main()
static FILE *rawfd;          // unused in this listing
ad_rec_t *ad;                // microphone device; also used by the eSpeak callback
espeak_POSITION_TYPE position_type;
espeak_AUDIO_OUTPUT output;
char *path=NULL;             // espeak-data directory; NULL = default location
int Buflength = 1000, Options=0;
void* user_data;             // opaque pointer forwarded to the synth callback
char Voice[] = {"English"};
// unique_identifier has file scope, so it is zero-initialized (NULL).
unsigned int Size,position=0, end_position=0, flags=espeakCHARS_AUTO, *unique_identifier;
t_espeak_callback *SynthCallback;   // unused; the callback is registered directly
espeak_PARAMETER Parm;              // unused in this listing
/* Initialise the eSpeak engine and select an English (US) voice.
 * NOTE(review): the 4th espeak_Initialize argument is the Options
 * bit-field; AUDIO_OUTPUT_SYNCHRONOUS looks unintended — confirm. */
static void initFuncs()
{
    output = AUDIO_OUTPUT_PLAYBACK;
    espeak_Initialize(output, Buflength, path, AUDIO_OUTPUT_SYNCHRONOUS );
    espeak_SetVoiceByName(Voice);

    /* Narrow the selection further by explicit voice properties. */
    espeak_VOICE voice;
    memset(&voice, 0, sizeof voice);
    voice.languages = "en";
    voice.name = "US";
    voice.variant = 2;
    voice.gender = 1;
    espeak_SetVoiceByProperties(&voice);
}
/* eSpeak synthesis callback.  Each invocation delivers an array of
 * events terminated by espeakEVENT_LIST_TERMINATED; when the event
 * marking the end of the whole spoken message is seen, microphone
 * capture is restarted.  Returning 0 lets synthesis continue. */
int receive_espeak_events(short *wav, int numsamples, espeak_EVENT *event)
{
    for (; event->type != espeakEVENT_LIST_TERMINATED; ++event) {
        if (event->type == espeakEVENT_MSG_TERMINATED) {
            ad_start_rec(ad);   /* message finished: resume recording */
        }
    }
    return 0;   /* continue speaking */
}
/* Block the calling thread for roughly @ms milliseconds, using
 * select() with no file descriptors as a portable sub-second sleep. */
static void sleep_msec(int32 ms)
{
    struct timeval timeout;

    timeout.tv_sec = 0;
    timeout.tv_usec = ms * 1000;
    (void) select(0, NULL, NULL, NULL, &timeout);
}
/* Speak the recognised hypothesis @hyp through eSpeak and block until
 * synthesis has completed.  The callback registered here restarts
 * microphone capture once the message has been fully spoken. */
static void speech(char* hyp)
{
    espeak_SetSynthCallback(receive_espeak_events);
    Size = strlen(hyp)+1;   /* include the terminating NUL */
    espeak_Synth( hyp, Size, position, position_type, end_position, flags,unique_identifier, user_data );
    espeak_Synchronize( );
}
/* Continuously capture audio from the microphone, feed it to the
 * pocketsphinx decoder, and hand every recognised utterance to speech().
 *
 * FIX: the original declared a local `ad_rec_t *ad;` that shadowed the
 * file-scope `ad` used by the eSpeak callback (receive_espeak_events).
 * The callback therefore called ad_start_rec() on a NULL handle, so
 * recording was never resumed after speaking and the next ad_read()
 * failed with "Failed to read audio".  The shadowing local is removed
 * so this function and the callback share one device handle. */
static void recognize_from_microphone()
{
    int16 adbuf[2048];
    uint8 utt_started, in_speech;
    int32 k;
    char *hyp = 0;

    if ((ad = ad_open_dev(cmd_ln_str_r(config, "-adcdev"),
                          (int) cmd_ln_float32_r(config, "-samprate"))) == NULL)
        E_FATAL("Failed to open audio device\n");
    if (ad_start_rec(ad) < 0)
        E_FATAL("Failed to start recording\n");
    if (ps_start_utt(ps) < 0)
        E_FATAL("Failed to start utterance\n");
    utt_started = FALSE;
    E_INFO("Ready....\n");

    for (;;)
    {
        if ((k = ad_read(ad, adbuf, 2048)) < 0)
            E_FATAL("Failed to read audio\n");
        ps_process_raw(ps, adbuf, k, FALSE, FALSE);
        in_speech = ps_get_in_speech(ps);

        if (in_speech && !utt_started)
        {
            utt_started = TRUE;
            E_INFO("Listening...\n");
        }
        if (!in_speech && utt_started)
        {
            /* Silence after speech: close the utterance and fetch the
             * best hypothesis. */
            ps_end_utt(ps);
            hyp = (char*) ps_get_hyp(ps, NULL);
            if (hyp != NULL)
            {
                ad_stop_rec(ad);   /* pause capture while we speak */
                speech(hyp);       /* callback resumes capture */
                printf("%s\n", hyp);
                fflush(stdout);
            }
            if (ps_start_utt(ps) < 0)
                E_FATAL("Failed to start utterance\n");
            utt_started = FALSE;
            E_INFO("Ready....\n");
        }
    }//for
    ad_close(ad);   /* unreachable; kept for symmetry with ad_open_dev */
}
int main(int argc, char *argv[])
{
initFuncs();
config = cmd_ln_init(NULL, ps_args(), TRUE,
"-hmm", "/home/m/myrobot3/robot/model_parameters/robot.cd_cont_1000",
"-lm","/home/m/myrobot3/robot/etc/robot.lm.bin",
"-dict", "/home/m/myrobot3/robot/etc/robot.dic",
NULL);
ps = ps_init(config);
recognize_from_microphone();
ps_free(ps);
cmd_ln_free_r(config);
return 0;
}错误:
FATAL: "application.cpp", line 163: Failed to read audio
发布于 2018-04-28 18:21:57
espeak_EVENT_TYPE;这一行对编译器来说没有意义。espeak_EVENT_TYPE是一种数据类型,而不是一个可以与espeakEVENT_MSG_TERMINATED这样的值进行比较的变量。要声明一个该类型的变量,语法是:
espeak_EVENT_TYPE an_event_type {};
if (an_event_type == espeakEVENT_MSG_TERMINATED) {
/* ... */但是,如果我们这样做,那么我们刚刚创建的变量an_event_type实际上将不会包含任何关于实际espeak事件的信息。当然,对于是否终止任何实际消息,它也不会透露任何信息。
从espeak接收真实事件信息
为了获取有关消息是否终止的信息,程序需要从espe库中获取一个类型为espeak_EVENT_TYPE的变量。
纵观此标头,espeak_EVENT_TYPE被用作espeak_EVENT结构的一部分。要接收espeak_EVENT通知,必须编写一个函数,该函数将由espeak库调用。(这称为“回调”函数)。然后通过调用espeak_SetSynthCallback将回调函数注册到库中。
在相同的标题中,回调函数的原型必须如下:
int SynthCallback(short *wav, int numsamples, espeak_EVENT *events); wav:已经产生的语音音频数据,NULL表示合成已经完成。 numsamples:wav中的条目数。这个数字可能会变化,可能小于espeak_Initialize中给出的buflength参数所暗示的值,有时可能为零(这并不表示合成结束)。 events:一个espeak_EVENT数组,表示单词和句子事件,以及文本中出现的<mark>和<audio>元素。事件列表以type=0的事件结尾。 回调返回值:0=继续合成,1=中止合成。
综合起来,我们需要一个函数来循环events变量,就像它是一个数组一样,直到它遇到0类型的事件。然后,函数需要返回0才能继续语音活动。
/* eSpeak synthesis callback: walk the event array delivered with each
 * audio chunk until the list terminator, looking for the event that
 * marks the end of the whole spoken message. */
int receive_espeak_events(short *wav, int numsamples, espeak_EVENT *event)
{
while (event->type != espeakEVENT_LIST_TERMINATED) {
if (event->type == espeakEVENT_MSG_TERMINATED) {
/* The entire message has been spoken: do something here. */
}
++event; // Examine the next event.
}
return 0; // Continue speaking.
}要告诉espeak调用此函数,请在开始任何综合操作之前将该函数传递给espeak_SetSynthCallback。
espeak_SetSynthCallback(receive_espeak_events);https://stackoverflow.com/questions/50072174
复制相似问题