我试图在 Windows 8 上使用 C#(.NET 4.5)的语音识别库。
我安装了 Speech Platform SDK 11,在调用 LoadGrammar 时收到了一个异常。
我的节目:
using System;
using System.Speech.Recognition;
using System.Speech.Synthesis;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
namespace SpeechRecognition
{
class Program
{
static void Main(string[] args)
{
// Create an in-process speech recognizer for the en-US locale.
using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine())
{
// Create and load a dictation grammar.
// An unhandled exception of type 'System.UnauthorizedAccessException' occurred in System.Speech.dll
recognizer.LoadGrammar(new DictationGrammar());
// Add a handler for the speech recognized event.
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
// Configure input to the speech recognizer.
recognizer.SetInputToDefaultAudioDevice();
// Start asynchronous, continuous speech recognition.
recognizer.RecognizeAsync(RecognizeMode.Multiple);
// Keep the console window open.
while (true)
{
Console.ReadLine();
}
}
}
// Handle the SpeechRecognized event.
static void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
Console.WriteLine("Recognized text: " + e.Result.Text);
}
}
}
System.UnauthorizedAccessException 类型的未处理异常发生在 System.Speech.dll 中。
堆栈跟踪:
em System.Speech.Recognition.RecognizerBase.Initialize(SapiRecognizer recognizer, Boolean inproc) em System.Speech.Recognition.SpeechRecognitionEngine.get_RecoBase() em System.Speech.Recognition.SpeechRecognitionEngine.LoadGrammar(Grammar grammar) em SpeechRecognition.Program.Main(String[] args) na e:\TestCenter\SpeechRecognition\SpeechRecognition\Program.cs:linha 23 em System.AppDomain._nExecuteAssembly(RuntimeAssembly assembly, String[] args) em Microsoft.VisualStudio.HostingProcess.HostProc.RunUsersAssembly() em System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx) em System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx) em System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state)
我在 Win7 和 Win8 上都进行了测试,但都无法正常工作。
有人能帮我个忙吗?
发布于 2014-09-04 21:15:21
奇怪的是,我依稀记得自己也在 System.Speech 上遇到过类似的问题,但找不到当时的解决方案了。我记得那涉及更改机器上某个文件或文件夹的所有者或访问权限。也许再用 Google/Bing 搜索一下,你能找到我当时找到的那个解决方案;或者你可以用 Process Monitor 观察进程在尝试做什么、在哪里失败,也许这能提供一些线索。
我尝试过安装 Speech Platform SDK 11 和 Speech Platform Runtime,但我认为那可能是该技术的服务器版本,它的 .NET 包装器使用的是 Microsoft.Speech 命名空间。我还安装了 Speech SDK 5.3,但我认为那不是最新版本。最终我安装了 Windows 8.1 SDK,我认为正是它解决了我的问题。下面的代码在我的 WPF 应用程序中运行得很好:
XAML:
<Window x:Class="SpeechTestApp.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Title="MainWindow" Height="350" Width="525">
<Grid>
<TextBlock
x:Name="tb"/>
</Grid>
</Window>
C#:
using System.Diagnostics;
using System.Globalization;
using System.Speech.Recognition;
using System.Windows;
namespace SpeechTestApp
{
public partial class MainWindow : Window
{
private SpeechRecognitionEngine recognizer;
public MainWindow()
{
InitializeComponent();
// Create a SpeechRecognitionEngine object for the default recognizer in the en-US locale.
this.recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US"));
{
// Create a grammar for finding services in different cities.
Choices services = new Choices(new string[] { "restaurants", "hotels", "gas stations" });
Choices cities = new Choices(new string[] { "Seattle", "Boston", "Dallas" });
GrammarBuilder findServices = new GrammarBuilder("Find");
findServices.Append(services);
findServices.Append("near");
findServices.Append(cities);
// Create a Grammar object from the GrammarBuilder and load it to the recognizer.
Grammar servicesGrammar = new Grammar(findServices);
recognizer.LoadGrammarAsync(servicesGrammar);
// Add a handler for the speech recognized event.
recognizer.SpeechRecognized += recognizer_SpeechRecognized;
recognizer.SpeechDetected += RecognizerOnSpeechDetected;
recognizer.SpeechHypothesized += RecognizerOnSpeechHypothesized;
recognizer.SpeechRecognitionRejected += RecognizerOnSpeechRecognitionRejected;
recognizer.AudioStateChanged += RecognizerOnAudioStateChanged;
recognizer.AudioSignalProblemOccurred += RecognizerOnAudioSignalProblemOccurred;
// Configure the input to the speech recognizer.
recognizer.SetInputToDefaultAudioDevice();
// Start asynchronous, continuous speech recognition.
recognizer.RecognizeAsync(RecognizeMode.Multiple);
}
}
private void RecognizerOnAudioSignalProblemOccurred(object sender, AudioSignalProblemOccurredEventArgs audioSignalProblemOccurredEventArgs)
{
Debug.WriteLine(audioSignalProblemOccurredEventArgs.AudioSignalProblem.ToString());
}
private void RecognizerOnAudioStateChanged(object sender, AudioStateChangedEventArgs audioStateChangedEventArgs)
{
Debug.WriteLine(audioStateChangedEventArgs.AudioState.ToString());
}
private void RecognizerOnSpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs speechRecognitionRejectedEventArgs)
{
Debug.WriteLine("RecognizerOnSpeechRecognitionRejected: " + speechRecognitionRejectedEventArgs.Result.Text);
}
private void RecognizerOnSpeechHypothesized(object sender, SpeechHypothesizedEventArgs speechHypothesizedEventArgs)
{
Debug.WriteLine("Hypothesized: " + speechHypothesizedEventArgs.Result.Text);
tb.Text = speechHypothesizedEventArgs.Result.Text;
}
private void RecognizerOnSpeechDetected(object sender, SpeechDetectedEventArgs e)
{
Debug.WriteLine("Detected position: " + e.AudioPosition);
}
private void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
Debug.WriteLine("Recognized text: " + e.Result.Text);
tb.Text = e.Result.Text;
}
}
}
https://stackoverflow.com/questions/25650956
复制相似问题