我将遵循本教程:https://developer.amazon.com/en-US/docs/alexa/alexa-voice-service/manage-http2-connection.html
总结一下这个问题,在使用安卓中的OkHttp向AVS发送事件https请求后,我没有收到任何使用response.body().string()返回的downchannelStream响应。
在这里,我通过创建一个指令http请求来建立下行通道流,这意味着要根据教程保持打开状态:
/**
 * Opens the AVS down-channel by issuing a GET to the {@code /directives} endpoint.
 * Per the AVS docs this stream must stay open for the lifetime of the connection:
 * AVS pushes directives to the client through it as chunked HTTP/2 data.
 *
 * NOTE(review): the {@link Response} delivered to onResponse is a long-lived stream.
 * Its body never "completes" while the connection is healthy, so any call that
 * buffers the whole body (e.g. response.body().string()) will block indefinitely.
 *
 * @param accessToken       LWA access token placed in the authorization header
 * @param downChannelClient client whose HTTP/2 connection the event calls should reuse
 * @throws IOException declared for parity with the other AVS calls; enqueue() is async
 */
private void establishDownChanDirective(String accessToken, OkHttpClient downChannelClient) throws IOException {
// OKHttp header creation.
final Request getRequest = new Request.Builder()
.url("https://alexa.na.gateway.devices.a2z.com/" + AVS_API_VERSION + "/directives")//endpoint url
.get()
.addHeader("authorization", "Bearer " + accessToken)
.build();
Log.d("Request_header", getRequest.toString());
// Asynchronous call: this method returns immediately; the callback fires on an
// OkHttp dispatcher thread once AVS responds with the down-channel stream.
downChannelClient.newCall(getRequest).enqueue(new Callback() {
@Override
public void onFailure(@NotNull Call call, @NotNull IOException e) {
Log.d("downChannelResp", "failure: " + e.getMessage());
// NOTE(review): cancelling a call that has already failed is a no-op.
call.cancel();
}
@Override
public void onResponse(@NotNull Call call, @NotNull Response response) throws IOException {
Log.d("downChannelResp", "Down channel recieved! Test 1");
// readBodySource=true: processResponse streams the body incrementally instead of
// buffering it, which is the only safe way to read a never-ending down-channel.
processResponse(response, "downChannelResp", true);
Log.d("downChannelResp", "Down channel recieved! Test 2");
// NOTE(review): a response body can be consumed only once; by the time this field
// is read elsewhere, processResponse() has already (partially) consumed the source,
// so responseDirective.body().string() cannot return the full payload.
responseDirective = response;
}
});
}接下来,我尝试通过发送一个事件来与AVS同步:
/**
 * Sends a {@code System.SynchronizeState} event to the AVS {@code /events} endpoint,
 * reusing the same HTTP/2 connection as the down-channel.
 *
 * @param downChannelClient client that also holds the down-channel stream
 * @param accessToken       LWA access token placed in the authorization header
 * @throws IOException declared for parity with the other AVS calls; enqueue() is async
 */
private void sendSyncEvent(OkHttpClient downChannelClient, String accessToken) throws IOException {
    String msgId = UUID.randomUUID().toString();
    String speakToken = "";
    long offsetMili = 20; // if lags put down to 10.
    String playerActivity = "PLAYING";
    final String JSON_SYNC = "{\"context\":[{\"header\":{\"namespace\":\"SpeechRecognizer\",\"name\":\"RecognizerState\"},\"payload\":{\"wakeword\":\"ALEXA\"}},{\"header\":{\"namespace\":\"SpeechSynthesizer\",\"name\":\"SpeechState\"},\"payload\":{\"token\":\"" + speakToken + "\",\"offsetInMilliseconds\":" + offsetMili + ",\"playerActivity\":\"" + playerActivity + "\"}}],\"event\":{\"header\":{\"namespace\":\"System\",\"name\":\"SynchronizeState\",\"messageId\":\"" + msgId + "\"},\"payload\":{}}}";
    // Multipart "metadata" part carrying the event JSON, as the AVS events endpoint requires.
    MultipartBody.Part syncPart = MultipartBody.Part.create(
            Headers.of("Content-Disposition", "form-data; name=\"metadata\""),
            RequestBody.create(JSON_SYNC, JSON_TYPE));
    // Use MultipartBody.Builder rather than the MultipartBody constructor, which is not
    // part of OkHttp's public API. The builder accepts the boundary string directly.
    RequestBody body = new MultipartBody.Builder(BOUNDARY_TERM)
            .setType(MultipartBody.FORM)
            .addPart(syncPart)
            .build();
    Log.d("part", syncPart.headers().toString());
    Log.d("body", body.contentType().toString());
    // No manual "content-type" header: OkHttp derives the correct
    // "multipart/form-data; boundary=..." value from the body's contentType().
    // Adding it by hand risks a duplicate or mismatched header.
    final Request postRequest = new Request.Builder()
            .url("https://alexa.na.gateway.devices.a2z.com/" + AVS_API_VERSION + "/events")//endpoint url
            .post(body)
            .addHeader("authorization", "Bearer " + accessToken)
            .build();
    Log.d("post_request", postRequest.toString());
    Log.d("post_req_body", JSON_SYNC);
    downChannelClient.newCall(postRequest).enqueue(new Callback() {
        @Override
        public void onFailure(@NotNull Call call, @NotNull IOException e) {
            Log.d("syncResp", "failure: " + e.getMessage());
            call.cancel();
        }
        @Override
        public void onResponse(@NotNull Call call, @NotNull Response response) throws IOException {
            // readBodySource=false: a SynchronizeState success is expected to be a 204
            // with an empty body, so there is no stream to read.
            processResponse(response, "syncResp", false);
        }
    });
}然后,我尝试发送一个测试识别事件,该事件(根据本教程)旨在通过初始downChannelStream返回响应:
/**
 * Sends a {@code SpeechRecognizer.Recognize} event with an attached audio part.
 * Per the AVS docs, directives triggered by this event arrive either on this
 * request's own response stream or on the down-channel.
 *
 * @param downChannelClient client that also holds the down-channel stream
 * @param accessToken       LWA access token placed in the authorization header
 * @throws IOException if {@code createTestFile()} fails to produce the audio payload
 */
private void testRecognizeEventAVS(OkHttpClient downChannelClient, String accessToken) throws IOException {
    final MediaType AUDIO_TYPE = MediaType.parse("application/octet-stream");
    String audioMsgId = UUID.randomUUID().toString();
    String dialogId = UUID.randomUUID().toString();
    final String JSON_SPEECH_EVENT = "{\"event\": {\"header\": {\"namespace\": \"SpeechRecognizer\",\"name\": \"Recognize\",\"messageId\": \"" + audioMsgId + "\",\"dialogRequestId\": \"" + dialogId + "\"},\"payload\": {\"profile\": \"CLOSE_TALK\", \"format\": \"AUDIO_L16_RATE_16000_CHANNELS_1\"}},\"context\": [{\"header\": {\"namespace\": \"AudioPlayer\",\"name\": \"PlaybackState\"},\"payload\": {\"token\": \"\",\"offsetInMilliseconds\": 0,\"playerActivity\": \"FINISHED\"}}, {\"header\": {\"namespace\": \"SpeechSynthesizer\",\"name\": \"SpeechState\"},\"payload\": {\"token\": \"\",\"offsetInMilliseconds\": 0,\"playerActivity\": \"FINISHED\"}}, { \"header\" : { \"namespace\" : \"Alerts\", \"name\" : \"AlertsState\" }, \"payload\" : { \"allAlerts\" : [ ], \"activeAlerts\" : [ ] } }, {\"header\": {\"namespace\": \"Speaker\",\"name\": \"VolumeState\"},\"payload\": {\"volume\": 25,\"muted\": false}}]}";
    // Metadata part: JSON event envelope. Headers.of(...) builds the part headers
    // directly; no intermediate HashMap is needed.
    MultipartBody.Part metaPart = MultipartBody.Part.create(
            Headers.of("Content-Disposition", "form-data; name=\"metadata\""),
            RequestBody.create(JSON_SPEECH_EVENT, JSON_TYPE));
    // Audio part: BUGFIX - the part name must be "audio", not "metadata".
    // AVS requires exactly one part named "metadata" and one named "audio";
    // sending two "metadata" parts means the captured speech is never recognized.
    MultipartBody.Part audioPart = MultipartBody.Part.create(
            Headers.of("Content-Disposition", "form-data; name=\"audio\""),
            RequestBody.create(createTestFile(), AUDIO_TYPE));
    // Use MultipartBody.Builder rather than the MultipartBody constructor, which is not
    // part of OkHttp's public API. The builder accepts the boundary string directly.
    RequestBody reqBody = new MultipartBody.Builder(BOUNDARY_TERM)
            .setType(MultipartBody.FORM)
            .addPart(metaPart)
            .addPart(audioPart)
            .build();
    Log.d("metaPart", metaPart.headers().toString());
    Log.d("audioPart", audioPart.headers().toString());
    Log.d("body", reqBody.contentType().toString());
    // https://developer.amazon.com/en-US/docs/alexa/alexa-voice-service/structure-http2-request.html
    // No manual "content-type" header: OkHttp derives the correct multipart header
    // (including the boundary) from the body's contentType().
    Request speechRequest = new Request.Builder()
            .url("https://alexa.na.gateway.devices.a2z.com/" + AVS_API_VERSION + "/events")
            .addHeader("authorization", "Bearer " + accessToken)
            .post(reqBody)
            .build();
    Log.d("speech_request", speechRequest.toString());
    downChannelClient.newCall(speechRequest).enqueue(new Callback() {
        @Override
        public void onFailure(@NotNull Call call, @NotNull IOException e) {
            Log.d("speechResp", "failure: " + e.getMessage());
            call.cancel();
        }
        @Override
        public void onResponse(@NotNull Call call, @NotNull Response response) throws IOException {
            processResponse(response, "speechResp", false);
        }
    });
}这是上述每个方法中使用的processResponse方法,用于获取响应并将响应信息输出到Android日志中:
/**
 * Logs basic information about an AVS response and, when requested, streams its
 * body incrementally.
 *
 * NOTE(review): an OkHttp response body is one-shot and, for the down-channel,
 * never-ending. Calling response.body().string() buffers the ENTIRE body before
 * returning, so on the down-channel it blocks forever (hence the commented-out
 * line below "stops the rest of this method"). After any read here, the body
 * cannot be read again by the caller.
 *
 * @param response       the AVS response; its body may be partially consumed here
 * @param TAG            Android log tag identifying which request this response answers
 * @param readBodySource true to stream the body chunk-by-chunk (down-channel only)
 * @throws IOException if reading the body source fails
 */
private void processResponse(Response response, final String TAG, boolean readBodySource) throws IOException {
//Log.d(TAG, "response-string: " + response.body().string()); // This never shows up and always stops the rest of this method running for the response from establishDownChanDirective().
Log.d(TAG, "response-success: " + response.isSuccessful());
Log.d(TAG, "response" + response.toString());
// Tried this from stack over flow posts, but right now we aren't even receiving a response-string from the downChannelDirective, so we need to figure that out first.
if (readBodySource) {
// Stream the body without buffering it whole: read up to 8 KiB at a time.
// NOTE(review): exhausted() blocks until data arrives or the stream closes, so for a
// healthy down-channel this loop never exits - the trailing buffer log is unreachable
// until AVS closes the stream.
BufferedSource bufferedSource = response.body().source();
Buffer buffer = new Buffer();
while (!bufferedSource.exhausted()) {
Log.w("bufferedSource", "downchannel recieved!");
long bs = bufferedSource.read(buffer, 8192);
Log.d("bufferedSource_read", String.valueOf(bs));
Log.d("buffersize", String.valueOf(buffer.size()));
}
Log.d("buffer_response", buffer.toString());
}
}该方法注释掉了string-response,但是当它没有被注释掉时,它只给出D/syncResp: response-string:作为输出,而response-string对于syncResp和speechResp来说只是一个空字符串。但是,对于downChannelResp,它不提供任何输出,并且完全停止运行Log.d(TAG, "response-string: " + response.body().string());下面的其余代码。
现在,当我运行以下代码时:
// Driver sequence for the three AVS calls above.
// NOTE(review): all three methods use enqueue(), which returns immediately; none of
// the network work has completed by the time the code below the calls runs.
try {
establishDownChanDirective(accessToken, downChannelClient); // Establish a down channel directive that will remain open.
sendSyncEvent(downChannelClient, accessToken); // Send a Syncronize event through the same connection as the down channel directive.
testRecognizeEventAVS(downChannelClient, accessToken); // Send a Speech directive through the same connection as the down channel directive.
Log.d("OkHttp", "Test: Http stuff finished.");
// NOTE(review): race condition - responseDirective is assigned on an OkHttp dispatcher
// thread inside establishDownChanDirective's callback, which has almost certainly not
// fired yet, so this check nearly always takes the "No response!" branch (as the log
// output below confirms). Even when set, body() has already been consumed once by
// processResponse(), so string() here cannot return the payload.
if (responseDirective != null) {
Log.d("OkHttp", "Response: " + responseDirective.body().string());
} else {
Log.d("OkHttp", "No response!");
}
} catch (IOException e) {
Log.d("OkHttpError", "error: START{" + e.toString() + "}END");
e.printStackTrace();
}...it将此作为输出:
D/Request_header: Request{method=GET, url=https://alexa.na.gateway.devices.a2z.com/v20160207/directives, headers=[authorization:Bearer <the access token - censored for this post>]}
D/part: Content-Disposition: form-data; name="metadata"
D/body: multipart/form-data; boundary=------------------------qM9tn4VZyj
D/post_request: Request{method=POST, url=https://alexa.na.gateway.devices.a2z.com/v20160207/events, headers=[authorization:Bearer <the access token - censored for this post>, content-type:multipart/form-data; boundary=------------------------qM9tn4VZyj]}
D/post_req_body: {"context":[{"header":{"namespace":"SpeechRecognizer","name":"RecognizerState"},"payload":{"wakeword":"ALEXA"}},{"header":{"namespace":"SpeechSynthesizer","name":"SpeechState"},"payload":{"token":"","offsetInMilliseconds":20,"playerActivity":"PLAYING"}}],"event":{"header":{"namespace":"System","name":"SynchronizeState","messageId":"2c46b1a9-8b41-47be-bd09-61166b78492e"},"payload":{}}}
D/parent: /storage/emulated/0/Android/data/aut.rnd.alexa/files
D/fileexists: true
D/media_file: successfully created: true
D/metaPart: Content-Disposition: form-data; name="metadata"
D/audioPart: Content-Disposition: form-data; name="metadata"
D/body: multipart/form-data; boundary=------------------------qM9tn4VZyj
D/speech_request: Request{method=POST, url=https://alexa.na.gateway.devices.a2z.com/v20160207/events, headers=[authorization:Bearer <the access token - censored for this post>, content-type:multipart/form-data; boundary=------------------------qM9tn4VZyj]}
D/OkHttp: Test: Http stuff finished.
No response!
D/downChannelResp: Down channel recieved! Test 1
response-success: true
responseResponse{protocol=h2, code=200, message=, url=https://alexa.na.gateway.devices.a2z.com/v20160207/directives}
W/bufferedSource: downchannel recieved!
D/bufferedSource_read: 18
D/buffersize: 18
D/syncResp: response-success: true
responseResponse{protocol=h2, code=204, message=, url=https://alexa.na.gateway.devices.a2z.com/v20160207/events}
D/speechResp: response-success: true
responseResponse{protocol=h2, code=204, message=, url=https://alexa.na.gateway.devices.a2z.com/v20160207/events}这是意想不到的,因为响应应该返回可以转换为JSON的数据,但它似乎根本没有返回任何东西。
发布于 2020-09-19 22:14:00
响应body只能消费一次。
这个类可以用来流式传输非常大的响应。例如,可以使用此类读取大于分配给当前进程的整个内存的响应。它甚至可以流式传输比当前设备上总存储量更大的响应,这是视频流应用程序的常见要求。
https://stackoverflow.com/questions/63968977
复制相似问题