From 00571c318bfdd529ca0ec78ac7590089a214230b Mon Sep 17 00:00:00 2001
From: Jonathan Lui
Date: Fri, 21 Sep 2018 13:05:36 -0700
Subject: [PATCH] fix: send streamingConfig as a separate write before audioContent (#176)

---
 packages/google-cloud-speech/src/helpers.js   | 24 +++++++------------
 packages/google-cloud-speech/synth.py         | 23 +++++++++++++++---
 packages/google-cloud-speech/test/gapic-v1.js | 14 ++++-------
 .../test/gapic-v1p1beta1.js                   | 14 ++++-------
 .../google-cloud-speech/test/helpers.test.js  | 18 ++++++++++----
 5 files changed, 50 insertions(+), 43 deletions(-)

diff --git a/packages/google-cloud-speech/src/helpers.js b/packages/google-cloud-speech/src/helpers.js
index 5dc37e5dd1a..1b12c9b7f97 100644
--- a/packages/google-cloud-speech/src/helpers.js
+++ b/packages/google-cloud-speech/src/helpers.js
@@ -63,10 +63,9 @@ module.exports = () => {
    * // Write request objects.
    * stream.write(request);
    */
-  methods.streamingRecognize = function(config, options) {
-    if (options === undefined) {
-      options = {};
-    }
+  methods.streamingRecognize = function(streamingConfig, options) {
+    options = options || {};
+    streamingConfig = streamingConfig || {};
 
     // Format the audio content as input request for pipeline
     const recognizeStream = streamEvents(pumpify.obj());
@@ -87,7 +86,7 @@ module.exports = () => {
     // config) is delayed until we get the first burst of data.
     recognizeStream.once('writing', () => {
       // The first message should contain the streaming config.
-      const firstMessage = true;
+      requestStream.write({streamingConfig});
 
       // Set up appropriate piping between the stream returned by
       // the underlying API method and the one that we return.
@@ -95,18 +94,13 @@ module.exports = () => {
         // Format the user's input.
         // This entails that the user sends raw audio; it is wrapped in
         // the appropriate request structure.
-        through.obj((obj, _, next) => {
-          const payload = {};
-          if (firstMessage && config !== undefined) {
-            // Write the initial configuration to the stream.
-            payload.streamingConfig = config;
-          }
-
-          if (Object.keys(obj || {}).length) {
-            payload.audioContent = obj;
+        through.obj((audioContent, _, next) => {
+          if (audioContent !== undefined) {
+            next(null, {audioContent});
+            return;
           }
 
-          next(null, payload);
+          next();
         }),
         requestStream,
         through.obj((response, enc, next) => {
diff --git a/packages/google-cloud-speech/synth.py b/packages/google-cloud-speech/synth.py
index e26e3b1af09..d3c940feb0e 100644
--- a/packages/google-cloud-speech/synth.py
+++ b/packages/google-cloud-speech/synth.py
@@ -35,12 +35,29 @@
         excludes=['package.json', 'README.md', 'src/index.js',]
     )
 
-templates = common_templates.node_library(package_name="@google-cloud/speech")
-s.copy(templates)
+    # Manual helper methods overrides the streaming API so that it
+    # accepts streamingConfig when calling streamingRecognize. Fix
+    # the gapic tests to use the overridden method signature.
+    s.replace( f"test/gapic-{version}.js",
+        "(mockBidiStreamingGrpcMethod\()request",
+        r"\1{ streamingConfig: {} }")
+
+    s.replace(
+        f"test/gapic-{version}.js",
+        "stream\.write\(request\)",
+        "stream.write()")
+
+    s.replace(
+        f"test/gapic-{version}.js",
+        "// Mock request\n\s*const request = {};",
+        "")
+
+templates = common_templates.node_library()
+# TODO: remove excludes once var's are converted to const/let
+s.copy(templates, excludes=['.eslintrc.yml'])
 
 #
 # Node.js specific cleanup
 #
 subprocess.run(['npm', 'install'])
 subprocess.run(['npm', 'run', 'prettier'])
-subprocess.run(['npm', 'run', 'lint'])
diff --git a/packages/google-cloud-speech/test/gapic-v1.js b/packages/google-cloud-speech/test/gapic-v1.js
index fdf5cf5b26e..ecca1fa6a99 100644
--- a/packages/google-cloud-speech/test/gapic-v1.js
+++ b/packages/google-cloud-speech/test/gapic-v1.js
@@ -224,15 +224,12 @@ describe('SpeechClient', () => {
         projectId: 'bogus',
       });
 
-      // Mock request
-      const request = {};
-
       // Mock response
       const expectedResponse = {};
 
       // Mock Grpc layer
       client._innerApiCalls.streamingRecognize = mockBidiStreamingGrpcMethod(
-        request,
+        {streamingConfig: {}},
         expectedResponse
       );
 
@@ -246,7 +243,7 @@
         done(err);
       });
 
-      stream.write(request);
+      stream.write();
     });
 
     it('invokes streamingRecognize with error', done => {
@@ -255,12 +252,9 @@
         projectId: 'bogus',
      });
 
-      // Mock request
-      const request = {};
-
       // Mock Grpc layer
       client._innerApiCalls.streamingRecognize = mockBidiStreamingGrpcMethod(
-        request,
+        {streamingConfig: {}},
         null,
         error
       );
@@ -276,7 +270,7 @@
         done();
       });
 
-      stream.write(request);
+      stream.write();
     });
   });
 });
diff --git a/packages/google-cloud-speech/test/gapic-v1p1beta1.js b/packages/google-cloud-speech/test/gapic-v1p1beta1.js
index bbb1ee42d04..bb5e79e0bd7 100644
--- a/packages/google-cloud-speech/test/gapic-v1p1beta1.js
+++ b/packages/google-cloud-speech/test/gapic-v1p1beta1.js
@@ -224,15 +224,12 @@ describe('SpeechClient', () => {
         projectId: 'bogus',
       });
 
-      // Mock request
-      const request = {};
-
       // Mock response
       const expectedResponse = {};
 
       // Mock Grpc layer
       client._innerApiCalls.streamingRecognize = mockBidiStreamingGrpcMethod(
-        request,
+        {streamingConfig: {}},
         expectedResponse
       );
 
@@ -246,7 +243,7 @@
         done(err);
       });
 
-      stream.write(request);
+      stream.write();
     });
 
     it('invokes streamingRecognize with error', done => {
@@ -255,12 +252,9 @@
         projectId: 'bogus',
      });
 
-      // Mock request
-      const request = {};
-
       // Mock Grpc layer
       client._innerApiCalls.streamingRecognize = mockBidiStreamingGrpcMethod(
-        request,
+        {streamingConfig: {}},
         null,
         error
       );
@@ -276,7 +270,7 @@
         done();
       });
 
-      stream.write(request);
+      stream.write();
     });
   });
 });
diff --git a/packages/google-cloud-speech/test/helpers.test.js b/packages/google-cloud-speech/test/helpers.test.js
index 5fff3631d54..bd8194c44f4 100644
--- a/packages/google-cloud-speech/test/helpers.test.js
+++ b/packages/google-cloud-speech/test/helpers.test.js
@@ -184,6 +184,7 @@ describe('Speech helper methods', () => {
       // Stub the underlying _streamingRecognize method to just return
       // a bogus stream.
       const requestStream = new stream.PassThrough({objectMode: true});
+
       sandbox
         .stub(client._innerApiCalls, 'streamingRecognize')
         .returns(requestStream);
@@ -191,12 +192,19 @@ describe('Speech helper methods', () => {
       const userStream = client.streamingRecognize(CONFIG, OPTIONS);
       const audioContent = Buffer.from('audio content');
+      let count = 0;
 
       requestStream._write = (data, enc, next) => {
-        assert.deepStrictEqual(data, {
-          audioContent: audioContent,
-          streamingConfig: CONFIG,
-        });
-        setImmediate(done);
+        if (count === 0)
+          assert.deepStrictEqual(data, {
+            streamingConfig: CONFIG,
+          });
+        else if (count === 1) {
+          assert.deepStrictEqual(data, {
+            audioContent: audioContent,
+          });
+          setImmediate(done);
+        }
+        count++;
         next(null, data);
       };
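
A minimal usage sketch of the patched helper, assuming the @google-cloud/speech Node.js
client of this era; the file name audio.raw and the recognition settings below are
placeholder values, not part of the change. After this fix the first message written to
the underlying gRPC stream is {streamingConfig} on its own, and every chunk the caller
writes afterwards is wrapped as {audioContent}. Config and audio are never combined in
a single message, which matches the StreamingRecognizeRequest contract: its oneof holds
either streaming_config or audio_content, never both.

    const fs = require('fs');
    const speech = require('@google-cloud/speech');

    const client = new speech.SpeechClient();

    // Passed as the streamingConfig argument of the helper; it is written to
    // the gRPC stream as its own request before any audio is sent.
    const streamingConfig = {
      config: {
        encoding: 'LINEAR16', // placeholder values
        sampleRateHertz: 16000,
        languageCode: 'en-US',
      },
      interimResults: false,
    };

    const recognizeStream = client
      .streamingRecognize(streamingConfig)
      .on('error', console.error)
      .on('data', response => console.log(response.results));

    // Each raw audio chunk piped in is forwarded as {audioContent: chunk}.
    fs.createReadStream('audio.raw').pipe(recognizeStream);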