diff --git a/speech/recognize.js b/speech/recognize.js
index 3f9da7eb5e..d22f1268bc 100644
--- a/speech/recognize.js
+++ b/speech/recognize.js
@@ -453,7 +453,252 @@ function streamingMicRecognize(encoding, sampleRateHertz, languageCode) {
   // [END speech_transcribe_streaming_mic]
 }
 
-require(`yargs`)
+function syncRecognizeModelSelection(
+  filename,
+  model,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
+  // [START speech_transcribe_model_selection]
+  // Imports the Google Cloud client library for Beta API
+  /**
+   * TODO(developer): Update client library import to use new
+   * version of API when desired features become available
+   */
+  const speech = require('@google-cloud/speech').v1p1beta1;
+  const fs = require('fs');
+
+  // Creates a client
+  const client = new speech.SpeechClient();
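+  // The client authenticates via Application Default Credentials, e.g. the
+  // GOOGLE_APPLICATION_CREDENTIALS environment variable.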
+
+  /**
+   * TODO(developer): Uncomment the following lines before running the sample.
+   */
+  // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
+  // const model = 'Model to use, e.g. phone_call, video, default';
+  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
+  // const sampleRateHertz = 16000;
+  // const languageCode = 'BCP-47 language code, e.g. en-US';
+
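+  // `model` biases recognition toward a domain-specific model; documented
+  // values for this Beta API include `default`, `phone_call`, and `video`.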
+  const config = {
+    encoding: encoding,
+    sampleRateHertz: sampleRateHertz,
+    languageCode: languageCode,
+    model: model,
+  };
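+  // Local audio is sent inline with the request, so the raw bytes are
+  // base64-encoded.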
+  const audio = {
+    content: fs.readFileSync(filename).toString('base64'),
+  };
+
+  const request = {
+    config: config,
+    audio: audio,
+  };
+
+  // Detects speech in the audio file
+  client
+    .recognize(request)
+    .then(data => {
+      const response = data[0];
+      const transcription = response.results
+        .map(result => result.alternatives[0].transcript)
+        .join('\n');
+      console.log(`Transcription: ${transcription}`);
+    })
+    .catch(err => {
+      console.error('ERROR:', err);
+    });
+  // [END speech_transcribe_model_selection]
+}
+
+function syncRecognizeModelSelectionGCS(
+  gcsUri,
+  model,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
+  // [START speech_transcribe_model_selection_gcs]
+  // Imports the Google Cloud client library for Beta API
+  /**
+   * TODO(developer): Update client library import to use new
+   * version of API when desired features become available
+   */
+  const speech = require('@google-cloud/speech').v1p1beta1;
+
+  // Creates a client
+  const client = new speech.SpeechClient();
+
+  /**
+   * TODO(developer): Uncomment the following lines before running the sample.
+   */
+  // const gcsUri = 'gs://my-bucket/audio.raw';
+  // const model = 'Model to use, e.g. phone_call, video, default';
+  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
+  // const sampleRateHertz = 16000;
+  // const languageCode = 'BCP-47 language code, e.g. en-US';
+
+  const config = {
+    encoding: encoding,
+    sampleRateHertz: sampleRateHertz,
+    languageCode: languageCode,
+    model: model,
+  };
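+  // With Cloud Storage input the API reads the object directly; only the
+  // gs:// URI is sent, no inline base64 payload.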
+  const audio = {
+    uri: gcsUri,
+  };
+
+  const request = {
+    config: config,
+    audio: audio,
+  };
+
+  // Detects speech in the audio file
+  client
+    .recognize(request)
+    .then(data => {
+      const response = data[0];
+      const transcription = response.results
+        .map(result => result.alternatives[0].transcript)
+        .join('\n');
+      console.log(`Transcription: ${transcription}`);
+    })
+    .catch(err => {
+      console.error('ERROR:', err);
+    });
+  // [END speech_transcribe_model_selection_gcs]
+}
+
+function syncRecognizeWithAutoPunctuation(
+  filename,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
+  // [START speech_transcribe_auto_punctuation]
+  // Imports the Google Cloud client library for Beta API
+  /**
+   * TODO(developer): Update client library import to use new
+   * version of API when desired features become available
+   */
+  const speech = require('@google-cloud/speech').v1p1beta1;
+  const fs = require('fs');
+
+  // Creates a client
+  const client = new speech.SpeechClient();
+
+  /**
+   * TODO(developer): Uncomment the following lines before running the sample.
+   */
+  // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
+  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
+  // const sampleRateHertz = 16000;
+  // const languageCode = 'BCP-47 language code, e.g. en-US';
+
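+  // enableAutomaticPunctuation asks the service to insert punctuation
+  // (e.g. periods, commas, question marks) into the transcript.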
+  const config = {
+    encoding: encoding,
+    sampleRateHertz: sampleRateHertz,
+    languageCode: languageCode,
+    enableAutomaticPunctuation: true,
+  };
+  const audio = {
+    content: fs.readFileSync(filename).toString('base64'),
+  };
+
+  const request = {
+    config: config,
+    audio: audio,
+  };
+
+  // Detects speech in the audio file
+  client
+    .recognize(request)
+    .then(data => {
+      const response = data[0];
+      const transcription = response.results
+        .map(result => result.alternatives[0].transcript)
+        .join('\n');
+      console.log(`Transcription: ${transcription}`);
+    })
+    .catch(err => {
+      console.error('ERROR:', err);
+    });
+  // [END speech_transcribe_auto_punctuation]
+}
+
+function syncRecognizeWithEnhancedModel(
+  filename,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
+  // [START speech_transcribe_enhanced_model]
+  // Imports the Google Cloud client library for Beta API
+  /**
+   * TODO(developer): Update client library import to use new
+   * version of API when desired features become available
+   */
+  const speech = require('@google-cloud/speech').v1p1beta1;
+  const fs = require('fs');
+
+  // Creates a client
+  const client = new speech.SpeechClient();
+
+  /**
+   * TODO(developer): Uncomment the following lines before running the sample.
+   */
+  // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
+  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
+  // const sampleRateHertz = 16000;
+  // const languageCode = 'BCP-47 language code, e.g. en-US';
+
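+  // useEnhanced requests the premium variant of the named model; in this
+  // Beta API an enhanced model is offered for `phone_call`, and a project
+  // may need to opt in to data logging before it can use enhanced models.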
+  const config = {
+    encoding: encoding,
+    sampleRateHertz: sampleRateHertz,
+    languageCode: languageCode,
+    useEnhanced: true,
+    model: 'phone_call',
+  };
+  const audio = {
+    content: fs.readFileSync(filename).toString('base64'),
+  };
+
+  const request = {
+    config: config,
+    audio: audio,
+  };
+
+  // Detects speech in the audio file
+  client
+    .recognize(request)
+    .then(data => {
+      const response = data[0];
+      response.results.forEach(result => {
+        const alternative = result.alternatives[0];
+        console.log(alternative.transcript);
+      });
+    })
+    .catch(err => {
+      console.error('ERROR:', err);
+    });
+  // [END speech_transcribe_enhanced_model]
+}
+
+require(`yargs`)  // eslint-disable-line
   .demand(1)
   .command(
     `sync <filename>`,
@@ -550,6 +795,56 @@ require(`yargs`)
         opts.languageCode
       )
   )
+  .command(
+    `sync-model <filename> <model>`,
+    `Detects speech in a local audio file using the provided model.`,
+    {},
+    opts =>
+      syncRecognizeModelSelection(
+        opts.filename,
+        opts.model,
+        opts.encoding,
+        opts.sampleRateHertz,
+        opts.languageCode
+      )
+  )
+  .command(
+    `sync-model-gcs <gcsUri> <model>`,
+    `Detects speech in an audio file located in a Google Cloud Storage bucket using the provided model.`,
+    {},
+    opts =>
+      syncRecognizeModelSelectionGCS(
+        opts.gcsUri,
+        opts.model,
+        opts.encoding,
+        opts.sampleRateHertz,
+        opts.languageCode
+      )
+  )
+  .command(
+    `sync-auto-punctuation <filename>`,
+    `Detects speech in a local audio file with auto punctuation.`,
+    {},
+    opts =>
+      syncRecognizeWithAutoPunctuation(
+        opts.filename,
+        opts.encoding,
+        opts.sampleRateHertz,
+        opts.languageCode
+      )
+  )
+  .command(
+    `sync-enhanced-model <filename>`,
+    `Detects speech in a local audio file using an enhanced model.`,
+    {},
+    opts =>
+      syncRecognizeWithEnhancedModel(
+        opts.filename,
+        opts.encoding,
+        opts.sampleRateHertz,
+        opts.languageCode
+      )
+  )
   .options({
     encoding: {
       alias: 'e',
@@ -577,6 +872,14 @@ require(`yargs`)
   .example(`node $0 async-gcs gs://gcs-test-data/vr.flac -e FLAC -r 16000`)
   .example(`node $0 stream ./resources/audio.raw  -e LINEAR16 -r 16000`)
   .example(`node $0 listen`)
+  .example(
+    `node $0 sync-model ./resources/Google_Gnome.wav video -e LINEAR16 -r 16000`
+  )
+  .example(
+    `node $0 sync-model-gcs gs://gcs-test-data/Google_Gnome.wav phone_call -e LINEAR16 -r 16000`
+  )
+  .example(`node $0 sync-auto-punctuation ./resources/commercial_mono.wav`)
+  .example(`node $0 sync-enhanced-model ./resources/commercial_mono.wav`)
   .wrap(120)
   .recommendCommands()
   .epilogue(`For more information, see https://cloud.google.com/speech/docs`)
diff --git a/speech/recognize.v1p1beta1.js b/speech/recognize.v1p1beta1.js
index 6d568a3b7e..1b2e0e032d 100644
--- a/speech/recognize.v1p1beta1.js
+++ b/speech/recognize.v1p1beta1.js
@@ -23,179 +23,6 @@
 
 'use strict';
 
-function syncRecognizeModelSelection(
-  filename,
-  model,
-  encoding,
-  sampleRateHertz,
-  languageCode
-) {
-  // [START speech_transcribe_model_selection_beta]
-  // Imports the Google Cloud client library for Beta API
-  /**
-   * TODO(developer): Update client library import to use new
-   * version of API when desired features become available
-   */
-  const speech = require('@google-cloud/speech').v1p1beta1;
-  const fs = require('fs');
-
-  // Creates a client
-  const client = new speech.SpeechClient();
-
-  /**
-   * TODO(developer): Uncomment the following lines before running the sample.
-   */
-  // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
-  // const model = 'Model to use, e.g. phone_call, video, default';
-  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
-  // const sampleRateHertz = 16000;
-  // const languageCode = 'BCP-47 language code, e.g. en-US';
-
-  const config = {
-    encoding: encoding,
-    sampleRateHertz: sampleRateHertz,
-    languageCode: languageCode,
-    model: model,
-  };
-  const audio = {
-    content: fs.readFileSync(filename).toString('base64'),
-  };
-
-  const request = {
-    config: config,
-    audio: audio,
-  };
-
-  // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: `, transcription);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
-  // [END speech_transcribe_model_selection_beta]
-}
-
-function syncRecognizeModelSelectionGCS(
-  gcsUri,
-  model,
-  encoding,
-  sampleRateHertz,
-  languageCode
-) {
-  // [START speech_transcribe_model_selection_gcs_beta]
-  // Imports the Google Cloud client library for Beta API
-  /**
-   * TODO(developer): Update client library import to use new
-   * version of API when desired features become available
-   */
-  const speech = require('@google-cloud/speech').v1p1beta1;
-
-  // Creates a client
-  const client = new speech.SpeechClient();
-
-  /**
-   * TODO(developer): Uncomment the following lines before running the sample.
-   */
-  // const gcsUri = 'gs://my-bucket/audio.raw';
-  // const model = 'Model to use, e.g. phone_call, video, default';
-  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
-  // const sampleRateHertz = 16000;
-  // const languageCode = 'BCP-47 language code, e.g. en-US';
-
-  const config = {
-    encoding: encoding,
-    sampleRateHertz: sampleRateHertz,
-    languageCode: languageCode,
-    model: model,
-  };
-  const audio = {
-    uri: gcsUri,
-  };
-
-  const request = {
-    config: config,
-    audio: audio,
-  };
-
-  // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: `, transcription);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
-  // [END speech_transcribe_model_selection_gcs_beta]
-}
-
-function syncRecognizeWithAutoPunctuation(
-  filename,
-  encoding,
-  sampleRateHertz,
-  languageCode
-) {
-  // [START speech_transcribe_auto_punctuation_beta]
-  // Imports the Google Cloud client library for Beta API
-  /**
-   * TODO(developer): Update client library import to use new
-   * version of API when desired features become available
-   */
-  const speech = require('@google-cloud/speech').v1p1beta1;
-  const fs = require('fs');
-
-  // Creates a client
-  const client = new speech.SpeechClient();
-
-  /**
-   * TODO(developer): Uncomment the following lines before running the sample.
-   */
-  // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
-  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
-  // const sampleRateHertz = 16000;
-  // const languageCode = 'BCP-47 language code, e.g. en-US';
-
-  const config = {
-    encoding: encoding,
-    languageCode: languageCode,
-    enableAutomaticPunctuation: true,
-  };
-  const audio = {
-    content: fs.readFileSync(filename).toString('base64'),
-  };
-
-  const request = {
-    config: config,
-    audio: audio,
-  };
-
-  // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: `, transcription);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
-  // [END speech_transcribe_auto_punctuation_beta]
-}
-
 function syncRecognizeWithMetaData(
   filename,
   encoding,
@@ -261,103 +88,8 @@ function syncRecognizeWithMetaData(
   // [END speech_transcribe_recognition_metadata_beta]
 }
 
-function syncRecognizeWithEnhancedModel(
-  filename,
-  encoding,
-  sampleRateHertz,
-  languageCode
-) {
-  // [START speech_transcribe_enhanced_model_beta]
-  // Imports the Google Cloud client library for Beta API
-  /**
-   * TODO(developer): Update client library import to use new
-   * version of API when desired features become available
-   */
-  const speech = require('@google-cloud/speech').v1p1beta1;
-  const fs = require('fs');
-
-  // Creates a client
-  const client = new speech.SpeechClient();
-
-  /**
-   * TODO(developer): Uncomment the following lines before running the sample.
-   */
-  // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
-  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
-  // const sampleRateHertz = 16000;
-  // const languageCode = 'BCP-47 language code, e.g. en-US';
-
-  const config = {
-    encoding: encoding,
-    languageCode: languageCode,
-    useEnhanced: true,
-    model: 'phone_call',
-  };
-  const audio = {
-    content: fs.readFileSync(filename).toString('base64'),
-  };
-
-  const request = {
-    config: config,
-    audio: audio,
-  };
-
-  // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      response.results.forEach(result => {
-        const alternative = result.alternatives[0];
-        console.log(alternative.transcript);
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
-  // [END speech_transcribe_enhanced_model_beta]
-}
-
 require(`yargs`)
   .demand(1)
-  .command(
-    `sync-model <filename> <model>`,
-    `Detects speech in a local audio file using provided model.`,
-    {},
-    opts =>
-      syncRecognizeModelSelection(
-        opts.filename,
-        opts.model,
-        opts.encoding,
-        opts.sampleRateHertz,
-        opts.languageCode
-      )
-  )
-  .command(
-    `sync-model-gcs <gcsUri> <model>`,
-    `Detects speech in an audio file located in a Google Cloud Storage bucket using provided model.`,
-    {},
-    opts =>
-      syncRecognizeModelSelectionGCS(
-        opts.gcsUri,
-        opts.model,
-        opts.encoding,
-        opts.sampleRateHertz,
-        opts.languageCode
-      )
-  )
-  .command(
-    `sync-auto-punctuation <filename>`,
-    `Detects speech in a local audio file with auto punctuation.`,
-    {},
-    opts =>
-      syncRecognizeWithAutoPunctuation(
-        opts.filename,
-        opts.encoding,
-        opts.sampleRateHertz,
-        opts.languageCode
-      )
-  )
   .command(
     `sync-metadata <filename>`,
     `Detects speech in a local audio file with metadata.`,
@@ -370,18 +102,6 @@ require(`yargs`)
         opts.languageCode
       )
   )
-  .command(
-    `sync-enhanced-model <filename>`,
-    `Detects speech in a local audio file using an enhanced model.`,
-    {},
-    opts =>
-      syncRecognizeWithEnhancedModel(
-        opts.filename,
-        opts.encoding,
-        opts.sampleRateHertz,
-        opts.languageCode
-      )
-  )
   .options({
     encoding: {
       alias: 'e',
@@ -405,15 +125,7 @@ require(`yargs`)
       type: 'string',
     },
   })
-  .example(
-    `node $0 sync-model ./resources/Google_Gnome.wav video -e LINEAR16 -r 16000`
-  )
-  .example(
-    `node $0 sync-model-gcs gs://gcs-test-data/Google_Gnome.wav phone_call -e FLAC -r 16000`
-  )
-  .example(`node $0 sync-auto-punctuation ./resources/commercial_mono.wav`)
   .example(`node $0 sync-metadata ./resources/commercial_mono.wav`)
-  .example(`node $0 sync-enhanced-model ./resources/commercial_mono.wav`)
   .wrap(120)
   .recommendCommands()
   .epilogue(`For more information, see https://cloud.google.com/speech/docs`)
diff --git a/speech/system-test/recognize.test.js b/speech/system-test/recognize.test.js
index c02dde84df..700ad25b5e 100644
--- a/speech/system-test/recognize.test.js
+++ b/speech/system-test/recognize.test.js
@@ -13,8 +13,6 @@
  * limitations under the License.
  */
 
-/* eslint-disable */
-
 'use strict';
 
 const path = require(`path`);
@@ -29,12 +27,21 @@ const bucketName = `nodejs-docs-samples-test-${uuid.v4()}`;
 const cmd = `node recognize.js`;
 const cwd = path.join(__dirname, `..`);
 const filename = `audio.raw`;
+const filename1 = `Google_Gnome.wav`;
+const filename2 = `commercial_mono.wav`;
 const filepath = path.join(__dirname, `../resources/${filename}`);
+const filepath1 = path.join(__dirname, `../resources/${filename1}`);
+const filepath2 = path.join(__dirname, `../resources/${filename2}`);
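+// Expected transcription snippets from the corresponding sample files.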
 const text = `how old is the Brooklyn Bridge`;
+const text1 = `the weather outside is sunny`;
+const text2 = `Terrific. It's on the way.`;
+const text3 = `Chrome`;
 
 test.before(async () => {
   const [bucket] = await storage.createBucket(bucketName);
   await bucket.upload(filepath);
+  await bucket.upload(filepath1);
 });
 
 test.after.always(async () => {
@@ -90,3 +97,33 @@ test(`should run streaming recognize`, async t => {
   const output = await runAsync(`${cmd} stream ${filepath}`, cwd);
   t.true(output.includes(`Transcription: ${text}`));
 });
+
+test(`should run sync recognize with model selection`, async t => {
+  const model = `video`;
+  const output = await runAsync(`${cmd} sync-model ${filepath1} ${model}`, cwd);
+  t.true(output.includes(`Transcription:`));
+  t.true(output.includes(text1));
+});
+
+test(`should run sync recognize on a GCS file with model selection`, async t => {
+  const model = `video`;
+  const output = await runAsync(
+    `${cmd} sync-model-gcs gs://${bucketName}/${filename1} ${model}`,
+    cwd
+  );
+  t.true(output.includes(`Transcription:`));
+  t.true(output.includes(text1));
+});
+
+test(`should run sync recognize with auto punctuation`, async t => {
+  const output = await runAsync(
+    `${cmd} sync-auto-punctuation ${filepath2}`,
+    cwd
+  );
+  t.true(output.includes(text2));
+});
+
+test(`should run sync recognize with enhanced model`, async t => {
+  const output = await runAsync(`${cmd} sync-enhanced-model ${filepath2}`, cwd);
+  t.true(output.includes(text3));
+});
diff --git a/speech/system-test/recognize.v1p1beta1.test.js b/speech/system-test/recognize.v1p1beta1.test.js
index 7ea9039f3e..792850b5c7 100644
--- a/speech/system-test/recognize.v1p1beta1.test.js
+++ b/speech/system-test/recognize.v1p1beta1.test.js
@@ -13,17 +13,13 @@
  * limitations under the License.
  */
 
-/* eslint-disable */
-
 'use strict';
 
 const path = require(`path`);
 const {Storage} = require(`@google-cloud/storage`);
 const test = require(`ava`);
 const uuid = require(`uuid`);
-
 const {runAsync} = require(`@google-cloud/nodejs-repo-tools`);
-
 const storage = new Storage();
 const bucketName = `nodejs-docs-samples-test-${uuid.v4()}`;
 const cmd = `node recognize.v1p1beta1.js`;
@@ -32,9 +28,8 @@ const filename1 = `Google_Gnome.wav`;
 const filename2 = `commercial_mono.wav`;
 const filepath1 = path.join(__dirname, `../resources/${filename1}`);
 const filepath2 = path.join(__dirname, `../resources/${filename2}`);
-const text1 = `the weather outside is sunny`;
-const text2 = `Terrific. It's on the way.`;
-const text3 = `Chrome`;
+
+const text = `Chrome`;
 
 test.before(async () => {
   const [bucket] = await storage.createBucket(bucketName);
@@ -48,37 +43,7 @@ test.after.always(async () => {
   await bucket.delete();
 });
 
-test(`should run sync recognize with model selection`, async t => {
-  const model = `video`;
-  const output = await runAsync(`${cmd} sync-model ${filepath1} ${model}`, cwd);
-  t.true(output.includes(`Transcription:`));
-  t.true(output.includes(text1));
-});
-
-test(`should run sync recognize on a GCS file with model selection`, async t => {
-  const model = `video`;
-  const output = await runAsync(
-    `${cmd} sync-model-gcs gs://${bucketName}/${filename1} ${model}`,
-    cwd
-  );
-  t.true(output.includes(`Transcription:`));
-  t.true(output.includes(text1));
-});
-
-test(`should run sync recognize with auto punctuation`, async t => {
-  const output = await runAsync(
-    `${cmd} sync-auto-punctuation ${filepath2}`,
-    cwd
-  );
-  t.true(output.includes(text2));
-});
-
 test(`should run sync recognize with metadata`, async t => {
   const output = await runAsync(`${cmd} sync-metadata ${filepath2}`, cwd);
-  t.true(output.includes(text3));
-});
-
-test(`should run sync recognize with enhanced model`, async t => {
-  const output = await runAsync(`${cmd} sync-enhanced-model ${filepath2}`, cwd);
-  t.true(output.includes(text3));
+  t.true(output.includes(text));
 });