diff --git a/pom.xml b/pom.xml
index 0f74d0e8e5e..6f8c58ab175 100644
--- a/pom.xml
+++ b/pom.xml
@@ -92,6 +92,7 @@
     <module>translate/cloud-client</module>
     <module>unittests</module>
+    <module>video/beta</module>
     <module>video/cloud-client</module>
     <module>vision/beta/cloud-client</module>
     <module>vision/cloud-client</module>
diff --git a/video/beta/README.md b/video/beta/README.md
new file mode 100644
index 00000000000..a77add90782
--- /dev/null
+++ b/video/beta/README.md
@@ -0,0 +1,52 @@
+# Video Feature Detection Sample
+
+[Google Cloud Video Intelligence API][video] provides feature detection for
+videos. This API is part of the larger collection of Cloud Machine Learning
+APIs.
+
+This sample Java application demonstrates how to access the Cloud Video API
+using the [Google Cloud Client Library for Java][google-cloud-java].
+
+[video]: https://cloud.google.com/video-intelligence/docs/
+[google-cloud-java]: https://github.com/GoogleCloudPlatform/google-cloud-java
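+
+At its core, the sample builds an `AnnotateVideoRequest` for the `v1p1beta1`
+client and waits on the resulting long-running operation. The following is a
+condensed sketch of the face-detection call, using a placeholder `gs://` path;
+the full version is in `src/main/java/com/example/video/Detect.java`:
+
+```java
+try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+  // Ask for bounding boxes in the face detection results.
+  FaceConfig faceConfig = FaceConfig.newBuilder().setIncludeBoundingBoxes(true).build();
+  VideoContext context = VideoContext.newBuilder().setFaceDetectionConfig(faceConfig).build();
+  AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
+      .setInputUri("gs://YOUR_BUCKET/YOUR_VIDEO.mp4")
+      .addFeatures(Feature.FACE_DETECTION)
+      .setVideoContext(context)
+      .build();
+  // Block (with a timeout) on the long-running annotate operation.
+  AnnotateVideoResponse response = client.annotateVideoAsync(request).get(600, TimeUnit.SECONDS);
+}
+```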
+
+## Build the sample
+
+Install [Maven](http://maven.apache.org/).
+
+Build your project with:
+
+```
+mvn clean package -DskipTests
+```
+
+### Analyze a video
+Please follow the [Set Up Your Project](https://cloud.google.com/video-intelligence/docs/getting-started#set_up_your_project)
+steps in the Quickstart doc to create a project and enable the Google Cloud
+Video Intelligence API. After completing those steps, make sure that you
+[Set Up a Service Account](https://cloud.google.com/video-intelligence/docs/common/auth#set_up_a_service_account)
+and export the following environment variable:
+
+```
+export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json
+```
+
+After you have authorized, you can analyze videos.
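+
+The `-DDetect` flag in each command below activates the `Detect` Maven profile
+defined in this sample's `pom.xml`, which uses `exec-maven-plugin` to run
+`com.example.video.Detect` with the arguments you pass via `-Dexec.args`.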
+
+Detect Faces' Bounding Boxes
+```
+mvn exec:java -DDetect -Dexec.args="faces-bounding-boxes gs://YOUR_BUCKET/YOUR_VIDEO.mp4"
+```
+
+Detect Faces' Emotions
+```
+mvn exec:java -DDetect -Dexec.args="faces-emotions gs://YOUR_BUCKET/YOUR_VIDEO.mp4"
+```
+
+Video Transcription
+```
+mvn exec:java -DDetect -Dexec.args="speech-transcription gs://YOUR_BUCKET/YOUR_VIDEO.mp4"
+```
\ No newline at end of file
diff --git a/video/beta/pom.xml b/video/beta/pom.xml
new file mode 100644
index 00000000000..ae129963c7c
--- /dev/null
+++ b/video/beta/pom.xml
@@ -0,0 +1,108 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>com.example.video</groupId>
+  <artifactId>video-google-cloud-samples-beta</artifactId>
+  <packaging>jar</packaging>
+
+  <parent>
+    <groupId>com.google.cloud.samples</groupId>
+    <artifactId>shared-configuration</artifactId>
+    <version>1.0.8</version>
+  </parent>
+
+  <properties>
+    <maven.compiler.target>1.8</maven.compiler.target>
+    <maven.compiler.source>1.8</maven.compiler.source>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>google-cloud-video-intelligence</artifactId>
+      <version>0.41.0-beta</version>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.12</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.truth</groupId>
+      <artifactId>truth</artifactId>
+      <version>0.39</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <archive>
+            <manifest>
+              <mainClass>com.example.video.Detect</mainClass>
+            </manifest>
+          </archive>
+          <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+          </descriptorRefs>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <profiles>
+    <profile>
+      <id>Detect</id>
+      <activation>
+        <property>
+          <name>Detect</name>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>1.6.0</version>
+            <executions>
+              <execution>
+                <goals>
+                  <goal>java</goal>
+                </goals>
+              </execution>
+            </executions>
+            <configuration>
+              <mainClass>com.example.video.Detect</mainClass>
+              <cleanupDaemonThreads>false</cleanupDaemonThreads>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
diff --git a/video/beta/src/main/java/com/example/video/Detect.java b/video/beta/src/main/java/com/example/video/Detect.java
new file mode 100644
index 00000000000..25512b7466e
--- /dev/null
+++ b/video/beta/src/main/java/com/example/video/Detect.java
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress;
+import com.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest;
+import com.google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse;
+import com.google.cloud.videointelligence.v1p1beta1.EmotionAttribute;
+import com.google.cloud.videointelligence.v1p1beta1.FaceConfig;
+import com.google.cloud.videointelligence.v1p1beta1.FaceDetectionAnnotation;
+import com.google.cloud.videointelligence.v1p1beta1.FaceDetectionFrame;
+import com.google.cloud.videointelligence.v1p1beta1.FaceSegment;
+import com.google.cloud.videointelligence.v1p1beta1.Feature;
+import com.google.cloud.videointelligence.v1p1beta1.NormalizedBoundingBox;
+import com.google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative;
+import com.google.cloud.videointelligence.v1p1beta1.SpeechTranscription;
+import com.google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig;
+import com.google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults;
+import com.google.cloud.videointelligence.v1p1beta1.VideoContext;
+import com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient;
+import com.google.cloud.videointelligence.v1p1beta1.WordInfo;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+public class Detect {
+ /**
+ * Detects faces' bounding boxes and emotions, and transcribes speech, using the Video
+ * Intelligence API.
+ * @param args specifies features to detect and the path to the video on Google Cloud Storage.
+ */
+ public static void main(String[] args) {
+ try {
+ argsHelper(args);
+ } catch (Exception e) {
+ System.out.println("Exception while running:\n" + e.getMessage() + "\n");
+ e.printStackTrace(System.out);
+ }
+ }
+
+ /**
+ * Helper that handles the input passed to the program.
+ * @param args specifies features to detect and the path to the video on Google Cloud Storage.
+ *
+ * @throws IOException on Input/Output errors.
+ */
+ public static void argsHelper(String[] args) throws Exception {
+ if (args.length < 1) {
+ System.out.println("Usage:");
+ System.out.printf(
+ "\tjava %s \"\" \"\"\n"
+ + "Commands:\n"
+ + "\tfaces-bounding-boxes | faces-emotions | speech-transcription\n"
+ + "Path:\n\tA URI for a Cloud Storage resource (gs://...)\n"
+ + "Examples: ",
+ Detect.class.getCanonicalName());
+ return;
+ }
+ String command = args[0];
+ String path = args.length > 1 ? args[1] : "";
+
+ if (command.equals("faces-bounding-boxes")) {
+ analyzeFacesBoundingBoxes(path);
+ }
+ if (command.equals("faces-emotions")) {
+ analyzeFaceEmotions(path);
+ }
+ if (command.equals("speech-transcription")) {
+ speechTranscription(path);
+ }
+ }
+
+
+ // [START video_face_bounding_boxes]
+ /**
+ * Detects faces' bounding boxes on the video at the provided Cloud Storage path.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static void analyzeFacesBoundingBoxes(String gcsUri) throws Exception {
+ // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Set the configuration to include bounding boxes
+ FaceConfig config = FaceConfig.newBuilder()
+ .setIncludeBoundingBoxes(true)
+ .build();
+
+ // Set the video context with the above configuration
+ VideoContext context = VideoContext.newBuilder()
+ .setFaceDetectionConfig(config)
+ .build();
+
+ // Create the request
+ AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.FACE_DETECTION)
+ .setVideoContext(context)
+ .build();
+
+ // asynchronously perform facial analysis on videos
+ OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ boolean faceFound = false;
+ // Display the results
+ for (VideoAnnotationResults results : response.get(900, TimeUnit.SECONDS)
+ .getAnnotationResultsList()) {
+ int faceCount = 0;
+ // Display the results for each face
+ for (FaceDetectionAnnotation faceAnnotation : results.getFaceDetectionAnnotationsList()) {
+ faceFound = true;
+ System.out.println("\nFace: " + ++faceCount);
+ // Each FaceDetectionAnnotation has only one segment.
+ for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
+ double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
+ + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+ double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
+ + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+ System.out.printf("Segment location: %.3fs to %.3f\n", startTime, endTime);
+ }
+ // There are typically many frames for each face; here we process only the first frame.
+ try {
+ if (faceAnnotation.getFramesCount() > 0) {
+ FaceDetectionFrame frame = faceAnnotation.getFrames(0); // get the first frame
+ double timeOffset = frame.getTimeOffset().getSeconds()
+ + frame.getTimeOffset().getNanos() / 1e9;
+ System.out.printf("First frame time offset: %.3fs\n", timeOffset);
+ // print info on the first normalized bounding box
+ NormalizedBoundingBox box = frame.getAttributes(0).getNormalizedBoundingBox();
+ System.out.printf("\tLeft: %.3f\n", box.getLeft());
+ System.out.printf("\tTop: %.3f\n", box.getTop());
+ System.out.printf("\tBottom: %.3f\n", box.getBottom());
+ System.out.printf("\tRight: %.3f\n", box.getRight());
+ } else {
+ System.out.println("No frames found in annotation");
+ }
+ } catch (IndexOutOfBoundsException ioe) {
+ System.out.println("Could not retrieve frame: " + ioe.getMessage());
+ }
+ }
+ }
+
+ if (!faceFound) {
+ System.out.println("No faces detected in " + gcsUri);
+ }
+ }
+ }
+ // [END video_face_bounding_boxes]
+
+ // [START video_face_emotions]
+ /**
+ * Analyze faces' emotions over frames on the video at the provided Cloud Storage path.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static void analyzeFaceEmotions(String gcsUri) throws Exception {
+ // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Set the configuration to include emotions
+ FaceConfig config = FaceConfig.newBuilder()
+ .setIncludeEmotions(true)
+ .build();
+
+ // Set the video context with the above configuration
+ VideoContext context = VideoContext.newBuilder()
+ .setFaceDetectionConfig(config)
+ .build();
+
+ // Create the request
+ AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.FACE_DETECTION)
+ .setVideoContext(context)
+ .build();
+
+ // asynchronously perform facial analysis on videos
+ OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ boolean faceFound = false;
+ // Display the results
+ for (VideoAnnotationResults results : response.get(600, TimeUnit.SECONDS)
+ .getAnnotationResultsList()) {
+ int faceCount = 0;
+ // Display the results for each face
+ for (FaceDetectionAnnotation faceAnnotation : results.getFaceDetectionAnnotationsList()) {
+ faceFound = true;
+ System.out.println("\nFace: " + ++faceCount);
+ // Each FaceDetectionAnnotation has only one segment.
+ for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
+ double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
+ + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
+ double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
+ + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
+ System.out.printf("Segment location: %.3fs to %.3f\n", startTime, endTime);
+ }
+
+ try {
+ // Print each frame's highest emotion
+ for (FaceDetectionFrame frame : faceAnnotation.getFramesList()) {
+ double timeOffset = frame.getTimeOffset().getSeconds()
+ + frame.getTimeOffset().getNanos() / 1e9;
+ float highestScore = 0.0f;
+ String emotion = "";
+ // Get the highest scoring emotion for the current frame
+ for (EmotionAttribute emotionAttribute : frame.getAttributes(0).getEmotionsList()) {
+ if (emotionAttribute.getScore() > highestScore) {
+ highestScore = emotionAttribute.getScore();
+ emotion = emotionAttribute.getEmotion().name();
+ }
+ }
+ System.out.printf("\t%4.2fs: %14s %4.3f\n", timeOffset, emotion, highestScore);
+ }
+
+ } catch (IndexOutOfBoundsException ioe) {
+ System.out.println("Could not retrieve frame: " + ioe.getMessage());
+ }
+ }
+ }
+
+ if (!faceFound) {
+ System.out.println("No faces detected in " + gcsUri);
+ }
+ }
+ }
+ // [END video_face_emotions]
+
+ // [START video_speech_transcription]
+ /**
+ * Transcribe speech from a video stored on GCS.
+ *
+ * @param gcsUri the path to the video file to analyze.
+ */
+ public static void speechTranscription(String gcsUri) throws Exception {
+ // Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
+ try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
+ // Set the language code
+ SpeechTranscriptionConfig config = SpeechTranscriptionConfig.newBuilder()
+ .setLanguageCode("en-US")
+ .build();
+
+ // Set the video context with the above configuration
+ VideoContext context = VideoContext.newBuilder()
+ .setSpeechTranscriptionConfig(config)
+ .build();
+
+ // Create the request
+ AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
+ .setInputUri(gcsUri)
+ .addFeatures(Feature.SPEECH_TRANSCRIPTION)
+ .setVideoContext(context)
+ .build();
+
+ // asynchronously perform speech transcription on videos
+ OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
+ client.annotateVideoAsync(request);
+
+ System.out.println("Waiting for operation to complete...");
+ // Display the results
+ for (VideoAnnotationResults results : response.get(180, TimeUnit.SECONDS)
+ .getAnnotationResultsList()) {
+ for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
+ try {
+ // Print the transcription
+ if (speechTranscription.getAlternativesCount() > 0) {
+ SpeechRecognitionAlternative alternative = speechTranscription.getAlternatives(0);
+
+ System.out.printf("Transcript: %s\n", alternative.getTranscript());
+ System.out.printf("Confidence: %.2f\n", alternative.getConfidence());
+
+ System.out.println("Word level information:");
+ for (WordInfo wordInfo : alternative.getWordsList()) {
+ double startTime = wordInfo.getStartTime().getSeconds()
+ + wordInfo.getStartTime().getNanos() / 1e9;
+ double endTime = wordInfo.getEndTime().getSeconds()
+ + wordInfo.getEndTime().getNanos() / 1e9;
+ System.out.printf("\t%4.2fs - %4.2fs: %s\n",
+ startTime, endTime, wordInfo.getWord());
+ }
+ } else {
+ System.out.println("No transcription found");
+ }
+ } catch (IndexOutOfBoundsException ioe) {
+ System.out.println("Could not retrieve frame: " + ioe.getMessage());
+ }
+ }
+ }
+ }
+ }
+ // [END video_speech_transcription]
+}
diff --git a/video/beta/src/test/java/com/example/video/DetectIT.java b/video/beta/src/test/java/com/example/video/DetectIT.java
new file mode 100644
index 00000000000..d6d78e5adc5
--- /dev/null
+++ b/video/beta/src/test/java/com/example/video/DetectIT.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.video;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for video analysis sample. */
+@RunWith(JUnit4.class)
+@SuppressWarnings("checkstyle:abbreviationaswordinname")
+public class DetectIT {
+
+ private ByteArrayOutputStream bout;
+ private PrintStream out;
+
+ static final String FACES_FILE_LOCATION =
+ "gs://java-docs-samples-testing/video/googlework_short.mp4";
+
+ @Before
+ public void setUp() {
+ bout = new ByteArrayOutputStream();
+ out = new PrintStream(bout);
+ System.setOut(out);
+ }
+
+ @After
+ public void tearDown() {
+ System.setOut(null);
+ }
+
+ @Test
+ public void testFacesBoundingBoxes() throws Exception {
+ String[] args = {"faces-bounding-boxes", FACES_FILE_LOCATION};
+ Detect.argsHelper(args);
+ String got = bout.toString();
+
+ assertThat(got).contains("Top:");
+ }
+
+ @Test
+ public void testFacesEmotions() throws Exception {
+ String[] args = {"faces-emotions", FACES_FILE_LOCATION};
+ Detect.argsHelper(args);
+ String got = bout.toString();
+
+ assertThat(got).contains("CONCENTRATION");
+ }
+
+ @Test
+ public void testSpeechTranscription() throws Exception {
+ String[] args = {"speech-transcription", FACES_FILE_LOCATION};
+ Detect.argsHelper(args);
+ String got = bout.toString();
+
+ assertThat(got).contains("cultural");
+ }
+}
diff --git a/video/cloud-client/README.md b/video/cloud-client/README.md
index 3d95d8a1436..21d95360769 100644
--- a/video/cloud-client/README.md
+++ b/video/cloud-client/README.md
@@ -20,7 +20,7 @@ Install [Maven](http://maven.apache.org/).
Build your project with:
```
-mvn clean compile assembly:single
+mvn clean package -DskipTests
```
### Analyze a video
@@ -36,38 +36,28 @@ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json
After you have authorized, you can analyze videos.
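+
+The commands below use the `Detect` Maven profile added to this sample's
+`pom.xml` in this change, so `exec-maven-plugin` runs `com.example.video.Detect`
+for you instead of the previously used assembly jar.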
-Detect Faces
-```
-java -cp target/video-google-cloud-samples-1.0.0-jar-with-dependencies.jar \
- com.example.video.Detect faces gs://demomaker/google_gmail.mp4
-```
-
Detect Labels
```
-java -cp target/video-google-cloud-samples-1.0.0-jar-with-dependencies.jar \
- com.example.video.Detect labels gs://demomaker/cat.mp4
+mvn exec:java -DDetect -Dexec.args="labels gs://demomaker/cat.mp4"
-java -cp target/video-google-cloud-samples-1.0.0-jar-with-dependencies.jar \
- com.example.video.Detect labels-file ./resources/cat.mp4
+mvn exec:java -DDetect -Dexec.args="labels-file ./resources/cat.mp4"
```
Detect Explicit content annotations
```
-java -cp target/video-google-cloud-samples-1.0.0-jar-with-dependencies.jar \
- com.example.video.Detect explicit-content gs://demomaker/gbikes_dinosaur.mp4
+mvn exec:java -DDetect -Dexec.args="explicit-content gs://demomaker/gbikes_dinosaur.mp4"
```
Detect Shots
```
-java -cp target/video-google-cloud-samples-1.0.0-jar-with-dependencies.jar \
- com.example.video.Detect shots gs://demomaker/gbikes_dinosaur.mp4
+mvn exec:java -DDetect -Dexec.args="shots gs://demomaker/gbikes_dinosaur.mp4"
```
From Windows, you may need to supply the file path differently, for example:
```
-java -cp target\\video-google-cloud-samples-1.0.0-jar-with-dependencies.jar com.example.video.Detect labels gs://demomaker/cat.mp4
+mvn exec:java -DDetect -Dexec.args="labels gs://demomaker/cat.mp4"
```
or
```
-java -cp target\\video-google-cloud-samples-1.0.0-jar-with-dependencies.jar com.example.video.Detect labels-file resources\\cat.mp4
+mvn exec:java -DDetect -Dexec.args="labels-file resources\\cat.mp4"
```
diff --git a/video/cloud-client/pom.xml b/video/cloud-client/pom.xml
index 45ce571cc09..58b8fa68885 100644
--- a/video/cloud-client/pom.xml
+++ b/video/cloud-client/pom.xml
@@ -37,15 +37,10 @@
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <version>20.0</version>
-    </dependency>
     <dependency>
       <groupId>com.google.cloud</groupId>
       <artifactId>google-cloud-video-intelligence</artifactId>
-      <version>0.40.0-beta</version>
+      <version>0.41.0-beta</version>
     </dependency>
@@ -80,4 +75,34 @@
+
+  <profiles>
+    <profile>
+      <id>Detect</id>
+      <activation>
+        <property>
+          <name>Detect</name>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>1.6.0</version>
+            <executions>
+              <execution>
+                <goals>
+                  <goal>java</goal>
+                </goals>
+              </execution>
+            </executions>
+            <configuration>
+              <mainClass>com.example.video.Detect</mainClass>
+              <cleanupDaemonThreads>false</cleanupDaemonThreads>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>
diff --git a/video/cloud-client/src/main/java/com/example/video/Detect.java b/video/cloud-client/src/main/java/com/example/video/Detect.java
index 0e5ad20d6c7..3cc879f37fa 100644
--- a/video/cloud-client/src/main/java/com/example/video/Detect.java
+++ b/video/cloud-client/src/main/java/com/example/video/Detect.java
@@ -22,13 +22,9 @@
import com.google.cloud.videointelligence.v1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1.Entity;
import com.google.cloud.videointelligence.v1.ExplicitContentFrame;
-import com.google.cloud.videointelligence.v1.FaceAnnotation;
-import com.google.cloud.videointelligence.v1.FaceFrame;
-import com.google.cloud.videointelligence.v1.FaceSegment;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.LabelAnnotation;
import com.google.cloud.videointelligence.v1.LabelSegment;
-import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1.VideoSegment;
@@ -42,12 +38,10 @@
public class Detect {
/**
- * Detects entities,sentiment and syntax in a document using the Natural Language API.
+ * Detects labels, shots, and explicit content in a video using the Video Intelligence API.
* @param args specifies features to detect and the path to the video on Google Cloud Storage.
- *
- * @throws IOException on Input/Output errors.
*/
- public static void main(String[] args) throws Exception {
+ public static void main(String[] args) {
try {
argsHelper(args);
} catch (Exception e) {
@@ -68,7 +62,7 @@ public static void argsHelper(String[] args) throws Exception {
System.out.printf(
"\tjava %s \"\" \"\"\n"
+ "Commands:\n"
- + "\tfaces | labels | shots\n"
+ + "\tlabels | shots\n"
+ "Path:\n\tA URI for a Cloud Storage resource (gs://...)\n"
+ "Examples: ",
Detect.class.getCanonicalName());
@@ -77,9 +71,6 @@ public static void argsHelper(String[] args) throws Exception {
String command = args[0];
String path = args.length > 1 ? args[1] : "";
- if (command.equals("faces")) {
- analyzeFaces(path);
- }
if (command.equals("labels")) {
analyzeLabels(path);
}
@@ -94,68 +85,6 @@ public static void argsHelper(String[] args) throws Exception {
}
}
- /**
- * Performs facial analysis on the video at the provided Cloud Storage path.
- *
- * @param gcsUri the path to the video file to analyze.
- */
- public static void analyzeFaces(String gcsUri) throws Exception {
- // [START detect_faces]
- // Instantiate a com.google.cloud.videointelligence.v1.VideoIntelligenceServiceClient
- try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
- AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
- .setInputUri(gcsUri)
- .addFeatures(Feature.FACE_DETECTION)
- .build();
-
- // asynchronously perform facial analysis on videos
- OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
- client.annotateVideoAsync(request);
-
- System.out.println("Waiting for operation to complete...");
- boolean faceFound = false;
- for (VideoAnnotationResults results : response.get().getAnnotationResultsList()) {
- int faceCount = 0;
- for (FaceAnnotation faceAnnotation : results.getFaceAnnotationsList()) {
- faceFound = true;
- System.out.println("Face: " + ++faceCount);
- System.out.println("Thumbnail size: " + faceAnnotation.getThumbnail().size());
- for (FaceSegment segment : faceAnnotation.getSegmentsList()) {
- double startTime = segment.getSegment().getStartTimeOffset().getSeconds()
- + segment.getSegment().getStartTimeOffset().getNanos() / 1e9;
- double endTime = segment.getSegment().getEndTimeOffset().getSeconds()
- + segment.getSegment().getEndTimeOffset().getNanos() / 1e9;
- System.out.printf("Segment location : %.3f:%.3f\n", startTime, endTime);
- }
- try {
- // printing info on the first frame
- if (faceAnnotation.getFramesCount() > 0) {
- System.out.println(faceAnnotation.getFramesList().get(0));
- FaceFrame frame = faceAnnotation.getFrames(0);
- double timeOffset = frame.getTimeOffset().getSeconds()
- + frame.getTimeOffset().getNanos() / 1e9;
- System.out.printf("First frame time offset: %.3fs", timeOffset);
- // print info on the first normalized bounding box
- NormalizedBoundingBox box = frame.getNormalizedBoundingBoxesList().get(0);
- System.out.printf("Left: %.3f\n", box.getLeft());
- System.out.printf("Top: %.3f\n", box.getTop());
- System.out.printf("Bottom: %.3f\n", box.getBottom());
- System.out.printf("Right: %.3f\n", box.getRight());
- } else {
- System.out.println("No frames found in annotation");
- }
- } catch (IndexOutOfBoundsException ioe) {
- System.out.println("Could not retrieve frame: " + ioe.getMessage());
- }
- }
- }
- if (!faceFound) {
- System.out.println("No faces detected in " + gcsUri);
- }
- // [END detect_faces]
- }
- }
-
/**
* Performs label analysis on the video at the provided Cloud Storage path.
*
diff --git a/video/cloud-client/src/test/java/com/example/video/DetectIT.java b/video/cloud-client/src/test/java/com/example/video/DetectIT.java
index a0d040575fc..a86e119fbf3 100644
--- a/video/cloud-client/src/test/java/com/example/video/DetectIT.java
+++ b/video/cloud-client/src/test/java/com/example/video/DetectIT.java
@@ -33,7 +33,6 @@ public class DetectIT {
private ByteArrayOutputStream bout;
private PrintStream out;
- static final String FACES_FILE_LOCATION = "gs://demomaker/gbike.mp4";
static final String LABEL_FILE_LOCATION = "gs://demomaker/cat.mp4";
static final String SHOTS_FILE_LOCATION = "gs://demomaker/gbikes_dinosaur.mp4";
static final String EXPLICIT_CONTENT_LOCATION = "gs://demomaker/cat.mp4";
@@ -50,22 +49,6 @@ public void tearDown() {
System.setOut(null);
}
- @Test
- public void testFaces() throws Exception {
- String[] args = {"faces", FACES_FILE_LOCATION};
- Detect.argsHelper(args);
- String got = bout.toString();
- // Model changes have caused the results from face detection to change to an
- // empty response (e.g. no faces detected) so we check either for an empty
- // response or that a response with face thumbnails was returned.
- if (got.indexOf("No faces detected") == -1) {
- assertThat(got).contains("Thumbnail size:");
- } else {
- // No faces detected, verify sample reports this.
- assertThat(got).contains("No faces detected in " + FACES_FILE_LOCATION);
- }
- }
-
@Test
public void testLabels() throws Exception {
String[] args = {"labels", LABEL_FILE_LOCATION};