From e1a8e02dc6e61a9aa6dceb189bc79a27feb546df Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 26 Jul 2019 13:06:25 -0400 Subject: [PATCH 001/209] Preserve fixed enum references during synth. (#8797) See PR #8779. Supersedes PR #8794. --- synth.metadata | 10 +++++----- synth.py | 6 ++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/synth.metadata b/synth.metadata index 665f7bc5..9cc2587d 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-07-03T12:30:47.460808Z", + "updateTime": "2019-07-26T16:41:45.092048Z", "sources": [ { "generator": { "name": "artman", - "version": "0.29.3", - "dockerImage": "googleapis/artman@sha256:8900f94a81adaab0238965aa8a7b3648791f4f3a95ee65adc6a56cfcc3753101" + "version": "0.31.0", + "dockerImage": "googleapis/artman@sha256:9aed6bbde54e26d2fcde7aa86d9f64c0278f741e58808c46573e488cbf6098f0" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "69916b6ffbb7717fa009033351777d0c9909fb79", - "internalRef": "256241904" + "sha": "0906a9e74f4db789aee7fc5016ab828d3dd24f03", + "internalRef": "260061471" } }, { diff --git a/synth.py b/synth.py index d8acb699..6166eca1 100644 --- a/synth.py +++ b/synth.py @@ -45,4 +45,10 @@ templated_files = common.py_library(unit_cov_level=97, cov_level=100) s.move(templated_files) +s.replace( + f"google/cloud/**/gapic/language_service_client.py", + r"types\.EncodingType", + "enums.EncodingType", +) + s.shell.run(["nox", "-s", "blacken"], hide_output=False) From 2e285a9f26e9d590fccaead04e5f1f1d805bc472 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Mon, 29 Jul 2019 12:53:23 -0700 Subject: [PATCH 002/209] Update intersphinx mapping for requests. (#8805) --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 44a40633..bafcc585 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -342,7 +342,7 @@ None, ), "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://docs.python-requests.org/en/master/", None), + "requests": ("https://2.python-requests.org/en/master/", None), "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), } From f94940e606cecbcbc804a436d0a66258a3055d38 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 6 Aug 2019 09:32:06 -0700 Subject: [PATCH 003/209] Remove send/recv msg size limit (via synth). 
(#8960) --- .../gapic/language_service_client.py | 24 +++++++++---------- .../language_service_grpc_transport.py | 9 ++++++- .../gapic/language_service_client.py | 24 +++++++++---------- .../language_service_grpc_transport.py | 9 ++++++- synth.metadata | 10 ++++---- 5 files changed, 45 insertions(+), 31 deletions(-) diff --git a/google/cloud/language_v1/gapic/language_service_client.py b/google/cloud/language_v1/gapic/language_service_client.py index 6502473b..407ef044 100644 --- a/google/cloud/language_v1/gapic/language_service_client.py +++ b/google/cloud/language_v1/gapic/language_service_client.py @@ -211,8 +211,8 @@ def analyze_sentiment( message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.enums.EncodingType): The encoding type used by the API to calculate sentence offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -277,8 +277,8 @@ def analyze_entities( message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.enums.EncodingType): The encoding type used by the API to calculate offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -342,8 +342,8 @@ def analyze_entity_sentiment( message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.enums.EncodingType): The encoding type used by the API to calculate offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -408,8 +408,8 @@ def analyze_syntax( message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.enums.EncodingType): The encoding type used by the API to calculate offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -470,8 +470,8 @@ def classify_text( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1.types.Document` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. 
+ to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -541,8 +541,8 @@ def annotate_text( message :class:`~google.cloud.language_v1.types.Features` encoding_type (~google.cloud.language_v1.enums.EncodingType): The encoding type used by the API to calculate offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. diff --git a/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py b/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py index 744bb125..b8a13472 100644 --- a/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py +++ b/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py @@ -61,7 +61,14 @@ def __init__( # Create the channel. if channel is None: - channel = self.create_channel(address=address, credentials=credentials) + channel = self.create_channel( + address=address, + credentials=credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + }.items(), + ) self._channel = channel diff --git a/google/cloud/language_v1beta2/gapic/language_service_client.py b/google/cloud/language_v1beta2/gapic/language_service_client.py index dec66016..dcb8e89d 100644 --- a/google/cloud/language_v1beta2/gapic/language_service_client.py +++ b/google/cloud/language_v1beta2/gapic/language_service_client.py @@ -214,8 +214,8 @@ def analyze_sentiment( encoding_type (~google.cloud.language_v1beta2.enums.EncodingType): The encoding type used by the API to calculate sentence offsets for the sentence sentiment. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -280,8 +280,8 @@ def analyze_entities( message :class:`~google.cloud.language_v1beta2.types.Document` encoding_type (~google.cloud.language_v1beta2.enums.EncodingType): The encoding type used by the API to calculate offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -345,8 +345,8 @@ def analyze_entity_sentiment( message :class:`~google.cloud.language_v1beta2.types.Document` encoding_type (~google.cloud.language_v1beta2.enums.EncodingType): The encoding type used by the API to calculate offsets. 
retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -411,8 +411,8 @@ def analyze_syntax( message :class:`~google.cloud.language_v1beta2.types.Document` encoding_type (~google.cloud.language_v1beta2.enums.EncodingType): The encoding type used by the API to calculate offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -473,8 +473,8 @@ def classify_text( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1beta2.types.Document` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -544,8 +544,8 @@ def annotate_text( message :class:`~google.cloud.language_v1beta2.types.Features` encoding_type (~google.cloud.language_v1beta2.enums.EncodingType): The encoding type used by the API to calculate offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. diff --git a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py index 89d44333..df140c2f 100644 --- a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py +++ b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py @@ -58,7 +58,14 @@ def __init__( # Create the channel. 
if channel is None: - channel = self.create_channel(address=address, credentials=credentials) + channel = self.create_channel( + address=address, + credentials=credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + }.items(), + ) self._channel = channel diff --git a/synth.metadata b/synth.metadata index 9cc2587d..1456f20a 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-07-26T16:41:45.092048Z", + "updateTime": "2019-08-06T12:30:47.420536Z", "sources": [ { "generator": { "name": "artman", - "version": "0.31.0", - "dockerImage": "googleapis/artman@sha256:9aed6bbde54e26d2fcde7aa86d9f64c0278f741e58808c46573e488cbf6098f0" + "version": "0.32.1", + "dockerImage": "googleapis/artman@sha256:a684d40ba9a4e15946f5f2ca6b4bd9fe301192f522e9de4fff622118775f309b" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "0906a9e74f4db789aee7fc5016ab828d3dd24f03", - "internalRef": "260061471" + "sha": "e699b0cba64ffddfae39633417180f1f65875896", + "internalRef": "261759677" } }, { From fe2426ae8cb2389d0a634061deb93f5c57a14ffa Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Fri, 16 Aug 2019 13:25:32 -0700 Subject: [PATCH 004/209] Remove compatability badges from READMEs. (#9035) --- README.rst | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/README.rst b/README.rst index 62f10e21..dc904d45 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,7 @@ Python Client for Google Cloud Natural Language =============================================== -|GA| |pypi| |versions| |compat_check_pypi| |compat_check_github| +|GA| |pypi| |versions| The `Google Cloud Natural Language`_ API can be used to reveal the structure and meaning of text via powerful machine @@ -22,10 +22,6 @@ with your document storage on Google Cloud Storage. :target: https://pypi.org/project/google-cloud-language/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-language.svg :target: https://pypi.org/project/google-cloud-language/ -.. |compat_check_pypi| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=google-cloud-language - :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=google-cloud-language -.. |compat_check_github| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=git%2Bgit%3A//github.com/googleapis/google-cloud-python.git%23subdirectory%3Dlanguage - :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=git%2Bgit%3A//github.com/googleapis/google-cloud-python.git%23subdirectory%3Dlanguage .. _Google Cloud Natural Language: https://cloud.google.com/natural-language/ .. _Product Documentation: https://cloud.google.com/language/docs .. 
_Client Library Documentation: https://googleapis.dev/python/language/latest From 6b15df6091378ed444642fc813d49d8bbbb6365d Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Tue, 27 Aug 2019 16:35:22 -0700 Subject: [PATCH 005/209] Docs: Remove CI for gh-pages, use googleapis.dev for api_core refs. (#9085) --- docs/conf.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index bafcc585..198fdcc5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -337,10 +337,7 @@ "gax": ("https://gax-python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ( - "https://googleapis.github.io/google-cloud-python/latest", - None, - ), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), "grpc": ("https://grpc.io/grpc/python/", None), "requests": ("https://2.python-requests.org/en/master/", None), "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), From 3525de352a30419496a68fd1d273020402087b03 Mon Sep 17 00:00:00 2001 From: Ryan Yuan Date: Wed, 4 Sep 2019 06:33:01 +1000 Subject: [PATCH 006/209] Fix broken links in docs. (#9148) --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index dc904d45..48558dd7 100644 --- a/README.rst +++ b/README.rst @@ -23,7 +23,7 @@ with your document storage on Google Cloud Storage. .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-language.svg :target: https://pypi.org/project/google-cloud-language/ .. _Google Cloud Natural Language: https://cloud.google.com/natural-language/ -.. _Product Documentation: https://cloud.google.com/language/docs +.. _Product Documentation: https://cloud.google.com/natural-language/docs .. _Client Library Documentation: https://googleapis.dev/python/language/latest .. note:: From bba95c19cc323cefa14d21256b30a1e5ccf91829 Mon Sep 17 00:00:00 2001 From: Rebecca Taylor Date: Wed, 4 Sep 2019 14:19:33 -0700 Subject: [PATCH 007/209] Add generated code samples. 
(#9153) --- noxfile.py | 20 +++ samples/v1/language_classify_gcs.py | 85 +++++++++++++ samples/v1/language_classify_text.py | 83 +++++++++++++ samples/v1/language_entities_gcs.py | 105 ++++++++++++++++ samples/v1/language_entities_text.py | 100 +++++++++++++++ samples/v1/language_entity_sentiment_gcs.py | 109 ++++++++++++++++ samples/v1/language_entity_sentiment_text.py | 106 ++++++++++++++++ samples/v1/language_sentiment_gcs.py | 95 ++++++++++++++ samples/v1/language_sentiment_text.py | 90 ++++++++++++++ samples/v1/language_syntax_gcs.py | 117 ++++++++++++++++++ samples/v1/language_syntax_text.py | 112 +++++++++++++++++ samples/v1/test/analyzing_entities.test.yaml | 101 +++++++++++++++ .../test/analyzing_entity_sentiment.test.yaml | 63 ++++++++++ samples/v1/test/analyzing_sentiment.test.yaml | 74 +++++++++++ samples/v1/test/analyzing_syntax.test.yaml | 72 +++++++++++ samples/v1/test/classifying_content.test.yaml | 51 ++++++++ samples/v1/test/samples.manifest.yaml | 38 ++++++ synth.metadata | 14 +-- synth.py | 2 + 19 files changed, 1426 insertions(+), 11 deletions(-) create mode 100644 samples/v1/language_classify_gcs.py create mode 100644 samples/v1/language_classify_text.py create mode 100644 samples/v1/language_entities_gcs.py create mode 100644 samples/v1/language_entities_text.py create mode 100644 samples/v1/language_entity_sentiment_gcs.py create mode 100644 samples/v1/language_entity_sentiment_text.py create mode 100644 samples/v1/language_sentiment_gcs.py create mode 100644 samples/v1/language_sentiment_text.py create mode 100644 samples/v1/language_syntax_gcs.py create mode 100644 samples/v1/language_syntax_text.py create mode 100644 samples/v1/test/analyzing_entities.test.yaml create mode 100644 samples/v1/test/analyzing_entity_sentiment.test.yaml create mode 100644 samples/v1/test/analyzing_sentiment.test.yaml create mode 100644 samples/v1/test/analyzing_syntax.test.yaml create mode 100644 samples/v1/test/classifying_content.test.yaml create mode 100644 samples/v1/test/samples.manifest.yaml diff --git a/noxfile.py b/noxfile.py index a2eefbb6..ce6ee932 100644 --- a/noxfile.py +++ b/noxfile.py @@ -125,6 +125,26 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) +@nox.session(python=["3.7"]) +def samples(session): + """Run the samples test suite.""" + # Sanity check: Only run tests if the environment variable is set. + if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): + session.skip("Credentials must be set via environment variable") + + samples_path = "samples" + if not os.path.exists(samples_path): + session.skip("Samples not found.") + + session.install("pyyaml") + session.install("sample-tester") + for local_dep in LOCAL_DEPS: + session.install("-e", local_dep) + session.install("-e", ".") + + session.run("sample-tester", samples_path, *session.posargs) + + @nox.session(python="3.7") def cover(session): """Run the final coverage report. diff --git a/samples/v1/language_classify_gcs.py b/samples/v1/language_classify_gcs.py new file mode 100644 index 00000000..db595801 --- /dev/null +++ b/samples/v1/language_classify_gcs.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_classify_gcs") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Classify Content (GCS) +# description: Classifying Content in text file stored in Cloud Storage +# usage: python3 samples/v1/language_classify_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/classify-entertainment.txt"] + +# [START language_classify_gcs] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_classify_text(gcs_content_uri): + """ + Classifying Content in text file stored in Cloud Storage + + Args: + gcs_content_uri Google Cloud Storage URI where the file content is located. + e.g. gs://[Your Bucket]/[Path to File] + The text file must include at least 20 words. + """ + + client = language_v1.LanguageServiceClient() + + # gcs_content_uri = 'gs://cloud-samples-data/language/classify-entertainment.txt' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + + response = client.classify_text(document) + # Loop through classified categories returned from the API + for category in response.categories: + # Get the name of the category representing the document. + # See the predefined taxonomy of categories: + # https://cloud.google.com/natural-language/docs/categories + print(u"Category name: {}".format(category.name)) + # Get the confidence. Number representing how certain the classifier + # is that this category represents the provided text. + print(u"Confidence: {}".format(category.confidence)) + + +# [END language_classify_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/classify-entertainment.txt", + ) + args = parser.parse_args() + + sample_classify_text(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_classify_text.py b/samples/v1/language_classify_text.py new file mode 100644 index 00000000..2ecfd70b --- /dev/null +++ b/samples/v1/language_classify_text.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_classify_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Classify Content +# description: Classifying Content in a String +# usage: python3 samples/v1/language_classify_text.py [--text_content "That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows."] + +# [START language_classify_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_classify_text(text_content): + """ + Classifying Content in a String + + Args: + text_content The text content to analyze. Must include at least 20 words. + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"content": text_content, "type": type_, "language": language} + + response = client.classify_text(document) + # Loop through classified categories returned from the API + for category in response.categories: + # Get the name of the category representing the document. + # See the predefined taxonomy of categories: + # https://cloud.google.com/natural-language/docs/categories + print(u"Category name: {}".format(category.name)) + # Get the confidence. Number representing how certain the classifier + # is that this category represents the provided text. + print(u"Confidence: {}".format(category.confidence)) + + +# [END language_classify_text] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--text_content", + type=str, + default="That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.", + ) + args = parser.parse_args() + + sample_classify_text(args.text_content) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_entities_gcs.py b/samples/v1/language_entities_gcs.py new file mode 100644 index 00000000..edd3238a --- /dev/null +++ b/samples/v1/language_entities_gcs.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_entities_gcs") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Entities (GCS) +# description: Analyzing Entities in text file stored in Cloud Storage +# usage: python3 samples/v1/language_entities_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/entity.txt"] + +# [START language_entities_gcs] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_entities(gcs_content_uri): + """ + Analyzing Entities in text file stored in Cloud Storage + + Args: + gcs_content_uri Google Cloud Storage URI where the file content is located. + e.g. gs://[Your Bucket]/[Path to File] + """ + + client = language_v1.LanguageServiceClient() + + # gcs_content_uri = 'gs://cloud-samples-data/language/entity.txt' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_entities(document, encoding_type=encoding_type) + # Loop through entitites returned from the API + for entity in response.entities: + print(u"Representative name for the entity: {}".format(entity.name)) + # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al + print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + # Get the salience score associated with the entity in the [0, 1.0] range + print(u"Salience score: {}".format(entity.salience)) + # Loop over the metadata associated with entity. For many known entities, + # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). + # Some entity types may have additional metadata, e.g. ADDRESS entities + # may have metadata for the address street_name, postal_code, et al. + for metadata_name, metadata_value in entity.metadata.items(): + print(u"{}: {}".format(metadata_name, metadata_value)) + + # Loop over the mentions of this entity in the input document. + # The API currently supports proper noun mentions. + for mention in entity.mentions: + print(u"Mention text: {}".format(mention.text.content)) + # Get the mention type, e.g. PROPER for proper noun + print( + u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. 
+ print(u"Language of the text: {}".format(response.language)) + + +# [END language_entities_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/entity.txt", + ) + args = parser.parse_args() + + sample_analyze_entities(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_entities_text.py b/samples/v1/language_entities_text.py new file mode 100644 index 00000000..2948f44d --- /dev/null +++ b/samples/v1/language_entities_text.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_entities_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Entities +# description: Analyzing Entities in a String +# usage: python3 samples/v1/language_entities_text.py [--text_content "California is a state."] + +# [START language_entities_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_entities(text_content): + """ + Analyzing Entities in a String + + Args: + text_content The text content to analyze + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'California is a state.' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"content": text_content, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_entities(document, encoding_type=encoding_type) + # Loop through entitites returned from the API + for entity in response.entities: + print(u"Representative name for the entity: {}".format(entity.name)) + # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al + print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + # Get the salience score associated with the entity in the [0, 1.0] range + print(u"Salience score: {}".format(entity.salience)) + # Loop over the metadata associated with entity. For many known entities, + # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). + # Some entity types may have additional metadata, e.g. ADDRESS entities + # may have metadata for the address street_name, postal_code, et al. + for metadata_name, metadata_value in entity.metadata.items(): + print(u"{}: {}".format(metadata_name, metadata_value)) + + # Loop over the mentions of this entity in the input document. + # The API currently supports proper noun mentions. 
+ for mention in entity.mentions: + print(u"Mention text: {}".format(mention.text.content)) + # Get the mention type, e.g. PROPER for proper noun + print( + u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_entities_text] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--text_content", type=str, default="California is a state.") + args = parser.parse_args() + + sample_analyze_entities(args.text_content) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_entity_sentiment_gcs.py b/samples/v1/language_entity_sentiment_gcs.py new file mode 100644 index 00000000..87fb74de --- /dev/null +++ b/samples/v1/language_entity_sentiment_gcs.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_entity_sentiment_gcs") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Entity Sentiment (GCS) +# description: Analyzing Entity Sentiment in text file stored in Cloud Storage +# usage: python3 samples/v1/language_entity_sentiment_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/entity-sentiment.txt"] + +# [START language_entity_sentiment_gcs] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_entity_sentiment(gcs_content_uri): + """ + Analyzing Entity Sentiment in text file stored in Cloud Storage + + Args: + gcs_content_uri Google Cloud Storage URI where the file content is located. + e.g. gs://[Your Bucket]/[Path to File] + """ + + client = language_v1.LanguageServiceClient() + + # gcs_content_uri = 'gs://cloud-samples-data/language/entity-sentiment.txt' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_entity_sentiment(document, encoding_type=encoding_type) + # Loop through entitites returned from the API + for entity in response.entities: + print(u"Representative name for the entity: {}".format(entity.name)) + # Get entity type, e.g. 
PERSON, LOCATION, ADDRESS, NUMBER, et al + print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + # Get the salience score associated with the entity in the [0, 1.0] range + print(u"Salience score: {}".format(entity.salience)) + # Get the aggregate sentiment expressed for this entity in the provided document. + sentiment = entity.sentiment + print(u"Entity sentiment score: {}".format(sentiment.score)) + print(u"Entity sentiment magnitude: {}".format(sentiment.magnitude)) + # Loop over the metadata associated with entity. For many known entities, + # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). + # Some entity types may have additional metadata, e.g. ADDRESS entities + # may have metadata for the address street_name, postal_code, et al. + for metadata_name, metadata_value in entity.metadata.items(): + print(u"{} = {}".format(metadata_name, metadata_value)) + + # Loop over the mentions of this entity in the input document. + # The API currently supports proper noun mentions. + for mention in entity.mentions: + print(u"Mention text: {}".format(mention.text.content)) + # Get the mention type, e.g. PROPER for proper noun + print( + u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_entity_sentiment_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/entity-sentiment.txt", + ) + args = parser.parse_args() + + sample_analyze_entity_sentiment(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_entity_sentiment_text.py b/samples/v1/language_entity_sentiment_text.py new file mode 100644 index 00000000..6f914980 --- /dev/null +++ b/samples/v1/language_entity_sentiment_text.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_entity_sentiment_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Entity Sentiment +# description: Analyzing Entity Sentiment in a String +# usage: python3 samples/v1/language_entity_sentiment_text.py [--text_content "Grapes are good. 
Bananas are bad."] + +# [START language_entity_sentiment_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_entity_sentiment(text_content): + """ + Analyzing Entity Sentiment in a String + + Args: + text_content The text content to analyze + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'Grapes are good. Bananas are bad.' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"content": text_content, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_entity_sentiment(document, encoding_type=encoding_type) + # Loop through entitites returned from the API + for entity in response.entities: + print(u"Representative name for the entity: {}".format(entity.name)) + # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al + print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + # Get the salience score associated with the entity in the [0, 1.0] range + print(u"Salience score: {}".format(entity.salience)) + # Get the aggregate sentiment expressed for this entity in the provided document. + sentiment = entity.sentiment + print(u"Entity sentiment score: {}".format(sentiment.score)) + print(u"Entity sentiment magnitude: {}".format(sentiment.magnitude)) + # Loop over the metadata associated with entity. For many known entities, + # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). + # Some entity types may have additional metadata, e.g. ADDRESS entities + # may have metadata for the address street_name, postal_code, et al. + for metadata_name, metadata_value in entity.metadata.items(): + print(u"{} = {}".format(metadata_name, metadata_value)) + + # Loop over the mentions of this entity in the input document. + # The API currently supports proper noun mentions. + for mention in entity.mentions: + print(u"Mention text: {}".format(mention.text.content)) + # Get the mention type, e.g. PROPER for proper noun + print( + u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_entity_sentiment_text] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--text_content", type=str, default="Grapes are good. Bananas are bad." + ) + args = parser.parse_args() + + sample_analyze_entity_sentiment(args.text_content) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_sentiment_gcs.py b/samples/v1/language_sentiment_gcs.py new file mode 100644 index 00000000..36600966 --- /dev/null +++ b/samples/v1/language_sentiment_gcs.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_sentiment_gcs") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Sentiment (GCS) +# description: Analyzing Sentiment in text file stored in Cloud Storage +# usage: python3 samples/v1/language_sentiment_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/sentiment-positive.txt"] + +# [START language_sentiment_gcs] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_sentiment(gcs_content_uri): + """ + Analyzing Sentiment in text file stored in Cloud Storage + + Args: + gcs_content_uri Google Cloud Storage URI where the file content is located. + e.g. gs://[Your Bucket]/[Path to File] + """ + + client = language_v1.LanguageServiceClient() + + # gcs_content_uri = 'gs://cloud-samples-data/language/sentiment-positive.txt' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_sentiment(document, encoding_type=encoding_type) + # Get overall sentiment of the input document + print(u"Document sentiment score: {}".format(response.document_sentiment.score)) + print( + u"Document sentiment magnitude: {}".format( + response.document_sentiment.magnitude + ) + ) + # Get sentiment for all sentences in the document + for sentence in response.sentences: + print(u"Sentence text: {}".format(sentence.text.content)) + print(u"Sentence sentiment score: {}".format(sentence.sentiment.score)) + print(u"Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude)) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_sentiment_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/sentiment-positive.txt", + ) + args = parser.parse_args() + + sample_analyze_sentiment(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_sentiment_text.py b/samples/v1/language_sentiment_text.py new file mode 100644 index 00000000..c1325678 --- /dev/null +++ b/samples/v1/language_sentiment_text.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_sentiment_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Sentiment +# description: Analyzing Sentiment in a String +# usage: python3 samples/v1/language_sentiment_text.py [--text_content "I am so happy and joyful."] + +# [START language_sentiment_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_sentiment(text_content): + """ + Analyzing Sentiment in a String + + Args: + text_content The text content to analyze + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'I am so happy and joyful.' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"content": text_content, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_sentiment(document, encoding_type=encoding_type) + # Get overall sentiment of the input document + print(u"Document sentiment score: {}".format(response.document_sentiment.score)) + print( + u"Document sentiment magnitude: {}".format( + response.document_sentiment.magnitude + ) + ) + # Get sentiment for all sentences in the document + for sentence in response.sentences: + print(u"Sentence text: {}".format(sentence.text.content)) + print(u"Sentence sentiment score: {}".format(sentence.sentiment.score)) + print(u"Sentence sentiment magnitude: {}".format(sentence.sentiment.magnitude)) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_sentiment_text] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--text_content", type=str, default="I am so happy and joyful.") + args = parser.parse_args() + + sample_analyze_sentiment(args.text_content) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_syntax_gcs.py b/samples/v1/language_syntax_gcs.py new file mode 100644 index 00000000..74d88787 --- /dev/null +++ b/samples/v1/language_syntax_gcs.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_syntax_gcs") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Syntax (GCS) +# description: Analyzing Syntax in text file stored in Cloud Storage +# usage: python3 samples/v1/language_syntax_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/syntax-sentence.txt"] + +# [START language_syntax_gcs] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_syntax(gcs_content_uri): + """ + Analyzing Syntax in text file stored in Cloud Storage + + Args: + gcs_content_uri Google Cloud Storage URI where the file content is located. + e.g. gs://[Your Bucket]/[Path to File] + """ + + client = language_v1.LanguageServiceClient() + + # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_syntax(document, encoding_type=encoding_type) + # Loop through tokens returned from the API + for token in response.tokens: + # Get the text content of this token. Usually a word or punctuation. + text = token.text + print(u"Token text: {}".format(text.content)) + print( + u"Location of this token in overall document: {}".format(text.begin_offset) + ) + # Get the part of speech information for this token. + # Parts of spech are as defined in: + # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf + part_of_speech = token.part_of_speech + # Get the tag, e.g. NOUN, ADJ for Adjective, et al. + print( + u"Part of Speech tag: {}".format( + enums.PartOfSpeech.Tag(part_of_speech.tag).name + ) + ) + # Get the voice, e.g. ACTIVE or PASSIVE + print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) + # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. + print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name)) + # See API reference for additional Part of Speech information available + # Get the lemma of the token. Wikipedia lemma description + # https://en.wikipedia.org/wiki/Lemma_(morphology) + print(u"Lemma: {}".format(token.lemma)) + # Get the dependency tree parse information for this token. 
+ # For more information on dependency labels: + # http://www.aclweb.org/anthology/P13-2017 + dependency_edge = token.dependency_edge + print(u"Head token index: {}".format(dependency_edge.head_token_index)) + print( + u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_syntax_gcs] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--gcs_content_uri", + type=str, + default="gs://cloud-samples-data/language/syntax-sentence.txt", + ) + args = parser.parse_args() + + sample_analyze_syntax(args.gcs_content_uri) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/language_syntax_text.py b/samples/v1/language_syntax_text.py new file mode 100644 index 00000000..4b11d4d0 --- /dev/null +++ b/samples/v1/language_syntax_text.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "language_syntax_text") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +# sample-metadata +# title: Analyzing Syntax +# description: Analyzing Syntax in a String +# usage: python3 samples/v1/language_syntax_text.py [--text_content "This is a short sentence."] + +# [START language_syntax_text] +from google.cloud import language_v1 +from google.cloud.language_v1 import enums + + +def sample_analyze_syntax(text_content): + """ + Analyzing Syntax in a String + + Args: + text_content The text content to analyze + """ + + client = language_v1.LanguageServiceClient() + + # text_content = 'This is a short sentence.' + + # Available types: PLAIN_TEXT, HTML + type_ = enums.Document.Type.PLAIN_TEXT + + # Optional. If not specified, the language is automatically detected. + # For list of supported languages: + # https://cloud.google.com/natural-language/docs/languages + language = "en" + document = {"content": text_content, "type": type_, "language": language} + + # Available values: NONE, UTF8, UTF16, UTF32 + encoding_type = enums.EncodingType.UTF8 + + response = client.analyze_syntax(document, encoding_type=encoding_type) + # Loop through tokens returned from the API + for token in response.tokens: + # Get the text content of this token. Usually a word or punctuation. + text = token.text + print(u"Token text: {}".format(text.content)) + print( + u"Location of this token in overall document: {}".format(text.begin_offset) + ) + # Get the part of speech information for this token. 
+ # Parts of spech are as defined in: + # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf + part_of_speech = token.part_of_speech + # Get the tag, e.g. NOUN, ADJ for Adjective, et al. + print( + u"Part of Speech tag: {}".format( + enums.PartOfSpeech.Tag(part_of_speech.tag).name + ) + ) + # Get the voice, e.g. ACTIVE or PASSIVE + print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) + # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. + print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name)) + # See API reference for additional Part of Speech information available + # Get the lemma of the token. Wikipedia lemma description + # https://en.wikipedia.org/wiki/Lemma_(morphology) + print(u"Lemma: {}".format(token.lemma)) + # Get the dependency tree parse information for this token. + # For more information on dependency labels: + # http://www.aclweb.org/anthology/P13-2017 + dependency_edge = token.dependency_edge + print(u"Head token index: {}".format(dependency_edge.head_token_index)) + print( + u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name) + ) + + # Get the language of the text, which will be the same as + # the language specified in the request or, if not specified, + # the automatically-detected language. + print(u"Language of the text: {}".format(response.language)) + + +# [END language_syntax_text] + + +def main(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--text_content", type=str, default="This is a short sentence.") + args = parser.parse_args() + + sample_analyze_syntax(args.text_content) + + +if __name__ == "__main__": + main() diff --git a/samples/v1/test/analyzing_entities.test.yaml b/samples/v1/test/analyzing_entities.test.yaml new file mode 100644 index 00000000..5fafd01e --- /dev/null +++ b/samples/v1/test/analyzing_entities.test.yaml @@ -0,0 +1,101 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Analyzing Entities [code sample tests]" + cases: + + - name: language_entities_text - Analyzing the Entities of a text string (default value) + spec: + # Default value: "California is a state." + - call: {sample: language_entities_text} + - assert_contains: + - {literal: "Representative name for the entity: California"} + - {literal: "Entity type: LOCATION"} + - {literal: "Salience score:"} + - {literal: "wikipedia_url: https://en.wikipedia.org/wiki/California"} + - {literal: "mid: /m/01n7q"} + - {literal: "Mention text: California"} + - {literal: "Mention type: PROPER"} + - {literal: "Mention text: state"} + - {literal: "Mention type: COMMON"} + - {literal: "Language of the text: en"} + + - name: language_entities_text - Analyzing the Entities of a text string (*custom value*) + spec: + # Custom value: "Alice is a person. She lives in California." + - call: + sample: language_entities_text + params: + text_content: {literal: "Alice is a person. 
She lives in California."} + - assert_contains: + - {literal: "Representative name for the entity: Alice"} + - {literal: "Entity type: PERSON"} + - {literal: "Mention text: Alice"} + - {literal: "Mention type: PROPER"} + - {literal: "Mention text: person"} + - {literal: "Mention type: COMMON"} + - {literal: "Representative name for the entity: California"} + - {literal: "Entity type: LOCATION"} + - {literal: "wikipedia_url: https://en.wikipedia.org/wiki/California"} + - {literal: "mid: /m/01n7q"} + - {literal: "Language of the text: en"} + + - name: language_entities_text - Analyzing the Entities of a text string (*metadata attributes*) + spec: + # Try out some of the metadata attributes which should be available for dates, addresses, etc. + # In case fake (555) area code numbers don't work, using United States Naval Observatory number. + # Custom value: "I called 202-762-1401 on January 31, 2019 from 1600 Amphitheatre Parkway, Mountain View, CA." + - call: + sample: language_entities_text + params: + text_content: + literal: "I called 202-762-1401 on January 31, 2019 from 1600 Amphitheatre Parkway, Mountain View, CA." + # The results may change, but it's fair to say that at least one of the following types were detected: + - assert_contains_any: + - literal: "Entity type: DATE" + - literal: "Entity type: ADDRESS" + - literal: "Entity type: PHONE_NUMBER" + # Check that at least some of the supporting metadata for an entity was present in the response + - assert_contains_any: + - literal: "month: 1" + - literal: "day: 31" + - literal: "year: 2019" + - literal: "street_number: 1600" + - literal: "street_name: Amphitheatre Parkway" + - literal: "area_code: 202" + - literal: "number: 7621401" + + - name: language_entities_gcs - Analyzing the Entities of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/entity.txt + # => "California is a state." + - call: {sample: language_entities_gcs} + - assert_contains: + - {literal: "Representative name for the entity: California"} + - {literal: "Entity type: LOCATION"} + - {literal: "Salience score:"} + - {literal: "wikipedia_url: https://en.wikipedia.org/wiki/California"} + - {literal: "mid: /m/01n7q"} + - {literal: "Mention text: California"} + - {literal: "Mention type: PROPER"} + - {literal: "Mention text: state"} + - {literal: "Mention type: COMMON"} + - {literal: "Language of the text: en"} + + - name: language_entities_gcs - Analyzing the Entities of text file in GCS (*custom value*) + spec: + # Use different file: gs://cloud-samples-data/language/entity-sentiment.txt + # => "Grapes are good. Bananas are bad." 
+ - call: + sample: language_entities_gcs + params: + gcs_content_uri: + literal: "gs://cloud-samples-data/language/entity-sentiment.txt" + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Mention text: Grapes"} + - {literal: "Mention type: COMMON"} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Mention text: Bananas"} + - {literal: "Language of the text: en"} diff --git a/samples/v1/test/analyzing_entity_sentiment.test.yaml b/samples/v1/test/analyzing_entity_sentiment.test.yaml new file mode 100644 index 00000000..beb8fb4a --- /dev/null +++ b/samples/v1/test/analyzing_entity_sentiment.test.yaml @@ -0,0 +1,63 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Analyzing Entity Sentiment [code sample tests]" + cases: + + - name: language_entity_sentiment_text - Analyzing Entity Sentiment of a text string (default value) + spec: + # Default value: "Grapes are good. Bananas are bad." + - call: {sample: language_entity_sentiment_text} + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Entity sentiment score: 0."} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Entity sentiment score: -0."} + - {literal: "Entity sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + + - name: language_entity_sentiment_text - Analyzing Entity Sentiment of a text string (*custom value*) + spec: + # Custom value: "Grapes are actually not very good. But Bananas are great." + - call: + sample: language_entity_sentiment_text + params: + text_content: {literal: "Grapes are actually not very good. But Bananas are great."} + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Entity sentiment score: -0."} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Entity sentiment score: 0."} + - {literal: "Entity sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + + - name: language_entity_sentiment_gcs - Analyzing Entity Sentiment of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/entity-sentiment.txt + # => "Grapes are good. Bananas are bad." + - call: {sample: language_entity_sentiment_gcs} + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Entity sentiment score: -0."} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Entity sentiment score: 0."} + - {literal: "Entity sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + + - name: language_entity_sentiment_gcs - Analyzing Entity Sentiment of text file in GCS (*custom value*) + spec: + # Use different file: gs://cloud-samples-data/language/entity-sentiment-reverse.txt + # => "Grapes are actually not very good. But Bananas are great." 
+ - call: + sample: language_entity_sentiment_gcs + params: + gcs_content_uri: + literal: "gs://cloud-samples-data/language/entity-sentiment-reverse.txt" + - assert_contains: + - {literal: "Representative name for the entity: Grapes"} + - {literal: "Entity sentiment score: -0."} + - {literal: "Representative name for the entity: Bananas"} + - {literal: "Entity sentiment score: 0."} + - {literal: "Entity sentiment magnitude: 0."} + - {literal: "Language of the text: en"} diff --git a/samples/v1/test/analyzing_sentiment.test.yaml b/samples/v1/test/analyzing_sentiment.test.yaml new file mode 100644 index 00000000..55b5fdcb --- /dev/null +++ b/samples/v1/test/analyzing_sentiment.test.yaml @@ -0,0 +1,74 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Analyzing Sentiment [code sample tests]" + cases: + + - name: language_sentiment_text - Analyzing the sentiment of a text string (default value) + spec: + # Default value: "I am so happy and joyful." + - call: {sample: language_sentiment_text} + - assert_contains: + - {literal: "Document sentiment score: 0."} + - {literal: "Document sentiment magnitude: 0."} + - {literal: "Sentence text: I am so happy and joyful."} + - {literal: "Sentence sentiment score: 0."} + - {literal: "Sentence sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + # There should be no negative sentiment scores for this value. + - assert_not_contains: + - {literal: "Document sentiment score: -0."} + - {literal: "Sentence sentiment score: -0."} + + - name: language_sentiment_text - Analyzing the sentiment of a text string (*custom value*) + spec: + # Custom value: "I am very happy. I am angry and sad." + - call: + sample: language_sentiment_text + params: + text_content: {literal: "I am very happy. I am angry and sad."} + - assert_contains: + - {literal: "Sentence text: I am very happy"} + - {literal: "Sentence sentiment score: 0."} + - {literal: "Sentence text: I am angry and sad"} + - {literal: "Sentence sentiment score: -0."} + - {literal: "Language of the text: en"} + + - name: language_sentiment_gcs - Analyzing the sentiment of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/sentiment-positive.txt + # => "I am so happy and joyful." + - call: {sample: language_sentiment_gcs} + - assert_contains: + - {literal: "Document sentiment score: 0."} + - {literal: "Document sentiment magnitude: 0."} + - {literal: "Sentence text: I am so happy and joyful."} + - {literal: "Sentence sentiment score: 0."} + - {literal: "Sentence sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + # There should be no negative sentiment scores for this value. + - assert_not_contains: + - {literal: "Document sentiment score: -0."} + - {literal: "Sentence sentiment score: -0."} + + - name: language_sentiment_gcs - Analyzing the sentiment of text file in GCS (*custom value*) + spec: + # Use different file: gs://cloud-samples-data/language/sentiment-negative.txt + # => "I am so sad and upset." + - call: + sample: language_sentiment_gcs + params: + gcs_content_uri: + literal: "gs://cloud-samples-data/language/sentiment-negative.txt" + - assert_contains: + - {literal: "Document sentiment score: -0."} + - {literal: "Document sentiment magnitude: 0."} + - {literal: "Sentence text: I am so sad and upset."} + - {literal: "Sentence sentiment score: -0."} + - {literal: "Sentence sentiment magnitude: 0."} + - {literal: "Language of the text: en"} + # There should be no positive sentiment scores for this value. 
+ - assert_not_contains: + - {literal: "Document sentiment score: 0."} + - {literal: "Sentence sentiment score: 0."} diff --git a/samples/v1/test/analyzing_syntax.test.yaml b/samples/v1/test/analyzing_syntax.test.yaml new file mode 100644 index 00000000..e89d465c --- /dev/null +++ b/samples/v1/test/analyzing_syntax.test.yaml @@ -0,0 +1,72 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Analyzing Syntax [code sample tests]" + cases: + + - name: language_syntax_text - Analyzing the syntax of a text string (default value) + spec: + # Default value: "This is a short sentence." + - call: {sample: language_syntax_text} + - assert_contains: + - {literal: "Token text: is"} + - {literal: "Part of Speech tag: VERB"} + - {literal: "Tense: PRESENT"} + - {literal: "Lemma: be"} + - {literal: "Token text: short"} + - {literal: "Part of Speech tag: ADJ"} + - {literal: "Lemma: short"} + - {literal: "Language of the text: en"} + + - name: language_syntax_text - Analyzing the syntax of a text string (*custom value*) + spec: + # Custom value: "Alice runs. Bob ran." + - call: + sample: language_syntax_text + params: + text_content: {literal: "Alice runs. Bob ran."} + - assert_contains: + - {literal: "Token text: Alice"} + - {literal: "Location of this token in overall document: 0"} + - {literal: "Part of Speech tag: NOUN"} + - {literal: "Label: NSUBJ"} + - {literal: "Token text: runs"} + - {literal: "Part of Speech tag: VERB"} + - {literal: "Tense: PRESENT"} + - {literal: "Lemma: run"} + - {literal: "Token text: ran"} + - {literal: "Tense: PAST"} + - {literal: "Language of the text: en"} + + - name: language_syntax_gcs - Analyzing the syntax of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/syntax-sentence.txt + # => "This is a short sentence." + - call: {sample: language_syntax_gcs} + - assert_contains: + - {literal: "Token text: is"} + - {literal: "Part of Speech tag: VERB"} + - {literal: "Tense: PRESENT"} + - {literal: "Lemma: be"} + - {literal: "Token text: short"} + - {literal: "Part of Speech tag: ADJ"} + - {literal: "Lemma: short"} + - {literal: "Language of the text: en"} + + - name: language_syntax_gcs - Analyzing the syntax of text file in GCS (*custom value*) + spec: + # Use different file: gs://cloud-samples-data/language/hello.txt + # => "Hello, world!" + - call: + sample: language_syntax_gcs + params: + gcs_content_uri: + literal: "gs://cloud-samples-data/language/hello.txt" + - assert_contains: + - {literal: "Token text: Hello"} + - {literal: "Token text: World"} + - {literal: "Part of Speech tag: NOUN"} + - {literal: "Token text: !"} + - {literal: "Part of Speech tag: PUNCT"} + - {literal: "Language of the text: en"} diff --git a/samples/v1/test/classifying_content.test.yaml b/samples/v1/test/classifying_content.test.yaml new file mode 100644 index 00000000..5cfc7669 --- /dev/null +++ b/samples/v1/test/classifying_content.test.yaml @@ -0,0 +1,51 @@ +type: test/samples +schema_version: 1 +test: + suites: + - name: "Classifying Content [code sample tests]" + cases: + + - name: language_classify_text - Classifying Content of a text string (default value) + spec: + # Default value: "That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows." 
+ - call: {sample: language_classify_text} + - assert_contains_any: + - {literal: "TV"} + - {literal: "Movies"} + - {literal: "Entertainment"} + + - name: language_classify_text - Classifying Content of a text string (*custom value*) + spec: + # Custom value: "Let's drink coffee and eat bagels at a coffee shop. I want muffins, croisants, coffee and baked goods." + - call: + sample: language_classify_text + params: + text_content: {literal: "Let's drink coffee and eat bagels at a coffee shop. I want muffins, croisants, coffee and baked goods."} + - assert_contains_any: + - {literal: "Food"} + - {literal: "Drink"} + - {literal: "Coffee"} + + - name: language_classify_gcs - Classifying Content of text file in GCS (default value) + spec: + # Default value: gs://cloud-samples-data/language/classify-entertainment.txt + # => "This is about film and movies and television and acting and movie theatres and theatre and drama and entertainment and the arts." + - call: {sample: language_classify_gcs} + - assert_contains_any: + - {literal: "TV"} + - {literal: "Movies"} + - {literal: "Entertainment"} + + - name: language_classify_gcs - Classifying Content of text file in GCS (*custom value*) + spec: + # Use different file: gs://cloud-samples-data/language/android.txt + # => "Android is a mobile operating system developed by Google, based on the Linux kernel and..." + - call: + sample: language_classify_gcs + params: + gcs_content_uri: + literal: "gs://cloud-samples-data/language/android.txt" + - assert_contains_any: + - {literal: "Mobile"} + - {literal: "Phone"} + - {literal: "Internet"} diff --git a/samples/v1/test/samples.manifest.yaml b/samples/v1/test/samples.manifest.yaml new file mode 100644 index 00000000..aa270425 --- /dev/null +++ b/samples/v1/test/samples.manifest.yaml @@ -0,0 +1,38 @@ +type: manifest/samples +schema_version: 3 +base: &common + env: 'python' + bin: 'python3' + chdir: '{@manifest_dir}/../..' + basepath: '.' 
+samples: +- <<: *common + path: '{basepath}/v1/language_classify_gcs.py' + sample: 'language_classify_gcs' +- <<: *common + path: '{basepath}/v1/language_classify_text.py' + sample: 'language_classify_text' +- <<: *common + path: '{basepath}/v1/language_entities_gcs.py' + sample: 'language_entities_gcs' +- <<: *common + path: '{basepath}/v1/language_entities_text.py' + sample: 'language_entities_text' +- <<: *common + path: '{basepath}/v1/language_entity_sentiment_gcs.py' + sample: 'language_entity_sentiment_gcs' +- <<: *common + path: '{basepath}/v1/language_entity_sentiment_text.py' + sample: 'language_entity_sentiment_text' +- <<: *common + path: '{basepath}/v1/language_sentiment_gcs.py' + sample: 'language_sentiment_gcs' +- <<: *common + path: '{basepath}/v1/language_sentiment_text.py' + sample: 'language_sentiment_text' +- <<: *common + path: '{basepath}/v1/language_syntax_gcs.py' + sample: 'language_syntax_gcs' +- <<: *common + path: '{basepath}/v1/language_syntax_text.py' + sample: 'language_syntax_text' diff --git a/synth.metadata b/synth.metadata index 1456f20a..d0b76fc8 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,19 +1,11 @@ { - "updateTime": "2019-08-06T12:30:47.420536Z", + "updateTime": "2019-08-29T22:35:01.571061Z", "sources": [ { "generator": { "name": "artman", - "version": "0.32.1", - "dockerImage": "googleapis/artman@sha256:a684d40ba9a4e15946f5f2ca6b4bd9fe301192f522e9de4fff622118775f309b" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "e699b0cba64ffddfae39633417180f1f65875896", - "internalRef": "261759677" + "version": "0.35.1", + "dockerImage": "googleapis/artman@sha256:b11c7ea0d0831c54016fb50f4b796d24d1971439b30fbc32a369ba1ac887c384" } }, { diff --git a/synth.py b/synth.py index 6166eca1..bee382de 100644 --- a/synth.py +++ b/synth.py @@ -32,12 +32,14 @@ config_path=f"/google/cloud/language/artman_language_{version}.yaml", artman_output_name=f"language-{version}", include_protos=True, + include_samples=True ) s.move(library / f"google/cloud/language_{version}/proto") s.move(library / f"google/cloud/language_{version}/gapic") s.move(library / f"tests/unit/gapic/{version}") s.move(library / f"tests/system/gapic/{version}") + s.move(library / f"samples") # ---------------------------------------------------------------------------- # Add templated files From 4a294843c0d533e79214eb9fc4a92865e3969f85 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 10 Sep 2019 21:06:28 -0400 Subject: [PATCH 008/209] Exclude noxfile from synth. (#9201) Supersedes #9195. 
--- synth.metadata | 14 +++++++++++--- synth.py | 2 +- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/synth.metadata b/synth.metadata index d0b76fc8..7a28efcb 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,11 +1,19 @@ { - "updateTime": "2019-08-29T22:35:01.571061Z", + "updateTime": "2019-09-10T17:35:04.896094Z", "sources": [ { "generator": { "name": "artman", - "version": "0.35.1", - "dockerImage": "googleapis/artman@sha256:b11c7ea0d0831c54016fb50f4b796d24d1971439b30fbc32a369ba1ac887c384" + "version": "0.36.2", + "dockerImage": "googleapis/artman@sha256:0e6f3a668cd68afc768ecbe08817cf6e56a0e64fcbdb1c58c3b97492d12418a1" + } + }, + { + "git": { + "name": "googleapis", + "remote": "https://github.com/googleapis/googleapis.git", + "sha": "b4b182552fa0088e463ada73afcf48b405965c2c", + "internalRef": "268243295" } }, { diff --git a/synth.py b/synth.py index bee382de..db33996a 100644 --- a/synth.py +++ b/synth.py @@ -45,7 +45,7 @@ # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library(unit_cov_level=97, cov_level=100) -s.move(templated_files) +s.move(templated_files, excludes=['noxfile.py']) s.replace( f"google/cloud/**/gapic/language_service_client.py", From c44195fcea7d095d29dd48109c3a0516e8170041 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 11 Sep 2019 09:20:38 -0700 Subject: [PATCH 009/209] Reorder samples manifest (via synth). (#9209) --- samples/v1/test/samples.manifest.yaml | 26 +++++++++++++------------- synth.metadata | 6 +++--- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/samples/v1/test/samples.manifest.yaml b/samples/v1/test/samples.manifest.yaml index aa270425..28d2760f 100644 --- a/samples/v1/test/samples.manifest.yaml +++ b/samples/v1/test/samples.manifest.yaml @@ -6,33 +6,33 @@ base: &common chdir: '{@manifest_dir}/../..' basepath: '.' 
samples: +- <<: *common + path: '{basepath}/v1/language_entity_sentiment_gcs.py' + sample: 'language_entity_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_classify_gcs.py' sample: 'language_classify_gcs' - <<: *common - path: '{basepath}/v1/language_classify_text.py' - sample: 'language_classify_text' -- <<: *common - path: '{basepath}/v1/language_entities_gcs.py' - sample: 'language_entities_gcs' + path: '{basepath}/v1/language_syntax_gcs.py' + sample: 'language_syntax_gcs' - <<: *common path: '{basepath}/v1/language_entities_text.py' sample: 'language_entities_text' - <<: *common - path: '{basepath}/v1/language_entity_sentiment_gcs.py' - sample: 'language_entity_sentiment_gcs' + path: '{basepath}/v1/language_classify_text.py' + sample: 'language_classify_text' +- <<: *common + path: '{basepath}/v1/language_syntax_text.py' + sample: 'language_syntax_text' - <<: *common path: '{basepath}/v1/language_entity_sentiment_text.py' sample: 'language_entity_sentiment_text' +- <<: *common + path: '{basepath}/v1/language_entities_gcs.py' + sample: 'language_entities_gcs' - <<: *common path: '{basepath}/v1/language_sentiment_gcs.py' sample: 'language_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_sentiment_text.py' sample: 'language_sentiment_text' -- <<: *common - path: '{basepath}/v1/language_syntax_gcs.py' - sample: 'language_syntax_gcs' -- <<: *common - path: '{basepath}/v1/language_syntax_text.py' - sample: 'language_syntax_text' diff --git a/synth.metadata b/synth.metadata index 7a28efcb..bc5806eb 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2019-09-10T17:35:04.896094Z", + "updateTime": "2019-09-11T12:28:39.572337Z", "sources": [ { "generator": { @@ -12,8 +12,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "b4b182552fa0088e463ada73afcf48b405965c2c", - "internalRef": "268243295" + "sha": "f1c042777e90baae0f8590f7820eed2c6ef758b2", + "internalRef": "268319807" } }, { From e97a0ae6c2e3a26afc9b3af7d91118ac3c0aa1f7 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Wed, 25 Sep 2019 12:35:50 -0400 Subject: [PATCH 010/209] docs: fix intersphinx reference to requests (#9294) --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 198fdcc5..999e8f0e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -339,7 +339,7 @@ "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("https://2.python-requests.org/en/master/", None), + "requests": ("https://requests.kennethreitz.org/en/stable/", None), "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), } From 979f44f3451d8bf2638851acd545a7a61879fa94 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 26 Sep 2019 10:14:14 -0700 Subject: [PATCH 011/209] codegen(language): reorder samples (#9310) --- samples/v1/test/samples.manifest.yaml | 26 +++++++++++++------------- synth.metadata | 10 +++++----- 2 files 
changed, 18 insertions(+), 18 deletions(-) diff --git a/samples/v1/test/samples.manifest.yaml b/samples/v1/test/samples.manifest.yaml index 28d2760f..aa270425 100644 --- a/samples/v1/test/samples.manifest.yaml +++ b/samples/v1/test/samples.manifest.yaml @@ -6,33 +6,33 @@ base: &common chdir: '{@manifest_dir}/../..' basepath: '.' samples: -- <<: *common - path: '{basepath}/v1/language_entity_sentiment_gcs.py' - sample: 'language_entity_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_classify_gcs.py' sample: 'language_classify_gcs' - <<: *common - path: '{basepath}/v1/language_syntax_gcs.py' - sample: 'language_syntax_gcs' + path: '{basepath}/v1/language_classify_text.py' + sample: 'language_classify_text' +- <<: *common + path: '{basepath}/v1/language_entities_gcs.py' + sample: 'language_entities_gcs' - <<: *common path: '{basepath}/v1/language_entities_text.py' sample: 'language_entities_text' - <<: *common - path: '{basepath}/v1/language_classify_text.py' - sample: 'language_classify_text' -- <<: *common - path: '{basepath}/v1/language_syntax_text.py' - sample: 'language_syntax_text' + path: '{basepath}/v1/language_entity_sentiment_gcs.py' + sample: 'language_entity_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_entity_sentiment_text.py' sample: 'language_entity_sentiment_text' -- <<: *common - path: '{basepath}/v1/language_entities_gcs.py' - sample: 'language_entities_gcs' - <<: *common path: '{basepath}/v1/language_sentiment_gcs.py' sample: 'language_sentiment_gcs' - <<: *common path: '{basepath}/v1/language_sentiment_text.py' sample: 'language_sentiment_text' +- <<: *common + path: '{basepath}/v1/language_syntax_gcs.py' + sample: 'language_syntax_gcs' +- <<: *common + path: '{basepath}/v1/language_syntax_text.py' + sample: 'language_syntax_text' diff --git a/synth.metadata b/synth.metadata index bc5806eb..f029a848 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-09-11T12:28:39.572337Z", + "updateTime": "2019-09-26T12:28:07.785217Z", "sources": [ { "generator": { "name": "artman", - "version": "0.36.2", - "dockerImage": "googleapis/artman@sha256:0e6f3a668cd68afc768ecbe08817cf6e56a0e64fcbdb1c58c3b97492d12418a1" + "version": "0.37.1", + "dockerImage": "googleapis/artman@sha256:6068f67900a3f0bdece596b97bda8fc70406ca0e137a941f4c81d3217c994a80" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "f1c042777e90baae0f8590f7820eed2c6ef758b2", - "internalRef": "268319807" + "sha": "4c2ca81a0c976d4d37a8999984b7894d9af22124", + "internalRef": "271130964" } }, { From 0df62068d8b1abf071b21542f93eb8fe3c59cba6 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 1 Oct 2019 08:05:10 -0700 Subject: [PATCH 012/209] enhancement(language): add entity types (via synth) (#9373) --- google/cloud/language_v1beta2/gapic/enums.py | 58 +++- .../gapic/language_service_client.py | 14 +- .../proto/language_service.proto | 228 +++++++++------ .../proto/language_service_pb2.py | 268 ++++++++++-------- .../proto/language_service_pb2_grpc.py | 6 +- synth.metadata | 6 +- 6 files changed, 359 insertions(+), 221 deletions(-) diff --git a/google/cloud/language_v1beta2/gapic/enums.py b/google/cloud/language_v1beta2/gapic/enums.py index aa68fa49..0d3cf1c5 100644 --- a/google/cloud/language_v1beta2/gapic/enums.py +++ b/google/cloud/language_v1beta2/gapic/enums.py @@ -34,7 +34,7 @@ class EncodingType(enum.IntEnum): based on the UTF-8 encoding of the input. 
C++ and Go are examples of languages that use this encoding natively. UTF16 (int): Encoding-dependent information (such as ``begin_offset``) is calculated - based on the UTF-16 encoding of the input. Java and Javascript are + based on the UTF-16 encoding of the input. Java and JavaScript are examples of languages that use this encoding natively. UTF32 (int): Encoding-dependent information (such as ``begin_offset``) is calculated based on the UTF-32 encoding of the input. Python is an example of a @@ -242,7 +242,10 @@ class Type(enum.IntEnum): class Entity(object): class Type(enum.IntEnum): """ - The type of the entity. + The type of the entity. For most entity types, the associated metadata + is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph MID + (``mid``). The table below lists the associated fields for entities that + have different metadata. Attributes: UNKNOWN (int): Unknown @@ -250,9 +253,49 @@ class Type(enum.IntEnum): LOCATION (int): Location ORGANIZATION (int): Organization EVENT (int): Event - WORK_OF_ART (int): Work of art - CONSUMER_GOOD (int): Consumer goods - OTHER (int): Other types + WORK_OF_ART (int): Artwork + CONSUMER_GOOD (int): Consumer product + OTHER (int): Other types of entities + PHONE_NUMBER (int): Phone number + + The metadata lists the phone number, formatted according to local + convention, plus whichever additional elements appear in the text: + + - ``number`` - the actual number, broken down into sections as per + local convention + - ``national_prefix`` - country code, if detected + - ``area_code`` - region or area code, if detected + - ``extension`` - phone extension (to be dialed after connection), if + detected + ADDRESS (int): Address + + The metadata identifies the street number and locality plus whichever + additional elements appear in the text: + + - ``street_number`` - street number + - ``locality`` - city or town + - ``street_name`` - street/route name, if detected + - ``postal_code`` - postal code, if detected + - ``country`` - country, if detected< + - ``broad_region`` - administrative area, such as the state, if + detected + - ``narrow_region`` - smaller administrative area, such as county, if + detected + - ``sublocality`` - used in Asian addresses to demark a district within + a city, if detected + DATE (int): Date + + The metadata identifies the components of the date: + + - ``year`` - four digit year, if detected + - ``month`` - two digit month number, if detected + - ``day`` - two digit day number, if detected + NUMBER (int): Number + + The metadata is the number itself. + PRICE (int): Price + + The metadata identifies the ``value`` and ``currency``. """ UNKNOWN = 0 @@ -263,6 +306,11 @@ class Type(enum.IntEnum): WORK_OF_ART = 5 CONSUMER_GOOD = 6 OTHER = 7 + PHONE_NUMBER = 9 + ADDRESS = 10 + DATE = 11 + NUMBER = 12 + PRICE = 13 class EntityMention(object): diff --git a/google/cloud/language_v1beta2/gapic/language_service_client.py b/google/cloud/language_v1beta2/gapic/language_service_client.py index dcb8e89d..73af0ff6 100644 --- a/google/cloud/language_v1beta2/gapic/language_service_client.py +++ b/google/cloud/language_v1beta2/gapic/language_service_client.py @@ -207,7 +207,7 @@ def analyze_sentiment( >>> response = client.analyze_sentiment(document) Args: - document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document. + document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Required. Input document. 
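The `document` argument flagged as "Required" in the docstring hunk above accepts either a plain dict or a `types.Document` protobuf message. A minimal sketch of both forms, assuming the v1beta2 client surface generated by this change (the sample text is taken from the entity test fixtures and is only illustrative):

from google.cloud import language_v1beta2
from google.cloud.language_v1beta2 import enums, types

client = language_v1beta2.LanguageServiceClient()

# Dict form, as used throughout the generated samples.
document = {"content": "California is a state.", "type": enums.Document.Type.PLAIN_TEXT}

# Equivalent protobuf message form.
document = types.Document(
    content="California is a state.",
    type=enums.Document.Type.PLAIN_TEXT,
)

response = client.analyze_sentiment(document)
print(u"Document sentiment score: {}".format(response.document_sentiment.score))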
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1beta2.types.Document` @@ -274,7 +274,7 @@ def analyze_entities( >>> response = client.analyze_entities(document) Args: - document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document. + document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Required. Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1beta2.types.Document` @@ -339,7 +339,7 @@ def analyze_entity_sentiment( >>> response = client.analyze_entity_sentiment(document) Args: - document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document. + document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Required. Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1beta2.types.Document` @@ -405,7 +405,7 @@ def analyze_syntax( >>> response = client.analyze_syntax(document) Args: - document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document. + document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Required. Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1beta2.types.Document` @@ -468,7 +468,7 @@ def classify_text( >>> response = client.classify_text(document) Args: - document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document. + document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Required. Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1beta2.types.Document` @@ -534,11 +534,11 @@ def annotate_text( >>> response = client.annotate_text(document, features) Args: - document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Input document. + document (Union[dict, ~google.cloud.language_v1beta2.types.Document]): Required. Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1beta2.types.Document` - features (Union[dict, ~google.cloud.language_v1beta2.types.Features]): The enabled features. + features (Union[dict, ~google.cloud.language_v1beta2.types.Features]): Required. The enabled features. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1beta2.types.Features` diff --git a/google/cloud/language_v1beta2/proto/language_service.proto b/google/cloud/language_v1beta2/proto/language_service.proto index 0263be04..d0242e59 100644 --- a/google/cloud/language_v1beta2/proto/language_service.proto +++ b/google/cloud/language_v1beta2/proto/language_service.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,15 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
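The new entity types documented in the enums above (PHONE_NUMBER, ADDRESS, DATE, NUMBER, PRICE) surface their details through `entity.metadata`. A rough sketch of inspecting them, assuming the v1beta2 client generated from this proto; the example text mirrors the metadata test case in the v1 sample tests earlier in this series, and which types the API actually returns depends on the service:

from google.cloud import language_v1beta2
from google.cloud.language_v1beta2 import enums

client = language_v1beta2.LanguageServiceClient()
document = {
    "content": "I called 202-762-1401 on January 31, 2019 from "
    "1600 Amphitheatre Parkway, Mountain View, CA.",
    "type": enums.Document.Type.PLAIN_TEXT,
}

response = client.analyze_entities(document, encoding_type=enums.EncodingType.UTF8)
for entity in response.entities:
    # Expect types such as PHONE_NUMBER, DATE and ADDRESS for this text.
    print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name))
    for key, value in entity.metadata.items():
        # e.g. area_code/number for phone numbers, year/month/day for dates,
        # street_number/locality for addresses.
        print(u"  {}: {}".format(key, value))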
+// syntax = "proto3"; package google.cloud.language.v1beta2; import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/language/v1beta2;language"; option java_multiple_files = true; @@ -29,36 +30,42 @@ option java_package = "com.google.cloud.language.v1beta2"; // Provides text analysis operations such as sentiment analysis and entity // recognition. service LanguageService { + option (google.api.default_host) = "language.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-language," + "https://www.googleapis.com/auth/cloud-platform"; + // Analyzes the sentiment of the provided text. - rpc AnalyzeSentiment(AnalyzeSentimentRequest) - returns (AnalyzeSentimentResponse) { + rpc AnalyzeSentiment(AnalyzeSentimentRequest) returns (AnalyzeSentimentResponse) { option (google.api.http) = { post: "/v1beta2/documents:analyzeSentiment" body: "*" }; + option (google.api.method_signature) = "document,encoding_type"; + option (google.api.method_signature) = "document"; } // Finds named entities (currently proper names and common nouns) in the text // along with entity types, salience, mentions for each entity, and // other properties. - rpc AnalyzeEntities(AnalyzeEntitiesRequest) - returns (AnalyzeEntitiesResponse) { + rpc AnalyzeEntities(AnalyzeEntitiesRequest) returns (AnalyzeEntitiesResponse) { option (google.api.http) = { post: "/v1beta2/documents:analyzeEntities" body: "*" }; + option (google.api.method_signature) = "document,encoding_type"; + option (google.api.method_signature) = "document"; } - // Finds entities, similar to - // [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] - // in the text and analyzes sentiment associated with each entity and its - // mentions. - rpc AnalyzeEntitySentiment(AnalyzeEntitySentimentRequest) - returns (AnalyzeEntitySentimentResponse) { + // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes + // sentiment associated with each entity and its mentions. + rpc AnalyzeEntitySentiment(AnalyzeEntitySentimentRequest) returns (AnalyzeEntitySentimentResponse) { option (google.api.http) = { post: "/v1beta2/documents:analyzeEntitySentiment" body: "*" }; + option (google.api.method_signature) = "document,encoding_type"; + option (google.api.method_signature) = "document"; } // Analyzes the syntax of the text and provides sentence boundaries and @@ -69,6 +76,8 @@ service LanguageService { post: "/v1beta2/documents:analyzeSyntax" body: "*" }; + option (google.api.method_signature) = "document,encoding_type"; + option (google.api.method_signature) = "document"; } // Classifies a document into categories. 
@@ -77,6 +86,7 @@ service LanguageService { post: "/v1beta2/documents:classifyText" body: "*" }; + option (google.api.method_signature) = "document"; } // A convenience method that provides all syntax, sentiment, entity, and @@ -86,6 +96,8 @@ service LanguageService { post: "/v1beta2/documents:annotateText" body: "*" }; + option (google.api.method_signature) = "document,features,encoding_type"; + option (google.api.method_signature) = "document,features"; } } @@ -113,6 +125,7 @@ message Document { // Google Cloud Storage URI. oneof source { // The content of the input in string format. + // Cloud audit logging exempt since it is based on user data. string content = 2; // The Google Cloud Storage URI where the file content is located. @@ -139,8 +152,8 @@ message Sentence { TextSpan text = 1; // For calls to [AnalyzeSentiment][] or if - // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment] - // is set to true, this field will contain the sentiment for the sentence. + // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment] is set to + // true, this field will contain the sentiment for the sentence. Sentiment sentiment = 2; } @@ -148,7 +161,10 @@ message Sentence { // a person, an organization, or location. The API associates information, such // as salience and mentions, with entities. message Entity { - // The type of the entity. + // The type of the entity. For most entity types, the associated metadata is a + // Wikipedia URL (`wikipedia_url`) and Knowledge Graph MID (`mid`). The table + // below lists the associated fields for entities that have different + // metadata. enum Type { // Unknown UNKNOWN = 0; @@ -165,14 +181,63 @@ message Entity { // Event EVENT = 4; - // Work of art + // Artwork WORK_OF_ART = 5; - // Consumer goods + // Consumer product CONSUMER_GOOD = 6; - // Other types + // Other types of entities OTHER = 7; + + // Phone number + // + // The metadata lists the phone number, formatted according to local + // convention, plus whichever additional elements appear in the text: + // + // * `number` - the actual number, broken down into sections as per local + // convention + // * `national_prefix` - country code, if detected + // * `area_code` - region or area code, if detected + // * `extension` - phone extension (to be dialed after connection), if + // detected + PHONE_NUMBER = 9; + + // Address + // + // The metadata identifies the street number and locality plus whichever + // additional elements appear in the text: + // + // * `street_number` - street number + // * `locality` - city or town + // * `street_name` - street/route name, if detected + // * `postal_code` - postal code, if detected + // * `country` - country, if detected< + // * `broad_region` - administrative area, such as the state, if detected + // * `narrow_region` - smaller administrative area, such as county, if + // detected + // * `sublocality` - used in Asian addresses to demark a district within a + // city, if detected + ADDRESS = 10; + + // Date + // + // The metadata identifies the components of the date: + // + // * `year` - four digit year, if detected + // * `month` - two digit month number, if detected + // * `day` - two digit day number, if detected + DATE = 11; + + // Number + // + // The metadata is the number itself. + NUMBER = 12; + + // Price + // + // The metadata identifies the `value` and `currency`. 
+ PRICE = 13; } // The representative name for the entity. @@ -183,8 +248,9 @@ message Entity { // Metadata associated with the entity. // - // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if - // available. The associated keys are "wikipedia_url" and "mid", respectively. + // For most entity types, the metadata is a Wikipedia URL (`wikipedia_url`) + // and Knowledge Graph MID (`mid`), if they are available. For the metadata + // associated with other entity types, see the Type table below. map metadata = 3; // The salience score associated with the entity in the [0, 1.0] range. @@ -200,12 +266,38 @@ message Entity { repeated EntityMention mentions = 5; // For calls to [AnalyzeEntitySentiment][] or if - // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] - // is set to true, this field will contain the aggregate sentiment expressed - // for this entity in the provided document. + // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to + // true, this field will contain the aggregate sentiment expressed for this + // entity in the provided document. Sentiment sentiment = 6; } +// Represents the text encoding that the caller uses to process the output. +// Providing an `EncodingType` is recommended because the API provides the +// beginning offsets for various outputs, such as tokens and mentions, and +// languages that natively use different text encodings may access offsets +// differently. +enum EncodingType { + // If `EncodingType` is not specified, encoding-dependent information (such as + // `begin_offset`) will be set at `-1`. + NONE = 0; + + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-8 encoding of the input. C++ and Go are examples of languages + // that use this encoding natively. + UTF8 = 1; + + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-16 encoding of the input. Java and JavaScript are examples of + // languages that use this encoding natively. + UTF16 = 2; + + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-32 encoding of the input. Python is an example of a language + // that uses this encoding natively. + UTF32 = 3; +} + // Represents the smallest syntactic building block of the text. message Token { // The token text. @@ -223,6 +315,7 @@ message Token { // Represents the feeling associated with the entire text or entities in // the text. +// Next ID: 6 message Sentiment { // A non-negative number in the [0, +inf) range, which represents // the absolute magnitude of sentiment regardless of score (positive or @@ -849,9 +942,9 @@ message EntityMention { Type type = 2; // For calls to [AnalyzeEntitySentiment][] or if - // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] - // is set to true, this field will contain the sentiment expressed for this - // mention of the entity in the provided document. + // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to + // true, this field will contain the sentiment expressed for this mention of + // the entity in the provided document. 
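Because `begin_offset` is computed in the units of the requested `EncodingType` described above, the same text yields different offsets per encoding once non-ASCII characters are involved. A small sketch, assuming the v1beta2 client surface in this diff (the sample string is illustrative):

from google.cloud import language_v1beta2
from google.cloud.language_v1beta2 import enums

client = language_v1beta2.LanguageServiceClient()
document = {
    # "Café" is 5 bytes in UTF-8 but 4 code units in UTF-16, so the offsets
    # of the following tokens differ between the two encodings.
    "content": u"Café is open.",
    "type": enums.Document.Type.PLAIN_TEXT,
}

for encoding in (enums.EncodingType.UTF8, enums.EncodingType.UTF16):
    response = client.analyze_syntax(document, encoding_type=encoding)
    offsets = [token.text.begin_offset for token in response.tokens]
    print(u"{}: {}".format(enums.EncodingType(encoding).name, offsets))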
Sentiment sentiment = 3; } @@ -861,15 +954,14 @@ message TextSpan { string content = 1; // The API calculates the beginning offset of the content in the original - // document according to the - // [EncodingType][google.cloud.language.v1beta2.EncodingType] specified in the - // API request. + // document according to the [EncodingType][google.cloud.language.v1beta2.EncodingType] specified in the API request. int32 begin_offset = 2; } // Represents a category returned from the text classifier. message ClassificationCategory { - // The name of the category representing the document. + // The name of the category representing the document, from the [predefined + // taxonomy](/natural-language/docs/categories). string name = 1; // The classifier's confidence of the category. Number represents how certain @@ -879,8 +971,8 @@ message ClassificationCategory { // The sentiment analysis request message. message AnalyzeSentimentRequest { - // Input document. - Document document = 1; + // Required. Input document. + Document document = 1 [(google.api.field_behavior) = REQUIRED]; // The encoding type used by the API to calculate sentence offsets for the // sentence sentiment. @@ -894,8 +986,7 @@ message AnalyzeSentimentResponse { // The language of the text, which will be the same as the language specified // in the request or, if not specified, the automatically-detected language. - // See [Document.language][google.cloud.language.v1beta2.Document.language] - // field for more details. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. string language = 2; // The sentiment for all the sentences in the document. @@ -904,8 +995,8 @@ message AnalyzeSentimentResponse { // The entity-level sentiment analysis request message. message AnalyzeEntitySentimentRequest { - // Input document. - Document document = 1; + // Required. Input document. + Document document = 1 [(google.api.field_behavior) = REQUIRED]; // The encoding type used by the API to calculate offsets. EncodingType encoding_type = 2; @@ -918,15 +1009,14 @@ message AnalyzeEntitySentimentResponse { // The language of the text, which will be the same as the language specified // in the request or, if not specified, the automatically-detected language. - // See [Document.language][google.cloud.language.v1beta2.Document.language] - // field for more details. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. string language = 2; } // The entity analysis request message. message AnalyzeEntitiesRequest { - // Input document. - Document document = 1; + // Required. Input document. + Document document = 1 [(google.api.field_behavior) = REQUIRED]; // The encoding type used by the API to calculate offsets. EncodingType encoding_type = 2; @@ -939,15 +1029,14 @@ message AnalyzeEntitiesResponse { // The language of the text, which will be the same as the language specified // in the request or, if not specified, the automatically-detected language. - // See [Document.language][google.cloud.language.v1beta2.Document.language] - // field for more details. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. string language = 2; } // The syntax analysis request message. message AnalyzeSyntaxRequest { - // Input document. - Document document = 1; + // Required. Input document. + Document document = 1 [(google.api.field_behavior) = REQUIRED]; // The encoding type used by the API to calculate offsets. 
EncodingType encoding_type = 2; @@ -963,15 +1052,14 @@ message AnalyzeSyntaxResponse { // The language of the text, which will be the same as the language specified // in the request or, if not specified, the automatically-detected language. - // See [Document.language][google.cloud.language.v1beta2.Document.language] - // field for more details. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. string language = 3; } // The document classification request message. message ClassifyTextRequest { - // Input document. - Document document = 1; + // Required. Input document. + Document document = 1 [(google.api.field_behavior) = REQUIRED]; } // The document classification response message. @@ -985,6 +1073,7 @@ message ClassifyTextResponse { message AnnotateTextRequest { // All available features for sentiment, syntax, and semantic analysis. // Setting each one to true will enable that specific analysis for the input. + // Next ID: 10 message Features { // Extract syntax information. bool extract_syntax = 1; @@ -998,15 +1087,17 @@ message AnnotateTextRequest { // Extract entities and their associated sentiment. bool extract_entity_sentiment = 4; - // Classify the full document into categories. + // Classify the full document into categories. If this is true, + // the API will use the default model which classifies into a + // [predefined taxonomy](/natural-language/docs/categories). bool classify_text = 6; } - // Input document. - Document document = 1; + // Required. Input document. + Document document = 1 [(google.api.field_behavior) = REQUIRED]; - // The enabled features. - Features features = 2; + // Required. The enabled features. + Features features = 2 [(google.api.field_behavior) = REQUIRED]; // The encoding type used by the API to calculate offsets. EncodingType encoding_type = 3; @@ -1034,36 +1125,9 @@ message AnnotateTextResponse { // The language of the text, which will be the same as the language specified // in the request or, if not specified, the automatically-detected language. - // See [Document.language][google.cloud.language.v1beta2.Document.language] - // field for more details. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. string language = 5; // Categories identified in the input document. repeated ClassificationCategory categories = 6; } - -// Represents the text encoding that the caller uses to process the output. -// Providing an `EncodingType` is recommended because the API provides the -// beginning offsets for various outputs, such as tokens and mentions, and -// languages that natively use different text encodings may access offsets -// differently. -enum EncodingType { - // If `EncodingType` is not specified, encoding-dependent information (such as - // `begin_offset`) will be set at `-1`. - NONE = 0; - - // Encoding-dependent information (such as `begin_offset`) is calculated based - // on the UTF-8 encoding of the input. C++ and Go are examples of languages - // that use this encoding natively. - UTF8 = 1; - - // Encoding-dependent information (such as `begin_offset`) is calculated based - // on the UTF-16 encoding of the input. Java and Javascript are examples of - // languages that use this encoding natively. - UTF16 = 2; - - // Encoding-dependent information (such as `begin_offset`) is calculated based - // on the UTF-32 encoding of the input. Python is an example of a language - // that uses this encoding natively. 
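The Features message above lets a single `annotate_text` call combine syntax, entity, sentiment, and classification analysis. A brief sketch, assuming the v1beta2 client; the dict stands in for the `Features` message, and the text is the classification default used in the sample tests earlier in this series:

from google.cloud import language_v1beta2
from google.cloud.language_v1beta2 import enums

client = language_v1beta2.LanguageServiceClient()
document = {
    "content": "That actor on TV makes movies in Hollywood and also stars in "
    "a variety of popular new TV shows.",
    "type": enums.Document.Type.PLAIN_TEXT,
}
features = {
    "extract_syntax": True,
    "extract_entities": True,
    "extract_document_sentiment": True,
    "classify_text": True,
}

response = client.annotate_text(
    document, features, encoding_type=enums.EncodingType.UTF8
)
print(u"Tokens: {}, entities: {}".format(len(response.tokens), len(response.entities)))
print(u"Document sentiment score: {}".format(response.document_sentiment.score))
for category in response.categories:
    print(u"{} ({:.2f})".format(category.name, category.confidence))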
- UTF32 = 3; -} diff --git a/google/cloud/language_v1beta2/proto/language_service_pb2.py b/google/cloud/language_v1beta2/proto/language_service_pb2.py index 6e2ce20c..8c9068df 100644 --- a/google/cloud/language_v1beta2/proto/language_service_pb2.py +++ b/google/cloud/language_v1beta2/proto/language_service_pb2.py @@ -17,11 +17,9 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 DESCRIPTOR = _descriptor.FileDescriptor( @@ -32,13 +30,13 @@ "\n!com.google.cloud.language.v1beta2B\024LanguageServiceProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/language/v1beta2;language" ), serialized_pb=_b( - '\n:google/cloud/language_v1beta2/proto/language_service.proto\x12\x1dgoogle.cloud.language.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xc8\x01\n\x08\x44ocument\x12:\n\x04type\x18\x01 \x01(\x0e\x32,.google.cloud.language.v1beta2.Document.Type\x12\x11\n\x07\x63ontent\x18\x02 \x01(\tH\x00\x12\x19\n\x0fgcs_content_uri\x18\x03 \x01(\tH\x00\x12\x10\n\x08language\x18\x04 \x01(\t"6\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPLAIN_TEXT\x10\x01\x12\x08\n\x04HTML\x10\x02\x42\x08\n\x06source"~\n\x08Sentence\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12;\n\tsentiment\x18\x02 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment"\xd2\x03\n\x06\x45ntity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x04type\x18\x02 \x01(\x0e\x32*.google.cloud.language.v1beta2.Entity.Type\x12\x45\n\x08metadata\x18\x03 \x03(\x0b\x32\x33.google.cloud.language.v1beta2.Entity.MetadataEntry\x12\x10\n\x08salience\x18\x04 \x01(\x02\x12>\n\x08mentions\x18\x05 \x03(\x0b\x32,.google.cloud.language.v1beta2.EntityMention\x12;\n\tsentiment\x18\x06 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"y\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06PERSON\x10\x01\x12\x0c\n\x08LOCATION\x10\x02\x12\x10\n\x0cORGANIZATION\x10\x03\x12\t\n\x05\x45VENT\x10\x04\x12\x0f\n\x0bWORK_OF_ART\x10\x05\x12\x11\n\rCONSUMER_GOOD\x10\x06\x12\t\n\x05OTHER\x10\x07"\xda\x01\n\x05Token\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12\x43\n\x0epart_of_speech\x18\x02 \x01(\x0b\x32+.google.cloud.language.v1beta2.PartOfSpeech\x12\x46\n\x0f\x64\x65pendency_edge\x18\x03 \x01(\x0b\x32-.google.cloud.language.v1beta2.DependencyEdge\x12\r\n\x05lemma\x18\x04 \x01(\t"-\n\tSentiment\x12\x11\n\tmagnitude\x18\x02 \x01(\x02\x12\r\n\x05score\x18\x03 \x01(\x02"\xdf\x10\n\x0cPartOfSpeech\x12<\n\x03tag\x18\x01 \x01(\x0e\x32/.google.cloud.language.v1beta2.PartOfSpeech.Tag\x12\x42\n\x06\x61spect\x18\x02 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Aspect\x12>\n\x04\x63\x61se\x18\x03 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Case\x12>\n\x04\x66orm\x18\x04 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Form\x12\x42\n\x06gender\x18\x05 
\x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Gender\x12>\n\x04mood\x18\x06 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Mood\x12\x42\n\x06number\x18\x07 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Number\x12\x42\n\x06person\x18\x08 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Person\x12\x42\n\x06proper\x18\t \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Proper\x12L\n\x0breciprocity\x18\n \x01(\x0e\x32\x37.google.cloud.language.v1beta2.PartOfSpeech.Reciprocity\x12@\n\x05tense\x18\x0b \x01(\x0e\x32\x31.google.cloud.language.v1beta2.PartOfSpeech.Tense\x12@\n\x05voice\x18\x0c \x01(\x0e\x32\x31.google.cloud.language.v1beta2.PartOfSpeech.Voice"\x8d\x01\n\x03Tag\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x41\x44J\x10\x01\x12\x07\n\x03\x41\x44P\x10\x02\x12\x07\n\x03\x41\x44V\x10\x03\x12\x08\n\x04\x43ONJ\x10\x04\x12\x07\n\x03\x44\x45T\x10\x05\x12\x08\n\x04NOUN\x10\x06\x12\x07\n\x03NUM\x10\x07\x12\x08\n\x04PRON\x10\x08\x12\x07\n\x03PRT\x10\t\x12\t\n\x05PUNCT\x10\n\x12\x08\n\x04VERB\x10\x0b\x12\x05\n\x01X\x10\x0c\x12\t\n\x05\x41\x46\x46IX\x10\r"O\n\x06\x41spect\x12\x12\n\x0e\x41SPECT_UNKNOWN\x10\x00\x12\x0e\n\nPERFECTIVE\x10\x01\x12\x10\n\x0cIMPERFECTIVE\x10\x02\x12\x0f\n\x0bPROGRESSIVE\x10\x03"\xf8\x01\n\x04\x43\x61se\x12\x10\n\x0c\x43\x41SE_UNKNOWN\x10\x00\x12\x0e\n\nACCUSATIVE\x10\x01\x12\r\n\tADVERBIAL\x10\x02\x12\x11\n\rCOMPLEMENTIVE\x10\x03\x12\n\n\x06\x44\x41TIVE\x10\x04\x12\x0c\n\x08GENITIVE\x10\x05\x12\x10\n\x0cINSTRUMENTAL\x10\x06\x12\x0c\n\x08LOCATIVE\x10\x07\x12\x0e\n\nNOMINATIVE\x10\x08\x12\x0b\n\x07OBLIQUE\x10\t\x12\r\n\tPARTITIVE\x10\n\x12\x11\n\rPREPOSITIONAL\x10\x0b\x12\x12\n\x0eREFLEXIVE_CASE\x10\x0c\x12\x11\n\rRELATIVE_CASE\x10\r\x12\x0c\n\x08VOCATIVE\x10\x0e"\xaf\x01\n\x04\x46orm\x12\x10\n\x0c\x46ORM_UNKNOWN\x10\x00\x12\x0c\n\x08\x41\x44NOMIAL\x10\x01\x12\r\n\tAUXILIARY\x10\x02\x12\x12\n\x0e\x43OMPLEMENTIZER\x10\x03\x12\x10\n\x0c\x46INAL_ENDING\x10\x04\x12\n\n\x06GERUND\x10\x05\x12\n\n\x06REALIS\x10\x06\x12\x0c\n\x08IRREALIS\x10\x07\x12\t\n\x05SHORT\x10\x08\x12\x08\n\x04LONG\x10\t\x12\t\n\x05ORDER\x10\n\x12\x0c\n\x08SPECIFIC\x10\x0b"E\n\x06Gender\x12\x12\n\x0eGENDER_UNKNOWN\x10\x00\x12\x0c\n\x08\x46\x45MININE\x10\x01\x12\r\n\tMASCULINE\x10\x02\x12\n\n\x06NEUTER\x10\x03"\x7f\n\x04Mood\x12\x10\n\x0cMOOD_UNKNOWN\x10\x00\x12\x14\n\x10\x43ONDITIONAL_MOOD\x10\x01\x12\x0e\n\nIMPERATIVE\x10\x02\x12\x0e\n\nINDICATIVE\x10\x03\x12\x11\n\rINTERROGATIVE\x10\x04\x12\x0b\n\x07JUSSIVE\x10\x05\x12\x0f\n\x0bSUBJUNCTIVE\x10\x06"@\n\x06Number\x12\x12\n\x0eNUMBER_UNKNOWN\x10\x00\x12\x0c\n\x08SINGULAR\x10\x01\x12\n\n\x06PLURAL\x10\x02\x12\x08\n\x04\x44UAL\x10\x03"T\n\x06Person\x12\x12\n\x0ePERSON_UNKNOWN\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\n\n\x06SECOND\x10\x02\x12\t\n\x05THIRD\x10\x03\x12\x14\n\x10REFLEXIVE_PERSON\x10\x04"8\n\x06Proper\x12\x12\n\x0ePROPER_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\x0e\n\nNOT_PROPER\x10\x02"J\n\x0bReciprocity\x12\x17\n\x13RECIPROCITY_UNKNOWN\x10\x00\x12\x0e\n\nRECIPROCAL\x10\x01\x12\x12\n\x0eNON_RECIPROCAL\x10\x02"s\n\x05Tense\x12\x11\n\rTENSE_UNKNOWN\x10\x00\x12\x15\n\x11\x43ONDITIONAL_TENSE\x10\x01\x12\n\n\x06\x46UTURE\x10\x02\x12\x08\n\x04PAST\x10\x03\x12\x0b\n\x07PRESENT\x10\x04\x12\r\n\tIMPERFECT\x10\x05\x12\x0e\n\nPLUPERFECT\x10\x06"B\n\x05Voice\x12\x11\n\rVOICE_UNKNOWN\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\r\n\tCAUSATIVE\x10\x02\x12\x0b\n\x07PASSIVE\x10\x03"\x9a\x08\n\x0e\x44\x65pendencyEdge\x12\x18\n\x10head_token_index\x18\x01 \x01(\x05\x12\x42\n\x05label\x18\x02 
\x01(\x0e\x32\x33.google.cloud.language.v1beta2.DependencyEdge.Label"\xa9\x07\n\x05Label\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x41\x42\x42REV\x10\x01\x12\t\n\x05\x41\x43OMP\x10\x02\x12\t\n\x05\x41\x44VCL\x10\x03\x12\n\n\x06\x41\x44VMOD\x10\x04\x12\x08\n\x04\x41MOD\x10\x05\x12\t\n\x05\x41PPOS\x10\x06\x12\x08\n\x04\x41TTR\x10\x07\x12\x07\n\x03\x41UX\x10\x08\x12\x0b\n\x07\x41UXPASS\x10\t\x12\x06\n\x02\x43\x43\x10\n\x12\t\n\x05\x43\x43OMP\x10\x0b\x12\x08\n\x04\x43ONJ\x10\x0c\x12\t\n\x05\x43SUBJ\x10\r\x12\r\n\tCSUBJPASS\x10\x0e\x12\x07\n\x03\x44\x45P\x10\x0f\x12\x07\n\x03\x44\x45T\x10\x10\x12\r\n\tDISCOURSE\x10\x11\x12\x08\n\x04\x44OBJ\x10\x12\x12\x08\n\x04\x45XPL\x10\x13\x12\x0c\n\x08GOESWITH\x10\x14\x12\x08\n\x04IOBJ\x10\x15\x12\x08\n\x04MARK\x10\x16\x12\x07\n\x03MWE\x10\x17\x12\x07\n\x03MWV\x10\x18\x12\x07\n\x03NEG\x10\x19\x12\x06\n\x02NN\x10\x1a\x12\x0c\n\x08NPADVMOD\x10\x1b\x12\t\n\x05NSUBJ\x10\x1c\x12\r\n\tNSUBJPASS\x10\x1d\x12\x07\n\x03NUM\x10\x1e\x12\n\n\x06NUMBER\x10\x1f\x12\x05\n\x01P\x10 \x12\r\n\tPARATAXIS\x10!\x12\x0b\n\x07PARTMOD\x10"\x12\t\n\x05PCOMP\x10#\x12\x08\n\x04POBJ\x10$\x12\x08\n\x04POSS\x10%\x12\x0b\n\x07POSTNEG\x10&\x12\x0b\n\x07PRECOMP\x10\'\x12\x0b\n\x07PRECONJ\x10(\x12\n\n\x06PREDET\x10)\x12\x08\n\x04PREF\x10*\x12\x08\n\x04PREP\x10+\x12\t\n\x05PRONL\x10,\x12\x07\n\x03PRT\x10-\x12\x06\n\x02PS\x10.\x12\x0c\n\x08QUANTMOD\x10/\x12\t\n\x05RCMOD\x10\x30\x12\x0c\n\x08RCMODREL\x10\x31\x12\t\n\x05RDROP\x10\x32\x12\x07\n\x03REF\x10\x33\x12\x0b\n\x07REMNANT\x10\x34\x12\x0e\n\nREPARANDUM\x10\x35\x12\x08\n\x04ROOT\x10\x36\x12\x08\n\x04SNUM\x10\x37\x12\x08\n\x04SUFF\x10\x38\x12\x08\n\x04TMOD\x10\x39\x12\t\n\x05TOPIC\x10:\x12\x08\n\x04VMOD\x10;\x12\x0c\n\x08VOCATIVE\x10<\x12\t\n\x05XCOMP\x10=\x12\n\n\x06SUFFIX\x10>\x12\t\n\x05TITLE\x10?\x12\x0c\n\x08\x41\x44VPHMOD\x10@\x12\x0b\n\x07\x41UXCAUS\x10\x41\x12\t\n\x05\x41UXVV\x10\x42\x12\t\n\x05\x44TMOD\x10\x43\x12\x0b\n\x07\x46OREIGN\x10\x44\x12\x06\n\x02KW\x10\x45\x12\x08\n\x04LIST\x10\x46\x12\x08\n\x04NOMC\x10G\x12\x0c\n\x08NOMCSUBJ\x10H\x12\x10\n\x0cNOMCSUBJPASS\x10I\x12\x08\n\x04NUMC\x10J\x12\x07\n\x03\x43OP\x10K\x12\x0e\n\nDISLOCATED\x10L\x12\x07\n\x03\x41SP\x10M\x12\x08\n\x04GMOD\x10N\x12\x08\n\x04GOBJ\x10O\x12\n\n\x06INFMOD\x10P\x12\x07\n\x03MES\x10Q\x12\t\n\x05NCOMP\x10R"\xf6\x01\n\rEntityMention\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12?\n\x04type\x18\x02 \x01(\x0e\x32\x31.google.cloud.language.v1beta2.EntityMention.Type\x12;\n\tsentiment\x18\x03 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment"0\n\x04Type\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\n\n\x06\x43OMMON\x10\x02"1\n\x08TextSpan\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x14\n\x0c\x62\x65gin_offset\x18\x02 \x01(\x05":\n\x16\x43lassificationCategory\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02"\x98\x01\n\x17\x41nalyzeSentimentRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.Document\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"\xae\x01\n\x18\x41nalyzeSentimentResponse\x12\x44\n\x12\x64ocument_sentiment\x18\x01 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x12\x10\n\x08language\x18\x02 \x01(\t\x12:\n\tsentences\x18\x03 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence"\x9e\x01\n\x1d\x41nalyzeEntitySentimentRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.Document\x12\x42\n\rencoding_type\x18\x02 
\x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"k\n\x1e\x41nalyzeEntitySentimentResponse\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x97\x01\n\x16\x41nalyzeEntitiesRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.Document\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"d\n\x17\x41nalyzeEntitiesResponse\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x95\x01\n\x14\x41nalyzeSyntaxRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.Document\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"\x9b\x01\n\x15\x41nalyzeSyntaxResponse\x12:\n\tsentences\x18\x01 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence\x12\x34\n\x06tokens\x18\x02 \x03(\x0b\x32$.google.cloud.language.v1beta2.Token\x12\x10\n\x08language\x18\x03 \x01(\t"P\n\x13\x43lassifyTextRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.Document"a\n\x14\x43lassifyTextResponse\x12I\n\ncategories\x18\x01 \x03(\x0b\x32\x35.google.cloud.language.v1beta2.ClassificationCategory"\xff\x02\n\x13\x41nnotateTextRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.Document\x12M\n\x08\x66\x65\x61tures\x18\x02 \x01(\x0b\x32;.google.cloud.language.v1beta2.AnnotateTextRequest.Features\x12\x42\n\rencoding_type\x18\x03 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType\x1a\x99\x01\n\x08\x46\x65\x61tures\x12\x16\n\x0e\x65xtract_syntax\x18\x01 \x01(\x08\x12\x18\n\x10\x65xtract_entities\x18\x02 \x01(\x08\x12"\n\x1a\x65xtract_document_sentiment\x18\x03 \x01(\x08\x12 \n\x18\x65xtract_entity_sentiment\x18\x04 \x01(\x08\x12\x15\n\rclassify_text\x18\x06 \x01(\x08"\xe4\x02\n\x14\x41nnotateTextResponse\x12:\n\tsentences\x18\x01 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence\x12\x34\n\x06tokens\x18\x02 \x03(\x0b\x32$.google.cloud.language.v1beta2.Token\x12\x37\n\x08\x65ntities\x18\x03 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x44\n\x12\x64ocument_sentiment\x18\x04 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x12\x10\n\x08language\x18\x05 \x01(\t\x12I\n\ncategories\x18\x06 \x03(\x0b\x32\x35.google.cloud.language.v1beta2.ClassificationCategory*8\n\x0c\x45ncodingType\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04UTF8\x10\x01\x12\t\n\x05UTF16\x10\x02\x12\t\n\x05UTF32\x10\x03\x32\xbd\x08\n\x0fLanguageService\x12\xb3\x01\n\x10\x41nalyzeSentiment\x12\x36.google.cloud.language.v1beta2.AnalyzeSentimentRequest\x1a\x37.google.cloud.language.v1beta2.AnalyzeSentimentResponse".\x82\xd3\xe4\x93\x02("#/v1beta2/documents:analyzeSentiment:\x01*\x12\xaf\x01\n\x0f\x41nalyzeEntities\x12\x35.google.cloud.language.v1beta2.AnalyzeEntitiesRequest\x1a\x36.google.cloud.language.v1beta2.AnalyzeEntitiesResponse"-\x82\xd3\xe4\x93\x02\'""/v1beta2/documents:analyzeEntities:\x01*\x12\xcb\x01\n\x16\x41nalyzeEntitySentiment\x12<.google.cloud.language.v1beta2.AnalyzeEntitySentimentRequest\x1a=.google.cloud.language.v1beta2.AnalyzeEntitySentimentResponse"4\x82\xd3\xe4\x93\x02.")/v1beta2/documents:analyzeEntitySentiment:\x01*\x12\xa7\x01\n\rAnalyzeSyntax\x12\x33.google.cloud.language.v1beta2.AnalyzeSyntaxRequest\x1a\x34.google.cloud.language.v1beta2.AnalyzeSyntaxResponse"+\x82\xd3\xe4\x93\x02%" 
/v1beta2/documents:analyzeSyntax:\x01*\x12\xa3\x01\n\x0c\x43lassifyText\x12\x32.google.cloud.language.v1beta2.ClassifyTextRequest\x1a\x33.google.cloud.language.v1beta2.ClassifyTextResponse"*\x82\xd3\xe4\x93\x02$"\x1f/v1beta2/documents:classifyText:\x01*\x12\xa3\x01\n\x0c\x41nnotateText\x12\x32.google.cloud.language.v1beta2.AnnotateTextRequest\x1a\x33.google.cloud.language.v1beta2.AnnotateTextResponse"*\x82\xd3\xe4\x93\x02$"\x1f/v1beta2/documents:annotateText:\x01*B\x82\x01\n!com.google.cloud.language.v1beta2B\x14LanguageServiceProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/language/v1beta2;languageb\x06proto3' + '\n:google/cloud/language_v1beta2/proto/language_service.proto\x12\x1dgoogle.cloud.language.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x08\x44ocument\x12:\n\x04type\x18\x01 \x01(\x0e\x32,.google.cloud.language.v1beta2.Document.Type\x12\x11\n\x07\x63ontent\x18\x02 \x01(\tH\x00\x12\x19\n\x0fgcs_content_uri\x18\x03 \x01(\tH\x00\x12\x10\n\x08language\x18\x04 \x01(\t"6\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPLAIN_TEXT\x10\x01\x12\x08\n\x04HTML\x10\x02\x42\x08\n\x06source"~\n\x08Sentence\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12;\n\tsentiment\x18\x02 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment"\x93\x04\n\x06\x45ntity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x04type\x18\x02 \x01(\x0e\x32*.google.cloud.language.v1beta2.Entity.Type\x12\x45\n\x08metadata\x18\x03 \x03(\x0b\x32\x33.google.cloud.language.v1beta2.Entity.MetadataEntry\x12\x10\n\x08salience\x18\x04 \x01(\x02\x12>\n\x08mentions\x18\x05 \x03(\x0b\x32,.google.cloud.language.v1beta2.EntityMention\x12;\n\tsentiment\x18\x06 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb9\x01\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06PERSON\x10\x01\x12\x0c\n\x08LOCATION\x10\x02\x12\x10\n\x0cORGANIZATION\x10\x03\x12\t\n\x05\x45VENT\x10\x04\x12\x0f\n\x0bWORK_OF_ART\x10\x05\x12\x11\n\rCONSUMER_GOOD\x10\x06\x12\t\n\x05OTHER\x10\x07\x12\x10\n\x0cPHONE_NUMBER\x10\t\x12\x0b\n\x07\x41\x44\x44RESS\x10\n\x12\x08\n\x04\x44\x41TE\x10\x0b\x12\n\n\x06NUMBER\x10\x0c\x12\t\n\x05PRICE\x10\r"\xda\x01\n\x05Token\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12\x43\n\x0epart_of_speech\x18\x02 \x01(\x0b\x32+.google.cloud.language.v1beta2.PartOfSpeech\x12\x46\n\x0f\x64\x65pendency_edge\x18\x03 \x01(\x0b\x32-.google.cloud.language.v1beta2.DependencyEdge\x12\r\n\x05lemma\x18\x04 \x01(\t"-\n\tSentiment\x12\x11\n\tmagnitude\x18\x02 \x01(\x02\x12\r\n\x05score\x18\x03 \x01(\x02"\xdf\x10\n\x0cPartOfSpeech\x12<\n\x03tag\x18\x01 \x01(\x0e\x32/.google.cloud.language.v1beta2.PartOfSpeech.Tag\x12\x42\n\x06\x61spect\x18\x02 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Aspect\x12>\n\x04\x63\x61se\x18\x03 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Case\x12>\n\x04\x66orm\x18\x04 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Form\x12\x42\n\x06gender\x18\x05 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Gender\x12>\n\x04mood\x18\x06 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Mood\x12\x42\n\x06number\x18\x07 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Number\x12\x42\n\x06person\x18\x08 
\x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Person\x12\x42\n\x06proper\x18\t \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Proper\x12L\n\x0breciprocity\x18\n \x01(\x0e\x32\x37.google.cloud.language.v1beta2.PartOfSpeech.Reciprocity\x12@\n\x05tense\x18\x0b \x01(\x0e\x32\x31.google.cloud.language.v1beta2.PartOfSpeech.Tense\x12@\n\x05voice\x18\x0c \x01(\x0e\x32\x31.google.cloud.language.v1beta2.PartOfSpeech.Voice"\x8d\x01\n\x03Tag\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x41\x44J\x10\x01\x12\x07\n\x03\x41\x44P\x10\x02\x12\x07\n\x03\x41\x44V\x10\x03\x12\x08\n\x04\x43ONJ\x10\x04\x12\x07\n\x03\x44\x45T\x10\x05\x12\x08\n\x04NOUN\x10\x06\x12\x07\n\x03NUM\x10\x07\x12\x08\n\x04PRON\x10\x08\x12\x07\n\x03PRT\x10\t\x12\t\n\x05PUNCT\x10\n\x12\x08\n\x04VERB\x10\x0b\x12\x05\n\x01X\x10\x0c\x12\t\n\x05\x41\x46\x46IX\x10\r"O\n\x06\x41spect\x12\x12\n\x0e\x41SPECT_UNKNOWN\x10\x00\x12\x0e\n\nPERFECTIVE\x10\x01\x12\x10\n\x0cIMPERFECTIVE\x10\x02\x12\x0f\n\x0bPROGRESSIVE\x10\x03"\xf8\x01\n\x04\x43\x61se\x12\x10\n\x0c\x43\x41SE_UNKNOWN\x10\x00\x12\x0e\n\nACCUSATIVE\x10\x01\x12\r\n\tADVERBIAL\x10\x02\x12\x11\n\rCOMPLEMENTIVE\x10\x03\x12\n\n\x06\x44\x41TIVE\x10\x04\x12\x0c\n\x08GENITIVE\x10\x05\x12\x10\n\x0cINSTRUMENTAL\x10\x06\x12\x0c\n\x08LOCATIVE\x10\x07\x12\x0e\n\nNOMINATIVE\x10\x08\x12\x0b\n\x07OBLIQUE\x10\t\x12\r\n\tPARTITIVE\x10\n\x12\x11\n\rPREPOSITIONAL\x10\x0b\x12\x12\n\x0eREFLEXIVE_CASE\x10\x0c\x12\x11\n\rRELATIVE_CASE\x10\r\x12\x0c\n\x08VOCATIVE\x10\x0e"\xaf\x01\n\x04\x46orm\x12\x10\n\x0c\x46ORM_UNKNOWN\x10\x00\x12\x0c\n\x08\x41\x44NOMIAL\x10\x01\x12\r\n\tAUXILIARY\x10\x02\x12\x12\n\x0e\x43OMPLEMENTIZER\x10\x03\x12\x10\n\x0c\x46INAL_ENDING\x10\x04\x12\n\n\x06GERUND\x10\x05\x12\n\n\x06REALIS\x10\x06\x12\x0c\n\x08IRREALIS\x10\x07\x12\t\n\x05SHORT\x10\x08\x12\x08\n\x04LONG\x10\t\x12\t\n\x05ORDER\x10\n\x12\x0c\n\x08SPECIFIC\x10\x0b"E\n\x06Gender\x12\x12\n\x0eGENDER_UNKNOWN\x10\x00\x12\x0c\n\x08\x46\x45MININE\x10\x01\x12\r\n\tMASCULINE\x10\x02\x12\n\n\x06NEUTER\x10\x03"\x7f\n\x04Mood\x12\x10\n\x0cMOOD_UNKNOWN\x10\x00\x12\x14\n\x10\x43ONDITIONAL_MOOD\x10\x01\x12\x0e\n\nIMPERATIVE\x10\x02\x12\x0e\n\nINDICATIVE\x10\x03\x12\x11\n\rINTERROGATIVE\x10\x04\x12\x0b\n\x07JUSSIVE\x10\x05\x12\x0f\n\x0bSUBJUNCTIVE\x10\x06"@\n\x06Number\x12\x12\n\x0eNUMBER_UNKNOWN\x10\x00\x12\x0c\n\x08SINGULAR\x10\x01\x12\n\n\x06PLURAL\x10\x02\x12\x08\n\x04\x44UAL\x10\x03"T\n\x06Person\x12\x12\n\x0ePERSON_UNKNOWN\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\n\n\x06SECOND\x10\x02\x12\t\n\x05THIRD\x10\x03\x12\x14\n\x10REFLEXIVE_PERSON\x10\x04"8\n\x06Proper\x12\x12\n\x0ePROPER_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\x0e\n\nNOT_PROPER\x10\x02"J\n\x0bReciprocity\x12\x17\n\x13RECIPROCITY_UNKNOWN\x10\x00\x12\x0e\n\nRECIPROCAL\x10\x01\x12\x12\n\x0eNON_RECIPROCAL\x10\x02"s\n\x05Tense\x12\x11\n\rTENSE_UNKNOWN\x10\x00\x12\x15\n\x11\x43ONDITIONAL_TENSE\x10\x01\x12\n\n\x06\x46UTURE\x10\x02\x12\x08\n\x04PAST\x10\x03\x12\x0b\n\x07PRESENT\x10\x04\x12\r\n\tIMPERFECT\x10\x05\x12\x0e\n\nPLUPERFECT\x10\x06"B\n\x05Voice\x12\x11\n\rVOICE_UNKNOWN\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\r\n\tCAUSATIVE\x10\x02\x12\x0b\n\x07PASSIVE\x10\x03"\x9a\x08\n\x0e\x44\x65pendencyEdge\x12\x18\n\x10head_token_index\x18\x01 \x01(\x05\x12\x42\n\x05label\x18\x02 
\x01(\x0e\x32\x33.google.cloud.language.v1beta2.DependencyEdge.Label"\xa9\x07\n\x05Label\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x41\x42\x42REV\x10\x01\x12\t\n\x05\x41\x43OMP\x10\x02\x12\t\n\x05\x41\x44VCL\x10\x03\x12\n\n\x06\x41\x44VMOD\x10\x04\x12\x08\n\x04\x41MOD\x10\x05\x12\t\n\x05\x41PPOS\x10\x06\x12\x08\n\x04\x41TTR\x10\x07\x12\x07\n\x03\x41UX\x10\x08\x12\x0b\n\x07\x41UXPASS\x10\t\x12\x06\n\x02\x43\x43\x10\n\x12\t\n\x05\x43\x43OMP\x10\x0b\x12\x08\n\x04\x43ONJ\x10\x0c\x12\t\n\x05\x43SUBJ\x10\r\x12\r\n\tCSUBJPASS\x10\x0e\x12\x07\n\x03\x44\x45P\x10\x0f\x12\x07\n\x03\x44\x45T\x10\x10\x12\r\n\tDISCOURSE\x10\x11\x12\x08\n\x04\x44OBJ\x10\x12\x12\x08\n\x04\x45XPL\x10\x13\x12\x0c\n\x08GOESWITH\x10\x14\x12\x08\n\x04IOBJ\x10\x15\x12\x08\n\x04MARK\x10\x16\x12\x07\n\x03MWE\x10\x17\x12\x07\n\x03MWV\x10\x18\x12\x07\n\x03NEG\x10\x19\x12\x06\n\x02NN\x10\x1a\x12\x0c\n\x08NPADVMOD\x10\x1b\x12\t\n\x05NSUBJ\x10\x1c\x12\r\n\tNSUBJPASS\x10\x1d\x12\x07\n\x03NUM\x10\x1e\x12\n\n\x06NUMBER\x10\x1f\x12\x05\n\x01P\x10 \x12\r\n\tPARATAXIS\x10!\x12\x0b\n\x07PARTMOD\x10"\x12\t\n\x05PCOMP\x10#\x12\x08\n\x04POBJ\x10$\x12\x08\n\x04POSS\x10%\x12\x0b\n\x07POSTNEG\x10&\x12\x0b\n\x07PRECOMP\x10\'\x12\x0b\n\x07PRECONJ\x10(\x12\n\n\x06PREDET\x10)\x12\x08\n\x04PREF\x10*\x12\x08\n\x04PREP\x10+\x12\t\n\x05PRONL\x10,\x12\x07\n\x03PRT\x10-\x12\x06\n\x02PS\x10.\x12\x0c\n\x08QUANTMOD\x10/\x12\t\n\x05RCMOD\x10\x30\x12\x0c\n\x08RCMODREL\x10\x31\x12\t\n\x05RDROP\x10\x32\x12\x07\n\x03REF\x10\x33\x12\x0b\n\x07REMNANT\x10\x34\x12\x0e\n\nREPARANDUM\x10\x35\x12\x08\n\x04ROOT\x10\x36\x12\x08\n\x04SNUM\x10\x37\x12\x08\n\x04SUFF\x10\x38\x12\x08\n\x04TMOD\x10\x39\x12\t\n\x05TOPIC\x10:\x12\x08\n\x04VMOD\x10;\x12\x0c\n\x08VOCATIVE\x10<\x12\t\n\x05XCOMP\x10=\x12\n\n\x06SUFFIX\x10>\x12\t\n\x05TITLE\x10?\x12\x0c\n\x08\x41\x44VPHMOD\x10@\x12\x0b\n\x07\x41UXCAUS\x10\x41\x12\t\n\x05\x41UXVV\x10\x42\x12\t\n\x05\x44TMOD\x10\x43\x12\x0b\n\x07\x46OREIGN\x10\x44\x12\x06\n\x02KW\x10\x45\x12\x08\n\x04LIST\x10\x46\x12\x08\n\x04NOMC\x10G\x12\x0c\n\x08NOMCSUBJ\x10H\x12\x10\n\x0cNOMCSUBJPASS\x10I\x12\x08\n\x04NUMC\x10J\x12\x07\n\x03\x43OP\x10K\x12\x0e\n\nDISLOCATED\x10L\x12\x07\n\x03\x41SP\x10M\x12\x08\n\x04GMOD\x10N\x12\x08\n\x04GOBJ\x10O\x12\n\n\x06INFMOD\x10P\x12\x07\n\x03MES\x10Q\x12\t\n\x05NCOMP\x10R"\xf6\x01\n\rEntityMention\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12?\n\x04type\x18\x02 \x01(\x0e\x32\x31.google.cloud.language.v1beta2.EntityMention.Type\x12;\n\tsentiment\x18\x03 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment"0\n\x04Type\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\n\n\x06\x43OMMON\x10\x02"1\n\x08TextSpan\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x14\n\x0c\x62\x65gin_offset\x18\x02 \x01(\x05":\n\x16\x43lassificationCategory\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02"\x9d\x01\n\x17\x41nalyzeSentimentRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"\xae\x01\n\x18\x41nalyzeSentimentResponse\x12\x44\n\x12\x64ocument_sentiment\x18\x01 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x12\x10\n\x08language\x18\x02 \x01(\t\x12:\n\tsentences\x18\x03 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence"\xa3\x01\n\x1d\x41nalyzeEntitySentimentRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 
\x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"k\n\x1e\x41nalyzeEntitySentimentResponse\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x9c\x01\n\x16\x41nalyzeEntitiesRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"d\n\x17\x41nalyzeEntitiesResponse\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x9a\x01\n\x14\x41nalyzeSyntaxRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"\x9b\x01\n\x15\x41nalyzeSyntaxResponse\x12:\n\tsentences\x18\x01 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence\x12\x34\n\x06tokens\x18\x02 \x03(\x0b\x32$.google.cloud.language.v1beta2.Token\x12\x10\n\x08language\x18\x03 \x01(\t"U\n\x13\x43lassifyTextRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02"a\n\x14\x43lassifyTextResponse\x12I\n\ncategories\x18\x01 \x03(\x0b\x32\x35.google.cloud.language.v1beta2.ClassificationCategory"\x89\x03\n\x13\x41nnotateTextRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12R\n\x08\x66\x65\x61tures\x18\x02 \x01(\x0b\x32;.google.cloud.language.v1beta2.AnnotateTextRequest.FeaturesB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x03 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType\x1a\x99\x01\n\x08\x46\x65\x61tures\x12\x16\n\x0e\x65xtract_syntax\x18\x01 \x01(\x08\x12\x18\n\x10\x65xtract_entities\x18\x02 \x01(\x08\x12"\n\x1a\x65xtract_document_sentiment\x18\x03 \x01(\x08\x12 \n\x18\x65xtract_entity_sentiment\x18\x04 \x01(\x08\x12\x15\n\rclassify_text\x18\x06 \x01(\x08"\xe4\x02\n\x14\x41nnotateTextResponse\x12:\n\tsentences\x18\x01 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence\x12\x34\n\x06tokens\x18\x02 \x03(\x0b\x32$.google.cloud.language.v1beta2.Token\x12\x37\n\x08\x65ntities\x18\x03 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x44\n\x12\x64ocument_sentiment\x18\x04 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x12\x10\n\x08language\x18\x05 \x01(\t\x12I\n\ncategories\x18\x06 
\x03(\x0b\x32\x35.google.cloud.language.v1beta2.ClassificationCategory*8\n\x0c\x45ncodingType\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04UTF8\x10\x01\x12\t\n\x05UTF16\x10\x02\x12\t\n\x05UTF32\x10\x03\x32\x8a\x0b\n\x0fLanguageService\x12\xd7\x01\n\x10\x41nalyzeSentiment\x12\x36.google.cloud.language.v1beta2.AnalyzeSentimentRequest\x1a\x37.google.cloud.language.v1beta2.AnalyzeSentimentResponse"R\x82\xd3\xe4\x93\x02("#/v1beta2/documents:analyzeSentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xd3\x01\n\x0f\x41nalyzeEntities\x12\x35.google.cloud.language.v1beta2.AnalyzeEntitiesRequest\x1a\x36.google.cloud.language.v1beta2.AnalyzeEntitiesResponse"Q\x82\xd3\xe4\x93\x02\'""/v1beta2/documents:analyzeEntities:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xef\x01\n\x16\x41nalyzeEntitySentiment\x12<.google.cloud.language.v1beta2.AnalyzeEntitySentimentRequest\x1a=.google.cloud.language.v1beta2.AnalyzeEntitySentimentResponse"X\x82\xd3\xe4\x93\x02.")/v1beta2/documents:analyzeEntitySentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xcb\x01\n\rAnalyzeSyntax\x12\x33.google.cloud.language.v1beta2.AnalyzeSyntaxRequest\x1a\x34.google.cloud.language.v1beta2.AnalyzeSyntaxResponse"O\x82\xd3\xe4\x93\x02%" /v1beta2/documents:analyzeSyntax:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xae\x01\n\x0c\x43lassifyText\x12\x32.google.cloud.language.v1beta2.ClassifyTextRequest\x1a\x33.google.cloud.language.v1beta2.ClassifyTextResponse"5\x82\xd3\xe4\x93\x02$"\x1f/v1beta2/documents:classifyText:\x01*\xda\x41\x08\x64ocument\x12\xd9\x01\n\x0c\x41nnotateText\x12\x32.google.cloud.language.v1beta2.AnnotateTextRequest\x1a\x33.google.cloud.language.v1beta2.AnnotateTextResponse"`\x82\xd3\xe4\x93\x02$"\x1f/v1beta2/documents:annotateText:\x01*\xda\x41\x1f\x64ocument,features,encoding_type\xda\x41\x11\x64ocument,features\x1az\xca\x41\x17language.googleapis.com\xd2\x41]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platformB\x82\x01\n!com.google.cloud.language.v1beta2B\x14LanguageServiceProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/language/v1beta2;languageb\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, ], ) @@ -63,8 +61,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=6939, - serialized_end=6995, + serialized_start=7035, + serialized_end=7091, ) _sym_db.RegisterEnumDescriptor(_ENCODINGTYPE) @@ -97,8 +95,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=355, - serialized_end=409, + serialized_start=351, + serialized_end=405, ) _sym_db.RegisterEnumDescriptor(_DOCUMENT_TYPE) @@ -132,11 +130,26 @@ _descriptor.EnumValueDescriptor( name="OTHER", index=7, number=7, serialized_options=None, type=None ), + _descriptor.EnumValueDescriptor( + name="PHONE_NUMBER", index=8, number=9, serialized_options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="ADDRESS", index=9, number=10, serialized_options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="DATE", index=10, number=11, serialized_options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="NUMBER", 
index=11, number=12, serialized_options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="PRICE", index=12, number=13, serialized_options=None, type=None + ), ], containing_type=None, serialized_options=None, - serialized_start=895, - serialized_end=1016, + serialized_start=892, + serialized_end=1077, ) _sym_db.RegisterEnumDescriptor(_ENTITY_TYPE) @@ -191,8 +204,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2108, - serialized_end=2249, + serialized_start=2169, + serialized_end=2310, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_TAG) @@ -217,8 +230,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2251, - serialized_end=2330, + serialized_start=2312, + serialized_end=2391, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_ASPECT) @@ -288,8 +301,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2333, - serialized_end=2581, + serialized_start=2394, + serialized_end=2642, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_CASE) @@ -338,8 +351,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2584, - serialized_end=2759, + serialized_start=2645, + serialized_end=2820, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_FORM) @@ -364,8 +377,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2761, - serialized_end=2830, + serialized_start=2822, + serialized_end=2891, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_GENDER) @@ -403,8 +416,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2832, - serialized_end=2959, + serialized_start=2893, + serialized_end=3020, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_MOOD) @@ -429,8 +442,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2961, - serialized_end=3025, + serialized_start=3022, + serialized_end=3086, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_NUMBER) @@ -462,8 +475,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3027, - serialized_end=3111, + serialized_start=3088, + serialized_end=3172, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_PERSON) @@ -485,8 +498,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3113, - serialized_end=3169, + serialized_start=3174, + serialized_end=3230, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_PROPER) @@ -512,8 +525,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3171, - serialized_end=3245, + serialized_start=3232, + serialized_end=3306, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_RECIPROCITY) @@ -551,8 +564,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3247, - serialized_end=3362, + serialized_start=3308, + serialized_end=3423, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_TENSE) @@ -577,8 +590,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3364, - serialized_end=3430, + serialized_start=3425, + serialized_end=3491, ) _sym_db.RegisterEnumDescriptor(_PARTOFSPEECH_VOICE) @@ -840,8 +853,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3546, - serialized_end=4483, + serialized_start=3607, + serialized_end=4544, ) _sym_db.RegisterEnumDescriptor(_DEPENDENCYEDGE_LABEL) @@ -863,8 +876,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4684, - serialized_end=4732, + serialized_start=4745, + serialized_end=4793, ) _sym_db.RegisterEnumDescriptor(_ENTITYMENTION_TYPE) @@ -965,8 +978,8 @@ fields=[], ) ], - serialized_start=219, - serialized_end=419, + 
serialized_start=215, + serialized_end=415, ) @@ -1022,8 +1035,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=421, - serialized_end=547, + serialized_start=417, + serialized_end=543, ) @@ -1079,8 +1092,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=846, - serialized_end=893, + serialized_start=842, + serialized_end=889, ) _ENTITY = _descriptor.Descriptor( @@ -1207,8 +1220,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=550, - serialized_end=1016, + serialized_start=546, + serialized_end=1077, ) @@ -1300,8 +1313,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1019, - serialized_end=1237, + serialized_start=1080, + serialized_end=1298, ) @@ -1357,8 +1370,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1239, - serialized_end=1284, + serialized_start=1300, + serialized_end=1345, ) @@ -1607,8 +1620,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1287, - serialized_end=3430, + serialized_start=1348, + serialized_end=3491, ) @@ -1664,8 +1677,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3433, - serialized_end=4483, + serialized_start=3494, + serialized_end=4544, ) @@ -1739,8 +1752,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4486, - serialized_end=4732, + serialized_start=4547, + serialized_end=4793, ) @@ -1796,8 +1809,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4734, - serialized_end=4783, + serialized_start=4795, + serialized_end=4844, ) @@ -1853,8 +1866,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4785, - serialized_end=4843, + serialized_start=4846, + serialized_end=4904, ) @@ -1880,7 +1893,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1910,8 +1923,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4846, - serialized_end=4998, + serialized_start=4907, + serialized_end=5064, ) @@ -1985,8 +1998,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5001, - serialized_end=5175, + serialized_start=5067, + serialized_end=5241, ) @@ -2012,7 +2025,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2042,8 +2055,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5178, - serialized_end=5336, + serialized_start=5244, + serialized_end=5407, ) @@ -2099,8 +2112,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5338, - serialized_end=5445, + serialized_start=5409, + serialized_end=5516, ) @@ -2126,7 +2139,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2156,8 +2169,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5448, - serialized_end=5599, + serialized_start=5519, + serialized_end=5675, ) @@ -2213,8 +2226,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5601, - serialized_end=5701, + serialized_start=5677, + serialized_end=5777, ) @@ -2240,7 +2253,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), 
_descriptor.FieldDescriptor( @@ -2270,8 +2283,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5704, - serialized_end=5853, + serialized_start=5780, + serialized_end=5934, ) @@ -2345,8 +2358,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5856, - serialized_end=6011, + serialized_start=5937, + serialized_end=6092, ) @@ -2372,7 +2385,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ) ], @@ -2384,8 +2397,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6013, - serialized_end=6093, + serialized_start=6094, + serialized_end=6179, ) @@ -2423,8 +2436,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6095, - serialized_end=6192, + serialized_start=6181, + serialized_end=6278, ) @@ -2534,8 +2547,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6425, - serialized_end=6578, + serialized_start=6521, + serialized_end=6674, ) _ANNOTATETEXTREQUEST = _descriptor.Descriptor( @@ -2560,7 +2573,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2578,7 +2591,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2608,8 +2621,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6195, - serialized_end=6578, + serialized_start=6281, + serialized_end=6674, ) @@ -2737,8 +2750,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6581, - serialized_end=6937, + serialized_start=6677, + serialized_end=7033, ) _DOCUMENT.fields_by_name["type"].enum_type = _DOCUMENT_TYPE @@ -2871,7 +2884,8 @@ The source of the document: a string containing the content or a Google Cloud Storage URI. content: - The content of the input in string format. + The content of the input in string format. Cloud audit logging + exempt since it is based on user data. gcs_content_uri: The Google Cloud Storage URI where the file content is located. This URI must be of the form: @@ -2943,9 +2957,11 @@ type: The entity type. metadata: - Metadata associated with the entity. Currently, Wikipedia - URLs and Knowledge Graph MIDs are provided, if available. The - associated keys are "wikipedia\_url" and "mid", respectively. + Metadata associated with the entity. For most entity types, + the metadata is a Wikipedia URL (``wikipedia_url``) and + Knowledge Graph MID (``mid``), if they are available. For the + metadata associated with other entity types, see the Type + table below. salience: The salience score associated with the entity in the [0, 1.0] range. The salience score for an entity provides information @@ -3001,7 +3017,7 @@ DESCRIPTOR=_SENTIMENT, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""Represents the feeling associated with the entire text or entities in - the text. + the text. Next ID: 6 Attributes: @@ -3144,7 +3160,8 @@ Attributes: name: - The name of the category representing the document. + The name of the category representing the document, from the + `predefined taxonomy `__. confidence: The classifier's confidence of the category. Number represents how certain the classifier is that this category represents @@ -3166,7 +3183,7 @@ Attributes: document: - Input document. + Required. 
Input document. encoding_type: The encoding type used by the API to calculate sentence offsets for the sentence sentiment. @@ -3213,7 +3230,7 @@ Attributes: document: - Input document. + Required. Input document. encoding_type: The encoding type used by the API to calculate offsets. """, @@ -3258,7 +3275,7 @@ Attributes: document: - Input document. + Required. Input document. encoding_type: The encoding type used by the API to calculate offsets. """, @@ -3302,7 +3319,7 @@ Attributes: document: - Input document. + Required. Input document. encoding_type: The encoding type used by the API to calculate offsets. """, @@ -3349,7 +3366,7 @@ Attributes: document: - Input document. + Required. Input document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.ClassifyTextRequest) ), @@ -3386,7 +3403,7 @@ __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""All available features for sentiment, syntax, and semantic analysis. Setting each one to true will enable that specific analysis for the - input. + input. Next ID: 10 Attributes: @@ -3399,7 +3416,9 @@ extract_entity_sentiment: Extract entities and their associated sentiment. classify_text: - Classify the full document into categories. + Classify the full document into categories. If this is true, + the API will use the default model which classifies into a + `predefined taxonomy `__. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnnotateTextRequest.Features) ), @@ -3412,9 +3431,9 @@ Attributes: document: - Input document. + Required. Input document. features: - The enabled features. + Required. The enabled features. encoding_type: The encoding type used by the API to calculate offsets. """, @@ -3470,15 +3489,24 @@ DESCRIPTOR._options = None _ENTITY_METADATAENTRY._options = None +_ANALYZESENTIMENTREQUEST.fields_by_name["document"]._options = None +_ANALYZEENTITYSENTIMENTREQUEST.fields_by_name["document"]._options = None +_ANALYZEENTITIESREQUEST.fields_by_name["document"]._options = None +_ANALYZESYNTAXREQUEST.fields_by_name["document"]._options = None +_CLASSIFYTEXTREQUEST.fields_by_name["document"]._options = None +_ANNOTATETEXTREQUEST.fields_by_name["document"]._options = None +_ANNOTATETEXTREQUEST.fields_by_name["features"]._options = None _LANGUAGESERVICE = _descriptor.ServiceDescriptor( name="LanguageService", full_name="google.cloud.language.v1beta2.LanguageService", file=DESCRIPTOR, index=0, - serialized_options=None, - serialized_start=6998, - serialized_end=8083, + serialized_options=_b( + "\312A\027language.googleapis.com\322A]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platform" + ), + serialized_start=7094, + serialized_end=8512, methods=[ _descriptor.MethodDescriptor( name="AnalyzeSentiment", @@ -3488,7 +3516,7 @@ input_type=_ANALYZESENTIMENTREQUEST, output_type=_ANALYZESENTIMENTRESPONSE, serialized_options=_b( - '\202\323\344\223\002("#/v1beta2/documents:analyzeSentiment:\001*' + '\202\323\344\223\002("#/v1beta2/documents:analyzeSentiment:\001*\332A\026document,encoding_type\332A\010document' ), ), _descriptor.MethodDescriptor( @@ -3499,7 +3527,7 @@ input_type=_ANALYZEENTITIESREQUEST, output_type=_ANALYZEENTITIESRESPONSE, serialized_options=_b( - '\202\323\344\223\002\'""/v1beta2/documents:analyzeEntities:\001*' + '\202\323\344\223\002\'""/v1beta2/documents:analyzeEntities:\001*\332A\026document,encoding_type\332A\010document' ), ), 
_descriptor.MethodDescriptor( @@ -3510,7 +3538,7 @@ input_type=_ANALYZEENTITYSENTIMENTREQUEST, output_type=_ANALYZEENTITYSENTIMENTRESPONSE, serialized_options=_b( - '\202\323\344\223\002.")/v1beta2/documents:analyzeEntitySentiment:\001*' + '\202\323\344\223\002.")/v1beta2/documents:analyzeEntitySentiment:\001*\332A\026document,encoding_type\332A\010document' ), ), _descriptor.MethodDescriptor( @@ -3521,7 +3549,7 @@ input_type=_ANALYZESYNTAXREQUEST, output_type=_ANALYZESYNTAXRESPONSE, serialized_options=_b( - '\202\323\344\223\002%" /v1beta2/documents:analyzeSyntax:\001*' + '\202\323\344\223\002%" /v1beta2/documents:analyzeSyntax:\001*\332A\026document,encoding_type\332A\010document' ), ), _descriptor.MethodDescriptor( @@ -3532,7 +3560,7 @@ input_type=_CLASSIFYTEXTREQUEST, output_type=_CLASSIFYTEXTRESPONSE, serialized_options=_b( - '\202\323\344\223\002$"\037/v1beta2/documents:classifyText:\001*' + '\202\323\344\223\002$"\037/v1beta2/documents:classifyText:\001*\332A\010document' ), ), _descriptor.MethodDescriptor( @@ -3543,7 +3571,7 @@ input_type=_ANNOTATETEXTREQUEST, output_type=_ANNOTATETEXTRESPONSE, serialized_options=_b( - '\202\323\344\223\002$"\037/v1beta2/documents:annotateText:\001*' + '\202\323\344\223\002$"\037/v1beta2/documents:annotateText:\001*\332A\037document,features,encoding_type\332A\021document,features' ), ), ], diff --git a/google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py b/google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py index da422370..e0e1e412 100644 --- a/google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py +++ b/google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py @@ -71,10 +71,8 @@ def AnalyzeEntities(self, request, context): raise NotImplementedError("Method not implemented!") def AnalyzeEntitySentiment(self, request, context): - """Finds entities, similar to - [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] - in the text and analyzes sentiment associated with each entity and its - mentions. + """Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes + sentiment associated with each entity and its mentions. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") diff --git a/synth.metadata b/synth.metadata index f029a848..8e564ada 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2019-09-26T12:28:07.785217Z", + "updateTime": "2019-10-01T12:29:45.277286Z", "sources": [ { "generator": { @@ -12,8 +12,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "4c2ca81a0c976d4d37a8999984b7894d9af22124", - "internalRef": "271130964" + "sha": "ce3c574d1266026cebea3a893247790bd68191c2", + "internalRef": "272147209" } }, { From 1fe4105e078f84f1d4ea713550c26bdf91096d4a Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Mon, 11 Nov 2019 15:15:32 -0800 Subject: [PATCH 013/209] docs: add python 2 sunset banner to documentation (#9036) --- docs/_static/custom.css | 4 +++ docs/_templates/layout.html | 49 +++++++++++++++++++++++++++++++++++++ docs/conf.py | 2 +- 3 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 docs/_static/custom.css create mode 100644 docs/_templates/layout.html diff --git a/docs/_static/custom.css b/docs/_static/custom.css new file mode 100644 index 00000000..9a6f9f8d --- /dev/null +++ b/docs/_static/custom.css @@ -0,0 +1,4 @@ +div#python2-eol { + border-color: red; + border-width: medium; +} \ No newline at end of file diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html new file mode 100644 index 00000000..de457b2c --- /dev/null +++ b/docs/_templates/layout.html @@ -0,0 +1,49 @@ +{% extends "!layout.html" %} +{%- block content %} +{%- if theme_fixed_sidebar|lower == 'true' %} +
+ {{ sidebar() }} + {%- block document %} +
+ {%- if render_sidebar %} +
+ {%- endif %} + + {%- block relbar_top %} + {%- if theme_show_relbar_top|tobool %} + + {%- endif %} + {% endblock %} + +
+
+ On January 1, 2020 this library will no longer support Python 2 on the latest released version. + Previously released library versions will continue to be available. For more information please + visit Python 2 support on Google Cloud. +
+ {% block body %} {% endblock %} +
+ + {%- block relbar_bottom %} + {%- if theme_show_relbar_bottom|tobool %} + + {%- endif %} + {% endblock %} + + {%- if render_sidebar %} +
+ {%- endif %} +
+ {%- endblock %} +
+
+{%- else %} +{{ super() }} +{%- endif %} +{%- endblock %} diff --git a/docs/conf.py b/docs/conf.py index 999e8f0e..fd043da0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -163,7 +163,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = [] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied From 5e31af70864eaa0fe3711bf42a9c27e69351fb8b Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 12 Nov 2019 12:00:59 -0800 Subject: [PATCH 014/209] chore(language): change spacing in docs templates (via synth) (#9755) --- docs/_static/custom.css | 2 +- docs/_templates/layout.html | 1 + synth.metadata | 12 ++++++------ 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/_static/custom.css b/docs/_static/custom.css index 9a6f9f8d..0abaf229 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -1,4 +1,4 @@ div#python2-eol { border-color: red; border-width: medium; -} \ No newline at end of file +} \ No newline at end of file diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index de457b2c..228529ef 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -1,3 +1,4 @@ + {% extends "!layout.html" %} {%- block content %} {%- if theme_fixed_sidebar|lower == 'true' %} diff --git a/synth.metadata b/synth.metadata index 8e564ada..4c38f75e 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,26 +1,26 @@ { - "updateTime": "2019-10-01T12:29:45.277286Z", + "updateTime": "2019-11-12T13:32:17.833551Z", "sources": [ { "generator": { "name": "artman", - "version": "0.37.1", - "dockerImage": "googleapis/artman@sha256:6068f67900a3f0bdece596b97bda8fc70406ca0e137a941f4c81d3217c994a80" + "version": "0.41.1", + "dockerImage": "googleapis/artman@sha256:545c758c76c3f779037aa259023ec3d1ef2d57d2c8cd00a222cb187d63ceac5e" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "ce3c574d1266026cebea3a893247790bd68191c2", - "internalRef": "272147209" + "sha": "f69562be0608904932bdcfbc5ad8b9a22d9dceb8", + "internalRef": "279774957" } }, { "template": { "name": "python_library", "origin": "synthtool.gcp", - "version": "2019.5.2" + "version": "2019.10.17" } } ], From 6246ef904871405334c0b3bd6c2490b79ffe56fa Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 10 Dec 2019 11:08:01 -0800 Subject: [PATCH 015/209] docs(language): edit hyphenation of "part-of-speech" (via synth) (#9954) --- .../gapic/language_service_client.py | 2 +- .../language_service_grpc_transport.py | 2 +- .../proto/language_service.proto | 32 +++++++++---------- .../proto/language_service_pb2_grpc.py | 2 +- synth.metadata | 10 +++--- 5 files changed, 24 insertions(+), 24 deletions(-) diff --git a/google/cloud/language_v1beta2/gapic/language_service_client.py b/google/cloud/language_v1beta2/gapic/language_service_client.py index 73af0ff6..1f248f6e 100644 --- a/google/cloud/language_v1beta2/gapic/language_service_client.py +++ b/google/cloud/language_v1beta2/gapic/language_service_client.py @@ -391,7 +391,7 @@ def analyze_syntax( ): """ Analyzes the syntax of the text and provides sentence boundaries and - tokenization along with part of speech tags, dependency trees, 
and other + tokenization along with part-of-speech tags, dependency trees, and other properties. Example: diff --git a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py index df140c2f..7cc7f5a1 100644 --- a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py +++ b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py @@ -156,7 +156,7 @@ def analyze_syntax(self): """Return the gRPC stub for :meth:`LanguageServiceClient.analyze_syntax`. Analyzes the syntax of the text and provides sentence boundaries and - tokenization along with part of speech tags, dependency trees, and other + tokenization along with part-of-speech tags, dependency trees, and other properties. Returns: diff --git a/google/cloud/language_v1beta2/proto/language_service.proto b/google/cloud/language_v1beta2/proto/language_service.proto index d0242e59..384cdf91 100644 --- a/google/cloud/language_v1beta2/proto/language_service.proto +++ b/google/cloud/language_v1beta2/proto/language_service.proto @@ -69,7 +69,7 @@ service LanguageService { } // Analyzes the syntax of the text and provides sentence boundaries and - // tokenization along with part of speech tags, dependency trees, and other + // tokenization along with part-of-speech tags, dependency trees, and other // properties. rpc AnalyzeSyntax(AnalyzeSyntaxRequest) returns (AnalyzeSyntaxResponse) { option (google.api.http) = { @@ -272,6 +272,21 @@ message Entity { Sentiment sentiment = 6; } +// Represents the smallest syntactic building block of the text. +message Token { + // The token text. + TextSpan text = 1; + + // Parts of speech tag for this token. + PartOfSpeech part_of_speech = 2; + + // Dependency tree parse for this token. + DependencyEdge dependency_edge = 3; + + // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token. + string lemma = 4; +} + // Represents the text encoding that the caller uses to process the output. // Providing an `EncodingType` is recommended because the API provides the // beginning offsets for various outputs, such as tokens and mentions, and @@ -298,21 +313,6 @@ enum EncodingType { UTF32 = 3; } -// Represents the smallest syntactic building block of the text. -message Token { - // The token text. - TextSpan text = 1; - - // Parts of speech tag for this token. - PartOfSpeech part_of_speech = 2; - - // Dependency tree parse for this token. - DependencyEdge dependency_edge = 3; - - // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token. - string lemma = 4; -} - // Represents the feeling associated with the entire text or entities in // the text. // Next ID: 6 diff --git a/google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py b/google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py index e0e1e412..4db8cf82 100644 --- a/google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py +++ b/google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py @@ -80,7 +80,7 @@ def AnalyzeEntitySentiment(self, request, context): def AnalyzeSyntax(self, request, context): """Analyzes the syntax of the text and provides sentence boundaries and - tokenization along with part of speech tags, dependency trees, and other + tokenization along with part-of-speech tags, dependency trees, and other properties. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) diff --git a/synth.metadata b/synth.metadata index 4c38f75e..18a4b70b 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-11-12T13:32:17.833551Z", + "updateTime": "2019-12-10T13:18:39.049538Z", "sources": [ { "generator": { "name": "artman", - "version": "0.41.1", - "dockerImage": "googleapis/artman@sha256:545c758c76c3f779037aa259023ec3d1ef2d57d2c8cd00a222cb187d63ceac5e" + "version": "0.42.1", + "dockerImage": "googleapis/artman@sha256:c773192618c608a7a0415dd95282f841f8e6bcdef7dd760a988c93b77a64bd57" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "f69562be0608904932bdcfbc5ad8b9a22d9dceb8", - "internalRef": "279774957" + "sha": "6cc9499e225a4f6a5e34fe07e390f67055d7991c", + "internalRef": "284643689" } }, { From 223d6140145dcf5c48af206212db58a062a7937b Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Thu, 16 Jan 2020 05:57:52 -0800 Subject: [PATCH 016/209] docs(language): fixes typo in Natural Language samples (#10134) Changes "Parts of spech" to "Parts of speech". --- samples/v1/language_syntax_gcs.py | 2 +- samples/v1/language_syntax_text.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/v1/language_syntax_gcs.py b/samples/v1/language_syntax_gcs.py index 74d88787..732f77df 100644 --- a/samples/v1/language_syntax_gcs.py +++ b/samples/v1/language_syntax_gcs.py @@ -64,7 +64,7 @@ def sample_analyze_syntax(gcs_content_uri): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of spech are as defined in: + # Parts of speech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. diff --git a/samples/v1/language_syntax_text.py b/samples/v1/language_syntax_text.py index 4b11d4d0..d1c3104e 100644 --- a/samples/v1/language_syntax_text.py +++ b/samples/v1/language_syntax_text.py @@ -63,7 +63,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of spech are as defined in: + # Parts of speech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. 
From b68b2166d8e4d81a7e51e701f8facdfd7fb82a26 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 29 Jan 2020 16:52:45 -0800 Subject: [PATCH 017/209] docs(language): change docstring formatting; bump copyright year to 2020 (via synth) (#10234) --- google/cloud/language_v1/gapic/enums.py | 2 +- .../gapic/language_service_client.py | 2 +- .../language_service_grpc_transport.py | 2 +- .../language_v1/proto/language_service_pb2.py | 38 ++--- google/cloud/language_v1beta2/gapic/enums.py | 2 +- .../gapic/language_service_client.py | 2 +- .../language_service_grpc_transport.py | 2 +- .../proto/language_service_pb2.py | 30 ++-- samples/v1/language_classify_gcs.py | 2 +- samples/v1/language_classify_text.py | 2 +- samples/v1/language_entities_gcs.py | 2 +- samples/v1/language_entities_text.py | 2 +- samples/v1/language_entity_sentiment_gcs.py | 2 +- samples/v1/language_entity_sentiment_text.py | 2 +- samples/v1/language_sentiment_gcs.py | 2 +- samples/v1/language_sentiment_text.py | 2 +- samples/v1/language_syntax_gcs.py | 4 +- samples/v1/language_syntax_text.py | 4 +- synth.metadata | 146 +++++++++++++++++- synth.py | 7 + .../test_system_language_service_v1beta2.py | 2 +- .../v1/test_language_service_client_v1.py | 2 +- .../test_language_service_client_v1beta2.py | 2 +- 23 files changed, 204 insertions(+), 59 deletions(-) diff --git a/google/cloud/language_v1/gapic/enums.py b/google/cloud/language_v1/gapic/enums.py index b4d44ab5..6ec0f9fe 100644 --- a/google/cloud/language_v1/gapic/enums.py +++ b/google/cloud/language_v1/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/language_v1/gapic/language_service_client.py b/google/cloud/language_v1/gapic/language_service_client.py index 407ef044..1520d84c 100644 --- a/google/cloud/language_v1/gapic/language_service_client.py +++ b/google/cloud/language_v1/gapic/language_service_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py b/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py index b8a13472..e4b4ffad 100644 --- a/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py +++ b/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/language_v1/proto/language_service_pb2.py b/google/cloud/language_v1/proto/language_service_pb2.py index b292a65e..f17ab045 100644 --- a/google/cloud/language_v1/proto/language_service_pb2.py +++ b/google/cloud/language_v1/proto/language_service_pb2.py @@ -2944,9 +2944,9 @@ ), DESCRIPTOR=_ENTITY, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents a phrase in the text that is a known entity, such as a - person, an organization, or location. The API associates information, - such as salience and mentions, with entities. 
+ __doc__="""Represents a phrase in the text that is a known entity, + such as a person, an organization, or location. The API associates + information, such as salience and mentions, with entities. Attributes: @@ -2988,7 +2988,8 @@ dict( DESCRIPTOR=_TOKEN, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents the smallest syntactic building block of the text. + __doc__="""Represents the smallest syntactic building block of the + text. Attributes: @@ -2999,7 +3000,7 @@ dependency_edge: Dependency tree parse for this token. lemma: - `Lemma + \ `Lemma `__ of the token. """, @@ -3014,8 +3015,8 @@ dict( DESCRIPTOR=_SENTIMENT, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents the feeling associated with the entire text or entities in - the text. + __doc__="""Represents the feeling associated with the entire text or + entities in the text. Attributes: @@ -3038,8 +3039,8 @@ dict( DESCRIPTOR=_PARTOFSPEECH, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents part of speech information for a token. Parts of speech are - as defined in + __doc__="""Represents part of speech information for a token. Parts + of speech are as defined in http://www.lrec-conf.org/proceedings/lrec2012/pdf/274\_Paper.pdf @@ -3080,8 +3081,8 @@ dict( DESCRIPTOR=_DEPENDENCYEDGE, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents dependency parse tree information for a token. (For more - information on dependency labels, see + __doc__="""Represents dependency parse tree information for a token. + (For more information on dependency labels, see http://www.aclweb.org/anthology/P13-2017 @@ -3106,8 +3107,8 @@ dict( DESCRIPTOR=_ENTITYMENTION, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents a mention for an entity in the text. Currently, proper noun - mentions are supported. + __doc__="""Represents a mention for an entity in the text. Currently, + proper noun mentions are supported. Attributes: @@ -3399,9 +3400,9 @@ dict( DESCRIPTOR=_ANNOTATETEXTREQUEST_FEATURES, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""All available features for sentiment, syntax, and semantic analysis. - Setting each one to true will enable that specific analysis for the - input. + __doc__="""All available features for sentiment, syntax, and semantic + analysis. Setting each one to true will enable that specific analysis + for the input. Attributes: @@ -3421,8 +3422,9 @@ ), DESCRIPTOR=_ANNOTATETEXTREQUEST, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The request message for the text annotation API, which can perform - multiple analysis types (sentiment, entities, and syntax) in one call. + __doc__="""The request message for the text annotation API, which can + perform multiple analysis types (sentiment, entities, and syntax) in one + call. Attributes: diff --git a/google/cloud/language_v1beta2/gapic/enums.py b/google/cloud/language_v1beta2/gapic/enums.py index 0d3cf1c5..6ed586f4 100644 --- a/google/cloud/language_v1beta2/gapic/enums.py +++ b/google/cloud/language_v1beta2/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/language_v1beta2/gapic/language_service_client.py b/google/cloud/language_v1beta2/gapic/language_service_client.py index 1f248f6e..34beaaec 100644 --- a/google/cloud/language_v1beta2/gapic/language_service_client.py +++ b/google/cloud/language_v1beta2/gapic/language_service_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py index 7cc7f5a1..d546c1ab 100644 --- a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py +++ b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/language_v1beta2/proto/language_service_pb2.py b/google/cloud/language_v1beta2/proto/language_service_pb2.py index 8c9068df..13d144ca 100644 --- a/google/cloud/language_v1beta2/proto/language_service_pb2.py +++ b/google/cloud/language_v1beta2/proto/language_service_pb2.py @@ -2946,9 +2946,9 @@ ), DESCRIPTOR=_ENTITY, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents a phrase in the text that is a known entity, such as a - person, an organization, or location. The API associates information, - such as salience and mentions, with entities. + __doc__="""Represents a phrase in the text that is a known entity, + such as a person, an organization, or location. The API associates + information, such as salience and mentions, with entities. Attributes: @@ -2990,7 +2990,8 @@ dict( DESCRIPTOR=_TOKEN, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents the smallest syntactic building block of the text. + __doc__="""Represents the smallest syntactic building block of the + text. Attributes: @@ -3001,7 +3002,7 @@ dependency_edge: Dependency tree parse for this token. lemma: - `Lemma + \ `Lemma `__ of the token. """, @@ -3016,8 +3017,8 @@ dict( DESCRIPTOR=_SENTIMENT, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents the feeling associated with the entire text or entities in - the text. Next ID: 6 + __doc__="""Represents the feeling associated with the entire text or + entities in the text. Next ID: 6 Attributes: @@ -3104,8 +3105,8 @@ dict( DESCRIPTOR=_ENTITYMENTION, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents a mention for an entity in the text. Currently, proper noun - mentions are supported. + __doc__="""Represents a mention for an entity in the text. Currently, + proper noun mentions are supported. Attributes: @@ -3401,9 +3402,9 @@ dict( DESCRIPTOR=_ANNOTATETEXTREQUEST_FEATURES, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""All available features for sentiment, syntax, and semantic analysis. - Setting each one to true will enable that specific analysis for the - input. Next ID: 10 + __doc__="""All available features for sentiment, syntax, and semantic + analysis. 
Setting each one to true will enable that specific analysis + for the input. Next ID: 10 Attributes: @@ -3425,8 +3426,9 @@ ), DESCRIPTOR=_ANNOTATETEXTREQUEST, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The request message for the text annotation API, which can perform - multiple analysis types (sentiment, entities, and syntax) in one call. + __doc__="""The request message for the text annotation API, which can + perform multiple analysis types (sentiment, entities, and syntax) in one + call. Attributes: diff --git a/samples/v1/language_classify_gcs.py b/samples/v1/language_classify_gcs.py index db595801..941640b1 100644 --- a/samples/v1/language_classify_gcs.py +++ b/samples/v1/language_classify_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/samples/v1/language_classify_text.py b/samples/v1/language_classify_text.py index 2ecfd70b..52175f02 100644 --- a/samples/v1/language_classify_text.py +++ b/samples/v1/language_classify_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/samples/v1/language_entities_gcs.py b/samples/v1/language_entities_gcs.py index edd3238a..790592ca 100644 --- a/samples/v1/language_entities_gcs.py +++ b/samples/v1/language_entities_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/samples/v1/language_entities_text.py b/samples/v1/language_entities_text.py index 2948f44d..9ae849f2 100644 --- a/samples/v1/language_entities_text.py +++ b/samples/v1/language_entities_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/samples/v1/language_entity_sentiment_gcs.py b/samples/v1/language_entity_sentiment_gcs.py index 87fb74de..9fafa737 100644 --- a/samples/v1/language_entity_sentiment_gcs.py +++ b/samples/v1/language_entity_sentiment_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/samples/v1/language_entity_sentiment_text.py b/samples/v1/language_entity_sentiment_text.py index 6f914980..9b3d5b8a 100644 --- a/samples/v1/language_entity_sentiment_text.py +++ b/samples/v1/language_entity_sentiment_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/samples/v1/language_sentiment_gcs.py b/samples/v1/language_sentiment_gcs.py index 36600966..261f2f3e 100644 --- a/samples/v1/language_sentiment_gcs.py +++ b/samples/v1/language_sentiment_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/samples/v1/language_sentiment_text.py b/samples/v1/language_sentiment_text.py index c1325678..12f1e221 100644 --- a/samples/v1/language_sentiment_text.py +++ b/samples/v1/language_sentiment_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/samples/v1/language_syntax_gcs.py b/samples/v1/language_syntax_gcs.py index 732f77df..32bf2acb 100644 --- a/samples/v1/language_syntax_gcs.py +++ b/samples/v1/language_syntax_gcs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -64,7 +64,7 @@ def sample_analyze_syntax(gcs_content_uri): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of speech are as defined in: + # Parts of spech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. diff --git a/samples/v1/language_syntax_text.py b/samples/v1/language_syntax_text.py index d1c3104e..29041886 100644 --- a/samples/v1/language_syntax_text.py +++ b/samples/v1/language_syntax_text.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -63,7 +63,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of speech are as defined in: + # Parts of spech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. 
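The regenerated syntax samples in this patch read `token.part_of_speech` and resolve the numeric tag to a name through the `enums` module. For reference, a minimal standalone sketch of that pattern, assuming the v1 GAPIC surface used throughout these patches and application default credentials, might look like this (the input sentence is illustrative only):

```python
# Minimal sketch of the part-of-speech lookup the syntax samples perform.
# Assumes google-cloud-language (v1 GAPIC surface) is installed and
# application default credentials are configured; the input sentence is
# purely illustrative.
from google.cloud import language_v1
from google.cloud.language_v1 import enums


def print_token_tags(text_content):
    client = language_v1.LanguageServiceClient()
    document = {
        "content": text_content,
        "type": enums.Document.Type.PLAIN_TEXT,
        "language": "en",
    }
    response = client.analyze_syntax(document, encoding_type=enums.EncodingType.UTF8)
    for token in response.tokens:
        # Map the numeric part-of-speech tag back to its name (NOUN, VERB, ...).
        tag_name = enums.PartOfSpeech.Tag(token.part_of_speech.tag).name
        print(u"{}: {}".format(token.text.content, tag_name))


if __name__ == "__main__":
    print_token_tags(u"The quick brown fox jumps over the lazy dog.")
```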
diff --git a/synth.metadata b/synth.metadata index 18a4b70b..94c812a3 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-12-10T13:18:39.049538Z", + "updateTime": "2020-01-30T00:27:56.902647Z", "sources": [ { "generator": { "name": "artman", - "version": "0.42.1", - "dockerImage": "googleapis/artman@sha256:c773192618c608a7a0415dd95282f841f8e6bcdef7dd760a988c93b77a64bd57" + "version": "0.44.4", + "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" } }, { "git": { "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "6cc9499e225a4f6a5e34fe07e390f67055d7991c", - "internalRef": "284643689" + "remote": "git@github.com:googleapis/googleapis.git", + "sha": "341fd5690fae36f36cf626ef048fbcf4bbe7cee6", + "internalRef": "292221998" } }, { @@ -45,5 +45,139 @@ "config": "google/cloud/language/artman_language_v1.yaml" } } + ], + "newFiles": [ + { + "path": "MANIFEST.in" + }, + { + "path": ".flake8" + }, + { + "path": ".coveragerc" + }, + { + "path": "setup.cfg" + }, + { + "path": "docs/_static/custom.css" + }, + { + "path": "docs/_templates/layout.html" + }, + { + "path": "samples/v1/language_sentiment_text.py" + }, + { + "path": "samples/v1/language_entities_text.py" + }, + { + "path": "samples/v1/language_entity_sentiment_gcs.py" + }, + { + "path": "samples/v1/language_syntax_text.py" + }, + { + "path": "samples/v1/language_entity_sentiment_text.py" + }, + { + "path": "samples/v1/language_sentiment_gcs.py" + }, + { + "path": "samples/v1/language_classify_gcs.py" + }, + { + "path": "samples/v1/language_entities_gcs.py" + }, + { + "path": "samples/v1/language_classify_text.py" + }, + { + "path": "samples/v1/language_syntax_gcs.py" + }, + { + "path": "samples/v1/test/analyzing_sentiment.test.yaml" + }, + { + "path": "samples/v1/test/classifying_content.test.yaml" + }, + { + "path": "samples/v1/test/analyzing_entities.test.yaml" + }, + { + "path": "samples/v1/test/analyzing_syntax.test.yaml" + }, + { + "path": "samples/v1/test/analyzing_entity_sentiment.test.yaml" + }, + { + "path": "google/cloud/language_v1/gapic/__init__.py" + }, + { + "path": "google/cloud/language_v1/gapic/enums.py" + }, + { + "path": "google/cloud/language_v1/gapic/language_service_client.py" + }, + { + "path": "google/cloud/language_v1/gapic/language_service_client_config.py" + }, + { + "path": "google/cloud/language_v1/gapic/transports/__init__.py" + }, + { + "path": "google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py" + }, + { + "path": "google/cloud/language_v1/proto/__init__.py" + }, + { + "path": "google/cloud/language_v1/proto/language_service.proto" + }, + { + "path": "google/cloud/language_v1/proto/language_service_pb2.py" + }, + { + "path": "google/cloud/language_v1/proto/language_service_pb2_grpc.py" + }, + { + "path": "google/cloud/language_v1beta2/gapic/__init__.py" + }, + { + "path": "google/cloud/language_v1beta2/gapic/enums.py" + }, + { + "path": "google/cloud/language_v1beta2/gapic/language_service_client.py" + }, + { + "path": "google/cloud/language_v1beta2/gapic/language_service_client_config.py" + }, + { + "path": "google/cloud/language_v1beta2/gapic/transports/__init__.py" + }, + { + "path": "google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py" + }, + { + "path": "google/cloud/language_v1beta2/proto/__init__.py" + }, + { + "path": "google/cloud/language_v1beta2/proto/language_service.proto" 
+ }, + { + "path": "google/cloud/language_v1beta2/proto/language_service_pb2.py" + }, + { + "path": "google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py" + }, + { + "path": "tests/unit/gapic/v1/test_language_service_client_v1.py" + }, + { + "path": "tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py" + }, + { + "path": "tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py" + } ] } \ No newline at end of file diff --git a/synth.py b/synth.py index db33996a..5f3d6c49 100644 --- a/synth.py +++ b/synth.py @@ -47,6 +47,13 @@ templated_files = common.py_library(unit_cov_level=97, cov_level=100) s.move(templated_files, excludes=['noxfile.py']) +s.replace("google/cloud/**/language_service_pb2.py", +'''__doc__ = """################################################################ + # + + Represents the input to API methods.''', +'''__doc__="""Represents the input to API methods.''' +) s.replace( f"google/cloud/**/gapic/language_service_client.py", r"types\.EncodingType", diff --git a/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py b/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py index 24d18d09..6145cf2a 100644 --- a/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py +++ b/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/v1/test_language_service_client_v1.py b/tests/unit/gapic/v1/test_language_service_client_v1.py index 2c9bef7c..8d8362ab 100644 --- a/tests/unit/gapic/v1/test_language_service_client_v1.py +++ b/tests/unit/gapic/v1/test_language_service_client_v1.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py index 159f57f9..548357be 100644 --- a/tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py +++ b/tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
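The synth.py hunk in the patch above adds a replacement rule so that the regenerated `language_service_pb2.py` docstrings no longer begin with the banner of `#` characters emitted by the generator. As a rough illustration of what that rule does (the input string below is a hypothetical stand-in, not the real generated file), the same substitution can be reproduced with the standard library:

```python
import re

# Hypothetical stand-in for a generated __doc__ assignment; the real pb2
# module produced by the generator is much longer.
generated = (
    '__doc__ = """################################################################\n'
    "  #\n"
    "\n"
    '  Represents the input to API methods."""'
)

# Collapse the banner so the docstring begins with the real description,
# mirroring the s.replace() rule added to synth.py in this patch.
cleaned = re.sub(r'__doc__ = """#+\s*#\s*', '__doc__="""', generated)

print(cleaned)  # __doc__="""Represents the input to API methods."""
```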
From 2085f28e1811691e1a977efb47cb240e25b930ea Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Wed, 12 Feb 2020 09:45:36 -0800 Subject: [PATCH 018/209] chore: add split repo templates (#1) --- .github/CONTRIBUTING.md | 28 +++ .github/ISSUE_TEMPLATE/bug_report.md | 44 ++++ .github/ISSUE_TEMPLATE/feature_request.md | 18 ++ .github/ISSUE_TEMPLATE/support_request.md | 7 + .github/PULL_REQUEST_TEMPLATE.md | 7 + .github/release-please.yml | 1 + .gitignore | 58 +++++ .kokoro/build.sh | 39 +++ .kokoro/continuous/common.cfg | 27 +++ .kokoro/continuous/continuous.cfg | 1 + .kokoro/docs/common.cfg | 48 ++++ .kokoro/docs/docs.cfg | 1 + .kokoro/presubmit/common.cfg | 27 +++ .kokoro/presubmit/presubmit.cfg | 1 + .kokoro/publish-docs.sh | 57 +++++ .kokoro/release.sh | 34 +++ .kokoro/release/common.cfg | 64 +++++ .kokoro/release/release.cfg | 1 + .kokoro/trampoline.sh | 23 ++ .repo-metadata.json | 2 +- CODE_OF_CONDUCT.md | 44 ++++ CONTRIBUTING.rst | 279 ++++++++++++++++++++++ LICENSE | 7 +- MANIFEST.in | 1 + docs/conf.py | 25 +- noxfile.py | 14 +- renovate.json | 5 + setup.py | 2 +- synth.metadata | 142 +---------- 29 files changed, 840 insertions(+), 167 deletions(-) create mode 100644 .github/CONTRIBUTING.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/ISSUE_TEMPLATE/support_request.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/release-please.yml create mode 100644 .gitignore create mode 100755 .kokoro/build.sh create mode 100644 .kokoro/continuous/common.cfg create mode 100644 .kokoro/continuous/continuous.cfg create mode 100644 .kokoro/docs/common.cfg create mode 100644 .kokoro/docs/docs.cfg create mode 100644 .kokoro/presubmit/common.cfg create mode 100644 .kokoro/presubmit/presubmit.cfg create mode 100755 .kokoro/publish-docs.sh create mode 100755 .kokoro/release.sh create mode 100644 .kokoro/release/common.cfg create mode 100644 .kokoro/release/release.cfg create mode 100755 .kokoro/trampoline.sh create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.rst create mode 100644 renovate.json diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 00000000..939e5341 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows [Google's Open Source Community +Guidelines](https://opensource.google.com/conduct/). 
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..eded7b42 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,44 @@ +--- +name: Bug report +about: Create a report to help us improve + +--- + +Thanks for stopping by to let us know something could be better! + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. + +Please run down the following list and make sure you've tried the usual "quick fixes": + + - Search the issues already opened: https://github.com/googleapis/python-language/issues + - Search the issues on our "catch-all" repository: https://github.com/googleapis/google-cloud-python + - Search StackOverflow: http://stackoverflow.com/questions/tagged/google-cloud-platform+python + +If you are still having issues, please be sure to include as much information as possible: + +#### Environment details + + - OS type and version: + - Python version: `python --version` + - pip version: `pip --version` + - `google-cloud-language` version: `pip show google-cloud-language` + +#### Steps to reproduce + + 1. ? + 2. ? + +#### Code example + +```python +# example +``` + +#### Stack trace +``` +# example +``` + +Making sure to follow these steps will guarantee the quickest resolution possible. + +Thanks! diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..6365857f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,18 @@ +--- +name: Feature request +about: Suggest an idea for this library + +--- + +Thanks for stopping by to let us know something could be better! + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. + + **Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + **Describe the solution you'd like** +A clear and concise description of what you want to happen. + **Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + **Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/support_request.md b/.github/ISSUE_TEMPLATE/support_request.md new file mode 100644 index 00000000..99586903 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/support_request.md @@ -0,0 +1,7 @@ +--- +name: Support request +about: If you have a support contract with Google, please create an issue in the Google Cloud Support console. + +--- + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. 
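The bug report template added above asks reporters for their OS, Python, pip, and `google-cloud-language` versions. A small helper along these lines could gather those details in one pass; it is only a sketch, and the `pkg_resources` lookup assumes setuptools is installed (it normally ships alongside pip):

```python
# Sketch of a helper for collecting the environment details the bug report
# template asks for. Uses only the standard library plus pkg_resources,
# which ships with setuptools.
import platform
import sys

import pkg_resources


def environment_report():
    details = {
        "OS": platform.platform(),
        "Python": sys.version.split()[0],
    }
    for dist in ("pip", "google-cloud-language"):
        try:
            details[dist] = pkg_resources.get_distribution(dist).version
        except pkg_resources.DistributionNotFound:
            details[dist] = "not installed"
    return details


if __name__ == "__main__":
    for name, value in environment_report().items():
        print(u"{}: {}".format(name, value))
```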
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..b7ee4967 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,7 @@ +Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: +- [ ] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-language/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea +- [ ] Ensure the tests and linter pass +- [ ] Code coverage does not decrease (if any source code was changed) +- [ ] Appropriate docs were updated (if necessary) + +Fixes # 🦕 diff --git a/.github/release-please.yml b/.github/release-please.yml new file mode 100644 index 00000000..4507ad05 --- /dev/null +++ b/.github/release-please.yml @@ -0,0 +1 @@ +releaseType: python diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..3fb06e09 --- /dev/null +++ b/.gitignore @@ -0,0 +1,58 @@ +*.py[cod] +*.sw[op] + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 +__pycache__ + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.nox +.cache +.pytest_cache + + +# Mac +.DS_Store + +# JetBrains +.idea + +# VS Code +.vscode + +# emacs +*~ + +# Built documentation +docs/_build +bigquery/docs/generated + +# Virtual environment +env/ +coverage.xml + +# System test environment variables. +system_tests/local_test_setup + +# Make sure a generated file isn't accidentally committed. +pylintrc +pylintrc.test \ No newline at end of file diff --git a/.kokoro/build.sh b/.kokoro/build.sh new file mode 100755 index 00000000..9a5a6e61 --- /dev/null +++ b/.kokoro/build.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +cd github/python-language + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Setup service account credentials. +export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json + +# Setup project id. +export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") + +# Remove old nox +python3.6 -m pip uninstall --yes --quiet nox-automation + +# Install nox +python3.6 -m pip install --upgrade --quiet nox +python3.6 -m nox --version + +python3.6 -m nox diff --git a/.kokoro/continuous/common.cfg b/.kokoro/continuous/common.cfg new file mode 100644 index 00000000..ba6c6dd9 --- /dev/null +++ b/.kokoro/continuous/common.cfg @@ -0,0 +1,27 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. 
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" + +# Use the trampoline script to run in docker. +build_file: "python-language/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-language/.kokoro/build.sh" +} diff --git a/.kokoro/continuous/continuous.cfg b/.kokoro/continuous/continuous.cfg new file mode 100644 index 00000000..8f43917d --- /dev/null +++ b/.kokoro/continuous/continuous.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg new file mode 100644 index 00000000..277799cb --- /dev/null +++ b/.kokoro/docs/common.cfg @@ -0,0 +1,48 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-language/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-language/.kokoro/publish-docs.sh" +} + +env_vars: { + key: "STAGING_BUCKET" + value: "docs-staging" +} + +# Fetch the token needed for reporting release status to GitHub +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "yoshi-automation-github-key" + } + } +} + +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} \ No newline at end of file diff --git a/.kokoro/docs/docs.cfg b/.kokoro/docs/docs.cfg new file mode 100644 index 00000000..8f43917d --- /dev/null +++ b/.kokoro/docs/docs.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/presubmit/common.cfg b/.kokoro/presubmit/common.cfg new file mode 100644 index 00000000..ba6c6dd9 --- /dev/null +++ b/.kokoro/presubmit/common.cfg @@ -0,0 +1,27 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" + +# Use the trampoline script to run in docker. +build_file: "python-language/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-language/.kokoro/build.sh" +} diff --git a/.kokoro/presubmit/presubmit.cfg b/.kokoro/presubmit/presubmit.cfg new file mode 100644 index 00000000..8f43917d --- /dev/null +++ b/.kokoro/presubmit/presubmit.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh new file mode 100755 index 00000000..84ab6380 --- /dev/null +++ b/.kokoro/publish-docs.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/bin/bash + +set -eo pipefail + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +cd github/python-language + +# Remove old nox +python3.6 -m pip uninstall --yes --quiet nox-automation + +# Install nox +python3.6 -m pip install --upgrade --quiet nox +python3.6 -m nox --version + +# build docs +nox -s docs + +python3 -m pip install gcp-docuploader + +# install a json parser +sudo apt-get update +sudo apt-get -y install software-properties-common +sudo add-apt-repository universe +sudo apt-get update +sudo apt-get -y install jq + +# create metadata +python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging diff --git a/.kokoro/release.sh b/.kokoro/release.sh new file mode 100755 index 00000000..7d0a0d30 --- /dev/null +++ b/.kokoro/release.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#!/bin/bash + +set -eo pipefail + +# Start the releasetool reporter +python3 -m pip install gcp-releasetool +python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script + +# Ensure that we have the latest versions of Twine, Wheel, and Setuptools. +python3 -m pip install --upgrade twine wheel setuptools + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Move into the package, build the distribution and upload. +TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google_cloud_pypi_password") +cd github/python-language +python3 setup.py sdist bdist_wheel +twine upload --username gcloudpypi --password "${TWINE_PASSWORD}" dist/* diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg new file mode 100644 index 00000000..b54eed24 --- /dev/null +++ b/.kokoro/release/common.cfg @@ -0,0 +1,64 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-language/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-language/.kokoro/release.sh" +} + +# Fetch the token needed for reporting release status to GitHub +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "yoshi-automation-github-key" + } + } +} + +# Fetch PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google_cloud_pypi_password" + } + } +} + +# Fetch magictoken to use with Magic Github Proxy +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "releasetool-magictoken" + } + } +} + +# Fetch api key to use with Magic Github Proxy +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "magic-github-proxy-api-key" + } + } +} diff --git a/.kokoro/release/release.cfg b/.kokoro/release/release.cfg new file mode 100644 index 00000000..8f43917d --- /dev/null +++ b/.kokoro/release/release.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh new file mode 100755 index 00000000..e8c4251f --- /dev/null +++ b/.kokoro/trampoline.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$? 
+ +chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh +${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true + +exit ${ret_code} diff --git a/.repo-metadata.json b/.repo-metadata.json index a9e1a79d..b87aaa1f 100644 --- a/.repo-metadata.json +++ b/.repo-metadata.json @@ -6,7 +6,7 @@ "issue_tracker": "https://issuetracker.google.com/savedsearches/559753", "release_level": "ga", "language": "python", - "repo": "googleapis/google-cloud-python", + "repo": "googleapis/python-language", "distribution_name": "google-cloud-language", "api_id": "language.googleapis.com", "requires_billing": true diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..b3d1f602 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,44 @@ + +# Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 00000000..2365cac0 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,279 @@ +.. Generated by synthtool. DO NOT EDIT! +############ +Contributing +############ + +#. **Please sign one of the contributor license agreements below.** +#. Fork the repo, develop and test your code changes, add docs. +#. Make sure that your commit messages clearly describe the changes. +#. Send a pull request. (Please Read: `Faster Pull Request Reviews`_) + +.. 
_Faster Pull Request Reviews: https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#best-practices-for-faster-reviews + +.. contents:: Here are some guidelines for hacking on the Google Cloud Client libraries. + +*************** +Adding Features +*************** + +In order to add a feature: + +- The feature must be documented in both the API and narrative + documentation. + +- The feature must work fully on the following CPython versions: 2.7, + 3.5, 3.6, and 3.7 on both UNIX and Windows. + +- The feature must not add unnecessary dependencies (where + "unnecessary" is of course subjective, but new dependencies should + be discussed). + +**************************** +Using a Development Checkout +**************************** + +You'll have to create a development environment using a Git checkout: + +- While logged into your GitHub account, navigate to the + ``python-language`` `repo`_ on GitHub. + +- Fork and clone the ``python-language`` repository to your GitHub account by + clicking the "Fork" button. + +- Clone your fork of ``python-language`` from your GitHub account to your local + computer, substituting your account username and specifying the destination + as ``hack-on-python-language``. E.g.:: + + $ cd ${HOME} + $ git clone git@github.com:USERNAME/python-language.git hack-on-python-language + $ cd hack-on-python-language + # Configure remotes such that you can pull changes from the googleapis/python-language + # repository into your local repository. + $ git remote add upstream git@github.com:googleapis/python-language.git + # fetch and merge changes from upstream into master + $ git fetch upstream + $ git merge upstream/master + +Now your local repo is set up such that you will push changes to your GitHub +repo, from which you can submit a pull request. + +To work on the codebase and run the tests, we recommend using ``nox``, +but you can also use a ``virtualenv`` of your own creation. + +.. _repo: https://github.com/googleapis/python-language + +Using ``nox`` +============= + +We use `nox `__ to instrument our tests. + +- To test your changes, run unit tests with ``nox``:: + + $ nox -s unit-2.7 + $ nox -s unit-3.7 + $ ... + + .. note:: + + The unit tests and system tests are described in the + ``noxfile.py`` files in each directory. + +.. nox: https://pypi.org/project/nox/ + +Note on Editable Installs / Develop Mode +======================================== + +- As mentioned previously, using ``setuptools`` in `develop mode`_ + or a ``pip`` `editable install`_ is not possible with this + library. This is because this library uses `namespace packages`_. + For context see `Issue #2316`_ and the relevant `PyPA issue`_. + + Since ``editable`` / ``develop`` mode can't be used, packages + need to be installed directly. Hence your changes to the source + tree don't get incorporated into the **already installed** + package. + +.. _namespace packages: https://www.python.org/dev/peps/pep-0420/ +.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316 +.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12 +.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode +.. 
_editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs + +***************************************** +I'm getting weird errors... Can you help? +***************************************** + +If the error mentions ``Python.h`` not being found, +install ``python-dev`` and try again. +On Debian/Ubuntu:: + + $ sudo apt-get install python-dev + +************ +Coding Style +************ + +- PEP8 compliance, with exceptions defined in the linter configuration. + If you have ``nox`` installed, you can test that you have not introduced + any non-compliant code via:: + + $ nox -s lint + +- In order to make ``nox -s lint`` run faster, you can set some environment + variables:: + + export GOOGLE_CLOUD_TESTING_REMOTE="upstream" + export GOOGLE_CLOUD_TESTING_BRANCH="master" + + By doing this, you are specifying the location of the most up-to-date + version of ``python-language``. The the suggested remote name ``upstream`` + should point to the official ``googleapis`` checkout and the + the branch should be the main branch on that remote (``master``). + +Exceptions to PEP8: + +- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for + "Function-Under-Test"), which is PEP8-incompliant, but more readable. + Some also use a local variable, ``MUT`` (short for "Module-Under-Test"). + +******************** +Running System Tests +******************** + +- To run system tests, you can execute:: + + $ nox -s system-3.7 + $ nox -s system-2.7 + + .. note:: + + System tests are only configured to run under Python 2.7 and + Python 3.7. For expediency, we do not run them in older versions + of Python 3. + + This alone will not run the tests. You'll need to change some local + auth settings and change some configuration in your project to + run all the tests. + +- System tests will be run against an actual project and + so you'll need to provide some environment variables to facilitate + authentication to your project: + + - ``GOOGLE_APPLICATION_CREDENTIALS``: The path to a JSON key file; + Such a file can be downloaded directly from the developer's console by clicking + "Generate new JSON key". See private key + `docs `__ + for more details. + +- Once you have downloaded your json keys, set the environment variable + ``GOOGLE_APPLICATION_CREDENTIALS`` to the absolute path of the json file:: + + $ export GOOGLE_APPLICATION_CREDENTIALS="/Users//path/to/app_credentials.json" + + +************* +Test Coverage +************* + +- The codebase *must* have 100% test statement coverage after each commit. + You can test coverage via ``nox -s cover``. + +****************************************************** +Documentation Coverage and Building HTML Documentation +****************************************************** + +If you fix a bug, and the bug requires an API or behavior modification, all +documentation in this package which references that API or behavior must be +changed to reflect the bug fix, ideally in the same commit that fixes the bug +or adds the feature. + +Build the docs via: + + $ nox -s docs + +******************************************** +Note About ``README`` as it pertains to PyPI +******************************************** + +The `description on PyPI`_ for the project comes directly from the +``README``. Due to the reStructuredText (``rst``) parser used by +PyPI, relative links which will work on GitHub (e.g. 
``CONTRIBUTING.rst`` +instead of +``https://github.com/googleapis/python-language/blob/master/CONTRIBUTING.rst``) +may cause problems creating links or rendering the description. + +.. _description on PyPI: https://pypi.org/project/google-cloud-language + + +************************* +Supported Python Versions +************************* + +We support: + +- `Python 3.5`_ +- `Python 3.6`_ +- `Python 3.7`_ + +.. _Python 3.5: https://docs.python.org/3.5/ +.. _Python 3.6: https://docs.python.org/3.6/ +.. _Python 3.7: https://docs.python.org/3.7/ + + +Supported versions can be found in our ``noxfile.py`` `config`_. + +.. _config: https://github.com/googleapis/python-language/blob/master/noxfile.py + +We explicitly decided not to support `Python 2.5`_ due to `decreased usage`_ +and lack of continuous integration `support`_. + +.. _Python 2.5: https://docs.python.org/2.5/ +.. _decreased usage: https://caremad.io/2013/10/a-look-at-pypi-downloads/ +.. _support: https://blog.travis-ci.com/2013-11-18-upcoming-build-environment-updates/ + +We have `dropped 2.6`_ as a supported version as well since Python 2.6 is no +longer supported by the core development team. + +Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020. + +We also explicitly decided to support Python 3 beginning with version +3.5. Reasons for this include: + +- Encouraging use of newest versions of Python 3 +- Taking the lead of `prominent`_ open-source `projects`_ +- `Unicode literal support`_ which allows for a cleaner codebase that + works in both Python 2 and Python 3 + +.. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django +.. _projects: http://flask.pocoo.org/docs/0.10/python3/ +.. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/ +.. _dropped 2.6: https://github.com/googleapis/google-cloud-python/issues/995 + +********** +Versioning +********** + +This library follows `Semantic Versioning`_. + +.. _Semantic Versioning: http://semver.org/ + +Some packages are currently in major version zero (``0.y.z``), which means that +anything may change at any time and the public API should not be considered +stable. + +****************************** +Contributor License Agreements +****************************** + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the + intellectual property**, then you'll need to sign an + `individual CLA `__. +- **If you work for a company that wants to allow you to contribute your work**, + then you'll need to sign a + `corporate CLA `__. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. 
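The contributing guide above notes that system tests run against a real project and authenticate through `GOOGLE_APPLICATION_CREDENTIALS`. For orientation, a minimal smoke test in that spirit, assuming a valid service-account key and the Natural Language API enabled on the project, could look like the following sketch (it is not part of the repo's actual test suite):

```python
# Minimal smoke test in the spirit of the system tests: assumes
# GOOGLE_APPLICATION_CREDENTIALS points at a service-account key for a
# project with the Natural Language API enabled. Not part of the repo's
# actual test suite.
from google.cloud import language_v1
from google.cloud.language_v1 import enums


def smoke_test():
    client = language_v1.LanguageServiceClient()
    document = {
        "content": u"Hello, world! This library makes me happy.",
        "type": enums.Document.Type.PLAIN_TEXT,
    }
    sentiment = client.analyze_sentiment(document).document_sentiment
    # Sentiment score is in [-1.0, 1.0]; a positive sentence should score above 0.
    print(u"Score: {:.2f}, magnitude: {:.2f}".format(sentiment.score, sentiment.magnitude))


if __name__ == "__main__":
    smoke_test()
```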
diff --git a/LICENSE b/LICENSE index d6456956..a8ee855d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,6 @@ - - Apache License + Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -193,7 +192,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/MANIFEST.in b/MANIFEST.in index 9cbf175a..cd011be2 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,4 @@ +# Generated by synthtool. DO NOT EDIT! include README.rst LICENSE recursive-include google *.json *.proto recursive-include tests * diff --git a/docs/conf.py b/docs/conf.py index fd043da0..289e544f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,7 +20,7 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) -__version__ = "0.1.0" +__version__ = "" # -- General configuration ------------------------------------------------ @@ -33,7 +33,6 @@ extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", - "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.napoleon", @@ -46,6 +45,7 @@ autodoc_default_flags = ["members"] autosummary_generate = True + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -66,7 +66,7 @@ # General information about the project. project = u"google-cloud-language" -copyright = u"2017, Google" +copyright = u"2019, Google" author = u"Google APIs" # The version info for the project you're documenting, acts as replacement for @@ -122,6 +122,7 @@ # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True + # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for @@ -132,9 +133,9 @@ # further. For a list of options available for each theme, see the # documentation. html_theme_options = { - "description": "Google Cloud Client Libraries for Python", + "description": "Google Cloud Client Libraries for google-cloud-language", "github_user": "googleapis", - "github_repo": "google-cloud-python", + "github_repo": "python-language", "github_banner": True, "font_family": "'Roboto', Georgia, sans", "head_font_family": "'Roboto', Georgia, serif", @@ -230,6 +231,7 @@ # -- Options for warnings ------------------------------------------------------ + suppress_warnings = [ # Temporarily suppress this to avoid "more than one target found for # cross-reference" warning, which are intractable for us to avoid while in @@ -285,6 +287,7 @@ # If false, no module index is generated. # latex_domain_indices = True + # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples @@ -302,6 +305,7 @@ # If true, show URL addresses after external links. # man_show_urls = False + # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples @@ -314,7 +318,7 @@ u"google-cloud-language Documentation", author, "google-cloud-language", - "GAPIC library for the {metadata.shortName} v1 service", + "google-cloud-language Library", "APIs", ) ] @@ -331,19 +335,16 @@ # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False + # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("https://requests.kennethreitz.org/en/stable/", None), - "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), - "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), } + # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True diff --git a/noxfile.py b/noxfile.py index ce6ee932..6589c07d 100644 --- a/noxfile.py +++ b/noxfile.py @@ -23,7 +23,6 @@ import nox -LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core")) BLACK_VERSION = "black==19.3b0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] @@ -38,7 +37,7 @@ def lint(session): Returns a failure if the linters find linting errors or sufficiently serious code quality issues. """ - session.install("flake8", BLACK_VERSION, *LOCAL_DEPS) + session.install("flake8", BLACK_VERSION) session.run("black", "--check", *BLACK_PATHS) session.run("flake8", "google", "tests") @@ -67,8 +66,6 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. session.install("mock", "pytest", "pytest-cov") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) session.install("-e", ".") # Run py.test against the unit tests. @@ -86,7 +83,7 @@ def default(session): ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7"]) +@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) def unit(session): """Run the unit test suite.""" default(session) @@ -113,9 +110,6 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", "../test_utils/") session.install("-e", ".") # Run py.test against the system tests. @@ -138,8 +132,6 @@ def samples(session): session.install("pyyaml") session.install("sample-tester") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) session.install("-e", ".") session.run("sample-tester", samples_path, *session.posargs) @@ -153,7 +145,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. 
""" session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") + session.run("coverage", "report", "--show-missing", "--fail-under=87") session.run("coverage", "erase") diff --git a/renovate.json b/renovate.json new file mode 100644 index 00000000..4fa94931 --- /dev/null +++ b/renovate.json @@ -0,0 +1,5 @@ +{ + "extends": [ + "config:base", ":preserveSemverRanges" + ] +} diff --git a/setup.py b/setup.py index 6ac5b244..2c11a7b5 100644 --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ author="Google LLC", author_email="googleapis-packages@google.com", license="Apache 2.0", - url="https://github.com/GoogleCloudPlatform/google-cloud-python", + url="https://github.com/googleapis/python-language", classifiers=[ release_status, "Intended Audience :: Developers", diff --git a/synth.metadata b/synth.metadata index 94c812a3..b8de7122 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2020-01-30T00:27:56.902647Z", + "updateTime": "2020-02-07T02:58:49.595427Z", "sources": [ { "generator": { @@ -12,13 +12,13 @@ "git": { "name": "googleapis", "remote": "git@github.com:googleapis/googleapis.git", - "sha": "341fd5690fae36f36cf626ef048fbcf4bbe7cee6", - "internalRef": "292221998" + "sha": "e46f761cd6ec15a9e3d5ed4ff321a4bcba8e8585", + "internalRef": "293710856" } }, { "template": { - "name": "python_library", + "name": "python_split_library", "origin": "synthtool.gcp", "version": "2019.10.17" } @@ -45,139 +45,5 @@ "config": "google/cloud/language/artman_language_v1.yaml" } } - ], - "newFiles": [ - { - "path": "MANIFEST.in" - }, - { - "path": ".flake8" - }, - { - "path": ".coveragerc" - }, - { - "path": "setup.cfg" - }, - { - "path": "docs/_static/custom.css" - }, - { - "path": "docs/_templates/layout.html" - }, - { - "path": "samples/v1/language_sentiment_text.py" - }, - { - "path": "samples/v1/language_entities_text.py" - }, - { - "path": "samples/v1/language_entity_sentiment_gcs.py" - }, - { - "path": "samples/v1/language_syntax_text.py" - }, - { - "path": "samples/v1/language_entity_sentiment_text.py" - }, - { - "path": "samples/v1/language_sentiment_gcs.py" - }, - { - "path": "samples/v1/language_classify_gcs.py" - }, - { - "path": "samples/v1/language_entities_gcs.py" - }, - { - "path": "samples/v1/language_classify_text.py" - }, - { - "path": "samples/v1/language_syntax_gcs.py" - }, - { - "path": "samples/v1/test/analyzing_sentiment.test.yaml" - }, - { - "path": "samples/v1/test/classifying_content.test.yaml" - }, - { - "path": "samples/v1/test/analyzing_entities.test.yaml" - }, - { - "path": "samples/v1/test/analyzing_syntax.test.yaml" - }, - { - "path": "samples/v1/test/analyzing_entity_sentiment.test.yaml" - }, - { - "path": "google/cloud/language_v1/gapic/__init__.py" - }, - { - "path": "google/cloud/language_v1/gapic/enums.py" - }, - { - "path": "google/cloud/language_v1/gapic/language_service_client.py" - }, - { - "path": "google/cloud/language_v1/gapic/language_service_client_config.py" - }, - { - "path": "google/cloud/language_v1/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py" - }, - { - "path": "google/cloud/language_v1/proto/__init__.py" - }, - { - "path": "google/cloud/language_v1/proto/language_service.proto" - }, - { - "path": "google/cloud/language_v1/proto/language_service_pb2.py" - }, - { - "path": 
"google/cloud/language_v1/proto/language_service_pb2_grpc.py" - }, - { - "path": "google/cloud/language_v1beta2/gapic/__init__.py" - }, - { - "path": "google/cloud/language_v1beta2/gapic/enums.py" - }, - { - "path": "google/cloud/language_v1beta2/gapic/language_service_client.py" - }, - { - "path": "google/cloud/language_v1beta2/gapic/language_service_client_config.py" - }, - { - "path": "google/cloud/language_v1beta2/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py" - }, - { - "path": "google/cloud/language_v1beta2/proto/__init__.py" - }, - { - "path": "google/cloud/language_v1beta2/proto/language_service.proto" - }, - { - "path": "google/cloud/language_v1beta2/proto/language_service_pb2.py" - }, - { - "path": "google/cloud/language_v1beta2/proto/language_service_pb2_grpc.py" - }, - { - "path": "tests/unit/gapic/v1/test_language_service_client_v1.py" - }, - { - "path": "tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py" - }, - { - "path": "tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py" - } ] } \ No newline at end of file From 7a9d4ddf676f2a77e1bd83e02b8d7987a72c6525 Mon Sep 17 00:00:00 2001 From: Cameron Zahedi Date: Wed, 12 Feb 2020 11:13:04 -0700 Subject: [PATCH 019/209] docs: fix small typo (#5) --- samples/v1/language_syntax_text.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/v1/language_syntax_text.py b/samples/v1/language_syntax_text.py index 29041886..2b4a51e2 100644 --- a/samples/v1/language_syntax_text.py +++ b/samples/v1/language_syntax_text.py @@ -63,7 +63,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of spech are as defined in: + # Parts of speech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. From c39138189a0c93512c1cdbb7cd00047c73a80402 Mon Sep 17 00:00:00 2001 From: Jonathan Simon Date: Mon, 16 Mar 2020 09:04:15 -0700 Subject: [PATCH 020/209] chore: update samples to include additional region tags (#14) * Update samples to include additional region tags. 
* chore: empty commit --- samples/v1/language_classify_gcs.py | 2 ++ samples/v1/language_classify_text.py | 2 ++ samples/v1/language_entities_text.py | 2 ++ samples/v1/language_sentiment_text.py | 3 +++ samples/v1/language_syntax_text.py | 3 +++ 5 files changed, 12 insertions(+) diff --git a/samples/v1/language_classify_gcs.py b/samples/v1/language_classify_gcs.py index 941640b1..8835fc76 100644 --- a/samples/v1/language_classify_gcs.py +++ b/samples/v1/language_classify_gcs.py @@ -43,6 +43,7 @@ def sample_classify_text(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/classify-entertainment.txt' + # [START language_python_migration_document_gcs] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -51,6 +52,7 @@ def sample_classify_text(gcs_content_uri): # https://cloud.google.com/natural-language/docs/languages language = "en" document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} + # [END language_python_migration_document_gcs] response = client.classify_text(document) # Loop through classified categories returned from the API diff --git a/samples/v1/language_classify_text.py b/samples/v1/language_classify_text.py index 52175f02..4fc77b20 100644 --- a/samples/v1/language_classify_text.py +++ b/samples/v1/language_classify_text.py @@ -41,6 +41,7 @@ def sample_classify_text(text_content): # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' + # [START language_python_migration_document_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -49,6 +50,7 @@ def sample_classify_text(text_content): # https://cloud.google.com/natural-language/docs/languages language = "en" document = {"content": text_content, "type": type_, "language": language} + # [END language_python_migration_document_text] response = client.classify_text(document) # Loop through classified categories returned from the API diff --git a/samples/v1/language_entities_text.py b/samples/v1/language_entities_text.py index 9ae849f2..c6149f65 100644 --- a/samples/v1/language_entities_text.py +++ b/samples/v1/language_entities_text.py @@ -41,6 +41,7 @@ def sample_analyze_entities(text_content): # text_content = 'California is a state.' + # [START language_python_migration_entities_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -76,6 +77,7 @@ def sample_analyze_entities(text_content): print( u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) ) + # [END language_python_migration_entities_text] # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, diff --git a/samples/v1/language_sentiment_text.py b/samples/v1/language_sentiment_text.py index 12f1e221..a0647678 100644 --- a/samples/v1/language_sentiment_text.py +++ b/samples/v1/language_sentiment_text.py @@ -41,6 +41,7 @@ def sample_analyze_sentiment(text_content): # text_content = 'I am so happy and joyful.' 
+ # [START language_python_migration_sentiment_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -61,6 +62,8 @@ def sample_analyze_sentiment(text_content): response.document_sentiment.magnitude ) ) + # [END language_python_migration_sentiment_text] + # Get sentiment for all sentences in the document for sentence in response.sentences: print(u"Sentence text: {}".format(sentence.text.content)) diff --git a/samples/v1/language_syntax_text.py b/samples/v1/language_syntax_text.py index 2b4a51e2..d57c9eea 100644 --- a/samples/v1/language_syntax_text.py +++ b/samples/v1/language_syntax_text.py @@ -50,6 +50,7 @@ def sample_analyze_syntax(text_content): language = "en" document = {"content": text_content, "type": type_, "language": language} + # [START language_python_migration_syntax_text] # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = enums.EncodingType.UTF8 @@ -72,6 +73,8 @@ def sample_analyze_syntax(text_content): enums.PartOfSpeech.Tag(part_of_speech.tag).name ) ) + # [END language_python_migration_syntax_text] + # Get the voice, e.g. ACTIVE or PASSIVE print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. From 2bcf0d3dcdc454deba0046dba73763637eb0000f Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 21 Apr 2020 16:51:03 -0700 Subject: [PATCH 021/209] chore: increase default timeout; update templates (via synth) (#16) --- .coveragerc | 16 +++ .flake8 | 16 +++ .github/ISSUE_TEMPLATE/bug_report.md | 3 +- CONTRIBUTING.rst | 15 +-- MANIFEST.in | 16 +++ .../language_v1/proto/language_service.proto | 12 +-- .../language_v1/proto/language_service_pb2.py | 102 +++++------------- .../gapic/language_service_client_config.py | 12 +-- .../language_service_grpc_transport.py | 5 +- .../proto/language_service.proto | 15 +-- .../proto/language_service_pb2.py | 95 +++++----------- noxfile.py | 2 +- samples/v1/language_classify_gcs.py | 2 - samples/v1/language_classify_text.py | 2 - samples/v1/language_entities_text.py | 2 - samples/v1/language_sentiment_text.py | 3 - samples/v1/language_syntax_text.py | 5 +- setup.cfg | 16 +++ synth.metadata | 26 +++-- .../test_system_language_service_v1beta2.py | 3 +- 20 files changed, 165 insertions(+), 203 deletions(-) diff --git a/.coveragerc b/.coveragerc index b178b094..dd39c854 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [run] branch = True diff --git a/.flake8 b/.flake8 index 0268ecc9..20fe9bda 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [flake8] ignore = E203, E266, E501, W503 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index eded7b42..b9f06f21 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -11,8 +11,7 @@ Thanks for stopping by to let us know something could be better! Please run down the following list and make sure you've tried the usual "quick fixes": - Search the issues already opened: https://github.com/googleapis/python-language/issues - - Search the issues on our "catch-all" repository: https://github.com/googleapis/google-cloud-python - - Search StackOverflow: http://stackoverflow.com/questions/tagged/google-cloud-platform+python + - Search StackOverflow: https://stackoverflow.com/questions/tagged/google-cloud-platform+python If you are still having issues, please be sure to include as much information as possible: diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 2365cac0..56875c06 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: 2.7, - 3.5, 3.6, and 3.7 on both UNIX and Windows. + 3.5, 3.6, 3.7 and 3.8 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -214,26 +214,18 @@ We support: - `Python 3.5`_ - `Python 3.6`_ - `Python 3.7`_ +- `Python 3.8`_ .. _Python 3.5: https://docs.python.org/3.5/ .. _Python 3.6: https://docs.python.org/3.6/ .. _Python 3.7: https://docs.python.org/3.7/ +.. _Python 3.8: https://docs.python.org/3.8/ Supported versions can be found in our ``noxfile.py`` `config`_. .. _config: https://github.com/googleapis/python-language/blob/master/noxfile.py -We explicitly decided not to support `Python 2.5`_ due to `decreased usage`_ -and lack of continuous integration `support`_. - -.. _Python 2.5: https://docs.python.org/2.5/ -.. _decreased usage: https://caremad.io/2013/10/a-look-at-pypi-downloads/ -.. _support: https://blog.travis-ci.com/2013-11-18-upcoming-build-environment-updates/ - -We have `dropped 2.6`_ as a supported version as well since Python 2.6 is no -longer supported by the core development team. - Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020. We also explicitly decided to support Python 3 beginning with version @@ -247,7 +239,6 @@ We also explicitly decided to support Python 3 beginning with version .. 
_prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django .. _projects: http://flask.pocoo.org/docs/0.10/python3/ .. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/ -.. _dropped 2.6: https://github.com/googleapis/google-cloud-python/issues/995 ********** Versioning diff --git a/MANIFEST.in b/MANIFEST.in index cd011be2..68855abc 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! include README.rst LICENSE recursive-include google *.json *.proto diff --git a/google/cloud/language_v1/proto/language_service.proto b/google/cloud/language_v1/proto/language_service.proto index 41d92f34..e8e4fd8d 100644 --- a/google/cloud/language_v1/proto/language_service.proto +++ b/google/cloud/language_v1/proto/language_service.proto @@ -137,11 +137,11 @@ message Document { // The language of the document (if not specified, the language is // automatically detected). Both ISO and BCP-47 language codes are // accepted.
- // [Language Support](/natural-language/docs/languages) - // lists currently supported languages for each API method. - // If the language (either specified by the caller or automatically detected) - // is not supported by the called API method, an `INVALID_ARGUMENT` error - // is returned. + // [Language + // Support](https://cloud.google.com/natural-language/docs/languages) lists + // currently supported languages for each API method. If the language (either + // specified by the caller or automatically detected) is not supported by the + // called API method, an `INVALID_ARGUMENT` error is returned. string language = 4; } @@ -954,7 +954,7 @@ message TextSpan { // Represents a category returned from the text classifier. message ClassificationCategory { // The name of the category representing the document, from the [predefined - // taxonomy](/natural-language/docs/categories). + // taxonomy](https://cloud.google.com/natural-language/docs/categories). string name = 1; // The classifier's confidence of the category. Number represents how certain diff --git a/google/cloud/language_v1/proto/language_service_pb2.py b/google/cloud/language_v1/proto/language_service_pb2.py index f17ab045..9c7ad493 100644 --- a/google/cloud/language_v1/proto/language_service_pb2.py +++ b/google/cloud/language_v1/proto/language_service_pb2.py @@ -2871,9 +2871,8 @@ dict( DESCRIPTOR=_DOCUMENT, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents the input to API methods. - - + __doc__="""################################################################ # + Represents the input to API methods. Attributes: type: Required. If the type is not set or is ``TYPE_UNSPECIFIED``, @@ -2893,12 +2892,12 @@ language: The language of the document (if not specified, the language is automatically detected). Both ISO and BCP-47 language codes - are accepted. `Language Support `__ lists currently supported - languages for each API method. If the language (either - specified by the caller or automatically detected) is not - supported by the called API method, an ``INVALID_ARGUMENT`` - error is returned. + are accepted. `Language Support + `__ + lists currently supported languages for each API method. If + the language (either specified by the caller or automatically + detected) is not supported by the called API method, an + ``INVALID_ARGUMENT`` error is returned. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Document) ), @@ -2912,8 +2911,6 @@ DESCRIPTOR=_SENTENCE, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""Represents a sentence in the input document. - - Attributes: text: The sentence text. @@ -2944,11 +2941,9 @@ ), DESCRIPTOR=_ENTITY, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents a phrase in the text that is a known entity, - such as a person, an organization, or location. The API associates - information, such as salience and mentions, with entities. - - + __doc__="""Represents a phrase in the text that is a known entity, such as a + person, an organization, or location. The API associates information, + such as salience and mentions, with entities. Attributes: name: The representative name for the entity. @@ -2988,10 +2983,7 @@ dict( DESCRIPTOR=_TOKEN, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents the smallest syntactic building block of the - text. 
- - + __doc__="""Represents the smallest syntactic building block of the text. Attributes: text: The token text. @@ -3015,10 +3007,8 @@ dict( DESCRIPTOR=_SENTIMENT, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents the feeling associated with the entire text or - entities in the text. - - + __doc__="""Represents the feeling associated with the entire text or entities in + the text. Attributes: magnitude: A non-negative number in the [0, +inf) range, which represents @@ -3039,11 +3029,9 @@ dict( DESCRIPTOR=_PARTOFSPEECH, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents part of speech information for a token. Parts - of speech are as defined in - http://www.lrec-conf.org/proceedings/lrec2012/pdf/274\_Paper.pdf - - + __doc__="""Represents part of speech information for a token. Parts of speech are + as defined in http://www.lrec- + conf.org/proceedings/lrec2012/pdf/274\_Paper.pdf Attributes: tag: The part of speech tag. @@ -3081,11 +3069,9 @@ dict( DESCRIPTOR=_DEPENDENCYEDGE, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents dependency parse tree information for a token. - (For more information on dependency labels, see + __doc__="""Represents dependency parse tree information for a token. (For more + information on dependency labels, see http://www.aclweb.org/anthology/P13-2017 - - Attributes: head_token_index: Represents the head of this token in the dependency tree. This @@ -3107,10 +3093,8 @@ dict( DESCRIPTOR=_ENTITYMENTION, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents a mention for an entity in the text. Currently, - proper noun mentions are supported. - - + __doc__="""Represents a mention for an entity in the text. Currently, proper noun + mentions are supported. Attributes: text: The mention text. @@ -3136,8 +3120,6 @@ DESCRIPTOR=_TEXTSPAN, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""Represents an output piece of text. - - Attributes: content: The content of the output text. @@ -3159,12 +3141,11 @@ DESCRIPTOR=_CLASSIFICATIONCATEGORY, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""Represents a category returned from the text classifier. - - Attributes: name: The name of the category representing the document, from the - `predefined taxonomy `__. + `predefined taxonomy `__. confidence: The classifier's confidence of the category. Number represents how certain the classifier is that this category represents @@ -3182,8 +3163,6 @@ DESCRIPTOR=_ANALYZESENTIMENTREQUEST, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The sentiment analysis request message. - - Attributes: document: Input document. @@ -3203,8 +3182,6 @@ DESCRIPTOR=_ANALYZESENTIMENTRESPONSE, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The sentiment analysis response message. - - Attributes: document_sentiment: The overall sentiment of the input document. @@ -3228,8 +3205,6 @@ DESCRIPTOR=_ANALYZEENTITYSENTIMENTREQUEST, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The entity-level sentiment analysis request message. - - Attributes: document: Input document. 
@@ -3248,8 +3223,6 @@ DESCRIPTOR=_ANALYZEENTITYSENTIMENTRESPONSE, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The entity-level sentiment analysis response message. - - Attributes: entities: The recognized entities in the input document with associated @@ -3272,8 +3245,6 @@ DESCRIPTOR=_ANALYZEENTITIESREQUEST, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The entity analysis request message. - - Attributes: document: Input document. @@ -3292,8 +3263,6 @@ DESCRIPTOR=_ANALYZEENTITIESRESPONSE, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The entity analysis response message. - - Attributes: entities: The recognized entities in the input document. @@ -3315,8 +3284,6 @@ DESCRIPTOR=_ANALYZESYNTAXREQUEST, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The syntax analysis request message. - - Attributes: document: Input document. @@ -3335,8 +3302,6 @@ DESCRIPTOR=_ANALYZESYNTAXRESPONSE, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The syntax analysis response message. - - Attributes: sentences: Sentences in the input document. @@ -3361,8 +3326,6 @@ DESCRIPTOR=_CLASSIFYTEXTREQUEST, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The document classification request message. - - Attributes: document: Input document. @@ -3379,8 +3342,6 @@ DESCRIPTOR=_CLASSIFYTEXTRESPONSE, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The document classification response message. - - Attributes: categories: Categories representing the input document. @@ -3400,11 +3361,9 @@ dict( DESCRIPTOR=_ANNOTATETEXTREQUEST_FEATURES, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""All available features for sentiment, syntax, and semantic - analysis. Setting each one to true will enable that specific analysis - for the input. - - + __doc__="""All available features for sentiment, syntax, and semantic analysis. + Setting each one to true will enable that specific analysis for the + input. Attributes: extract_syntax: Extract syntax information. @@ -3422,11 +3381,8 @@ ), DESCRIPTOR=_ANNOTATETEXTREQUEST, __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The request message for the text annotation API, which can - perform multiple analysis types (sentiment, entities, and syntax) in one - call. - - + __doc__="""The request message for the text annotation API, which can perform + multiple analysis types (sentiment, entities, and syntax) in one call. Attributes: document: Input document. @@ -3448,8 +3404,6 @@ DESCRIPTOR=_ANNOTATETEXTRESPONSE, __module__="google.cloud.language_v1.proto.language_service_pb2", __doc__="""The text annotations response message. - - Attributes: sentences: Sentences in the input document. 
Populated if the user enables diff --git a/google/cloud/language_v1beta2/gapic/language_service_client_config.py b/google/cloud/language_v1beta2/gapic/language_service_client_config.py index 97ab1aa6..5b11ec46 100644 --- a/google/cloud/language_v1beta2/gapic/language_service_client_config.py +++ b/google/cloud/language_v1beta2/gapic/language_service_client_config.py @@ -18,32 +18,32 @@ }, "methods": { "AnalyzeSentiment": { - "timeout_millis": 30000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "AnalyzeEntities": { - "timeout_millis": 30000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "AnalyzeEntitySentiment": { - "timeout_millis": 30000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "AnalyzeSyntax": { - "timeout_millis": 30000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "ClassifyText": { - "timeout_millis": 30000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, "AnnotateText": { - "timeout_millis": 30000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", }, diff --git a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py index d546c1ab..a4f42bdf 100644 --- a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py +++ b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py @@ -31,7 +31,10 @@ class LanguageServiceGrpcTransport(object): # The scopes needed to make gRPC calls to all of the methods defined # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + _OAUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ) def __init__( self, channel=None, credentials=None, address="language.googleapis.com:443" diff --git a/google/cloud/language_v1beta2/proto/language_service.proto b/google/cloud/language_v1beta2/proto/language_service.proto index 384cdf91..afca1205 100644 --- a/google/cloud/language_v1beta2/proto/language_service.proto +++ b/google/cloud/language_v1beta2/proto/language_service.proto @@ -138,11 +138,11 @@ message Document { // The language of the document (if not specified, the language is // automatically detected). Both ISO and BCP-47 language codes are // accepted.
- // [Language Support](/natural-language/docs/languages) - // lists currently supported languages for each API method. - // If the language (either specified by the caller or automatically detected) - // is not supported by the called API method, an `INVALID_ARGUMENT` error - // is returned. + // [Language + // Support](https://cloud.google.com/natural-language/docs/languages) lists + // currently supported languages for each API method. If the language (either + // specified by the caller or automatically detected) is not supported by the + // called API method, an `INVALID_ARGUMENT` error is returned. string language = 4; } @@ -961,7 +961,7 @@ message TextSpan { // Represents a category returned from the text classifier. message ClassificationCategory { // The name of the category representing the document, from the [predefined - // taxonomy](/natural-language/docs/categories). + // taxonomy](https://cloud.google.com/natural-language/docs/categories). string name = 1; // The classifier's confidence of the category. Number represents how certain @@ -1089,7 +1089,8 @@ message AnnotateTextRequest { // Classify the full document into categories. If this is true, // the API will use the default model which classifies into a - // [predefined taxonomy](/natural-language/docs/categories). + // [predefined + // taxonomy](https://cloud.google.com/natural-language/docs/categories). bool classify_text = 6; } diff --git a/google/cloud/language_v1beta2/proto/language_service_pb2.py b/google/cloud/language_v1beta2/proto/language_service_pb2.py index 13d144ca..7dacee38 100644 --- a/google/cloud/language_v1beta2/proto/language_service_pb2.py +++ b/google/cloud/language_v1beta2/proto/language_service_pb2.py @@ -2873,9 +2873,8 @@ dict( DESCRIPTOR=_DOCUMENT, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents the input to API methods. - - + __doc__="""################################################################ # + Represents the input to API methods. Attributes: type: Required. If the type is not set or is ``TYPE_UNSPECIFIED``, @@ -2895,12 +2894,12 @@ language: The language of the document (if not specified, the language is automatically detected). Both ISO and BCP-47 language codes - are accepted. `Language Support `__ lists currently supported - languages for each API method. If the language (either - specified by the caller or automatically detected) is not - supported by the called API method, an ``INVALID_ARGUMENT`` - error is returned. + are accepted. `Language Support + `__ + lists currently supported languages for each API method. If + the language (either specified by the caller or automatically + detected) is not supported by the called API method, an + ``INVALID_ARGUMENT`` error is returned. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.Document) ), @@ -2914,8 +2913,6 @@ DESCRIPTOR=_SENTENCE, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""Represents a sentence in the input document. - - Attributes: text: The sentence text. @@ -2946,11 +2943,9 @@ ), DESCRIPTOR=_ENTITY, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents a phrase in the text that is a known entity, - such as a person, an organization, or location. The API associates - information, such as salience and mentions, with entities. 
- - + __doc__="""Represents a phrase in the text that is a known entity, such as a + person, an organization, or location. The API associates information, + such as salience and mentions, with entities. Attributes: name: The representative name for the entity. @@ -2990,10 +2985,7 @@ dict( DESCRIPTOR=_TOKEN, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents the smallest syntactic building block of the - text. - - + __doc__="""Represents the smallest syntactic building block of the text. Attributes: text: The token text. @@ -3017,10 +3009,8 @@ dict( DESCRIPTOR=_SENTIMENT, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents the feeling associated with the entire text or - entities in the text. Next ID: 6 - - + __doc__="""Represents the feeling associated with the entire text or entities in + the text. Next ID: 6 Attributes: magnitude: A non-negative number in the [0, +inf) range, which represents @@ -3042,8 +3032,6 @@ DESCRIPTOR=_PARTOFSPEECH, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""Represents part of speech information for a token. - - Attributes: tag: The part of speech tag. @@ -3082,8 +3070,6 @@ DESCRIPTOR=_DEPENDENCYEDGE, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""Represents dependency parse tree information for a token. - - Attributes: head_token_index: Represents the head of this token in the dependency tree. This @@ -3105,10 +3091,8 @@ dict( DESCRIPTOR=_ENTITYMENTION, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents a mention for an entity in the text. Currently, - proper noun mentions are supported. - - + __doc__="""Represents a mention for an entity in the text. Currently, proper noun + mentions are supported. Attributes: text: The mention text. @@ -3134,8 +3118,6 @@ DESCRIPTOR=_TEXTSPAN, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""Represents an output piece of text. - - Attributes: content: The content of the output text. @@ -3157,12 +3139,11 @@ DESCRIPTOR=_CLASSIFICATIONCATEGORY, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""Represents a category returned from the text classifier. - - Attributes: name: The name of the category representing the document, from the - `predefined taxonomy `__. + `predefined taxonomy `__. confidence: The classifier's confidence of the category. Number represents how certain the classifier is that this category represents @@ -3180,8 +3161,6 @@ DESCRIPTOR=_ANALYZESENTIMENTREQUEST, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The sentiment analysis request message. - - Attributes: document: Required. Input document. @@ -3201,8 +3180,6 @@ DESCRIPTOR=_ANALYZESENTIMENTRESPONSE, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The sentiment analysis response message. - - Attributes: document_sentiment: The overall sentiment of the input document. @@ -3227,8 +3204,6 @@ DESCRIPTOR=_ANALYZEENTITYSENTIMENTREQUEST, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The entity-level sentiment analysis request message. - - Attributes: document: Required. Input document. @@ -3247,8 +3222,6 @@ DESCRIPTOR=_ANALYZEENTITYSENTIMENTRESPONSE, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The entity-level sentiment analysis response message. 
- - Attributes: entities: The recognized entities in the input document with associated @@ -3272,8 +3245,6 @@ DESCRIPTOR=_ANALYZEENTITIESREQUEST, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The entity analysis request message. - - Attributes: document: Required. Input document. @@ -3292,8 +3263,6 @@ DESCRIPTOR=_ANALYZEENTITIESRESPONSE, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The entity analysis response message. - - Attributes: entities: The recognized entities in the input document. @@ -3316,8 +3285,6 @@ DESCRIPTOR=_ANALYZESYNTAXREQUEST, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The syntax analysis request message. - - Attributes: document: Required. Input document. @@ -3336,8 +3303,6 @@ DESCRIPTOR=_ANALYZESYNTAXRESPONSE, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The syntax analysis response message. - - Attributes: sentences: Sentences in the input document. @@ -3363,8 +3328,6 @@ DESCRIPTOR=_CLASSIFYTEXTREQUEST, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The document classification request message. - - Attributes: document: Required. Input document. @@ -3381,8 +3344,6 @@ DESCRIPTOR=_CLASSIFYTEXTRESPONSE, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The document classification response message. - - Attributes: categories: Categories representing the input document. @@ -3402,11 +3363,9 @@ dict( DESCRIPTOR=_ANNOTATETEXTREQUEST_FEATURES, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""All available features for sentiment, syntax, and semantic - analysis. Setting each one to true will enable that specific analysis - for the input. Next ID: 10 - - + __doc__="""All available features for sentiment, syntax, and semantic analysis. + Setting each one to true will enable that specific analysis for the + input. Next ID: 10 Attributes: extract_syntax: Extract syntax information. @@ -3419,18 +3378,16 @@ classify_text: Classify the full document into categories. If this is true, the API will use the default model which classifies into a - `predefined taxonomy `__. + `predefined taxonomy `__. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnnotateTextRequest.Features) ), ), DESCRIPTOR=_ANNOTATETEXTREQUEST, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The request message for the text annotation API, which can - perform multiple analysis types (sentiment, entities, and syntax) in one - call. - - + __doc__="""The request message for the text annotation API, which can perform + multiple analysis types (sentiment, entities, and syntax) in one call. Attributes: document: Required. Input document. @@ -3452,8 +3409,6 @@ DESCRIPTOR=_ANNOTATETEXTRESPONSE, __module__="google.cloud.language_v1beta2.proto.language_service_pb2", __doc__="""The text annotations response message. - - Attributes: sentences: Sentences in the input document. 
Populated if the user enables diff --git a/noxfile.py b/noxfile.py index 6589c07d..6b4fa3b0 100644 --- a/noxfile.py +++ b/noxfile.py @@ -155,7 +155,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/samples/v1/language_classify_gcs.py b/samples/v1/language_classify_gcs.py index 8835fc76..941640b1 100644 --- a/samples/v1/language_classify_gcs.py +++ b/samples/v1/language_classify_gcs.py @@ -43,7 +43,6 @@ def sample_classify_text(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/classify-entertainment.txt' - # [START language_python_migration_document_gcs] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -52,7 +51,6 @@ def sample_classify_text(gcs_content_uri): # https://cloud.google.com/natural-language/docs/languages language = "en" document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} - # [END language_python_migration_document_gcs] response = client.classify_text(document) # Loop through classified categories returned from the API diff --git a/samples/v1/language_classify_text.py b/samples/v1/language_classify_text.py index 4fc77b20..52175f02 100644 --- a/samples/v1/language_classify_text.py +++ b/samples/v1/language_classify_text.py @@ -41,7 +41,6 @@ def sample_classify_text(text_content): # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' - # [START language_python_migration_document_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -50,7 +49,6 @@ def sample_classify_text(text_content): # https://cloud.google.com/natural-language/docs/languages language = "en" document = {"content": text_content, "type": type_, "language": language} - # [END language_python_migration_document_text] response = client.classify_text(document) # Loop through classified categories returned from the API diff --git a/samples/v1/language_entities_text.py b/samples/v1/language_entities_text.py index c6149f65..9ae849f2 100644 --- a/samples/v1/language_entities_text.py +++ b/samples/v1/language_entities_text.py @@ -41,7 +41,6 @@ def sample_analyze_entities(text_content): # text_content = 'California is a state.' - # [START language_python_migration_entities_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -77,7 +76,6 @@ def sample_analyze_entities(text_content): print( u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) ) - # [END language_python_migration_entities_text] # Get the language of the text, which will be the same as # the language specified in the request or, if not specified, diff --git a/samples/v1/language_sentiment_text.py b/samples/v1/language_sentiment_text.py index a0647678..12f1e221 100644 --- a/samples/v1/language_sentiment_text.py +++ b/samples/v1/language_sentiment_text.py @@ -41,7 +41,6 @@ def sample_analyze_sentiment(text_content): # text_content = 'I am so happy and joyful.' 
- # [START language_python_migration_sentiment_text] # Available types: PLAIN_TEXT, HTML type_ = enums.Document.Type.PLAIN_TEXT @@ -62,8 +61,6 @@ def sample_analyze_sentiment(text_content): response.document_sentiment.magnitude ) ) - # [END language_python_migration_sentiment_text] - # Get sentiment for all sentences in the document for sentence in response.sentences: print(u"Sentence text: {}".format(sentence.text.content)) diff --git a/samples/v1/language_syntax_text.py b/samples/v1/language_syntax_text.py index d57c9eea..29041886 100644 --- a/samples/v1/language_syntax_text.py +++ b/samples/v1/language_syntax_text.py @@ -50,7 +50,6 @@ def sample_analyze_syntax(text_content): language = "en" document = {"content": text_content, "type": type_, "language": language} - # [START language_python_migration_syntax_text] # Available values: NONE, UTF8, UTF16, UTF32 encoding_type = enums.EncodingType.UTF8 @@ -64,7 +63,7 @@ def sample_analyze_syntax(text_content): u"Location of this token in overall document: {}".format(text.begin_offset) ) # Get the part of speech information for this token. - # Parts of speech are as defined in: + # Parts of spech are as defined in: # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf part_of_speech = token.part_of_speech # Get the tag, e.g. NOUN, ADJ for Adjective, et al. @@ -73,8 +72,6 @@ def sample_analyze_syntax(text_content): enums.PartOfSpeech.Tag(part_of_speech.tag).name ) ) - # [END language_python_migration_syntax_text] - # Get the voice, e.g. ACTIVE or PASSIVE print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. diff --git a/setup.cfg b/setup.cfg index 3bd55550..c3a2b39f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! 
[bdist_wheel] universal = 1 diff --git a/synth.metadata b/synth.metadata index b8de7122..2da14990 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,26 +1,32 @@ { - "updateTime": "2020-02-07T02:58:49.595427Z", "sources": [ { "generator": { "name": "artman", - "version": "0.44.4", - "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" + "version": "2.0.0", + "dockerImage": "googleapis/artman@sha256:b3b47805231a305d0f40c4bf069df20f6a2635574e6d4259fac651d3f9f6e098" + } + }, + { + "git": { + "name": ".", + "remote": "https://github.com/googleapis/python-language.git", + "sha": "c39138189a0c93512c1cdbb7cd00047c73a80402" } }, { "git": { "name": "googleapis", - "remote": "git@github.com:googleapis/googleapis.git", - "sha": "e46f761cd6ec15a9e3d5ed4ff321a4bcba8e8585", - "internalRef": "293710856" + "remote": "https://github.com/googleapis/googleapis.git", + "sha": "42ee97c1b93a0e3759bbba3013da309f670a90ab", + "internalRef": "307114445" } }, { - "template": { - "name": "python_split_library", - "origin": "synthtool.gcp", - "version": "2019.10.17" + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "f5e4c17dc78a966dbf29961dd01f9bbd63e20a04" } } ], diff --git a/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py b/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py index 6145cf2a..81edf7d6 100644 --- a/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py +++ b/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py @@ -28,4 +28,5 @@ def test_analyze_sentiment(self): content = "Hello, world!" 
type_ = enums.Document.Type.PLAIN_TEXT document = {"content": content, "type": type_} - response = client.analyze_sentiment(document) + encoding_type = enums.EncodingType.NONE + response = client.analyze_sentiment(document, encoding_type=encoding_type) From f16bd6dae66990516320941748325b59f4eeebc6 Mon Sep 17 00:00:00 2001 From: Vadym Matsishevskyi <25311427+vam-google@users.noreply.github.com> Date: Tue, 21 Apr 2020 16:58:05 -0700 Subject: [PATCH 022/209] chore: Migrate python-languge synth.py from artman to bazel (#15) --- synth.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/synth.py b/synth.py index 5f3d6c49..5bf28084 100644 --- a/synth.py +++ b/synth.py @@ -17,7 +17,7 @@ import synthtool as s from synthtool import gcp -gapic = gcp.GAPICGenerator() +gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() versions = ["v1beta2", "v1"] @@ -27,12 +27,10 @@ # ---------------------------------------------------------------------------- for version in versions: library = gapic.py_library( - "language", - version, - config_path=f"/google/cloud/language/artman_language_{version}.yaml", - artman_output_name=f"language-{version}", + service="language", + version=version, + bazel_target=f"//google/cloud/language/{version}:language-{version}-py", include_protos=True, - include_samples=True ) s.move(library / f"google/cloud/language_{version}/proto") From 7dff809b94b5a1d001aeb1e7763dbbe624865600 Mon Sep 17 00:00:00 2001 From: Emily Darrow <47046797+ejdarrow@users.noreply.github.com> Date: Mon, 22 Jun 2020 16:50:03 -0400 Subject: [PATCH 023/209] docs: add spacing for readability (#22) --- samples/v1/language_entities_text.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/samples/v1/language_entities_text.py b/samples/v1/language_entities_text.py index 9ae849f2..464a313d 100644 --- a/samples/v1/language_entities_text.py +++ b/samples/v1/language_entities_text.py @@ -54,13 +54,17 @@ def sample_analyze_entities(text_content): encoding_type = enums.EncodingType.UTF8 response = client.analyze_entities(document, encoding_type=encoding_type) + # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) + # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) + # Loop over the metadata associated with entity. For many known entities, # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid). # Some entity types may have additional metadata, e.g. ADDRESS entities @@ -72,6 +76,7 @@ def sample_analyze_entities(text_content): # The API currently supports proper noun mentions. for mention in entity.mentions: print(u"Mention text: {}".format(mention.text.content)) + # Get the mention type, e.g. 
PROPER for proper noun print( u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) From a489102ca0f5ab302ec8974728a52065f2ea8857 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 25 Jun 2020 11:13:33 -0700 Subject: [PATCH 024/209] docs: add multiprocessing note (#26) --- .flake8 | 2 + .gitignore | 2 + .kokoro/publish-docs.sh | 2 - .kokoro/release.sh | 2 - .kokoro/samples/lint/common.cfg | 34 + .kokoro/samples/lint/continuous.cfg | 6 + .kokoro/samples/lint/periodic.cfg | 6 + .kokoro/samples/lint/presubmit.cfg | 6 + .kokoro/samples/python3.6/common.cfg | 34 + .kokoro/samples/python3.6/continuous.cfg | 7 + .kokoro/samples/python3.6/periodic.cfg | 6 + .kokoro/samples/python3.6/presubmit.cfg | 6 + .kokoro/samples/python3.7/common.cfg | 34 + .kokoro/samples/python3.7/continuous.cfg | 6 + .kokoro/samples/python3.7/periodic.cfg | 6 + .kokoro/samples/python3.7/presubmit.cfg | 6 + .kokoro/samples/python3.8/common.cfg | 34 + .kokoro/samples/python3.8/continuous.cfg | 6 + .kokoro/samples/python3.8/periodic.cfg | 6 + .kokoro/samples/python3.8/presubmit.cfg | 6 + .kokoro/test-samples.sh | 104 + MANIFEST.in | 3 + docs/conf.py | 7 +- docs/index.rst | 2 + docs/multiprocessing.rst | 7 + google/cloud/language_v1/gapic/enums.py | 35 +- .../gapic/language_service_client.py | 4 +- .../language_service_grpc_transport.py | 4 +- .../language_v1/proto/language_service_pb2.py | 1788 +++++++++++++---- google/cloud/language_v1beta2/gapic/enums.py | 30 +- .../gapic/language_service_client.py | 4 +- .../language_service_grpc_transport.py | 4 +- .../proto/language_service_pb2.py | 1785 ++++++++++++---- scripts/decrypt-secrets.sh | 33 + scripts/readme-gen/readme_gen.py | 66 + scripts/readme-gen/templates/README.tmpl.rst | 87 + scripts/readme-gen/templates/auth.tmpl.rst | 9 + .../templates/auth_api_key.tmpl.rst | 14 + .../templates/install_deps.tmpl.rst | 29 + .../templates/install_portaudio.tmpl.rst | 35 + synth.metadata | 21 +- synth.py | 3 + testing/.gitignore | 3 + 43 files changed, 3478 insertions(+), 816 deletions(-) create mode 100644 .kokoro/samples/lint/common.cfg create mode 100644 .kokoro/samples/lint/continuous.cfg create mode 100644 .kokoro/samples/lint/periodic.cfg create mode 100644 .kokoro/samples/lint/presubmit.cfg create mode 100644 .kokoro/samples/python3.6/common.cfg create mode 100644 .kokoro/samples/python3.6/continuous.cfg create mode 100644 .kokoro/samples/python3.6/periodic.cfg create mode 100644 .kokoro/samples/python3.6/presubmit.cfg create mode 100644 .kokoro/samples/python3.7/common.cfg create mode 100644 .kokoro/samples/python3.7/continuous.cfg create mode 100644 .kokoro/samples/python3.7/periodic.cfg create mode 100644 .kokoro/samples/python3.7/presubmit.cfg create mode 100644 .kokoro/samples/python3.8/common.cfg create mode 100644 .kokoro/samples/python3.8/continuous.cfg create mode 100644 .kokoro/samples/python3.8/periodic.cfg create mode 100644 .kokoro/samples/python3.8/presubmit.cfg create mode 100755 .kokoro/test-samples.sh create mode 100644 docs/multiprocessing.rst create mode 100755 scripts/decrypt-secrets.sh create mode 100644 scripts/readme-gen/readme_gen.py create mode 100644 scripts/readme-gen/templates/README.tmpl.rst create mode 100644 scripts/readme-gen/templates/auth.tmpl.rst create mode 100644 scripts/readme-gen/templates/auth_api_key.tmpl.rst create mode 100644 scripts/readme-gen/templates/install_deps.tmpl.rst create mode 100644 scripts/readme-gen/templates/install_portaudio.tmpl.rst create mode 100644 testing/.gitignore diff --git a/.flake8 
b/.flake8 index 20fe9bda..ed931638 100644 --- a/.flake8 +++ b/.flake8 @@ -21,6 +21,8 @@ exclude = # Exclude generated code. **/proto/** **/gapic/** + **/services/** + **/types/** *_pb2.py # Standard linting exemptions. diff --git a/.gitignore b/.gitignore index 3fb06e09..b87e1ed5 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ dist build eggs +.eggs parts bin var @@ -49,6 +50,7 @@ bigquery/docs/generated # Virtual environment env/ coverage.xml +sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 84ab6380..becf302b 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - set -eo pipefail # Disable buffering, so that the logs stream through. diff --git a/.kokoro/release.sh b/.kokoro/release.sh index 7d0a0d30..045cb037 100755 --- a/.kokoro/release.sh +++ b/.kokoro/release.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - set -eo pipefail # Start the releasetool reporter diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg new file mode 100644 index 00000000..f3f92a93 --- /dev/null +++ b/.kokoro/samples/lint/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "lint" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-language/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-language/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/lint/continuous.cfg b/.kokoro/samples/lint/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/lint/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/lint/periodic.cfg b/.kokoro/samples/lint/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/lint/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/lint/presubmit.cfg b/.kokoro/samples/lint/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/lint/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg new file mode 100644 index 00000000..e70b6034 --- /dev/null +++ b/.kokoro/samples/python3.6/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.6" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-language/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-language/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.6/continuous.cfg b/.kokoro/samples/python3.6/continuous.cfg new file mode 100644 index 00000000..7218af14 --- /dev/null +++ b/.kokoro/samples/python3.6/continuous.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.6/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.6/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/presubmit.cfg b/.kokoro/samples/python3.6/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.6/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg new file mode 100644 index 00000000..4d745031 --- /dev/null +++ b/.kokoro/samples/python3.7/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.7" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-language/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-language/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.7/continuous.cfg b/.kokoro/samples/python3.7/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.7/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.7/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/presubmit.cfg b/.kokoro/samples/python3.7/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.7/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg new file mode 100644 index 00000000..bf242e12 --- /dev/null +++ b/.kokoro/samples/python3.8/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.8" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-language/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-language/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.8/continuous.cfg b/.kokoro/samples/python3.8/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.8/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.8/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/presubmit.cfg b/.kokoro/samples/python3.8/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.8/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh new file mode 100755 index 00000000..6576035c --- /dev/null +++ b/.kokoro/test-samples.sh @@ -0,0 +1,104 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +cd github/python-language + +# Run periodic samples tests at latest release +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + LATEST_RELEASE=$(git describe --abbrev=0 --tags) + git checkout $LATEST_RELEASE +fi + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Install nox +python3.6 -m pip install --upgrade --quiet nox + +# Use secrets acessor service account to get secrets +if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then + gcloud auth activate-service-account \ + --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ + --project="cloud-devrel-kokoro-resources" +fi + +# This script will create 3 files: +# - testing/test-env.sh +# - testing/service-account.json +# - testing/client-secrets.json +./scripts/decrypt-secrets.sh + +source ./testing/test-env.sh +export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json + +# For cloud-run session, we activate the service account for gcloud sdk. 
+gcloud auth activate-service-account \
+    --key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
+
+export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json
+
+echo -e "\n******************** TESTING PROJECTS ********************"
+
+# Switch to 'fail at end' to allow all tests to complete before exiting.
+set +e
+# Use RTN to return a non-zero value if the test fails.
+RTN=0
+ROOT=$(pwd)
+# Find all requirements.txt in the samples directory (may break on whitespace).
+for file in samples/**/requirements.txt; do
+    cd "$ROOT"
+    # Navigate to the project folder.
+    file=$(dirname "$file")
+    cd "$file"
+
+    echo "------------------------------------------------------------"
+    echo "- testing $file"
+    echo "------------------------------------------------------------"
+
+    # Use nox to execute the tests for the project.
+    python3.6 -m nox -s "$RUN_TESTS_SESSION"
+    EXIT=$?
+
+    # If this is a periodic build, send the test log to the Build Cop Bot.
+    # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop.
+    if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+        chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop
+        $KOKORO_GFILE_DIR/linux_amd64/buildcop
+    fi
+
+    if [[ $EXIT -ne 0 ]]; then
+        RTN=1
+        echo -e "\n Testing failed: Nox returned a non-zero exit code. \n"
+    else
+        echo -e "\n Testing completed.\n"
+    fi
+
+done
+cd "$ROOT"
+
+# Workaround for Kokoro permissions issue: delete secrets
+rm testing/{test-env.sh,client-secrets.json,service-account.json}
+
+exit "$RTN"
\ No newline at end of file
diff --git a/MANIFEST.in b/MANIFEST.in
index 68855abc..e9e29d12 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -20,3 +20,6 @@ recursive-include google *.json *.proto
 recursive-include tests *
 global-exclude *.py[co]
 global-exclude __pycache__
+
+# Exclude scripts for samples readmegen
+prune scripts/readme-gen
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index 289e544f..1fdbd3b5 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -38,21 +38,18 @@
     "sphinx.ext.napoleon",
     "sphinx.ext.todo",
     "sphinx.ext.viewcode",
+    "recommonmark",
 ]

 # autodoc/autosummary flags
 autoclass_content = "both"
-autodoc_default_flags = ["members"]
+autodoc_default_options = {"members": True}
 autosummary_generate = True


 # Add any paths that contain templates here, relative to this directory.
 templates_path = ["_templates"]

-# Allow markdown includes (so releases.md can include CHANGLEOG.md)
-# http://www.sphinx-doc.org/en/master/markdown.html
-source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
-
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst', '.md']
diff --git a/docs/index.rst b/docs/index.rst
index 1df32e07..368f811d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,5 +1,7 @@
 .. include:: README.rst

+.. include:: multiprocessing.rst
+
 .. toctree::
    :maxdepth: 2

diff --git a/docs/multiprocessing.rst b/docs/multiprocessing.rst
new file mode 100644
index 00000000..1cb29d4c
--- /dev/null
+++ b/docs/multiprocessing.rst
@@ -0,0 +1,7 @@
+.. note::
+
+   Because this client uses the :mod:`grpcio` library, it is safe to
+   share instances across threads. In multiprocessing scenarios, the best
+   practice is to create client instances *after* the invocation of
+   :func:`os.fork` by :class:`multiprocessing.Pool` or
+   :class:`multiprocessing.Process`.
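
[Editor's illustration, not part of the patch: a minimal sketch of the fork-safe pattern the new docs/multiprocessing.rst note recommends, assuming the language_v1 GAPIC client this package exposes. The sentiment_score helper, the sample texts, and the two-worker pool are illustrative choices, not code from this repository.]

    import multiprocessing

    from google.cloud import language_v1


    def sentiment_score(text):
        # Instantiate the client *after* the fork, inside the worker process,
        # so each process owns its own gRPC channel.
        client = language_v1.LanguageServiceClient()
        document = {
            "content": text,
            "type": language_v1.enums.Document.Type.PLAIN_TEXT,
        }
        return client.analyze_sentiment(document).document_sentiment.score


    if __name__ == "__main__":
        # multiprocessing.Pool forks the workers; each worker builds its own client.
        with multiprocessing.Pool(processes=2) as pool:
            print(pool.map(sentiment_score, ["I love this.", "This is terrible."]))
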
diff --git a/google/cloud/language_v1/gapic/enums.py b/google/cloud/language_v1/gapic/enums.py index 6ec0f9fe..28fefea5 100644 --- a/google/cloud/language_v1/gapic/enums.py +++ b/google/cloud/language_v1/gapic/enums.py @@ -21,24 +21,24 @@ class EncodingType(enum.IntEnum): """ - Represents the text encoding that the caller uses to process the output. - Providing an ``EncodingType`` is recommended because the API provides - the beginning offsets for various outputs, such as tokens and mentions, - and languages that natively use different text encodings may access - offsets differently. + Represents the text encoding that the caller uses to process the + output. Providing an ``EncodingType`` is recommended because the API + provides the beginning offsets for various outputs, such as tokens and + mentions, and languages that natively use different text encodings may + access offsets differently. Attributes: NONE (int): If ``EncodingType`` is not specified, encoding-dependent information (such as ``begin_offset``) will be set at ``-1``. - UTF8 (int): Encoding-dependent information (such as ``begin_offset``) is calculated - based on the UTF-8 encoding of the input. C++ and Go are examples of - languages that use this encoding natively. - UTF16 (int): Encoding-dependent information (such as ``begin_offset``) is calculated - based on the UTF-16 encoding of the input. Java and JavaScript are + UTF8 (int): Encoding-dependent information (such as ``begin_offset``) is + calculated based on the UTF-8 encoding of the input. C++ and Go are examples of languages that use this encoding natively. - UTF32 (int): Encoding-dependent information (such as ``begin_offset``) is calculated - based on the UTF-32 encoding of the input. Python is an example of a - language that uses this encoding natively. + UTF16 (int): Encoding-dependent information (such as ``begin_offset``) is + calculated based on the UTF-16 encoding of the input. Java and + JavaScript are examples of languages that use this encoding natively. + UTF32 (int): Encoding-dependent information (such as ``begin_offset``) is + calculated based on the UTF-32 encoding of the input. Python is an + example of a language that uses this encoding natively. """ NONE = 0 @@ -242,8 +242,8 @@ class Type(enum.IntEnum): class Entity(object): class Type(enum.IntEnum): """ - The type of the entity. For most entity types, the associated metadata - is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph MID + The type of the entity. For most entity types, the associated + metadata is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph MID (``mid``). The table below lists the associated fields for entities that have different metadata. @@ -256,8 +256,9 @@ class Type(enum.IntEnum): WORK_OF_ART (int): Artwork CONSUMER_GOOD (int): Consumer product OTHER (int): Other types of entities - PHONE_NUMBER (int): Phone number The metadata lists the phone number, formatted according to - local convention, plus whichever additional elements appear in the text: + PHONE_NUMBER (int): Phone number The metadata lists the phone number, formatted + according to local convention, plus whichever additional elements appear + in the text: .. 
raw:: html diff --git a/google/cloud/language_v1/gapic/language_service_client.py b/google/cloud/language_v1/gapic/language_service_client.py index 1520d84c..4dba1b05 100644 --- a/google/cloud/language_v1/gapic/language_service_client.py +++ b/google/cloud/language_v1/gapic/language_service_client.py @@ -322,8 +322,8 @@ def analyze_entity_sentiment( metadata=None, ): """ - Finds entities, similar to ``AnalyzeEntities`` in the text and analyzes - sentiment associated with each entity and its mentions. + Finds entities, similar to ``AnalyzeEntities`` in the text and + analyzes sentiment associated with each entity and its mentions. Example: >>> from google.cloud import language_v1 diff --git a/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py b/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py index e4b4ffad..5784072c 100644 --- a/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py +++ b/google/cloud/language_v1/gapic/transports/language_service_grpc_transport.py @@ -144,8 +144,8 @@ def analyze_entities(self): def analyze_entity_sentiment(self): """Return the gRPC stub for :meth:`LanguageServiceClient.analyze_entity_sentiment`. - Finds entities, similar to ``AnalyzeEntities`` in the text and analyzes - sentiment associated with each entity and its mentions. + Finds entities, similar to ``AnalyzeEntities`` in the text and + analyzes sentiment associated with each entity and its mentions. Returns: Callable: A callable which accepts the appropriate diff --git a/google/cloud/language_v1/proto/language_service_pb2.py b/google/cloud/language_v1/proto/language_service_pb2.py index 9c7ad493..675c5ad4 100644 --- a/google/cloud/language_v1/proto/language_service_pb2.py +++ b/google/cloud/language_v1/proto/language_service_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/language_v1/proto/language_service.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -25,12 +22,9 @@ name="google/cloud/language_v1/proto/language_service.proto", package="google.cloud.language.v1", syntax="proto3", - serialized_options=_b( - "\n\034com.google.cloud.language.v1B\024LanguageServiceProtoP\001Z@google.golang.org/genproto/googleapis/cloud/language/v1;language" - ), - serialized_pb=_b( - '\n5google/cloud/language_v1/proto/language_service.proto\x12\x18google.cloud.language.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto"\xc3\x01\n\x08\x44ocument\x12\x35\n\x04type\x18\x01 \x01(\x0e\x32\'.google.cloud.language.v1.Document.Type\x12\x11\n\x07\x63ontent\x18\x02 \x01(\tH\x00\x12\x19\n\x0fgcs_content_uri\x18\x03 \x01(\tH\x00\x12\x10\n\x08language\x18\x04 \x01(\t"6\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPLAIN_TEXT\x10\x01\x12\x08\n\x04HTML\x10\x02\x42\x08\n\x06source"t\n\x08Sentence\x12\x30\n\x04text\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.TextSpan\x12\x36\n\tsentiment\x18\x02 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment"\xff\x03\n\x06\x45ntity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x33\n\x04type\x18\x02 \x01(\x0e\x32%.google.cloud.language.v1.Entity.Type\x12@\n\x08metadata\x18\x03 \x03(\x0b\x32..google.cloud.language.v1.Entity.MetadataEntry\x12\x10\n\x08salience\x18\x04 \x01(\x02\x12\x39\n\x08mentions\x18\x05 \x03(\x0b\x32\'.google.cloud.language.v1.EntityMention\x12\x36\n\tsentiment\x18\x06 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb9\x01\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06PERSON\x10\x01\x12\x0c\n\x08LOCATION\x10\x02\x12\x10\n\x0cORGANIZATION\x10\x03\x12\t\n\x05\x45VENT\x10\x04\x12\x0f\n\x0bWORK_OF_ART\x10\x05\x12\x11\n\rCONSUMER_GOOD\x10\x06\x12\t\n\x05OTHER\x10\x07\x12\x10\n\x0cPHONE_NUMBER\x10\t\x12\x0b\n\x07\x41\x44\x44RESS\x10\n\x12\x08\n\x04\x44\x41TE\x10\x0b\x12\n\n\x06NUMBER\x10\x0c\x12\t\n\x05PRICE\x10\r"\xcb\x01\n\x05Token\x12\x30\n\x04text\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.TextSpan\x12>\n\x0epart_of_speech\x18\x02 \x01(\x0b\x32&.google.cloud.language.v1.PartOfSpeech\x12\x41\n\x0f\x64\x65pendency_edge\x18\x03 \x01(\x0b\x32(.google.cloud.language.v1.DependencyEdge\x12\r\n\x05lemma\x18\x04 \x01(\t"-\n\tSentiment\x12\x11\n\tmagnitude\x18\x02 \x01(\x02\x12\r\n\x05score\x18\x03 \x01(\x02"\xa3\x10\n\x0cPartOfSpeech\x12\x37\n\x03tag\x18\x01 \x01(\x0e\x32*.google.cloud.language.v1.PartOfSpeech.Tag\x12=\n\x06\x61spect\x18\x02 \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Aspect\x12\x39\n\x04\x63\x61se\x18\x03 \x01(\x0e\x32+.google.cloud.language.v1.PartOfSpeech.Case\x12\x39\n\x04\x66orm\x18\x04 \x01(\x0e\x32+.google.cloud.language.v1.PartOfSpeech.Form\x12=\n\x06gender\x18\x05 \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Gender\x12\x39\n\x04mood\x18\x06 \x01(\x0e\x32+.google.cloud.language.v1.PartOfSpeech.Mood\x12=\n\x06number\x18\x07 \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Number\x12=\n\x06person\x18\x08 \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Person\x12=\n\x06proper\x18\t \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Proper\x12G\n\x0breciprocity\x18\n 
\x01(\x0e\x32\x32.google.cloud.language.v1.PartOfSpeech.Reciprocity\x12;\n\x05tense\x18\x0b \x01(\x0e\x32,.google.cloud.language.v1.PartOfSpeech.Tense\x12;\n\x05voice\x18\x0c \x01(\x0e\x32,.google.cloud.language.v1.PartOfSpeech.Voice"\x8d\x01\n\x03Tag\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x41\x44J\x10\x01\x12\x07\n\x03\x41\x44P\x10\x02\x12\x07\n\x03\x41\x44V\x10\x03\x12\x08\n\x04\x43ONJ\x10\x04\x12\x07\n\x03\x44\x45T\x10\x05\x12\x08\n\x04NOUN\x10\x06\x12\x07\n\x03NUM\x10\x07\x12\x08\n\x04PRON\x10\x08\x12\x07\n\x03PRT\x10\t\x12\t\n\x05PUNCT\x10\n\x12\x08\n\x04VERB\x10\x0b\x12\x05\n\x01X\x10\x0c\x12\t\n\x05\x41\x46\x46IX\x10\r"O\n\x06\x41spect\x12\x12\n\x0e\x41SPECT_UNKNOWN\x10\x00\x12\x0e\n\nPERFECTIVE\x10\x01\x12\x10\n\x0cIMPERFECTIVE\x10\x02\x12\x0f\n\x0bPROGRESSIVE\x10\x03"\xf8\x01\n\x04\x43\x61se\x12\x10\n\x0c\x43\x41SE_UNKNOWN\x10\x00\x12\x0e\n\nACCUSATIVE\x10\x01\x12\r\n\tADVERBIAL\x10\x02\x12\x11\n\rCOMPLEMENTIVE\x10\x03\x12\n\n\x06\x44\x41TIVE\x10\x04\x12\x0c\n\x08GENITIVE\x10\x05\x12\x10\n\x0cINSTRUMENTAL\x10\x06\x12\x0c\n\x08LOCATIVE\x10\x07\x12\x0e\n\nNOMINATIVE\x10\x08\x12\x0b\n\x07OBLIQUE\x10\t\x12\r\n\tPARTITIVE\x10\n\x12\x11\n\rPREPOSITIONAL\x10\x0b\x12\x12\n\x0eREFLEXIVE_CASE\x10\x0c\x12\x11\n\rRELATIVE_CASE\x10\r\x12\x0c\n\x08VOCATIVE\x10\x0e"\xaf\x01\n\x04\x46orm\x12\x10\n\x0c\x46ORM_UNKNOWN\x10\x00\x12\x0c\n\x08\x41\x44NOMIAL\x10\x01\x12\r\n\tAUXILIARY\x10\x02\x12\x12\n\x0e\x43OMPLEMENTIZER\x10\x03\x12\x10\n\x0c\x46INAL_ENDING\x10\x04\x12\n\n\x06GERUND\x10\x05\x12\n\n\x06REALIS\x10\x06\x12\x0c\n\x08IRREALIS\x10\x07\x12\t\n\x05SHORT\x10\x08\x12\x08\n\x04LONG\x10\t\x12\t\n\x05ORDER\x10\n\x12\x0c\n\x08SPECIFIC\x10\x0b"E\n\x06Gender\x12\x12\n\x0eGENDER_UNKNOWN\x10\x00\x12\x0c\n\x08\x46\x45MININE\x10\x01\x12\r\n\tMASCULINE\x10\x02\x12\n\n\x06NEUTER\x10\x03"\x7f\n\x04Mood\x12\x10\n\x0cMOOD_UNKNOWN\x10\x00\x12\x14\n\x10\x43ONDITIONAL_MOOD\x10\x01\x12\x0e\n\nIMPERATIVE\x10\x02\x12\x0e\n\nINDICATIVE\x10\x03\x12\x11\n\rINTERROGATIVE\x10\x04\x12\x0b\n\x07JUSSIVE\x10\x05\x12\x0f\n\x0bSUBJUNCTIVE\x10\x06"@\n\x06Number\x12\x12\n\x0eNUMBER_UNKNOWN\x10\x00\x12\x0c\n\x08SINGULAR\x10\x01\x12\n\n\x06PLURAL\x10\x02\x12\x08\n\x04\x44UAL\x10\x03"T\n\x06Person\x12\x12\n\x0ePERSON_UNKNOWN\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\n\n\x06SECOND\x10\x02\x12\t\n\x05THIRD\x10\x03\x12\x14\n\x10REFLEXIVE_PERSON\x10\x04"8\n\x06Proper\x12\x12\n\x0ePROPER_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\x0e\n\nNOT_PROPER\x10\x02"J\n\x0bReciprocity\x12\x17\n\x13RECIPROCITY_UNKNOWN\x10\x00\x12\x0e\n\nRECIPROCAL\x10\x01\x12\x12\n\x0eNON_RECIPROCAL\x10\x02"s\n\x05Tense\x12\x11\n\rTENSE_UNKNOWN\x10\x00\x12\x15\n\x11\x43ONDITIONAL_TENSE\x10\x01\x12\n\n\x06\x46UTURE\x10\x02\x12\x08\n\x04PAST\x10\x03\x12\x0b\n\x07PRESENT\x10\x04\x12\r\n\tIMPERFECT\x10\x05\x12\x0e\n\nPLUPERFECT\x10\x06"B\n\x05Voice\x12\x11\n\rVOICE_UNKNOWN\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\r\n\tCAUSATIVE\x10\x02\x12\x0b\n\x07PASSIVE\x10\x03"\x95\x08\n\x0e\x44\x65pendencyEdge\x12\x18\n\x10head_token_index\x18\x01 \x01(\x05\x12=\n\x05label\x18\x02 
\x01(\x0e\x32..google.cloud.language.v1.DependencyEdge.Label"\xa9\x07\n\x05Label\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x41\x42\x42REV\x10\x01\x12\t\n\x05\x41\x43OMP\x10\x02\x12\t\n\x05\x41\x44VCL\x10\x03\x12\n\n\x06\x41\x44VMOD\x10\x04\x12\x08\n\x04\x41MOD\x10\x05\x12\t\n\x05\x41PPOS\x10\x06\x12\x08\n\x04\x41TTR\x10\x07\x12\x07\n\x03\x41UX\x10\x08\x12\x0b\n\x07\x41UXPASS\x10\t\x12\x06\n\x02\x43\x43\x10\n\x12\t\n\x05\x43\x43OMP\x10\x0b\x12\x08\n\x04\x43ONJ\x10\x0c\x12\t\n\x05\x43SUBJ\x10\r\x12\r\n\tCSUBJPASS\x10\x0e\x12\x07\n\x03\x44\x45P\x10\x0f\x12\x07\n\x03\x44\x45T\x10\x10\x12\r\n\tDISCOURSE\x10\x11\x12\x08\n\x04\x44OBJ\x10\x12\x12\x08\n\x04\x45XPL\x10\x13\x12\x0c\n\x08GOESWITH\x10\x14\x12\x08\n\x04IOBJ\x10\x15\x12\x08\n\x04MARK\x10\x16\x12\x07\n\x03MWE\x10\x17\x12\x07\n\x03MWV\x10\x18\x12\x07\n\x03NEG\x10\x19\x12\x06\n\x02NN\x10\x1a\x12\x0c\n\x08NPADVMOD\x10\x1b\x12\t\n\x05NSUBJ\x10\x1c\x12\r\n\tNSUBJPASS\x10\x1d\x12\x07\n\x03NUM\x10\x1e\x12\n\n\x06NUMBER\x10\x1f\x12\x05\n\x01P\x10 \x12\r\n\tPARATAXIS\x10!\x12\x0b\n\x07PARTMOD\x10"\x12\t\n\x05PCOMP\x10#\x12\x08\n\x04POBJ\x10$\x12\x08\n\x04POSS\x10%\x12\x0b\n\x07POSTNEG\x10&\x12\x0b\n\x07PRECOMP\x10\'\x12\x0b\n\x07PRECONJ\x10(\x12\n\n\x06PREDET\x10)\x12\x08\n\x04PREF\x10*\x12\x08\n\x04PREP\x10+\x12\t\n\x05PRONL\x10,\x12\x07\n\x03PRT\x10-\x12\x06\n\x02PS\x10.\x12\x0c\n\x08QUANTMOD\x10/\x12\t\n\x05RCMOD\x10\x30\x12\x0c\n\x08RCMODREL\x10\x31\x12\t\n\x05RDROP\x10\x32\x12\x07\n\x03REF\x10\x33\x12\x0b\n\x07REMNANT\x10\x34\x12\x0e\n\nREPARANDUM\x10\x35\x12\x08\n\x04ROOT\x10\x36\x12\x08\n\x04SNUM\x10\x37\x12\x08\n\x04SUFF\x10\x38\x12\x08\n\x04TMOD\x10\x39\x12\t\n\x05TOPIC\x10:\x12\x08\n\x04VMOD\x10;\x12\x0c\n\x08VOCATIVE\x10<\x12\t\n\x05XCOMP\x10=\x12\n\n\x06SUFFIX\x10>\x12\t\n\x05TITLE\x10?\x12\x0c\n\x08\x41\x44VPHMOD\x10@\x12\x0b\n\x07\x41UXCAUS\x10\x41\x12\t\n\x05\x41UXVV\x10\x42\x12\t\n\x05\x44TMOD\x10\x43\x12\x0b\n\x07\x46OREIGN\x10\x44\x12\x06\n\x02KW\x10\x45\x12\x08\n\x04LIST\x10\x46\x12\x08\n\x04NOMC\x10G\x12\x0c\n\x08NOMCSUBJ\x10H\x12\x10\n\x0cNOMCSUBJPASS\x10I\x12\x08\n\x04NUMC\x10J\x12\x07\n\x03\x43OP\x10K\x12\x0e\n\nDISLOCATED\x10L\x12\x07\n\x03\x41SP\x10M\x12\x08\n\x04GMOD\x10N\x12\x08\n\x04GOBJ\x10O\x12\n\n\x06INFMOD\x10P\x12\x07\n\x03MES\x10Q\x12\t\n\x05NCOMP\x10R"\xe7\x01\n\rEntityMention\x12\x30\n\x04text\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.TextSpan\x12:\n\x04type\x18\x02 \x01(\x0e\x32,.google.cloud.language.v1.EntityMention.Type\x12\x36\n\tsentiment\x18\x03 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment"0\n\x04Type\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\n\n\x06\x43OMMON\x10\x02"1\n\x08TextSpan\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x14\n\x0c\x62\x65gin_offset\x18\x02 \x01(\x05":\n\x16\x43lassificationCategory\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02"\x93\x01\n\x17\x41nalyzeSentimentRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x02 \x01(\x0e\x32&.google.cloud.language.v1.EncodingType"\xa4\x01\n\x18\x41nalyzeSentimentResponse\x12?\n\x12\x64ocument_sentiment\x18\x01 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment\x12\x10\n\x08language\x18\x02 \x01(\t\x12\x35\n\tsentences\x18\x03 \x03(\x0b\x32".google.cloud.language.v1.Sentence"\x99\x01\n\x1d\x41nalyzeEntitySentimentRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x02 
\x01(\x0e\x32&.google.cloud.language.v1.EncodingType"f\n\x1e\x41nalyzeEntitySentimentResponse\x12\x32\n\x08\x65ntities\x18\x01 \x03(\x0b\x32 .google.cloud.language.v1.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x92\x01\n\x16\x41nalyzeEntitiesRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x02 \x01(\x0e\x32&.google.cloud.language.v1.EncodingType"_\n\x17\x41nalyzeEntitiesResponse\x12\x32\n\x08\x65ntities\x18\x01 \x03(\x0b\x32 .google.cloud.language.v1.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x90\x01\n\x14\x41nalyzeSyntaxRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x02 \x01(\x0e\x32&.google.cloud.language.v1.EncodingType"\x91\x01\n\x15\x41nalyzeSyntaxResponse\x12\x35\n\tsentences\x18\x01 \x03(\x0b\x32".google.cloud.language.v1.Sentence\x12/\n\x06tokens\x18\x02 \x03(\x0b\x32\x1f.google.cloud.language.v1.Token\x12\x10\n\x08language\x18\x03 \x01(\t"P\n\x13\x43lassifyTextRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02"\\\n\x14\x43lassifyTextResponse\x12\x44\n\ncategories\x18\x01 \x03(\x0b\x32\x30.google.cloud.language.v1.ClassificationCategory"\xfa\x02\n\x13\x41nnotateTextRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12M\n\x08\x66\x65\x61tures\x18\x02 \x01(\x0b\x32\x36.google.cloud.language.v1.AnnotateTextRequest.FeaturesB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x03 \x01(\x0e\x32&.google.cloud.language.v1.EncodingType\x1a\x99\x01\n\x08\x46\x65\x61tures\x12\x16\n\x0e\x65xtract_syntax\x18\x01 \x01(\x08\x12\x18\n\x10\x65xtract_entities\x18\x02 \x01(\x08\x12"\n\x1a\x65xtract_document_sentiment\x18\x03 \x01(\x08\x12 \n\x18\x65xtract_entity_sentiment\x18\x04 \x01(\x08\x12\x15\n\rclassify_text\x18\x06 \x01(\x08"\xcb\x02\n\x14\x41nnotateTextResponse\x12\x35\n\tsentences\x18\x01 \x03(\x0b\x32".google.cloud.language.v1.Sentence\x12/\n\x06tokens\x18\x02 \x03(\x0b\x32\x1f.google.cloud.language.v1.Token\x12\x32\n\x08\x65ntities\x18\x03 \x03(\x0b\x32 .google.cloud.language.v1.Entity\x12?\n\x12\x64ocument_sentiment\x18\x04 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment\x12\x10\n\x08language\x18\x05 \x01(\t\x12\x44\n\ncategories\x18\x06 
\x03(\x0b\x32\x30.google.cloud.language.v1.ClassificationCategory*8\n\x0c\x45ncodingType\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04UTF8\x10\x01\x12\t\n\x05UTF16\x10\x02\x12\t\n\x05UTF32\x10\x03\x32\xb0\n\n\x0fLanguageService\x12\xc8\x01\n\x10\x41nalyzeSentiment\x12\x31.google.cloud.language.v1.AnalyzeSentimentRequest\x1a\x32.google.cloud.language.v1.AnalyzeSentimentResponse"M\x82\xd3\xe4\x93\x02#"\x1e/v1/documents:analyzeSentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xc4\x01\n\x0f\x41nalyzeEntities\x12\x30.google.cloud.language.v1.AnalyzeEntitiesRequest\x1a\x31.google.cloud.language.v1.AnalyzeEntitiesResponse"L\x82\xd3\xe4\x93\x02""\x1d/v1/documents:analyzeEntities:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xe0\x01\n\x16\x41nalyzeEntitySentiment\x12\x37.google.cloud.language.v1.AnalyzeEntitySentimentRequest\x1a\x38.google.cloud.language.v1.AnalyzeEntitySentimentResponse"S\x82\xd3\xe4\x93\x02)"$/v1/documents:analyzeEntitySentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xbc\x01\n\rAnalyzeSyntax\x12..google.cloud.language.v1.AnalyzeSyntaxRequest\x1a/.google.cloud.language.v1.AnalyzeSyntaxResponse"J\x82\xd3\xe4\x93\x02 "\x1b/v1/documents:analyzeSyntax:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\x9f\x01\n\x0c\x43lassifyText\x12-.google.cloud.language.v1.ClassifyTextRequest\x1a..google.cloud.language.v1.ClassifyTextResponse"0\x82\xd3\xe4\x93\x02\x1f"\x1a/v1/documents:classifyText:\x01*\xda\x41\x08\x64ocument\x12\xca\x01\n\x0c\x41nnotateText\x12-.google.cloud.language.v1.AnnotateTextRequest\x1a..google.cloud.language.v1.AnnotateTextResponse"[\x82\xd3\xe4\x93\x02\x1f"\x1a/v1/documents:annotateText:\x01*\xda\x41\x1f\x64ocument,features,encoding_type\xda\x41\x11\x64ocument,features\x1az\xca\x41\x17language.googleapis.com\xd2\x41]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platformBx\n\x1c\x63om.google.cloud.language.v1B\x14LanguageServiceProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/language/v1;languageb\x06proto3' - ), + serialized_options=b"\n\034com.google.cloud.language.v1B\024LanguageServiceProtoP\001Z@google.golang.org/genproto/googleapis/cloud/language/v1;language", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n5google/cloud/language_v1/proto/language_service.proto\x12\x18google.cloud.language.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto"\xc3\x01\n\x08\x44ocument\x12\x35\n\x04type\x18\x01 \x01(\x0e\x32\'.google.cloud.language.v1.Document.Type\x12\x11\n\x07\x63ontent\x18\x02 \x01(\tH\x00\x12\x19\n\x0fgcs_content_uri\x18\x03 \x01(\tH\x00\x12\x10\n\x08language\x18\x04 \x01(\t"6\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPLAIN_TEXT\x10\x01\x12\x08\n\x04HTML\x10\x02\x42\x08\n\x06source"t\n\x08Sentence\x12\x30\n\x04text\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.TextSpan\x12\x36\n\tsentiment\x18\x02 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment"\xff\x03\n\x06\x45ntity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x33\n\x04type\x18\x02 \x01(\x0e\x32%.google.cloud.language.v1.Entity.Type\x12@\n\x08metadata\x18\x03 \x03(\x0b\x32..google.cloud.language.v1.Entity.MetadataEntry\x12\x10\n\x08salience\x18\x04 \x01(\x02\x12\x39\n\x08mentions\x18\x05 \x03(\x0b\x32\'.google.cloud.language.v1.EntityMention\x12\x36\n\tsentiment\x18\x06 
\x01(\x0b\x32#.google.cloud.language.v1.Sentiment\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb9\x01\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06PERSON\x10\x01\x12\x0c\n\x08LOCATION\x10\x02\x12\x10\n\x0cORGANIZATION\x10\x03\x12\t\n\x05\x45VENT\x10\x04\x12\x0f\n\x0bWORK_OF_ART\x10\x05\x12\x11\n\rCONSUMER_GOOD\x10\x06\x12\t\n\x05OTHER\x10\x07\x12\x10\n\x0cPHONE_NUMBER\x10\t\x12\x0b\n\x07\x41\x44\x44RESS\x10\n\x12\x08\n\x04\x44\x41TE\x10\x0b\x12\n\n\x06NUMBER\x10\x0c\x12\t\n\x05PRICE\x10\r"\xcb\x01\n\x05Token\x12\x30\n\x04text\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.TextSpan\x12>\n\x0epart_of_speech\x18\x02 \x01(\x0b\x32&.google.cloud.language.v1.PartOfSpeech\x12\x41\n\x0f\x64\x65pendency_edge\x18\x03 \x01(\x0b\x32(.google.cloud.language.v1.DependencyEdge\x12\r\n\x05lemma\x18\x04 \x01(\t"-\n\tSentiment\x12\x11\n\tmagnitude\x18\x02 \x01(\x02\x12\r\n\x05score\x18\x03 \x01(\x02"\xa3\x10\n\x0cPartOfSpeech\x12\x37\n\x03tag\x18\x01 \x01(\x0e\x32*.google.cloud.language.v1.PartOfSpeech.Tag\x12=\n\x06\x61spect\x18\x02 \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Aspect\x12\x39\n\x04\x63\x61se\x18\x03 \x01(\x0e\x32+.google.cloud.language.v1.PartOfSpeech.Case\x12\x39\n\x04\x66orm\x18\x04 \x01(\x0e\x32+.google.cloud.language.v1.PartOfSpeech.Form\x12=\n\x06gender\x18\x05 \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Gender\x12\x39\n\x04mood\x18\x06 \x01(\x0e\x32+.google.cloud.language.v1.PartOfSpeech.Mood\x12=\n\x06number\x18\x07 \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Number\x12=\n\x06person\x18\x08 \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Person\x12=\n\x06proper\x18\t \x01(\x0e\x32-.google.cloud.language.v1.PartOfSpeech.Proper\x12G\n\x0breciprocity\x18\n \x01(\x0e\x32\x32.google.cloud.language.v1.PartOfSpeech.Reciprocity\x12;\n\x05tense\x18\x0b \x01(\x0e\x32,.google.cloud.language.v1.PartOfSpeech.Tense\x12;\n\x05voice\x18\x0c 
\x01(\x0e\x32,.google.cloud.language.v1.PartOfSpeech.Voice"\x8d\x01\n\x03Tag\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x41\x44J\x10\x01\x12\x07\n\x03\x41\x44P\x10\x02\x12\x07\n\x03\x41\x44V\x10\x03\x12\x08\n\x04\x43ONJ\x10\x04\x12\x07\n\x03\x44\x45T\x10\x05\x12\x08\n\x04NOUN\x10\x06\x12\x07\n\x03NUM\x10\x07\x12\x08\n\x04PRON\x10\x08\x12\x07\n\x03PRT\x10\t\x12\t\n\x05PUNCT\x10\n\x12\x08\n\x04VERB\x10\x0b\x12\x05\n\x01X\x10\x0c\x12\t\n\x05\x41\x46\x46IX\x10\r"O\n\x06\x41spect\x12\x12\n\x0e\x41SPECT_UNKNOWN\x10\x00\x12\x0e\n\nPERFECTIVE\x10\x01\x12\x10\n\x0cIMPERFECTIVE\x10\x02\x12\x0f\n\x0bPROGRESSIVE\x10\x03"\xf8\x01\n\x04\x43\x61se\x12\x10\n\x0c\x43\x41SE_UNKNOWN\x10\x00\x12\x0e\n\nACCUSATIVE\x10\x01\x12\r\n\tADVERBIAL\x10\x02\x12\x11\n\rCOMPLEMENTIVE\x10\x03\x12\n\n\x06\x44\x41TIVE\x10\x04\x12\x0c\n\x08GENITIVE\x10\x05\x12\x10\n\x0cINSTRUMENTAL\x10\x06\x12\x0c\n\x08LOCATIVE\x10\x07\x12\x0e\n\nNOMINATIVE\x10\x08\x12\x0b\n\x07OBLIQUE\x10\t\x12\r\n\tPARTITIVE\x10\n\x12\x11\n\rPREPOSITIONAL\x10\x0b\x12\x12\n\x0eREFLEXIVE_CASE\x10\x0c\x12\x11\n\rRELATIVE_CASE\x10\r\x12\x0c\n\x08VOCATIVE\x10\x0e"\xaf\x01\n\x04\x46orm\x12\x10\n\x0c\x46ORM_UNKNOWN\x10\x00\x12\x0c\n\x08\x41\x44NOMIAL\x10\x01\x12\r\n\tAUXILIARY\x10\x02\x12\x12\n\x0e\x43OMPLEMENTIZER\x10\x03\x12\x10\n\x0c\x46INAL_ENDING\x10\x04\x12\n\n\x06GERUND\x10\x05\x12\n\n\x06REALIS\x10\x06\x12\x0c\n\x08IRREALIS\x10\x07\x12\t\n\x05SHORT\x10\x08\x12\x08\n\x04LONG\x10\t\x12\t\n\x05ORDER\x10\n\x12\x0c\n\x08SPECIFIC\x10\x0b"E\n\x06Gender\x12\x12\n\x0eGENDER_UNKNOWN\x10\x00\x12\x0c\n\x08\x46\x45MININE\x10\x01\x12\r\n\tMASCULINE\x10\x02\x12\n\n\x06NEUTER\x10\x03"\x7f\n\x04Mood\x12\x10\n\x0cMOOD_UNKNOWN\x10\x00\x12\x14\n\x10\x43ONDITIONAL_MOOD\x10\x01\x12\x0e\n\nIMPERATIVE\x10\x02\x12\x0e\n\nINDICATIVE\x10\x03\x12\x11\n\rINTERROGATIVE\x10\x04\x12\x0b\n\x07JUSSIVE\x10\x05\x12\x0f\n\x0bSUBJUNCTIVE\x10\x06"@\n\x06Number\x12\x12\n\x0eNUMBER_UNKNOWN\x10\x00\x12\x0c\n\x08SINGULAR\x10\x01\x12\n\n\x06PLURAL\x10\x02\x12\x08\n\x04\x44UAL\x10\x03"T\n\x06Person\x12\x12\n\x0ePERSON_UNKNOWN\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\n\n\x06SECOND\x10\x02\x12\t\n\x05THIRD\x10\x03\x12\x14\n\x10REFLEXIVE_PERSON\x10\x04"8\n\x06Proper\x12\x12\n\x0ePROPER_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\x0e\n\nNOT_PROPER\x10\x02"J\n\x0bReciprocity\x12\x17\n\x13RECIPROCITY_UNKNOWN\x10\x00\x12\x0e\n\nRECIPROCAL\x10\x01\x12\x12\n\x0eNON_RECIPROCAL\x10\x02"s\n\x05Tense\x12\x11\n\rTENSE_UNKNOWN\x10\x00\x12\x15\n\x11\x43ONDITIONAL_TENSE\x10\x01\x12\n\n\x06\x46UTURE\x10\x02\x12\x08\n\x04PAST\x10\x03\x12\x0b\n\x07PRESENT\x10\x04\x12\r\n\tIMPERFECT\x10\x05\x12\x0e\n\nPLUPERFECT\x10\x06"B\n\x05Voice\x12\x11\n\rVOICE_UNKNOWN\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\r\n\tCAUSATIVE\x10\x02\x12\x0b\n\x07PASSIVE\x10\x03"\x95\x08\n\x0e\x44\x65pendencyEdge\x12\x18\n\x10head_token_index\x18\x01 \x01(\x05\x12=\n\x05label\x18\x02 
\x01(\x0e\x32..google.cloud.language.v1.DependencyEdge.Label"\xa9\x07\n\x05Label\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x41\x42\x42REV\x10\x01\x12\t\n\x05\x41\x43OMP\x10\x02\x12\t\n\x05\x41\x44VCL\x10\x03\x12\n\n\x06\x41\x44VMOD\x10\x04\x12\x08\n\x04\x41MOD\x10\x05\x12\t\n\x05\x41PPOS\x10\x06\x12\x08\n\x04\x41TTR\x10\x07\x12\x07\n\x03\x41UX\x10\x08\x12\x0b\n\x07\x41UXPASS\x10\t\x12\x06\n\x02\x43\x43\x10\n\x12\t\n\x05\x43\x43OMP\x10\x0b\x12\x08\n\x04\x43ONJ\x10\x0c\x12\t\n\x05\x43SUBJ\x10\r\x12\r\n\tCSUBJPASS\x10\x0e\x12\x07\n\x03\x44\x45P\x10\x0f\x12\x07\n\x03\x44\x45T\x10\x10\x12\r\n\tDISCOURSE\x10\x11\x12\x08\n\x04\x44OBJ\x10\x12\x12\x08\n\x04\x45XPL\x10\x13\x12\x0c\n\x08GOESWITH\x10\x14\x12\x08\n\x04IOBJ\x10\x15\x12\x08\n\x04MARK\x10\x16\x12\x07\n\x03MWE\x10\x17\x12\x07\n\x03MWV\x10\x18\x12\x07\n\x03NEG\x10\x19\x12\x06\n\x02NN\x10\x1a\x12\x0c\n\x08NPADVMOD\x10\x1b\x12\t\n\x05NSUBJ\x10\x1c\x12\r\n\tNSUBJPASS\x10\x1d\x12\x07\n\x03NUM\x10\x1e\x12\n\n\x06NUMBER\x10\x1f\x12\x05\n\x01P\x10 \x12\r\n\tPARATAXIS\x10!\x12\x0b\n\x07PARTMOD\x10"\x12\t\n\x05PCOMP\x10#\x12\x08\n\x04POBJ\x10$\x12\x08\n\x04POSS\x10%\x12\x0b\n\x07POSTNEG\x10&\x12\x0b\n\x07PRECOMP\x10\'\x12\x0b\n\x07PRECONJ\x10(\x12\n\n\x06PREDET\x10)\x12\x08\n\x04PREF\x10*\x12\x08\n\x04PREP\x10+\x12\t\n\x05PRONL\x10,\x12\x07\n\x03PRT\x10-\x12\x06\n\x02PS\x10.\x12\x0c\n\x08QUANTMOD\x10/\x12\t\n\x05RCMOD\x10\x30\x12\x0c\n\x08RCMODREL\x10\x31\x12\t\n\x05RDROP\x10\x32\x12\x07\n\x03REF\x10\x33\x12\x0b\n\x07REMNANT\x10\x34\x12\x0e\n\nREPARANDUM\x10\x35\x12\x08\n\x04ROOT\x10\x36\x12\x08\n\x04SNUM\x10\x37\x12\x08\n\x04SUFF\x10\x38\x12\x08\n\x04TMOD\x10\x39\x12\t\n\x05TOPIC\x10:\x12\x08\n\x04VMOD\x10;\x12\x0c\n\x08VOCATIVE\x10<\x12\t\n\x05XCOMP\x10=\x12\n\n\x06SUFFIX\x10>\x12\t\n\x05TITLE\x10?\x12\x0c\n\x08\x41\x44VPHMOD\x10@\x12\x0b\n\x07\x41UXCAUS\x10\x41\x12\t\n\x05\x41UXVV\x10\x42\x12\t\n\x05\x44TMOD\x10\x43\x12\x0b\n\x07\x46OREIGN\x10\x44\x12\x06\n\x02KW\x10\x45\x12\x08\n\x04LIST\x10\x46\x12\x08\n\x04NOMC\x10G\x12\x0c\n\x08NOMCSUBJ\x10H\x12\x10\n\x0cNOMCSUBJPASS\x10I\x12\x08\n\x04NUMC\x10J\x12\x07\n\x03\x43OP\x10K\x12\x0e\n\nDISLOCATED\x10L\x12\x07\n\x03\x41SP\x10M\x12\x08\n\x04GMOD\x10N\x12\x08\n\x04GOBJ\x10O\x12\n\n\x06INFMOD\x10P\x12\x07\n\x03MES\x10Q\x12\t\n\x05NCOMP\x10R"\xe7\x01\n\rEntityMention\x12\x30\n\x04text\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.TextSpan\x12:\n\x04type\x18\x02 \x01(\x0e\x32,.google.cloud.language.v1.EntityMention.Type\x12\x36\n\tsentiment\x18\x03 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment"0\n\x04Type\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\n\n\x06\x43OMMON\x10\x02"1\n\x08TextSpan\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x14\n\x0c\x62\x65gin_offset\x18\x02 \x01(\x05":\n\x16\x43lassificationCategory\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02"\x93\x01\n\x17\x41nalyzeSentimentRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x02 \x01(\x0e\x32&.google.cloud.language.v1.EncodingType"\xa4\x01\n\x18\x41nalyzeSentimentResponse\x12?\n\x12\x64ocument_sentiment\x18\x01 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment\x12\x10\n\x08language\x18\x02 \x01(\t\x12\x35\n\tsentences\x18\x03 \x03(\x0b\x32".google.cloud.language.v1.Sentence"\x99\x01\n\x1d\x41nalyzeEntitySentimentRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x02 
\x01(\x0e\x32&.google.cloud.language.v1.EncodingType"f\n\x1e\x41nalyzeEntitySentimentResponse\x12\x32\n\x08\x65ntities\x18\x01 \x03(\x0b\x32 .google.cloud.language.v1.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x92\x01\n\x16\x41nalyzeEntitiesRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x02 \x01(\x0e\x32&.google.cloud.language.v1.EncodingType"_\n\x17\x41nalyzeEntitiesResponse\x12\x32\n\x08\x65ntities\x18\x01 \x03(\x0b\x32 .google.cloud.language.v1.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x90\x01\n\x14\x41nalyzeSyntaxRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x02 \x01(\x0e\x32&.google.cloud.language.v1.EncodingType"\x91\x01\n\x15\x41nalyzeSyntaxResponse\x12\x35\n\tsentences\x18\x01 \x03(\x0b\x32".google.cloud.language.v1.Sentence\x12/\n\x06tokens\x18\x02 \x03(\x0b\x32\x1f.google.cloud.language.v1.Token\x12\x10\n\x08language\x18\x03 \x01(\t"P\n\x13\x43lassifyTextRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02"\\\n\x14\x43lassifyTextResponse\x12\x44\n\ncategories\x18\x01 \x03(\x0b\x32\x30.google.cloud.language.v1.ClassificationCategory"\xfa\x02\n\x13\x41nnotateTextRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.cloud.language.v1.DocumentB\x03\xe0\x41\x02\x12M\n\x08\x66\x65\x61tures\x18\x02 \x01(\x0b\x32\x36.google.cloud.language.v1.AnnotateTextRequest.FeaturesB\x03\xe0\x41\x02\x12=\n\rencoding_type\x18\x03 \x01(\x0e\x32&.google.cloud.language.v1.EncodingType\x1a\x99\x01\n\x08\x46\x65\x61tures\x12\x16\n\x0e\x65xtract_syntax\x18\x01 \x01(\x08\x12\x18\n\x10\x65xtract_entities\x18\x02 \x01(\x08\x12"\n\x1a\x65xtract_document_sentiment\x18\x03 \x01(\x08\x12 \n\x18\x65xtract_entity_sentiment\x18\x04 \x01(\x08\x12\x15\n\rclassify_text\x18\x06 \x01(\x08"\xcb\x02\n\x14\x41nnotateTextResponse\x12\x35\n\tsentences\x18\x01 \x03(\x0b\x32".google.cloud.language.v1.Sentence\x12/\n\x06tokens\x18\x02 \x03(\x0b\x32\x1f.google.cloud.language.v1.Token\x12\x32\n\x08\x65ntities\x18\x03 \x03(\x0b\x32 .google.cloud.language.v1.Entity\x12?\n\x12\x64ocument_sentiment\x18\x04 \x01(\x0b\x32#.google.cloud.language.v1.Sentiment\x12\x10\n\x08language\x18\x05 \x01(\t\x12\x44\n\ncategories\x18\x06 
\x03(\x0b\x32\x30.google.cloud.language.v1.ClassificationCategory*8\n\x0c\x45ncodingType\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04UTF8\x10\x01\x12\t\n\x05UTF16\x10\x02\x12\t\n\x05UTF32\x10\x03\x32\xb0\n\n\x0fLanguageService\x12\xc8\x01\n\x10\x41nalyzeSentiment\x12\x31.google.cloud.language.v1.AnalyzeSentimentRequest\x1a\x32.google.cloud.language.v1.AnalyzeSentimentResponse"M\x82\xd3\xe4\x93\x02#"\x1e/v1/documents:analyzeSentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xc4\x01\n\x0f\x41nalyzeEntities\x12\x30.google.cloud.language.v1.AnalyzeEntitiesRequest\x1a\x31.google.cloud.language.v1.AnalyzeEntitiesResponse"L\x82\xd3\xe4\x93\x02""\x1d/v1/documents:analyzeEntities:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xe0\x01\n\x16\x41nalyzeEntitySentiment\x12\x37.google.cloud.language.v1.AnalyzeEntitySentimentRequest\x1a\x38.google.cloud.language.v1.AnalyzeEntitySentimentResponse"S\x82\xd3\xe4\x93\x02)"$/v1/documents:analyzeEntitySentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xbc\x01\n\rAnalyzeSyntax\x12..google.cloud.language.v1.AnalyzeSyntaxRequest\x1a/.google.cloud.language.v1.AnalyzeSyntaxResponse"J\x82\xd3\xe4\x93\x02 "\x1b/v1/documents:analyzeSyntax:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\x9f\x01\n\x0c\x43lassifyText\x12-.google.cloud.language.v1.ClassifyTextRequest\x1a..google.cloud.language.v1.ClassifyTextResponse"0\x82\xd3\xe4\x93\x02\x1f"\x1a/v1/documents:classifyText:\x01*\xda\x41\x08\x64ocument\x12\xca\x01\n\x0c\x41nnotateText\x12-.google.cloud.language.v1.AnnotateTextRequest\x1a..google.cloud.language.v1.AnnotateTextResponse"[\x82\xd3\xe4\x93\x02\x1f"\x1a/v1/documents:annotateText:\x01*\xda\x41\x1f\x64ocument,features,encoding_type\xda\x41\x11\x64ocument,features\x1az\xca\x41\x17language.googleapis.com\xd2\x41]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platformBx\n\x1c\x63om.google.cloud.language.v1B\x14LanguageServiceProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/language/v1;languageb\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -43,18 +37,39 @@ full_name="google.cloud.language.v1.EncodingType", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="NONE", index=0, number=0, serialized_options=None, type=None + name="NONE", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UTF8", index=1, number=1, serialized_options=None, type=None + name="UTF8", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UTF16", index=2, number=2, serialized_options=None, type=None + name="UTF16", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UTF32", index=3, number=3, serialized_options=None, type=None + name="UTF32", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -76,6 +91,7 @@ full_name="google.cloud.language.v1.Document.Type", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ 
_descriptor.EnumValueDescriptor( name="TYPE_UNSPECIFIED", @@ -83,12 +99,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PLAIN_TEXT", index=1, number=1, serialized_options=None, type=None + name="PLAIN_TEXT", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="HTML", index=2, number=2, serialized_options=None, type=None + name="HTML", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -103,45 +130,111 @@ full_name="google.cloud.language.v1.Entity.Type", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PERSON", index=1, number=1, serialized_options=None, type=None + name="PERSON", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LOCATION", index=2, number=2, serialized_options=None, type=None + name="LOCATION", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ORGANIZATION", index=3, number=3, serialized_options=None, type=None + name="ORGANIZATION", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="EVENT", index=4, number=4, serialized_options=None, type=None + name="EVENT", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="WORK_OF_ART", index=5, number=5, serialized_options=None, type=None + name="WORK_OF_ART", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CONSUMER_GOOD", index=6, number=6, serialized_options=None, type=None + name="CONSUMER_GOOD", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="OTHER", index=7, number=7, serialized_options=None, type=None + name="OTHER", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PHONE_NUMBER", index=8, number=9, serialized_options=None, type=None + name="PHONE_NUMBER", + index=8, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADDRESS", index=9, number=10, serialized_options=None, type=None + name="ADDRESS", + index=9, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DATE", index=10, number=11, serialized_options=None, type=None + name="DATE", + index=10, + number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUMBER", index=11, number=12, 
serialized_options=None, type=None + name="NUMBER", + index=11, + number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRICE", index=12, number=13, serialized_options=None, type=None + name="PRICE", + index=12, + number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -156,48 +249,119 @@ full_name="google.cloud.language.v1.PartOfSpeech.Tag", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADJ", index=1, number=1, serialized_options=None, type=None + name="ADJ", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADP", index=2, number=2, serialized_options=None, type=None + name="ADP", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADV", index=3, number=3, serialized_options=None, type=None + name="ADV", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CONJ", index=4, number=4, serialized_options=None, type=None + name="CONJ", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DET", index=5, number=5, serialized_options=None, type=None + name="DET", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOUN", index=6, number=6, serialized_options=None, type=None + name="NOUN", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUM", index=7, number=7, serialized_options=None, type=None + name="NUM", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRON", index=8, number=8, serialized_options=None, type=None + name="PRON", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRT", index=9, number=9, serialized_options=None, type=None + name="PRT", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PUNCT", index=10, number=10, serialized_options=None, type=None + name="PUNCT", + index=10, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERB", index=11, number=11, serialized_options=None, type=None + name="VERB", + index=11, + number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="X", index=12, number=12, serialized_options=None, type=None + name="X", + index=12, + number=12, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AFFIX", index=13, number=13, serialized_options=None, type=None + name="AFFIX", + index=13, + number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -212,18 +376,39 @@ full_name="google.cloud.language.v1.PartOfSpeech.Aspect", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="ASPECT_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="ASPECT_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PERFECTIVE", index=1, number=1, serialized_options=None, type=None + name="PERFECTIVE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IMPERFECTIVE", index=2, number=2, serialized_options=None, type=None + name="IMPERFECTIVE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PROGRESSIVE", index=3, number=3, serialized_options=None, type=None + name="PROGRESSIVE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -238,39 +423,95 @@ full_name="google.cloud.language.v1.PartOfSpeech.Case", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="CASE_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="CASE_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ACCUSATIVE", index=1, number=1, serialized_options=None, type=None + name="ACCUSATIVE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADVERBIAL", index=2, number=2, serialized_options=None, type=None + name="ADVERBIAL", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="COMPLEMENTIVE", index=3, number=3, serialized_options=None, type=None + name="COMPLEMENTIVE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DATIVE", index=4, number=4, serialized_options=None, type=None + name="DATIVE", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GENITIVE", index=5, number=5, serialized_options=None, type=None + name="GENITIVE", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INSTRUMENTAL", index=6, number=6, serialized_options=None, type=None + name="INSTRUMENTAL", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LOCATIVE", index=7, number=7, serialized_options=None, type=None + name="LOCATIVE", + index=7, + number=7, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOMINATIVE", index=8, number=8, serialized_options=None, type=None + name="NOMINATIVE", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="OBLIQUE", index=9, number=9, serialized_options=None, type=None + name="OBLIQUE", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PARTITIVE", index=10, number=10, serialized_options=None, type=None + name="PARTITIVE", + index=10, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="PREPOSITIONAL", @@ -278,6 +519,7 @@ number=11, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="REFLEXIVE_CASE", @@ -285,6 +527,7 @@ number=12, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="RELATIVE_CASE", @@ -292,9 +535,15 @@ number=13, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VOCATIVE", index=14, number=14, serialized_options=None, type=None + name="VOCATIVE", + index=14, + number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -309,42 +558,103 @@ full_name="google.cloud.language.v1.PartOfSpeech.Form", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="FORM_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="FORM_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADNOMIAL", index=1, number=1, serialized_options=None, type=None + name="ADNOMIAL", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUXILIARY", index=2, number=2, serialized_options=None, type=None + name="AUXILIARY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="COMPLEMENTIZER", index=3, number=3, serialized_options=None, type=None + name="COMPLEMENTIZER", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FINAL_ENDING", index=4, number=4, serialized_options=None, type=None + name="FINAL_ENDING", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GERUND", index=5, number=5, serialized_options=None, type=None + name="GERUND", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="REALIS", index=6, number=6, serialized_options=None, type=None + name="REALIS", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IRREALIS", index=7, number=7, serialized_options=None, type=None + name="IRREALIS", + index=7, + 
number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SHORT", index=8, number=8, serialized_options=None, type=None + name="SHORT", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LONG", index=9, number=9, serialized_options=None, type=None + name="LONG", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ORDER", index=10, number=10, serialized_options=None, type=None + name="ORDER", + index=10, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SPECIFIC", index=11, number=11, serialized_options=None, type=None + name="SPECIFIC", + index=11, + number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -359,18 +669,39 @@ full_name="google.cloud.language.v1.PartOfSpeech.Gender", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="GENDER_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="GENDER_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FEMININE", index=1, number=1, serialized_options=None, type=None + name="FEMININE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MASCULINE", index=2, number=2, serialized_options=None, type=None + name="MASCULINE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NEUTER", index=3, number=3, serialized_options=None, type=None + name="NEUTER", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -385,9 +716,15 @@ full_name="google.cloud.language.v1.PartOfSpeech.Mood", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="MOOD_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="MOOD_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CONDITIONAL_MOOD", @@ -395,21 +732,47 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IMPERATIVE", index=2, number=2, serialized_options=None, type=None + name="IMPERATIVE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INDICATIVE", index=3, number=3, serialized_options=None, type=None + name="INDICATIVE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INTERROGATIVE", index=4, number=4, serialized_options=None, type=None + name="INTERROGATIVE", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( - name="JUSSIVE", index=5, number=5, serialized_options=None, type=None + name="JUSSIVE", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SUBJUNCTIVE", index=6, number=6, serialized_options=None, type=None + name="SUBJUNCTIVE", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -424,18 +787,39 @@ full_name="google.cloud.language.v1.PartOfSpeech.Number", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="NUMBER_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="NUMBER_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SINGULAR", index=1, number=1, serialized_options=None, type=None + name="SINGULAR", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PLURAL", index=2, number=2, serialized_options=None, type=None + name="PLURAL", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DUAL", index=3, number=3, serialized_options=None, type=None + name="DUAL", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -450,18 +834,39 @@ full_name="google.cloud.language.v1.PartOfSpeech.Person", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="PERSON_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="PERSON_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FIRST", index=1, number=1, serialized_options=None, type=None + name="FIRST", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SECOND", index=2, number=2, serialized_options=None, type=None + name="SECOND", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="THIRD", index=3, number=3, serialized_options=None, type=None + name="THIRD", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="REFLEXIVE_PERSON", @@ -469,6 +874,7 @@ number=4, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -483,15 +889,31 @@ full_name="google.cloud.language.v1.PartOfSpeech.Proper", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="PROPER_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="PROPER_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PROPER", index=1, number=1, serialized_options=None, type=None + name="PROPER", + index=1, + number=1, + serialized_options=None, 
+ type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOT_PROPER", index=2, number=2, serialized_options=None, type=None + name="NOT_PROPER", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -506,6 +928,7 @@ full_name="google.cloud.language.v1.PartOfSpeech.Reciprocity", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="RECIPROCITY_UNKNOWN", @@ -513,12 +936,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RECIPROCAL", index=1, number=1, serialized_options=None, type=None + name="RECIPROCAL", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NON_RECIPROCAL", index=2, number=2, serialized_options=None, type=None + name="NON_RECIPROCAL", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -533,9 +967,15 @@ full_name="google.cloud.language.v1.PartOfSpeech.Tense", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="TENSE_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="TENSE_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CONDITIONAL_TENSE", @@ -543,21 +983,47 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FUTURE", index=2, number=2, serialized_options=None, type=None + name="FUTURE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PAST", index=3, number=3, serialized_options=None, type=None + name="PAST", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRESENT", index=4, number=4, serialized_options=None, type=None + name="PRESENT", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IMPERFECT", index=5, number=5, serialized_options=None, type=None + name="IMPERFECT", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PLUPERFECT", index=6, number=6, serialized_options=None, type=None + name="PLUPERFECT", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -572,18 +1038,39 @@ full_name="google.cloud.language.v1.PartOfSpeech.Voice", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="VOICE_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="VOICE_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ACTIVE", index=1, number=1, serialized_options=None, type=None + name="ACTIVE", + index=1, + number=1, 
+ serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CAUSATIVE", index=2, number=2, serialized_options=None, type=None + name="CAUSATIVE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PASSIVE", index=3, number=3, serialized_options=None, type=None + name="PASSIVE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -598,255 +1085,671 @@ full_name="google.cloud.language.v1.DependencyEdge.Label", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ABBREV", index=1, number=1, serialized_options=None, type=None + name="ABBREV", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ACOMP", index=2, number=2, serialized_options=None, type=None + name="ACOMP", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADVCL", index=3, number=3, serialized_options=None, type=None + name="ADVCL", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADVMOD", index=4, number=4, serialized_options=None, type=None + name="ADVMOD", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AMOD", index=5, number=5, serialized_options=None, type=None + name="AMOD", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="APPOS", index=6, number=6, serialized_options=None, type=None + name="APPOS", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ATTR", index=7, number=7, serialized_options=None, type=None + name="ATTR", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUX", index=8, number=8, serialized_options=None, type=None + name="AUX", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUXPASS", index=9, number=9, serialized_options=None, type=None + name="AUXPASS", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CC", index=10, number=10, serialized_options=None, type=None + name="CC", + index=10, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CCOMP", index=11, number=11, serialized_options=None, type=None + name="CCOMP", + index=11, + number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( - name="CONJ", index=12, number=12, serialized_options=None, type=None + name="CONJ", + index=12, + number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CSUBJ", index=13, number=13, serialized_options=None, type=None + name="CSUBJ", + index=13, + number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CSUBJPASS", index=14, number=14, serialized_options=None, type=None + name="CSUBJPASS", + index=14, + number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DEP", index=15, number=15, serialized_options=None, type=None + name="DEP", + index=15, + number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DET", index=16, number=16, serialized_options=None, type=None + name="DET", + index=16, + number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DISCOURSE", index=17, number=17, serialized_options=None, type=None + name="DISCOURSE", + index=17, + number=17, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DOBJ", index=18, number=18, serialized_options=None, type=None + name="DOBJ", + index=18, + number=18, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="EXPL", index=19, number=19, serialized_options=None, type=None + name="EXPL", + index=19, + number=19, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GOESWITH", index=20, number=20, serialized_options=None, type=None + name="GOESWITH", + index=20, + number=20, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IOBJ", index=21, number=21, serialized_options=None, type=None + name="IOBJ", + index=21, + number=21, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MARK", index=22, number=22, serialized_options=None, type=None + name="MARK", + index=22, + number=22, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MWE", index=23, number=23, serialized_options=None, type=None + name="MWE", + index=23, + number=23, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MWV", index=24, number=24, serialized_options=None, type=None + name="MWV", + index=24, + number=24, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NEG", index=25, number=25, serialized_options=None, type=None + name="NEG", + index=25, + number=25, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NN", index=26, number=26, serialized_options=None, type=None + name="NN", + index=26, + number=26, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( - name="NPADVMOD", index=27, number=27, serialized_options=None, type=None + name="NPADVMOD", + index=27, + number=27, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NSUBJ", index=28, number=28, serialized_options=None, type=None + name="NSUBJ", + index=28, + number=28, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NSUBJPASS", index=29, number=29, serialized_options=None, type=None + name="NSUBJPASS", + index=29, + number=29, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUM", index=30, number=30, serialized_options=None, type=None + name="NUM", + index=30, + number=30, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUMBER", index=31, number=31, serialized_options=None, type=None + name="NUMBER", + index=31, + number=31, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="P", index=32, number=32, serialized_options=None, type=None + name="P", + index=32, + number=32, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PARATAXIS", index=33, number=33, serialized_options=None, type=None + name="PARATAXIS", + index=33, + number=33, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PARTMOD", index=34, number=34, serialized_options=None, type=None + name="PARTMOD", + index=34, + number=34, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PCOMP", index=35, number=35, serialized_options=None, type=None + name="PCOMP", + index=35, + number=35, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POBJ", index=36, number=36, serialized_options=None, type=None + name="POBJ", + index=36, + number=36, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSS", index=37, number=37, serialized_options=None, type=None + name="POSS", + index=37, + number=37, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSTNEG", index=38, number=38, serialized_options=None, type=None + name="POSTNEG", + index=38, + number=38, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRECOMP", index=39, number=39, serialized_options=None, type=None + name="PRECOMP", + index=39, + number=39, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRECONJ", index=40, number=40, serialized_options=None, type=None + name="PRECONJ", + index=40, + number=40, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PREDET", index=41, number=41, serialized_options=None, type=None + name="PREDET", + index=41, + number=41, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PREF", index=42, number=42, serialized_options=None, type=None + name="PREF", + index=42, + number=42, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PREP", index=43, number=43, serialized_options=None, type=None + name="PREP", + index=43, + number=43, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRONL", index=44, number=44, serialized_options=None, type=None + name="PRONL", + index=44, + number=44, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRT", index=45, number=45, serialized_options=None, type=None + name="PRT", + index=45, + number=45, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PS", index=46, number=46, serialized_options=None, type=None + name="PS", + index=46, + number=46, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="QUANTMOD", index=47, number=47, serialized_options=None, type=None + name="QUANTMOD", + index=47, + number=47, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RCMOD", index=48, number=48, serialized_options=None, type=None + name="RCMOD", + index=48, + number=48, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RCMODREL", index=49, number=49, serialized_options=None, type=None + name="RCMODREL", + index=49, + number=49, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RDROP", index=50, number=50, serialized_options=None, type=None + name="RDROP", + index=50, + number=50, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="REF", index=51, number=51, serialized_options=None, type=None + name="REF", + index=51, + number=51, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="REMNANT", index=52, number=52, serialized_options=None, type=None + name="REMNANT", + index=52, + number=52, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="REPARANDUM", index=53, number=53, serialized_options=None, type=None + name="REPARANDUM", + index=53, + number=53, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ROOT", index=54, number=54, serialized_options=None, type=None + name="ROOT", + index=54, + number=54, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SNUM", index=55, number=55, serialized_options=None, type=None + name="SNUM", + index=55, + number=55, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SUFF", index=56, number=56, serialized_options=None, type=None + name="SUFF", + index=56, + number=56, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TMOD", index=57, number=57, serialized_options=None, type=None + name="TMOD", + index=57, + number=57, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TOPIC", index=58, number=58, serialized_options=None, type=None + name="TOPIC", + index=58, + number=58, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VMOD", index=59, number=59, serialized_options=None, type=None + name="VMOD", + index=59, + number=59, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VOCATIVE", index=60, number=60, serialized_options=None, type=None + name="VOCATIVE", + index=60, + number=60, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="XCOMP", index=61, number=61, serialized_options=None, type=None + name="XCOMP", + index=61, + number=61, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SUFFIX", index=62, number=62, serialized_options=None, type=None + name="SUFFIX", + index=62, + number=62, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TITLE", index=63, number=63, serialized_options=None, type=None + name="TITLE", + index=63, + number=63, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADVPHMOD", index=64, number=64, serialized_options=None, type=None + name="ADVPHMOD", + index=64, + number=64, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUXCAUS", index=65, number=65, serialized_options=None, type=None + name="AUXCAUS", + index=65, + number=65, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUXVV", index=66, number=66, serialized_options=None, type=None + name="AUXVV", + index=66, + number=66, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DTMOD", index=67, number=67, serialized_options=None, type=None + name="DTMOD", + index=67, + number=67, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FOREIGN", index=68, number=68, serialized_options=None, type=None + name="FOREIGN", + index=68, + number=68, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="KW", index=69, number=69, serialized_options=None, type=None + name="KW", + index=69, + number=69, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LIST", index=70, number=70, serialized_options=None, type=None + name="LIST", + index=70, + number=70, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOMC", index=71, number=71, serialized_options=None, type=None + name="NOMC", + index=71, + number=71, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOMCSUBJ", index=72, number=72, serialized_options=None, type=None + name="NOMCSUBJ", + index=72, + number=72, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOMCSUBJPASS", index=73, number=73, serialized_options=None, type=None + name="NOMCSUBJPASS", + index=73, + number=73, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUMC", index=74, number=74, serialized_options=None, type=None + name="NUMC", + index=74, + number=74, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="COP", index=75, number=75, serialized_options=None, type=None + name="COP", + index=75, + number=75, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DISLOCATED", index=76, number=76, serialized_options=None, type=None + name="DISLOCATED", + index=76, + number=76, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ASP", index=77, number=77, serialized_options=None, type=None + name="ASP", + index=77, + number=77, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GMOD", index=78, number=78, serialized_options=None, type=None + name="GMOD", + index=78, + number=78, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GOBJ", index=79, number=79, serialized_options=None, type=None + name="GOBJ", + index=79, + number=79, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INFMOD", index=80, number=80, serialized_options=None, type=None + name="INFMOD", + index=80, + number=80, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MES", index=81, number=81, serialized_options=None, type=None + name="MES", + index=81, + number=81, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NCOMP", index=82, number=82, serialized_options=None, type=None + name="NCOMP", + index=82, + number=82, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -861,15 +1764,31 @@ full_name="google.cloud.language.v1.EntityMention.Type", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="TYPE_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="TYPE_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PROPER", index=1, number=1, serialized_options=None, type=None + name="PROPER", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="COMMON", index=2, number=2, serialized_options=None, type=None + name="COMMON", + index=2, + number=2, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -886,6 +1805,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="type", @@ -904,6 +1824,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="content", @@ -914,7 +1835,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -922,6 +1843,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="gcs_content_uri", @@ -932,7 +1854,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -940,6 +1862,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -950,7 +1873,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -958,6 +1881,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -973,6 +1897,7 @@ full_name="google.cloud.language.v1.Document.source", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ) ], @@ -987,6 +1912,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -1005,6 +1931,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sentiment", @@ -1023,6 +1950,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1044,6 +1972,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -1054,7 +1983,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1062,6 +1991,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -1072,7 +2002,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1080,12 +2010,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1100,6 +2031,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1110,7 +2042,7 @@ cpp_type=9, label=1, has_default_value=False, - 
default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1118,6 +2050,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type", @@ -1136,6 +2069,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="metadata", @@ -1154,6 +2088,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="salience", @@ -1172,6 +2107,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mentions", @@ -1190,6 +2126,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sentiment", @@ -1208,6 +2145,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1229,6 +2167,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -1247,6 +2186,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="part_of_speech", @@ -1265,6 +2205,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="dependency_edge", @@ -1283,6 +2224,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="lemma", @@ -1293,7 +2235,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1301,6 +2243,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1322,6 +2265,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="magnitude", @@ -1340,6 +2284,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", @@ -1358,6 +2303,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1379,6 +2325,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="tag", @@ -1397,6 +2344,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="aspect", @@ -1415,6 +2363,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="case", @@ -1433,6 +2382,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="form", @@ -1451,6 +2401,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="gender", @@ -1469,6 +2420,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mood", @@ -1487,6 +2439,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="number", @@ -1505,6 +2458,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="person", @@ -1523,6 +2477,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="proper", @@ -1541,6 +2496,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="reciprocity", @@ -1559,6 +2515,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tense", @@ -1577,6 +2534,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="voice", @@ -1595,6 +2553,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1629,6 +2588,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="head_token_index", @@ -1647,6 +2607,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="label", @@ -1665,6 +2626,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1686,6 +2648,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -1704,6 +2667,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type", @@ -1722,6 +2686,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sentiment", @@ -1740,6 +2705,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1761,6 +2727,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="content", @@ -1771,7 +2738,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1779,6 +2746,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="begin_offset", @@ -1797,6 +2765,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1818,6 +2787,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, 
+ create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1828,7 +2798,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1836,6 +2806,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1854,6 +2825,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1875,6 +2847,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -1891,8 +2864,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -1911,6 +2885,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1932,6 +2907,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document_sentiment", @@ -1950,6 +2926,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -1960,7 +2937,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1968,6 +2945,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sentences", @@ -1986,6 +2964,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2007,6 +2986,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2023,8 +3003,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -2043,6 +3024,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2064,6 +3046,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entities", @@ -2082,6 +3065,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -2092,7 +3076,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2100,6 +3084,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2121,6 +3106,7 @@ filename=None, file=DESCRIPTOR, 
containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2137,8 +3123,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -2157,6 +3144,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2178,6 +3166,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entities", @@ -2196,6 +3185,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -2206,7 +3196,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2214,6 +3204,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2235,6 +3226,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2251,8 +3243,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -2271,6 +3264,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2292,6 +3286,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="sentences", @@ -2310,6 +3305,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tokens", @@ -2328,6 +3324,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -2338,7 +3335,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2346,6 +3343,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2367,6 +3365,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2383,8 +3382,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2406,6 +3406,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="categories", @@ -2424,6 +3425,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2445,6 +3447,7 @@ 
filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="extract_syntax", @@ -2463,6 +3466,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="extract_entities", @@ -2481,6 +3485,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="extract_document_sentiment", @@ -2499,6 +3504,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="extract_entity_sentiment", @@ -2517,6 +3523,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="classify_text", @@ -2535,6 +3542,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2555,6 +3563,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2571,8 +3580,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="features", @@ -2589,8 +3599,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -2609,6 +3620,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2630,6 +3642,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="sentences", @@ -2648,6 +3661,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tokens", @@ -2666,6 +3680,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="entities", @@ -2684,6 +3699,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="document_sentiment", @@ -2702,6 +3718,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -2712,7 +3729,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2720,6 +3737,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="categories", @@ -2738,6 +3756,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2868,11 +3887,12 @@ Document = _reflection.GeneratedProtocolMessageType( "Document", (_message.Message,), - dict( - 
DESCRIPTOR=_DOCUMENT, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""################################################################ # + { + "DESCRIPTOR": _DOCUMENT, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """################################################################ # Represents the input to API methods. + Attributes: type: Required. If the type is not set or is ``TYPE_UNSPECIFIED``, @@ -2886,7 +3906,7 @@ gcs_content_uri: The Google Cloud Storage URI where the file content is located. This URI must be of the form: - gs://bucket\_name/object\_name. For more details, see + gs://bucket_name/object_name. For more details, see https://cloud.google.com/storage/docs/reference-uris. NOTE: Cloud Storage object versioning is not supported. language: @@ -2900,50 +3920,52 @@ ``INVALID_ARGUMENT`` error is returned. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Document) - ), + }, ) _sym_db.RegisterMessage(Document) Sentence = _reflection.GeneratedProtocolMessageType( "Sentence", (_message.Message,), - dict( - DESCRIPTOR=_SENTENCE, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents a sentence in the input document. + { + "DESCRIPTOR": _SENTENCE, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents a sentence in the input document. + Attributes: text: The sentence text. sentiment: For calls to [AnalyzeSentiment][] or if [AnnotateTextRequest.F - eatures.extract\_document\_sentiment][google.cloud.language.v1 - .AnnotateTextRequest.Features.extract\_document\_sentiment] is - set to true, this field will contain the sentiment for the + eatures.extract_document_sentiment][google.cloud.language.v1.A + nnotateTextRequest.Features.extract_document_sentiment] is set + to true, this field will contain the sentiment for the sentence. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Sentence) - ), + }, ) _sym_db.RegisterMessage(Sentence) Entity = _reflection.GeneratedProtocolMessageType( "Entity", (_message.Message,), - dict( - MetadataEntry=_reflection.GeneratedProtocolMessageType( + { + "MetadataEntry": _reflection.GeneratedProtocolMessageType( "MetadataEntry", (_message.Message,), - dict( - DESCRIPTOR=_ENTITY_METADATAENTRY, - __module__="google.cloud.language_v1.proto.language_service_pb2" + { + "DESCRIPTOR": _ENTITY_METADATAENTRY, + "__module__": "google.cloud.language_v1.proto.language_service_pb2" # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Entity.MetadataEntry) - ), + }, ), - DESCRIPTOR=_ENTITY, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents a phrase in the text that is a known entity, such as a + "DESCRIPTOR": _ENTITY, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents a phrase in the text that is a known entity, such as a person, an organization, or location. The API associates information, such as salience and mentions, with entities. + Attributes: name: The representative name for the entity. @@ -2966,13 +3988,13 @@ currently supports proper noun mentions. 
sentiment: For calls to [AnalyzeEntitySentiment][] or if [AnnotateTextReq - uest.Features.extract\_entity\_sentiment][google.cloud.languag - e.v1.AnnotateTextRequest.Features.extract\_entity\_sentiment] - is set to true, this field will contain the aggregate - sentiment expressed for this entity in the provided document. + uest.Features.extract_entity_sentiment][google.cloud.language. + v1.AnnotateTextRequest.Features.extract_entity_sentiment] is + set to true, this field will contain the aggregate sentiment + expressed for this entity in the provided document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Entity) - ), + }, ) _sym_db.RegisterMessage(Entity) _sym_db.RegisterMessage(Entity.MetadataEntry) @@ -2980,10 +4002,11 @@ Token = _reflection.GeneratedProtocolMessageType( "Token", (_message.Message,), - dict( - DESCRIPTOR=_TOKEN, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents the smallest syntactic building block of the text. + { + "DESCRIPTOR": _TOKEN, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents the smallest syntactic building block of the text. + Attributes: text: The token text. @@ -2997,18 +4020,19 @@ the token. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Token) - ), + }, ) _sym_db.RegisterMessage(Token) Sentiment = _reflection.GeneratedProtocolMessageType( "Sentiment", (_message.Message,), - dict( - DESCRIPTOR=_SENTIMENT, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents the feeling associated with the entire text or entities in + { + "DESCRIPTOR": _SENTIMENT, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents the feeling associated with the entire text or entities in the text. + Attributes: magnitude: A non-negative number in the [0, +inf) range, which represents @@ -3019,19 +4043,20 @@ (positive sentiment). """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Sentiment) - ), + }, ) _sym_db.RegisterMessage(Sentiment) PartOfSpeech = _reflection.GeneratedProtocolMessageType( "PartOfSpeech", (_message.Message,), - dict( - DESCRIPTOR=_PARTOFSPEECH, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents part of speech information for a token. Parts of speech are + { + "DESCRIPTOR": _PARTOFSPEECH, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents part of speech information for a token. Parts of speech are as defined in http://www.lrec- - conf.org/proceedings/lrec2012/pdf/274\_Paper.pdf + conf.org/proceedings/lrec2012/pdf/274_Paper.pdf + Attributes: tag: The part of speech tag. @@ -3059,19 +4084,20 @@ The grammatical voice. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.PartOfSpeech) - ), + }, ) _sym_db.RegisterMessage(PartOfSpeech) DependencyEdge = _reflection.GeneratedProtocolMessageType( "DependencyEdge", (_message.Message,), - dict( - DESCRIPTOR=_DEPENDENCYEDGE, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents dependency parse tree information for a token. (For more + { + "DESCRIPTOR": _DEPENDENCYEDGE, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents dependency parse tree information for a token. 
(For more information on dependency labels, see http://www.aclweb.org/anthology/P13-2017 + Attributes: head_token_index: Represents the head of this token in the dependency tree. This @@ -3083,18 +4109,19 @@ The parse label for the token. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.DependencyEdge) - ), + }, ) _sym_db.RegisterMessage(DependencyEdge) EntityMention = _reflection.GeneratedProtocolMessageType( "EntityMention", (_message.Message,), - dict( - DESCRIPTOR=_ENTITYMENTION, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents a mention for an entity in the text. Currently, proper noun + { + "DESCRIPTOR": _ENTITYMENTION, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents a mention for an entity in the text. Currently, proper noun mentions are supported. + Attributes: text: The mention text. @@ -3102,24 +4129,24 @@ The type of the entity mention. sentiment: For calls to [AnalyzeEntitySentiment][] or if [AnnotateTextReq - uest.Features.extract\_entity\_sentiment][google.cloud.languag - e.v1.AnnotateTextRequest.Features.extract\_entity\_sentiment] - is set to true, this field will contain the sentiment - expressed for this mention of the entity in the provided - document. + uest.Features.extract_entity_sentiment][google.cloud.language. + v1.AnnotateTextRequest.Features.extract_entity_sentiment] is + set to true, this field will contain the sentiment expressed + for this mention of the entity in the provided document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.EntityMention) - ), + }, ) _sym_db.RegisterMessage(EntityMention) TextSpan = _reflection.GeneratedProtocolMessageType( "TextSpan", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSPAN, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents an output piece of text. + { + "DESCRIPTOR": _TEXTSPAN, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents an output piece of text. + Attributes: content: The content of the output text. @@ -3130,39 +4157,41 @@ specified in the API request. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.TextSpan) - ), + }, ) _sym_db.RegisterMessage(TextSpan) ClassificationCategory = _reflection.GeneratedProtocolMessageType( "ClassificationCategory", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONCATEGORY, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""Represents a category returned from the text classifier. + { + "DESCRIPTOR": _CLASSIFICATIONCATEGORY, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """Represents a category returned from the text classifier. + Attributes: name: The name of the category representing the document, from the `predefined taxonomy `__. confidence: - The classifier's confidence of the category. Number represents + The classifier’s confidence of the category. Number represents how certain the classifier is that this category represents the given text. 
""", # @@protoc_insertion_point(class_scope:google.cloud.language.v1.ClassificationCategory) - ), + }, ) _sym_db.RegisterMessage(ClassificationCategory) AnalyzeSentimentRequest = _reflection.GeneratedProtocolMessageType( "AnalyzeSentimentRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZESENTIMENTREQUEST, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The sentiment analysis request message. + { + "DESCRIPTOR": _ANALYZESENTIMENTREQUEST, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The sentiment analysis request message. + Attributes: document: Input document. @@ -3171,17 +4200,18 @@ offsets. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnalyzeSentimentRequest) - ), + }, ) _sym_db.RegisterMessage(AnalyzeSentimentRequest) AnalyzeSentimentResponse = _reflection.GeneratedProtocolMessageType( "AnalyzeSentimentResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZESENTIMENTRESPONSE, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The sentiment analysis response message. + { + "DESCRIPTOR": _ANALYZESENTIMENTRESPONSE, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The sentiment analysis response message. + Attributes: document_sentiment: The overall sentiment of the input document. @@ -3194,17 +4224,18 @@ The sentiment for all the sentences in the document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnalyzeSentimentResponse) - ), + }, ) _sym_db.RegisterMessage(AnalyzeSentimentResponse) AnalyzeEntitySentimentRequest = _reflection.GeneratedProtocolMessageType( "AnalyzeEntitySentimentRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZEENTITYSENTIMENTREQUEST, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The entity-level sentiment analysis request message. + { + "DESCRIPTOR": _ANALYZEENTITYSENTIMENTREQUEST, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The entity-level sentiment analysis request message. + Attributes: document: Input document. @@ -3212,17 +4243,18 @@ The encoding type used by the API to calculate offsets. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnalyzeEntitySentimentRequest) - ), + }, ) _sym_db.RegisterMessage(AnalyzeEntitySentimentRequest) AnalyzeEntitySentimentResponse = _reflection.GeneratedProtocolMessageType( "AnalyzeEntitySentimentResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZEENTITYSENTIMENTRESPONSE, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The entity-level sentiment analysis response message. + { + "DESCRIPTOR": _ANALYZEENTITYSENTIMENTRESPONSE, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The entity-level sentiment analysis response message. + Attributes: entities: The recognized entities in the input document with associated @@ -3234,17 +4266,18 @@ e.cloud.language.v1.Document.language] field for more details. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnalyzeEntitySentimentResponse) - ), + }, ) _sym_db.RegisterMessage(AnalyzeEntitySentimentResponse) AnalyzeEntitiesRequest = _reflection.GeneratedProtocolMessageType( "AnalyzeEntitiesRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZEENTITIESREQUEST, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The entity analysis request message. 
+ { + "DESCRIPTOR": _ANALYZEENTITIESREQUEST, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The entity analysis request message. + Attributes: document: Input document. @@ -3252,17 +4285,18 @@ The encoding type used by the API to calculate offsets. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnalyzeEntitiesRequest) - ), + }, ) _sym_db.RegisterMessage(AnalyzeEntitiesRequest) AnalyzeEntitiesResponse = _reflection.GeneratedProtocolMessageType( "AnalyzeEntitiesResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZEENTITIESRESPONSE, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The entity analysis response message. + { + "DESCRIPTOR": _ANALYZEENTITIESRESPONSE, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The entity analysis response message. + Attributes: entities: The recognized entities in the input document. @@ -3273,17 +4307,18 @@ e.cloud.language.v1.Document.language] field for more details. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnalyzeEntitiesResponse) - ), + }, ) _sym_db.RegisterMessage(AnalyzeEntitiesResponse) AnalyzeSyntaxRequest = _reflection.GeneratedProtocolMessageType( "AnalyzeSyntaxRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZESYNTAXREQUEST, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The syntax analysis request message. + { + "DESCRIPTOR": _ANALYZESYNTAXREQUEST, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The syntax analysis request message. + Attributes: document: Input document. @@ -3291,17 +4326,18 @@ The encoding type used by the API to calculate offsets. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnalyzeSyntaxRequest) - ), + }, ) _sym_db.RegisterMessage(AnalyzeSyntaxRequest) AnalyzeSyntaxResponse = _reflection.GeneratedProtocolMessageType( "AnalyzeSyntaxResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZESYNTAXRESPONSE, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The syntax analysis response message. + { + "DESCRIPTOR": _ANALYZESYNTAXRESPONSE, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The syntax analysis response message. + Attributes: sentences: Sentences in the input document. @@ -3315,55 +4351,58 @@ e.cloud.language.v1.Document.language] field for more details. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnalyzeSyntaxResponse) - ), + }, ) _sym_db.RegisterMessage(AnalyzeSyntaxResponse) ClassifyTextRequest = _reflection.GeneratedProtocolMessageType( "ClassifyTextRequest", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFYTEXTREQUEST, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The document classification request message. + { + "DESCRIPTOR": _CLASSIFYTEXTREQUEST, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The document classification request message. + Attributes: document: Input document. 
""", # @@protoc_insertion_point(class_scope:google.cloud.language.v1.ClassifyTextRequest) - ), + }, ) _sym_db.RegisterMessage(ClassifyTextRequest) ClassifyTextResponse = _reflection.GeneratedProtocolMessageType( "ClassifyTextResponse", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFYTEXTRESPONSE, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The document classification response message. + { + "DESCRIPTOR": _CLASSIFYTEXTRESPONSE, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The document classification response message. + Attributes: categories: Categories representing the input document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.ClassifyTextResponse) - ), + }, ) _sym_db.RegisterMessage(ClassifyTextResponse) AnnotateTextRequest = _reflection.GeneratedProtocolMessageType( "AnnotateTextRequest", (_message.Message,), - dict( - Features=_reflection.GeneratedProtocolMessageType( + { + "Features": _reflection.GeneratedProtocolMessageType( "Features", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATETEXTREQUEST_FEATURES, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""All available features for sentiment, syntax, and semantic analysis. + { + "DESCRIPTOR": _ANNOTATETEXTREQUEST_FEATURES, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """All available features for sentiment, syntax, and semantic analysis. Setting each one to true will enable that specific analysis for the input. + Attributes: extract_syntax: Extract syntax information. @@ -3377,12 +4416,13 @@ Classify the full document into categories. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnnotateTextRequest.Features) - ), + }, ), - DESCRIPTOR=_ANNOTATETEXTREQUEST, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The request message for the text annotation API, which can perform + "DESCRIPTOR": _ANNOTATETEXTREQUEST, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The request message for the text annotation API, which can perform multiple analysis types (sentiment, entities, and syntax) in one call. + Attributes: document: Input document. @@ -3392,7 +4432,7 @@ The encoding type used by the API to calculate offsets. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnnotateTextRequest) - ), + }, ) _sym_db.RegisterMessage(AnnotateTextRequest) _sym_db.RegisterMessage(AnnotateTextRequest.Features) @@ -3400,30 +4440,31 @@ AnnotateTextResponse = _reflection.GeneratedProtocolMessageType( "AnnotateTextResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATETEXTRESPONSE, - __module__="google.cloud.language_v1.proto.language_service_pb2", - __doc__="""The text annotations response message. + { + "DESCRIPTOR": _ANNOTATETEXTRESPONSE, + "__module__": "google.cloud.language_v1.proto.language_service_pb2", + "__doc__": """The text annotations response message. + Attributes: sentences: Sentences in the input document. Populated if the user enables - [AnnotateTextRequest.Features.extract\_syntax][google.cloud.la - nguage.v1.AnnotateTextRequest.Features.extract\_syntax]. + [AnnotateTextRequest.Features.extract_syntax][google.cloud.lan + guage.v1.AnnotateTextRequest.Features.extract_syntax]. tokens: Tokens, along with their syntactic information, in the input document. 
Populated if the user enables [AnnotateTextRequest.F - eatures.extract\_syntax][google.cloud.language.v1.AnnotateText - Request.Features.extract\_syntax]. + eatures.extract_syntax][google.cloud.language.v1.AnnotateTextR + equest.Features.extract_syntax]. entities: Entities, along with their semantic information, in the input document. Populated if the user enables [AnnotateTextRequest.F - eatures.extract\_entities][google.cloud.language.v1.AnnotateTe - xtRequest.Features.extract\_entities]. + eatures.extract_entities][google.cloud.language.v1.AnnotateTex + tRequest.Features.extract_entities]. document_sentiment: The overall sentiment for the document. Populated if the user - enables [AnnotateTextRequest.Features.extract\_document\_senti - ment][google.cloud.language.v1.AnnotateTextRequest.Features.ex - tract\_document\_sentiment]. + enables [AnnotateTextRequest.Features.extract_document_sentime + nt][google.cloud.language.v1.AnnotateTextRequest.Features.extr + act_document_sentiment]. language: The language of the text, which will be the same as the language specified in the request or, if not specified, the @@ -3433,7 +4474,7 @@ Categories identified in the input document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.AnnotateTextResponse) - ), + }, ) _sym_db.RegisterMessage(AnnotateTextResponse) @@ -3453,9 +4494,8 @@ full_name="google.cloud.language.v1.LanguageService", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A\027language.googleapis.com\322A]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platform" - ), + serialized_options=b"\312A\027language.googleapis.com\322A]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=6801, serialized_end=8129, methods=[ @@ -3466,9 +4506,8 @@ containing_service=None, input_type=_ANALYZESENTIMENTREQUEST, output_type=_ANALYZESENTIMENTRESPONSE, - serialized_options=_b( - '\202\323\344\223\002#"\036/v1/documents:analyzeSentiment:\001*\332A\026document,encoding_type\332A\010document' - ), + serialized_options=b'\202\323\344\223\002#"\036/v1/documents:analyzeSentiment:\001*\332A\026document,encoding_type\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="AnalyzeEntities", @@ -3477,9 +4516,8 @@ containing_service=None, input_type=_ANALYZEENTITIESREQUEST, output_type=_ANALYZEENTITIESRESPONSE, - serialized_options=_b( - '\202\323\344\223\002""\035/v1/documents:analyzeEntities:\001*\332A\026document,encoding_type\332A\010document' - ), + serialized_options=b'\202\323\344\223\002""\035/v1/documents:analyzeEntities:\001*\332A\026document,encoding_type\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="AnalyzeEntitySentiment", @@ -3488,9 +4526,8 @@ containing_service=None, input_type=_ANALYZEENTITYSENTIMENTREQUEST, output_type=_ANALYZEENTITYSENTIMENTRESPONSE, - serialized_options=_b( - '\202\323\344\223\002)"$/v1/documents:analyzeEntitySentiment:\001*\332A\026document,encoding_type\332A\010document' - ), + serialized_options=b'\202\323\344\223\002)"$/v1/documents:analyzeEntitySentiment:\001*\332A\026document,encoding_type\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( 
name="AnalyzeSyntax", @@ -3499,9 +4536,8 @@ containing_service=None, input_type=_ANALYZESYNTAXREQUEST, output_type=_ANALYZESYNTAXRESPONSE, - serialized_options=_b( - '\202\323\344\223\002 "\033/v1/documents:analyzeSyntax:\001*\332A\026document,encoding_type\332A\010document' - ), + serialized_options=b'\202\323\344\223\002 "\033/v1/documents:analyzeSyntax:\001*\332A\026document,encoding_type\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ClassifyText", @@ -3510,9 +4546,8 @@ containing_service=None, input_type=_CLASSIFYTEXTREQUEST, output_type=_CLASSIFYTEXTRESPONSE, - serialized_options=_b( - '\202\323\344\223\002\037"\032/v1/documents:classifyText:\001*\332A\010document' - ), + serialized_options=b'\202\323\344\223\002\037"\032/v1/documents:classifyText:\001*\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="AnnotateText", @@ -3521,9 +4556,8 @@ containing_service=None, input_type=_ANNOTATETEXTREQUEST, output_type=_ANNOTATETEXTRESPONSE, - serialized_options=_b( - '\202\323\344\223\002\037"\032/v1/documents:annotateText:\001*\332A\037document,features,encoding_type\332A\021document,features' - ), + serialized_options=b'\202\323\344\223\002\037"\032/v1/documents:annotateText:\001*\332A\037document,features,encoding_type\332A\021document,features', + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/google/cloud/language_v1beta2/gapic/enums.py b/google/cloud/language_v1beta2/gapic/enums.py index 6ed586f4..f6a7be9e 100644 --- a/google/cloud/language_v1beta2/gapic/enums.py +++ b/google/cloud/language_v1beta2/gapic/enums.py @@ -21,24 +21,24 @@ class EncodingType(enum.IntEnum): """ - Represents the text encoding that the caller uses to process the output. - Providing an ``EncodingType`` is recommended because the API provides - the beginning offsets for various outputs, such as tokens and mentions, - and languages that natively use different text encodings may access - offsets differently. + Represents the text encoding that the caller uses to process the + output. Providing an ``EncodingType`` is recommended because the API + provides the beginning offsets for various outputs, such as tokens and + mentions, and languages that natively use different text encodings may + access offsets differently. Attributes: NONE (int): If ``EncodingType`` is not specified, encoding-dependent information (such as ``begin_offset``) will be set at ``-1``. - UTF8 (int): Encoding-dependent information (such as ``begin_offset``) is calculated - based on the UTF-8 encoding of the input. C++ and Go are examples of - languages that use this encoding natively. - UTF16 (int): Encoding-dependent information (such as ``begin_offset``) is calculated - based on the UTF-16 encoding of the input. Java and JavaScript are + UTF8 (int): Encoding-dependent information (such as ``begin_offset``) is + calculated based on the UTF-8 encoding of the input. C++ and Go are examples of languages that use this encoding natively. - UTF32 (int): Encoding-dependent information (such as ``begin_offset``) is calculated - based on the UTF-32 encoding of the input. Python is an example of a - language that uses this encoding natively. + UTF16 (int): Encoding-dependent information (such as ``begin_offset``) is + calculated based on the UTF-16 encoding of the input. Java and + JavaScript are examples of languages that use this encoding natively. 
+ UTF32 (int): Encoding-dependent information (such as ``begin_offset``) is + calculated based on the UTF-32 encoding of the input. Python is an + example of a language that uses this encoding natively. """ NONE = 0 @@ -242,8 +242,8 @@ class Type(enum.IntEnum): class Entity(object): class Type(enum.IntEnum): """ - The type of the entity. For most entity types, the associated metadata - is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph MID + The type of the entity. For most entity types, the associated + metadata is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph MID (``mid``). The table below lists the associated fields for entities that have different metadata. diff --git a/google/cloud/language_v1beta2/gapic/language_service_client.py b/google/cloud/language_v1beta2/gapic/language_service_client.py index 34beaaec..8d3f9557 100644 --- a/google/cloud/language_v1beta2/gapic/language_service_client.py +++ b/google/cloud/language_v1beta2/gapic/language_service_client.py @@ -325,8 +325,8 @@ def analyze_entity_sentiment( metadata=None, ): """ - Finds entities, similar to ``AnalyzeEntities`` in the text and analyzes - sentiment associated with each entity and its mentions. + Finds entities, similar to ``AnalyzeEntities`` in the text and + analyzes sentiment associated with each entity and its mentions. Example: >>> from google.cloud import language_v1beta2 diff --git a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py index a4f42bdf..1fd3fba2 100644 --- a/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py +++ b/google/cloud/language_v1beta2/gapic/transports/language_service_grpc_transport.py @@ -144,8 +144,8 @@ def analyze_entities(self): def analyze_entity_sentiment(self): """Return the gRPC stub for :meth:`LanguageServiceClient.analyze_entity_sentiment`. - Finds entities, similar to ``AnalyzeEntities`` in the text and analyzes - sentiment associated with each entity and its mentions. + Finds entities, similar to ``AnalyzeEntities`` in the text and + analyzes sentiment associated with each entity and its mentions. Returns: Callable: A callable which accepts the appropriate diff --git a/google/cloud/language_v1beta2/proto/language_service_pb2.py b/google/cloud/language_v1beta2/proto/language_service_pb2.py index 7dacee38..ff31f8e6 100644 --- a/google/cloud/language_v1beta2/proto/language_service_pb2.py +++ b/google/cloud/language_v1beta2/proto/language_service_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/language_v1beta2/proto/language_service.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -26,12 +23,9 @@ name="google/cloud/language_v1beta2/proto/language_service.proto", package="google.cloud.language.v1beta2", syntax="proto3", - serialized_options=_b( - "\n!com.google.cloud.language.v1beta2B\024LanguageServiceProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/language/v1beta2;language" - ), - serialized_pb=_b( - '\n:google/cloud/language_v1beta2/proto/language_service.proto\x12\x1dgoogle.cloud.language.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x08\x44ocument\x12:\n\x04type\x18\x01 \x01(\x0e\x32,.google.cloud.language.v1beta2.Document.Type\x12\x11\n\x07\x63ontent\x18\x02 \x01(\tH\x00\x12\x19\n\x0fgcs_content_uri\x18\x03 \x01(\tH\x00\x12\x10\n\x08language\x18\x04 \x01(\t"6\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPLAIN_TEXT\x10\x01\x12\x08\n\x04HTML\x10\x02\x42\x08\n\x06source"~\n\x08Sentence\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12;\n\tsentiment\x18\x02 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment"\x93\x04\n\x06\x45ntity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x04type\x18\x02 \x01(\x0e\x32*.google.cloud.language.v1beta2.Entity.Type\x12\x45\n\x08metadata\x18\x03 \x03(\x0b\x32\x33.google.cloud.language.v1beta2.Entity.MetadataEntry\x12\x10\n\x08salience\x18\x04 \x01(\x02\x12>\n\x08mentions\x18\x05 \x03(\x0b\x32,.google.cloud.language.v1beta2.EntityMention\x12;\n\tsentiment\x18\x06 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb9\x01\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06PERSON\x10\x01\x12\x0c\n\x08LOCATION\x10\x02\x12\x10\n\x0cORGANIZATION\x10\x03\x12\t\n\x05\x45VENT\x10\x04\x12\x0f\n\x0bWORK_OF_ART\x10\x05\x12\x11\n\rCONSUMER_GOOD\x10\x06\x12\t\n\x05OTHER\x10\x07\x12\x10\n\x0cPHONE_NUMBER\x10\t\x12\x0b\n\x07\x41\x44\x44RESS\x10\n\x12\x08\n\x04\x44\x41TE\x10\x0b\x12\n\n\x06NUMBER\x10\x0c\x12\t\n\x05PRICE\x10\r"\xda\x01\n\x05Token\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12\x43\n\x0epart_of_speech\x18\x02 \x01(\x0b\x32+.google.cloud.language.v1beta2.PartOfSpeech\x12\x46\n\x0f\x64\x65pendency_edge\x18\x03 \x01(\x0b\x32-.google.cloud.language.v1beta2.DependencyEdge\x12\r\n\x05lemma\x18\x04 \x01(\t"-\n\tSentiment\x12\x11\n\tmagnitude\x18\x02 \x01(\x02\x12\r\n\x05score\x18\x03 \x01(\x02"\xdf\x10\n\x0cPartOfSpeech\x12<\n\x03tag\x18\x01 \x01(\x0e\x32/.google.cloud.language.v1beta2.PartOfSpeech.Tag\x12\x42\n\x06\x61spect\x18\x02 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Aspect\x12>\n\x04\x63\x61se\x18\x03 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Case\x12>\n\x04\x66orm\x18\x04 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Form\x12\x42\n\x06gender\x18\x05 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Gender\x12>\n\x04mood\x18\x06 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Mood\x12\x42\n\x06number\x18\x07 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Number\x12\x42\n\x06person\x18\x08 
\x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Person\x12\x42\n\x06proper\x18\t \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Proper\x12L\n\x0breciprocity\x18\n \x01(\x0e\x32\x37.google.cloud.language.v1beta2.PartOfSpeech.Reciprocity\x12@\n\x05tense\x18\x0b \x01(\x0e\x32\x31.google.cloud.language.v1beta2.PartOfSpeech.Tense\x12@\n\x05voice\x18\x0c \x01(\x0e\x32\x31.google.cloud.language.v1beta2.PartOfSpeech.Voice"\x8d\x01\n\x03Tag\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x41\x44J\x10\x01\x12\x07\n\x03\x41\x44P\x10\x02\x12\x07\n\x03\x41\x44V\x10\x03\x12\x08\n\x04\x43ONJ\x10\x04\x12\x07\n\x03\x44\x45T\x10\x05\x12\x08\n\x04NOUN\x10\x06\x12\x07\n\x03NUM\x10\x07\x12\x08\n\x04PRON\x10\x08\x12\x07\n\x03PRT\x10\t\x12\t\n\x05PUNCT\x10\n\x12\x08\n\x04VERB\x10\x0b\x12\x05\n\x01X\x10\x0c\x12\t\n\x05\x41\x46\x46IX\x10\r"O\n\x06\x41spect\x12\x12\n\x0e\x41SPECT_UNKNOWN\x10\x00\x12\x0e\n\nPERFECTIVE\x10\x01\x12\x10\n\x0cIMPERFECTIVE\x10\x02\x12\x0f\n\x0bPROGRESSIVE\x10\x03"\xf8\x01\n\x04\x43\x61se\x12\x10\n\x0c\x43\x41SE_UNKNOWN\x10\x00\x12\x0e\n\nACCUSATIVE\x10\x01\x12\r\n\tADVERBIAL\x10\x02\x12\x11\n\rCOMPLEMENTIVE\x10\x03\x12\n\n\x06\x44\x41TIVE\x10\x04\x12\x0c\n\x08GENITIVE\x10\x05\x12\x10\n\x0cINSTRUMENTAL\x10\x06\x12\x0c\n\x08LOCATIVE\x10\x07\x12\x0e\n\nNOMINATIVE\x10\x08\x12\x0b\n\x07OBLIQUE\x10\t\x12\r\n\tPARTITIVE\x10\n\x12\x11\n\rPREPOSITIONAL\x10\x0b\x12\x12\n\x0eREFLEXIVE_CASE\x10\x0c\x12\x11\n\rRELATIVE_CASE\x10\r\x12\x0c\n\x08VOCATIVE\x10\x0e"\xaf\x01\n\x04\x46orm\x12\x10\n\x0c\x46ORM_UNKNOWN\x10\x00\x12\x0c\n\x08\x41\x44NOMIAL\x10\x01\x12\r\n\tAUXILIARY\x10\x02\x12\x12\n\x0e\x43OMPLEMENTIZER\x10\x03\x12\x10\n\x0c\x46INAL_ENDING\x10\x04\x12\n\n\x06GERUND\x10\x05\x12\n\n\x06REALIS\x10\x06\x12\x0c\n\x08IRREALIS\x10\x07\x12\t\n\x05SHORT\x10\x08\x12\x08\n\x04LONG\x10\t\x12\t\n\x05ORDER\x10\n\x12\x0c\n\x08SPECIFIC\x10\x0b"E\n\x06Gender\x12\x12\n\x0eGENDER_UNKNOWN\x10\x00\x12\x0c\n\x08\x46\x45MININE\x10\x01\x12\r\n\tMASCULINE\x10\x02\x12\n\n\x06NEUTER\x10\x03"\x7f\n\x04Mood\x12\x10\n\x0cMOOD_UNKNOWN\x10\x00\x12\x14\n\x10\x43ONDITIONAL_MOOD\x10\x01\x12\x0e\n\nIMPERATIVE\x10\x02\x12\x0e\n\nINDICATIVE\x10\x03\x12\x11\n\rINTERROGATIVE\x10\x04\x12\x0b\n\x07JUSSIVE\x10\x05\x12\x0f\n\x0bSUBJUNCTIVE\x10\x06"@\n\x06Number\x12\x12\n\x0eNUMBER_UNKNOWN\x10\x00\x12\x0c\n\x08SINGULAR\x10\x01\x12\n\n\x06PLURAL\x10\x02\x12\x08\n\x04\x44UAL\x10\x03"T\n\x06Person\x12\x12\n\x0ePERSON_UNKNOWN\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\n\n\x06SECOND\x10\x02\x12\t\n\x05THIRD\x10\x03\x12\x14\n\x10REFLEXIVE_PERSON\x10\x04"8\n\x06Proper\x12\x12\n\x0ePROPER_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\x0e\n\nNOT_PROPER\x10\x02"J\n\x0bReciprocity\x12\x17\n\x13RECIPROCITY_UNKNOWN\x10\x00\x12\x0e\n\nRECIPROCAL\x10\x01\x12\x12\n\x0eNON_RECIPROCAL\x10\x02"s\n\x05Tense\x12\x11\n\rTENSE_UNKNOWN\x10\x00\x12\x15\n\x11\x43ONDITIONAL_TENSE\x10\x01\x12\n\n\x06\x46UTURE\x10\x02\x12\x08\n\x04PAST\x10\x03\x12\x0b\n\x07PRESENT\x10\x04\x12\r\n\tIMPERFECT\x10\x05\x12\x0e\n\nPLUPERFECT\x10\x06"B\n\x05Voice\x12\x11\n\rVOICE_UNKNOWN\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\r\n\tCAUSATIVE\x10\x02\x12\x0b\n\x07PASSIVE\x10\x03"\x9a\x08\n\x0e\x44\x65pendencyEdge\x12\x18\n\x10head_token_index\x18\x01 \x01(\x05\x12\x42\n\x05label\x18\x02 
\x01(\x0e\x32\x33.google.cloud.language.v1beta2.DependencyEdge.Label"\xa9\x07\n\x05Label\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x41\x42\x42REV\x10\x01\x12\t\n\x05\x41\x43OMP\x10\x02\x12\t\n\x05\x41\x44VCL\x10\x03\x12\n\n\x06\x41\x44VMOD\x10\x04\x12\x08\n\x04\x41MOD\x10\x05\x12\t\n\x05\x41PPOS\x10\x06\x12\x08\n\x04\x41TTR\x10\x07\x12\x07\n\x03\x41UX\x10\x08\x12\x0b\n\x07\x41UXPASS\x10\t\x12\x06\n\x02\x43\x43\x10\n\x12\t\n\x05\x43\x43OMP\x10\x0b\x12\x08\n\x04\x43ONJ\x10\x0c\x12\t\n\x05\x43SUBJ\x10\r\x12\r\n\tCSUBJPASS\x10\x0e\x12\x07\n\x03\x44\x45P\x10\x0f\x12\x07\n\x03\x44\x45T\x10\x10\x12\r\n\tDISCOURSE\x10\x11\x12\x08\n\x04\x44OBJ\x10\x12\x12\x08\n\x04\x45XPL\x10\x13\x12\x0c\n\x08GOESWITH\x10\x14\x12\x08\n\x04IOBJ\x10\x15\x12\x08\n\x04MARK\x10\x16\x12\x07\n\x03MWE\x10\x17\x12\x07\n\x03MWV\x10\x18\x12\x07\n\x03NEG\x10\x19\x12\x06\n\x02NN\x10\x1a\x12\x0c\n\x08NPADVMOD\x10\x1b\x12\t\n\x05NSUBJ\x10\x1c\x12\r\n\tNSUBJPASS\x10\x1d\x12\x07\n\x03NUM\x10\x1e\x12\n\n\x06NUMBER\x10\x1f\x12\x05\n\x01P\x10 \x12\r\n\tPARATAXIS\x10!\x12\x0b\n\x07PARTMOD\x10"\x12\t\n\x05PCOMP\x10#\x12\x08\n\x04POBJ\x10$\x12\x08\n\x04POSS\x10%\x12\x0b\n\x07POSTNEG\x10&\x12\x0b\n\x07PRECOMP\x10\'\x12\x0b\n\x07PRECONJ\x10(\x12\n\n\x06PREDET\x10)\x12\x08\n\x04PREF\x10*\x12\x08\n\x04PREP\x10+\x12\t\n\x05PRONL\x10,\x12\x07\n\x03PRT\x10-\x12\x06\n\x02PS\x10.\x12\x0c\n\x08QUANTMOD\x10/\x12\t\n\x05RCMOD\x10\x30\x12\x0c\n\x08RCMODREL\x10\x31\x12\t\n\x05RDROP\x10\x32\x12\x07\n\x03REF\x10\x33\x12\x0b\n\x07REMNANT\x10\x34\x12\x0e\n\nREPARANDUM\x10\x35\x12\x08\n\x04ROOT\x10\x36\x12\x08\n\x04SNUM\x10\x37\x12\x08\n\x04SUFF\x10\x38\x12\x08\n\x04TMOD\x10\x39\x12\t\n\x05TOPIC\x10:\x12\x08\n\x04VMOD\x10;\x12\x0c\n\x08VOCATIVE\x10<\x12\t\n\x05XCOMP\x10=\x12\n\n\x06SUFFIX\x10>\x12\t\n\x05TITLE\x10?\x12\x0c\n\x08\x41\x44VPHMOD\x10@\x12\x0b\n\x07\x41UXCAUS\x10\x41\x12\t\n\x05\x41UXVV\x10\x42\x12\t\n\x05\x44TMOD\x10\x43\x12\x0b\n\x07\x46OREIGN\x10\x44\x12\x06\n\x02KW\x10\x45\x12\x08\n\x04LIST\x10\x46\x12\x08\n\x04NOMC\x10G\x12\x0c\n\x08NOMCSUBJ\x10H\x12\x10\n\x0cNOMCSUBJPASS\x10I\x12\x08\n\x04NUMC\x10J\x12\x07\n\x03\x43OP\x10K\x12\x0e\n\nDISLOCATED\x10L\x12\x07\n\x03\x41SP\x10M\x12\x08\n\x04GMOD\x10N\x12\x08\n\x04GOBJ\x10O\x12\n\n\x06INFMOD\x10P\x12\x07\n\x03MES\x10Q\x12\t\n\x05NCOMP\x10R"\xf6\x01\n\rEntityMention\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12?\n\x04type\x18\x02 \x01(\x0e\x32\x31.google.cloud.language.v1beta2.EntityMention.Type\x12;\n\tsentiment\x18\x03 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment"0\n\x04Type\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\n\n\x06\x43OMMON\x10\x02"1\n\x08TextSpan\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x14\n\x0c\x62\x65gin_offset\x18\x02 \x01(\x05":\n\x16\x43lassificationCategory\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02"\x9d\x01\n\x17\x41nalyzeSentimentRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"\xae\x01\n\x18\x41nalyzeSentimentResponse\x12\x44\n\x12\x64ocument_sentiment\x18\x01 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x12\x10\n\x08language\x18\x02 \x01(\t\x12:\n\tsentences\x18\x03 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence"\xa3\x01\n\x1d\x41nalyzeEntitySentimentRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 
\x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"k\n\x1e\x41nalyzeEntitySentimentResponse\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x9c\x01\n\x16\x41nalyzeEntitiesRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"d\n\x17\x41nalyzeEntitiesResponse\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x9a\x01\n\x14\x41nalyzeSyntaxRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"\x9b\x01\n\x15\x41nalyzeSyntaxResponse\x12:\n\tsentences\x18\x01 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence\x12\x34\n\x06tokens\x18\x02 \x03(\x0b\x32$.google.cloud.language.v1beta2.Token\x12\x10\n\x08language\x18\x03 \x01(\t"U\n\x13\x43lassifyTextRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02"a\n\x14\x43lassifyTextResponse\x12I\n\ncategories\x18\x01 \x03(\x0b\x32\x35.google.cloud.language.v1beta2.ClassificationCategory"\x89\x03\n\x13\x41nnotateTextRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12R\n\x08\x66\x65\x61tures\x18\x02 \x01(\x0b\x32;.google.cloud.language.v1beta2.AnnotateTextRequest.FeaturesB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x03 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType\x1a\x99\x01\n\x08\x46\x65\x61tures\x12\x16\n\x0e\x65xtract_syntax\x18\x01 \x01(\x08\x12\x18\n\x10\x65xtract_entities\x18\x02 \x01(\x08\x12"\n\x1a\x65xtract_document_sentiment\x18\x03 \x01(\x08\x12 \n\x18\x65xtract_entity_sentiment\x18\x04 \x01(\x08\x12\x15\n\rclassify_text\x18\x06 \x01(\x08"\xe4\x02\n\x14\x41nnotateTextResponse\x12:\n\tsentences\x18\x01 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence\x12\x34\n\x06tokens\x18\x02 \x03(\x0b\x32$.google.cloud.language.v1beta2.Token\x12\x37\n\x08\x65ntities\x18\x03 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x44\n\x12\x64ocument_sentiment\x18\x04 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x12\x10\n\x08language\x18\x05 \x01(\t\x12I\n\ncategories\x18\x06 
\x03(\x0b\x32\x35.google.cloud.language.v1beta2.ClassificationCategory*8\n\x0c\x45ncodingType\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04UTF8\x10\x01\x12\t\n\x05UTF16\x10\x02\x12\t\n\x05UTF32\x10\x03\x32\x8a\x0b\n\x0fLanguageService\x12\xd7\x01\n\x10\x41nalyzeSentiment\x12\x36.google.cloud.language.v1beta2.AnalyzeSentimentRequest\x1a\x37.google.cloud.language.v1beta2.AnalyzeSentimentResponse"R\x82\xd3\xe4\x93\x02("#/v1beta2/documents:analyzeSentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xd3\x01\n\x0f\x41nalyzeEntities\x12\x35.google.cloud.language.v1beta2.AnalyzeEntitiesRequest\x1a\x36.google.cloud.language.v1beta2.AnalyzeEntitiesResponse"Q\x82\xd3\xe4\x93\x02\'""/v1beta2/documents:analyzeEntities:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xef\x01\n\x16\x41nalyzeEntitySentiment\x12<.google.cloud.language.v1beta2.AnalyzeEntitySentimentRequest\x1a=.google.cloud.language.v1beta2.AnalyzeEntitySentimentResponse"X\x82\xd3\xe4\x93\x02.")/v1beta2/documents:analyzeEntitySentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xcb\x01\n\rAnalyzeSyntax\x12\x33.google.cloud.language.v1beta2.AnalyzeSyntaxRequest\x1a\x34.google.cloud.language.v1beta2.AnalyzeSyntaxResponse"O\x82\xd3\xe4\x93\x02%" /v1beta2/documents:analyzeSyntax:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xae\x01\n\x0c\x43lassifyText\x12\x32.google.cloud.language.v1beta2.ClassifyTextRequest\x1a\x33.google.cloud.language.v1beta2.ClassifyTextResponse"5\x82\xd3\xe4\x93\x02$"\x1f/v1beta2/documents:classifyText:\x01*\xda\x41\x08\x64ocument\x12\xd9\x01\n\x0c\x41nnotateText\x12\x32.google.cloud.language.v1beta2.AnnotateTextRequest\x1a\x33.google.cloud.language.v1beta2.AnnotateTextResponse"`\x82\xd3\xe4\x93\x02$"\x1f/v1beta2/documents:annotateText:\x01*\xda\x41\x1f\x64ocument,features,encoding_type\xda\x41\x11\x64ocument,features\x1az\xca\x41\x17language.googleapis.com\xd2\x41]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platformB\x82\x01\n!com.google.cloud.language.v1beta2B\x14LanguageServiceProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/language/v1beta2;languageb\x06proto3' - ), + serialized_options=b"\n!com.google.cloud.language.v1beta2B\024LanguageServiceProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/language/v1beta2;language", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n:google/cloud/language_v1beta2/proto/language_service.proto\x12\x1dgoogle.cloud.language.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x08\x44ocument\x12:\n\x04type\x18\x01 \x01(\x0e\x32,.google.cloud.language.v1beta2.Document.Type\x12\x11\n\x07\x63ontent\x18\x02 \x01(\tH\x00\x12\x19\n\x0fgcs_content_uri\x18\x03 \x01(\tH\x00\x12\x10\n\x08language\x18\x04 \x01(\t"6\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPLAIN_TEXT\x10\x01\x12\x08\n\x04HTML\x10\x02\x42\x08\n\x06source"~\n\x08Sentence\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12;\n\tsentiment\x18\x02 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment"\x93\x04\n\x06\x45ntity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x04type\x18\x02 \x01(\x0e\x32*.google.cloud.language.v1beta2.Entity.Type\x12\x45\n\x08metadata\x18\x03 
\x03(\x0b\x32\x33.google.cloud.language.v1beta2.Entity.MetadataEntry\x12\x10\n\x08salience\x18\x04 \x01(\x02\x12>\n\x08mentions\x18\x05 \x03(\x0b\x32,.google.cloud.language.v1beta2.EntityMention\x12;\n\tsentiment\x18\x06 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb9\x01\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06PERSON\x10\x01\x12\x0c\n\x08LOCATION\x10\x02\x12\x10\n\x0cORGANIZATION\x10\x03\x12\t\n\x05\x45VENT\x10\x04\x12\x0f\n\x0bWORK_OF_ART\x10\x05\x12\x11\n\rCONSUMER_GOOD\x10\x06\x12\t\n\x05OTHER\x10\x07\x12\x10\n\x0cPHONE_NUMBER\x10\t\x12\x0b\n\x07\x41\x44\x44RESS\x10\n\x12\x08\n\x04\x44\x41TE\x10\x0b\x12\n\n\x06NUMBER\x10\x0c\x12\t\n\x05PRICE\x10\r"\xda\x01\n\x05Token\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12\x43\n\x0epart_of_speech\x18\x02 \x01(\x0b\x32+.google.cloud.language.v1beta2.PartOfSpeech\x12\x46\n\x0f\x64\x65pendency_edge\x18\x03 \x01(\x0b\x32-.google.cloud.language.v1beta2.DependencyEdge\x12\r\n\x05lemma\x18\x04 \x01(\t"-\n\tSentiment\x12\x11\n\tmagnitude\x18\x02 \x01(\x02\x12\r\n\x05score\x18\x03 \x01(\x02"\xdf\x10\n\x0cPartOfSpeech\x12<\n\x03tag\x18\x01 \x01(\x0e\x32/.google.cloud.language.v1beta2.PartOfSpeech.Tag\x12\x42\n\x06\x61spect\x18\x02 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Aspect\x12>\n\x04\x63\x61se\x18\x03 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Case\x12>\n\x04\x66orm\x18\x04 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Form\x12\x42\n\x06gender\x18\x05 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Gender\x12>\n\x04mood\x18\x06 \x01(\x0e\x32\x30.google.cloud.language.v1beta2.PartOfSpeech.Mood\x12\x42\n\x06number\x18\x07 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Number\x12\x42\n\x06person\x18\x08 \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Person\x12\x42\n\x06proper\x18\t \x01(\x0e\x32\x32.google.cloud.language.v1beta2.PartOfSpeech.Proper\x12L\n\x0breciprocity\x18\n \x01(\x0e\x32\x37.google.cloud.language.v1beta2.PartOfSpeech.Reciprocity\x12@\n\x05tense\x18\x0b \x01(\x0e\x32\x31.google.cloud.language.v1beta2.PartOfSpeech.Tense\x12@\n\x05voice\x18\x0c 
\x01(\x0e\x32\x31.google.cloud.language.v1beta2.PartOfSpeech.Voice"\x8d\x01\n\x03Tag\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x41\x44J\x10\x01\x12\x07\n\x03\x41\x44P\x10\x02\x12\x07\n\x03\x41\x44V\x10\x03\x12\x08\n\x04\x43ONJ\x10\x04\x12\x07\n\x03\x44\x45T\x10\x05\x12\x08\n\x04NOUN\x10\x06\x12\x07\n\x03NUM\x10\x07\x12\x08\n\x04PRON\x10\x08\x12\x07\n\x03PRT\x10\t\x12\t\n\x05PUNCT\x10\n\x12\x08\n\x04VERB\x10\x0b\x12\x05\n\x01X\x10\x0c\x12\t\n\x05\x41\x46\x46IX\x10\r"O\n\x06\x41spect\x12\x12\n\x0e\x41SPECT_UNKNOWN\x10\x00\x12\x0e\n\nPERFECTIVE\x10\x01\x12\x10\n\x0cIMPERFECTIVE\x10\x02\x12\x0f\n\x0bPROGRESSIVE\x10\x03"\xf8\x01\n\x04\x43\x61se\x12\x10\n\x0c\x43\x41SE_UNKNOWN\x10\x00\x12\x0e\n\nACCUSATIVE\x10\x01\x12\r\n\tADVERBIAL\x10\x02\x12\x11\n\rCOMPLEMENTIVE\x10\x03\x12\n\n\x06\x44\x41TIVE\x10\x04\x12\x0c\n\x08GENITIVE\x10\x05\x12\x10\n\x0cINSTRUMENTAL\x10\x06\x12\x0c\n\x08LOCATIVE\x10\x07\x12\x0e\n\nNOMINATIVE\x10\x08\x12\x0b\n\x07OBLIQUE\x10\t\x12\r\n\tPARTITIVE\x10\n\x12\x11\n\rPREPOSITIONAL\x10\x0b\x12\x12\n\x0eREFLEXIVE_CASE\x10\x0c\x12\x11\n\rRELATIVE_CASE\x10\r\x12\x0c\n\x08VOCATIVE\x10\x0e"\xaf\x01\n\x04\x46orm\x12\x10\n\x0c\x46ORM_UNKNOWN\x10\x00\x12\x0c\n\x08\x41\x44NOMIAL\x10\x01\x12\r\n\tAUXILIARY\x10\x02\x12\x12\n\x0e\x43OMPLEMENTIZER\x10\x03\x12\x10\n\x0c\x46INAL_ENDING\x10\x04\x12\n\n\x06GERUND\x10\x05\x12\n\n\x06REALIS\x10\x06\x12\x0c\n\x08IRREALIS\x10\x07\x12\t\n\x05SHORT\x10\x08\x12\x08\n\x04LONG\x10\t\x12\t\n\x05ORDER\x10\n\x12\x0c\n\x08SPECIFIC\x10\x0b"E\n\x06Gender\x12\x12\n\x0eGENDER_UNKNOWN\x10\x00\x12\x0c\n\x08\x46\x45MININE\x10\x01\x12\r\n\tMASCULINE\x10\x02\x12\n\n\x06NEUTER\x10\x03"\x7f\n\x04Mood\x12\x10\n\x0cMOOD_UNKNOWN\x10\x00\x12\x14\n\x10\x43ONDITIONAL_MOOD\x10\x01\x12\x0e\n\nIMPERATIVE\x10\x02\x12\x0e\n\nINDICATIVE\x10\x03\x12\x11\n\rINTERROGATIVE\x10\x04\x12\x0b\n\x07JUSSIVE\x10\x05\x12\x0f\n\x0bSUBJUNCTIVE\x10\x06"@\n\x06Number\x12\x12\n\x0eNUMBER_UNKNOWN\x10\x00\x12\x0c\n\x08SINGULAR\x10\x01\x12\n\n\x06PLURAL\x10\x02\x12\x08\n\x04\x44UAL\x10\x03"T\n\x06Person\x12\x12\n\x0ePERSON_UNKNOWN\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\n\n\x06SECOND\x10\x02\x12\t\n\x05THIRD\x10\x03\x12\x14\n\x10REFLEXIVE_PERSON\x10\x04"8\n\x06Proper\x12\x12\n\x0ePROPER_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\x0e\n\nNOT_PROPER\x10\x02"J\n\x0bReciprocity\x12\x17\n\x13RECIPROCITY_UNKNOWN\x10\x00\x12\x0e\n\nRECIPROCAL\x10\x01\x12\x12\n\x0eNON_RECIPROCAL\x10\x02"s\n\x05Tense\x12\x11\n\rTENSE_UNKNOWN\x10\x00\x12\x15\n\x11\x43ONDITIONAL_TENSE\x10\x01\x12\n\n\x06\x46UTURE\x10\x02\x12\x08\n\x04PAST\x10\x03\x12\x0b\n\x07PRESENT\x10\x04\x12\r\n\tIMPERFECT\x10\x05\x12\x0e\n\nPLUPERFECT\x10\x06"B\n\x05Voice\x12\x11\n\rVOICE_UNKNOWN\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\r\n\tCAUSATIVE\x10\x02\x12\x0b\n\x07PASSIVE\x10\x03"\x9a\x08\n\x0e\x44\x65pendencyEdge\x12\x18\n\x10head_token_index\x18\x01 \x01(\x05\x12\x42\n\x05label\x18\x02 
\x01(\x0e\x32\x33.google.cloud.language.v1beta2.DependencyEdge.Label"\xa9\x07\n\x05Label\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x41\x42\x42REV\x10\x01\x12\t\n\x05\x41\x43OMP\x10\x02\x12\t\n\x05\x41\x44VCL\x10\x03\x12\n\n\x06\x41\x44VMOD\x10\x04\x12\x08\n\x04\x41MOD\x10\x05\x12\t\n\x05\x41PPOS\x10\x06\x12\x08\n\x04\x41TTR\x10\x07\x12\x07\n\x03\x41UX\x10\x08\x12\x0b\n\x07\x41UXPASS\x10\t\x12\x06\n\x02\x43\x43\x10\n\x12\t\n\x05\x43\x43OMP\x10\x0b\x12\x08\n\x04\x43ONJ\x10\x0c\x12\t\n\x05\x43SUBJ\x10\r\x12\r\n\tCSUBJPASS\x10\x0e\x12\x07\n\x03\x44\x45P\x10\x0f\x12\x07\n\x03\x44\x45T\x10\x10\x12\r\n\tDISCOURSE\x10\x11\x12\x08\n\x04\x44OBJ\x10\x12\x12\x08\n\x04\x45XPL\x10\x13\x12\x0c\n\x08GOESWITH\x10\x14\x12\x08\n\x04IOBJ\x10\x15\x12\x08\n\x04MARK\x10\x16\x12\x07\n\x03MWE\x10\x17\x12\x07\n\x03MWV\x10\x18\x12\x07\n\x03NEG\x10\x19\x12\x06\n\x02NN\x10\x1a\x12\x0c\n\x08NPADVMOD\x10\x1b\x12\t\n\x05NSUBJ\x10\x1c\x12\r\n\tNSUBJPASS\x10\x1d\x12\x07\n\x03NUM\x10\x1e\x12\n\n\x06NUMBER\x10\x1f\x12\x05\n\x01P\x10 \x12\r\n\tPARATAXIS\x10!\x12\x0b\n\x07PARTMOD\x10"\x12\t\n\x05PCOMP\x10#\x12\x08\n\x04POBJ\x10$\x12\x08\n\x04POSS\x10%\x12\x0b\n\x07POSTNEG\x10&\x12\x0b\n\x07PRECOMP\x10\'\x12\x0b\n\x07PRECONJ\x10(\x12\n\n\x06PREDET\x10)\x12\x08\n\x04PREF\x10*\x12\x08\n\x04PREP\x10+\x12\t\n\x05PRONL\x10,\x12\x07\n\x03PRT\x10-\x12\x06\n\x02PS\x10.\x12\x0c\n\x08QUANTMOD\x10/\x12\t\n\x05RCMOD\x10\x30\x12\x0c\n\x08RCMODREL\x10\x31\x12\t\n\x05RDROP\x10\x32\x12\x07\n\x03REF\x10\x33\x12\x0b\n\x07REMNANT\x10\x34\x12\x0e\n\nREPARANDUM\x10\x35\x12\x08\n\x04ROOT\x10\x36\x12\x08\n\x04SNUM\x10\x37\x12\x08\n\x04SUFF\x10\x38\x12\x08\n\x04TMOD\x10\x39\x12\t\n\x05TOPIC\x10:\x12\x08\n\x04VMOD\x10;\x12\x0c\n\x08VOCATIVE\x10<\x12\t\n\x05XCOMP\x10=\x12\n\n\x06SUFFIX\x10>\x12\t\n\x05TITLE\x10?\x12\x0c\n\x08\x41\x44VPHMOD\x10@\x12\x0b\n\x07\x41UXCAUS\x10\x41\x12\t\n\x05\x41UXVV\x10\x42\x12\t\n\x05\x44TMOD\x10\x43\x12\x0b\n\x07\x46OREIGN\x10\x44\x12\x06\n\x02KW\x10\x45\x12\x08\n\x04LIST\x10\x46\x12\x08\n\x04NOMC\x10G\x12\x0c\n\x08NOMCSUBJ\x10H\x12\x10\n\x0cNOMCSUBJPASS\x10I\x12\x08\n\x04NUMC\x10J\x12\x07\n\x03\x43OP\x10K\x12\x0e\n\nDISLOCATED\x10L\x12\x07\n\x03\x41SP\x10M\x12\x08\n\x04GMOD\x10N\x12\x08\n\x04GOBJ\x10O\x12\n\n\x06INFMOD\x10P\x12\x07\n\x03MES\x10Q\x12\t\n\x05NCOMP\x10R"\xf6\x01\n\rEntityMention\x12\x35\n\x04text\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.TextSpan\x12?\n\x04type\x18\x02 \x01(\x0e\x32\x31.google.cloud.language.v1beta2.EntityMention.Type\x12;\n\tsentiment\x18\x03 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment"0\n\x04Type\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\n\n\x06PROPER\x10\x01\x12\n\n\x06\x43OMMON\x10\x02"1\n\x08TextSpan\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x14\n\x0c\x62\x65gin_offset\x18\x02 \x01(\x05":\n\x16\x43lassificationCategory\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02"\x9d\x01\n\x17\x41nalyzeSentimentRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"\xae\x01\n\x18\x41nalyzeSentimentResponse\x12\x44\n\x12\x64ocument_sentiment\x18\x01 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x12\x10\n\x08language\x18\x02 \x01(\t\x12:\n\tsentences\x18\x03 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence"\xa3\x01\n\x1d\x41nalyzeEntitySentimentRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 
\x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"k\n\x1e\x41nalyzeEntitySentimentResponse\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x9c\x01\n\x16\x41nalyzeEntitiesRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"d\n\x17\x41nalyzeEntitiesResponse\x12\x37\n\x08\x65ntities\x18\x01 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x10\n\x08language\x18\x02 \x01(\t"\x9a\x01\n\x14\x41nalyzeSyntaxRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x02 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType"\x9b\x01\n\x15\x41nalyzeSyntaxResponse\x12:\n\tsentences\x18\x01 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence\x12\x34\n\x06tokens\x18\x02 \x03(\x0b\x32$.google.cloud.language.v1beta2.Token\x12\x10\n\x08language\x18\x03 \x01(\t"U\n\x13\x43lassifyTextRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02"a\n\x14\x43lassifyTextResponse\x12I\n\ncategories\x18\x01 \x03(\x0b\x32\x35.google.cloud.language.v1beta2.ClassificationCategory"\x89\x03\n\x13\x41nnotateTextRequest\x12>\n\x08\x64ocument\x18\x01 \x01(\x0b\x32\'.google.cloud.language.v1beta2.DocumentB\x03\xe0\x41\x02\x12R\n\x08\x66\x65\x61tures\x18\x02 \x01(\x0b\x32;.google.cloud.language.v1beta2.AnnotateTextRequest.FeaturesB\x03\xe0\x41\x02\x12\x42\n\rencoding_type\x18\x03 \x01(\x0e\x32+.google.cloud.language.v1beta2.EncodingType\x1a\x99\x01\n\x08\x46\x65\x61tures\x12\x16\n\x0e\x65xtract_syntax\x18\x01 \x01(\x08\x12\x18\n\x10\x65xtract_entities\x18\x02 \x01(\x08\x12"\n\x1a\x65xtract_document_sentiment\x18\x03 \x01(\x08\x12 \n\x18\x65xtract_entity_sentiment\x18\x04 \x01(\x08\x12\x15\n\rclassify_text\x18\x06 \x01(\x08"\xe4\x02\n\x14\x41nnotateTextResponse\x12:\n\tsentences\x18\x01 \x03(\x0b\x32\'.google.cloud.language.v1beta2.Sentence\x12\x34\n\x06tokens\x18\x02 \x03(\x0b\x32$.google.cloud.language.v1beta2.Token\x12\x37\n\x08\x65ntities\x18\x03 \x03(\x0b\x32%.google.cloud.language.v1beta2.Entity\x12\x44\n\x12\x64ocument_sentiment\x18\x04 \x01(\x0b\x32(.google.cloud.language.v1beta2.Sentiment\x12\x10\n\x08language\x18\x05 \x01(\t\x12I\n\ncategories\x18\x06 
\x03(\x0b\x32\x35.google.cloud.language.v1beta2.ClassificationCategory*8\n\x0c\x45ncodingType\x12\x08\n\x04NONE\x10\x00\x12\x08\n\x04UTF8\x10\x01\x12\t\n\x05UTF16\x10\x02\x12\t\n\x05UTF32\x10\x03\x32\x8a\x0b\n\x0fLanguageService\x12\xd7\x01\n\x10\x41nalyzeSentiment\x12\x36.google.cloud.language.v1beta2.AnalyzeSentimentRequest\x1a\x37.google.cloud.language.v1beta2.AnalyzeSentimentResponse"R\x82\xd3\xe4\x93\x02("#/v1beta2/documents:analyzeSentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xd3\x01\n\x0f\x41nalyzeEntities\x12\x35.google.cloud.language.v1beta2.AnalyzeEntitiesRequest\x1a\x36.google.cloud.language.v1beta2.AnalyzeEntitiesResponse"Q\x82\xd3\xe4\x93\x02\'""/v1beta2/documents:analyzeEntities:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xef\x01\n\x16\x41nalyzeEntitySentiment\x12<.google.cloud.language.v1beta2.AnalyzeEntitySentimentRequest\x1a=.google.cloud.language.v1beta2.AnalyzeEntitySentimentResponse"X\x82\xd3\xe4\x93\x02.")/v1beta2/documents:analyzeEntitySentiment:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xcb\x01\n\rAnalyzeSyntax\x12\x33.google.cloud.language.v1beta2.AnalyzeSyntaxRequest\x1a\x34.google.cloud.language.v1beta2.AnalyzeSyntaxResponse"O\x82\xd3\xe4\x93\x02%" /v1beta2/documents:analyzeSyntax:\x01*\xda\x41\x16\x64ocument,encoding_type\xda\x41\x08\x64ocument\x12\xae\x01\n\x0c\x43lassifyText\x12\x32.google.cloud.language.v1beta2.ClassifyTextRequest\x1a\x33.google.cloud.language.v1beta2.ClassifyTextResponse"5\x82\xd3\xe4\x93\x02$"\x1f/v1beta2/documents:classifyText:\x01*\xda\x41\x08\x64ocument\x12\xd9\x01\n\x0c\x41nnotateText\x12\x32.google.cloud.language.v1beta2.AnnotateTextRequest\x1a\x33.google.cloud.language.v1beta2.AnnotateTextResponse"`\x82\xd3\xe4\x93\x02$"\x1f/v1beta2/documents:annotateText:\x01*\xda\x41\x1f\x64ocument,features,encoding_type\xda\x41\x11\x64ocument,features\x1az\xca\x41\x17language.googleapis.com\xd2\x41]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platformB\x82\x01\n!com.google.cloud.language.v1beta2B\x14LanguageServiceProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/language/v1beta2;languageb\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -45,18 +39,39 @@ full_name="google.cloud.language.v1beta2.EncodingType", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="NONE", index=0, number=0, serialized_options=None, type=None + name="NONE", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UTF8", index=1, number=1, serialized_options=None, type=None + name="UTF8", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UTF16", index=2, number=2, serialized_options=None, type=None + name="UTF16", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UTF32", index=3, number=3, serialized_options=None, type=None + name="UTF32", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -78,6 +93,7 @@ 
full_name="google.cloud.language.v1beta2.Document.Type", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="TYPE_UNSPECIFIED", @@ -85,12 +101,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PLAIN_TEXT", index=1, number=1, serialized_options=None, type=None + name="PLAIN_TEXT", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="HTML", index=2, number=2, serialized_options=None, type=None + name="HTML", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -105,45 +132,111 @@ full_name="google.cloud.language.v1beta2.Entity.Type", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PERSON", index=1, number=1, serialized_options=None, type=None + name="PERSON", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LOCATION", index=2, number=2, serialized_options=None, type=None + name="LOCATION", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ORGANIZATION", index=3, number=3, serialized_options=None, type=None + name="ORGANIZATION", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="EVENT", index=4, number=4, serialized_options=None, type=None + name="EVENT", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="WORK_OF_ART", index=5, number=5, serialized_options=None, type=None + name="WORK_OF_ART", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CONSUMER_GOOD", index=6, number=6, serialized_options=None, type=None + name="CONSUMER_GOOD", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="OTHER", index=7, number=7, serialized_options=None, type=None + name="OTHER", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PHONE_NUMBER", index=8, number=9, serialized_options=None, type=None + name="PHONE_NUMBER", + index=8, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADDRESS", index=9, number=10, serialized_options=None, type=None + name="ADDRESS", + index=9, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DATE", index=10, number=11, serialized_options=None, type=None + name="DATE", + index=10, + number=11, + serialized_options=None, + 
type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUMBER", index=11, number=12, serialized_options=None, type=None + name="NUMBER", + index=11, + number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRICE", index=12, number=13, serialized_options=None, type=None + name="PRICE", + index=12, + number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -158,48 +251,119 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Tag", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADJ", index=1, number=1, serialized_options=None, type=None + name="ADJ", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADP", index=2, number=2, serialized_options=None, type=None + name="ADP", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADV", index=3, number=3, serialized_options=None, type=None + name="ADV", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CONJ", index=4, number=4, serialized_options=None, type=None + name="CONJ", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DET", index=5, number=5, serialized_options=None, type=None + name="DET", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOUN", index=6, number=6, serialized_options=None, type=None + name="NOUN", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUM", index=7, number=7, serialized_options=None, type=None + name="NUM", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRON", index=8, number=8, serialized_options=None, type=None + name="PRON", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRT", index=9, number=9, serialized_options=None, type=None + name="PRT", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PUNCT", index=10, number=10, serialized_options=None, type=None + name="PUNCT", + index=10, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERB", index=11, number=11, serialized_options=None, type=None + name="VERB", + index=11, + number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="X", index=12, 
number=12, serialized_options=None, type=None + name="X", + index=12, + number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AFFIX", index=13, number=13, serialized_options=None, type=None + name="AFFIX", + index=13, + number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -214,18 +378,39 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Aspect", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="ASPECT_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="ASPECT_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PERFECTIVE", index=1, number=1, serialized_options=None, type=None + name="PERFECTIVE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IMPERFECTIVE", index=2, number=2, serialized_options=None, type=None + name="IMPERFECTIVE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PROGRESSIVE", index=3, number=3, serialized_options=None, type=None + name="PROGRESSIVE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -240,39 +425,95 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Case", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="CASE_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="CASE_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ACCUSATIVE", index=1, number=1, serialized_options=None, type=None + name="ACCUSATIVE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADVERBIAL", index=2, number=2, serialized_options=None, type=None + name="ADVERBIAL", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="COMPLEMENTIVE", index=3, number=3, serialized_options=None, type=None + name="COMPLEMENTIVE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DATIVE", index=4, number=4, serialized_options=None, type=None + name="DATIVE", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GENITIVE", index=5, number=5, serialized_options=None, type=None + name="GENITIVE", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INSTRUMENTAL", index=6, number=6, serialized_options=None, type=None + name="INSTRUMENTAL", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LOCATIVE", index=7, number=7, 
serialized_options=None, type=None + name="LOCATIVE", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOMINATIVE", index=8, number=8, serialized_options=None, type=None + name="NOMINATIVE", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="OBLIQUE", index=9, number=9, serialized_options=None, type=None + name="OBLIQUE", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PARTITIVE", index=10, number=10, serialized_options=None, type=None + name="PARTITIVE", + index=10, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="PREPOSITIONAL", @@ -280,6 +521,7 @@ number=11, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="REFLEXIVE_CASE", @@ -287,6 +529,7 @@ number=12, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="RELATIVE_CASE", @@ -294,9 +537,15 @@ number=13, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VOCATIVE", index=14, number=14, serialized_options=None, type=None + name="VOCATIVE", + index=14, + number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -311,42 +560,103 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Form", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="FORM_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="FORM_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADNOMIAL", index=1, number=1, serialized_options=None, type=None + name="ADNOMIAL", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUXILIARY", index=2, number=2, serialized_options=None, type=None + name="AUXILIARY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="COMPLEMENTIZER", index=3, number=3, serialized_options=None, type=None + name="COMPLEMENTIZER", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FINAL_ENDING", index=4, number=4, serialized_options=None, type=None + name="FINAL_ENDING", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GERUND", index=5, number=5, serialized_options=None, type=None + name="GERUND", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="REALIS", index=6, number=6, serialized_options=None, type=None + name="REALIS", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( - name="IRREALIS", index=7, number=7, serialized_options=None, type=None + name="IRREALIS", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SHORT", index=8, number=8, serialized_options=None, type=None + name="SHORT", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LONG", index=9, number=9, serialized_options=None, type=None + name="LONG", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ORDER", index=10, number=10, serialized_options=None, type=None + name="ORDER", + index=10, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SPECIFIC", index=11, number=11, serialized_options=None, type=None + name="SPECIFIC", + index=11, + number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -361,19 +671,40 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Gender", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="GENDER_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="GENDER_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FEMININE", index=1, number=1, serialized_options=None, type=None + name="FEMININE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MASCULINE", index=2, number=2, serialized_options=None, type=None + name="MASCULINE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NEUTER", index=3, number=3, serialized_options=None, type=None - ), + name="NEUTER", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), ], containing_type=None, serialized_options=None, @@ -387,9 +718,15 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Mood", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="MOOD_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="MOOD_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CONDITIONAL_MOOD", @@ -397,21 +734,47 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IMPERATIVE", index=2, number=2, serialized_options=None, type=None + name="IMPERATIVE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INDICATIVE", index=3, number=3, serialized_options=None, type=None + name="INDICATIVE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INTERROGATIVE", index=4, number=4, 
serialized_options=None, type=None + name="INTERROGATIVE", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="JUSSIVE", index=5, number=5, serialized_options=None, type=None + name="JUSSIVE", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SUBJUNCTIVE", index=6, number=6, serialized_options=None, type=None + name="SUBJUNCTIVE", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -426,18 +789,39 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Number", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="NUMBER_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="NUMBER_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SINGULAR", index=1, number=1, serialized_options=None, type=None + name="SINGULAR", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PLURAL", index=2, number=2, serialized_options=None, type=None + name="PLURAL", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DUAL", index=3, number=3, serialized_options=None, type=None + name="DUAL", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -452,18 +836,39 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Person", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="PERSON_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="PERSON_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FIRST", index=1, number=1, serialized_options=None, type=None + name="FIRST", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SECOND", index=2, number=2, serialized_options=None, type=None + name="SECOND", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="THIRD", index=3, number=3, serialized_options=None, type=None + name="THIRD", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="REFLEXIVE_PERSON", @@ -471,6 +876,7 @@ number=4, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -485,15 +891,31 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Proper", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="PROPER_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="PROPER_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PROPER", index=1, number=1, serialized_options=None, type=None + name="PROPER", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOT_PROPER", index=2, number=2, serialized_options=None, type=None + name="NOT_PROPER", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -508,6 +930,7 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Reciprocity", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="RECIPROCITY_UNKNOWN", @@ -515,12 +938,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RECIPROCAL", index=1, number=1, serialized_options=None, type=None + name="RECIPROCAL", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NON_RECIPROCAL", index=2, number=2, serialized_options=None, type=None + name="NON_RECIPROCAL", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -535,9 +969,15 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Tense", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="TENSE_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="TENSE_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CONDITIONAL_TENSE", @@ -545,21 +985,47 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FUTURE", index=2, number=2, serialized_options=None, type=None + name="FUTURE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PAST", index=3, number=3, serialized_options=None, type=None + name="PAST", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRESENT", index=4, number=4, serialized_options=None, type=None + name="PRESENT", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IMPERFECT", index=5, number=5, serialized_options=None, type=None + name="IMPERFECT", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PLUPERFECT", index=6, number=6, serialized_options=None, type=None + name="PLUPERFECT", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -574,18 +1040,39 @@ full_name="google.cloud.language.v1beta2.PartOfSpeech.Voice", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="VOICE_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="VOICE_UNKNOWN", + index=0, + number=0, 
+ serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ACTIVE", index=1, number=1, serialized_options=None, type=None + name="ACTIVE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CAUSATIVE", index=2, number=2, serialized_options=None, type=None + name="CAUSATIVE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PASSIVE", index=3, number=3, serialized_options=None, type=None + name="PASSIVE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -600,255 +1087,671 @@ full_name="google.cloud.language.v1beta2.DependencyEdge.Label", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ABBREV", index=1, number=1, serialized_options=None, type=None + name="ABBREV", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ACOMP", index=2, number=2, serialized_options=None, type=None + name="ACOMP", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADVCL", index=3, number=3, serialized_options=None, type=None + name="ADVCL", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADVMOD", index=4, number=4, serialized_options=None, type=None + name="ADVMOD", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AMOD", index=5, number=5, serialized_options=None, type=None + name="AMOD", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="APPOS", index=6, number=6, serialized_options=None, type=None + name="APPOS", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ATTR", index=7, number=7, serialized_options=None, type=None + name="ATTR", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUX", index=8, number=8, serialized_options=None, type=None + name="AUX", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUXPASS", index=9, number=9, serialized_options=None, type=None + name="AUXPASS", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CC", index=10, number=10, serialized_options=None, type=None + name="CC", + index=10, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, 
), _descriptor.EnumValueDescriptor( - name="CCOMP", index=11, number=11, serialized_options=None, type=None + name="CCOMP", + index=11, + number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CONJ", index=12, number=12, serialized_options=None, type=None + name="CONJ", + index=12, + number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CSUBJ", index=13, number=13, serialized_options=None, type=None + name="CSUBJ", + index=13, + number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CSUBJPASS", index=14, number=14, serialized_options=None, type=None + name="CSUBJPASS", + index=14, + number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DEP", index=15, number=15, serialized_options=None, type=None + name="DEP", + index=15, + number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DET", index=16, number=16, serialized_options=None, type=None + name="DET", + index=16, + number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DISCOURSE", index=17, number=17, serialized_options=None, type=None + name="DISCOURSE", + index=17, + number=17, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DOBJ", index=18, number=18, serialized_options=None, type=None + name="DOBJ", + index=18, + number=18, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="EXPL", index=19, number=19, serialized_options=None, type=None + name="EXPL", + index=19, + number=19, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GOESWITH", index=20, number=20, serialized_options=None, type=None + name="GOESWITH", + index=20, + number=20, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="IOBJ", index=21, number=21, serialized_options=None, type=None + name="IOBJ", + index=21, + number=21, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MARK", index=22, number=22, serialized_options=None, type=None + name="MARK", + index=22, + number=22, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MWE", index=23, number=23, serialized_options=None, type=None + name="MWE", + index=23, + number=23, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MWV", index=24, number=24, serialized_options=None, type=None + name="MWV", + index=24, + number=24, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NEG", index=25, number=25, serialized_options=None, type=None + name="NEG", + index=25, + number=25, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( - name="NN", index=26, number=26, serialized_options=None, type=None + name="NN", + index=26, + number=26, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NPADVMOD", index=27, number=27, serialized_options=None, type=None + name="NPADVMOD", + index=27, + number=27, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NSUBJ", index=28, number=28, serialized_options=None, type=None + name="NSUBJ", + index=28, + number=28, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NSUBJPASS", index=29, number=29, serialized_options=None, type=None + name="NSUBJPASS", + index=29, + number=29, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUM", index=30, number=30, serialized_options=None, type=None + name="NUM", + index=30, + number=30, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUMBER", index=31, number=31, serialized_options=None, type=None + name="NUMBER", + index=31, + number=31, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="P", index=32, number=32, serialized_options=None, type=None + name="P", + index=32, + number=32, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PARATAXIS", index=33, number=33, serialized_options=None, type=None + name="PARATAXIS", + index=33, + number=33, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PARTMOD", index=34, number=34, serialized_options=None, type=None + name="PARTMOD", + index=34, + number=34, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PCOMP", index=35, number=35, serialized_options=None, type=None + name="PCOMP", + index=35, + number=35, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POBJ", index=36, number=36, serialized_options=None, type=None + name="POBJ", + index=36, + number=36, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSS", index=37, number=37, serialized_options=None, type=None + name="POSS", + index=37, + number=37, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSTNEG", index=38, number=38, serialized_options=None, type=None + name="POSTNEG", + index=38, + number=38, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRECOMP", index=39, number=39, serialized_options=None, type=None + name="PRECOMP", + index=39, + number=39, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRECONJ", index=40, number=40, serialized_options=None, type=None + name="PRECONJ", + index=40, + number=40, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, 
), _descriptor.EnumValueDescriptor( - name="PREDET", index=41, number=41, serialized_options=None, type=None + name="PREDET", + index=41, + number=41, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PREF", index=42, number=42, serialized_options=None, type=None + name="PREF", + index=42, + number=42, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PREP", index=43, number=43, serialized_options=None, type=None + name="PREP", + index=43, + number=43, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRONL", index=44, number=44, serialized_options=None, type=None + name="PRONL", + index=44, + number=44, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRT", index=45, number=45, serialized_options=None, type=None + name="PRT", + index=45, + number=45, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PS", index=46, number=46, serialized_options=None, type=None + name="PS", + index=46, + number=46, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="QUANTMOD", index=47, number=47, serialized_options=None, type=None + name="QUANTMOD", + index=47, + number=47, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RCMOD", index=48, number=48, serialized_options=None, type=None + name="RCMOD", + index=48, + number=48, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RCMODREL", index=49, number=49, serialized_options=None, type=None + name="RCMODREL", + index=49, + number=49, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RDROP", index=50, number=50, serialized_options=None, type=None + name="RDROP", + index=50, + number=50, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="REF", index=51, number=51, serialized_options=None, type=None + name="REF", + index=51, + number=51, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="REMNANT", index=52, number=52, serialized_options=None, type=None + name="REMNANT", + index=52, + number=52, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="REPARANDUM", index=53, number=53, serialized_options=None, type=None + name="REPARANDUM", + index=53, + number=53, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ROOT", index=54, number=54, serialized_options=None, type=None + name="ROOT", + index=54, + number=54, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SNUM", index=55, number=55, serialized_options=None, type=None + name="SNUM", + index=55, + number=55, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( - name="SUFF", index=56, number=56, serialized_options=None, type=None + name="SUFF", + index=56, + number=56, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TMOD", index=57, number=57, serialized_options=None, type=None + name="TMOD", + index=57, + number=57, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TOPIC", index=58, number=58, serialized_options=None, type=None + name="TOPIC", + index=58, + number=58, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VMOD", index=59, number=59, serialized_options=None, type=None + name="VMOD", + index=59, + number=59, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VOCATIVE", index=60, number=60, serialized_options=None, type=None + name="VOCATIVE", + index=60, + number=60, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="XCOMP", index=61, number=61, serialized_options=None, type=None + name="XCOMP", + index=61, + number=61, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SUFFIX", index=62, number=62, serialized_options=None, type=None + name="SUFFIX", + index=62, + number=62, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TITLE", index=63, number=63, serialized_options=None, type=None + name="TITLE", + index=63, + number=63, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ADVPHMOD", index=64, number=64, serialized_options=None, type=None + name="ADVPHMOD", + index=64, + number=64, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUXCAUS", index=65, number=65, serialized_options=None, type=None + name="AUXCAUS", + index=65, + number=65, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="AUXVV", index=66, number=66, serialized_options=None, type=None + name="AUXVV", + index=66, + number=66, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DTMOD", index=67, number=67, serialized_options=None, type=None + name="DTMOD", + index=67, + number=67, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FOREIGN", index=68, number=68, serialized_options=None, type=None + name="FOREIGN", + index=68, + number=68, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="KW", index=69, number=69, serialized_options=None, type=None + name="KW", + index=69, + number=69, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LIST", index=70, number=70, serialized_options=None, type=None + name="LIST", + index=70, + number=70, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( - name="NOMC", index=71, number=71, serialized_options=None, type=None + name="NOMC", + index=71, + number=71, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOMCSUBJ", index=72, number=72, serialized_options=None, type=None + name="NOMCSUBJ", + index=72, + number=72, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NOMCSUBJPASS", index=73, number=73, serialized_options=None, type=None + name="NOMCSUBJPASS", + index=73, + number=73, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NUMC", index=74, number=74, serialized_options=None, type=None + name="NUMC", + index=74, + number=74, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="COP", index=75, number=75, serialized_options=None, type=None + name="COP", + index=75, + number=75, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DISLOCATED", index=76, number=76, serialized_options=None, type=None + name="DISLOCATED", + index=76, + number=76, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ASP", index=77, number=77, serialized_options=None, type=None + name="ASP", + index=77, + number=77, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GMOD", index=78, number=78, serialized_options=None, type=None + name="GMOD", + index=78, + number=78, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="GOBJ", index=79, number=79, serialized_options=None, type=None + name="GOBJ", + index=79, + number=79, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INFMOD", index=80, number=80, serialized_options=None, type=None + name="INFMOD", + index=80, + number=80, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MES", index=81, number=81, serialized_options=None, type=None + name="MES", + index=81, + number=81, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NCOMP", index=82, number=82, serialized_options=None, type=None + name="NCOMP", + index=82, + number=82, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -863,15 +1766,31 @@ full_name="google.cloud.language.v1beta2.EntityMention.Type", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name="TYPE_UNKNOWN", index=0, number=0, serialized_options=None, type=None + name="TYPE_UNKNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PROPER", index=1, number=1, serialized_options=None, type=None + name="PROPER", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( 
- name="COMMON", index=2, number=2, serialized_options=None, type=None + name="COMMON", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -888,6 +1807,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="type", @@ -906,6 +1826,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="content", @@ -916,7 +1837,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -924,6 +1845,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="gcs_content_uri", @@ -934,7 +1856,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -942,6 +1864,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -952,7 +1875,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -960,6 +1883,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -975,6 +1899,7 @@ full_name="google.cloud.language.v1beta2.Document.source", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ) ], @@ -989,6 +1914,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -1007,6 +1933,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sentiment", @@ -1025,6 +1952,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1046,6 +1974,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -1056,7 +1985,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1064,6 +1993,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -1074,7 +2004,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1082,12 +2012,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1102,6 +2033,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1112,7 +2044,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1120,6 +2052,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type", @@ -1138,6 +2071,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="metadata", @@ -1156,6 +2090,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="salience", @@ -1174,6 +2109,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mentions", @@ -1192,6 +2128,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sentiment", @@ -1210,6 +2147,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1231,6 +2169,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -1249,6 +2188,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="part_of_speech", @@ -1267,6 +2207,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="dependency_edge", @@ -1285,6 +2226,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="lemma", @@ -1295,7 +2237,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1303,6 +2245,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1324,6 +2267,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="magnitude", @@ -1342,6 +2286,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", @@ -1360,6 +2305,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1381,6 +2327,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="tag", @@ -1399,6 +2346,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="aspect", @@ -1417,6 +2365,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="case", @@ -1435,6 +2384,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="form", @@ -1453,6 +2403,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="gender", @@ -1471,6 +2422,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mood", @@ -1489,6 +2441,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="number", @@ -1507,6 +2460,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="person", @@ -1525,6 +2479,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="proper", @@ -1543,6 +2498,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="reciprocity", @@ -1561,6 +2517,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tense", @@ -1579,6 +2536,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="voice", @@ -1597,6 +2555,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1631,6 +2590,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="head_token_index", @@ -1649,6 +2609,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="label", @@ -1667,6 +2628,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1688,6 +2650,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -1706,6 +2669,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type", @@ -1724,6 +2688,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sentiment", @@ -1742,6 +2707,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1763,6 +2729,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="content", @@ -1773,7 +2740,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1781,6 +2748,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="begin_offset", @@ -1799,6 +2767,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1820,6 +2789,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1830,7 +2800,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1838,6 +2808,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1856,6 +2827,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1877,6 +2849,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -1893,8 +2866,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -1913,6 +2887,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1934,6 +2909,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document_sentiment", @@ -1952,6 +2928,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -1962,7 +2939,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1970,6 +2947,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sentences", @@ -1988,6 +2966,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2009,6 +2988,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2025,8 +3005,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -2045,6 +3026,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2066,6 +3048,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entities", @@ -2084,6 +3067,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -2094,7 +3078,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2102,6 +3086,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2123,6 +3108,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2139,8 +3125,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -2159,6 +3146,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2180,6 +3168,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entities", @@ -2198,6 +3187,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -2208,7 +3198,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2216,6 +3206,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2237,6 +3228,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2253,8 +3245,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -2273,6 +3266,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2294,6 +3288,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="sentences", @@ -2312,6 +3307,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tokens", @@ -2330,6 +3326,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -2340,7 +3337,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2348,6 +3345,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2369,6 +3367,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2385,8 +3384,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2408,6 +3408,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name="categories", @@ -2426,6 +3427,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2447,6 +3449,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="extract_syntax", @@ -2465,6 +3468,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="extract_entities", @@ -2483,6 +3487,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="extract_document_sentiment", @@ -2501,6 +3506,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="extract_entity_sentiment", @@ -2519,6 +3525,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="classify_text", @@ -2537,6 +3544,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2557,6 +3565,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="document", @@ -2573,8 +3582,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="features", @@ -2591,8 +3601,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="encoding_type", @@ -2611,6 +3622,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2632,6 +3644,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="sentences", @@ -2650,6 +3663,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tokens", @@ -2668,6 +3682,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="entities", @@ -2686,6 +3701,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="document_sentiment", @@ -2704,6 +3720,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language", @@ -2714,7 +3731,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2722,6 +3739,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="categories", @@ -2740,6 +3758,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2870,11 +3889,12 @@ Document = _reflection.GeneratedProtocolMessageType( "Document", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENT, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""################################################################ # + { + "DESCRIPTOR": _DOCUMENT, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """################################################################ # Represents the input to API methods. + Attributes: type: Required. If the type is not set or is ``TYPE_UNSPECIFIED``, @@ -2888,7 +3908,7 @@ gcs_content_uri: The Google Cloud Storage URI where the file content is located. This URI must be of the form: - gs://bucket\_name/object\_name. For more details, see + gs://bucket_name/object_name. For more details, see https://cloud.google.com/storage/docs/reference-uris. NOTE: Cloud Storage object versioning is not supported. language: @@ -2902,50 +3922,52 @@ ``INVALID_ARGUMENT`` error is returned. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.Document) - ), + }, ) _sym_db.RegisterMessage(Document) Sentence = _reflection.GeneratedProtocolMessageType( "Sentence", (_message.Message,), - dict( - DESCRIPTOR=_SENTENCE, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents a sentence in the input document. + { + "DESCRIPTOR": _SENTENCE, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents a sentence in the input document. + Attributes: text: The sentence text. sentiment: For calls to [AnalyzeSentiment][] or if [AnnotateTextRequest.F - eatures.extract\_document\_sentiment][google.cloud.language.v1 - beta2.AnnotateTextRequest.Features.extract\_document\_sentimen - t] is set to true, this field will contain the sentiment for - the sentence. + eatures.extract_document_sentiment][google.cloud.language.v1be + ta2.AnnotateTextRequest.Features.extract_document_sentiment] + is set to true, this field will contain the sentiment for the + sentence. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.Sentence) - ), + }, ) _sym_db.RegisterMessage(Sentence) Entity = _reflection.GeneratedProtocolMessageType( "Entity", (_message.Message,), - dict( - MetadataEntry=_reflection.GeneratedProtocolMessageType( + { + "MetadataEntry": _reflection.GeneratedProtocolMessageType( "MetadataEntry", (_message.Message,), - dict( - DESCRIPTOR=_ENTITY_METADATAENTRY, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2" + { + "DESCRIPTOR": _ENTITY_METADATAENTRY, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2" # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.Entity.MetadataEntry) - ), + }, ), - DESCRIPTOR=_ENTITY, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents a phrase in the text that is a known entity, such as a + "DESCRIPTOR": _ENTITY, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents a phrase in the text that is a known entity, such as a person, an organization, or location. The API associates information, such as salience and mentions, with entities. + Attributes: name: The representative name for the entity. @@ -2968,13 +3990,13 @@ currently supports proper noun mentions. 
sentiment: For calls to [AnalyzeEntitySentiment][] or if [AnnotateTextReq - uest.Features.extract\_entity\_sentiment][google.cloud.languag - e.v1beta2.AnnotateTextRequest.Features.extract\_entity\_sentim - ent] is set to true, this field will contain the aggregate + uest.Features.extract_entity_sentiment][google.cloud.language. + v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] + is set to true, this field will contain the aggregate sentiment expressed for this entity in the provided document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.Entity) - ), + }, ) _sym_db.RegisterMessage(Entity) _sym_db.RegisterMessage(Entity.MetadataEntry) @@ -2982,10 +4004,11 @@ Token = _reflection.GeneratedProtocolMessageType( "Token", (_message.Message,), - dict( - DESCRIPTOR=_TOKEN, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents the smallest syntactic building block of the text. + { + "DESCRIPTOR": _TOKEN, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents the smallest syntactic building block of the text. + Attributes: text: The token text. @@ -2999,18 +4022,19 @@ the token. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.Token) - ), + }, ) _sym_db.RegisterMessage(Token) Sentiment = _reflection.GeneratedProtocolMessageType( "Sentiment", (_message.Message,), - dict( - DESCRIPTOR=_SENTIMENT, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents the feeling associated with the entire text or entities in + { + "DESCRIPTOR": _SENTIMENT, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents the feeling associated with the entire text or entities in the text. Next ID: 6 + Attributes: magnitude: A non-negative number in the [0, +inf) range, which represents @@ -3021,17 +4045,18 @@ (positive sentiment). """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.Sentiment) - ), + }, ) _sym_db.RegisterMessage(Sentiment) PartOfSpeech = _reflection.GeneratedProtocolMessageType( "PartOfSpeech", (_message.Message,), - dict( - DESCRIPTOR=_PARTOFSPEECH, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents part of speech information for a token. + { + "DESCRIPTOR": _PARTOFSPEECH, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents part of speech information for a token. + Attributes: tag: The part of speech tag. @@ -3059,17 +4084,18 @@ The grammatical voice. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.PartOfSpeech) - ), + }, ) _sym_db.RegisterMessage(PartOfSpeech) DependencyEdge = _reflection.GeneratedProtocolMessageType( "DependencyEdge", (_message.Message,), - dict( - DESCRIPTOR=_DEPENDENCYEDGE, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents dependency parse tree information for a token. + { + "DESCRIPTOR": _DEPENDENCYEDGE, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents dependency parse tree information for a token. + Attributes: head_token_index: Represents the head of this token in the dependency tree. This @@ -3081,18 +4107,19 @@ The parse label for the token. 
""", # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.DependencyEdge) - ), + }, ) _sym_db.RegisterMessage(DependencyEdge) EntityMention = _reflection.GeneratedProtocolMessageType( "EntityMention", (_message.Message,), - dict( - DESCRIPTOR=_ENTITYMENTION, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents a mention for an entity in the text. Currently, proper noun + { + "DESCRIPTOR": _ENTITYMENTION, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents a mention for an entity in the text. Currently, proper noun mentions are supported. + Attributes: text: The mention text. @@ -3100,24 +4127,25 @@ The type of the entity mention. sentiment: For calls to [AnalyzeEntitySentiment][] or if [AnnotateTextReq - uest.Features.extract\_entity\_sentiment][google.cloud.languag - e.v1beta2.AnnotateTextRequest.Features.extract\_entity\_sentim - ent] is set to true, this field will contain the sentiment + uest.Features.extract_entity_sentiment][google.cloud.language. + v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] + is set to true, this field will contain the sentiment expressed for this mention of the entity in the provided document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.EntityMention) - ), + }, ) _sym_db.RegisterMessage(EntityMention) TextSpan = _reflection.GeneratedProtocolMessageType( "TextSpan", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSPAN, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents an output piece of text. + { + "DESCRIPTOR": _TEXTSPAN, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents an output piece of text. + Attributes: content: The content of the output text. @@ -3128,39 +4156,41 @@ specified in the API request. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.TextSpan) - ), + }, ) _sym_db.RegisterMessage(TextSpan) ClassificationCategory = _reflection.GeneratedProtocolMessageType( "ClassificationCategory", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONCATEGORY, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""Represents a category returned from the text classifier. + { + "DESCRIPTOR": _CLASSIFICATIONCATEGORY, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """Represents a category returned from the text classifier. + Attributes: name: The name of the category representing the document, from the `predefined taxonomy `__. confidence: - The classifier's confidence of the category. Number represents + The classifier’s confidence of the category. Number represents how certain the classifier is that this category represents the given text. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.ClassificationCategory) - ), + }, ) _sym_db.RegisterMessage(ClassificationCategory) AnalyzeSentimentRequest = _reflection.GeneratedProtocolMessageType( "AnalyzeSentimentRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZESENTIMENTREQUEST, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The sentiment analysis request message. + { + "DESCRIPTOR": _ANALYZESENTIMENTREQUEST, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The sentiment analysis request message. + Attributes: document: Required. Input document. 
@@ -3169,17 +4199,18 @@ offsets for the sentence sentiment. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnalyzeSentimentRequest) - ), + }, ) _sym_db.RegisterMessage(AnalyzeSentimentRequest) AnalyzeSentimentResponse = _reflection.GeneratedProtocolMessageType( "AnalyzeSentimentResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZESENTIMENTRESPONSE, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The sentiment analysis response message. + { + "DESCRIPTOR": _ANALYZESENTIMENTRESPONSE, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The sentiment analysis response message. + Attributes: document_sentiment: The overall sentiment of the input document. @@ -3193,17 +4224,18 @@ The sentiment for all the sentences in the document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnalyzeSentimentResponse) - ), + }, ) _sym_db.RegisterMessage(AnalyzeSentimentResponse) AnalyzeEntitySentimentRequest = _reflection.GeneratedProtocolMessageType( "AnalyzeEntitySentimentRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZEENTITYSENTIMENTREQUEST, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The entity-level sentiment analysis request message. + { + "DESCRIPTOR": _ANALYZEENTITYSENTIMENTREQUEST, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The entity-level sentiment analysis request message. + Attributes: document: Required. Input document. @@ -3211,17 +4243,18 @@ The encoding type used by the API to calculate offsets. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnalyzeEntitySentimentRequest) - ), + }, ) _sym_db.RegisterMessage(AnalyzeEntitySentimentRequest) AnalyzeEntitySentimentResponse = _reflection.GeneratedProtocolMessageType( "AnalyzeEntitySentimentResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZEENTITYSENTIMENTRESPONSE, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The entity-level sentiment analysis response message. + { + "DESCRIPTOR": _ANALYZEENTITYSENTIMENTRESPONSE, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The entity-level sentiment analysis response message. + Attributes: entities: The recognized entities in the input document with associated @@ -3234,17 +4267,18 @@ details. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnalyzeEntitySentimentResponse) - ), + }, ) _sym_db.RegisterMessage(AnalyzeEntitySentimentResponse) AnalyzeEntitiesRequest = _reflection.GeneratedProtocolMessageType( "AnalyzeEntitiesRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZEENTITIESREQUEST, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The entity analysis request message. + { + "DESCRIPTOR": _ANALYZEENTITIESREQUEST, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The entity analysis request message. + Attributes: document: Required. Input document. @@ -3252,17 +4286,18 @@ The encoding type used by the API to calculate offsets. 
""", # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnalyzeEntitiesRequest) - ), + }, ) _sym_db.RegisterMessage(AnalyzeEntitiesRequest) AnalyzeEntitiesResponse = _reflection.GeneratedProtocolMessageType( "AnalyzeEntitiesResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZEENTITIESRESPONSE, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The entity analysis response message. + { + "DESCRIPTOR": _ANALYZEENTITIESRESPONSE, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The entity analysis response message. + Attributes: entities: The recognized entities in the input document. @@ -3274,17 +4309,18 @@ details. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnalyzeEntitiesResponse) - ), + }, ) _sym_db.RegisterMessage(AnalyzeEntitiesResponse) AnalyzeSyntaxRequest = _reflection.GeneratedProtocolMessageType( "AnalyzeSyntaxRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZESYNTAXREQUEST, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The syntax analysis request message. + { + "DESCRIPTOR": _ANALYZESYNTAXREQUEST, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The syntax analysis request message. + Attributes: document: Required. Input document. @@ -3292,17 +4328,18 @@ The encoding type used by the API to calculate offsets. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnalyzeSyntaxRequest) - ), + }, ) _sym_db.RegisterMessage(AnalyzeSyntaxRequest) AnalyzeSyntaxResponse = _reflection.GeneratedProtocolMessageType( "AnalyzeSyntaxResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANALYZESYNTAXRESPONSE, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The syntax analysis response message. + { + "DESCRIPTOR": _ANALYZESYNTAXRESPONSE, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The syntax analysis response message. + Attributes: sentences: Sentences in the input document. @@ -3317,55 +4354,58 @@ details. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnalyzeSyntaxResponse) - ), + }, ) _sym_db.RegisterMessage(AnalyzeSyntaxResponse) ClassifyTextRequest = _reflection.GeneratedProtocolMessageType( "ClassifyTextRequest", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFYTEXTREQUEST, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The document classification request message. + { + "DESCRIPTOR": _CLASSIFYTEXTREQUEST, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The document classification request message. + Attributes: document: Required. Input document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.ClassifyTextRequest) - ), + }, ) _sym_db.RegisterMessage(ClassifyTextRequest) ClassifyTextResponse = _reflection.GeneratedProtocolMessageType( "ClassifyTextResponse", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFYTEXTRESPONSE, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The document classification response message. + { + "DESCRIPTOR": _CLASSIFYTEXTRESPONSE, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The document classification response message. + Attributes: categories: Categories representing the input document. 
""", # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.ClassifyTextResponse) - ), + }, ) _sym_db.RegisterMessage(ClassifyTextResponse) AnnotateTextRequest = _reflection.GeneratedProtocolMessageType( "AnnotateTextRequest", (_message.Message,), - dict( - Features=_reflection.GeneratedProtocolMessageType( + { + "Features": _reflection.GeneratedProtocolMessageType( "Features", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATETEXTREQUEST_FEATURES, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""All available features for sentiment, syntax, and semantic analysis. + { + "DESCRIPTOR": _ANNOTATETEXTREQUEST_FEATURES, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """All available features for sentiment, syntax, and semantic analysis. Setting each one to true will enable that specific analysis for the input. Next ID: 10 + Attributes: extract_syntax: Extract syntax information. @@ -3382,12 +4422,13 @@ language/docs/categories>`__. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnnotateTextRequest.Features) - ), + }, ), - DESCRIPTOR=_ANNOTATETEXTREQUEST, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The request message for the text annotation API, which can perform + "DESCRIPTOR": _ANNOTATETEXTREQUEST, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The request message for the text annotation API, which can perform multiple analysis types (sentiment, entities, and syntax) in one call. + Attributes: document: Required. Input document. @@ -3397,7 +4438,7 @@ The encoding type used by the API to calculate offsets. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnnotateTextRequest) - ), + }, ) _sym_db.RegisterMessage(AnnotateTextRequest) _sym_db.RegisterMessage(AnnotateTextRequest.Features) @@ -3405,30 +4446,31 @@ AnnotateTextResponse = _reflection.GeneratedProtocolMessageType( "AnnotateTextResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATETEXTRESPONSE, - __module__="google.cloud.language_v1beta2.proto.language_service_pb2", - __doc__="""The text annotations response message. + { + "DESCRIPTOR": _ANNOTATETEXTRESPONSE, + "__module__": "google.cloud.language_v1beta2.proto.language_service_pb2", + "__doc__": """The text annotations response message. + Attributes: sentences: Sentences in the input document. Populated if the user enables - [AnnotateTextRequest.Features.extract\_syntax][google.cloud.la - nguage.v1beta2.AnnotateTextRequest.Features.extract\_syntax]. + [AnnotateTextRequest.Features.extract_syntax][google.cloud.lan + guage.v1beta2.AnnotateTextRequest.Features.extract_syntax]. tokens: Tokens, along with their syntactic information, in the input document. Populated if the user enables [AnnotateTextRequest.F - eatures.extract\_syntax][google.cloud.language.v1beta2.Annotat - eTextRequest.Features.extract\_syntax]. + eatures.extract_syntax][google.cloud.language.v1beta2.Annotate + TextRequest.Features.extract_syntax]. entities: Entities, along with their semantic information, in the input document. Populated if the user enables [AnnotateTextRequest.F - eatures.extract\_entities][google.cloud.language.v1beta2.Annot - ateTextRequest.Features.extract\_entities]. + eatures.extract_entities][google.cloud.language.v1beta2.Annota + teTextRequest.Features.extract_entities]. document_sentiment: The overall sentiment for the document. 
Populated if the user - enables [AnnotateTextRequest.Features.extract\_document\_senti - ment][google.cloud.language.v1beta2.AnnotateTextRequest.Featur - es.extract\_document\_sentiment]. + enables [AnnotateTextRequest.Features.extract_document_sentime + nt][google.cloud.language.v1beta2.AnnotateTextRequest.Features + .extract_document_sentiment]. language: The language of the text, which will be the same as the language specified in the request or, if not specified, the @@ -3439,7 +4481,7 @@ Categories identified in the input document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1beta2.AnnotateTextResponse) - ), + }, ) _sym_db.RegisterMessage(AnnotateTextResponse) @@ -3459,9 +4501,8 @@ full_name="google.cloud.language.v1beta2.LanguageService", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A\027language.googleapis.com\322A]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platform" - ), + serialized_options=b"\312A\027language.googleapis.com\322A]https://www.googleapis.com/auth/cloud-language,https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=7094, serialized_end=8512, methods=[ @@ -3472,9 +4513,8 @@ containing_service=None, input_type=_ANALYZESENTIMENTREQUEST, output_type=_ANALYZESENTIMENTRESPONSE, - serialized_options=_b( - '\202\323\344\223\002("#/v1beta2/documents:analyzeSentiment:\001*\332A\026document,encoding_type\332A\010document' - ), + serialized_options=b'\202\323\344\223\002("#/v1beta2/documents:analyzeSentiment:\001*\332A\026document,encoding_type\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="AnalyzeEntities", @@ -3483,9 +4523,8 @@ containing_service=None, input_type=_ANALYZEENTITIESREQUEST, output_type=_ANALYZEENTITIESRESPONSE, - serialized_options=_b( - '\202\323\344\223\002\'""/v1beta2/documents:analyzeEntities:\001*\332A\026document,encoding_type\332A\010document' - ), + serialized_options=b'\202\323\344\223\002\'""/v1beta2/documents:analyzeEntities:\001*\332A\026document,encoding_type\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="AnalyzeEntitySentiment", @@ -3494,9 +4533,8 @@ containing_service=None, input_type=_ANALYZEENTITYSENTIMENTREQUEST, output_type=_ANALYZEENTITYSENTIMENTRESPONSE, - serialized_options=_b( - '\202\323\344\223\002.")/v1beta2/documents:analyzeEntitySentiment:\001*\332A\026document,encoding_type\332A\010document' - ), + serialized_options=b'\202\323\344\223\002.")/v1beta2/documents:analyzeEntitySentiment:\001*\332A\026document,encoding_type\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="AnalyzeSyntax", @@ -3505,9 +4543,8 @@ containing_service=None, input_type=_ANALYZESYNTAXREQUEST, output_type=_ANALYZESYNTAXRESPONSE, - serialized_options=_b( - '\202\323\344\223\002%" /v1beta2/documents:analyzeSyntax:\001*\332A\026document,encoding_type\332A\010document' - ), + serialized_options=b'\202\323\344\223\002%" /v1beta2/documents:analyzeSyntax:\001*\332A\026document,encoding_type\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ClassifyText", @@ -3516,9 +4553,8 @@ containing_service=None, input_type=_CLASSIFYTEXTREQUEST, output_type=_CLASSIFYTEXTRESPONSE, 
- serialized_options=_b( - '\202\323\344\223\002$"\037/v1beta2/documents:classifyText:\001*\332A\010document' - ), + serialized_options=b'\202\323\344\223\002$"\037/v1beta2/documents:classifyText:\001*\332A\010document', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="AnnotateText", @@ -3527,9 +4563,8 @@ containing_service=None, input_type=_ANNOTATETEXTREQUEST, output_type=_ANNOTATETEXTRESPONSE, - serialized_options=_b( - '\202\323\344\223\002$"\037/v1beta2/documents:annotateText:\001*\332A\037document,features,encoding_type\332A\021document,features' - ), + serialized_options=b'\202\323\344\223\002$"\037/v1beta2/documents:annotateText:\001*\332A\037document,features,encoding_type\332A\021document,features', + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh new file mode 100755 index 00000000..ff599eb2 --- /dev/null +++ b/scripts/decrypt-secrets.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +ROOT=$( dirname "$DIR" ) + +# Work from the project root. +cd $ROOT + +# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. +PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" + +gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + > testing/test-env.sh +gcloud secrets versions access latest \ + --secret="python-docs-samples-service-account" \ + > testing/service-account.json +gcloud secrets versions access latest \ + --secret="python-docs-samples-client-secrets" \ + > testing/client-secrets.json \ No newline at end of file diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py new file mode 100644 index 00000000..d309d6e9 --- /dev/null +++ b/scripts/readme-gen/readme_gen.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Generates READMEs using configuration defined in yaml.""" + +import argparse +import io +import os +import subprocess + +import jinja2 +import yaml + + +jinja_env = jinja2.Environment( + trim_blocks=True, + loader=jinja2.FileSystemLoader( + os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')))) + +README_TMPL = jinja_env.get_template('README.tmpl.rst') + + +def get_help(file): + return subprocess.check_output(['python', file, '--help']).decode() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('source') + parser.add_argument('--destination', default='README.rst') + + args = parser.parse_args() + + source = os.path.abspath(args.source) + root = os.path.dirname(source) + destination = os.path.join(root, args.destination) + + jinja_env.globals['get_help'] = get_help + + with io.open(source, 'r') as f: + config = yaml.load(f) + + # This allows get_help to execute in the right directory. + os.chdir(root) + + output = README_TMPL.render(config) + + with io.open(destination, 'w') as f: + f.write(output) + + +if __name__ == '__main__': + main() diff --git a/scripts/readme-gen/templates/README.tmpl.rst b/scripts/readme-gen/templates/README.tmpl.rst new file mode 100644 index 00000000..4fd23976 --- /dev/null +++ b/scripts/readme-gen/templates/README.tmpl.rst @@ -0,0 +1,87 @@ +{# The following line is a lie. BUT! Once jinja2 is done with it, it will + become truth! #} +.. This file is automatically generated. Do not edit this file directly. + +{{product.name}} Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/README.rst + + +This directory contains samples for {{product.name}}. {{product.description}} + +{{description}} + +.. _{{product.name}}: {{product.url}} + +{% if required_api_url %} +To run the sample, you need to enable the API at: {{required_api_url}} +{% endif %} + +{% if required_role %} +To run the sample, you need to have `{{required_role}}` role. +{% endif %} + +{{other_required_steps}} + +{% if setup %} +Setup +------------------------------------------------------------------------------- + +{% for section in setup %} + +{% include section + '.tmpl.rst' %} + +{% endfor %} +{% endif %} + +{% if samples %} +Samples +------------------------------------------------------------------------------- + +{% for sample in samples %} +{{sample.name}} ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +{% if not sample.hide_cloudshell_button %} +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/{{sample.file}},{{folder}}/README.rst +{% endif %} + + +{{sample.description}} + +To run this sample: + +.. 
code-block:: bash + + $ python {{sample.file}} +{% if sample.show_help %} + + {{get_help(sample.file)|indent}} +{% endif %} + + +{% endfor %} +{% endif %} + +{% if cloud_client_library %} + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + +{% endif %} + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/scripts/readme-gen/templates/auth.tmpl.rst b/scripts/readme-gen/templates/auth.tmpl.rst new file mode 100644 index 00000000..1446b94a --- /dev/null +++ b/scripts/readme-gen/templates/auth.tmpl.rst @@ -0,0 +1,9 @@ +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started diff --git a/scripts/readme-gen/templates/auth_api_key.tmpl.rst b/scripts/readme-gen/templates/auth_api_key.tmpl.rst new file mode 100644 index 00000000..11957ce2 --- /dev/null +++ b/scripts/readme-gen/templates/auth_api_key.tmpl.rst @@ -0,0 +1,14 @@ +Authentication +++++++++++++++ + +Authentication for this service is done via an `API Key`_. To obtain an API +Key: + +1. Open the `Cloud Platform Console`_ +2. Make sure that billing is enabled for your project. +3. From the **Credentials** page, create a new **API Key** or use an existing + one for your project. + +.. _API Key: + https://developers.google.com/api-client-library/python/guide/aaa_apikeys +.. _Cloud Console: https://console.cloud.google.com/project?_ diff --git a/scripts/readme-gen/templates/install_deps.tmpl.rst b/scripts/readme-gen/templates/install_deps.tmpl.rst new file mode 100644 index 00000000..a0406dba --- /dev/null +++ b/scripts/readme-gen/templates/install_deps.tmpl.rst @@ -0,0 +1,29 @@ +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. 
_pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ diff --git a/scripts/readme-gen/templates/install_portaudio.tmpl.rst b/scripts/readme-gen/templates/install_portaudio.tmpl.rst new file mode 100644 index 00000000..5ea33d18 --- /dev/null +++ b/scripts/readme-gen/templates/install_portaudio.tmpl.rst @@ -0,0 +1,35 @@ +Install PortAudio ++++++++++++++++++ + +Install `PortAudio`_. This is required by the `PyAudio`_ library to stream +audio from your computer's microphone. PyAudio depends on PortAudio for cross-platform compatibility, and is installed differently depending on the +platform. + +* For Mac OS X, you can use `Homebrew`_:: + + brew install portaudio + + **Note**: if you encounter an error when running `pip install` that indicates + it can't find `portaudio.h`, try running `pip install` with the following + flags:: + + pip install --global-option='build_ext' \ + --global-option='-I/usr/local/include' \ + --global-option='-L/usr/local/lib' \ + pyaudio + +* For Debian / Ubuntu Linux:: + + apt-get install portaudio19-dev python-all-dev + +* Windows may work without having to install PortAudio explicitly (it will get + installed with PyAudio). + +For more details, see the `PyAudio installation`_ page. + + +.. _PyAudio: https://people.csail.mit.edu/hubert/pyaudio/ +.. _PortAudio: http://www.portaudio.com/ +.. _PyAudio installation: + https://people.csail.mit.edu/hubert/pyaudio/#downloads +.. _Homebrew: http://brew.sh diff --git a/synth.metadata b/synth.metadata index 2da14990..463eb8a9 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,32 +1,25 @@ { "sources": [ - { - "generator": { - "name": "artman", - "version": "2.0.0", - "dockerImage": "googleapis/artman@sha256:b3b47805231a305d0f40c4bf069df20f6a2635574e6d4259fac651d3f9f6e098" - } - }, { "git": { "name": ".", "remote": "https://github.com/googleapis/python-language.git", - "sha": "c39138189a0c93512c1cdbb7cd00047c73a80402" + "sha": "7dff809b94b5a1d001aeb1e7763dbbe624865600" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "42ee97c1b93a0e3759bbba3013da309f670a90ab", - "internalRef": "307114445" + "sha": "fa4d2ecd0804eb92f27a65fe65ce2a554a361b93", + "internalRef": "317110673" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f5e4c17dc78a966dbf29961dd01f9bbd63e20a04" + "sha": "cd522c3b4dde821766d95c80ae5aeb43d7a41170" } } ], @@ -37,8 +30,7 @@ "apiName": "language", "apiVersion": "v1beta2", "language": "python", - "generator": "gapic", - "config": "google/cloud/language/artman_language_v1beta2.yaml" + "generator": "bazel" } }, { @@ -47,8 +39,7 @@ "apiName": "language", "apiVersion": "v1", "language": "python", - "generator": "gapic", - "config": "google/cloud/language/artman_language_v1.yaml" + "generator": "bazel" } } ] diff --git a/synth.py b/synth.py index 5bf28084..e77efc01 100644 --- a/synth.py +++ b/synth.py @@ -58,4 +58,7 @@ "enums.EncodingType", ) +# TODO(busunkim): Use latest sphinx after microgenerator transition +s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') + s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git 
a/testing/.gitignore b/testing/.gitignore new file mode 100644 index 00000000..b05fbd63 --- /dev/null +++ b/testing/.gitignore @@ -0,0 +1,3 @@ +test-env.sh +service-account.json +client-secrets.json \ No newline at end of file From 3db1fa4508b7891733d362e81d3e20fe8e71b996 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 1 Jul 2020 15:02:17 -0700 Subject: [PATCH 025/209] chore: changes in docstring formatting (#27) This PR was generated using Autosynth. :rainbow: Synth log will be available here: https://source.cloud.google.com/results/invocations/1deda303-9d5e-4b48-a4b5-1bc8f66830f9/targets - [ ] To automatically regenerate this PR, check this box. --- .../language_v1/proto/language_service_pb2.py | 34 +++++++++---------- synth.metadata | 2 +- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/google/cloud/language_v1/proto/language_service_pb2.py b/google/cloud/language_v1/proto/language_service_pb2.py index 675c5ad4..e4eb90c4 100644 --- a/google/cloud/language_v1/proto/language_service_pb2.py +++ b/google/cloud/language_v1/proto/language_service_pb2.py @@ -3890,8 +3890,7 @@ { "DESCRIPTOR": _DOCUMENT, "__module__": "google.cloud.language_v1.proto.language_service_pb2", - "__doc__": """################################################################ # - Represents the input to API methods. + "__doc__": """Represents the input to API methods. Attributes: type: @@ -3936,11 +3935,11 @@ text: The sentence text. sentiment: - For calls to [AnalyzeSentiment][] or if [AnnotateTextRequest.F - eatures.extract_document_sentiment][google.cloud.language.v1.A - nnotateTextRequest.Features.extract_document_sentiment] is set - to true, this field will contain the sentiment for the - sentence. + For calls to [AnalyzeSentiment]` <#section>`__ or if [Annotate + TextRequest.Features.extract_document_sentiment][google.cloud. + language.v1.AnnotateTextRequest.Features.extract_document_sent + iment] is set to true, this field will contain the sentiment + for the sentence. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Sentence) }, @@ -3987,11 +3986,11 @@ The mentions of this entity in the input document. The API currently supports proper noun mentions. sentiment: - For calls to [AnalyzeEntitySentiment][] or if [AnnotateTextReq - uest.Features.extract_entity_sentiment][google.cloud.language. - v1.AnnotateTextRequest.Features.extract_entity_sentiment] is - set to true, this field will contain the aggregate sentiment - expressed for this entity in the provided document. + For calls to [AnalyzeEntitySentiment]` <#section>`__ or if [An + notateTextRequest.Features.extract_entity_sentiment][google.cl + oud.language.v1.AnnotateTextRequest.Features.extract_entity_se + ntiment] is set to true, this field will contain the aggregate + sentiment expressed for this entity in the provided document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Entity) }, @@ -4128,11 +4127,12 @@ type: The type of the entity mention. sentiment: - For calls to [AnalyzeEntitySentiment][] or if [AnnotateTextReq - uest.Features.extract_entity_sentiment][google.cloud.language. - v1.AnnotateTextRequest.Features.extract_entity_sentiment] is - set to true, this field will contain the sentiment expressed - for this mention of the entity in the provided document. 
+ For calls to [AnalyzeEntitySentiment]` <#section>`__ or if [An + notateTextRequest.Features.extract_entity_sentiment][google.cl + oud.language.v1.AnnotateTextRequest.Features.extract_entity_se + ntiment] is set to true, this field will contain the sentiment + expressed for this mention of the entity in the provided + document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.EntityMention) }, diff --git a/synth.metadata b/synth.metadata index 463eb8a9..ab77990d 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-language.git", - "sha": "7dff809b94b5a1d001aeb1e7763dbbe624865600" + "sha": "a489102ca0f5ab302ec8974728a52065f2ea8857" } }, { From bbfd9ec76f01bfa01816f47ff20feaf0375a7930 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 2 Jul 2020 10:18:31 -0700 Subject: [PATCH 026/209] chore: update docstring format (#30) --- .../language_v1/proto/language_service_pb2.py | 34 +++++++++---------- synth.metadata | 2 +- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/google/cloud/language_v1/proto/language_service_pb2.py b/google/cloud/language_v1/proto/language_service_pb2.py index e4eb90c4..675c5ad4 100644 --- a/google/cloud/language_v1/proto/language_service_pb2.py +++ b/google/cloud/language_v1/proto/language_service_pb2.py @@ -3890,7 +3890,8 @@ { "DESCRIPTOR": _DOCUMENT, "__module__": "google.cloud.language_v1.proto.language_service_pb2", - "__doc__": """Represents the input to API methods. + "__doc__": """################################################################ # + Represents the input to API methods. Attributes: type: @@ -3935,11 +3936,11 @@ text: The sentence text. sentiment: - For calls to [AnalyzeSentiment]` <#section>`__ or if [Annotate - TextRequest.Features.extract_document_sentiment][google.cloud. - language.v1.AnnotateTextRequest.Features.extract_document_sent - iment] is set to true, this field will contain the sentiment - for the sentence. + For calls to [AnalyzeSentiment][] or if [AnnotateTextRequest.F + eatures.extract_document_sentiment][google.cloud.language.v1.A + nnotateTextRequest.Features.extract_document_sentiment] is set + to true, this field will contain the sentiment for the + sentence. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Sentence) }, @@ -3986,11 +3987,11 @@ The mentions of this entity in the input document. The API currently supports proper noun mentions. sentiment: - For calls to [AnalyzeEntitySentiment]` <#section>`__ or if [An - notateTextRequest.Features.extract_entity_sentiment][google.cl - oud.language.v1.AnnotateTextRequest.Features.extract_entity_se - ntiment] is set to true, this field will contain the aggregate - sentiment expressed for this entity in the provided document. + For calls to [AnalyzeEntitySentiment][] or if [AnnotateTextReq + uest.Features.extract_entity_sentiment][google.cloud.language. + v1.AnnotateTextRequest.Features.extract_entity_sentiment] is + set to true, this field will contain the aggregate sentiment + expressed for this entity in the provided document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.Entity) }, @@ -4127,12 +4128,11 @@ type: The type of the entity mention. 
sentiment: - For calls to [AnalyzeEntitySentiment]` <#section>`__ or if [An - notateTextRequest.Features.extract_entity_sentiment][google.cl - oud.language.v1.AnnotateTextRequest.Features.extract_entity_se - ntiment] is set to true, this field will contain the sentiment - expressed for this mention of the entity in the provided - document. + For calls to [AnalyzeEntitySentiment][] or if [AnnotateTextReq + uest.Features.extract_entity_sentiment][google.cloud.language. + v1.AnnotateTextRequest.Features.extract_entity_sentiment] is + set to true, this field will contain the sentiment expressed + for this mention of the entity in the provided document. """, # @@protoc_insertion_point(class_scope:google.cloud.language.v1.EntityMention) }, diff --git a/synth.metadata b/synth.metadata index ab77990d..f631bc79 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-language.git", - "sha": "a489102ca0f5ab302ec8974728a52065f2ea8857" + "sha": "3db1fa4508b7891733d362e81d3e20fe8e71b996" } }, { From d60e8784066b44c80b77ce198acbd2727ce34021 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Tue, 22 Sep 2020 16:14:51 -0600 Subject: [PATCH 027/209] chore: add default CODEOWNERS (#38) --- .github/CODEOWNERS | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..30c3973a --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,11 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. +# +# For syntax help see: +# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax + +# The @googleapis/yoshi-python is the default owner for changes in this repo +* @googleapis/yoshi-python + +# The python-samples-reviewers team is the default owner for samples changes +/samples/ @googleapis/python-samples-owners \ No newline at end of file From aebd94a41e5972a4fee9a30783f49bf3828e0727 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 21 Jul 2016 16:16:19 -0700 Subject: [PATCH 028/209] Adding Natural Language API samples. 
Change-Id: I68a1b5a11c2b3703963466b195be37a2c796bf79 --- samples/snippets/README.md | 17 + samples/snippets/api/README.md | 87 ++++ samples/snippets/api/analyze.py | 115 ++++++ samples/snippets/api/analyze_test.py | 258 ++++++++++++ samples/snippets/api/requirements.txt | 1 + samples/snippets/movie_nl/README.md | 152 +++++++ samples/snippets/movie_nl/main.py | 383 ++++++++++++++++++ samples/snippets/movie_nl/main_test.py | 128 ++++++ samples/snippets/movie_nl/requirements.txt | 2 + samples/snippets/ocr_nl/README.md | 227 +++++++++++ samples/snippets/ocr_nl/main.py | 362 +++++++++++++++++ samples/snippets/ocr_nl/main_test.py | 97 +++++ samples/snippets/ocr_nl/requirements.txt | 1 + samples/snippets/syntax_triples/README.md | 91 +++++ samples/snippets/syntax_triples/main.py | 180 ++++++++ samples/snippets/syntax_triples/main_test.py | 50 +++ .../snippets/syntax_triples/requirements.txt | 1 + .../resources/obama_wikipedia.txt | 1 + 18 files changed, 2153 insertions(+) create mode 100644 samples/snippets/README.md create mode 100644 samples/snippets/api/README.md create mode 100644 samples/snippets/api/analyze.py create mode 100644 samples/snippets/api/analyze_test.py create mode 100644 samples/snippets/api/requirements.txt create mode 100644 samples/snippets/movie_nl/README.md create mode 100644 samples/snippets/movie_nl/main.py create mode 100644 samples/snippets/movie_nl/main_test.py create mode 100644 samples/snippets/movie_nl/requirements.txt create mode 100644 samples/snippets/ocr_nl/README.md create mode 100755 samples/snippets/ocr_nl/main.py create mode 100755 samples/snippets/ocr_nl/main_test.py create mode 100644 samples/snippets/ocr_nl/requirements.txt create mode 100644 samples/snippets/syntax_triples/README.md create mode 100644 samples/snippets/syntax_triples/main.py create mode 100755 samples/snippets/syntax_triples/main_test.py create mode 100644 samples/snippets/syntax_triples/requirements.txt create mode 100644 samples/snippets/syntax_triples/resources/obama_wikipedia.txt diff --git a/samples/snippets/README.md b/samples/snippets/README.md new file mode 100644 index 00000000..e63d45eb --- /dev/null +++ b/samples/snippets/README.md @@ -0,0 +1,17 @@ +# Google Cloud Natural Language API examples + +This directory contains Python examples that use the +[Google Cloud Natural Language API](https://cloud.google.com/natural-language/). + +- [api](api) has a simple command line tool that shows off the API's features. + +- [movie_nl](movie_nl) combines sentiment and entity analysis to come up with +actors/directors who are the most and least popular in the imdb movie reviews. + +- [ocr_nl](ocr_nl) uses the [Cloud Vision API](https://cloud.google.com/vision/) +to extract text from images, then uses the NL API to extract entity information +from those texts, and stores the extracted information in a database in support +of further analysis and correlation. + +- [syntax_triples](syntax_triples) uses syntax analysis to find +subject-verb-object triples in a given piece of text. diff --git a/samples/snippets/api/README.md b/samples/snippets/api/README.md new file mode 100644 index 00000000..9625df30 --- /dev/null +++ b/samples/snippets/api/README.md @@ -0,0 +1,87 @@ + +# Google Cloud Natural Language API Sample + +This Python sample demonstrates the use of the [Google Cloud Natural Language API][NL-Docs] +for sentiment, entity, and syntax analysis. 
+ +[NL-Docs]: https://cloud.google.com/natural-language/docs/ + +## Setup + +Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) +steps in the Quickstart doc to create a project and enable the +Cloud Natural Language API. Following those steps, make sure that you +[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), +and export the following environment variable: + +``` +export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json +``` + +## Run the sample + +Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. + +To run the example, install the necessary libraries using pip: + +```sh +$ pip install -r requirements.txt +``` + +Then, run the script: + +```sh +$ python analyze.py +``` + +where `` is one of: `entities`, `sentiment`, or `syntax`. + +The script will write to STDOUT the json returned from the API for the requested feature. + +For example, if you run: + +```sh +$ python analyze.py entities "Tom Sawyer is a book written by a guy known as Mark Twain." +``` + +You will see something like the following returned: + +``` +{ + "entities": [ + { + "salience": 0.49785897, + "mentions": [ + { + "text": { + "content": "Tom Sawyer", + "beginOffset": 0 + } + } + ], + "type": "PERSON", + "name": "Tom Sawyer", + "metadata": { + "wikipedia_url": "http://en.wikipedia.org/wiki/The_Adventures_of_Tom_Sawyer" + } + }, + { + "salience": 0.12209519, + "mentions": [ + { + "text": { + "content": "Mark Twain", + "beginOffset": 47 + } + } + ], + "type": "PERSON", + "name": "Mark Twain", + "metadata": { + "wikipedia_url": "http://en.wikipedia.org/wiki/Mark_Twain" + } + } + ], + "language": "en" +} +``` diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py new file mode 100644 index 00000000..73e892c3 --- /dev/null +++ b/samples/snippets/api/analyze.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python + +# Copyright 2016 Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Analyzes text using the Google Cloud Natural Language API.""" + +import argparse +import json +import sys + +from googleapiclient import discovery +import httplib2 +from oauth2client.client import GoogleCredentials + + +def get_service(): + credentials = GoogleCredentials.get_application_default() + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + return discovery.build('language', 'v1beta1', http=http) + + +def get_native_encoding_type(): + """Returns the encoding type that matches Python's native strings.""" + if sys.maxunicode == 65535: + return 'UTF16' + else: + return 'UTF32' + + +def analyze_entities(text, encoding='UTF32'): + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'encodingType': encoding, + } + + service = get_service() + + request = service.documents().analyzeEntities(body=body) + response = request.execute() + + return response + + +def analyze_sentiment(text): + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + } + } + + service = get_service() + + request = service.documents().analyzeSentiment(body=body) + response = request.execute() + + return response + + +def analyze_syntax(text, encoding='UTF32'): + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'features': { + 'extract_syntax': True, + }, + 'encodingType': encoding, + } + + service = get_service() + + request = service.documents().annotateText(body=body) + response = request.execute() + + return response + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('command', choices=[ + 'entities', 'sentiment', 'syntax']) + parser.add_argument('text') + + args = parser.parse_args() + + if args.command == 'entities': + result = analyze_entities(args.text, get_native_encoding_type()) + elif args.command == 'sentiment': + result = analyze_sentiment(args.text) + elif args.command == 'syntax': + result = analyze_syntax(args.text, get_native_encoding_type()) + + print(json.dumps(result, indent=2)) diff --git a/samples/snippets/api/analyze_test.py b/samples/snippets/api/analyze_test.py new file mode 100644 index 00000000..11b0d65d --- /dev/null +++ b/samples/snippets/api/analyze_test.py @@ -0,0 +1,258 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +import analyze + + +def test_analyze_entities(): + result = analyze.analyze_entities( + 'Tom Sawyer is a book written by a guy known as Mark Twain.') + + assert result['language'] == 'en' + entities = result['entities'] + assert len(entities) + subject = entities[0] + assert subject['type'] == 'PERSON' + assert subject['name'].startswith('Tom') + + +def test_analyze_sentiment(capsys): + result = analyze.analyze_sentiment( + 'your face is really ugly and i hate it.') + + sentiment = result['documentSentiment'] + assert sentiment['polarity'] < 0 + assert sentiment['magnitude'] < 1 + + result = analyze.analyze_sentiment( + 'cheerio, mate - I greatly admire the pallor of your visage, and your ' + 'angle of repose leaves little room for improvement.') + + sentiment = result['documentSentiment'] + assert sentiment['polarity'] > 0 + assert sentiment['magnitude'] < 1 + + +def test_analyze_syntax(capsys): + result = analyze.analyze_syntax(textwrap.dedent(u'''\ + Keep away from people who try to belittle your ambitions. Small people + always do that, but the really great make you feel that you, too, can + become great. + - Mark Twain''')) + + assert len(result['tokens']) + first_token = result['tokens'][0] + assert first_token['text']['content'] == 'Keep' + assert first_token['partOfSpeech']['tag'] == 'VERB' + assert len(result['sentences']) > 1 + assert result['language'] == 'en' + + +def test_analyze_syntax_utf8(): + """Demonstrate the interpretation of the offsets when encoding=utf8. + + UTF8 is a variable-length encoding, where each character is at least 8 + bits. The offsets we get should be the index of the first byte of the + character. + """ + test_string = u'a \u00e3 \u0201 \U0001f636 b' + byte_array = test_string.encode('utf8') + result = analyze.analyze_syntax(test_string, encoding='UTF8') + tokens = result['tokens'] + + assert tokens[0]['text']['content'] == 'a' + offset = tokens[0]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset+1].decode('utf8') == + tokens[0]['text']['content']) + + assert tokens[1]['text']['content'] == u'\u00e3' + offset = tokens[1]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset+2].decode('utf8') == + tokens[1]['text']['content']) + + assert tokens[2]['text']['content'] == u'\u0201' + offset = tokens[2]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset+2].decode('utf8') == + tokens[2]['text']['content']) + + assert tokens[3]['text']['content'] == u'\U0001f636' + offset = tokens[3]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset+4].decode('utf8') == + tokens[3]['text']['content']) + + # This demonstrates that the offset takes into account the variable-length + # characters before the target token. + assert tokens[4]['text']['content'] == u'b' + offset = tokens[4]['text'].get('beginOffset', 0) + # 'b' is only one byte long + assert (byte_array[offset:offset+1].decode('utf8') == + tokens[4]['text']['content']) + + +def test_analyze_syntax_utf16(): + """Demonstrate the interpretation of the offsets when encoding=utf16. + + UTF16 is a variable-length encoding, where each character is at least 16 + bits. The returned offsets will be the index of the first 2-byte character + of the token. 
+ """ + test_string = u'a \u00e3 \u0201 \U0001f636 b' + byte_array = test_string.encode('utf16') + # Remove the byte order marker, which the offsets don't account for + byte_array = byte_array[2:] + result = analyze.analyze_syntax(test_string, encoding='UTF16') + tokens = result['tokens'] + + assert tokens[0]['text']['content'] == 'a' + # The offset is an offset into an array where each entry is 16 bits. Since + # we have an 8-bit array, the offsets should be doubled to index into our + # array. + offset = 2 * tokens[0]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset + 2].decode('utf16') == + tokens[0]['text']['content']) + + assert tokens[1]['text']['content'] == u'\u00e3' + offset = 2 * tokens[1]['text'].get('beginOffset', 0) + # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so + # slice out 2 bytes starting from the offset. Then interpret the bytes as + # utf16 for comparison. + assert (byte_array[offset:offset + 2].decode('utf16') == + tokens[1]['text']['content']) + + assert tokens[2]['text']['content'] == u'\u0201' + offset = 2 * tokens[2]['text'].get('beginOffset', 0) + # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so + # slice out 2 bytes starting from the offset. Then interpret the bytes as + # utf16 for comparison. + assert (byte_array[offset:offset + 2].decode('utf16') == + tokens[2]['text']['content']) + + assert tokens[3]['text']['content'] == u'\U0001f636' + offset = 2 * tokens[3]['text'].get('beginOffset', 0) + # A UTF16 character with a high codepoint is 32 bits (4 bytes) long, so + # slice out 4 bytes starting from the offset. Then interpret those bytes as + # utf16 for comparison. + assert (byte_array[offset:offset + 4].decode('utf16') == + tokens[3]['text']['content']) + + # This demonstrates that the offset takes into account the variable-length + # characters before the target token. + assert tokens[4]['text']['content'] == u'b' + offset = 2 * tokens[4]['text'].get('beginOffset', 0) + # Even though 'b' is only one byte long, utf16 still encodes it using 16 + # bits + assert (byte_array[offset:offset + 2].decode('utf16') == + tokens[4]['text']['content']) + + +def test_annotate_text_utf32(): + """Demonstrate the interpretation of the offsets when encoding=utf32. + + UTF32 is a fixed-length encoding, where each character is exactly 32 bits. + The returned offsets will be the index of the first 4-byte character + of the token. + + Python unicode objects index by the interpreted unicode character. This + means a given unicode character only ever takes up one slot in a unicode + string. This is equivalent to indexing into a UTF32 string, where all + characters are a fixed length and thus will only ever take up one slot. + + Thus, if you're indexing into a python unicode object, you can set + encoding to UTF32 to index directly into the unicode object (as opposed to + the byte arrays, as these examples do). + + Nonetheless, this test still demonstrates indexing into the byte array, for + consistency. Note that you could just index into the origin test_string + unicode object with the raw offset returned by the api (ie without + multiplying it by 4, as it is below). 
+ """ + test_string = u'a \u00e3 \u0201 \U0001f636 b' + byte_array = test_string.encode('utf32') + # Remove the byte order marker, which the offsets don't account for + byte_array = byte_array[4:] + result = analyze.analyze_syntax(test_string, encoding='UTF32') + tokens = result['tokens'] + + assert tokens[0]['text']['content'] == 'a' + # The offset is an offset into an array where each entry is 32 bits. Since + # we have an 8-bit array, the offsets should be quadrupled to index into + # our array. + offset = 4 * tokens[0]['text'].get('beginOffset', 0) + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[0]['text']['content']) + + assert tokens[1]['text']['content'] == u'\u00e3' + offset = 4 * tokens[1]['text'].get('beginOffset', 0) + # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so + # slice out 4 bytes starting from the offset. Then interpret the bytes as + # utf32 for comparison. + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[1]['text']['content']) + + assert tokens[2]['text']['content'] == u'\u0201' + offset = 4 * tokens[2]['text'].get('beginOffset', 0) + # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so + # slice out 4 bytes starting from the offset. Then interpret the bytes as + # utf32 for comparison. + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[2]['text']['content']) + + assert tokens[3]['text']['content'] == u'\U0001f636' + offset = 4 * tokens[3]['text'].get('beginOffset', 0) + # A UTF32 character with a high codepoint is 32 bits (4 bytes) long, so + # slice out 4 bytes starting from the offset. Then interpret those bytes as + # utf32 for comparison. + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[3]['text']['content']) + + # This demonstrates that the offset takes into account the variable-length + # characters before the target token. + assert tokens[4]['text']['content'] == u'b' + offset = 4 * tokens[4]['text'].get('beginOffset', 0) + # Even though 'b' is only one byte long, utf32 still encodes it using 32 + # bits + assert (byte_array[offset:offset + 4].decode('utf32') == + tokens[4]['text']['content']) + + +def test_annotate_text_utf32_directly_index_into_unicode(): + """Demonstrate using offsets directly, using encoding=utf32. + + See the explanation for test_annotate_text_utf32. Essentially, indexing + into a utf32 array is equivalent to indexing into a python unicode object. 
+ """ + test_string = u'a \u00e3 \u0201 \U0001f636 b' + result = analyze.analyze_syntax(test_string, encoding='UTF32') + tokens = result['tokens'] + + assert tokens[0]['text']['content'] == 'a' + offset = tokens[0]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[0]['text']['content'] + + assert tokens[1]['text']['content'] == u'\u00e3' + offset = tokens[1]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[1]['text']['content'] + + assert tokens[2]['text']['content'] == u'\u0201' + offset = tokens[2]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[2]['text']['content'] + + assert tokens[3]['text']['content'] == u'\U0001f636' + offset = tokens[3]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[3]['text']['content'] + + assert tokens[4]['text']['content'] == u'b' + offset = tokens[4]['text'].get('beginOffset', 0) + assert test_string[offset] == tokens[4]['text']['content'] diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt new file mode 100644 index 00000000..0b96c82e --- /dev/null +++ b/samples/snippets/api/requirements.txt @@ -0,0 +1 @@ +google-api-python-client==1.5.1 diff --git a/samples/snippets/movie_nl/README.md b/samples/snippets/movie_nl/README.md new file mode 100644 index 00000000..687a6c40 --- /dev/null +++ b/samples/snippets/movie_nl/README.md @@ -0,0 +1,152 @@ +# Introduction +This sample is an application of the Google Cloud Platform Natural Language API. +It uses the [imdb movie reviews data set](https://www.cs.cornell.edu/people/pabo/movie-review-data/) +from [Cornell University](http://www.cs.cornell.edu/) and performs sentiment & entity +analysis on it. It combines the capabilities of sentiment analysis and entity recognition +to come up with actors/directors who are the most and least popular. + +### Set Up to Authenticate With Your Project's Credentials + +Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) +steps in the Quickstart doc to create a project and enable the +Cloud Natural Language API. Following those steps, make sure that you +[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), +and export the following environment variable: + +``` +export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json +``` + +**Note:** If you get an error saying your API hasn't been enabled, make sure +that you have correctly set this environment variable, and that the project that +you got the service account from has the Natural Language API enabled. + +## How it works +This sample uses the Natural Language API to annotate the input text. The +movie review document is broken into sentences using the `extract_syntax` feature. +Each sentence is sent to the API for sentiment analysis. The positive and negative +sentiment values are combined to come up with a single overall sentiment of the +movie document. + +In addition to the sentiment, the program also extracts the entities of type +`PERSON`, who are the actors in the movie (including the director and anyone +important). These entities are assigned the sentiment value of the document to +come up with the most and least popular actors/directors. + +### Movie document +We define a movie document as a set of reviews. 
These reviews are individual +sentences and we use the NL API to extract the sentences from the document. See +an example movie document below. + +``` + Sample review sentence 1. Sample review sentence 2. Sample review sentence 3. +``` + +### Sentences and Sentiment +Each sentence from the above document is assigned a sentiment as below. + +``` + Sample review sentence 1 => Sentiment 1 + Sample review sentence 2 => Sentiment 2 + Sample review sentence 3 => Sentiment 3 +``` + +### Sentiment computation +The final sentiment is computed by simply adding the sentence sentiments. + +``` + Total Sentiment = Sentiment 1 + Sentiment 2 + Sentiment 3 +``` + + +### Entity extraction and Sentiment assignment +Entities with type `PERSON` are extracted from the movie document using the NL +API. Since these entities are mentioned in their respective movie document, +they are associated with the document sentiment. + +``` + Document 1 => Sentiment 1 + + Person 1 + Person 2 + Person 3 + + Document 2 => Sentiment 2 + + Person 2 + Person 4 + Person 5 +``` + +Based on the above data we can calculate the sentiment associated with Person 2: + +``` + Person 2 => (Sentiment 1 + Sentiment 2) +``` + +## Movie Data Set +We have used the Cornell Movie Review data as our input. Please follow the instructions below to download and extract the data. + +### Download Instructions + +``` + $ curl -O http://www.cs.cornell.edu/people/pabo/movie-review-data/mix20_rand700_tokens.zip + $ unzip mix20_rand700_tokens.zip +``` + +## Command Line Usage +In order to use the movie analyzer, follow the instructions below. (Note that the `--sample` parameter below runs the script on +fewer documents, and can be omitted to run it on the entire corpus) + +### Install Dependencies + +Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. + +Then, install dependencies by running the following pip command: + +``` +$ pip install -r requirements.txt +``` +### How to Run + +``` +$ python main.py analyze --inp "tokens/*/*" \ + --sout sentiment.json \ + --eout entity.json \ + --sample 5 +``` + +You should see the log file `movie.log` created. + +## Output Data +The program produces sentiment and entity output in json format. For example: + +### Sentiment Output +``` + { + "doc_id": "cv310_tok-16557.txt", + "sentiment": 3.099, + "label": -1 + } +``` + +### Entity Output + +``` + { + "name": "Sean Patrick Flanery", + "wiki_url": "http://en.wikipedia.org/wiki/Sean_Patrick_Flanery", + "sentiment": 3.099 + } +``` + +### Entity Output Sorting +In order to sort and rank the entities generated, use the same `main.py` script. For example, +this will print the top 5 actors with negative sentiment: + +``` +$ python main.py rank --entity_input entity.json \ + --sentiment neg \ + --reverse True \ + --sample 5 +``` diff --git a/samples/snippets/movie_nl/main.py b/samples/snippets/movie_nl/main.py new file mode 100644 index 00000000..ba5c63b6 --- /dev/null +++ b/samples/snippets/movie_nl/main.py @@ -0,0 +1,383 @@ +# Copyright 2016 Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import codecs +import glob +import json +import logging +import os + +from googleapiclient import discovery +from googleapiclient.errors import HttpError +from oauth2client.client import GoogleCredentials +import requests + + +def analyze_document(service, document): + """Analyze the document and get the distribution of sentiments and + the movie name.""" + logging.info('Analyzing {}'.format(document.doc_id)) + + sentences, entities = document.extract_all_sentences(service) + + sentiments = [get_sentiment(service, sentence) for sentence in sentences] + + return sentiments, entities + + +def get_request_body(text, syntax=True, entities=True, sentiment=True): + """Creates the body of the request to the language api in + order to get an appropriate api response.""" + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'features': { + 'extract_syntax': syntax, + 'extract_entities': entities, + 'extract_document_sentiment': sentiment, + }, + 'encoding_type': 'UTF32' + } + + return body + + +def get_sentiment(service, sentence): + """Get the sentence-level sentiment.""" + body = get_request_body( + sentence, syntax=False, entities=True, sentiment=True) + + docs = service.documents() + request = docs.annotateText(body=body) + + response = request.execute(num_retries=3) + + sentiment = response.get('documentSentiment') + + if sentiment is None: + return (None, None) + else: + pol = sentiment.get('polarity') + mag = sentiment.get('magnitude') + + if pol is None and mag is not None: + pol = 0 + return (pol, mag) + + +class Document(object): + """Document class captures a single document of movie reviews.""" + + def __init__(self, text, doc_id, doc_path): + self.text = text + self.doc_id = doc_id + self.doc_path = doc_path + self.sentence_entity_pair = None + self.label = None + + def extract_all_sentences(self, service): + """Extract the sentences in a document.""" + + if self.sentence_entity_pair is not None: + return self.sentence_entity_pair + + docs = service.documents() + request_body = get_request_body( + self.text, + syntax=True, + entities=True, + sentiment=False) + request = docs.annotateText(body=request_body) + + ent_list = [] + + response = request.execute() + entities = response.get('entities', []) + sentences = response.get('sentences', []) + + sent_list = [ + sentence.get('text', {}).get('content') for sentence in sentences + ] + + for entity in entities: + ent_type = entity.get('type') + wiki_url = entity.get('metadata', {}).get('wikipedia_url') + + if ent_type == 'PERSON' and wiki_url is not None: + ent_list.append(wiki_url) + + self.sentence_entity_pair = (sent_list, ent_list) + + return self.sentence_entity_pair + + +def to_sentiment_json(doc_id, sent, label): + """Convert the sentiment info to json. 
+ + Args: + doc_id: Document id + sent: Overall Sentiment for the document + label: Actual label +1, 0, -1 for the document + + Returns: + String json representation of the input + + """ + json_doc = {} + + json_doc['doc_id'] = doc_id + json_doc['sentiment'] = float('%.3f' % sent) + json_doc['label'] = label + + return json.dumps(json_doc) + + +def get_wiki_title(wiki_url): + """Get the wikipedia page title for a given wikipedia URL. + + Args: + wiki_url: Wikipedia URL e.g., http://en.wikipedia.org/wiki/Sean_Connery + + Returns: + Wikipedia canonical name e.g., Sean Connery + + """ + try: + content = requests.get(wiki_url).text + return content.split('title')[1].split('-')[0].split('>')[1].strip() + except: + return os.path.basename(wiki_url).replace('_', ' ') + + +def to_entity_json(entity, entity_sentiment, entity_frequency): + """Convert entities and their associated sentiment to json. + + Args: + entity: Wikipedia entity name + entity_sentiment: Sentiment associated with the entity + entity_frequency: Frequency of the entity in the corpus + + Returns: + Json string representation of input + + """ + json_doc = {} + + avg_sentiment = float(entity_sentiment) / float(entity_frequency) + + json_doc['wiki_url'] = entity + json_doc['name'] = get_wiki_title(entity) + json_doc['sentiment'] = float('%.3f' % entity_sentiment) + json_doc['avg_sentiment'] = float('%.3f' % avg_sentiment) + + return json.dumps(json_doc) + + +def get_sentiment_entities(service, document): + """Compute the overall sentiment volume in the document. + + Args: + service: Client to Google Natural Language API + document: Movie review document (See Document object) + + Returns: + Tuple of total sentiment and entities found in the document + + """ + + sentiments, entities = analyze_document(service, document) + + sentiments = [sent for sent in sentiments if sent[0] is not None] + negative_sentiments = [ + polarity for polarity, magnitude in sentiments if polarity < 0.0] + positive_sentiments = [ + polarity for polarity, magnitude in sentiments if polarity > 0.0] + + negative = sum(negative_sentiments) + positive = sum(positive_sentiments) + total = positive + negative + + return (total, entities) + + +def get_sentiment_label(sentiment): + """Return the sentiment label based on the sentiment quantity.""" + if sentiment < 0: + return -1 + elif sentiment > 0: + return 1 + else: + return 0 + + +def process_movie_reviews(service, reader, sentiment_writer, entity_writer): + """Perform some sentiment math and come up with movie review.""" + collected_entities = {} + + for document in reader: + try: + sentiment_total, entities = get_sentiment_entities( + service, document) + except HttpError as e: + logging.error('Error process_movie_reviews {}'.format(e.content)) + continue + + document.label = get_sentiment_label(sentiment_total) + + sentiment_writer.write( + to_sentiment_json( + document.doc_id, + sentiment_total, + document.label + ) + ) + + sentiment_writer.write('\n') + + for ent in entities: + ent_sent, frequency = collected_entities.get(ent, (0, 0)) + ent_sent += sentiment_total + frequency += 1 + + collected_entities[ent] = (ent_sent, frequency) + + for entity, sentiment_frequency in collected_entities.items(): + entity_writer.write(to_entity_json(entity, sentiment_frequency[0], + sentiment_frequency[1])) + entity_writer.write('\n') + + sentiment_writer.flush() + entity_writer.flush() + + +def document_generator(dir_path_pattern, count=None): + """Generator for the input movie 
documents. + + Args: + dir_path_pattern: Input dir pattern e.g., "foo/bar/*/*" + count: Number of documents to read else everything if None + + Returns: + Generator which contains Document (See above) + + """ + for running_count, item in enumerate(glob.iglob(dir_path_pattern)): + if count and running_count >= count: + raise StopIteration() + + doc_id = os.path.basename(item) + + with codecs.open(item, encoding='utf-8') as f: + try: + text = f.read() + except UnicodeDecodeError: + continue + + yield Document(text, doc_id, item) + + +def rank_entities(reader, sentiment=None, topn=None, reverse_bool=False): + """Rank the entities (actors) based on their sentiment + assigned from the movie.""" + + items = [] + for item in reader: + json_item = json.loads(item) + sent = json_item.get('sentiment') + entity_item = (sent, json_item) + + if sentiment: + if sentiment == 'pos' and sent > 0: + items.append(entity_item) + elif sentiment == 'neg' and sent < 0: + items.append(entity_item) + else: + items.append(entity_item) + + items.sort(reverse=reverse_bool) + items = [json.dumps(item[1]) for item in items] + + print('\n'.join(items[:topn])) + + +def get_service(): + """Build a client to the Google Cloud Natural Language API.""" + + credentials = GoogleCredentials.get_application_default() + + return discovery.build('language', 'v1beta1', + credentials=credentials) + + +def analyze(input_dir, sentiment_writer, entity_writer, sample, log_file): + """Analyze the document for sentiment and entities""" + + # Create logger settings + logging.basicConfig(filename=log_file, level=logging.DEBUG) + + # Create a Google Service object + service = get_service() + + reader = document_generator(input_dir, sample) + + # Process the movie documents + process_movie_reviews(service, reader, sentiment_writer, entity_writer) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + subparsers = parser.add_subparsers(dest='command') + + rank_parser = subparsers.add_parser('rank') + + rank_parser.add_argument( + '--entity_input', help='location of entity input') + rank_parser.add_argument( + '--sentiment', help='filter sentiment as "neg" or "pos"') + rank_parser.add_argument( + '--reverse', help='reverse the order of the items', type=bool, + default=False + ) + rank_parser.add_argument( + '--sample', help='number of top items to process', type=int, + default=None + ) + + analyze_parser = subparsers.add_parser('analyze') + + analyze_parser.add_argument( + '--inp', help='location of the input', required=True) + analyze_parser.add_argument( + '--sout', help='location of the sentiment output', required=True) + analyze_parser.add_argument( + '--eout', help='location of the entity output', required=True) + analyze_parser.add_argument( + '--sample', help='number of top items to process', type=int) + analyze_parser.add_argument('--log_file', default='movie.log') + + args = parser.parse_args() + + if args.command == 'analyze': + with open(args.sout, 'w') as sout, open(args.eout, 'w') as eout: + analyze(args.inp, sout, eout, args.sample, args.log_file) + elif args.command == 'rank': + with open(args.entity_input, 'r') as entity_input: + rank_entities( + entity_input, args.sentiment, args.sample, args.reverse) diff --git a/samples/snippets/movie_nl/main_test.py b/samples/snippets/movie_nl/main_test.py new file mode 100644 index 00000000..fc69e9bc --- /dev/null +++ b/samples/snippets/movie_nl/main_test.py @@ -0,0 +1,128 @@ +# Copyright 2016 
Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import main +import six + + +def test_get_request_body(): + text = 'hello world' + body = main.get_request_body(text, syntax=True, entities=True, + sentiment=False) + assert body.get('document').get('content') == text + + assert body.get('features').get('extract_syntax') is True + assert body.get('features').get('extract_entities') is True + assert body.get('features').get('extract_document_sentiment') is False + + +def test_get_sentiment_label(): + assert main.get_sentiment_label(20.50) == 1 + assert main.get_sentiment_label(-42.34) == -1 + + +def test_to_sentiment_json(): + doc_id = '12345' + sentiment = 23.344564 + label = 1 + + sentiment_json = json.loads( + main.to_sentiment_json(doc_id, sentiment, label) + ) + + assert sentiment_json.get('doc_id') == doc_id + assert sentiment_json.get('sentiment') == 23.345 + assert sentiment_json.get('label') == label + + +def test_process_movie_reviews(): + service = main.get_service() + + doc1 = main.Document('Top Gun was awesome and Tom Cruise rocked!', 'doc1', + 'doc1') + doc2 = main.Document('Tom Cruise is a great actor.', 'doc2', 'doc2') + + reader = [doc1, doc2] + swriter = six.StringIO() + ewriter = six.StringIO() + + main.process_movie_reviews(service, reader, swriter, ewriter) + + sentiments = swriter.getvalue().strip().split('\n') + entities = ewriter.getvalue().strip().split('\n') + + sentiments = [json.loads(sentiment) for sentiment in sentiments] + entities = [json.loads(entity) for entity in entities] + + # assert sentiments + assert sentiments[0].get('sentiment') == 1.0 + assert sentiments[0].get('label') == 1 + + assert sentiments[1].get('sentiment') == 1.0 + assert sentiments[1].get('label') == 1 + + # assert entities + assert len(entities) == 1 + assert entities[0].get('name') == 'Tom Cruise' + assert (entities[0].get('wiki_url') == + 'http://en.wikipedia.org/wiki/Tom_Cruise') + assert entities[0].get('sentiment') == 2.0 + + +def test_rank_positive_entities(capsys): + reader = [ + ('{"avg_sentiment": -12.0, ' + '"name": "Patrick Macnee", "sentiment": -12.0}'), + ('{"avg_sentiment": 5.0, ' + '"name": "Paul Rudd", "sentiment": 5.0}'), + ('{"avg_sentiment": -5.0, ' + '"name": "Martha Plimpton", "sentiment": -5.0}'), + ('{"avg_sentiment": 7.0, ' + '"name": "Lucy (2014 film)", "sentiment": 7.0}') + ] + + main.rank_entities(reader, 'pos', topn=1, reverse_bool=False) + out, err = capsys.readouterr() + + expected = ('{"avg_sentiment": 5.0, ' + '"name": "Paul Rudd", "sentiment": 5.0}') + + expected = ''.join(sorted(expected)) + out = ''.join(sorted(out.strip())) + assert out == expected + + +def test_rank_negative_entities(capsys): + reader = [ + ('{"avg_sentiment": -12.0, ' + '"name": "Patrick Macnee", "sentiment": -12.0}'), + ('{"avg_sentiment": 5.0, ' + '"name": "Paul Rudd", "sentiment": 5.0}'), + ('{"avg_sentiment": -5.0, ' + '"name": "Martha Plimpton", "sentiment": -5.0}'), + 
('{"avg_sentiment": 7.0, ' + '"name": "Lucy (2014 film)", "sentiment": 7.0}') + ] + + main.rank_entities(reader, 'neg', topn=1, reverse_bool=True) + out, err = capsys.readouterr() + + expected = ('{"avg_sentiment": -5.0, ' + '"name": "Martha Plimpton", "sentiment": -5.0}') + + expected = ''.join(sorted(expected)) + out = ''.join(sorted(out.strip())) + assert out == expected diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt new file mode 100644 index 00000000..c385fb4e --- /dev/null +++ b/samples/snippets/movie_nl/requirements.txt @@ -0,0 +1,2 @@ +google-api-python-client==1.5.1 +requests==2.10.0 diff --git a/samples/snippets/ocr_nl/README.md b/samples/snippets/ocr_nl/README.md new file mode 100644 index 00000000..189e9397 --- /dev/null +++ b/samples/snippets/ocr_nl/README.md @@ -0,0 +1,227 @@ + +# Using the Cloud Natural Language API to analyze image text found with Cloud Vision + +This example uses the [Cloud Vision API](https://cloud.google.com/vision/) to +detect text in images, then analyzes that text using the [Cloud NL (Natural +Language) API](https://cloud.google.com/natural-language/) to detect +[entities](https://cloud.google.com/natural-language/docs/basics#entity_analysis) +in the text. It stores the detected entity +information in an [sqlite3](https://www.sqlite.org) database, which may then be +queried. + +(This kind of analysis can be useful with scans of brochures and fliers, +invoices, and other types of company documents... or maybe just organizing your +memes). + +After the example script has analyzed a directory of images, it outputs some +information on the images' entities to STDOUT. You can also further query +the generated sqlite3 database. + +## Setup + +### Install sqlite3 as necessary + +The example requires that sqlite3 be installed. Most likely, sqlite3 is already +installed for you on your machine, but if not, you can find it +[here](https://www.sqlite.org/download.html). + +### Set Up to Authenticate With Your Project's Credentials + +* Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) +steps in the Quickstart doc to create a project and enable the +Cloud Natural Language API. +* Following those steps, make sure that you [Set Up a Service + Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), + and export the following environment variable: + + ``` + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json + ``` +* This sample also requires that you [enable the Cloud Vision + API](https://console.cloud.google.com/apis/api/vision.googleapis.com/overview?project=_) + +## Running the example + +Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. + +To run the example, install the necessary libraries using pip: + +```sh +$ pip install -r requirements.txt +``` + +You must also be set up to authenticate with the Cloud APIs using your +project's service account credentials, as described above. 
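+
+Before running the script, you can optionally double-check that the credentials
+variable from the Setup section is visible in your current shell:
+
+```sh
+$ echo $GOOGLE_APPLICATION_CREDENTIALS
+```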
+
+Then, run the script on a directory of images to do the analysis, E.g.:
+
+```sh
+$ python main.py --input_directory=<path-to-image-directory>
+```
+
+You can try this on a sample directory of images:
+
+```sh
+$ curl -O http://storage.googleapis.com/python-docs-samples-tests/language/ocr_nl-images.zip
+$ unzip ocr_nl-images.zip
+$ python main.py --input_directory=images/
+```
+
+## A walkthrough of the example and its results
+
+Let's take a look at what the example generates when run on the `images/`
+sample directory, and how it does it.
+
+The script looks at each image file in the given directory, and uses the Vision
+API's text detection capabilities (OCR) to find any text in each image. It
+passes that info to the NL API, and asks it to detect
+[entities](https://cloud.google.com/natural-language/docs/basics#entity_analysis) in the
+discovered text, then stores this information in a queryable database.
+
+To keep things simple, we're just passing to the NL API all the text found in a
+given image, in one string. Note that sometimes this string can include
+misinterpreted characters (if the image text was not very clear), or list words
+"out of order" from how a human would interpret them. So, the text that is
+actually passed to the NL API might not be quite what you would have predicted
+with your human eyeballs.
+
+The Entity information returned by the NL API includes *type*, *name*, *salience*,
+information about where in the text the given entity was found, and detected
+language. It may also include *metadata*, including a link to a Wikipedia URL
+that the NL API believes this entity maps to. See the
+[documentation](https://cloud.google.com/natural-language/docs/) and the [API
+reference pages](https://cloud.google.com/natural-language/reference/rest/v1beta1/Entity)
+for more information about `Entity` fields.
+
+For example, if the NL API was given the sentence:
+
+```
+"Holmes and Watson walked over to the cafe."
+```
+
+it would return a response something like the following:
+
+```
+{
+  "entities": [{
+    "salience": 0.51629782,
+    "mentions": [{
+      "text": {
+        "content": "Holmes",
+        "beginOffset": 0
+      }}],
+    "type": "PERSON",
+    "name": "Holmes",
+    "metadata": {
+      "wikipedia_url": "http://en.wikipedia.org/wiki/Sherlock_Holmes"
+    }},
+  {
+    "salience": 0.22334209,
+    "mentions": [{
+      "text": {
+        "content": "Watson",
+        "beginOffset": 11
+      }}],
+    "type": "PERSON",
+    "name": "Watson",
+    "metadata": {
+      "wikipedia_url": "http://en.wikipedia.org/wiki/Dr._Watson"
+    }}],
+  "language": "en"
+}
+```
+
+Note that the NL API determined from context that "Holmes" was referring to
+'Sherlock Holmes', even though the name "Sherlock" was not included.
+
+Note also that not all nouns in a given sentence are detected as Entities. An
+Entity represents a phrase in the text that is a known entity, such as a person,
+an organization, or location. The generic mention of a 'cafe' is not treated as
+an entity in this sense.
+
+For each image file, we store its detected entity information (if any) in an
+sqlite3 database.
+
+### Querying for information about the detected entities
+
+Once the detected entity information from all the images is stored in the
+sqlite3 database, we can run some queries to do some interesting analysis. The
+script runs a couple of such example query sets and outputs the result to STDOUT.
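+
+For reference, the first of those query sets boils down to a grouped count over
+the `entities` table that the script creates (see `write_entity_info_to_db` and
+`output_entity_data` in `main.py` below), along the lines of:
+
+```
+select name, count(name) as wc from entities
+group by name order by wc desc limit 15;
+```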
+ +The first set of queries outputs information about the top 15 most frequent +entity names found in the images, and the second outputs information about the +top 15 most frequent Wikipedia URLs found. + +For example, with the sample image set, note that the name 'Sherlock Holmes' is +found three times, but entities associated with the URL +http://en.wikipedia.org/wiki/Sherlock_Holmes are found four times; one of the +entity names was only "Holmes", but the NL API detected from context that it +referred to Sherlock Holmes. Similarly, you can see that mentions of 'Hive' and +'Spark' mapped correctly – given their context – to the URLs of those Apache +products. + +``` +----entity: http://en.wikipedia.org/wiki/Apache_Hive was found with count 1 +Found in file images/IMG_20160621_133020.jpg, detected as type OTHER, with + locale en. +names(s): set([u'hive']) +salience measure(s): set([0.0023808887]) +``` + +Similarly, 'Elizabeth' (in screencaps of text from "Pride and Prejudice") is +correctly mapped to http://en.wikipedia.org/wiki/Elizabeth_Bennet because of the +context of the surrounding text. + +``` +----entity: http://en.wikipedia.org/wiki/Elizabeth_Bennet was found with count 2 +Found in file images/Screenshot 2016-06-19 11.51.50.png, detected as type PERSON, with + locale en. +Found in file images/Screenshot 2016-06-19 12.08.30.png, detected as type PERSON, with + locale en. +names(s): set([u'elizabeth']) +salience measure(s): set([0.34601286, 0.0016268975]) +``` + +## Further queries to the sqlite3 database + +When the script runs, it makes a couple of example queries to the database +containing the entity information returned from the NL API. You can make further +queries on that database by starting up sqlite3 from the command line, and +passing it the name of the database file generated by running the example. This +file will be in the same directory, and have `entities` as a prefix, with the +timestamp appended. (If you have run the example more than once, a new database +file will be created each time). + +Run sqlite3 as follows (using the name of your own database file): + +```sh +$ sqlite3 entities1466518508.db +``` + +You'll see something like this: + +``` +SQLite version 3.8.10.2 2015-05-20 18:17:19 +Enter ".help" for usage hints. +sqlite> +``` + +From this prompt, you can make any queries on the data that you want. E.g., +start with something like: + +``` +sqlite> select * from entities limit 20; +``` + +Or, try this to see in which images the most entities were detected: + +``` +sqlite> select filename, count(filename) from entities group by filename; +``` + +You can do more complex queries to get further information about the entities +that have been discovered in your images. E.g., you might want to investigate +which of the entities are most commonly found together in the same image. See +the [SQLite documentation](https://www.sqlite.org/docs.html) for more +information. + + diff --git a/samples/snippets/ocr_nl/main.py b/samples/snippets/ocr_nl/main.py new file mode 100755 index 00000000..6e329f53 --- /dev/null +++ b/samples/snippets/ocr_nl/main.py @@ -0,0 +1,362 @@ +#!/usr/bin/env python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This example uses the Google Cloud Vision API to detect text in images, then +analyzes that text using the Google Cloud Natural Language API to detect +entities in the text. It stores the detected entity information in an sqlite3 +database, which may then be queried. + +After this script has analyzed a directory of images, it outputs some +information on the images' entities to STDOUT. You can also further query +the generated sqlite3 database; see the README for more information. + +Run the script on a directory of images to do the analysis, E.g.: + $ python main.py --input_directory= + +You can try this on a sample directory of images: + $ curl -O http://storage.googleapis.com/python-docs-samples-tests/language/ocr_nl-images.zip + $ unzip ocr_nl-images.zip + $ python main.py --input_directory=images/ + +""" # noqa + +import argparse +import base64 +import contextlib +import logging +import os +import sqlite3 +import sys +import time + +from googleapiclient import discovery +from googleapiclient import errors +import httplib2 +from oauth2client.client import GoogleCredentials + +BATCH_SIZE = 10 + + +class VisionApi(object): + """Construct and use the Cloud Vision API service.""" + + def __init__(self): + credentials = GoogleCredentials.get_application_default() + self.service = discovery.build('vision', 'v1', credentials=credentials) + + def detect_text(self, input_filenames, num_retries=3, max_results=6): + """Uses the Vision API to detect text in the given file.""" + batch_request = [] + for filename in input_filenames: + request = { + 'image': {}, + 'features': [{ + 'type': 'TEXT_DETECTION', + 'maxResults': max_results, + }] + } + + # Accept both files in cloud storage, as well as local files. 
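+            # Images in Cloud Storage are referenced by their gs:// URI;
+            # local files are read and base64-encoded, since the JSON API
+            # expects inline image bytes as a base64 string.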
+ if filename.startswith('gs://'): + request['image']['source'] = { + 'gcsImageUri': filename + } + else: + with open(filename, 'rb') as image_file: + request['image']['content'] = base64.b64encode( + image_file.read()).decode('UTF-8') + + batch_request.append(request) + + request = self.service.images().annotate( + body={'requests': batch_request}) + + try: + responses = request.execute(num_retries=num_retries) + if 'responses' not in responses: + return {} + + text_response = {} + for filename, response in zip( + input_filenames, responses['responses']): + + if 'error' in response: + logging.error('API Error for {}: {}'.format( + filename, + response['error'].get('message', ''))) + continue + + text_response[filename] = response.get('textAnnotations', []) + + return text_response + + except errors.HttpError as e: + logging.error('Http Error for {}: {}'.format(filename, e)) + except KeyError as e2: + logging.error('Key error: {}'.format(e2)) + + +class TextAnalyzer(object): + """Construct and use the Google Natural Language API service.""" + + def __init__(self, db_filename=None): + credentials = GoogleCredentials.get_application_default() + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + self.service = discovery.build('language', 'v1beta1', http=http) + + # This list will store the entity information gleaned from the + # image files. + self.entity_info = [] + + # This is the filename of the sqlite3 database to save to + self.db_filename = db_filename or 'entities{}.db'.format( + int(time.time())) + + def _get_native_encoding_type(self): + """Returns the encoding type that matches Python's native strings.""" + if sys.maxunicode == 65535: + return 'UTF16' + else: + return 'UTF32' + + def nl_detect(self, text): + """Use the Natural Language API to analyze the given text string.""" + # We're only requesting 'entity' information from the Natural Language + # API at this time. + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'encodingType': self._get_native_encoding_type(), + } + entities = [] + try: + request = self.service.documents().analyzeEntities(body=body) + response = request.execute() + entities = response['entities'] + except errors.HttpError as e: + logging.error('Http Error: %s' % e) + except KeyError as e2: + logging.error('Key error: %s' % e2) + return entities + + def add_entities(self, filename, locale, document): + """Apply the Natural Language API to the document, and collect the + detected entities.""" + + # Apply the Natural Language API to the document. + entities = self.nl_detect(document) + self.extract_and_save_entity_info(entities, locale, filename) + + def extract_entity_info(self, entity): + """Extract information about an entity.""" + type = entity['type'] + name = entity['name'].lower() + metadata = entity['metadata'] + salience = entity['salience'] + wiki_url = metadata.get('wikipedia_url', None) + return (type, name, salience, wiki_url) + + def extract_and_save_entity_info(self, entities, locale, filename): + for entity in entities: + type, name, salience, wiki_url = self.extract_entity_info(entity) + # Because this is a small example, we're using a list to hold + # all the entity information, then we'll insert it into the + # database all at once when we've processed all the files. + # For a larger data set, you would want to write to the database + # in batches. 
+ self.entity_info.append( + (locale, type, name, salience, wiki_url, filename)) + + def write_entity_info_to_db(self): + """Store the info gleaned about the entities in the text, via the + Natural Language API, in an sqlite3 database table, and then print out + some simple analytics. + """ + logging.info('Saving entity info to the sqlite3 database.') + # Create the db. + with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: + with conn as cursor: + # Create table + cursor.execute( + 'CREATE TABLE if not exists entities (locale text, ' + 'type text, name text, salience real, wiki_url text, ' + 'filename text)') + with conn as cursor: + # Load all the data + cursor.executemany( + 'INSERT INTO entities VALUES (?,?,?,?,?,?)', + self.entity_info) + + def output_entity_data(self): + """Output some info about the entities by querying the generated + sqlite3 database. + """ + + with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: + + # This query finds the number of times each entity name was + # detected, in descending order by count, and returns information + # about the first 15 names, including the files in which they were + # found, their detected 'salience' and language (locale), and the + # wikipedia urls (if any) associated with them. + print('\n==============\nTop 15 most frequent entity names:') + + cursor = conn.cursor() + results = cursor.execute( + 'select name, count(name) as wc from entities ' + 'group by name order by wc desc limit 15;') + + for item in results: + cursor2 = conn.cursor() + print(u'\n----Name: {} was found with count {}'.format(*item)) + results2 = cursor2.execute( + 'SELECT name, type, filename, locale, wiki_url, salience ' + 'FROM entities WHERE name=?', (item[0],)) + urls = set() + for elt in results2: + print(('Found in file {}, detected as type {}, with\n' + ' locale {} and salience {}.').format( + elt[2], elt[1], elt[3], elt[5])) + if elt[4]: + urls.add(elt[4]) + if urls: + print('url(s): {}'.format(urls)) + + # This query finds the number of times each wikipedia url was + # detected, in descending order by count, and returns information + # about the first 15 urls, including the files in which they were + # found and the names and 'salience' with which they were + # associated. + print('\n==============\nTop 15 most frequent Wikipedia URLs:') + c = conn.cursor() + results = c.execute( + 'select wiki_url, count(wiki_url) as wc from entities ' + 'group by wiki_url order by wc desc limit 15;') + + for item in results: + cursor2 = conn.cursor() + print('\n----entity: {} was found with count {}'.format(*item)) + results2 = cursor2.execute( + 'SELECT name, type, filename, locale, salience ' + 'FROM entities WHERE wiki_url=?', (item[0],)) + names = set() + salience = set() + for elt in results2: + print(('Found in file {}, detected as type {}, with\n' + ' locale {}.').format(elt[2], elt[1], elt[3])) + names.add(elt[0]) + salience.add(elt[4]) + print('names(s): {}'.format(names)) + print('salience measure(s): {}'.format(salience)) + + +def extract_description(texts): + """Returns text annotations as a single string""" + document = [] + + for text in texts: + try: + document.append(text['description']) + locale = text['locale'] + # Process only the first entry, which contains all + # text detected. 
+ break + except KeyError as e: + logging.error('KeyError: %s\n%s' % (e, text)) + return (locale, ' '.join(document)) + + +def extract_descriptions(input_filename, texts, text_analyzer): + """Gets the text that was detected in the image.""" + if texts: + locale, document = extract_description(texts) + text_analyzer.add_entities(input_filename, locale, document) + sys.stdout.write('.') # Output a progress indicator. + sys.stdout.flush() + elif texts == []: + print('%s had no discernible text.' % input_filename) + + +def get_text_from_files(vision, input_filenames, text_analyzer): + """Call the Vision API on a file and index the results.""" + texts = vision.detect_text(input_filenames) + if texts: + for filename, text in texts.items(): + extract_descriptions(filename, text, text_analyzer) + + +def batch(list_to_batch, batch_size=BATCH_SIZE): + """Group a list into batches of size batch_size. + + >>> tuple(batch([1, 2, 3, 4, 5], batch_size=2)) + ((1, 2), (3, 4), (5)) + """ + for i in range(0, len(list_to_batch), batch_size): + yield tuple(list_to_batch[i:i + batch_size]) + + +def main(input_dir, db_filename=None): + """Walk through all the image files in the given directory, extracting any + text from them and feeding that text to the Natural Language API for + analysis. + """ + # Create a client object for the Vision API + vision_api_client = VisionApi() + # Create an object to analyze our text using the Natural Language API + text_analyzer = TextAnalyzer(db_filename) + + if input_dir: + allfileslist = [] + # Recursively construct a list of all the files in the given input + # directory. + for folder, subs, files in os.walk(input_dir): + for filename in files: + allfileslist.append(os.path.join(folder, filename)) + + # Analyze the text in the files using the Vision and Natural Language + # APIs. + for filenames in batch(allfileslist, batch_size=1): + get_text_from_files(vision_api_client, filenames, text_analyzer) + + # Save the result to a database, then run some queries on the database, + # with output to STDOUT. + text_analyzer.write_entity_info_to_db() + + # now, print some information about the entities detected. + text_analyzer.output_entity_data() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Detects text in the images in the given directory.') + parser.add_argument( + '--input_directory', + help='The image directory you\'d like to detect text in. If left ' + 'unspecified, the --db specified will be queried without being ' + 'updated.') + parser.add_argument( + '--db', help='The filename to use for the sqlite3 database.') + args = parser.parse_args() + + if not (args.input_directory or args.db): + parser.error('Either --input_directory or --db must be specified.') + + main(args.input_directory, args.db) diff --git a/samples/snippets/ocr_nl/main_test.py b/samples/snippets/ocr_nl/main_test.py new file mode 100755 index 00000000..c07ed747 --- /dev/null +++ b/samples/snippets/ocr_nl/main_test.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for main.""" + +import re +import zipfile + +import main + + +_TEST_IMAGE_URI = 'gs://{}/language/image8.png' + + +def test_batch_empty(): + for batch_size in range(1, 10): + assert len( + list(main.batch([], batch_size=batch_size))) == 0 + + +def test_batch_single(): + for batch_size in range(1, 10): + batched = tuple(main.batch([1], batch_size=batch_size)) + assert batched == ((1,),) + + +def test_single_image_returns_text(cloud_config): + vision_api_client = main.VisionApi() + + image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket) + texts = vision_api_client.detect_text([image_path]) + + assert image_path in texts + _, document = main.extract_description(texts[image_path]) + assert "daughter" in document + assert "Bennet" in document + assert "hat" in document + + +def test_single_nonimage_returns_error(): + vision_api_client = main.VisionApi() + texts = vision_api_client.detect_text(['README.md']) + assert "README.md" not in texts + + +def test_text_returns_entities(): + text = "Holmes and Watson walked to the cafe." + text_analyzer = main.TextAnalyzer() + entities = text_analyzer.nl_detect(text) + assert len(entities) == 2 + etype, ename, salience, wurl = text_analyzer.extract_entity_info( + entities[0]) + assert ename == 'holmes' + assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes' + + +def test_entities_list(cloud_config): + vision_api_client = main.VisionApi() + image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket) + texts = vision_api_client.detect_text([image_path]) + locale, document = main.extract_description(texts[image_path]) + text_analyzer = main.TextAnalyzer() + entities = text_analyzer.nl_detect(document) + assert len(entities) == 4 + etype, ename, salience, wurl = text_analyzer.extract_entity_info( + entities[0]) + assert ename == 'bennet' + assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet' + + +def test_main(remote_resource, tmpdir, capsys): + images_path = str(tmpdir.mkdir('images')) + + # First, pull down some test data + zip_path = remote_resource('language/ocr_nl-images-small.zip', tmpdir) + + # Extract it to the image directory + with zipfile.ZipFile(zip_path) as zfile: + zfile.extractall(images_path) + + main.main(images_path, str(tmpdir.join('ocr_nl.db'))) + + stdout, _ = capsys.readouterr() + + assert re.search(r'google was found with count', stdout) diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt new file mode 100644 index 00000000..0b96c82e --- /dev/null +++ b/samples/snippets/ocr_nl/requirements.txt @@ -0,0 +1 @@ +google-api-python-client==1.5.1 diff --git a/samples/snippets/syntax_triples/README.md b/samples/snippets/syntax_triples/README.md new file mode 100644 index 00000000..1342ee65 --- /dev/null +++ b/samples/snippets/syntax_triples/README.md @@ -0,0 +1,91 @@ +# Using the Cloud Natural Language API to find subject-verb-object triples in text + +This example finds subject-verb-object triples in a given piece of text using +syntax 
analysis capabilities of
+[Cloud Natural Language API](https://cloud.google.com/natural-language/).
+To do this, it calls the extractSyntax feature of the API
+and uses the dependency parse tree and part-of-speech tags in the response
+to build the subject-verb-object triples. The results are printed to STDOUT.
+This type of analysis can be considered the
+first step towards an information extraction task.
+
+## Set Up to Authenticate With Your Project's Credentials
+
+Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project)
+steps in the Quickstart doc to create a project and enable the
+Cloud Natural Language API. Following those steps, make sure that you
+[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account),
+and export the following environment variable:
+
+```
+export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json
+```
+
+## Running the example
+
+Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed.
+
+To run the example, install the necessary libraries using pip:
+
+```
+$ pip install -r requirements.txt
+```
+You must also be set up to authenticate with the Cloud APIs using your
+project's service account credentials, as described above.
+
+Then, run the script on a file containing the text that you wish to analyze.
+The text must be encoded in UTF8 or ASCII:
+
+```
+$ python main.py <path-to-text-file>
+```
+
+Try this on a sample text in the resources directory:
+
+```
+$ python main.py resources/obama_wikipedia.txt
+```
+
+## A walkthrough of the example and its results
+
+Let's take a look at what the example generates when run on the
+`obama_wikipedia.txt` sample file, and how it does it.
+
+The goal is to find all subject-verb-object
+triples in the text. The example first sends the text to the Cloud Natural
+Language API to perform extractSyntax analysis. Then, using part-of-speech tags,
+it finds all the verbs in the text. For each verb, it uses the dependency
+parse tree information to find all the dependent tokens.
+
+For example, given the following sentence in the `obama_wikipedia.txt` file:
+
+```
+"He began his presidential campaign in 2007"
+```
+The example finds the verb `began`, and `He`, `campaign`, and `in` as its
+dependencies. Then the script enumerates the dependencies for each verb and
+finds all the subjects and objects. For the sentence above, the found subject
+and object are `He` and `campaign`.
+
+The next step is to complete each subject and object token by adding their
+dependencies to them. For example, in the sentence above, `his` and
+`presidential` are dependent tokens for `campaign`. This is done using the
+dependency parse tree, similar to verb dependencies as explained above. The
+final result is the (`He`, `began`, `his presidential campaign`) triple for
+the example sentence above.
+
+The script performs this analysis for the entire text and prints the result.
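+
+As a minimal sketch of the dependency lookup described above (a simplified,
+hypothetical `head_to_dependents` helper mirroring the `dependents` function in
+`main.py` below; the `dependencyEdge` and `headTokenIndex` fields come from the
+API's `annotateText` response):
+
+```
+def head_to_dependents(tokens):
+    """Map each head token index to the indices of its dependent tokens."""
+    index = {}
+    for i, token in enumerate(tokens):
+        head = token['dependencyEdge']['headTokenIndex']
+        if i != head:  # the root token points at itself
+            index.setdefault(head, []).append(i)
+    return index
+```
+
+A verb's subject and object are then just its dependents whose `dependencyEdge`
+label is `NSUBJ` or `DOBJ`, respectively.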
+For the `obama_wikipedia.txt` file, the result is the following: + +```sh ++------------------------------+------------+------------------------------+ +| Obama | received | national attention | ++------------------------------+------------+------------------------------+ +| He | began | his presidential campaign | ++------------------------------+------------+------------------------------+ +| he | won | sufficient delegates in the | +| | | Democratic Party primaries | ++------------------------------+------------+------------------------------+ +| He | defeated | Republican nominee John | +| | | McCain | +``` diff --git a/samples/snippets/syntax_triples/main.py b/samples/snippets/syntax_triples/main.py new file mode 100644 index 00000000..1be174bf --- /dev/null +++ b/samples/snippets/syntax_triples/main.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This example finds subject-verb-object triples in a given piece of text using +the syntax analysis capabilities of Cloud Natural Language API. The triples are +printed to STDOUT. This can be considered as the first step towards an +information extraction task. + +Run the script on a file containing the text that you wish to analyze. +The text must be encoded in UTF8 or ASCII: + $ python main.py + +Try this on a sample text in the resources directory: + $ python main.py resources/obama_wikipedia.txt +""" + +import argparse +import sys +import textwrap + +from googleapiclient import discovery +import httplib2 +from oauth2client.client import GoogleCredentials + + +def dependents(tokens, head_index): + """Returns an ordered list of the token indices of the dependents for + the given head.""" + # Create head->dependency index. + head_to_deps = {} + for i, token in enumerate(tokens): + head = token['dependencyEdge']['headTokenIndex'] + if i != head: + head_to_deps.setdefault(head, []).append(i) + return head_to_deps.get(head_index, ()) + + +def phrase_text_for_head(tokens, text, head_index): + """Returns the entire phrase containing the head token + and its dependents. + """ + begin, end = phrase_extent_for_head(tokens, head_index) + return text[begin:end] + + +def phrase_extent_for_head(tokens, head_index): + """Returns the begin and end offsets for the entire phrase + containing the head token and its dependents. + """ + begin = tokens[head_index]['text']['beginOffset'] + end = begin + len(tokens[head_index]['text']['content']) + for child in dependents(tokens, head_index): + child_begin, child_end = phrase_extent_for_head(tokens, child) + begin = min(begin, child_begin) + end = max(end, child_end) + return (begin, end) + + +def analyze_syntax(text): + """Use the NL API to analyze the given text string, and returns the + response from the API. Requests an encodingType that matches + the encoding used natively by Python. Raises an + errors.HTTPError if there is a connection problem. 
+ """ + credentials = GoogleCredentials.get_application_default() + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + service = discovery.build( + 'language', 'v1beta1', http=http) + body = { + 'document': { + 'type': 'PLAIN_TEXT', + 'content': text, + }, + 'features': { + 'extract_syntax': True, + }, + 'encodingType': get_native_encoding_type(), + } + request = service.documents().annotateText(body=body) + return request.execute() + + +def get_native_encoding_type(): + """Returns the encoding type that matches Python's native strings.""" + if sys.maxunicode == 65535: + return 'UTF16' + else: + return 'UTF32' + + +def find_triples(tokens, + left_dependency_label='NSUBJ', + head_part_of_speech='VERB', + right_dependency_label='DOBJ'): + """Generator function that searches the given tokens + with the given part of speech tag, that have dependencies + with the given labels. For each such head found, yields a tuple + (left_dependent, head, right_dependent), where each element of the + tuple is an index into the tokens array. + """ + for head, token in enumerate(tokens): + if token['partOfSpeech']['tag'] == head_part_of_speech: + children = dependents(tokens, head) + left_deps = [] + right_deps = [] + for child in children: + child_token = tokens[child] + child_dep_label = child_token['dependencyEdge']['label'] + if child_dep_label == left_dependency_label: + left_deps.append(child) + elif child_dep_label == right_dependency_label: + right_deps.append(child) + for left_dep in left_deps: + for right_dep in right_deps: + yield (left_dep, head, right_dep) + + +def show_triple(tokens, text, triple): + """Prints the given triple (left, head, right). For left and right, + the entire phrase headed by each token is shown. For head, only + the head token itself is shown. + + """ + nsubj, verb, dobj = triple + + # Extract the text for each element of the triple. + nsubj_text = phrase_text_for_head(tokens, text, nsubj) + verb_text = tokens[verb]['text']['content'] + dobj_text = phrase_text_for_head(tokens, text, dobj) + + # Pretty-print the triple. + left = textwrap.wrap(nsubj_text, width=28) + mid = textwrap.wrap(verb_text, width=10) + right = textwrap.wrap(dobj_text, width=28) + print('+' + 30 * '-' + '+' + 12 * '-' + '+' + 30 * '-' + '+') + for l, m, r in zip(left, mid, right): + print('| {:<28s} | {:<10s} | {:<28s} |'.format( + l or '', m or '', r or '')) + + +def main(text_file): + # Extracts subject-verb-object triples from the given text file, + # and print each one. + + # Read the input file. + text = open(text_file, 'rb').read().decode('utf8') + + analysis = analyze_syntax(text) + tokens = analysis.get('tokens', []) + + for triple in find_triples(tokens): + show_triple(tokens, text, triple) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + 'text_file', + help='A file containing the document to process. ' + 'Should be encoded in UTF8 or ASCII') + args = parser.parse_args() + main(args.text_file) diff --git a/samples/snippets/syntax_triples/main_test.py b/samples/snippets/syntax_triples/main_test.py new file mode 100755 index 00000000..62c2915d --- /dev/null +++ b/samples/snippets/syntax_triples/main_test.py @@ -0,0 +1,50 @@ +# Copyright 2016 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + +import main + + +def test_dependents(): + text = "I am eating a delicious banana" + analysis = main.analyze_syntax(text) + tokens = analysis.get('tokens', []) + assert [0, 1, 5] == main.dependents(tokens, 2) + assert [3, 4] == main.dependents(tokens, 5) + + +def test_phrase_text_for_head(): + text = "A small collection of words" + analysis = main.analyze_syntax(text) + tokens = analysis.get('tokens', []) + assert "words" == main.phrase_text_for_head(tokens, text, 4) + + +def test_find_triples(): + text = "President Obama won the noble prize" + analysis = main.analyze_syntax(text) + tokens = analysis.get('tokens', []) + triples = main.find_triples(tokens) + for triple in triples: + assert (1, 2, 5) == triple + + +def test_obama_example(resource, capsys): + main.main(resource('obama_wikipedia.txt')) + stdout, _ = capsys.readouterr() + lines = stdout.split('\n') + assert re.match( + r'.*Obama\b.*\| received\b.*\| national attention\b', + lines[1]) diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt new file mode 100644 index 00000000..0b96c82e --- /dev/null +++ b/samples/snippets/syntax_triples/requirements.txt @@ -0,0 +1 @@ +google-api-python-client==1.5.1 diff --git a/samples/snippets/syntax_triples/resources/obama_wikipedia.txt b/samples/snippets/syntax_triples/resources/obama_wikipedia.txt new file mode 100644 index 00000000..1e89d4ab --- /dev/null +++ b/samples/snippets/syntax_triples/resources/obama_wikipedia.txt @@ -0,0 +1 @@ +In 2004, Obama received national attention during his campaign to represent Illinois in the United States Senate with his victory in the March Democratic Party primary, his keynote address at the Democratic National Convention in July, and his election to the Senate in November. He began his presidential campaign in 2007 and, after a close primary campaign against Hillary Clinton in 2008, he won sufficient delegates in the Democratic Party primaries to receive the presidential nomination. He then defeated Republican nominee John McCain in the general election, and was inaugurated as president on January 20, 2009. Nine months after his inauguration, Obama was named the 2009 Nobel Peace Prize laureate. From d6c80a16fd2f26e2157f165cbd2090a4fc7bf327 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 16 Aug 2016 13:32:42 -0700 Subject: [PATCH 029/209] Auto-update dependencies. 
[(#456)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/456) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index c385fb4e..d77ac3f2 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.1 -requests==2.10.0 +requests==2.11.0 From da40b4238b62799c8087d50c5dbed1f979c28bf8 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 17 Aug 2016 09:34:47 -0700 Subject: [PATCH 030/209] Auto-update dependencies. [(#459)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/459) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 0b96c82e..e5b1db3f 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.1 +google-api-python-client==1.5.2 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index d77ac3f2..5e4dc72a 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.1 +google-api-python-client==1.5.2 requests==2.11.0 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 0b96c82e..e5b1db3f 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.1 +google-api-python-client==1.5.2 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 0b96c82e..e5b1db3f 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.1 +google-api-python-client==1.5.2 From 5f708d512ac39190d7b799f0645fc22887c37b6b Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 18 Aug 2016 10:18:42 -0700 Subject: [PATCH 031/209] Auto-update dependencies. 
[(#464)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/464) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 5e4dc72a..6a0104a2 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.2 -requests==2.11.0 +requests==2.11.1 From ee7ef00aa7ea05c5180b8c396354084ed76728f8 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Fri, 19 Aug 2016 13:56:28 -0700 Subject: [PATCH 032/209] Fix import order lint errors Change-Id: Ieaf7237fc6f925daec46a07d2e81a452b841198a --- samples/snippets/movie_nl/main_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/main_test.py b/samples/snippets/movie_nl/main_test.py index fc69e9bc..8e22a1da 100644 --- a/samples/snippets/movie_nl/main_test.py +++ b/samples/snippets/movie_nl/main_test.py @@ -14,9 +14,10 @@ import json -import main import six +import main + def test_get_request_body(): text = 'hello world' From 6d3493501ac7c6876644a1139cc97674d41fa15a Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 30 Aug 2016 10:08:32 -0700 Subject: [PATCH 033/209] Auto-update dependencies. [(#486)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/486) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index e5b1db3f..0b52bd22 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.2 +google-api-python-client==1.5.3 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 6a0104a2..3aa1d6d9 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.2 +google-api-python-client==1.5.3 requests==2.11.1 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index e5b1db3f..0b52bd22 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.2 +google-api-python-client==1.5.3 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index e5b1db3f..0b52bd22 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.2 +google-api-python-client==1.5.3 From 6dbd350e6753b3eda12b53589029a6ff061af6ec Mon Sep 17 00:00:00 2001 From: Jerjou Date: Mon, 19 Sep 2016 12:41:30 -0700 Subject: [PATCH 034/209] Add sentiment analysis sample [(#533)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/533) * Add sentiment analysis sample * Move sample review files into resources directory * Remove blank line from end of file * Update set up instructions to point to getting started guide * Update README to remove need to set up gcloud. 
Itemize what setting up a project entails. * Update NL README to link to Sentiment tutorial code * Coerce number types before comparison --- samples/snippets/README.md | 4 ++ samples/snippets/sentiment/README.md | 48 +++++++++++++++++ samples/snippets/sentiment/requirements.txt | 2 + .../snippets/sentiment/resources/mixed.txt | 20 +++++++ samples/snippets/sentiment/resources/neg.txt | 4 ++ .../snippets/sentiment/resources/neutral.txt | 3 ++ samples/snippets/sentiment/resources/pos.txt | 11 ++++ .../snippets/sentiment/sentiment_analysis.py | 54 +++++++++++++++++++ .../sentiment/sentiment_analysis_test.py | 46 ++++++++++++++++ 9 files changed, 192 insertions(+) create mode 100644 samples/snippets/sentiment/README.md create mode 100644 samples/snippets/sentiment/requirements.txt create mode 100644 samples/snippets/sentiment/resources/mixed.txt create mode 100644 samples/snippets/sentiment/resources/neg.txt create mode 100644 samples/snippets/sentiment/resources/neutral.txt create mode 100644 samples/snippets/sentiment/resources/pos.txt create mode 100644 samples/snippets/sentiment/sentiment_analysis.py create mode 100644 samples/snippets/sentiment/sentiment_analysis_test.py diff --git a/samples/snippets/README.md b/samples/snippets/README.md index e63d45eb..1e4a6401 100644 --- a/samples/snippets/README.md +++ b/samples/snippets/README.md @@ -13,5 +13,9 @@ to extract text from images, then uses the NL API to extract entity information from those texts, and stores the extracted information in a database in support of further analysis and correlation. +- [sentiment](sentiment) contains the [Sentiment Analysis + Tutorial](https://cloud.google.com/natural-language/docs/sentiment-tutorial) +code as used within the documentation. + - [syntax_triples](syntax_triples) uses syntax analysis to find subject-verb-object triples in a given piece of text. diff --git a/samples/snippets/sentiment/README.md b/samples/snippets/sentiment/README.md new file mode 100644 index 00000000..e77cdf16 --- /dev/null +++ b/samples/snippets/sentiment/README.md @@ -0,0 +1,48 @@ +# Introduction + +This sample contains the code referenced in the +[Sentiment Analysis Tutorial](http://cloud.google.com/natural-language/docs/sentiment-tutorial) +within the Google Cloud Natural Language API Documentation. A full walkthrough of this sample +is located within the documentation. + +This sample is a simple illustration of how to construct a sentiment analysis +request and process a response using the API. + +## Prerequisites + +Set up your +[Cloud Natural Language API project](https://cloud.google.com/natural-language/docs/getting-started#set_up_a_project) +, which includes: + +* Enabling the Natural Language API +* Setting up a service account +* Ensuring you've properly set up your `GOOGLE_APPLICATION_CREDENTIALS` for proper + authentication to the service. + +## Download the Code + +``` +$ git clone https://github.com/GoogleCloudPlatform/python-dev-samples/language/sentiment/ +$ cd python-docs-samples/language/sentiment +``` + +## Run the Code + +Open a sample folder, create a virtualenv, install dependencies, and run the sample: + +``` +$ virtualenv env +$ source env/bin/activate +(env)$ pip install -r requirements.txt +``` + +### Usage + +This sample provides four sample movie reviews which you can +provide to the sample on the command line. 
(You can also +pass your own text files.) + +``` +(env)$ python sentiment_analysis.py textfile.txt +Sentiment: polarity of -0.1 with magnitude of 6.7 +``` diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt new file mode 100644 index 00000000..dc1d6a1d --- /dev/null +++ b/samples/snippets/sentiment/requirements.txt @@ -0,0 +1,2 @@ +google-api-python-client==1.5.3 + diff --git a/samples/snippets/sentiment/resources/mixed.txt b/samples/snippets/sentiment/resources/mixed.txt new file mode 100644 index 00000000..d4a42aa2 --- /dev/null +++ b/samples/snippets/sentiment/resources/mixed.txt @@ -0,0 +1,20 @@ +I really wanted to love 'Bladerunner' but ultimately I couldn't get +myself to appreciate it fully. However, you may like it if you're into +science fiction, especially if you're interested in the philosophical +exploration of what it means to be human or machine. Some of the gizmos +like the flying cars and the Vouight-Kampff machine (which seemed very +steampunk), were quite cool. + +I did find the plot pretty slow and but the dialogue and action sequences +were good. Unlike most science fiction films, this one was mostly quiet, and +not all that much happened, except during the last 15 minutes. I didn't +understand why a unicorn was in the movie. The visual effects were fantastic, +however, and the musical score and overall mood was quite interesting. +A futurist Los Angeles that was both highly polished and also falling apart +reminded me of 'Outland.' Certainly, the style of the film made up for +many of its pedantic plot holes. + +If you want your sci-fi to be lasers and spaceships, 'Bladerunner' may +disappoint you. But if you want it to make you think, this movie may +be worth the money. + diff --git a/samples/snippets/sentiment/resources/neg.txt b/samples/snippets/sentiment/resources/neg.txt new file mode 100644 index 00000000..5dcbec0f --- /dev/null +++ b/samples/snippets/sentiment/resources/neg.txt @@ -0,0 +1,4 @@ +What was Hollywood thinking with this movie! I hated, +hated, hated it. BORING! I went afterwards and demanded my money back. +They refused. + diff --git a/samples/snippets/sentiment/resources/neutral.txt b/samples/snippets/sentiment/resources/neutral.txt new file mode 100644 index 00000000..89839ef2 --- /dev/null +++ b/samples/snippets/sentiment/resources/neutral.txt @@ -0,0 +1,3 @@ +I neither liked nor disliked this movie. Parts were interesting, but +overall I was left wanting more. The acting was pretty good. + diff --git a/samples/snippets/sentiment/resources/pos.txt b/samples/snippets/sentiment/resources/pos.txt new file mode 100644 index 00000000..5f211496 --- /dev/null +++ b/samples/snippets/sentiment/resources/pos.txt @@ -0,0 +1,11 @@ +`Bladerunner` is often touted as one of the best science fiction films ever +made. Indeed, it satisfies many of the requisites for good sci-fi: a future +world with flying cars and humanoid robots attempting to rebel against their +creators. But more than anything, `Bladerunner` is a fantastic exploration +of the nature of what it means to be human. If we create robots which can +think, will they become human? And if they do, what makes us unique? Indeed, +how can we be sure we're not human in any case? `Bladerunner` explored +these issues before such movies as `The Matrix,' and did so intelligently. +The visual effects and score by Vangelis set the mood. See this movie +in a dark theatre to appreciate it fully. Highly recommended! 
+ diff --git a/samples/snippets/sentiment/sentiment_analysis.py b/samples/snippets/sentiment/sentiment_analysis.py new file mode 100644 index 00000000..8e250881 --- /dev/null +++ b/samples/snippets/sentiment/sentiment_analysis.py @@ -0,0 +1,54 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +'''Demonstrates how to make a simple call to the Natural Language API''' + +import argparse +from googleapiclient import discovery +from oauth2client.client import GoogleCredentials + + +def main(movie_review_filename): + '''Run a sentiment analysis request on text within a passed filename.''' + + credentials = GoogleCredentials.get_application_default() + service = discovery.build('language', 'v1beta1', credentials=credentials) + + with open(movie_review_filename, 'r') as review_file: + service_request = service.documents().analyzeSentiment( + body={ + 'document': { + 'type': 'PLAIN_TEXT', + 'content': review_file.read(), + } + } + ) + response = service_request.execute() + + polarity = response['documentSentiment']['polarity'] + magnitude = response['documentSentiment']['magnitude'] + + print('Sentiment: polarity of {} with magnitude of {}'.format( + polarity, magnitude)) + return 0 + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + 'movie_review_filename', + help='The filename of the movie review you\'d like to analyze.') + args = parser.parse_args() + main(args.movie_review_filename) diff --git a/samples/snippets/sentiment/sentiment_analysis_test.py b/samples/snippets/sentiment/sentiment_analysis_test.py new file mode 100644 index 00000000..d6b6a7ab --- /dev/null +++ b/samples/snippets/sentiment/sentiment_analysis_test.py @@ -0,0 +1,46 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from sentiment_analysis import main + + +def test_pos(resource, capsys): + main(resource('pos.txt')) + out, err = capsys.readouterr() + polarity = float(re.search('polarity of (.+?) with', out).group(1)) + magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + assert polarity * magnitude > 0 + + +def test_neg(resource, capsys): + main(resource('neg.txt')) + out, err = capsys.readouterr() + polarity = float(re.search('polarity of (.+?) 
with', out).group(1)) + magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + assert polarity * magnitude < 0 + + +def test_mixed(resource, capsys): + main(resource('mixed.txt')) + out, err = capsys.readouterr() + polarity = float(re.search('polarity of (.+?) with', out).group(1)) + assert polarity <= 0.3 + assert polarity >= -0.3 + + +def test_neutral(resource, capsys): + main(resource('neutral.txt')) + out, err = capsys.readouterr() + magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + assert magnitude <= 2.0 From 2df9bd25a18a7f0e4a50d03e860ae189716da1c4 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 20 Sep 2016 12:26:02 -0700 Subject: [PATCH 035/209] Auto-update dependencies. [(#537)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/537) --- samples/snippets/sentiment/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index dc1d6a1d..0b52bd22 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1,2 +1 @@ google-api-python-client==1.5.3 - From 6c5f77735eb9d4c6e793e9dafbc362ecde5d85a5 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 27 Sep 2016 09:46:43 -0700 Subject: [PATCH 036/209] Fix langauge test Change-Id: I285d4258c39ec7f0fd92e890a83e6dbc58941525 --- samples/snippets/ocr_nl/main_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/ocr_nl/main_test.py b/samples/snippets/ocr_nl/main_test.py index c07ed747..d3d6d6a5 100755 --- a/samples/snippets/ocr_nl/main_test.py +++ b/samples/snippets/ocr_nl/main_test.py @@ -73,7 +73,7 @@ def test_entities_list(cloud_config): locale, document = main.extract_description(texts[image_path]) text_analyzer = main.TextAnalyzer() entities = text_analyzer.nl_detect(document) - assert len(entities) == 4 + assert entities etype, ename, salience, wurl = text_analyzer.extract_entity_info( entities[0]) assert ename == 'bennet' From edce7a621614496e730739d732dbed1cb28d4ecb Mon Sep 17 00:00:00 2001 From: Jason Dobry Date: Wed, 5 Oct 2016 09:56:04 -0700 Subject: [PATCH 037/209] Add new "quickstart" samples [(#547)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/547) --- samples/snippets/cloud-client/quickstart.py | 39 +++++++++++++++++++ .../snippets/cloud-client/requirements.txt | 1 + 2 files changed, 40 insertions(+) create mode 100644 samples/snippets/cloud-client/quickstart.py create mode 100644 samples/snippets/cloud-client/requirements.txt diff --git a/samples/snippets/cloud-client/quickstart.py b/samples/snippets/cloud-client/quickstart.py new file mode 100644 index 00000000..24f2ff4d --- /dev/null +++ b/samples/snippets/cloud-client/quickstart.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +def run_quickstart(): + # [START language_quickstart] + # Imports the Google Cloud client library + from google.cloud import language + + # Instantiates a client + language_client = language.Client() + + # The text to analyze + text = 'Hello, world!' + document = language_client.document_from_text(text) + + # Detects the sentiment of the text + sentiment = document.analyze_sentiment() + + print('Text: {}'.format(text)) + print('Sentiment: {}, {}'.format(sentiment.polarity, sentiment.magnitude)) + # [END language_quickstart] + + +if __name__ == '__main__': + run_quickstart() diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt new file mode 100644 index 00000000..cc966c0e --- /dev/null +++ b/samples/snippets/cloud-client/requirements.txt @@ -0,0 +1 @@ +google-cloud-language==0.20.0 From 1f4629d64f764f9f69846fc1bd7436bb4e42d79a Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 12 Oct 2016 10:48:57 -0700 Subject: [PATCH 038/209] Quickstart tests [(#569)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/569) * Add tests for quickstarts * Update secrets --- .../snippets/cloud-client/quickstart_test.py | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 samples/snippets/cloud-client/quickstart_test.py diff --git a/samples/snippets/cloud-client/quickstart_test.py b/samples/snippets/cloud-client/quickstart_test.py new file mode 100644 index 00000000..bd9954c8 --- /dev/null +++ b/samples/snippets/cloud-client/quickstart_test.py @@ -0,0 +1,22 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import quickstart + + +def test_quickstart(capsys): + quickstart.run_quickstart() + out, _ = capsys.readouterr() + assert 'Sentiment' in out From 48170c2cc2e5f3368c505e2f862e23ac951ea599 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 18 Oct 2016 13:41:00 -0700 Subject: [PATCH 039/209] Auto-update dependencies. 
[(#584)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/584) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 0b52bd22..c6e5aa14 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 3aa1d6d9..adafc439 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 requests==2.11.1 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 0b52bd22..c6e5aa14 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 0b52bd22..c6e5aa14 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 0b52bd22..c6e5aa14 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.3 +google-api-python-client==1.5.4 From e06f5e8d525eec73d9bc486ba53e0113cf366299 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 25 Oct 2016 10:54:45 -0700 Subject: [PATCH 040/209] Generate most non-appengine readmes Change-Id: I3779282126cdd05b047194d356932b9995484115 --- samples/snippets/api/README.md | 87 --------------------- samples/snippets/api/README.rst.in | 20 +++++ samples/snippets/cloud-client/README.rst.in | 21 +++++ 3 files changed, 41 insertions(+), 87 deletions(-) delete mode 100644 samples/snippets/api/README.md create mode 100644 samples/snippets/api/README.rst.in create mode 100644 samples/snippets/cloud-client/README.rst.in diff --git a/samples/snippets/api/README.md b/samples/snippets/api/README.md deleted file mode 100644 index 9625df30..00000000 --- a/samples/snippets/api/README.md +++ /dev/null @@ -1,87 +0,0 @@ - -# Google Cloud Natural Language API Sample - -This Python sample demonstrates the use of the [Google Cloud Natural Language API][NL-Docs] -for sentiment, entity, and syntax analysis. - -[NL-Docs]: https://cloud.google.com/natural-language/docs/ - -## Setup - -Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) -steps in the Quickstart doc to create a project and enable the -Cloud Natural Language API. 
Following those steps, make sure that you -[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), -and export the following environment variable: - -``` -export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json -``` - -## Run the sample - -Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. - -To run the example, install the necessary libraries using pip: - -```sh -$ pip install -r requirements.txt -``` - -Then, run the script: - -```sh -$ python analyze.py -``` - -where `` is one of: `entities`, `sentiment`, or `syntax`. - -The script will write to STDOUT the json returned from the API for the requested feature. - -For example, if you run: - -```sh -$ python analyze.py entities "Tom Sawyer is a book written by a guy known as Mark Twain." -``` - -You will see something like the following returned: - -``` -{ - "entities": [ - { - "salience": 0.49785897, - "mentions": [ - { - "text": { - "content": "Tom Sawyer", - "beginOffset": 0 - } - } - ], - "type": "PERSON", - "name": "Tom Sawyer", - "metadata": { - "wikipedia_url": "http://en.wikipedia.org/wiki/The_Adventures_of_Tom_Sawyer" - } - }, - { - "salience": 0.12209519, - "mentions": [ - { - "text": { - "content": "Mark Twain", - "beginOffset": 47 - } - } - ], - "type": "PERSON", - "name": "Mark Twain", - "metadata": { - "wikipedia_url": "http://en.wikipedia.org/wiki/Mark_Twain" - } - } - ], - "language": "en" -} -``` diff --git a/samples/snippets/api/README.rst.in b/samples/snippets/api/README.rst.in new file mode 100644 index 00000000..31294fae --- /dev/null +++ b/samples/snippets/api/README.rst.in @@ -0,0 +1,20 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language API + short_name: Cloud Natural Language API + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers, including sentiment analysis, + entity recognition, and syntax analysis. This API is part of the larger + Cloud Machine Learning API. + +setup: +- auth +- install_deps + +samples: +- name: Analyze syntax + file: analyze.py + show_help: true diff --git a/samples/snippets/cloud-client/README.rst.in b/samples/snippets/cloud-client/README.rst.in new file mode 100644 index 00000000..78da2911 --- /dev/null +++ b/samples/snippets/cloud-client/README.rst.in @@ -0,0 +1,21 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language API + short_name: Cloud Natural Language API + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers, including sentiment analysis, + entity recognition, and syntax analysis. This API is part of the larger + Cloud Machine Learning API. + +setup: +- auth +- install_deps + +samples: +- name: Quickstart + file: quickstart.py + +cloud_client_library: true From 07a19be1e1681b0816c71b2ecbc8adbeaf81141b Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 1 Nov 2016 23:10:14 -0700 Subject: [PATCH 041/209] Auto-update dependencies. 
[(#629)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/629) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index c6e5aa14..2cd2a133 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index adafc439..7a0de854 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 requests==2.11.1 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index c6e5aa14..2cd2a133 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index c6e5aa14..2cd2a133 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index c6e5aa14..2cd2a133 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.4 +google-api-python-client==1.5.5 From e9aaed1c2255a25acd647ddbe541b38c54eba6af Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 12:37:30 -0700 Subject: [PATCH 042/209] added language v1 endpoint --- samples/snippets/api/analyze.py | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index 73e892c3..c46efff2 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -24,15 +24,33 @@ import httplib2 from oauth2client.client import GoogleCredentials - +# TODO REMOVE - when discovery is public +GOOGLE_API_KEY = "GOOGLE_API_KEY" + +# TODO REMOVE - when discovery is public +DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' 
+ 'version=v1&labels=GOOGLE_INTERNAL&key={}') + +# TODO UNCOMMENT - when discovery is public +# def get_service(): +# credentials = GoogleCredentials.get_application_default() +# scoped_credentials = credentials.create_scoped( +# ['https://www.googleapis.com/auth/cloud-platform']) +# http = httplib2.Http() +# scoped_credentials.authorize(http) +# return discovery.build('language', 'v1', http=http) +# TODO END + +# TODO REMOVE - when discovery is public def get_service(): + """Get language service using discovery.""" + import os + api_key = os.environ[GOOGLE_API_KEY] credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) - return discovery.build('language', 'v1beta1', http=http) - + service = discovery.build('language', 'v1', http=httplib2.Http(), credentials=credentials, + discoveryServiceUrl=DISCOVERY_URL.format(api_key)) + return service +# TODO END def get_native_encoding_type(): """Returns the encoding type that matches Python's native strings.""" From 81ab2042e84e95858236dbbf14602513464e315f Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 12:39:46 -0700 Subject: [PATCH 043/209] added analyze syntax --- samples/snippets/api/analyze.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index c46efff2..b0e0e0c4 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -99,15 +99,11 @@ def analyze_syntax(text, encoding='UTF32'): 'type': 'PLAIN_TEXT', 'content': text, }, - 'features': { - 'extract_syntax': True, - }, - 'encodingType': encoding, } service = get_service() - request = service.documents().annotateText(body=body) + request = service.documents().analyzeSyntax(body=body) response = request.execute() return response From e614dda50e0e359dcaaec2e5fad48876a4ae9abd Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 14:04:42 -0700 Subject: [PATCH 044/209] fixed the get_service() method and added discoveryServiceUrl --- samples/snippets/api/analyze.py | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index b0e0e0c4..2d497baa 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -29,28 +29,17 @@ # TODO REMOVE - when discovery is public DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' 
- 'version=v1&labels=GOOGLE_INTERNAL&key={}') - -# TODO UNCOMMENT - when discovery is public -# def get_service(): -# credentials = GoogleCredentials.get_application_default() -# scoped_credentials = credentials.create_scoped( -# ['https://www.googleapis.com/auth/cloud-platform']) -# http = httplib2.Http() -# scoped_credentials.authorize(http) -# return discovery.build('language', 'v1', http=http) -# TODO END + 'version=v1&labels=GOOGLE_INTERNAL') -# TODO REMOVE - when discovery is public def get_service(): - """Get language service using discovery.""" - import os - api_key = os.environ[GOOGLE_API_KEY] credentials = GoogleCredentials.get_application_default() - service = discovery.build('language', 'v1', http=httplib2.Http(), credentials=credentials, - discoveryServiceUrl=DISCOVERY_URL.format(api_key)) - return service -# TODO END + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + return discovery.build('language', 'v1', http=http, + discoveryServiceUrl=DISCOVERY_URL) + def get_native_encoding_type(): """Returns the encoding type that matches Python's native strings.""" From 4ae39929567643a2173833f2bdb2e1c9bb5ac630 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 14:06:26 -0700 Subject: [PATCH 045/209] removed the env variable --- samples/snippets/api/analyze.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index 2d497baa..aca928ce 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -24,8 +24,6 @@ import httplib2 from oauth2client.client import GoogleCredentials -# TODO REMOVE - when discovery is public -GOOGLE_API_KEY = "GOOGLE_API_KEY" # TODO REMOVE - when discovery is public DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' @@ -37,7 +35,8 @@ def get_service(): ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) - return discovery.build('language', 'v1', http=http, + return discovery.build('language', 'v1', + http=http, discoveryServiceUrl=DISCOVERY_URL) From 8dfa13259160b48f292d9e6c8fa14a7c5c153945 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 15:22:12 -0700 Subject: [PATCH 046/209] added README.md sample output --- samples/snippets/api/README.md | 173 +++++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 samples/snippets/api/README.md diff --git a/samples/snippets/api/README.md b/samples/snippets/api/README.md new file mode 100644 index 00000000..49a24cbc --- /dev/null +++ b/samples/snippets/api/README.md @@ -0,0 +1,173 @@ + +# Google Cloud Natural Language API Sample + +This Python sample demonstrates the use of the [Google Cloud Natural Language API][NL-Docs] +for sentiment, entity, and syntax analysis. + +[NL-Docs]: https://cloud.google.com/natural-language/docs/ + +## Setup + +Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) +steps in the Quickstart doc to create a project and enable the +Cloud Natural Language API. 
Following those steps, make sure that you +[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), +and export the following environment variable: + +``` +export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json +``` + +## Run the sample + +Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. + +To run the example, install the necessary libraries using pip: + +```sh +$ pip install -r requirements.txt +``` + +Then, run the script: + +```sh +$ python analyze.py +``` + +where `` is one of: `entities`, `sentiment`, or `syntax`. + +The script will write to STDOUT the json returned from the API for the requested feature. + +* Example1: + +```sh +$ python analyze.py entities "Tom Sawyer is a book written by a guy known as Mark Twain." +``` + +You will see something like the following returned: + +``` +{ + "entities": [ + { + "salience": 0.50827783, + "mentions": [ + { + "text": { + "content": "Tom Sawyer", + "beginOffset": 0 + }, + "type": "PROPER" + } + ], + "type": "PERSON", + "name": "Tom Sawyer", + "metadata": { + "mid": "/m/01b6vv", + "wikipedia_url": "http://en.wikipedia.org/wiki/The_Adventures_of_Tom_Sawyer" + } + }, + { + "salience": 0.22226454, + "mentions": [ + { + "text": { + "content": "book", + "beginOffset": 16 + }, + "type": "COMMON" + } + ], + "type": "WORK_OF_ART", + "name": "book", + "metadata": {} + }, + { + "salience": 0.18305534, + "mentions": [ + { + "text": { + "content": "guy", + "beginOffset": 34 + }, + "type": "COMMON" + } + ], + "type": "PERSON", + "name": "guy", + "metadata": {} + }, + { + "salience": 0.086402282, + "mentions": [ + { + "text": { + "content": "Mark Twain", + "beginOffset": 47 + }, + "type": "PROPER" + } + ], + "type": "PERSON", + "name": "Mark Twain", + "metadata": { + "mid": "/m/014635", + "wikipedia_url": "http://en.wikipedia.org/wiki/Mark_Twain" + } + } + ], + "language": "en" +} +``` + +* Example2: + +```sh +$ python analyze.py entities "Apple has launched new iPhone." +``` + +You will see something like the following returned: + +``` +{ + "entities": [ + { + "salience": 0.72550339, + "mentions": [ + { + "text": { + "content": "Apple", + "beginOffset": 0 + }, + "type": "PROPER" + } + ], + "type": "ORGANIZATION", + "name": "Apple", + "metadata": { + "mid": "/m/0k8z", + "wikipedia_url": "http://en.wikipedia.org/wiki/Apple_Inc." + } + }, + { + "salience": 0.27449661, + "mentions": [ + { + "text": { + "content": "iPhone", + "beginOffset": 23 + }, + "type": "PROPER" + } + ], + "type": "CONSUMER_GOOD", + "name": "iPhone", + "metadata": { + "mid": "/m/027lnzs", + "wikipedia_url": "http://en.wikipedia.org/wiki/IPhone" + } + } + ], + "language": "en" +} +``` From 3635bfb5e31f89b765f41b13185c2f61517d2cb4 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Wed, 19 Oct 2016 15:23:54 -0700 Subject: [PATCH 047/209] Added header --- samples/snippets/api/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/samples/snippets/api/README.md b/samples/snippets/api/README.md index 49a24cbc..33710e22 100644 --- a/samples/snippets/api/README.md +++ b/samples/snippets/api/README.md @@ -38,6 +38,8 @@ where `` is one of: `entities`, `sentiment`, or `syntax`. 
The script will write to STDOUT the json returned from the API for the requested feature. +## Example Runs + * Example1: ```sh From 507e49cb2b4ba79adaecbb193449987042f2e880 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Thu, 20 Oct 2016 17:43:33 -0700 Subject: [PATCH 048/209] added two blank lines --- samples/snippets/api/analyze.py | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index aca928ce..6861ff5c 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -29,6 +29,7 @@ DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' 'version=v1&labels=GOOGLE_INTERNAL') + def get_service(): credentials = GoogleCredentials.get_application_default() scoped_credentials = credentials.create_scoped( From 61534e651cb1f8d987a8b4432abb951bbc93aa43 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 24 Oct 2016 14:40:57 -0700 Subject: [PATCH 049/209] changed to score as per new api --- samples/snippets/api/analyze.py | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index 6861ff5c..7e961d83 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -88,6 +88,7 @@ def analyze_syntax(text, encoding='UTF32'): 'type': 'PLAIN_TEXT', 'content': text, }, + 'encoding_type': encoding } service = get_service() From e3a35610b5a6b7dcc2e4e2bfb7a4f8f022bd3a02 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 24 Oct 2016 14:41:15 -0700 Subject: [PATCH 050/209] added encoding type --- samples/snippets/api/analyze_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/api/analyze_test.py b/samples/snippets/api/analyze_test.py index 11b0d65d..8f024fda 100644 --- a/samples/snippets/api/analyze_test.py +++ b/samples/snippets/api/analyze_test.py @@ -33,7 +33,7 @@ def test_analyze_sentiment(capsys): 'your face is really ugly and i hate it.') sentiment = result['documentSentiment'] - assert sentiment['polarity'] < 0 + assert sentiment['score'] < 0 assert sentiment['magnitude'] < 1 result = analyze.analyze_sentiment( @@ -41,7 +41,7 @@ def test_analyze_sentiment(capsys): 'angle of repose leaves little room for improvement.') sentiment = result['documentSentiment'] - assert sentiment['polarity'] > 0 + assert sentiment['score'] > 0 assert sentiment['magnitude'] < 1 From edb0315e17bac9dc604cf5991dc234d00d923f0d Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Tue, 25 Oct 2016 14:07:36 -0700 Subject: [PATCH 051/209] added encoding type analyze sentiment --- samples/snippets/api/analyze.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index 7e961d83..ee40e4d8 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -55,7 +55,7 @@ def analyze_entities(text, encoding='UTF32'): 'type': 'PLAIN_TEXT', 'content': text, }, - 'encodingType': encoding, + 'encoding_type': encoding, } service = get_service() @@ -66,12 +66,13 @@ def analyze_entities(text, encoding='UTF32'): return response -def analyze_sentiment(text): +def analyze_sentiment(text, encoding='UTF32'): body = { 'document': { 'type': 'PLAIN_TEXT', 'content': text, - } + }, + 'encoding_type': encoding } service = get_service() @@ -112,7 +113,7 @@ def analyze_syntax(text, encoding='UTF32'): if args.command == 'entities': result = analyze_entities(args.text, 
get_native_encoding_type()) elif args.command == 'sentiment': - result = analyze_sentiment(args.text) + result = analyze_sentiment(args.text, get_native_encoding_type()) elif args.command == 'syntax': result = analyze_syntax(args.text, get_native_encoding_type()) From f6526797561f28e13cddccc184852ec6954ba49f Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Thu, 10 Nov 2016 13:39:07 -0800 Subject: [PATCH 052/209] added a TODO --- samples/snippets/api/analyze.py | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index ee40e4d8..1e18a487 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -36,6 +36,7 @@ def get_service(): ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) + # TODO Change to credentials=credentials return discovery.build('language', 'v1', http=http, discoveryServiceUrl=DISCOVERY_URL) From 1abdfc2f403b752a6fa72a61ff308899964a80fe Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Sat, 12 Nov 2016 08:09:34 -0800 Subject: [PATCH 053/209] added auto-gen rst file --- samples/snippets/api/README.rst | 98 ++++++++++++++++++++++ samples/snippets/cloud-client/README.rst | 102 +++++++++++++++++++++++ 2 files changed, 200 insertions(+) create mode 100644 samples/snippets/api/README.rst create mode 100644 samples/snippets/cloud-client/README.rst diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst new file mode 100644 index 00000000..369e2f4e --- /dev/null +++ b/samples/snippets/api/README.rst @@ -0,0 +1,98 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language API Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + + + +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud beta auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. 
_additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Analyze syntax ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python analyze.py + + usage: analyze.py [-h] {entities,sentiment,syntax} text + + Analyzes text using the Google Cloud Natural Language API. + + positional arguments: + {entities,sentiment,syntax} + text + + optional arguments: + -h, --help show this help message and exit + + + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/samples/snippets/cloud-client/README.rst b/samples/snippets/cloud-client/README.rst new file mode 100644 index 00000000..d8ba578d --- /dev/null +++ b/samples/snippets/cloud-client/README.rst @@ -0,0 +1,102 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language API Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + + + +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud beta auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. 
_additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Quickstart ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python quickstart.py + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ From 8f3d2d079de23859e2269857df52ac493f0c0a73 Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Sat, 12 Nov 2016 09:47:16 -0800 Subject: [PATCH 054/209] removed README.md --- samples/snippets/api/README.md | 175 --------------------------------- 1 file changed, 175 deletions(-) delete mode 100644 samples/snippets/api/README.md diff --git a/samples/snippets/api/README.md b/samples/snippets/api/README.md deleted file mode 100644 index 33710e22..00000000 --- a/samples/snippets/api/README.md +++ /dev/null @@ -1,175 +0,0 @@ - -# Google Cloud Natural Language API Sample - -This Python sample demonstrates the use of the [Google Cloud Natural Language API][NL-Docs] -for sentiment, entity, and syntax analysis. - -[NL-Docs]: https://cloud.google.com/natural-language/docs/ - -## Setup - -Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) -steps in the Quickstart doc to create a project and enable the -Cloud Natural Language API. Following those steps, make sure that you -[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), -and export the following environment variable: - -``` -export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json -``` - -## Run the sample - -Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. 
- -To run the example, install the necessary libraries using pip: - -```sh -$ pip install -r requirements.txt -``` - -Then, run the script: - -```sh -$ python analyze.py -``` - -where `` is one of: `entities`, `sentiment`, or `syntax`. - -The script will write to STDOUT the json returned from the API for the requested feature. - -## Example Runs - -* Example1: - -```sh -$ python analyze.py entities "Tom Sawyer is a book written by a guy known as Mark Twain." -``` - -You will see something like the following returned: - -``` -{ - "entities": [ - { - "salience": 0.50827783, - "mentions": [ - { - "text": { - "content": "Tom Sawyer", - "beginOffset": 0 - }, - "type": "PROPER" - } - ], - "type": "PERSON", - "name": "Tom Sawyer", - "metadata": { - "mid": "/m/01b6vv", - "wikipedia_url": "http://en.wikipedia.org/wiki/The_Adventures_of_Tom_Sawyer" - } - }, - { - "salience": 0.22226454, - "mentions": [ - { - "text": { - "content": "book", - "beginOffset": 16 - }, - "type": "COMMON" - } - ], - "type": "WORK_OF_ART", - "name": "book", - "metadata": {} - }, - { - "salience": 0.18305534, - "mentions": [ - { - "text": { - "content": "guy", - "beginOffset": 34 - }, - "type": "COMMON" - } - ], - "type": "PERSON", - "name": "guy", - "metadata": {} - }, - { - "salience": 0.086402282, - "mentions": [ - { - "text": { - "content": "Mark Twain", - "beginOffset": 47 - }, - "type": "PROPER" - } - ], - "type": "PERSON", - "name": "Mark Twain", - "metadata": { - "mid": "/m/014635", - "wikipedia_url": "http://en.wikipedia.org/wiki/Mark_Twain" - } - } - ], - "language": "en" -} -``` - -* Example2: - -```sh -$ python analyze.py entities "Apple has launched new iPhone." -``` - -You will see something like the following returned: - -``` -{ - "entities": [ - { - "salience": 0.72550339, - "mentions": [ - { - "text": { - "content": "Apple", - "beginOffset": 0 - }, - "type": "PROPER" - } - ], - "type": "ORGANIZATION", - "name": "Apple", - "metadata": { - "mid": "/m/0k8z", - "wikipedia_url": "http://en.wikipedia.org/wiki/Apple_Inc." - } - }, - { - "salience": 0.27449661, - "mentions": [ - { - "text": { - "content": "iPhone", - "beginOffset": 23 - }, - "type": "PROPER" - } - ], - "type": "CONSUMER_GOOD", - "name": "iPhone", - "metadata": { - "mid": "/m/027lnzs", - "wikipedia_url": "http://en.wikipedia.org/wiki/IPhone" - } - } - ], - "language": "en" -} -``` From fbe97b9f25eeb3b728994208601f95d41e3a0fbb Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 14 Nov 2016 11:40:20 -0800 Subject: [PATCH 055/209] removed discovery service with public discovery --- samples/snippets/api/analyze.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index 1e18a487..70739828 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -25,21 +25,15 @@ from oauth2client.client import GoogleCredentials -# TODO REMOVE - when discovery is public -DISCOVERY_URL = ('https://language.googleapis.com/$discovery/rest?' 
- 'version=v1&labels=GOOGLE_INTERNAL') - - def get_service(): credentials = GoogleCredentials.get_application_default() scoped_credentials = credentials.create_scoped( ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) - # TODO Change to credentials=credentials return discovery.build('language', 'v1', http=http, - discoveryServiceUrl=DISCOVERY_URL) + credentials=credentials) def get_native_encoding_type(): From 16cf61f17dce8a0238991c1528456e86b221847f Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 14 Nov 2016 17:36:27 -0800 Subject: [PATCH 056/209] Movie sample changes --- samples/snippets/movie_nl/main.py | 70 +++++-------------- samples/snippets/movie_nl/main_test.py | 6 +- samples/snippets/ocr_nl/main.py | 6 +- .../snippets/sentiment/sentiment_analysis.py | 20 ++++-- .../sentiment/sentiment_analysis_test.py | 14 ++-- 5 files changed, 48 insertions(+), 68 deletions(-) diff --git a/samples/snippets/movie_nl/main.py b/samples/snippets/movie_nl/main.py index ba5c63b6..d6ef5d16 100644 --- a/samples/snippets/movie_nl/main.py +++ b/samples/snippets/movie_nl/main.py @@ -21,6 +21,7 @@ from googleapiclient import discovery from googleapiclient.errors import HttpError +import httplib2 from oauth2client.client import GoogleCredentials import requests @@ -30,10 +31,7 @@ def analyze_document(service, document): the movie name.""" logging.info('Analyzing {}'.format(document.doc_id)) - sentences, entities = document.extract_all_sentences(service) - - sentiments = [get_sentiment(service, sentence) for sentence in sentences] - + sentiments, entities = document.extract_sentiment_entities(service) return sentiments, entities @@ -56,29 +54,6 @@ def get_request_body(text, syntax=True, entities=True, sentiment=True): return body -def get_sentiment(service, sentence): - """Get the sentence-level sentiment.""" - body = get_request_body( - sentence, syntax=False, entities=True, sentiment=True) - - docs = service.documents() - request = docs.annotateText(body=body) - - response = request.execute(num_retries=3) - - sentiment = response.get('documentSentiment') - - if sentiment is None: - return (None, None) - else: - pol = sentiment.get('polarity') - mag = sentiment.get('magnitude') - - if pol is None and mag is not None: - pol = 0 - return (pol, mag) - - class Document(object): """Document class captures a single document of movie reviews.""" @@ -86,32 +61,28 @@ def __init__(self, text, doc_id, doc_path): self.text = text self.doc_id = doc_id self.doc_path = doc_path - self.sentence_entity_pair = None + self.sentiment_entity_pair = None self.label = None - def extract_all_sentences(self, service): + def extract_sentiment_entities(self, service): """Extract the sentences in a document.""" - if self.sentence_entity_pair is not None: + if self.sentiment_entity_pair is not None: return self.sentence_entity_pair docs = service.documents() request_body = get_request_body( self.text, - syntax=True, + syntax=False, entities=True, - sentiment=False) + sentiment=True) request = docs.annotateText(body=request_body) ent_list = [] response = request.execute() entities = response.get('entities', []) - sentences = response.get('sentences', []) - - sent_list = [ - sentence.get('text', {}).get('content') for sentence in sentences - ] + documentSentiment = response.get('documentSentiment', {}) for entity in entities: ent_type = entity.get('type') @@ -120,9 +91,9 @@ def extract_all_sentences(self, service): if ent_type == 
'PERSON' and wiki_url is not None: ent_list.append(wiki_url) - self.sentence_entity_pair = (sent_list, ent_list) + self.sentiment_entity_pair = (documentSentiment, ent_list) - return self.sentence_entity_pair + return self.sentiment_entity_pair def to_sentiment_json(doc_id, sent, label): @@ -200,18 +171,9 @@ def get_sentiment_entities(service, document): """ sentiments, entities = analyze_document(service, document) + score = sentiments.get('score') - sentiments = [sent for sent in sentiments if sent[0] is not None] - negative_sentiments = [ - polarity for polarity, magnitude in sentiments if polarity < 0.0] - positive_sentiments = [ - polarity for polarity, magnitude in sentiments if polarity > 0.0] - - negative = sum(negative_sentiments) - positive = sum(positive_sentiments) - total = positive + negative - - return (total, entities) + return (score, entities) def get_sentiment_label(sentiment): @@ -318,8 +280,12 @@ def get_service(): """Build a client to the Google Cloud Natural Language API.""" credentials = GoogleCredentials.get_application_default() - - return discovery.build('language', 'v1beta1', + scoped_credentials = credentials.create_scoped( + ['https://www.googleapis.com/auth/cloud-platform']) + http = httplib2.Http() + scoped_credentials.authorize(http) + return discovery.build('language', 'v1', + http=http, credentials=credentials) diff --git a/samples/snippets/movie_nl/main_test.py b/samples/snippets/movie_nl/main_test.py index 8e22a1da..74c62eb3 100644 --- a/samples/snippets/movie_nl/main_test.py +++ b/samples/snippets/movie_nl/main_test.py @@ -69,10 +69,10 @@ def test_process_movie_reviews(): entities = [json.loads(entity) for entity in entities] # assert sentiments - assert sentiments[0].get('sentiment') == 1.0 + assert sentiments[0].get('sentiment') == 0.9 assert sentiments[0].get('label') == 1 - assert sentiments[1].get('sentiment') == 1.0 + assert sentiments[1].get('sentiment') == 0.9 assert sentiments[1].get('label') == 1 # assert entities @@ -80,7 +80,7 @@ def test_process_movie_reviews(): assert entities[0].get('name') == 'Tom Cruise' assert (entities[0].get('wiki_url') == 'http://en.wikipedia.org/wiki/Tom_Cruise') - assert entities[0].get('sentiment') == 2.0 + assert entities[0].get('sentiment') == 1.8 def test_rank_positive_entities(capsys): diff --git a/samples/snippets/ocr_nl/main.py b/samples/snippets/ocr_nl/main.py index 6e329f53..03fbdf9d 100755 --- a/samples/snippets/ocr_nl/main.py +++ b/samples/snippets/ocr_nl/main.py @@ -115,10 +115,12 @@ class TextAnalyzer(object): def __init__(self, db_filename=None): credentials = GoogleCredentials.get_application_default() scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) + ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) - self.service = discovery.build('language', 'v1beta1', http=http) + self.service = discovery.build('language', 'v1', + http=http, + credentials=credentials) # This list will store the entity information gleaned from the # image files. 
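
The net effect of the movie_nl changes above is that sentiment is now read once from the document-level `documentSentiment` field of the `annotateText` response instead of being summed across per-sentence polarities. A minimal sketch of that extraction, assuming `response` is the parsed JSON returned by the `annotateText` request that `get_request_body` builds (the helper name is illustrative, not part of main.py):

```python
def score_and_people(response):
    # Document-level sentiment replaces the old per-sentence polarity sum.
    document_sentiment = response.get('documentSentiment', {})
    score = document_sentiment.get('score')

    # Same PERSON/wikipedia_url filter as extract_sentiment_entities.
    person_links = []
    for entity in response.get('entities', []):
        wiki_url = entity.get('metadata', {}).get('wikipedia_url')
        if entity.get('type') == 'PERSON' and wiki_url is not None:
            person_links.append(wiki_url)

    return score, person_links
```
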
diff --git a/samples/snippets/sentiment/sentiment_analysis.py b/samples/snippets/sentiment/sentiment_analysis.py index 8e250881..31a8b88c 100644 --- a/samples/snippets/sentiment/sentiment_analysis.py +++ b/samples/snippets/sentiment/sentiment_analysis.py @@ -22,7 +22,7 @@ def main(movie_review_filename): '''Run a sentiment analysis request on text within a passed filename.''' credentials = GoogleCredentials.get_application_default() - service = discovery.build('language', 'v1beta1', credentials=credentials) + service = discovery.build('language', 'v1', credentials=credentials) with open(movie_review_filename, 'r') as review_file: service_request = service.documents().analyzeSentiment( @@ -35,11 +35,23 @@ def main(movie_review_filename): ) response = service_request.execute() - polarity = response['documentSentiment']['polarity'] + score = response['documentSentiment']['score'] magnitude = response['documentSentiment']['magnitude'] - print('Sentiment: polarity of {} with magnitude of {}'.format( - polarity, magnitude)) + for i, sentence in enumerate(response['sentences']): + sentence_sentiment = sentence['sentiment']['score'] + print('Sentence {} has a sentiment score of {}'.format( + i, + sentence_sentiment)) + + print('Overall Sentiment: score of {} with magnitude of {}'.format( + score, + magnitude) + ) + return 0 + + print('Sentiment: score of {} with magnitude of {}'.format( + score, magnitude)) return 0 diff --git a/samples/snippets/sentiment/sentiment_analysis_test.py b/samples/snippets/sentiment/sentiment_analysis_test.py index d6b6a7ab..ff282119 100644 --- a/samples/snippets/sentiment/sentiment_analysis_test.py +++ b/samples/snippets/sentiment/sentiment_analysis_test.py @@ -18,25 +18,25 @@ def test_pos(resource, capsys): main(resource('pos.txt')) out, err = capsys.readouterr() - polarity = float(re.search('polarity of (.+?) with', out).group(1)) + score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) - assert polarity * magnitude > 0 + assert score * magnitude > 0 def test_neg(resource, capsys): main(resource('neg.txt')) out, err = capsys.readouterr() - polarity = float(re.search('polarity of (.+?) with', out).group(1)) + score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) - assert polarity * magnitude < 0 + assert score * magnitude < 0 def test_mixed(resource, capsys): main(resource('mixed.txt')) out, err = capsys.readouterr() - polarity = float(re.search('polarity of (.+?) with', out).group(1)) - assert polarity <= 0.3 - assert polarity >= -0.3 + score = float(re.search('score of (.+?) with', out).group(1)) + assert score <= 0.3 + assert score >= -0.3 def test_neutral(resource, capsys): From 7dc94df409f6dfbffcc66c41a666b81d7d715b0b Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Mon, 14 Nov 2016 17:37:35 -0800 Subject: [PATCH 057/209] Updating language requirements. 
Change-Id: Ic08400df1c1f2440c46a845ee46e7674dc5e8fd5 --- samples/snippets/cloud-client/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt index cc966c0e..ce34e7df 100644 --- a/samples/snippets/cloud-client/requirements.txt +++ b/samples/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.20.0 +google-cloud-language==0.21.0 From 7c2594dce210bb3ce8495168336c62cef9fe89dd Mon Sep 17 00:00:00 2001 From: Phil Fritzsche Date: Mon, 14 Nov 2016 17:51:31 -0800 Subject: [PATCH 058/209] Update NL sentiment analysis readme to use score --- samples/snippets/sentiment/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/sentiment/README.md b/samples/snippets/sentiment/README.md index e77cdf16..064b8f94 100644 --- a/samples/snippets/sentiment/README.md +++ b/samples/snippets/sentiment/README.md @@ -44,5 +44,5 @@ pass your own text files.) ``` (env)$ python sentiment_analysis.py textfile.txt -Sentiment: polarity of -0.1 with magnitude of 6.7 +Sentiment: score of -0.1 with magnitude of 6.7 ``` From dcfb4d4793f58e0945a2817adbe814a4a5d2dc9f Mon Sep 17 00:00:00 2001 From: Puneith Kaul Date: Mon, 14 Nov 2016 21:45:47 -0800 Subject: [PATCH 059/209] fixed discovery build by removing http --- samples/snippets/api/analyze.py | 6 ------ samples/snippets/movie_nl/main.py | 6 ------ 2 files changed, 12 deletions(-) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index 70739828..ab72208a 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -21,18 +21,12 @@ import sys from googleapiclient import discovery -import httplib2 from oauth2client.client import GoogleCredentials def get_service(): credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) return discovery.build('language', 'v1', - http=http, credentials=credentials) diff --git a/samples/snippets/movie_nl/main.py b/samples/snippets/movie_nl/main.py index d6ef5d16..6d21f4bf 100644 --- a/samples/snippets/movie_nl/main.py +++ b/samples/snippets/movie_nl/main.py @@ -21,7 +21,6 @@ from googleapiclient import discovery from googleapiclient.errors import HttpError -import httplib2 from oauth2client.client import GoogleCredentials import requests @@ -280,12 +279,7 @@ def get_service(): """Build a client to the Google Cloud Natural Language API.""" credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) return discovery.build('language', 'v1', - http=http, credentials=credentials) From 4ce05b6164da77394879ab5a3df52ce8fffc1827 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 15 Nov 2016 14:58:27 -0800 Subject: [PATCH 060/209] Update samples to support latest Google Cloud Python [(#656)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/656) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 
7a0de854..f552f746 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.5 -requests==2.11.1 +requests==2.12.0 From c7cc48d087031ff30021cc08f415ad8f8db8815e Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 15 Nov 2016 15:05:13 -0800 Subject: [PATCH 061/209] Update readmes Change-Id: Ie385fd8105325c6f2754b737e0f11c84254bcb47 --- samples/snippets/api/README.rst | 2 +- samples/snippets/cloud-client/README.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst index 369e2f4e..301fed0e 100644 --- a/samples/snippets/api/README.rst +++ b/samples/snippets/api/README.rst @@ -95,4 +95,4 @@ To run this sample: -.. _Google Cloud SDK: https://cloud.google.com/sdk/ +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/cloud-client/README.rst b/samples/snippets/cloud-client/README.rst index d8ba578d..a0259ce9 100644 --- a/samples/snippets/cloud-client/README.rst +++ b/samples/snippets/cloud-client/README.rst @@ -99,4 +99,4 @@ to `browse the source`_ and `report issues`_. https://github.com/GoogleCloudPlatform/google-cloud-python/issues -.. _Google Cloud SDK: https://cloud.google.com/sdk/ +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file From 726fa619627f449371f8cdd6df266d4c92aaad5d Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 16 Nov 2016 10:39:05 -0800 Subject: [PATCH 062/209] Fix flaky NL test Change-Id: I064c59e0c4f6d9b5ff0c888353df860dc344f74b --- samples/snippets/ocr_nl/main_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/ocr_nl/main_test.py b/samples/snippets/ocr_nl/main_test.py index d3d6d6a5..e5a9962e 100755 --- a/samples/snippets/ocr_nl/main_test.py +++ b/samples/snippets/ocr_nl/main_test.py @@ -59,7 +59,7 @@ def test_text_returns_entities(): text = "Holmes and Watson walked to the cafe." text_analyzer = main.TextAnalyzer() entities = text_analyzer.nl_detect(text) - assert len(entities) == 2 + assert entities etype, ename, salience, wurl = text_analyzer.extract_entity_info( entities[0]) assert ename == 'holmes' From 0429072e2bb1946b8cb72a3059135e42d227d559 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 21 Nov 2016 10:00:49 -0800 Subject: [PATCH 063/209] Auto-update dependencies. 
[(#673)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/673) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index f552f746..7e62725a 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.5 -requests==2.12.0 +requests==2.12.1 From d0ac20a6c382c7bb24ee97b05da56a135e91af97 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 30 Nov 2016 10:42:14 -0800 Subject: [PATCH 064/209] Fix more lint issues Change-Id: I49d4f063d210629346d8d8390c9eaec261c4e519 --- samples/snippets/sentiment/sentiment_analysis.py | 12 +++++------- .../snippets/sentiment/sentiment_analysis_test.py | 1 + 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/samples/snippets/sentiment/sentiment_analysis.py b/samples/snippets/sentiment/sentiment_analysis.py index 31a8b88c..6f92fc66 100644 --- a/samples/snippets/sentiment/sentiment_analysis.py +++ b/samples/snippets/sentiment/sentiment_analysis.py @@ -11,15 +11,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Demonstrates how to make a simple call to the Natural Language API''' +"""Demonstrates how to make a simple call to the Natural Language API.""" import argparse + from googleapiclient import discovery from oauth2client.client import GoogleCredentials def main(movie_review_filename): - '''Run a sentiment analysis request on text within a passed filename.''' + """Run a sentiment analysis request on text within a passed filename.""" credentials = GoogleCredentials.get_application_default() service = discovery.build('language', 'v1', credentials=credentials) @@ -41,13 +42,10 @@ def main(movie_review_filename): for i, sentence in enumerate(response['sentences']): sentence_sentiment = sentence['sentiment']['score'] print('Sentence {} has a sentiment score of {}'.format( - i, - sentence_sentiment)) + i, sentence_sentiment)) print('Overall Sentiment: score of {} with magnitude of {}'.format( - score, - magnitude) - ) + score, magnitude)) return 0 print('Sentiment: score of {} with magnitude of {}'.format( diff --git a/samples/snippets/sentiment/sentiment_analysis_test.py b/samples/snippets/sentiment/sentiment_analysis_test.py index ff282119..7ba1d144 100644 --- a/samples/snippets/sentiment/sentiment_analysis_test.py +++ b/samples/snippets/sentiment/sentiment_analysis_test.py @@ -12,6 +12,7 @@ # limitations under the License. import re + from sentiment_analysis import main From baf1a5871be1f169fa105426fd069122a2abddbe Mon Sep 17 00:00:00 2001 From: Jason Dobry Date: Tue, 6 Dec 2016 13:20:19 -0800 Subject: [PATCH 065/209] Add Cloud Client NL API samples. 
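
The new snippets call the `google-cloud-language` client library rather than building raw `googleapiclient` requests. A minimal sketch of the pattern they follow, using the same sample sentence as resources/text.txt (only the sentence is an example; the calls mirror snippets.py below):

```python
from google.cloud import language

language_client = language.Client()

# Instantiates a plain text document; HTML documents are also supported.
document = language_client.document_from_text(
    'President Obama is speaking at the White House.')

# Document-level sentiment.
sentiment = document.analyze_sentiment()
print('Score: {}'.format(sentiment.score))
print('Magnitude: {}'.format(sentiment.magnitude))

# Entity analysis on the same document.
for entity in document.analyze_entities():
    print('{:<16}: {}'.format('name', entity.name))
    print('{:<16}: {}'.format('type', entity.entity_type))
```
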
[(#668)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/668) --- samples/snippets/cloud-client/README.rst.in | 5 +- samples/snippets/cloud-client/quickstart.py | 2 +- .../snippets/cloud-client/requirements.txt | 2 +- .../snippets/cloud-client/resources/text.txt | 1 + samples/snippets/cloud-client/snippets.py | 172 ++++++++++++++++++ .../snippets/cloud-client/snippets_test.py | 60 ++++++ 6 files changed, 239 insertions(+), 3 deletions(-) create mode 100644 samples/snippets/cloud-client/resources/text.txt create mode 100644 samples/snippets/cloud-client/snippets.py create mode 100644 samples/snippets/cloud-client/snippets_test.py diff --git a/samples/snippets/cloud-client/README.rst.in b/samples/snippets/cloud-client/README.rst.in index 78da2911..faf402bf 100644 --- a/samples/snippets/cloud-client/README.rst.in +++ b/samples/snippets/cloud-client/README.rst.in @@ -4,7 +4,7 @@ product: name: Google Cloud Natural Language API short_name: Cloud Natural Language API url: https://cloud.google.com/natural-language/docs/ - description: > + description: > The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger @@ -17,5 +17,8 @@ setup: samples: - name: Quickstart file: quickstart.py +- name: Snippets + file: snippets.py + show_help: true cloud_client_library: true diff --git a/samples/snippets/cloud-client/quickstart.py b/samples/snippets/cloud-client/quickstart.py index 24f2ff4d..3b42ac65 100644 --- a/samples/snippets/cloud-client/quickstart.py +++ b/samples/snippets/cloud-client/quickstart.py @@ -31,7 +31,7 @@ def run_quickstart(): sentiment = document.analyze_sentiment() print('Text: {}'.format(text)) - print('Sentiment: {}, {}'.format(sentiment.polarity, sentiment.magnitude)) + print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) # [END language_quickstart] diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt index ce34e7df..130d1cc7 100644 --- a/samples/snippets/cloud-client/requirements.txt +++ b/samples/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.21.0 +google-cloud-language==0.22.0 diff --git a/samples/snippets/cloud-client/resources/text.txt b/samples/snippets/cloud-client/resources/text.txt new file mode 100644 index 00000000..97a1cea0 --- /dev/null +++ b/samples/snippets/cloud-client/resources/text.txt @@ -0,0 +1 @@ +President Obama is speaking at the White House. \ No newline at end of file diff --git a/samples/snippets/cloud-client/snippets.py b/samples/snippets/cloud-client/snippets.py new file mode 100644 index 00000000..c0f5f8a3 --- /dev/null +++ b/samples/snippets/cloud-client/snippets.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python + +# Copyright 2016 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations with the +Google Cloud Natural Language API + +For more information, the documentation at +https://cloud.google.com/natural-language/docs. +""" + +import argparse + +from google.cloud import language + + +def sentiment_text(text): + """Detects sentiment in the text.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + sentiment = document.analyze_sentiment() + + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + +def sentiment_file(gcs_uri): + """Detects sentiment in the file located in Google Cloud Storage.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + sentiment = document.analyze_sentiment() + + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + +def entities_text(text): + """Detects entities in the text.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects entities in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + entities = document.analyze_entities() + + for entity in entities: + print('=' * 20) + print('{:<16}: {}'.format('name', entity.name)) + print('{:<16}: {}'.format('type', entity.entity_type)) + print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url)) + print('{:<16}: {}'.format('metadata', entity.metadata)) + print('{:<16}: {}'.format('salience', entity.salience)) + + +def entities_file(gcs_uri): + """Detects entities in the file located in Google Cloud Storage.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + entities = document.analyze_entities() + + for entity in entities: + print('=' * 20) + print('{:<16}: {}'.format('name', entity.name)) + print('{:<16}: {}'.format('type', entity.entity_type)) + print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url)) + print('{:<16}: {}'.format('metadata', entity.metadata)) + print('{:<16}: {}'.format('salience', entity.salience)) + + +def syntax_text(text): + """Detects syntax in the text.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects syntax in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + tokens = document.analyze_syntax() + + for token in tokens: + print('{}: {}'.format(token.part_of_speech, token.text_content)) + + +def syntax_file(gcs_uri): + """Detects syntax in the file located in Google Cloud Storage.""" + language_client = language.Client() + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects syntax in the document. 
You can also analyze HTML with: + # document.doc_type == language.Document.HTML + tokens = document.analyze_syntax() + + for token in tokens: + print('{}: {}'.format(token.part_of_speech, token.text_content)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + + sentiment_text_parser = subparsers.add_parser( + 'sentiment-text', help=sentiment_text.__doc__) + sentiment_text_parser.add_argument('text') + + sentiment_file_parser = subparsers.add_parser( + 'sentiment-file', help=sentiment_file.__doc__) + sentiment_file_parser.add_argument('gcs_uri') + + entities_text_parser = subparsers.add_parser( + 'entities-text', help=entities_text.__doc__) + entities_text_parser.add_argument('text') + + entities_file_parser = subparsers.add_parser( + 'entities-file', help=entities_file.__doc__) + entities_file_parser.add_argument('gcs_uri') + + syntax_text_parser = subparsers.add_parser( + 'syntax-text', help=syntax_text.__doc__) + syntax_text_parser.add_argument('text') + + syntax_file_parser = subparsers.add_parser( + 'syntax-file', help=syntax_file.__doc__) + syntax_file_parser.add_argument('gcs_uri') + + args = parser.parse_args() + + if args.command == 'sentiment-text': + sentiment_text(args.text) + elif args.command == 'sentiment-file': + sentiment_file(args.gcs_uri) + elif args.command == 'entities-text': + entities_text(args.text) + elif args.command == 'entities-file': + entities_file(args.gcs_uri) + elif args.command == 'syntax-text': + syntax_text(args.text) + elif args.command == 'syntax-file': + syntax_file(args.gcs_uri) diff --git a/samples/snippets/cloud-client/snippets_test.py b/samples/snippets/cloud-client/snippets_test.py new file mode 100644 index 00000000..47050e44 --- /dev/null +++ b/samples/snippets/cloud-client/snippets_test.py @@ -0,0 +1,60 @@ +# Copyright 2016 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import snippets + + +def test_sentiment_text(cloud_config, capsys): + snippets.sentiment_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Score: 0.2' in out + + +def test_sentiment_file(cloud_config, capsys): + cloud_storage_input_uri = 'gs://{}/text.txt'.format( + cloud_config.storage_bucket) + snippets.sentiment_file(cloud_storage_input_uri) + out, _ = capsys.readouterr() + assert 'Score: 0.2' in out + + +def test_entities_text(cloud_config, capsys): + snippets.entities_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'name' in out + assert ': Obama' in out + + +def test_entities_file(cloud_config, capsys): + cloud_storage_input_uri = 'gs://{}/text.txt'.format( + cloud_config.storage_bucket) + snippets.entities_file(cloud_storage_input_uri) + out, _ = capsys.readouterr() + assert 'name' in out + assert ': Obama' in out + + +def test_syntax_text(cloud_config, capsys): + snippets.syntax_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'NOUN: President' in out + + +def test_syntax_file(cloud_config, capsys): + cloud_storage_input_uri = 'gs://{}/text.txt'.format( + cloud_config.storage_bucket) + snippets.syntax_file(cloud_storage_input_uri) + out, _ = capsys.readouterr() + assert 'NOUN: President' in out From a0a165de99f34b7709b15eca276f71c706d5b0dc Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 13 Dec 2016 09:54:02 -0800 Subject: [PATCH 066/209] Auto-update dependencies. [(#715)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/715) --- samples/snippets/cloud-client/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt index 130d1cc7..69287a66 100644 --- a/samples/snippets/cloud-client/requirements.txt +++ b/samples/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.22.0 +google-cloud-language==0.22.1 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 7e62725a..a915b32f 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.5 -requests==2.12.1 +requests==2.12.3 From 817f0498a12c9d7278005f3cbb66b4bb72cdae4d Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 15 Dec 2016 10:02:03 -0800 Subject: [PATCH 067/209] Auto-update dependencies. 
[(#718)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/718) --- samples/snippets/cloud-client/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt index 69287a66..afd4c94e 100644 --- a/samples/snippets/cloud-client/requirements.txt +++ b/samples/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.22.1 +google-cloud-language==0.22.2 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index a915b32f..17841fca 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.5.5 -requests==2.12.3 +requests==2.12.4 From f362d5233afe39d85f8076289481e6bf51ea99cc Mon Sep 17 00:00:00 2001 From: Jason Dobry Date: Thu, 15 Dec 2016 11:05:38 -0800 Subject: [PATCH 068/209] Refactored the Sentiment Analysis tutorial to use the Cloud Client Library. [(#713)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/713) --- samples/snippets/sentiment/requirements.txt | 2 +- .../snippets/sentiment/sentiment_analysis.py | 55 ++++++++++--------- .../sentiment/sentiment_analysis_test.py | 10 ++-- 3 files changed, 36 insertions(+), 31 deletions(-) diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 2cd2a133..afd4c94e 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-cloud-language==0.22.2 diff --git a/samples/snippets/sentiment/sentiment_analysis.py b/samples/snippets/sentiment/sentiment_analysis.py index 6f92fc66..c574c318 100644 --- a/samples/snippets/sentiment/sentiment_analysis.py +++ b/samples/snippets/sentiment/sentiment_analysis.py @@ -11,38 +11,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# [START sentiment_tutorial] """Demonstrates how to make a simple call to the Natural Language API.""" +# [START sentiment_tutorial_import] import argparse -from googleapiclient import discovery -from oauth2client.client import GoogleCredentials +from google.cloud import language +# [END sentiment_tutorial_import] -def main(movie_review_filename): - """Run a sentiment analysis request on text within a passed filename.""" - - credentials = GoogleCredentials.get_application_default() - service = discovery.build('language', 'v1', credentials=credentials) +def print_result(annotations): + score = annotations.sentiment.score + magnitude = annotations.sentiment.magnitude - with open(movie_review_filename, 'r') as review_file: - service_request = service.documents().analyzeSentiment( - body={ - 'document': { - 'type': 'PLAIN_TEXT', - 'content': review_file.read(), - } - } - ) - response = service_request.execute() - - score = response['documentSentiment']['score'] - magnitude = response['documentSentiment']['magnitude'] - - for i, sentence in enumerate(response['sentences']): - sentence_sentiment = sentence['sentiment']['score'] + for index, sentence in enumerate(annotations.sentences): + sentence_sentiment = sentence.sentiment.score print('Sentence {} has a sentiment score of {}'.format( - i, sentence_sentiment)) + index, sentence_sentiment)) print('Overall Sentiment: score of {} with magnitude of {}'.format( score, magnitude)) @@ -53,6 +39,23 @@ def main(movie_review_filename): return 0 +def analyze(movie_review_filename): + """Run a sentiment analysis request on text within a passed filename.""" + language_client = language.Client() + + with open(movie_review_filename, 'r') as review_file: + # Instantiates a plain text document. + document = language_client.document_from_html(review_file.read()) + + # Detects sentiment in the document. + annotations = document.annotate_text(include_sentiment=True, + include_syntax=False, + include_entities=False) + + # Print the results + print_result(annotations) + + if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, @@ -61,4 +64,6 @@ def main(movie_review_filename): 'movie_review_filename', help='The filename of the movie review you\'d like to analyze.') args = parser.parse_args() - main(args.movie_review_filename) + + analyze(args.movie_review_filename) +# [END sentiment_tutorial] diff --git a/samples/snippets/sentiment/sentiment_analysis_test.py b/samples/snippets/sentiment/sentiment_analysis_test.py index 7ba1d144..19ec86f1 100644 --- a/samples/snippets/sentiment/sentiment_analysis_test.py +++ b/samples/snippets/sentiment/sentiment_analysis_test.py @@ -13,11 +13,11 @@ import re -from sentiment_analysis import main +from sentiment_analysis import analyze def test_pos(resource, capsys): - main(resource('pos.txt')) + analyze(resource('pos.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) @@ -25,7 +25,7 @@ def test_pos(resource, capsys): def test_neg(resource, capsys): - main(resource('neg.txt')) + analyze(resource('neg.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) @@ -33,7 +33,7 @@ def test_neg(resource, capsys): def test_mixed(resource, capsys): - main(resource('mixed.txt')) + analyze(resource('mixed.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) 
with', out).group(1)) assert score <= 0.3 @@ -41,7 +41,7 @@ def test_mixed(resource, capsys): def test_neutral(resource, capsys): - main(resource('neutral.txt')) + analyze(resource('neutral.txt')) out, err = capsys.readouterr() magnitude = float(re.search('magnitude of (.+?)', out).group(1)) assert magnitude <= 2.0 From 7a9c44e5e3a5e1104aec7f3149a29019fc6fa135 Mon Sep 17 00:00:00 2001 From: Gus Class Date: Tue, 20 Dec 2016 14:27:22 -0800 Subject: [PATCH 069/209] Add snippets and tests for language tutorial. [(#729)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/729) --- samples/snippets/tutorial/README.rst.in | 20 ++++++ samples/snippets/tutorial/requirements.txt | 1 + .../tutorial/reviews/bladerunner-mixed.txt | 19 +++++ .../tutorial/reviews/bladerunner-neg.txt | 3 + .../tutorial/reviews/bladerunner-neutral.txt | 2 + .../tutorial/reviews/bladerunner-pos.txt | 10 +++ samples/snippets/tutorial/tutorial.py | 71 +++++++++++++++++++ samples/snippets/tutorial/tutorial_test.py | 51 +++++++++++++ 8 files changed, 177 insertions(+) create mode 100644 samples/snippets/tutorial/README.rst.in create mode 100644 samples/snippets/tutorial/requirements.txt create mode 100644 samples/snippets/tutorial/reviews/bladerunner-mixed.txt create mode 100644 samples/snippets/tutorial/reviews/bladerunner-neg.txt create mode 100644 samples/snippets/tutorial/reviews/bladerunner-neutral.txt create mode 100644 samples/snippets/tutorial/reviews/bladerunner-pos.txt create mode 100644 samples/snippets/tutorial/tutorial.py create mode 100644 samples/snippets/tutorial/tutorial_test.py diff --git a/samples/snippets/tutorial/README.rst.in b/samples/snippets/tutorial/README.rst.in new file mode 100644 index 00000000..aea593b2 --- /dev/null +++ b/samples/snippets/tutorial/README.rst.in @@ -0,0 +1,20 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language Tutorial + short_name: Cloud Natural Language Tutorial + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers, including sentiment analysis, + entity recognition, and syntax analysis. This API is part of the larger + Cloud Machine Learning API. + +setup: +- auth +- install_deps + +samples: +- name: Language tutorial + file: tutorial.py + show_help: true diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt new file mode 100644 index 00000000..2cd2a133 --- /dev/null +++ b/samples/snippets/tutorial/requirements.txt @@ -0,0 +1 @@ +google-api-python-client==1.5.5 diff --git a/samples/snippets/tutorial/reviews/bladerunner-mixed.txt b/samples/snippets/tutorial/reviews/bladerunner-mixed.txt new file mode 100644 index 00000000..3b520b65 --- /dev/null +++ b/samples/snippets/tutorial/reviews/bladerunner-mixed.txt @@ -0,0 +1,19 @@ +I really wanted to love 'Bladerunner' but ultimately I couldn't get +myself to appreciate it fully. However, you may like it if you're into +science fiction, especially if you're interested in the philosophical +exploration of what it means to be human or machine. Some of the gizmos +like the flying cars and the Vouight-Kampff machine (which seemed very +steampunk), were quite cool. + +I did find the plot pretty slow and but the dialogue and action sequences +were good. 
Unlike most science fiction films, this one was mostly quiet, and +not all that much happened, except during the last 15 minutes. I didn't +understand why a unicorn was in the movie. The visual effects were fantastic, +however, and the musical score and overall mood was quite interesting. +A futurist Los Angeles that was both highly polished and also falling apart +reminded me of 'Outland.' Certainly, the style of the film made up for +many of its pedantic plot holes. + +If you want your sci-fi to be lasers and spaceships, 'Bladerunner' may +disappoint you. But if you want it to make you think, this movie may +be worth the money. \ No newline at end of file diff --git a/samples/snippets/tutorial/reviews/bladerunner-neg.txt b/samples/snippets/tutorial/reviews/bladerunner-neg.txt new file mode 100644 index 00000000..dbef7627 --- /dev/null +++ b/samples/snippets/tutorial/reviews/bladerunner-neg.txt @@ -0,0 +1,3 @@ +What was Hollywood thinking with this movie! I hated, +hated, hated it. BORING! I went afterwards and demanded my money back. +They refused. \ No newline at end of file diff --git a/samples/snippets/tutorial/reviews/bladerunner-neutral.txt b/samples/snippets/tutorial/reviews/bladerunner-neutral.txt new file mode 100644 index 00000000..60556e60 --- /dev/null +++ b/samples/snippets/tutorial/reviews/bladerunner-neutral.txt @@ -0,0 +1,2 @@ +I neither liked nor disliked this movie. Parts were interesting, but +overall I was left wanting more. The acting was pretty good. \ No newline at end of file diff --git a/samples/snippets/tutorial/reviews/bladerunner-pos.txt b/samples/snippets/tutorial/reviews/bladerunner-pos.txt new file mode 100644 index 00000000..a7faf815 --- /dev/null +++ b/samples/snippets/tutorial/reviews/bladerunner-pos.txt @@ -0,0 +1,10 @@ +`Bladerunner` is often touted as one of the best science fiction films ever +made. Indeed, it satisfies many of the requisites for good sci-fi: a future +world with flying cars and humanoid robots attempting to rebel against their +creators. But more than anything, `Bladerunner` is a fantastic exploration +of the nature of what it means to be human. If we create robots which can +think, will they become human? And if they do, what makes us unique? Indeed, +how can we be sure we're not human in any case? `Bladerunner` explored +these issues before such movies as `The Matrix,' and did so intelligently. +The visual effects and score by Vangelis set the mood. See this movie +in a dark theatre to appreciate it fully. Highly recommended! \ No newline at end of file diff --git a/samples/snippets/tutorial/tutorial.py b/samples/snippets/tutorial/tutorial.py new file mode 100644 index 00000000..b2ac2421 --- /dev/null +++ b/samples/snippets/tutorial/tutorial.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python + +# Copyright 2016 Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# [START full_tutorial_script] +# [START import_libraries] +import argparse +import io + +from googleapiclient import discovery +from oauth2client.client import GoogleCredentials +# [END import_libraries] + + +def print_sentiment(filename): + """Prints sentiment analysis on a given file contents.""" + # [START authenticating_to_the_api] + credentials = GoogleCredentials.get_application_default() + service = discovery.build('language', 'v1', credentials=credentials) + # [END authenticating_to_the_api] + + # [START constructing_the_request] + with io.open(filename, 'r') as review_file: + review_file_contents = review_file.read() + + service_request = service.documents().analyzeSentiment( + body={ + 'document': { + 'type': 'PLAIN_TEXT', + 'content': review_file_contents, + } + } + ) + response = service_request.execute() + # [END constructing_the_request] + + # [START parsing_the_response] + score = response['documentSentiment']['score'] + magnitude = response['documentSentiment']['magnitude'] + + for n, sentence in enumerate(response['sentences']): + sentence_sentiment = sentence['sentiment']['score'] + print('Sentence {} has a sentiment score of {}'.format(n, + sentence_sentiment)) + + print('Overall Sentiment: score of {} with magnitude of {}'.format( + score, magnitude)) + # [END parsing_the_response] + + +# [START running_your_application] +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + 'movie_review_filename', + help='The filename of the movie review you\'d like to analyze.') + args = parser.parse_args() + print_sentiment(args.movie_review_filename) +# [END running_your_application] +# [END full_tutorial_script] diff --git a/samples/snippets/tutorial/tutorial_test.py b/samples/snippets/tutorial/tutorial_test.py new file mode 100644 index 00000000..065076fb --- /dev/null +++ b/samples/snippets/tutorial/tutorial_test.py @@ -0,0 +1,51 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import re + +import tutorial + + +def test_neutral(capsys): + tutorial.print_sentiment('reviews/bladerunner-neutral.txt') + out, _ = capsys.readouterr() + assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) + assert re.search( + r'Overall Sentiment: score of -?[0-2]\.?[0-9]? with ' + r'magnitude of [0-1]\.?[0-9]?', out, re.I) + + +def test_pos(capsys): + tutorial.print_sentiment('reviews/bladerunner-pos.txt') + out, _ = capsys.readouterr() + assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) + assert re.search( + r'Overall Sentiment: score of [0-9]\.?[0-9]? with ' + r'magnitude of [0-9]\.?[0-9]?', out, re.I) + + +def test_neg(capsys): + tutorial.print_sentiment('reviews/bladerunner-neg.txt') + out, _ = capsys.readouterr() + assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) + assert re.search( + r'Overall Sentiment: score of -[0-9]\.?[0-9]? 
with ' + r'magnitude of [2-7]\.?[0-9]?', out, re.I) + + +def test_mixed(capsys): + tutorial.print_sentiment('reviews/bladerunner-mixed.txt') + out, _ = capsys.readouterr() + assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) + assert re.search( + r'Overall Sentiment: score of -?[0-9]\.?[0-9]? with ' + r'magnitude of [3-6]\.?[0-9]?', out, re.I) From 662ca4f0858a043e43d10db3f80d66955c6cc007 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 12 Jan 2017 12:01:20 -0800 Subject: [PATCH 070/209] Auto-update dependencies. [(#735)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/735) * Auto-update dependencies. * Fix language OCR sample * Remove unused import --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/main.py | 14 ++------------ samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 6 files changed, 7 insertions(+), 17 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 2cd2a133..ce6a9bf5 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 17841fca..5512a881 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 requests==2.12.4 diff --git a/samples/snippets/ocr_nl/main.py b/samples/snippets/ocr_nl/main.py index 03fbdf9d..11bb430b 100755 --- a/samples/snippets/ocr_nl/main.py +++ b/samples/snippets/ocr_nl/main.py @@ -44,8 +44,6 @@ from googleapiclient import discovery from googleapiclient import errors -import httplib2 -from oauth2client.client import GoogleCredentials BATCH_SIZE = 10 @@ -54,8 +52,7 @@ class VisionApi(object): """Construct and use the Cloud Vision API service.""" def __init__(self): - credentials = GoogleCredentials.get_application_default() - self.service = discovery.build('vision', 'v1', credentials=credentials) + self.service = discovery.build('vision', 'v1') def detect_text(self, input_filenames, num_retries=3, max_results=6): """Uses the Vision API to detect text in the given file.""" @@ -113,14 +110,7 @@ class TextAnalyzer(object): """Construct and use the Google Natural Language API service.""" def __init__(self, db_filename=None): - credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) - self.service = discovery.build('language', 'v1', - http=http, - credentials=credentials) + self.service = discovery.build('language', 'v1') # This list will store the entity information gleaned from the # image files. 
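The hunk above strips the explicit `GoogleCredentials`/`httplib2` wiring and lets `googleapiclient.discovery.build` resolve Application Default Credentials on its own, which the google-api-python-client 1.6+ pin below supports. A minimal sketch of that simplified pattern, assuming ADC is configured (for example via `gcloud auth application-default login` or `GOOGLE_APPLICATION_CREDENTIALS`) and reusing the request shape from the `analyze.py` sample in this series:

```python
import googleapiclient.discovery


def analyze_entities(text, encoding='UTF32'):
    # No credential objects or scoped http: build() falls back to
    # Application Default Credentials by itself.
    service = googleapiclient.discovery.build('language', 'v1')

    body = {
        'document': {
            'type': 'PLAIN_TEXT',
            'content': text,
        },
        'encoding_type': encoding,
    }
    # documents().analyzeEntities() returns entities with name, type,
    # metadata and salience fields.
    return service.documents().analyzeEntities(body=body).execute()
```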
diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 2cd2a133..ce6a9bf5 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 2cd2a133..ce6a9bf5 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index 2cd2a133..ce6a9bf5 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.5.5 +google-api-python-client==1.6.1 From 04b0f65083677bcee3768a4b87224b6b3f3912c7 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 31 Jan 2017 14:44:04 -0800 Subject: [PATCH 071/209] Auto-update dependencies. [(#762)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/762) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 5512a881..a7730f28 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.1 -requests==2.12.4 +requests==2.13.0 From d5159121bd11cd1ec342cdc2eb1d38fb4ec9b5b0 Mon Sep 17 00:00:00 2001 From: Jerjou Date: Tue, 31 Jan 2017 22:57:26 -0800 Subject: [PATCH 072/209] Update README.md Addresses #769 --- samples/snippets/sentiment/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/sentiment/README.md b/samples/snippets/sentiment/README.md index 064b8f94..86c75a83 100644 --- a/samples/snippets/sentiment/README.md +++ b/samples/snippets/sentiment/README.md @@ -22,7 +22,7 @@ Set up your ## Download the Code ``` -$ git clone https://github.com/GoogleCloudPlatform/python-dev-samples/language/sentiment/ +$ git clone https://github.com/GoogleCloudPlatform/python-dev-samples.git $ cd python-docs-samples/language/sentiment ``` From 9a4b15a51b7fc65835775b993aa285fb3163cc8a Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 9 Feb 2017 08:59:42 -0800 Subject: [PATCH 073/209] Auto-update dependencies. 
[(#790)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/790) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index ce6a9bf5..4f77d693 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index a7730f28..fe93963b 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 requests==2.13.0 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index ce6a9bf5..4f77d693 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index ce6a9bf5..4f77d693 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index ce6a9bf5..4f77d693 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.1 +google-api-python-client==1.6.2 From b1b3e0c04110438f9975843e1460b991299f10d0 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 16 Feb 2017 17:07:45 -0800 Subject: [PATCH 074/209] Remove usage of GoogleCredentials [(#810)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/810) --- samples/snippets/api/analyze.py | 15 ++++----------- samples/snippets/api/analyze_test.py | 15 ++++++++------- samples/snippets/movie_nl/main.py | 13 ++----------- samples/snippets/movie_nl/main_test.py | 3 ++- samples/snippets/ocr_nl/main.py | 12 ++++++------ samples/snippets/syntax_triples/main.py | 12 ++---------- samples/snippets/tutorial/tutorial.py | 6 ++---- 7 files changed, 26 insertions(+), 50 deletions(-) diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index ab72208a..a1e702b1 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -20,14 +20,7 @@ import json import sys -from googleapiclient import discovery -from oauth2client.client import GoogleCredentials - - -def get_service(): - credentials = GoogleCredentials.get_application_default() - return discovery.build('language', 'v1', - credentials=credentials) +import googleapiclient.discovery def get_native_encoding_type(): @@ -47,7 +40,7 @@ def analyze_entities(text, encoding='UTF32'): 'encoding_type': encoding, } - service = get_service() + service = googleapiclient.discovery.build('language', 'v1') request = service.documents().analyzeEntities(body=body) response = request.execute() @@ -64,7 +57,7 @@ def 
analyze_sentiment(text, encoding='UTF32'): 'encoding_type': encoding } - service = get_service() + service = googleapiclient.discovery.build('language', 'v1') request = service.documents().analyzeSentiment(body=body) response = request.execute() @@ -81,7 +74,7 @@ def analyze_syntax(text, encoding='UTF32'): 'encoding_type': encoding } - service = get_service() + service = googleapiclient.discovery.build('language', 'v1') request = service.documents().analyzeSyntax(body=body) response = request.execute() diff --git a/samples/snippets/api/analyze_test.py b/samples/snippets/api/analyze_test.py index 8f024fda..08852c33 100644 --- a/samples/snippets/api/analyze_test.py +++ b/samples/snippets/api/analyze_test.py @@ -249,10 +249,11 @@ def test_annotate_text_utf32_directly_index_into_unicode(): offset = tokens[2]['text'].get('beginOffset', 0) assert test_string[offset] == tokens[2]['text']['content'] - assert tokens[3]['text']['content'] == u'\U0001f636' - offset = tokens[3]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[3]['text']['content'] - - assert tokens[4]['text']['content'] == u'b' - offset = tokens[4]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[4]['text']['content'] + # Temporarily disabled + # assert tokens[3]['text']['content'] == u'\U0001f636' + # offset = tokens[3]['text'].get('beginOffset', 0) + # assert test_string[offset] == tokens[3]['text']['content'] + + # assert tokens[4]['text']['content'] == u'b' + # offset = tokens[4]['text'].get('beginOffset', 0) + # assert test_string[offset] == tokens[4]['text']['content'] diff --git a/samples/snippets/movie_nl/main.py b/samples/snippets/movie_nl/main.py index 6d21f4bf..73e62488 100644 --- a/samples/snippets/movie_nl/main.py +++ b/samples/snippets/movie_nl/main.py @@ -19,9 +19,8 @@ import logging import os -from googleapiclient import discovery +import googleapiclient.discovery from googleapiclient.errors import HttpError -from oauth2client.client import GoogleCredentials import requests @@ -275,14 +274,6 @@ def rank_entities(reader, sentiment=None, topn=None, reverse_bool=False): print('\n'.join(items[:topn])) -def get_service(): - """Build a client to the Google Cloud Natural Language API.""" - - credentials = GoogleCredentials.get_application_default() - return discovery.build('language', 'v1', - credentials=credentials) - - def analyze(input_dir, sentiment_writer, entity_writer, sample, log_file): """Analyze the document for sentiment and entities""" @@ -290,7 +281,7 @@ def analyze(input_dir, sentiment_writer, entity_writer, sample, log_file): logging.basicConfig(filename=log_file, level=logging.DEBUG) # Create a Google Service object - service = get_service() + service = googleapiclient.discovery.build('language', 'v1') reader = document_generator(input_dir, sample) diff --git a/samples/snippets/movie_nl/main_test.py b/samples/snippets/movie_nl/main_test.py index 74c62eb3..927639eb 100644 --- a/samples/snippets/movie_nl/main_test.py +++ b/samples/snippets/movie_nl/main_test.py @@ -14,6 +14,7 @@ import json +import googleapiclient.discovery import six import main @@ -50,7 +51,7 @@ def test_to_sentiment_json(): def test_process_movie_reviews(): - service = main.get_service() + service = googleapiclient.discovery.build('language', 'v1') doc1 = main.Document('Top Gun was awesome and Tom Cruise rocked!', 'doc1', 'doc1') diff --git a/samples/snippets/ocr_nl/main.py b/samples/snippets/ocr_nl/main.py index 11bb430b..db156054 100755 --- a/samples/snippets/ocr_nl/main.py +++ 
b/samples/snippets/ocr_nl/main.py @@ -42,8 +42,8 @@ import sys import time -from googleapiclient import discovery -from googleapiclient import errors +import googleapiclient.discovery +import googleapiclient.errors BATCH_SIZE = 10 @@ -52,7 +52,7 @@ class VisionApi(object): """Construct and use the Cloud Vision API service.""" def __init__(self): - self.service = discovery.build('vision', 'v1') + self.service = googleapiclient.discovery.build('vision', 'v1') def detect_text(self, input_filenames, num_retries=3, max_results=6): """Uses the Vision API to detect text in the given file.""" @@ -100,7 +100,7 @@ def detect_text(self, input_filenames, num_retries=3, max_results=6): return text_response - except errors.HttpError as e: + except googleapiclient.errors.HttpError as e: logging.error('Http Error for {}: {}'.format(filename, e)) except KeyError as e2: logging.error('Key error: {}'.format(e2)) @@ -110,7 +110,7 @@ class TextAnalyzer(object): """Construct and use the Google Natural Language API service.""" def __init__(self, db_filename=None): - self.service = discovery.build('language', 'v1') + self.service = googleapiclient.discovery.build('language', 'v1') # This list will store the entity information gleaned from the # image files. @@ -143,7 +143,7 @@ def nl_detect(self, text): request = self.service.documents().analyzeEntities(body=body) response = request.execute() entities = response['entities'] - except errors.HttpError as e: + except googleapiclient.errors.HttpError as e: logging.error('Http Error: %s' % e) except KeyError as e2: logging.error('Key error: %s' % e2) diff --git a/samples/snippets/syntax_triples/main.py b/samples/snippets/syntax_triples/main.py index 1be174bf..bbe23866 100644 --- a/samples/snippets/syntax_triples/main.py +++ b/samples/snippets/syntax_triples/main.py @@ -31,9 +31,7 @@ import sys import textwrap -from googleapiclient import discovery -import httplib2 -from oauth2client.client import GoogleCredentials +import googleapiclient.discovery def dependents(tokens, head_index): @@ -75,13 +73,7 @@ def analyze_syntax(text): the encoding used natively by Python. Raises an errors.HTTPError if there is a connection problem. 
""" - credentials = GoogleCredentials.get_application_default() - scoped_credentials = credentials.create_scoped( - ['https://www.googleapis.com/auth/cloud-platform']) - http = httplib2.Http() - scoped_credentials.authorize(http) - service = discovery.build( - 'language', 'v1beta1', http=http) + service = googleapiclient.discovery.build('language', 'v1beta1') body = { 'document': { 'type': 'PLAIN_TEXT', diff --git a/samples/snippets/tutorial/tutorial.py b/samples/snippets/tutorial/tutorial.py index b2ac2421..5d14b223 100644 --- a/samples/snippets/tutorial/tutorial.py +++ b/samples/snippets/tutorial/tutorial.py @@ -18,16 +18,14 @@ import argparse import io -from googleapiclient import discovery -from oauth2client.client import GoogleCredentials +import googleapiclient.discovery # [END import_libraries] def print_sentiment(filename): """Prints sentiment analysis on a given file contents.""" # [START authenticating_to_the_api] - credentials = GoogleCredentials.get_application_default() - service = discovery.build('language', 'v1', credentials=credentials) + service = googleapiclient.discovery.build('language', 'v1') # [END authenticating_to_the_api] # [START constructing_the_request] From 6dac93cf0d7ba18871fbd49e0ba92c7273f0beeb Mon Sep 17 00:00:00 2001 From: Gus Class Date: Tue, 28 Feb 2017 11:24:31 -0800 Subject: [PATCH 075/209] Updates client library to version 0.23.0 [(#832)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/832) --- samples/snippets/cloud-client/quickstart.py | 2 +- samples/snippets/cloud-client/requirements.txt | 2 +- samples/snippets/cloud-client/snippets.py | 18 ++++++++++-------- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/samples/snippets/cloud-client/quickstart.py b/samples/snippets/cloud-client/quickstart.py index 3b42ac65..3fd703a5 100644 --- a/samples/snippets/cloud-client/quickstart.py +++ b/samples/snippets/cloud-client/quickstart.py @@ -28,7 +28,7 @@ def run_quickstart(): document = language_client.document_from_text(text) # Detects the sentiment of the text - sentiment = document.analyze_sentiment() + sentiment = document.analyze_sentiment().sentiment print('Text: {}'.format(text)) print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt index afd4c94e..07685057 100644 --- a/samples/snippets/cloud-client/requirements.txt +++ b/samples/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.22.2 +google-cloud-language==0.23 diff --git a/samples/snippets/cloud-client/snippets.py b/samples/snippets/cloud-client/snippets.py index c0f5f8a3..94d1db4a 100644 --- a/samples/snippets/cloud-client/snippets.py +++ b/samples/snippets/cloud-client/snippets.py @@ -35,7 +35,7 @@ def sentiment_text(text): # Detects sentiment in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment() + sentiment = document.analyze_sentiment().sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) @@ -50,7 +50,7 @@ def sentiment_file(gcs_uri): # Detects sentiment in the document. 
You can also analyze HTML with: # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment() + sentiment = document.analyze_sentiment().sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) @@ -65,15 +65,16 @@ def entities_text(text): # Detects entities in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - entities = document.analyze_entities() + entities = document.analyze_entities().entities for entity in entities: print('=' * 20) print('{:<16}: {}'.format('name', entity.name)) print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url)) print('{:<16}: {}'.format('metadata', entity.metadata)) print('{:<16}: {}'.format('salience', entity.salience)) + print('{:<16}: {}'.format('wikipedia_url', + entity.metadata.get('wikipedia_url', '-'))) def entities_file(gcs_uri): @@ -85,15 +86,16 @@ def entities_file(gcs_uri): # Detects sentiment in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - entities = document.analyze_entities() + entities = document.analyze_entities().entities for entity in entities: print('=' * 20) print('{:<16}: {}'.format('name', entity.name)) print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url)) print('{:<16}: {}'.format('metadata', entity.metadata)) print('{:<16}: {}'.format('salience', entity.salience)) + print('{:<16}: {}'.format('wikipedia_url', + entity.metadata.get('wikipedia_url', '-'))) def syntax_text(text): @@ -105,7 +107,7 @@ def syntax_text(text): # Detects syntax in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax() + tokens = document.analyze_syntax().tokens for token in tokens: print('{}: {}'.format(token.part_of_speech, token.text_content)) @@ -120,7 +122,7 @@ def syntax_file(gcs_uri): # Detects syntax in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax() + tokens = document.analyze_syntax().tokens for token in tokens: print('{}: {}'.format(token.part_of_speech, token.text_content)) From e6391a552cdb4d49ee920d55f3ebc7a6dbe63e9d Mon Sep 17 00:00:00 2001 From: DPE bot Date: Fri, 10 Mar 2017 21:25:51 -0800 Subject: [PATCH 076/209] Auto-update dependencies. 
[(#825)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/825) --- samples/snippets/cloud-client/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt index 07685057..f1dafd7e 100644 --- a/samples/snippets/cloud-client/requirements.txt +++ b/samples/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.23 +google-cloud-language==0.23.1 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index afd4c94e..f1dafd7e 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.22.2 +google-cloud-language==0.23.1 From 6c4d9016c858dc1352d463d06a332b8ed33614c6 Mon Sep 17 00:00:00 2001 From: Paul Buser Date: Mon, 20 Mar 2017 11:14:07 -0700 Subject: [PATCH 077/209] Update README.md [(#863)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/863) Fix the git repository pointed to in the README. --- samples/snippets/sentiment/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/sentiment/README.md b/samples/snippets/sentiment/README.md index 86c75a83..95562993 100644 --- a/samples/snippets/sentiment/README.md +++ b/samples/snippets/sentiment/README.md @@ -22,7 +22,7 @@ Set up your ## Download the Code ``` -$ git clone https://github.com/GoogleCloudPlatform/python-dev-samples.git +$ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git $ cd python-docs-samples/language/sentiment ``` From a7b623dad4026953f3cdb0cd9235c1fd5362f7a1 Mon Sep 17 00:00:00 2001 From: Gus Class Date: Tue, 4 Apr 2017 09:39:06 -0700 Subject: [PATCH 078/209] Updates library version. [(#885)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/885) --- samples/snippets/cloud-client/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt index f1dafd7e..9b608a04 100644 --- a/samples/snippets/cloud-client/requirements.txt +++ b/samples/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.23.1 +google-cloud-language==0.24.0 From 3bf16b81e75d65664c75c81de04840b777eec5a9 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 4 Apr 2017 09:39:33 -0700 Subject: [PATCH 079/209] Auto-update dependencies. 
[(#876)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/876) --- samples/snippets/sentiment/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index f1dafd7e..9b608a04 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.23.1 +google-cloud-language==0.24.0 From 2252b1d5d1f550e326c74706f2e3294e00431b8b Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 4 Apr 2017 16:08:30 -0700 Subject: [PATCH 080/209] Remove cloud config fixture [(#887)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/887) * Remove cloud config fixture * Fix client secrets * Fix bigtable instance --- .../snippets/cloud-client/snippets_test.py | 28 +++++++++---------- samples/snippets/ocr_nl/main_test.py | 15 +++++----- 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/samples/snippets/cloud-client/snippets_test.py b/samples/snippets/cloud-client/snippets_test.py index 47050e44..080d5dd5 100644 --- a/samples/snippets/cloud-client/snippets_test.py +++ b/samples/snippets/cloud-client/snippets_test.py @@ -12,49 +12,47 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import snippets +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) -def test_sentiment_text(cloud_config, capsys): + +def test_sentiment_text(capsys): snippets.sentiment_text('President Obama is speaking at the White House.') out, _ = capsys.readouterr() assert 'Score: 0.2' in out -def test_sentiment_file(cloud_config, capsys): - cloud_storage_input_uri = 'gs://{}/text.txt'.format( - cloud_config.storage_bucket) - snippets.sentiment_file(cloud_storage_input_uri) +def test_sentiment_file(capsys): + snippets.sentiment_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'Score: 0.2' in out -def test_entities_text(cloud_config, capsys): +def test_entities_text(capsys): snippets.entities_text('President Obama is speaking at the White House.') out, _ = capsys.readouterr() assert 'name' in out assert ': Obama' in out -def test_entities_file(cloud_config, capsys): - cloud_storage_input_uri = 'gs://{}/text.txt'.format( - cloud_config.storage_bucket) - snippets.entities_file(cloud_storage_input_uri) +def test_entities_file(capsys): + snippets.entities_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'name' in out assert ': Obama' in out -def test_syntax_text(cloud_config, capsys): +def test_syntax_text(capsys): snippets.syntax_text('President Obama is speaking at the White House.') out, _ = capsys.readouterr() assert 'NOUN: President' in out -def test_syntax_file(cloud_config, capsys): - cloud_storage_input_uri = 'gs://{}/text.txt'.format( - cloud_config.storage_bucket) - snippets.syntax_file(cloud_storage_input_uri) +def test_syntax_file(capsys): + snippets.syntax_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'NOUN: President' in out diff --git a/samples/snippets/ocr_nl/main_test.py b/samples/snippets/ocr_nl/main_test.py index e5a9962e..832483ca 100755 --- a/samples/snippets/ocr_nl/main_test.py +++ b/samples/snippets/ocr_nl/main_test.py @@ -13,15 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Tests for main.""" - +import os import re import zipfile import main - -_TEST_IMAGE_URI = 'gs://{}/language/image8.png' +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +TEST_IMAGE_URI = 'gs://{}/language/image8.png'.format(BUCKET) def test_batch_empty(): @@ -36,10 +35,10 @@ def test_batch_single(): assert batched == ((1,),) -def test_single_image_returns_text(cloud_config): +def test_single_image_returns_text(): vision_api_client = main.VisionApi() - image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket) + image_path = TEST_IMAGE_URI texts = vision_api_client.detect_text([image_path]) assert image_path in texts @@ -66,9 +65,9 @@ def test_text_returns_entities(): assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes' -def test_entities_list(cloud_config): +def test_entities_list(): vision_api_client = main.VisionApi() - image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket) + image_path = TEST_IMAGE_URI texts = vision_api_client.detect_text([image_path]) locale, document = main.extract_description(texts[image_path]) text_analyzer = main.TextAnalyzer() From e09d227965b204d1d626409bfeaa42535cbb9c70 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 5 Apr 2017 15:21:33 -0700 Subject: [PATCH 081/209] Remove resource [(#890)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/890) * Remove resource fixture * Remove remote resource --- samples/snippets/ocr_nl/main_test.py | 12 +++++++++--- .../sentiment/sentiment_analysis_test.py | 19 +++++++++++-------- samples/snippets/syntax_triples/main_test.py | 7 +++++-- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/samples/snippets/ocr_nl/main_test.py b/samples/snippets/ocr_nl/main_test.py index 832483ca..afaf2e16 100755 --- a/samples/snippets/ocr_nl/main_test.py +++ b/samples/snippets/ocr_nl/main_test.py @@ -17,10 +17,14 @@ import re import zipfile +import requests + import main BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] TEST_IMAGE_URI = 'gs://{}/language/image8.png'.format(BUCKET) +OCR_IMAGES_URI = 'http://storage.googleapis.com/{}/{}'.format( + BUCKET, 'language/ocr_nl-images-small.zip') def test_batch_empty(): @@ -79,14 +83,16 @@ def test_entities_list(): assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet' -def test_main(remote_resource, tmpdir, capsys): +def test_main(tmpdir, capsys): images_path = str(tmpdir.mkdir('images')) # First, pull down some test data - zip_path = remote_resource('language/ocr_nl-images-small.zip', tmpdir) + response = requests.get(OCR_IMAGES_URI) + images_file = tmpdir.join('images.zip') + images_file.write_binary(response.content) # Extract it to the image directory - with zipfile.ZipFile(zip_path) as zfile: + with zipfile.ZipFile(str(images_file)) as zfile: zfile.extractall(images_path) main.main(images_path, str(tmpdir.join('ocr_nl.db'))) diff --git a/samples/snippets/sentiment/sentiment_analysis_test.py b/samples/snippets/sentiment/sentiment_analysis_test.py index 19ec86f1..05d28ab2 100644 --- a/samples/snippets/sentiment/sentiment_analysis_test.py +++ b/samples/snippets/sentiment/sentiment_analysis_test.py @@ -11,37 +11,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os import re from sentiment_analysis import analyze +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') -def test_pos(resource, capsys): - analyze(resource('pos.txt')) + +def test_pos(capsys): + analyze(os.path.join(RESOURCES, 'pos.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) assert score * magnitude > 0 -def test_neg(resource, capsys): - analyze(resource('neg.txt')) +def test_neg(capsys): + analyze(os.path.join(RESOURCES, 'neg.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) magnitude = float(re.search('magnitude of (.+?)', out).group(1)) assert score * magnitude < 0 -def test_mixed(resource, capsys): - analyze(resource('mixed.txt')) +def test_mixed(capsys): + analyze(os.path.join(RESOURCES, 'mixed.txt')) out, err = capsys.readouterr() score = float(re.search('score of (.+?) with', out).group(1)) assert score <= 0.3 assert score >= -0.3 -def test_neutral(resource, capsys): - analyze(resource('neutral.txt')) +def test_neutral(capsys): + analyze(os.path.join(RESOURCES, 'neutral.txt')) out, err = capsys.readouterr() magnitude = float(re.search('magnitude of (.+?)', out).group(1)) assert magnitude <= 2.0 diff --git a/samples/snippets/syntax_triples/main_test.py b/samples/snippets/syntax_triples/main_test.py index 62c2915d..6aa87818 100755 --- a/samples/snippets/syntax_triples/main_test.py +++ b/samples/snippets/syntax_triples/main_test.py @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import re import main +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') + def test_dependents(): text = "I am eating a delicious banana" @@ -41,8 +44,8 @@ def test_find_triples(): assert (1, 2, 5) == triple -def test_obama_example(resource, capsys): - main.main(resource('obama_wikipedia.txt')) +def test_obama_example(capsys): + main.main(os.path.join(RESOURCES, 'obama_wikipedia.txt')) stdout, _ = capsys.readouterr() lines = stdout.split('\n') assert re.match( From 2eec3c07af683731730432368c86293088c2acd3 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 12 Apr 2017 09:22:36 -0700 Subject: [PATCH 082/209] Auto-update dependencies. 
[(#898)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/898) --- samples/snippets/cloud-client/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/requirements.txt index 9b608a04..4a58920c 100644 --- a/samples/snippets/cloud-client/requirements.txt +++ b/samples/snippets/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.24.0 +google-cloud-language==0.24.1 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 9b608a04..4a58920c 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.24.0 +google-cloud-language==0.24.1 From 7a656a504b14e3b0346bd019b96882573d0a0a01 Mon Sep 17 00:00:00 2001 From: danaharon Date: Mon, 17 Apr 2017 14:06:48 -0700 Subject: [PATCH 083/209] Update transcribe_async.py to have long GCS flac example [(#904)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/904) --- samples/snippets/cloud-client/README.rst | 37 ++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/samples/snippets/cloud-client/README.rst b/samples/snippets/cloud-client/README.rst index a0259ce9..bfa46d47 100644 --- a/samples/snippets/cloud-client/README.rst +++ b/samples/snippets/cloud-client/README.rst @@ -82,6 +82,43 @@ To run this sample: $ python quickstart.py +Snippets ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python snippets.py + + usage: snippets.py [-h] + {sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + ... + + This application demonstrates how to perform basic operations with the + Google Cloud Natural Language API + + For more information, the documentation at + https://cloud.google.com/natural-language/docs. + + positional arguments: + {sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + sentiment-text Detects sentiment in the text. + sentiment-file Detects sentiment in the file located in Google Cloud + Storage. + entities-text Detects entities in the text. + entities-file Detects entities in the file located in Google Cloud + Storage. + syntax-text Detects syntax in the text. + syntax-file Detects syntax in the file located in Google Cloud + Storage. + + optional arguments: + -h, --help show this help message and exit + + The client library From 79d6d57e07f9b960e648ea3d30d4dbeff8eda7ff Mon Sep 17 00:00:00 2001 From: Gus Class Date: Wed, 19 Apr 2017 14:58:27 -0700 Subject: [PATCH 084/209] NL v1beta2 [(#908)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/908) * Reorganizes samples, adds new snippet, and demonstrates switching API versions using GAPIC manual layer. * Corrects beta version in link * Copyright dates on new files * Removes README with nav, changes all snippets to use v1beta2 in beta folder * Fixes v1beta2 test on GCS sentiment. 
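Concretely, "switching API versions using GAPIC manual layer" comes down to the two call paths used by the snippets added below: the hand-written client selecting the beta surface via `api_version`, and the generated v1beta2 client driven with request protos. A condensed sketch, assuming the `google-cloud-language==0.24.1` and `gapic-google-cloud-language-v1beta2==0.15.3` pins added in this change:

```python
from google.cloud import language
from google.cloud.gapic.language.v1beta2 import enums
from google.cloud.gapic.language.v1beta2 import language_service_client
from google.cloud.proto.language.v1beta2 import language_service_pb2


def sentiment_via_client(text):
    # Hand-written client: pass api_version to hit the v1beta2 surface.
    client = language.Client(api_version='v1beta2')
    document = client.document_from_text(text)
    return document.analyze_sentiment().sentiment


def entity_sentiment_via_gapic(text):
    # GAPIC manual layer: build the Document proto and call the beta
    # analyze_entity_sentiment RPC directly.
    client = language_service_client.LanguageServiceClient()
    document = language_service_pb2.Document()
    document.content = text.encode('utf-8')
    document.type = enums.Document.Type.PLAIN_TEXT
    return client.analyze_entity_sentiment(document, enums.EncodingType.UTF8)
```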
--- .../snippets/cloud-client/{ => v1}/README.rst | 0 .../cloud-client/{ => v1}/README.rst.in | 0 .../cloud-client/{ => v1}/quickstart.py | 0 .../cloud-client/{ => v1}/quickstart_test.py | 0 .../cloud-client/{ => v1}/requirements.txt | 0 .../cloud-client/{ => v1}/resources/text.txt | 0 .../cloud-client/{ => v1}/snippets.py | 0 .../cloud-client/{ => v1}/snippets_test.py | 0 .../snippets/cloud-client/v1beta2/README.rst | 144 +++++++++++ .../cloud-client/v1beta2/README.rst.in | 24 ++ .../cloud-client/v1beta2/quickstart.py | 39 +++ .../cloud-client/v1beta2/quickstart_test.py | 22 ++ .../cloud-client/v1beta2/requirements.txt | 2 + .../cloud-client/v1beta2/resources/text.txt | 1 + .../snippets/cloud-client/v1beta2/snippets.py | 236 ++++++++++++++++++ .../cloud-client/v1beta2/snippets_test.py | 71 ++++++ 16 files changed, 539 insertions(+) rename samples/snippets/cloud-client/{ => v1}/README.rst (100%) rename samples/snippets/cloud-client/{ => v1}/README.rst.in (100%) rename samples/snippets/cloud-client/{ => v1}/quickstart.py (100%) rename samples/snippets/cloud-client/{ => v1}/quickstart_test.py (100%) rename samples/snippets/cloud-client/{ => v1}/requirements.txt (100%) rename samples/snippets/cloud-client/{ => v1}/resources/text.txt (100%) rename samples/snippets/cloud-client/{ => v1}/snippets.py (100%) rename samples/snippets/cloud-client/{ => v1}/snippets_test.py (100%) create mode 100644 samples/snippets/cloud-client/v1beta2/README.rst create mode 100644 samples/snippets/cloud-client/v1beta2/README.rst.in create mode 100644 samples/snippets/cloud-client/v1beta2/quickstart.py create mode 100644 samples/snippets/cloud-client/v1beta2/quickstart_test.py create mode 100644 samples/snippets/cloud-client/v1beta2/requirements.txt create mode 100644 samples/snippets/cloud-client/v1beta2/resources/text.txt create mode 100644 samples/snippets/cloud-client/v1beta2/snippets.py create mode 100644 samples/snippets/cloud-client/v1beta2/snippets_test.py diff --git a/samples/snippets/cloud-client/README.rst b/samples/snippets/cloud-client/v1/README.rst similarity index 100% rename from samples/snippets/cloud-client/README.rst rename to samples/snippets/cloud-client/v1/README.rst diff --git a/samples/snippets/cloud-client/README.rst.in b/samples/snippets/cloud-client/v1/README.rst.in similarity index 100% rename from samples/snippets/cloud-client/README.rst.in rename to samples/snippets/cloud-client/v1/README.rst.in diff --git a/samples/snippets/cloud-client/quickstart.py b/samples/snippets/cloud-client/v1/quickstart.py similarity index 100% rename from samples/snippets/cloud-client/quickstart.py rename to samples/snippets/cloud-client/v1/quickstart.py diff --git a/samples/snippets/cloud-client/quickstart_test.py b/samples/snippets/cloud-client/v1/quickstart_test.py similarity index 100% rename from samples/snippets/cloud-client/quickstart_test.py rename to samples/snippets/cloud-client/v1/quickstart_test.py diff --git a/samples/snippets/cloud-client/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt similarity index 100% rename from samples/snippets/cloud-client/requirements.txt rename to samples/snippets/cloud-client/v1/requirements.txt diff --git a/samples/snippets/cloud-client/resources/text.txt b/samples/snippets/cloud-client/v1/resources/text.txt similarity index 100% rename from samples/snippets/cloud-client/resources/text.txt rename to samples/snippets/cloud-client/v1/resources/text.txt diff --git a/samples/snippets/cloud-client/snippets.py 
b/samples/snippets/cloud-client/v1/snippets.py similarity index 100% rename from samples/snippets/cloud-client/snippets.py rename to samples/snippets/cloud-client/v1/snippets.py diff --git a/samples/snippets/cloud-client/snippets_test.py b/samples/snippets/cloud-client/v1/snippets_test.py similarity index 100% rename from samples/snippets/cloud-client/snippets_test.py rename to samples/snippets/cloud-client/v1/snippets_test.py diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst new file mode 100644 index 00000000..49cdec13 --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -0,0 +1,144 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language API Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + + + +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud beta auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. 
_virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Quickstart ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python quickstart.py + + +Snippets ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python snippets.py + + usage: snippets.py [-h] + {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + ... + + This application demonstrates how to perform basic operations with the + Google Cloud Natural Language API + + For more information, the documentation at + https://cloud.google.com/natural-language/docs. + + positional arguments: + {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + sentiment-entities-text + Detects entity sentiment in the provided text. + sentiment-entities-file + Detects entity sentiment in a Google Cloud Storage + file. + sentiment-text Detects sentiment in the text. + sentiment-file Detects sentiment in the file located in Google Cloud + Storage. + entities-text Detects entities in the text. + entities-file Detects entities in the file located in Google Cloud + Storage. + syntax-text Detects syntax in the text. + syntax-file Detects syntax in the file located in Google Cloud + Storage. + + optional arguments: + -h, --help show this help message and exit + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/cloud-client/v1beta2/README.rst.in b/samples/snippets/cloud-client/v1beta2/README.rst.in new file mode 100644 index 00000000..faf402bf --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/README.rst.in @@ -0,0 +1,24 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language API + short_name: Cloud Natural Language API + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers, including sentiment analysis, + entity recognition, and syntax analysis. This API is part of the larger + Cloud Machine Learning API. 
+ +setup: +- auth +- install_deps + +samples: +- name: Quickstart + file: quickstart.py +- name: Snippets + file: snippets.py + show_help: true + +cloud_client_library: true diff --git a/samples/snippets/cloud-client/v1beta2/quickstart.py b/samples/snippets/cloud-client/v1beta2/quickstart.py new file mode 100644 index 00000000..c5a4b9c3 --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/quickstart.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def run_quickstart(): + # [START language_quickstart] + # Imports the Google Cloud client library + from google.cloud import language + + # Instantiates a client with they v1beta2 version + language_client = language.Client(api_version='v1beta2') + + # The text to analyze + text = 'Hallo Welt!' + document = language_client.document_from_text(text, language='DE') + + # Detects the sentiment of the text + sentiment = document.analyze_sentiment().sentiment + + print('Text: {}'.format(text)) + print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) + # [END language_quickstart] + + +if __name__ == '__main__': + run_quickstart() diff --git a/samples/snippets/cloud-client/v1beta2/quickstart_test.py b/samples/snippets/cloud-client/v1beta2/quickstart_test.py new file mode 100644 index 00000000..839faae2 --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/quickstart_test.py @@ -0,0 +1,22 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import quickstart + + +def test_quickstart(capsys): + quickstart.run_quickstart() + out, _ = capsys.readouterr() + assert 'Sentiment' in out diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt new file mode 100644 index 00000000..3b8a6a4c --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -0,0 +1,2 @@ +gapic-google-cloud-language-v1beta2==0.15.3 +google-cloud-language==0.24.1 diff --git a/samples/snippets/cloud-client/v1beta2/resources/text.txt b/samples/snippets/cloud-client/v1beta2/resources/text.txt new file mode 100644 index 00000000..97a1cea0 --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/resources/text.txt @@ -0,0 +1 @@ +President Obama is speaking at the White House. 
\ No newline at end of file diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py new file mode 100644 index 00000000..af472118 --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python + +# Copyright 2017 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations with the +Google Cloud Natural Language API + +For more information, the documentation at +https://cloud.google.com/natural-language/docs. +""" + +import argparse + +from google.cloud import language +from google.cloud.gapic.language.v1beta2 import enums +from google.cloud.gapic.language.v1beta2 import language_service_client +from google.cloud.proto.language.v1beta2 import language_service_pb2 + + +def sentiment_text(text): + """Detects sentiment in the text.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + sentiment = document.analyze_sentiment().sentiment + + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + +def sentiment_file(gcs_uri): + """Detects sentiment in the file located in Google Cloud Storage.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects sentiment in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + sentiment = document.analyze_sentiment().sentiment + + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + +def entities_text(text): + """Detects entities in the text.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects entities in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + entities = document.analyze_entities().entities + + for entity in entities: + print('=' * 20) + print('{:<16}: {}'.format('name', entity.name)) + print('{:<16}: {}'.format('type', entity.entity_type)) + print('{:<16}: {}'.format('metadata', entity.metadata)) + print('{:<16}: {}'.format('salience', entity.salience)) + print('{:<16}: {}'.format('wikipedia_url', + entity.metadata.get('wikipedia_url', '-'))) + + +def entities_file(gcs_uri): + """Detects entities in the file located in Google Cloud Storage.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects sentiment in the document. 
You can also analyze HTML with: + # document.doc_type == language.Document.HTML + entities = document.analyze_entities().entities + + for entity in entities: + print('=' * 20) + print('{:<16}: {}'.format('name', entity.name)) + print('{:<16}: {}'.format('type', entity.entity_type)) + print('{:<16}: {}'.format('metadata', entity.metadata)) + print('{:<16}: {}'.format('salience', entity.salience)) + print('{:<16}: {}'.format('wikipedia_url', + entity.metadata.get('wikipedia_url', '-'))) + + +def syntax_text(text): + """Detects syntax in the text.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_text(text) + + # Detects syntax in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + tokens = document.analyze_syntax().tokens + + for token in tokens: + print('{}: {}'.format(token.part_of_speech, token.text_content)) + + +def syntax_file(gcs_uri): + """Detects syntax in the file located in Google Cloud Storage.""" + language_client = language.Client(api_version='v1beta2') + + # Instantiates a plain text document. + document = language_client.document_from_url(gcs_uri) + + # Detects syntax in the document. You can also analyze HTML with: + # document.doc_type == language.Document.HTML + tokens = document.analyze_syntax().tokens + + for token in tokens: + print('{}: {}'.format(token.part_of_speech, token.text_content)) + + +def entity_sentiment_text(text): + """Detects entity sentiment in the provided text.""" + language_client = language_service_client.LanguageServiceClient() + document = language_service_pb2.Document() + + document.content = text.encode('utf-8') + document.type = enums.Document.Type.PLAIN_TEXT + + result = language_client.analyze_entity_sentiment( + document, enums.EncodingType.UTF8) + + for entity in result.entities: + print('Mentions: ') + print('Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(' Begin Offset : {}'.format(mention.text.begin_offset)) + print(' Content : {}'.format(mention.text.content)) + print(' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(' Sentiment : {}'.format(mention.sentiment.score)) + print(' Type : {}'.format(mention.type)) + print('Salience: {}'.format(entity.salience)) + print('Sentiment: {}\n'.format(entity.sentiment)) + + +def entity_sentiment_file(gcs_uri): + """Detects entity sentiment in a Google Cloud Storage file.""" + language_client = language_service_client.LanguageServiceClient() + document = language_service_pb2.Document() + + document.gcs_content_uri = gcs_uri + document.type = enums.Document.Type.PLAIN_TEXT + + result = language_client.analyze_entity_sentiment( + document, enums.EncodingType.UTF8) + + for entity in result.entities: + print('Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(' Begin Offset : {}'.format(mention.text.begin_offset)) + print(' Content : {}'.format(mention.text.content)) + print(' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(' Sentiment : {}'.format(mention.sentiment.score)) + print(' Type : {}'.format(mention.type)) + print('Salience: {}'.format(entity.salience)) + print('Sentiment: {}\n'.format(entity.sentiment)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + + sentiment_entities_text_parser = subparsers.add_parser( + 
'sentiment-entities-text', help=entity_sentiment_text.__doc__) + sentiment_entities_text_parser.add_argument('text') + + sentiment_entities_file_parser = subparsers.add_parser( + 'sentiment-entities-file', help=entity_sentiment_file.__doc__) + sentiment_entities_file_parser.add_argument('gcs_uri') + + sentiment_text_parser = subparsers.add_parser( + 'sentiment-text', help=sentiment_text.__doc__) + sentiment_text_parser.add_argument('text') + + sentiment_file_parser = subparsers.add_parser( + 'sentiment-file', help=sentiment_file.__doc__) + sentiment_file_parser.add_argument('gcs_uri') + + entities_text_parser = subparsers.add_parser( + 'entities-text', help=entities_text.__doc__) + entities_text_parser.add_argument('text') + + entities_file_parser = subparsers.add_parser( + 'entities-file', help=entities_file.__doc__) + entities_file_parser.add_argument('gcs_uri') + + syntax_text_parser = subparsers.add_parser( + 'syntax-text', help=syntax_text.__doc__) + syntax_text_parser.add_argument('text') + + syntax_file_parser = subparsers.add_parser( + 'syntax-file', help=syntax_file.__doc__) + syntax_file_parser.add_argument('gcs_uri') + + args = parser.parse_args() + + if args.command == 'sentiment-text': + sentiment_text(args.text) + elif args.command == 'sentiment-file': + sentiment_file(args.gcs_uri) + elif args.command == 'entities-text': + entities_text(args.text) + elif args.command == 'entities-file': + entities_file(args.gcs_uri) + elif args.command == 'syntax-text': + syntax_text(args.text) + elif args.command == 'syntax-file': + syntax_file(args.gcs_uri) + elif args.command == 'sentiment-entities-text': + entity_sentiment_text(args.text) + elif args.command == 'sentiment-entities-file': + entity_sentiment_file(args.gcs_uri) diff --git a/samples/snippets/cloud-client/v1beta2/snippets_test.py b/samples/snippets/cloud-client/v1beta2/snippets_test.py new file mode 100644 index 00000000..d1e6abd0 --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/snippets_test.py @@ -0,0 +1,71 @@ +# Copyright 2017 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import snippets + +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) + + +def test_sentiment_text(capsys): + snippets.sentiment_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Score: 0' in out + + +def test_sentiment_file(capsys): + snippets.sentiment_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'Score: 0' in out + + +def test_entities_text(capsys): + snippets.entities_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'name' in out + assert ': Obama' in out + + +def test_entities_file(capsys): + snippets.entities_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'name' in out + assert ': Obama' in out + + +def test_syntax_text(capsys): + snippets.syntax_text('President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'NOUN: President' in out + + +def test_syntax_file(capsys): + snippets.syntax_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'NOUN: President' in out + + +def test_sentiment_entities_text(capsys): + snippets.entity_sentiment_text( + 'President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_file(capsys): + snippets.entity_sentiment_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'Content : White House' in out From 34bc1901f5cc82cfd6056430da8a27ff8b34711a Mon Sep 17 00:00:00 2001 From: Gus Class Date: Fri, 21 Apr 2017 09:49:33 -0700 Subject: [PATCH 085/209] Fixes for text encoding [(#913)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/913) * Fixes for non-ASCII encodings * Adds test for UTF * Style fix --- samples/snippets/cloud-client/v1/snippets.py | 34 +++++---- .../snippets/cloud-client/v1beta2/snippets.py | 69 +++++++++++-------- .../cloud-client/v1beta2/snippets_test.py | 10 +++ 3 files changed, 73 insertions(+), 40 deletions(-) diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 94d1db4a..31e02ef6 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -24,12 +24,16 @@ import argparse from google.cloud import language +import six def sentiment_text(text): """Detects sentiment in the text.""" language_client = language.Client() + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. document = language_client.document_from_text(text) @@ -60,6 +64,9 @@ def entities_text(text): """Detects entities in the text.""" language_client = language.Client() + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. 
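The decode guard added above is the usual six idiom for accepting either byte strings or text on Python 2 and 3 before handing the value to the client. A minimal standalone sketch of just that guard (the helper name is illustrative, not part of this patch):

    import six

    def ensure_text(text):
        # Byte strings (str on Python 2, bytes on Python 3) are decoded once,
        # so the rest of the sample always works with unicode text.
        if isinstance(text, six.binary_type):
            text = text.decode('utf-8')
        return text

    print(ensure_text(b'President Obama is speaking at the White House.'))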
document = language_client.document_from_text(text) @@ -69,11 +76,11 @@ def entities_text(text): for entity in entities: print('=' * 20) - print('{:<16}: {}'.format('name', entity.name)) - print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('metadata', entity.metadata)) - print('{:<16}: {}'.format('salience', entity.salience)) - print('{:<16}: {}'.format('wikipedia_url', + print(u'{:<16}: {}'.format('name', entity.name)) + print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('metadata', entity.metadata)) + print(u'{:<16}: {}'.format('salience', entity.salience)) + print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) @@ -90,11 +97,11 @@ def entities_file(gcs_uri): for entity in entities: print('=' * 20) - print('{:<16}: {}'.format('name', entity.name)) - print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('metadata', entity.metadata)) - print('{:<16}: {}'.format('salience', entity.salience)) - print('{:<16}: {}'.format('wikipedia_url', + print(u'{:<16}: {}'.format('name', entity.name)) + print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('metadata', entity.metadata)) + print(u'{:<16}: {}'.format('salience', entity.salience)) + print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) @@ -102,6 +109,9 @@ def syntax_text(text): """Detects syntax in the text.""" language_client = language.Client() + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. document = language_client.document_from_text(text) @@ -110,7 +120,7 @@ def syntax_text(text): tokens = document.analyze_syntax().tokens for token in tokens: - print('{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech, token.text_content)) def syntax_file(gcs_uri): @@ -125,7 +135,7 @@ def syntax_file(gcs_uri): tokens = document.analyze_syntax().tokens for token in tokens: - print('{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech, token.text_content)) if __name__ == '__main__': diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py index af472118..2e6745d2 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -27,12 +27,16 @@ from google.cloud.gapic.language.v1beta2 import enums from google.cloud.gapic.language.v1beta2 import language_service_client from google.cloud.proto.language.v1beta2 import language_service_pb2 +import six def sentiment_text(text): """Detects sentiment in the text.""" language_client = language.Client(api_version='v1beta2') + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. 
document = language_client.document_from_text(text) @@ -40,8 +44,8 @@ def sentiment_text(text): # document.doc_type == language.Document.HTML sentiment = document.analyze_sentiment().sentiment - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) + print(u'Score: {}'.format(sentiment.score)) + print(u'Magnitude: {}'.format(sentiment.magnitude)) def sentiment_file(gcs_uri): @@ -55,14 +59,17 @@ def sentiment_file(gcs_uri): # document.doc_type == language.Document.HTML sentiment = document.analyze_sentiment().sentiment - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) + print(u'Score: {}'.format(sentiment.score)) + print(u'Magnitude: {}'.format(sentiment.magnitude)) def entities_text(text): """Detects entities in the text.""" language_client = language.Client(api_version='v1beta2') + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. document = language_client.document_from_text(text) @@ -71,12 +78,12 @@ def entities_text(text): entities = document.analyze_entities().entities for entity in entities: - print('=' * 20) - print('{:<16}: {}'.format('name', entity.name)) - print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('metadata', entity.metadata)) - print('{:<16}: {}'.format('salience', entity.salience)) - print('{:<16}: {}'.format('wikipedia_url', + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', entity.name)) + print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('metadata', entity.metadata)) + print(u'{:<16}: {}'.format('salience', entity.salience)) + print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) @@ -105,6 +112,9 @@ def syntax_text(text): """Detects syntax in the text.""" language_client = language.Client(api_version='v1beta2') + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + # Instantiates a plain text document. 
document = language_client.document_from_text(text) @@ -113,7 +123,7 @@ def syntax_text(text): tokens = document.analyze_syntax().tokens for token in tokens: - print('{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech, token.text_content)) def syntax_file(gcs_uri): @@ -128,7 +138,7 @@ def syntax_file(gcs_uri): tokens = document.analyze_syntax().tokens for token in tokens: - print('{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech, token.text_content)) def entity_sentiment_text(text): @@ -136,6 +146,9 @@ def entity_sentiment_text(text): language_client = language_service_client.LanguageServiceClient() document = language_service_pb2.Document() + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + document.content = text.encode('utf-8') document.type = enums.Document.Type.PLAIN_TEXT @@ -144,15 +157,15 @@ def entity_sentiment_text(text): for entity in result.entities: print('Mentions: ') - print('Name: "{}"'.format(entity.name)) + print(u'Name: "{}"'.format(entity.name)) for mention in entity.mentions: - print(' Begin Offset : {}'.format(mention.text.begin_offset)) - print(' Content : {}'.format(mention.text.content)) - print(' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(' Sentiment : {}'.format(mention.sentiment.score)) - print(' Type : {}'.format(mention.type)) - print('Salience: {}'.format(entity.salience)) - print('Sentiment: {}\n'.format(entity.sentiment)) + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) def entity_sentiment_file(gcs_uri): @@ -167,15 +180,15 @@ def entity_sentiment_file(gcs_uri): document, enums.EncodingType.UTF8) for entity in result.entities: - print('Name: "{}"'.format(entity.name)) + print(u'Name: "{}"'.format(entity.name)) for mention in entity.mentions: - print(' Begin Offset : {}'.format(mention.text.begin_offset)) - print(' Content : {}'.format(mention.text.content)) - print(' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(' Sentiment : {}'.format(mention.sentiment.score)) - print(' Type : {}'.format(mention.type)) - print('Salience: {}'.format(entity.salience)) - print('Sentiment: {}\n'.format(entity.sentiment)) + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) if __name__ == '__main__': diff --git a/samples/snippets/cloud-client/v1beta2/snippets_test.py b/samples/snippets/cloud-client/v1beta2/snippets_test.py index d1e6abd0..8db7aa1d 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets_test.py +++ b/samples/snippets/cloud-client/v1beta2/snippets_test.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright 2017 Google, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,6 +27,15 @@ def test_sentiment_text(capsys): assert 'Score: 0' in out +def test_sentiment_utf(capsys): + snippets.sentiment_text( + u'1er site d\'information. Les articles du journal et toute l\'' + + u'actualité en continu : International, France, Société, Economie, ' + + u'Culture, Environnement') + out, _ = capsys.readouterr() + assert 'Score: 0' in out + + def test_sentiment_file(capsys): snippets.sentiment_file(TEST_FILE_URL) out, _ = capsys.readouterr() From 38cec912c109fc760a486a2641efbb410f6157ce Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 27 Apr 2017 09:54:41 -0700 Subject: [PATCH 086/209] Re-generate all readmes --- samples/snippets/api/README.rst | 2 +- samples/snippets/cloud-client/v1/README.rst | 2 +- .../snippets/cloud-client/v1beta2/README.rst | 2 +- samples/snippets/tutorial/README.rst | 97 +++++++++++++++++++ 4 files changed, 100 insertions(+), 3 deletions(-) create mode 100644 samples/snippets/tutorial/README.rst diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst index 301fed0e..f757fea8 100644 --- a/samples/snippets/api/README.rst +++ b/samples/snippets/api/README.rst @@ -26,7 +26,7 @@ authentication: .. code-block:: bash - gcloud beta auth application-default login + gcloud auth application-default login #. When running on App Engine or Compute Engine, credentials are already diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index bfa46d47..4082c6db 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -26,7 +26,7 @@ authentication: .. code-block:: bash - gcloud beta auth application-default login + gcloud auth application-default login #. When running on App Engine or Compute Engine, credentials are already diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index 49cdec13..17b5a04b 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -26,7 +26,7 @@ authentication: .. code-block:: bash - gcloud beta auth application-default login + gcloud auth application-default login #. When running on App Engine or Compute Engine, credentials are already diff --git a/samples/snippets/tutorial/README.rst b/samples/snippets/tutorial/README.rst new file mode 100644 index 00000000..5b862ead --- /dev/null +++ b/samples/snippets/tutorial/README.rst @@ -0,0 +1,97 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language Tutorial Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language Tutorial. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + + + +.. _Google Cloud Natural Language Tutorial: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. 
You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Language tutorial ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python tutorial.py + + usage: tutorial.py [-h] movie_review_filename + + positional arguments: + movie_review_filename + The filename of the movie review you'd like to + analyze. + + optional arguments: + -h, --help show this help message and exit + + + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file From 09ae3ee24ebdc7a7f1c3ae192c9b95b82cf2ffd0 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 10 May 2017 09:51:17 -0700 Subject: [PATCH 087/209] Auto-update dependencies. [(#939)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/939) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index fe93963b..f1acf559 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.13.0 +requests==2.14.1 From ab2def9e7fbbc79f1a2bafd339cafee9af91e4ac Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 11 May 2017 09:33:48 -0700 Subject: [PATCH 088/209] Auto-update dependencies. 
[(#941)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/941) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index f1acf559..c1a06ec4 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.14.1 +requests==2.14.2 From 8558231f66fe111ddac1fd030fb0fa5f72b630df Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Tue, 23 May 2017 17:01:25 -0700 Subject: [PATCH 089/209] Fix README rst links [(#962)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/962) * Fix README rst links * Update all READMEs --- samples/snippets/cloud-client/v1/README.rst | 6 +++--- samples/snippets/cloud-client/v1beta2/README.rst | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index 4082c6db..7e66faa8 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -128,11 +128,11 @@ This sample uses the `Google Cloud Client Library for Python`_. You can read the documentation for more details on API usage and use GitHub to `browse the source`_ and `report issues`_. -.. Google Cloud Client Library for Python: +.. _Google Cloud Client Library for Python: https://googlecloudplatform.github.io/google-cloud-python/ -.. browse the source: +.. _browse the source: https://github.com/GoogleCloudPlatform/google-cloud-python -.. report issues: +.. _report issues: https://github.com/GoogleCloudPlatform/google-cloud-python/issues diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index 17b5a04b..3d260b9d 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -133,11 +133,11 @@ This sample uses the `Google Cloud Client Library for Python`_. You can read the documentation for more details on API usage and use GitHub to `browse the source`_ and `report issues`_. -.. Google Cloud Client Library for Python: +.. _Google Cloud Client Library for Python: https://googlecloudplatform.github.io/google-cloud-python/ -.. browse the source: +.. _browse the source: https://github.com/GoogleCloudPlatform/google-cloud-python -.. report issues: +.. _report issues: https://github.com/GoogleCloudPlatform/google-cloud-python/issues From 6d00f79651522a4af34ae365d023c41a6a9a8d69 Mon Sep 17 00:00:00 2001 From: Gus Class Date: Wed, 24 May 2017 09:20:24 -0700 Subject: [PATCH 090/209] Adds test for encoded characters. 
[(#961)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/961) --- .../snippets/cloud-client/v1beta2/snippets.py | 19 ++++++++++++++----- .../cloud-client/v1beta2/snippets_test.py | 7 +++++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py index 2e6745d2..c9f6f32a 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -22,6 +22,7 @@ """ import argparse +import sys from google.cloud import language from google.cloud.gapic.language.v1beta2 import enums @@ -53,7 +54,7 @@ def sentiment_file(gcs_uri): language_client = language.Client(api_version='v1beta2') # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = language_client.document_from_gcs_url(gcs_uri) # Detects sentiment in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML @@ -92,7 +93,7 @@ def entities_file(gcs_uri): language_client = language.Client(api_version='v1beta2') # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = language_client.document_from_gcs_url(gcs_uri) # Detects sentiment in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML @@ -131,7 +132,7 @@ def syntax_file(gcs_uri): language_client = language.Client(api_version='v1beta2') # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = language_client.document_from_gcs_url(gcs_uri) # Detects syntax in the document. You can also analyze HTML with: # document.doc_type == language.Document.HTML @@ -152,8 +153,12 @@ def entity_sentiment_text(text): document.content = text.encode('utf-8') document.type = enums.Document.Type.PLAIN_TEXT + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + result = language_client.analyze_entity_sentiment( - document, enums.EncodingType.UTF8) + document, encoding) for entity in result.entities: print('Mentions: ') @@ -176,8 +181,12 @@ def entity_sentiment_file(gcs_uri): document.gcs_content_uri = gcs_uri document.type = enums.Document.Type.PLAIN_TEXT + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + result = language_client.analyze_entity_sentiment( - document, enums.EncodingType.UTF8) + document, encoding) for entity in result.entities: print(u'Name: "{}"'.format(entity.name)) diff --git a/samples/snippets/cloud-client/v1beta2/snippets_test.py b/samples/snippets/cloud-client/v1beta2/snippets_test.py index 8db7aa1d..e6db2217 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets_test.py +++ b/samples/snippets/cloud-client/v1beta2/snippets_test.py @@ -79,3 +79,10 @@ def test_sentiment_entities_file(capsys): snippets.entity_sentiment_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'Content : White House' in out + + +def test_sentiment_entities_utf(capsys): + snippets.entity_sentiment_text( + 'foo→bar') + out, _ = capsys.readouterr() + assert 'Begin Offset : 4' in out From f2adaec75b481ad8d00dbe4f8990f9626c386440 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Sat, 17 Jun 2017 09:03:42 -0700 Subject: [PATCH 091/209] Auto-update dependencies. 
[(#992)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/992) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index c1a06ec4..62244858 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.14.2 +requests==2.18.1 From f8a6e2a63bffd633f4702f14cb78235746db8086 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 27 Jun 2017 12:41:15 -0700 Subject: [PATCH 092/209] Auto-update dependencies. [(#1004)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1004) * Auto-update dependencies. * Fix natural language samples * Fix pubsub iam samples * Fix language samples * Fix bigquery samples --- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1/snippets.py | 4 ++-- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/cloud-client/v1beta2/snippets.py | 4 ++-- samples/snippets/sentiment/requirements.txt | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 4a58920c..1d3c69ae 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.24.1 +google-cloud-language==0.25.0 diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 31e02ef6..c3205a46 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -120,7 +120,7 @@ def syntax_text(text): tokens = document.analyze_syntax().tokens for token in tokens: - print(u'{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) def syntax_file(gcs_uri): @@ -135,7 +135,7 @@ def syntax_file(gcs_uri): tokens = document.analyze_syntax().tokens for token in tokens: - print(u'{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) if __name__ == '__main__': diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index 3b8a6a4c..d44360fe 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1,2 +1,2 @@ gapic-google-cloud-language-v1beta2==0.15.3 -google-cloud-language==0.24.1 +google-cloud-language==0.25.0 diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py index c9f6f32a..02d0d8e5 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -124,7 +124,7 @@ def syntax_text(text): tokens = document.analyze_syntax().tokens for token in tokens: - print(u'{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) def syntax_file(gcs_uri): @@ -139,7 +139,7 @@ def syntax_file(gcs_uri): tokens = document.analyze_syntax().tokens for token in tokens: - print(u'{}: {}'.format(token.part_of_speech, token.text_content)) + print(u'{}: 
{}'.format(token.part_of_speech.tag, token.text_content)) def entity_sentiment_text(text): diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 4a58920c..1d3c69ae 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.24.1 +google-cloud-language==0.25.0 From 5233b90cd8f210b82b9d505e32060b200d92a8cc Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 26 Jul 2017 09:03:23 -0700 Subject: [PATCH 093/209] Auto-update dependencies. [(#1031)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1031) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 62244858..992307a0 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.18.1 +requests==2.18.2 From c74ec4ca9e4711dc10c3bd718cd41f3f925fd218 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 28 Jul 2017 15:00:34 -0700 Subject: [PATCH 094/209] Natural Language GAPIC client library [(#1018)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1018) --- samples/snippets/cloud-client/.DS_Store | Bin 0 -> 6148 bytes samples/snippets/cloud-client/v1/README.rst | 4 + .../snippets/cloud-client/v1/README.rst.in | 6 + .../snippets/cloud-client/v1/quickstart.py | 16 ++- .../snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1/snippets.py | 102 ++++++++++---- .../snippets/cloud-client/v1beta2/README.rst | 4 + .../cloud-client/v1beta2/README.rst.in | 6 + .../cloud-client/v1beta2/quickstart.py | 22 ++- .../cloud-client/v1beta2/requirements.txt | 3 +- .../snippets/cloud-client/v1beta2/snippets.py | 129 +++++++++++------- samples/snippets/sentiment/requirements.txt | 2 +- .../snippets/sentiment/sentiment_analysis.py | 30 ++-- 13 files changed, 216 insertions(+), 110 deletions(-) create mode 100644 samples/snippets/cloud-client/.DS_Store diff --git a/samples/snippets/cloud-client/.DS_Store b/samples/snippets/cloud-client/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..f344c851a0ee4f90f50741edcbb6236ebbbc354d GIT binary patch literal 6148 zcmeHK!A`{pJ@TK5l+$r=92a0ahvsOrXzLD-AJ zJA9_tJXH)nbRY%~4!+FJvKg5HW`G%3RR+wdX>F`(fm|0ezzqDF0XiQfDxqUA)u@gR z98?Q{m_xS`w5gY%9BI%om}q`|5!qLbhGr$adW`KG>lp@{#r$6`qDu@SWfEid#21KsjsJmF3xm%a2q`Ow4wopkZ oF4Z_sK|@`|7)w|2E~*mrOEM50gQ-UJpzx1?qJaly;7=L&02eA$o&W#< literal 0 HcmV?d00001 diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index 7e66faa8..a3ee4b7f 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -5,6 +5,10 @@ Google Cloud Natural Language API Python Samples This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. +- See the `migration guide`_ for information about migrating to Python client library v0.26.1. + +.. 
_migration guide: https://cloud.google.com/natural-language/docs/python-client-migration + diff --git a/samples/snippets/cloud-client/v1/README.rst.in b/samples/snippets/cloud-client/v1/README.rst.in index faf402bf..1b4855fb 100644 --- a/samples/snippets/cloud-client/v1/README.rst.in +++ b/samples/snippets/cloud-client/v1/README.rst.in @@ -10,6 +10,12 @@ product: entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + - See the `migration guide`_ for information about migrating to Python client library v0.26.1. + + + .. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration + setup: - auth - install_deps diff --git a/samples/snippets/cloud-client/v1/quickstart.py b/samples/snippets/cloud-client/v1/quickstart.py index 3fd703a5..3c19e395 100644 --- a/samples/snippets/cloud-client/v1/quickstart.py +++ b/samples/snippets/cloud-client/v1/quickstart.py @@ -18,17 +18,25 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library + # [START migration_import] from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + # [END migration_import] # Instantiates a client - language_client = language.Client() + # [START migration_client] + client = language.LanguageServiceClient() + # [END migration_client] # The text to analyze - text = 'Hello, world!' - document = language_client.document_from_text(text) + text = u'Hello, world!' + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects the sentiment of the text - sentiment = document.analyze_sentiment().sentiment + sentiment = client.analyze_sentiment(document=document).document_sentiment print('Text: {}'.format(text)) print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 1d3c69ae..8cd367ea 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.25.0 +google-cloud-language==0.26.1 diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index c3205a46..d485752b 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -24,118 +24,164 @@ import argparse from google.cloud import language +from google.cloud.language import enums +from google.cloud.language import types import six +# [START def_sentiment_text] def sentiment_text(text): """Detects sentiment in the text.""" - language_client = language.Client() + client = language.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + # [START migration_document_text] + # [START migration_analyze_sentiment] + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) + # [END migration_document_text] # Detects sentiment in the document. 
You can also analyze HTML with: - # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment().sentiment + # document.type == enums.Document.Type.HTML + sentiment = client.analyze_sentiment(document).document_sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) + # [END migration_analyze_sentiment] +# [END def_sentiment_text] +# [START def_sentiment_file] def sentiment_file(gcs_uri): """Detects sentiment in the file located in Google Cloud Storage.""" - language_client = language.Client() + client = language.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + # [START migration_document_gcs_uri] + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + # [END migration_document_gcs_uri] # Detects sentiment in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment().sentiment + # document.type == enums.Document.Type.HTML + sentiment = client.analyze_sentiment(document).document_sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) +# [END def_sentiment_file] +# [START def_entities_text] def entities_text(text): """Detects entities in the text.""" - language_client = language.Client() + client = language.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + # [START migration_analyze_entities] + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects entities in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - entities = document.analyze_entities().entities + # document.type == enums.Document.Type.HTML + entities = client.analyze_entities(document).entities for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('type', entity.type)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) + # [END migration_analyze_entities] +# [END def_entities_text] +# [START def_entities_file] def entities_file(gcs_uri): """Detects entities in the file located in Google Cloud Storage.""" - language_client = language.Client() + client = language.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects sentiment in the document. 
You can also analyze HTML with: - # document.doc_type == language.Document.HTML - entities = document.analyze_entities().entities + # document.type == enums.Document.Type.HTML + entities = client.analyze_entities(document).entities for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('type', entity.type)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) +# [END def_entities_file] +# [START def_syntax_text] def syntax_text(text): """Detects syntax in the text.""" - language_client = language.Client() + client = language.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + # [START migration_analyze_syntax] + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects syntax in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax().tokens + # document.type == enums.Document.Type.HTML + tokens = client.analyze_syntax(document).tokens + + # part-of-speech tags from enums.PartOfSpeech.Tag + pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', + 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') for token in tokens: - print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) + print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + token.text.content)) + # [END migration_analyze_syntax] +# [END def_syntax_text] +# [START def_syntax_file] def syntax_file(gcs_uri): """Detects syntax in the file located in Google Cloud Storage.""" - language_client = language.Client() + client = language.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects syntax in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax().tokens + # document.type == enums.Document.Type.HTML + tokens = client.analyze_syntax(document).tokens + + # part-of-speech tags from enums.PartOfSpeech.Tag + pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', + 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') for token in tokens: - print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) + print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + token.text.content)) +# [END def_syntax_file] if __name__ == '__main__': diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index 3d260b9d..77df4ffb 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -5,6 +5,10 @@ Google Cloud Natural Language API Python Samples This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. +- See the `migration guide`_ for information about migrating to Python client library v0.26.1. + +.. 
_migration guide: https://cloud.google.com/natural-language/docs/python-client-migration + diff --git a/samples/snippets/cloud-client/v1beta2/README.rst.in b/samples/snippets/cloud-client/v1beta2/README.rst.in index faf402bf..1b4855fb 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst.in +++ b/samples/snippets/cloud-client/v1beta2/README.rst.in @@ -10,6 +10,12 @@ product: entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. + + - See the `migration guide`_ for information about migrating to Python client library v0.26.1. + + + .. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration + setup: - auth - install_deps diff --git a/samples/snippets/cloud-client/v1beta2/quickstart.py b/samples/snippets/cloud-client/v1beta2/quickstart.py index c5a4b9c3..3cef5fca 100644 --- a/samples/snippets/cloud-client/v1beta2/quickstart.py +++ b/samples/snippets/cloud-client/v1beta2/quickstart.py @@ -18,17 +18,25 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library - from google.cloud import language + # [START beta_import_client] + # [START beta_import] + from google.cloud import language_v1beta2 + from google.cloud.language_v1beta2 import enums + from google.cloud.language_v1beta2 import types + # [END beta_import] - # Instantiates a client with they v1beta2 version - language_client = language.Client(api_version='v1beta2') + # Instantiates a client with the v1beta2 version + client = language_v1beta2.LanguageServiceClient() + # [END beta_import_client] # The text to analyze - text = 'Hallo Welt!' - document = language_client.document_from_text(text, language='DE') - + text = u'Hallo Welt!' + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT, + language='de') # Detects the sentiment of the text - sentiment = document.analyze_sentiment().sentiment + sentiment = client.analyze_sentiment(document).document_sentiment print('Text: {}'.format(text)) print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index d44360fe..8cd367ea 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1,2 +1 @@ -gapic-google-cloud-language-v1beta2==0.15.3 -google-cloud-language==0.25.0 +google-cloud-language==0.26.1 diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py index 02d0d8e5..af7836ba 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -24,64 +24,69 @@ import argparse import sys -from google.cloud import language -from google.cloud.gapic.language.v1beta2 import enums -from google.cloud.gapic.language.v1beta2 import language_service_client -from google.cloud.proto.language.v1beta2 import language_service_pb2 +from google.cloud import language_v1beta2 +from google.cloud.language_v1beta2 import enums +from google.cloud.language_v1beta2 import types import six def sentiment_text(text): """Detects sentiment in the text.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. 
- document = language_client.document_from_text(text) + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects sentiment in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment().sentiment + # document.type == enums.Document.Type.HTML + sentiment = client.analyze_sentiment(document).document_sentiment - print(u'Score: {}'.format(sentiment.score)) - print(u'Magnitude: {}'.format(sentiment.magnitude)) + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) def sentiment_file(gcs_uri): """Detects sentiment in the file located in Google Cloud Storage.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_gcs_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects sentiment in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - sentiment = document.analyze_sentiment().sentiment + # document.type == enums.Document.Type.HTML + sentiment = client.analyze_sentiment(document).document_sentiment - print(u'Score: {}'.format(sentiment.score)) - print(u'Magnitude: {}'.format(sentiment.magnitude)) + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) def entities_text(text): """Detects entities in the text.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects entities in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - entities = document.analyze_entities().entities + # document.type == enums.Document.Type.HTML + entities = client.analyze_entities(document).entities for entity in entities: - print(u'=' * 20) + print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.entity_type)) + print(u'{:<16}: {}'.format('type', entity.type)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', @@ -90,75 +95,92 @@ def entities_text(text): def entities_file(gcs_uri): """Detects entities in the file located in Google Cloud Storage.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_gcs_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects sentiment in the document. 
You can also analyze HTML with: - # document.doc_type == language.Document.HTML - entities = document.analyze_entities().entities + # document.type == enums.Document.Type.HTML + entities = client.analyze_entities(document).entities for entity in entities: print('=' * 20) - print('{:<16}: {}'.format('name', entity.name)) - print('{:<16}: {}'.format('type', entity.entity_type)) - print('{:<16}: {}'.format('metadata', entity.metadata)) - print('{:<16}: {}'.format('salience', entity.salience)) - print('{:<16}: {}'.format('wikipedia_url', + print(u'{:<16}: {}'.format('name', entity.name)) + print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('metadata', entity.metadata)) + print(u'{:<16}: {}'.format('salience', entity.salience)) + print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) def syntax_text(text): """Detects syntax in the text.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') # Instantiates a plain text document. - document = language_client.document_from_text(text) + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) # Detects syntax in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax().tokens + # document.type == enums.Document.Type.HTML + tokens = client.analyze_syntax(document).tokens + + # part-of-speech tags from enums.PartOfSpeech.Tag + pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', + 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') for token in tokens: - print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) + print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + token.text.content)) def syntax_file(gcs_uri): """Detects syntax in the file located in Google Cloud Storage.""" - language_client = language.Client(api_version='v1beta2') + client = language_v1beta2.LanguageServiceClient() # Instantiates a plain text document. - document = language_client.document_from_gcs_url(gcs_uri) + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) # Detects syntax in the document. You can also analyze HTML with: - # document.doc_type == language.Document.HTML - tokens = document.analyze_syntax().tokens + # document.type == enums.Document.Type.HTML + tokens = client.analyze_syntax(document).tokens + + # part-of-speech tags from enums.PartOfSpeech.Tag + pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', + 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') for token in tokens: - print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content)) + print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + token.text.content)) +# [START def_entity_sentiment_text] def entity_sentiment_text(text): """Detects entity sentiment in the provided text.""" - language_client = language_service_client.LanguageServiceClient() - document = language_service_pb2.Document() + client = language_v1beta2.LanguageServiceClient() if isinstance(text, six.binary_type): text = text.decode('utf-8') - document.content = text.encode('utf-8') - document.type = enums.Document.Type.PLAIN_TEXT + document = types.Document( + content=text.encode('utf-8'), + type=enums.Document.Type.PLAIN_TEXT) + # Pass in encoding type to get useful offsets in the response. 
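The offsets returned for each mention are counted in code units of the requested encoding, which is why the selection just below matches the encoding to how this Python build indexes strings: narrow builds (sys.maxunicode == 65535) count UTF-16 code units, wide builds count code points. A small illustration of why the match matters, reusing the 'foo→bar' string and the begin offset asserted in the tests earlier in this series:

    # With an encoding that matches how Python indexes the string, the
    # reported begin_offset can be used directly to slice the original text.
    text = u'foo\u2192bar'
    begin_offset = 4            # offset of 'bar' as reported for this text
    print(text[begin_offset:])  # prints 'bar'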
encoding = enums.EncodingType.UTF32 if sys.maxunicode == 65535: encoding = enums.EncodingType.UTF16 - result = language_client.analyze_entity_sentiment( - document, encoding) + result = client.analyze_entity_sentiment(document, encoding) for entity in result.entities: print('Mentions: ') @@ -171,22 +193,23 @@ def entity_sentiment_text(text): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [END def_entity_sentiment_text] def entity_sentiment_file(gcs_uri): """Detects entity sentiment in a Google Cloud Storage file.""" - language_client = language_service_client.LanguageServiceClient() - document = language_service_pb2.Document() + client = language_v1beta2.LanguageServiceClient() - document.gcs_content_uri = gcs_uri - document.type = enums.Document.Type.PLAIN_TEXT + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + # Pass in encoding type to get useful offsets in the response. encoding = enums.EncodingType.UTF32 if sys.maxunicode == 65535: encoding = enums.EncodingType.UTF16 - result = language_client.analyze_entity_sentiment( - document, encoding) + result = client.analyze_entity_sentiment(document, encoding) for entity in result.entities: print(u'Name: "{}"'.format(entity.name)) diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 1d3c69ae..8cd367ea 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.25.0 +google-cloud-language==0.26.1 diff --git a/samples/snippets/sentiment/sentiment_analysis.py b/samples/snippets/sentiment/sentiment_analysis.py index c574c318..8ac8575b 100644 --- a/samples/snippets/sentiment/sentiment_analysis.py +++ b/samples/snippets/sentiment/sentiment_analysis.py @@ -18,12 +18,15 @@ import argparse from google.cloud import language +from google.cloud.language import enums +from google.cloud.language import types # [END sentiment_tutorial_import] +# [START def_print_result] def print_result(annotations): - score = annotations.sentiment.score - magnitude = annotations.sentiment.magnitude + score = annotations.document_sentiment.score + magnitude = annotations.document_sentiment.magnitude for index, sentence in enumerate(annotations.sentences): sentence_sentiment = sentence.sentiment.score @@ -33,27 +36,26 @@ def print_result(annotations): print('Overall Sentiment: score of {} with magnitude of {}'.format( score, magnitude)) return 0 - - print('Sentiment: score of {} with magnitude of {}'.format( - score, magnitude)) - return 0 +# [END def_print_result] +# [START def_analyze] def analyze(movie_review_filename): """Run a sentiment analysis request on text within a passed filename.""" - language_client = language.Client() + client = language.LanguageServiceClient() with open(movie_review_filename, 'r') as review_file: # Instantiates a plain text document. - document = language_client.document_from_html(review_file.read()) + content = review_file.read() - # Detects sentiment in the document. 
- annotations = document.annotate_text(include_sentiment=True, - include_syntax=False, - include_entities=False) + document = types.Document( + content=content, + type=enums.Document.Type.PLAIN_TEXT) + annotations = client.analyze_sentiment(document=document) - # Print the results - print_result(annotations) + # Print the results + print_result(annotations) +# [END def_analyze] if __name__ == '__main__': From 288d200506bfc92ad2650d49c092590fdde99253 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 3 Aug 2017 09:05:36 -0700 Subject: [PATCH 095/209] Auto-update dependencies. [(#1048)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1048) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 992307a0..bd9870bd 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.18.2 +requests==2.18.3 From 14b7ca040233aed907bdaa6b25103a8d612bc27a Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Thu, 3 Aug 2017 16:48:10 -0700 Subject: [PATCH 096/209] move region tags so that the beta page only include codes from the relevant file --- samples/snippets/cloud-client/v1beta2/quickstart.py | 4 ---- samples/snippets/cloud-client/v1beta2/snippets.py | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/snippets/cloud-client/v1beta2/quickstart.py b/samples/snippets/cloud-client/v1beta2/quickstart.py index 3cef5fca..b19d11b7 100644 --- a/samples/snippets/cloud-client/v1beta2/quickstart.py +++ b/samples/snippets/cloud-client/v1beta2/quickstart.py @@ -18,16 +18,12 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library - # [START beta_import_client] - # [START beta_import] from google.cloud import language_v1beta2 from google.cloud.language_v1beta2 import enums from google.cloud.language_v1beta2 import types - # [END beta_import] # Instantiates a client with the v1beta2 version client = language_v1beta2.LanguageServiceClient() - # [END beta_import_client] # The text to analyze text = u'Hallo Welt!' diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py index af7836ba..0ea7352a 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -24,9 +24,11 @@ import argparse import sys +# [START beta_import] from google.cloud import language_v1beta2 from google.cloud.language_v1beta2 import enums from google.cloud.language_v1beta2 import types +# [END beta_import] import six @@ -166,7 +168,9 @@ def syntax_file(gcs_uri): # [START def_entity_sentiment_text] def entity_sentiment_text(text): """Detects entity sentiment in the provided text.""" + # [START beta_client] client = language_v1beta2.LanguageServiceClient() + # [END beta_client] if isinstance(text, six.binary_type): text = text.decode('utf-8') From 78b546e968ad1da4171825d051cd18f923d7c7c8 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 7 Aug 2017 10:04:55 -0700 Subject: [PATCH 097/209] Auto-update dependencies. [(#1055)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1055) * Auto-update dependencies. 
* Explicitly use latest bigtable client Change-Id: Id71e9e768f020730e4ca9514a0d7ebaa794e7d9e * Revert language update for now Change-Id: I8867f154e9a5aae00d0047c9caf880e5e8f50c53 * Remove pdb. smh Change-Id: I5ff905fadc026eebbcd45512d4e76e003e3b2b43 --- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index 8cd367ea..743bbe79 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.26.1 +google-cloud-language==0.27.0 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 8cd367ea..743bbe79 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.26.1 +google-cloud-language==0.27.0 From bd8a96d3717303e8d3ee9a372509dbc025f19328 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Mon, 7 Aug 2017 12:35:23 -0700 Subject: [PATCH 098/209] Update langauge test case to deal with changing server output Change-Id: Id4e773d2fed4a8934876535987e2c703a8504c26 --- samples/snippets/cloud-client/v1/snippets_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/snippets/cloud-client/v1/snippets_test.py b/samples/snippets/cloud-client/v1/snippets_test.py index 080d5dd5..8bbdaf9d 100644 --- a/samples/snippets/cloud-client/v1/snippets_test.py +++ b/samples/snippets/cloud-client/v1/snippets_test.py @@ -21,15 +21,15 @@ def test_sentiment_text(capsys): - snippets.sentiment_text('President Obama is speaking at the White House.') + snippets.sentiment_text('No! God please, no!') out, _ = capsys.readouterr() - assert 'Score: 0.2' in out + assert 'Score: ' in out def test_sentiment_file(capsys): snippets.sentiment_file(TEST_FILE_URL) out, _ = capsys.readouterr() - assert 'Score: 0.2' in out + assert 'Score: ' in out def test_entities_text(capsys): From 4e1d202aeda49cb96b7a264879280c7b502db1c3 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 8 Aug 2017 08:51:01 -0700 Subject: [PATCH 099/209] Auto-update dependencies. 
[(#1057)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1057) --- samples/snippets/cloud-client/v1/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 8cd367ea..743bbe79 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.26.1 +google-cloud-language==0.27.0 From 79a5991109c62b0b7149ff70df78ea67bd782556 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 8 Aug 2017 15:55:47 -0700 Subject: [PATCH 100/209] show entity type name [(#1062)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1062) * show entity type name * update v1beta2 * correct indent --- samples/snippets/cloud-client/v1/snippets.py | 12 ++++++++++-- samples/snippets/cloud-client/v1beta2/snippets.py | 12 ++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index d485752b..704c6347 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -94,10 +94,14 @@ def entities_text(text): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities + # entity types from enums.Entity.Type + entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', + 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') + for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('type', entity_type[entity.type])) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', @@ -120,10 +124,14 @@ def entities_file(gcs_uri): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities + # entity types from enums.Entity.Type + entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', + 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') + for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('type', entity_type[entity.type])) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py index 0ea7352a..0ea15f7f 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -85,10 +85,14 @@ def entities_text(text): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities + # entity types from enums.Entity.Type + entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', + 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') + for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('type', entity_type[entity.type])) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) 
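        # entity.type comes back as the integer value of enums.Entity.Type,
        # so the entity_type tuple above maps it to a human-readable label
        # (for example, a value of 1 is printed as 'PERSON').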
print(u'{:<16}: {}'.format('wikipedia_url', @@ -108,10 +112,14 @@ def entities_file(gcs_uri): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities + # entity types from enums.Entity.Type + entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', + 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') + for entity in entities: print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity.type)) + print(u'{:<16}: {}'.format('type', entity_type[entity.type])) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', From a1a46262e362781a56ac10729aa9b7e6c0227589 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 16 Aug 2017 09:34:13 -0700 Subject: [PATCH 101/209] Auto-update dependencies. [(#1073)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1073) --- samples/snippets/movie_nl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index bd9870bd..b072771d 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ google-api-python-client==1.6.2 -requests==2.18.3 +requests==2.18.4 From 9dc7560ddcc04ac666069606cb029ef2cee8693b Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 16 Aug 2017 09:51:07 -0700 Subject: [PATCH 102/209] Fix flaky movie_nl tests Change-Id: I4922637173048627f38b507588a4f30a5d490212 --- samples/snippets/movie_nl/main_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/snippets/movie_nl/main_test.py b/samples/snippets/movie_nl/main_test.py index 927639eb..7e33cefd 100644 --- a/samples/snippets/movie_nl/main_test.py +++ b/samples/snippets/movie_nl/main_test.py @@ -70,18 +70,18 @@ def test_process_movie_reviews(): entities = [json.loads(entity) for entity in entities] # assert sentiments - assert sentiments[0].get('sentiment') == 0.9 + assert sentiments[0].get('sentiment') > 0 assert sentiments[0].get('label') == 1 - assert sentiments[1].get('sentiment') == 0.9 + assert sentiments[1].get('sentiment') > 0 assert sentiments[1].get('label') == 1 # assert entities assert len(entities) == 1 assert entities[0].get('name') == 'Tom Cruise' assert (entities[0].get('wiki_url') == - 'http://en.wikipedia.org/wiki/Tom_Cruise') - assert entities[0].get('sentiment') == 1.8 + 'https://en.wikipedia.org/wiki/Tom_Cruise') + assert entities[0].get('sentiment') > 0 def test_rank_positive_entities(capsys): From d547d85330bb6499d3ce034b0151fea2cbdb971e Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 29 Aug 2017 16:53:02 -0700 Subject: [PATCH 103/209] Auto-update dependencies. [(#1093)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1093) * Auto-update dependencies. 
* Fix storage notification poll sample Change-Id: I6afbc79d15e050531555e4c8e51066996717a0f3 * Fix spanner samples Change-Id: I40069222c60d57e8f3d3878167591af9130895cb * Drop coverage because it's not useful Change-Id: Iae399a7083d7866c3c7b9162d0de244fbff8b522 * Try again to fix flaky logging test Change-Id: I6225c074701970c17c426677ef1935bb6d7e36b4 --- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 743bbe79..0aa7a714 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.27.0 +google-cloud-language==0.28.0 diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index 743bbe79..0aa7a714 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.27.0 +google-cloud-language==0.28.0 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 743bbe79..0aa7a714 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.27.0 +google-cloud-language==0.28.0 From 00e4c12c32150fa918e0db2ed145d72d5fd750ab Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 30 Aug 2017 10:15:58 -0700 Subject: [PATCH 104/209] Auto-update dependencies. [(#1094)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1094) * Auto-update dependencies. 
* Relax assertions in the ocr_nl sample Change-Id: I6d37e5846a8d6dd52429cb30d501f448c52cbba1 * Drop unused logging apiary samples Change-Id: I545718283773cb729a5e0def8a76ebfa40829d51 --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/main_test.py | 2 -- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 6 files changed, 5 insertions(+), 7 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 4f77d693..28ef8912 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index b072771d..69ae9a19 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,2 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/main_test.py b/samples/snippets/ocr_nl/main_test.py index afaf2e16..e4bf2b51 100755 --- a/samples/snippets/ocr_nl/main_test.py +++ b/samples/snippets/ocr_nl/main_test.py @@ -66,7 +66,6 @@ def test_text_returns_entities(): etype, ename, salience, wurl = text_analyzer.extract_entity_info( entities[0]) assert ename == 'holmes' - assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes' def test_entities_list(): @@ -80,7 +79,6 @@ def test_entities_list(): etype, ename, salience, wurl = text_analyzer.extract_entity_info( entities[0]) assert ename == 'bennet' - assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet' def test_main(tmpdir, capsys): diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 4f77d693..28ef8912 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 4f77d693..28ef8912 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index 4f77d693..28ef8912 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1 +1 @@ -google-api-python-client==1.6.2 +google-api-python-client==1.6.3 From bbfb951229188f956546107e5b4939f54fd18923 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 15 Sep 2017 13:36:20 -0700 Subject: [PATCH 105/209] Language classify [(#1095)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1095) * add classify text samples and tests * use longer text * move entity sentiment to v1 * flake * year when first written * year first written --- samples/snippets/cloud-client/v1/snippets.py | 74 ++++++++++++++++ .../snippets/cloud-client/v1/snippets_test.py | 23 ++++- .../v1beta2/resources/android_text.txt | 1 + .../snippets/cloud-client/v1beta2/snippets.py | 86 +++++++------------ 
.../cloud-client/v1beta2/snippets_test.py | 26 +++--- 5 files changed, 141 insertions(+), 69 deletions(-) create mode 100644 samples/snippets/cloud-client/v1beta2/resources/android_text.txt diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 704c6347..e13fc7dd 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -22,10 +22,12 @@ """ import argparse +import sys from google.cloud import language from google.cloud.language import enums from google.cloud.language import types + import six @@ -192,12 +194,80 @@ def syntax_file(gcs_uri): # [END def_syntax_file] +# [START def_entity_sentiment_text] +def entity_sentiment_text(text): + """Detects entity sentiment in the provided text.""" + client = language.LanguageServiceClient() + + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + + document = types.Document( + content=text.encode('utf-8'), + type=enums.Document.Type.PLAIN_TEXT) + + # Detect and send native Python encoding to receive correct word offsets. + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + + result = client.analyze_entity_sentiment(document, encoding) + + for entity in result.entities: + print('Mentions: ') + print(u'Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [END def_entity_sentiment_text] + + +def entity_sentiment_file(gcs_uri): + """Detects entity sentiment in a Google Cloud Storage file.""" + client = language.LanguageServiceClient() + + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + + # Detect and send native Python encoding to receive correct word offsets. 
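    # On "narrow" Python builds sys.maxunicode is 65535 and strings are
    # indexed by UTF-16 code units, so UTF16 offsets line up with Python
    # string indexing; on "wide" builds (including Python 3.3+) UTF32
    # offsets match code points.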
+ encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + + result = client.analyze_entity_sentiment(document, encoding) + + for entity in result.entities: + print(u'Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) + + if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) subparsers = parser.add_subparsers(dest='command') + sentiment_entities_text_parser = subparsers.add_parser( + 'sentiment-entities-text', help=entity_sentiment_text.__doc__) + sentiment_entities_text_parser.add_argument('text') + + sentiment_entities_file_parser = subparsers.add_parser( + 'sentiment-entities-file', help=entity_sentiment_file.__doc__) + sentiment_entities_file_parser.add_argument('gcs_uri') + sentiment_text_parser = subparsers.add_parser( 'sentiment-text', help=sentiment_text.__doc__) sentiment_text_parser.add_argument('text') @@ -236,3 +306,7 @@ def syntax_file(gcs_uri): syntax_text(args.text) elif args.command == 'syntax-file': syntax_file(args.gcs_uri) + elif args.command == 'sentiment-entities-text': + entity_sentiment_text(args.text) + elif args.command == 'sentiment-entities-file': + entity_sentiment_file(args.gcs_uri) diff --git a/samples/snippets/cloud-client/v1/snippets_test.py b/samples/snippets/cloud-client/v1/snippets_test.py index 8bbdaf9d..168701dc 100644 --- a/samples/snippets/cloud-client/v1/snippets_test.py +++ b/samples/snippets/cloud-client/v1/snippets_test.py @@ -1,4 +1,5 @@ -# Copyright 2016 Google, Inc. +# -*- coding: utf-8 -*- +# Copyright 2017 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -56,3 +57,23 @@ def test_syntax_file(capsys): snippets.syntax_file(TEST_FILE_URL) out, _ = capsys.readouterr() assert 'NOUN: President' in out + + +def test_sentiment_entities_text(capsys): + snippets.entity_sentiment_text( + 'President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_file(capsys): + snippets.entity_sentiment_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_utf(capsys): + snippets.entity_sentiment_text( + 'foo→bar') + out, _ = capsys.readouterr() + assert 'Begin Offset : 4' in out diff --git a/samples/snippets/cloud-client/v1beta2/resources/android_text.txt b/samples/snippets/cloud-client/v1beta2/resources/android_text.txt new file mode 100644 index 00000000..c05c452d --- /dev/null +++ b/samples/snippets/cloud-client/v1beta2/resources/android_text.txt @@ -0,0 +1 @@ +Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets. 
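A note on the encoding handling added above: analyze_entity_sentiment reports
each mention's begin_offset in units of the EncodingType passed in, so the
samples choose UTF16 on "narrow" Python builds (sys.maxunicode == 65535) and
UTF32 otherwise so that those offsets line up with Python string indexing.
A minimal, self-contained sketch of why that matters; the emoji test string
below is purely illustrative and is not part of the sample:

    import sys

    text = u'foo\U0001F600bar'  # 'foo', one astral-plane emoji, then 'bar'

    # Offset of 'bar' as the API would report it:
    #   UTF32 counts code points -> 4
    #   UTF16 counts code units  -> 5 (the emoji is a surrogate pair)
    # Python's own indexing matches UTF16 on narrow builds and UTF32 on wide
    # ones, which is exactly the choice the samples make via sys.maxunicode.
    expected = 5 if sys.maxunicode == 65535 else 4
    assert text.index(u'bar') == expected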
diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py index 0ea15f7f..3ccc2933 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2017 Google, Inc. +# Copyright 2016 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ """ import argparse -import sys # [START beta_import] from google.cloud import language_v1beta2 @@ -173,9 +172,9 @@ def syntax_file(gcs_uri): token.text.content)) -# [START def_entity_sentiment_text] -def entity_sentiment_text(text): - """Detects entity sentiment in the provided text.""" +# [START def_classify_text] +def classify_text(text): + """Classifies the provided text.""" # [START beta_client] client = language_v1beta2.LanguageServiceClient() # [END beta_client] @@ -187,52 +186,31 @@ def entity_sentiment_text(text): content=text.encode('utf-8'), type=enums.Document.Type.PLAIN_TEXT) - # Pass in encoding type to get useful offsets in the response. - encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print('Mentions: ') - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END def_entity_sentiment_text] - - -def entity_sentiment_file(gcs_uri): - """Detects entity sentiment in a Google Cloud Storage file.""" + categories = client.classify_text(document).categories + + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) +# [END def_classify_text] + + +# [START def_classify_file] +def classify_file(gcs_uri): + """Classifies the text in a Google Cloud Storage file.""" client = language_v1beta2.LanguageServiceClient() document = types.Document( gcs_content_uri=gcs_uri, type=enums.Document.Type.PLAIN_TEXT) - # Pass in encoding type to get useful offsets in the response. 
- encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) + categories = client.classify_text(document).categories - for entity in result.entities: - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) +# [END def_classify_file] if __name__ == '__main__': @@ -241,13 +219,13 @@ def entity_sentiment_file(gcs_uri): formatter_class=argparse.RawDescriptionHelpFormatter) subparsers = parser.add_subparsers(dest='command') - sentiment_entities_text_parser = subparsers.add_parser( - 'sentiment-entities-text', help=entity_sentiment_text.__doc__) - sentiment_entities_text_parser.add_argument('text') + classify_text_parser = subparsers.add_parser( + 'classify-text', help=classify_text.__doc__) + classify_text_parser.add_argument('text') - sentiment_entities_file_parser = subparsers.add_parser( - 'sentiment-entities-file', help=entity_sentiment_file.__doc__) - sentiment_entities_file_parser.add_argument('gcs_uri') + classify_text_parser = subparsers.add_parser( + 'classify-file', help=classify_file.__doc__) + classify_text_parser.add_argument('gcs_uri') sentiment_text_parser = subparsers.add_parser( 'sentiment-text', help=sentiment_text.__doc__) @@ -287,7 +265,7 @@ def entity_sentiment_file(gcs_uri): syntax_text(args.text) elif args.command == 'syntax-file': syntax_file(args.gcs_uri) - elif args.command == 'sentiment-entities-text': - entity_sentiment_text(args.text) - elif args.command == 'sentiment-entities-file': - entity_sentiment_file(args.gcs_uri) + elif args.command == 'classify-text': + classify_text(args.text) + elif args.command == 'classify-file': + classify_file(args.gcs_uri) diff --git a/samples/snippets/cloud-client/v1beta2/snippets_test.py b/samples/snippets/cloud-client/v1beta2/snippets_test.py index e6db2217..d440136b 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets_test.py +++ b/samples/snippets/cloud-client/v1beta2/snippets_test.py @@ -19,6 +19,7 @@ BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) +LONG_TEST_FILE_URL = 'gs://{}/android_text.txt'.format(BUCKET) def test_sentiment_text(capsys): @@ -68,21 +69,18 @@ def test_syntax_file(capsys): assert 'NOUN: President' in out -def test_sentiment_entities_text(capsys): - snippets.entity_sentiment_text( - 'President Obama is speaking at the White House.') +def test_classify_text(capsys): + snippets.classify_text( + 'Android is a mobile operating system developed by Google, ' + 'based on the Linux kernel and designed primarily for touchscreen ' + 'mobile devices such as smartphones and tablets.') out, _ = capsys.readouterr() - assert 'Content : White House' in out - - -def test_sentiment_entities_file(capsys): - snippets.entity_sentiment_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'Content : White House' in out + assert 'name' in out + assert '/Computers & Electronics' in out -def 
test_sentiment_entities_utf(capsys): - snippets.entity_sentiment_text( - 'foo→bar') +def test_classify_file(capsys): + snippets.classify_file(LONG_TEST_FILE_URL) out, _ = capsys.readouterr() - assert 'Begin Offset : 4' in out + assert 'name' in out + assert '/Computers & Electronics' in out From 5efb35fc4716cb4fcfaa682cc202f5174bb2d160 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Fri, 15 Sep 2017 13:49:00 -0700 Subject: [PATCH 106/209] Client version update [(#1117)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1117) * correct client version * update client version --- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 0aa7a714..afc8ed0a 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.28.0 +google-cloud-language==0.29.0 diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index 0aa7a714..afc8ed0a 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.28.0 +google-cloud-language==0.29.0 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 0aa7a714..afc8ed0a 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.28.0 +google-cloud-language==0.29.0 From 0eb85024969bd4eb165d7d80a3c87f93f32de700 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Mon, 18 Sep 2017 11:04:05 -0700 Subject: [PATCH 107/209] Update all generated readme auth instructions [(#1121)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1121) Change-Id: I03b5eaef8b17ac3dc3c0339fd2c7447bd3e11bd2 --- samples/snippets/api/README.rst | 32 +++---------------- samples/snippets/cloud-client/v1/README.rst | 32 +++---------------- .../snippets/cloud-client/v1beta2/README.rst | 32 +++---------------- samples/snippets/tutorial/README.rst | 32 +++---------------- 4 files changed, 20 insertions(+), 108 deletions(-) diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst index f757fea8..c975769a 100644 --- a/samples/snippets/api/README.rst +++ b/samples/snippets/api/README.rst @@ -17,34 +17,12 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. 
This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json - -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index a3ee4b7f..165add01 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -21,34 +21,12 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json - -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index 77df4ffb..8640369e 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -21,34 +21,12 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. 
You have a few options for setting up -authentication: +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json - -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ diff --git a/samples/snippets/tutorial/README.rst b/samples/snippets/tutorial/README.rst index 5b862ead..f1ea6ad9 100644 --- a/samples/snippets/tutorial/README.rst +++ b/samples/snippets/tutorial/README.rst @@ -17,34 +17,12 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json - -.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. 
_Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ From 9aa7b3741a89518580c36fdd7c3a971281697cde Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 19 Sep 2017 09:30:32 -0700 Subject: [PATCH 108/209] Update readme [(#1124)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1124) * update readme * keep entity sentiment in v1beta2 as well * update readme --- samples/snippets/cloud-client/v1/README.rst | 9 ++- .../snippets/cloud-client/v1beta2/README.rst | 7 +- .../snippets/cloud-client/v1beta2/snippets.py | 79 ++++++++++++++++++- .../cloud-client/v1beta2/snippets_test.py | 20 +++++ 4 files changed, 109 insertions(+), 6 deletions(-) diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index 165add01..8640369e 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -76,7 +76,7 @@ To run this sample: $ python snippets.py usage: snippets.py [-h] - {sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} ... This application demonstrates how to perform basic operations with the @@ -86,7 +86,12 @@ To run this sample: https://cloud.google.com/natural-language/docs. positional arguments: - {sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + sentiment-entities-text + Detects entity sentiment in the provided text. + sentiment-entities-file + Detects entity sentiment in a Google Cloud Storage + file. sentiment-text Detects sentiment in the text. sentiment-file Detects sentiment in the file located in Google Cloud Storage. diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index 8640369e..dc3b85c4 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -76,7 +76,7 @@ To run this sample: $ python snippets.py usage: snippets.py [-h] - {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} ... This application demonstrates how to perform basic operations with the @@ -86,7 +86,10 @@ To run this sample: https://cloud.google.com/natural-language/docs. positional arguments: - {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + classify-text Classifies content categories of the provided text. + classify-file Classifies content categories of the text in a Google + Cloud Storage file. sentiment-entities-text Detects entity sentiment in the provided text. 
sentiment-entities-file diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py index 3ccc2933..abf16ada 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ b/samples/snippets/cloud-client/v1beta2/snippets.py @@ -22,6 +22,7 @@ """ import argparse +import sys # [START beta_import] from google.cloud import language_v1beta2 @@ -125,6 +126,66 @@ def entities_file(gcs_uri): entity.metadata.get('wikipedia_url', '-'))) +# [START def_entity_sentiment_text] +def entity_sentiment_text(text): + """Detects entity sentiment in the provided text.""" + client = language_v1beta2.LanguageServiceClient() + + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + + document = types.Document( + content=text.encode('utf-8'), + type=enums.Document.Type.PLAIN_TEXT) + + # Detect and send native Python encoding to receive correct word offsets. + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + + result = client.analyze_entity_sentiment(document, encoding) + + for entity in result.entities: + print('Mentions: ') + print(u'Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [END def_entity_sentiment_text] + + +def entity_sentiment_file(gcs_uri): + """Detects entity sentiment in a Google Cloud Storage file.""" + client = language_v1beta2.LanguageServiceClient() + + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + + # Detect and send native Python encoding to receive correct word offsets. + encoding = enums.EncodingType.UTF32 + if sys.maxunicode == 65535: + encoding = enums.EncodingType.UTF16 + + result = client.analyze_entity_sentiment(document, encoding) + + for entity in result.entities: + print(u'Name: "{}"'.format(entity.name)) + for mention in entity.mentions: + print(u' Begin Offset : {}'.format(mention.text.begin_offset)) + print(u' Content : {}'.format(mention.text.content)) + print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) + print(u' Sentiment : {}'.format(mention.sentiment.score)) + print(u' Type : {}'.format(mention.type)) + print(u'Salience: {}'.format(entity.salience)) + print(u'Sentiment: {}\n'.format(entity.sentiment)) + + def syntax_text(text): """Detects syntax in the text.""" client = language_v1beta2.LanguageServiceClient() @@ -174,7 +235,7 @@ def syntax_file(gcs_uri): # [START def_classify_text] def classify_text(text): - """Classifies the provided text.""" + """Classifies content categories of the provided text.""" # [START beta_client] client = language_v1beta2.LanguageServiceClient() # [END beta_client] @@ -197,7 +258,9 @@ def classify_text(text): # [START def_classify_file] def classify_file(gcs_uri): - """Classifies the text in a Google Cloud Storage file.""" + """Classifies content categories of the text in a Google Cloud Storage + file. 
+ """ client = language_v1beta2.LanguageServiceClient() document = types.Document( @@ -227,6 +290,14 @@ def classify_file(gcs_uri): 'classify-file', help=classify_file.__doc__) classify_text_parser.add_argument('gcs_uri') + sentiment_entities_text_parser = subparsers.add_parser( + 'sentiment-entities-text', help=entity_sentiment_text.__doc__) + sentiment_entities_text_parser.add_argument('text') + + sentiment_entities_file_parser = subparsers.add_parser( + 'sentiment-entities-file', help=entity_sentiment_file.__doc__) + sentiment_entities_file_parser.add_argument('gcs_uri') + sentiment_text_parser = subparsers.add_parser( 'sentiment-text', help=sentiment_text.__doc__) sentiment_text_parser.add_argument('text') @@ -265,6 +336,10 @@ def classify_file(gcs_uri): syntax_text(args.text) elif args.command == 'syntax-file': syntax_file(args.gcs_uri) + elif args.command == 'sentiment-entities-text': + entity_sentiment_text(args.text) + elif args.command == 'sentiment-entities-file': + entity_sentiment_file(args.gcs_uri) elif args.command == 'classify-text': classify_text(args.text) elif args.command == 'classify-file': diff --git a/samples/snippets/cloud-client/v1beta2/snippets_test.py b/samples/snippets/cloud-client/v1beta2/snippets_test.py index d440136b..5924ffb4 100644 --- a/samples/snippets/cloud-client/v1beta2/snippets_test.py +++ b/samples/snippets/cloud-client/v1beta2/snippets_test.py @@ -69,6 +69,26 @@ def test_syntax_file(capsys): assert 'NOUN: President' in out +def test_sentiment_entities_text(capsys): + snippets.entity_sentiment_text( + 'President Obama is speaking at the White House.') + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_file(capsys): + snippets.entity_sentiment_file(TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'Content : White House' in out + + +def test_sentiment_entities_utf(capsys): + snippets.entity_sentiment_text( + 'foo→bar') + out, _ = capsys.readouterr() + assert 'Begin Offset : 4' in out + + def test_classify_text(capsys): snippets.classify_text( 'Android is a mobile operating system developed by Google, ' From a7d945e4c0af89d44da2b583228bdc7b7461a777 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 19 Sep 2017 09:33:15 -0700 Subject: [PATCH 109/209] Classify tutorial [(#1120)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1120) * first version of classify_text tutorial * addressing comments * classify text tutorial * update client version * year first written * use auto generated README * add README.rst.in and README.rst * addressing review comments * add tests for index and query * import order * add numpy to requirements --- samples/snippets/classify_text/README.rst | 126 +++++++++ samples/snippets/classify_text/README.rst.in | 26 ++ .../classify_text/classify_text_tutorial.py | 261 ++++++++++++++++++ .../classify_text_tutorial_test.py | 90 ++++++ .../snippets/classify_text/requirements.txt | 2 + .../classify_text/resources/query_text1.txt | 1 + .../classify_text/resources/query_text2.txt | 1 + .../classify_text/resources/query_text3.txt | 1 + .../classify_text/resources/texts/android.txt | 1 + .../resources/texts/cat_in_the_hat.txt | 1 + .../resources/texts/cloud_computing.txt | 1 + .../classify_text/resources/texts/eclipse.txt | 1 + .../resources/texts/eclipse_of_the_sun.txt | 1 + .../classify_text/resources/texts/email.txt | 1 + .../classify_text/resources/texts/gcp.txt | 1 + .../classify_text/resources/texts/gmail.txt | 1 + 
.../classify_text/resources/texts/google.txt | 1 + .../resources/texts/harry_potter.txt | 1 + .../classify_text/resources/texts/matilda.txt | 1 + .../resources/texts/mobile_phone.txt | 1 + .../classify_text/resources/texts/mr_fox.txt | 1 + .../resources/texts/wireless.txt | 1 + 22 files changed, 522 insertions(+) create mode 100644 samples/snippets/classify_text/README.rst create mode 100644 samples/snippets/classify_text/README.rst.in create mode 100644 samples/snippets/classify_text/classify_text_tutorial.py create mode 100644 samples/snippets/classify_text/classify_text_tutorial_test.py create mode 100644 samples/snippets/classify_text/requirements.txt create mode 100644 samples/snippets/classify_text/resources/query_text1.txt create mode 100644 samples/snippets/classify_text/resources/query_text2.txt create mode 100644 samples/snippets/classify_text/resources/query_text3.txt create mode 100644 samples/snippets/classify_text/resources/texts/android.txt create mode 100644 samples/snippets/classify_text/resources/texts/cat_in_the_hat.txt create mode 100644 samples/snippets/classify_text/resources/texts/cloud_computing.txt create mode 100644 samples/snippets/classify_text/resources/texts/eclipse.txt create mode 100644 samples/snippets/classify_text/resources/texts/eclipse_of_the_sun.txt create mode 100644 samples/snippets/classify_text/resources/texts/email.txt create mode 100644 samples/snippets/classify_text/resources/texts/gcp.txt create mode 100644 samples/snippets/classify_text/resources/texts/gmail.txt create mode 100644 samples/snippets/classify_text/resources/texts/google.txt create mode 100644 samples/snippets/classify_text/resources/texts/harry_potter.txt create mode 100644 samples/snippets/classify_text/resources/texts/matilda.txt create mode 100644 samples/snippets/classify_text/resources/texts/mobile_phone.txt create mode 100644 samples/snippets/classify_text/resources/texts/mr_fox.txt create mode 100644 samples/snippets/classify_text/resources/texts/wireless.txt diff --git a/samples/snippets/classify_text/README.rst b/samples/snippets/classify_text/README.rst new file mode 100644 index 00000000..0a61591b --- /dev/null +++ b/samples/snippets/classify_text/README.rst @@ -0,0 +1,126 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Natural Language API Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers. + +This tutorial demostrates how to use the `classify_text` method to classify content category of text files, and use the result to compare texts by their similarity to each other. See the `tutorial page`_ for details about this sample. + +.. _tutorial page: https://cloud.google.com/natural-language/docs/classify-text-tutorial + + + + +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. 
When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Classify Text Tutorial ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python classify_text_tutorial.py + + usage: classify_text_tutorial.py [-h] + {classify,index,query,query-category} ... + + Using the classify_text method to cluster texts. + + positional arguments: + {classify,index,query,query-category} + classify Classify the input text into categories. + index Classify each text file in a directory and write the + results to the index_file. + query Find the indexed files that are the most similar to + the query text. + query-category Find the indexed files that are the most similar to + the query label. The list of all available labels: + https://cloud.google.com/natural- + language/docs/categories + + optional arguments: + -h, --help show this help message and exit + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/classify_text/README.rst.in b/samples/snippets/classify_text/README.rst.in new file mode 100644 index 00000000..42e8f061 --- /dev/null +++ b/samples/snippets/classify_text/README.rst.in @@ -0,0 +1,26 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Natural Language API + short_name: Cloud Natural Language API + url: https://cloud.google.com/natural-language/docs/ + description: > + The `Google Cloud Natural Language API`_ provides natural language + understanding technologies to developers. + + + This tutorial demostrates how to use the `classify_text` method to classify content category of text files, and use the result to compare texts by their similarity to each other. See the `tutorial page`_ for details about this sample. + + + .. _tutorial page: https://cloud.google.com/natural-language/docs/classify-text-tutorial + +setup: +- auth +- install_deps + +samples: +- name: Classify Text Tutorial + file: classify_text_tutorial.py + show_help: true + +cloud_client_library: true diff --git a/samples/snippets/classify_text/classify_text_tutorial.py b/samples/snippets/classify_text/classify_text_tutorial.py new file mode 100644 index 00000000..08a03e98 --- /dev/null +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python + +# Copyright 2017, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START classify_text_tutorial] +"""Using the classify_text method to find content categories of text files, +Then use the content category labels to compare text similarity. + +For more information, see the tutorial page at +https://cloud.google.com/natural-language/docs/classify-text-tutorial. +""" + +# [START classify_text_tutorial_import] +import argparse +import io +import json +import os + +from google.cloud import language_v1beta2 +from google.cloud.language_v1beta2 import enums +from google.cloud.language_v1beta2 import types + +import numpy +import six +# [END classify_text_tutorial_import] + + +# [START def_classify] +def classify(text, verbose=True): + """Classify the input text into categories. """ + + language_client = language_v1beta2.LanguageServiceClient() + + document = types.Document( + content=text, + type=enums.Document.Type.PLAIN_TEXT) + response = language_client.classify_text(document) + categories = response.categories + + result = {} + + for category in categories: + # Turn the categories into a dictionary of the form: + # {category.name: category.confidence}, so that they can + # be treated as a sparse vector. 
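        # For example: {'/Computers & Electronics/Software': 0.69}, where
        # the key is a category path and the value is the confidence score
        # (the score shown here is illustrative and varies between calls).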
+ result[category.name] = category.confidence + + if verbose: + print(text) + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('category', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) + + return result +# [END def_classify] + + +# [START def_index] +def index(path, index_file): + """Classify each text file in a directory and write + the results to the index_file. + """ + + result = {} + for filename in os.listdir(path): + file_path = os.path.join(path, filename) + + if not os.path.isfile(file_path): + continue + + try: + with io.open(file_path, 'r') as f: + text = f.read() + categories = classify(text, verbose=False) + + result[filename] = categories + except: + print('Failed to process {}'.format(file_path)) + + with io.open(index_file, 'w') as f: + f.write(unicode(json.dumps(result))) + + print('Texts indexed in file: {}'.format(index_file)) + return result +# [END def_index] + + +# [START def_split_labels] +def split_labels(categories): + """The category labels are of the form "/a/b/c" up to three levels, + for example "/Computers & Electronics/Software", and these labels + are used as keys in the categories dictionary, whose values are + confidence scores. + + The split_labels function splits the keys into individual levels + while duplicating the confidence score, which allows a natural + boost in how we calculate similarity when more levels are in common. + + Example: + If we have + + x = {"/a/b/c": 0.5} + y = {"/a/b": 0.5} + z = {"/a": 0.5} + + Then x and y are considered more similar than y and z. + """ + _categories = {} + for name, confidence in six.iteritems(categories): + labels = [label for label in name.split('/') if label] + for label in labels: + _categories[label] = confidence + + return _categories +# [END def_split_labels] + + +# [START def_similarity] +def similarity(categories1, categories2): + """Cosine similarity of the categories treated as sparse vectors.""" + categories1 = split_labels(categories1) + categories2 = split_labels(categories2) + + norm1 = numpy.linalg.norm(categories1.values()) + norm2 = numpy.linalg.norm(categories2.values()) + + # Return the smallest possible similarity if either categories is empty. + if norm1 == 0 or norm2 == 0: + return 0.0 + + # Compute the cosine similarity. + dot = 0.0 + for label, confidence in six.iteritems(categories1): + dot += confidence * categories2.get(label, 0.0) + + return dot / (norm1 * norm2) +# [END def_similarity] + + +# [START def_query] +def query(index_file, text, n_top=3): + """Find the indexed files that are the most similar to + the query text. + """ + + with io.open(index_file, 'r') as f: + index = json.load(f) + + # Get the categories of the query text. 
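+    # These take the same form as the per-file entries stored in the index,
+    # e.g. {'/Computers & Electronics': 0.91} (confidence value illustrative),
+    # so they can be compared against each indexed file with the cosine
+    # similarity defined above.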
+ query_categories = classify(text, verbose=False) + + similarities = [] + for filename, categories in six.iteritems(index): + similarities.append( + (filename, similarity(query_categories, categories))) + + similarities = sorted(similarities, key=lambda p: p[1], reverse=True) + + print('=' * 20) + print('Query: {}\n'.format(text)) + for category, confidence in six.iteritems(query_categories): + print('\tCategory: {}, confidence: {}'.format(category, confidence)) + print('\nMost similar {} indexed texts:'.format(n_top)) + for filename, sim in similarities[:n_top]: + print('\tFilename: {}'.format(filename)) + print('\tSimilarity: {}'.format(sim)) + print('\n') + + return similarities +# [END def_query] + + +# [START def_query_category] +def query_category(index_file, category_string, n_top=3): + """Find the indexed files that are the most similar to + the query label. + + The list of all available labels: + https://cloud.google.com/natural-language/docs/categories + """ + + with io.open(index_file, 'r') as f: + index = json.load(f) + + # Make the category_string into a dictionary so that it is + # of the same format as what we get by calling classify. + query_categories = {category_string: 1.0} + + similarities = [] + for filename, categories in six.iteritems(index): + similarities.append( + (filename, similarity(query_categories, categories))) + + similarities = sorted(similarities, key=lambda p: p[1], reverse=True) + + print('=' * 20) + print('Query: {}\n'.format(category_string)) + print('\nMost similar {} indexed texts:'.format(n_top)) + for filename, sim in similarities[:n_top]: + print('\tFilename: {}'.format(filename)) + print('\tSimilarity: {}'.format(sim)) + print('\n') + + return similarities +# [END def_query_category] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + classify_parser = subparsers.add_parser( + 'classify', help=classify.__doc__) + classify_parser.add_argument( + 'text', help='The text to be classified. 
' + 'The text needs to have at least 20 tokens.') + index_parser = subparsers.add_parser( + 'index', help=index.__doc__) + index_parser.add_argument( + 'path', help='The directory that contains ' + 'text files to be indexed.') + index_parser.add_argument( + '--index_file', help='Filename for the output JSON.', + default='index.json') + query_parser = subparsers.add_parser( + 'query', help=query.__doc__) + query_parser.add_argument( + 'index_file', help='Path to the index JSON file.') + query_parser.add_argument( + 'text', help='Query text.') + query_category_parser = subparsers.add_parser( + 'query-category', help=query_category.__doc__) + query_category_parser.add_argument( + 'index_file', help='Path to the index JSON file.') + query_category_parser.add_argument( + 'category', help='Query category.') + + args = parser.parse_args() + + if args.command == 'classify': + classify(args.text) + if args.command == 'index': + index(args.path, args.index_file) + if args.command == 'query': + query(args.index_file, args.text) + if args.command == 'query-category': + query_category(args.index_file, args.category) +# [END classify_text_tutorial] diff --git a/samples/snippets/classify_text/classify_text_tutorial_test.py b/samples/snippets/classify_text/classify_text_tutorial_test.py new file mode 100644 index 00000000..305cf53f --- /dev/null +++ b/samples/snippets/classify_text/classify_text_tutorial_test.py @@ -0,0 +1,90 @@ +# Copyright 2016, Google, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import classify_text_tutorial +import pytest + + +OUTPUT = 'index.json' +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +QUERY_TEXT = """Google Home enables users to speak voice commands to interact +with services through the Home\'s intelligent personal assistant called +Google Assistant. 
A large number of services, both in-house and third-party, +are integrated, allowing users to listen to music, look at videos or photos, +or receive news updates entirely by voice.""" +QUERY_CATEGORY = '/Computers & Electronics/Software' + + +@pytest.fixture(scope='session') +def index_file(tmpdir_factory): + temp_file = tmpdir_factory.mktemp('tmp').join(OUTPUT) + temp_out = temp_file.strpath + classify_text_tutorial.index(os.path.join(RESOURCES, 'texts'), temp_out) + return temp_file + + +def test_classify(capsys): + with open(os.path.join(RESOURCES, 'query_text1.txt'), 'r') as f: + text = f.read() + classify_text_tutorial.classify(text) + out, err = capsys.readouterr() + assert 'category' in out + + +def test_index(capsys, tmpdir): + temp_dir = tmpdir.mkdir('tmp') + temp_out = temp_dir.join(OUTPUT).strpath + + classify_text_tutorial.index(os.path.join(RESOURCES, 'texts'), temp_out) + out, err = capsys.readouterr() + + assert OUTPUT in out + assert len(temp_dir.listdir()) == 1 + + +def test_query_text(capsys, index_file): + temp_out = index_file.strpath + + classify_text_tutorial.query(temp_out, QUERY_TEXT) + out, err = capsys.readouterr() + + assert 'Filename: cloud_computing.txt' in out + + +def test_query_category(capsys, index_file): + temp_out = index_file.strpath + + classify_text_tutorial.query_category(temp_out, QUERY_CATEGORY) + out, err = capsys.readouterr() + + assert 'Filename: cloud_computing.txt' in out + + +def test_split_labels(): + categories = {'/a/b/c': 1.0} + split_categories = {'a': 1.0, 'b': 1.0, 'c': 1.0} + assert classify_text_tutorial.split_labels(categories) == split_categories + + +def test_similarity(): + empty_categories = {} + categories1 = {'/a/b/c': 1.0, '/d/e': 1.0} + categories2 = {'/a/b': 1.0} + + assert classify_text_tutorial.similarity( + empty_categories, categories1) == 0.0 + assert classify_text_tutorial.similarity(categories1, categories1) > 0.99 + assert classify_text_tutorial.similarity(categories1, categories2) > 0 + assert classify_text_tutorial.similarity(categories1, categories2) < 1 diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt new file mode 100644 index 00000000..10069f18 --- /dev/null +++ b/samples/snippets/classify_text/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-language==0.29.0 +numpy==1.13.1 diff --git a/samples/snippets/classify_text/resources/query_text1.txt b/samples/snippets/classify_text/resources/query_text1.txt new file mode 100644 index 00000000..30472730 --- /dev/null +++ b/samples/snippets/classify_text/resources/query_text1.txt @@ -0,0 +1 @@ +Google Home enables users to speak voice commands to interact with services through the Home's intelligent personal assistant called Google Assistant. A large number of services, both in-house and third-party, are integrated, allowing users to listen to music, look at videos or photos, or receive news updates entirely by voice. diff --git a/samples/snippets/classify_text/resources/query_text2.txt b/samples/snippets/classify_text/resources/query_text2.txt new file mode 100644 index 00000000..eef573c6 --- /dev/null +++ b/samples/snippets/classify_text/resources/query_text2.txt @@ -0,0 +1 @@ +The Hitchhiker's Guide to the Galaxy is the first of five books in the Hitchhiker's Guide to the Galaxy comedy science fiction "trilogy" by Douglas Adams (with the sixth written by Eoin Colfer). 
\ No newline at end of file diff --git a/samples/snippets/classify_text/resources/query_text3.txt b/samples/snippets/classify_text/resources/query_text3.txt new file mode 100644 index 00000000..1337d3c6 --- /dev/null +++ b/samples/snippets/classify_text/resources/query_text3.txt @@ -0,0 +1 @@ +Goodnight Moon is an American children's picture book written by Margaret Wise Brown and illustrated by Clement Hurd. It was published on September 3, 1947, and is a highly acclaimed example of a bedtime story. \ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/android.txt b/samples/snippets/classify_text/resources/texts/android.txt new file mode 100644 index 00000000..29dc1449 --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/android.txt @@ -0,0 +1 @@ +Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets. diff --git a/samples/snippets/classify_text/resources/texts/cat_in_the_hat.txt b/samples/snippets/classify_text/resources/texts/cat_in_the_hat.txt new file mode 100644 index 00000000..bb5a853c --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/cat_in_the_hat.txt @@ -0,0 +1 @@ +The Cat in the Hat is a children's book written and illustrated by Theodor Geisel under the pen name Dr. Seuss and first published in 1957. The story centers on a tall anthropomorphic cat, who wears a red and white-striped hat and a red bow tie. \ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/cloud_computing.txt b/samples/snippets/classify_text/resources/texts/cloud_computing.txt new file mode 100644 index 00000000..88172adf --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/cloud_computing.txt @@ -0,0 +1 @@ +Cloud computing is a computing-infrastructure and software model for enabling ubiquitous access to shared pools of configurable resources (such as computer networks, servers, storage, applications and services), which can be rapidly provisioned with minimal management effort, often over the Internet. \ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/eclipse.txt b/samples/snippets/classify_text/resources/texts/eclipse.txt new file mode 100644 index 00000000..5d16217e --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/eclipse.txt @@ -0,0 +1 @@ +A solar eclipse (as seen from the planet Earth) is a type of eclipse that occurs when the Moon passes between the Sun and Earth, and when the Moon fully or partially blocks (occults) the Sun. diff --git a/samples/snippets/classify_text/resources/texts/eclipse_of_the_sun.txt b/samples/snippets/classify_text/resources/texts/eclipse_of_the_sun.txt new file mode 100644 index 00000000..7236fc9d --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/eclipse_of_the_sun.txt @@ -0,0 +1 @@ +Eclipse of the Sun is the debut novel by English author Phil Whitaker. It won the 1997 John Llewellyn Rhys Prize a Betty Trask Award in 1998, and was shortlisted for the 1997 Whitbread First Novel Award. diff --git a/samples/snippets/classify_text/resources/texts/email.txt b/samples/snippets/classify_text/resources/texts/email.txt new file mode 100644 index 00000000..3d430527 --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/email.txt @@ -0,0 +1 @@ +Electronic mail (email or e-mail) is a method of exchanging messages between people using electronics. 
Email first entered substantial use in the 1960s and by the mid-1970s had taken the form now recognized as email. \ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/gcp.txt b/samples/snippets/classify_text/resources/texts/gcp.txt new file mode 100644 index 00000000..1ed09b2c --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/gcp.txt @@ -0,0 +1 @@ +Google Cloud Platform, offered by Google, is a suite of cloud computing services that runs on the same infrastructure that Google uses internally for its end-user products, such as Google Search and YouTube. Alongside a set of management tools, it provides a series of modular cloud services including computing, data storage, data analytics and machine learning. diff --git a/samples/snippets/classify_text/resources/texts/gmail.txt b/samples/snippets/classify_text/resources/texts/gmail.txt new file mode 100644 index 00000000..89c9704b --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/gmail.txt @@ -0,0 +1 @@ +Gmail is a free, advertising-supported email service developed by Google. Users can access Gmail on the web and through mobile apps for Android and iOS, as well as through third-party programs that synchronize email content through POP or IMAP protocols. \ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/google.txt b/samples/snippets/classify_text/resources/texts/google.txt new file mode 100644 index 00000000..06828635 --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/google.txt @@ -0,0 +1 @@ +Google is an American multinational technology company that specializes in Internet-related services and products. These include online advertising technologies, search, cloud computing, software, and hardware. diff --git a/samples/snippets/classify_text/resources/texts/harry_potter.txt b/samples/snippets/classify_text/resources/texts/harry_potter.txt new file mode 100644 index 00000000..339c10af --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/harry_potter.txt @@ -0,0 +1 @@ +Harry Potter is a series of fantasy novels written by British author J. K. Rowling. The novels chronicle the life of a young wizard, Harry Potter, and his friends Hermione Granger and Ron Weasley, all of whom are students at Hogwarts School of Witchcraft and Wizardry. \ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/matilda.txt b/samples/snippets/classify_text/resources/texts/matilda.txt new file mode 100644 index 00000000..e1539d7e --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/matilda.txt @@ -0,0 +1 @@ +Matilda is a book by British writer Roald Dahl. Matilda won the Children's Book Award in 1999. It was published in 1988 by Jonathan Cape in London, with 232 pages and illustrations by Quentin Blake. \ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/mobile_phone.txt b/samples/snippets/classify_text/resources/texts/mobile_phone.txt new file mode 100644 index 00000000..725e22ef --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/mobile_phone.txt @@ -0,0 +1 @@ +A mobile phone is a portable device that can make and receive calls over a radio frequency link while the user is moving within a telephone service area. The radio frequency link establishes a connection to the switching systems of a mobile phone operator, which provides access to the public switched telephone network (PSTN). 
\ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/mr_fox.txt b/samples/snippets/classify_text/resources/texts/mr_fox.txt new file mode 100644 index 00000000..354feced --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/mr_fox.txt @@ -0,0 +1 @@ +Fantastic Mr Fox is a children's novel written by British author Roald Dahl. It was published in 1970, by George Allen & Unwin in the UK and Alfred A. Knopf in the U.S., with illustrations by Donald Chaffin. \ No newline at end of file diff --git a/samples/snippets/classify_text/resources/texts/wireless.txt b/samples/snippets/classify_text/resources/texts/wireless.txt new file mode 100644 index 00000000..d742331c --- /dev/null +++ b/samples/snippets/classify_text/resources/texts/wireless.txt @@ -0,0 +1 @@ +Wireless communication, or sometimes simply wireless, is the transfer of information or power between two or more points that are not connected by an electrical conductor. The most common wireless technologies use radio waves. \ No newline at end of file From c301ba4e00dece9cd29737eb1e101c6cf9adc8b0 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 21 Sep 2017 13:40:34 -0700 Subject: [PATCH 110/209] Auto-update dependencies. [(#1133)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1133) * Auto-update dependencies. * Fix missing http library Change-Id: I99faa600f2f3f1f50f57694fc9835d7f35bda250 --- samples/snippets/api/requirements.txt | 4 +++- samples/snippets/movie_nl/requirements.txt | 4 +++- samples/snippets/ocr_nl/requirements.txt | 4 +++- samples/snippets/syntax_triples/requirements.txt | 4 +++- samples/snippets/tutorial/requirements.txt | 4 +++- 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 28ef8912..af5ec814 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1 +1,3 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 69ae9a19..e9b67954 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,2 +1,4 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 28ef8912..af5ec814 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1 +1,3 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 28ef8912..af5ec814 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1 +1,3 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 +google-auth==1.1.1 +google-auth-httplib2==0.0.2 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index 28ef8912..af5ec814 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1 +1,3 @@ -google-api-python-client==1.6.3 +google-api-python-client==1.6.4 
+google-auth==1.1.1 +google-auth-httplib2==0.0.2 From 83acbcfeb1583126fa3c17bc132168414f36cd2e Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 12 Oct 2017 10:16:11 -0700 Subject: [PATCH 111/209] Added Link to Python Setup Guide [(#1158)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1158) * Update Readme.rst to add Python setup guide As requested in b/64770713. This sample is linked in documentation https://cloud.google.com/bigtable/docs/scaling, and it would make more sense to update the guide here than in the documentation. * Update README.rst * Update README.rst * Update README.rst * Update README.rst * Update README.rst * Update install_deps.tmpl.rst * Updated readmegen scripts and re-generated related README files * Fixed the lint error --- samples/snippets/api/README.rst | 5 ++++- samples/snippets/cloud-client/v1/README.rst | 5 ++++- samples/snippets/cloud-client/v1beta2/README.rst | 5 ++++- samples/snippets/tutorial/README.rst | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst index c975769a..e97059a3 100644 --- a/samples/snippets/api/README.rst +++ b/samples/snippets/api/README.rst @@ -27,7 +27,10 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index 8640369e..cf4c07d2 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -31,7 +31,10 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index dc3b85c4..f2ec309a 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -31,7 +31,10 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. 
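For reference, the virtualenv workflow these README updates describe reduces to the
following commands; this is a minimal sketch that assumes ``pip`` and ``virtualenv``
are already installed and that the sample directory provides a ``requirements.txt``:

.. code-block:: bash

    $ virtualenv env
    $ source env/bin/activate
    $ pip install -r requirements.txt

These are the same steps spelled out in each generated README's
"Install Dependencies" section.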
diff --git a/samples/snippets/tutorial/README.rst b/samples/snippets/tutorial/README.rst index f1ea6ad9..651b219c 100644 --- a/samples/snippets/tutorial/README.rst +++ b/samples/snippets/tutorial/README.rst @@ -27,7 +27,10 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. From 6bed17a47dec11c316328a6ed2e30c67f38de7fd Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 23 Oct 2017 14:23:30 -0700 Subject: [PATCH 112/209] Auto-update dependencies. [(#1138)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1138) --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 10069f18..05ff98e1 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==0.29.0 -numpy==1.13.1 +numpy==1.13.3 From d8f0501900b9fa9907cf1d5b0a25d8cdffd2eb56 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 24 Oct 2017 11:28:26 -0700 Subject: [PATCH 113/209] Fix classify text tutorial Change-Id: I7d133862f2e9305c978ec6fb4c8168640f3d08ed --- samples/snippets/classify_text/classify_text_tutorial.py | 4 ++-- samples/snippets/classify_text/classify_text_tutorial_test.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/samples/snippets/classify_text/classify_text_tutorial.py b/samples/snippets/classify_text/classify_text_tutorial.py index 08a03e98..b5358b65 100644 --- a/samples/snippets/classify_text/classify_text_tutorial.py +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -86,11 +86,11 @@ def index(path, index_file): categories = classify(text, verbose=False) result[filename] = categories - except: + except Exception: print('Failed to process {}'.format(file_path)) with io.open(index_file, 'w') as f: - f.write(unicode(json.dumps(result))) + f.write(json.dumps(result).encode('utf-8')) print('Texts indexed in file: {}'.format(index_file)) return result diff --git a/samples/snippets/classify_text/classify_text_tutorial_test.py b/samples/snippets/classify_text/classify_text_tutorial_test.py index 305cf53f..28de0562 100644 --- a/samples/snippets/classify_text/classify_text_tutorial_test.py +++ b/samples/snippets/classify_text/classify_text_tutorial_test.py @@ -13,9 +13,10 @@ import os -import classify_text_tutorial import pytest +import classify_text_tutorial + OUTPUT = 'index.json' RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') From d4043e34a86e46d8b22a34242a32f9fc5a9af368 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 24 Oct 2017 12:07:18 -0700 Subject: [PATCH 114/209] Fix classify text tutorial Change-Id: Ib86df7cf37588b7a7fc0c7f4ad4fc70548152354 --- samples/snippets/classify_text/classify_text_tutorial.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/classify_text/classify_text_tutorial.py 
b/samples/snippets/classify_text/classify_text_tutorial.py index b5358b65..1e1605e1 100644 --- a/samples/snippets/classify_text/classify_text_tutorial.py +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -89,8 +89,8 @@ def index(path, index_file): except Exception: print('Failed to process {}'.format(file_path)) - with io.open(index_file, 'w') as f: - f.write(json.dumps(result).encode('utf-8')) + with io.open(index_file, 'w', encoding='utf-8') as f: + f.write(json.dumps(result)) print('Texts indexed in file: {}'.format(index_file)) return result From 91b2eb7c6efa309c2375b08f6cb5f7f56eb98bd0 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 24 Oct 2017 12:14:35 -0700 Subject: [PATCH 115/209] Fix a few more lint issues Change-Id: I0d420f3053f391fa225e4b8179e45fd1138f5c65 --- samples/snippets/movie_nl/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/movie_nl/main.py b/samples/snippets/movie_nl/main.py index 73e62488..06be1c9c 100644 --- a/samples/snippets/movie_nl/main.py +++ b/samples/snippets/movie_nl/main.py @@ -128,7 +128,7 @@ def get_wiki_title(wiki_url): try: content = requests.get(wiki_url).text return content.split('title')[1].split('-')[0].split('>')[1].strip() - except: + except KeyError: return os.path.basename(wiki_url).replace('_', ' ') From 19192c134a387f910e69e48c2eb2112c5928b85c Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Wed, 1 Nov 2017 12:29:37 -0700 Subject: [PATCH 116/209] Fixed failed tests on Kokoro (Natural Language API) [(#1185)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1185) * Add Snippet for Listing All Subscriptions in a Project * Fixed the failed tests on Kokoro classify_text_tutorial_test.py::test_query_text classify_text_tutorial_test.py::test_query_category --- samples/snippets/classify_text/classify_text_tutorial.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/classify_text/classify_text_tutorial.py b/samples/snippets/classify_text/classify_text_tutorial.py index 1e1605e1..5d793f50 100644 --- a/samples/snippets/classify_text/classify_text_tutorial.py +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -133,8 +133,8 @@ def similarity(categories1, categories2): categories1 = split_labels(categories1) categories2 = split_labels(categories2) - norm1 = numpy.linalg.norm(categories1.values()) - norm2 = numpy.linalg.norm(categories2.values()) + norm1 = numpy.linalg.norm(list(categories1.values())) + norm2 = numpy.linalg.norm(list(categories2.values())) # Return the smallest possible similarity if either categories is empty. if norm1 == 0 or norm2 == 0: From ab1e87c9ae3d1baee82613f398e729af34dc0bbf Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 1 Nov 2017 12:30:10 -0700 Subject: [PATCH 117/209] Auto-update dependencies. 
[(#1186)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1186) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/classify_text/requirements.txt | 2 +- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index af5ec814..558e42c2 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 05ff98e1..b6929a06 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==0.29.0 +google-cloud-language==0.30.0 numpy==1.13.3 diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index afc8ed0a..39d8fe98 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.29.0 +google-cloud-language==0.30.0 diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index afc8ed0a..39d8fe98 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.29.0 +google-cloud-language==0.30.0 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index e9b67954..46c981d5 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index af5ec814..558e42c2 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index afc8ed0a..39d8fe98 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.29.0 +google-cloud-language==0.30.0 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index af5ec814..558e42c2 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index af5ec814..558e42c2 100644 --- 
a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.1.1 +google-auth==1.2.0 google-auth-httplib2==0.0.2 From 3bd32939c63c3c3cd4ed59ef8a21a166007ec251 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 6 Nov 2017 10:44:14 -0800 Subject: [PATCH 118/209] Auto-update dependencies. [(#1199)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1199) * Auto-update dependencies. * Fix iot lint Change-Id: I6289e093bdb35e38f9e9bfc3fbc3df3660f9a67e --- samples/snippets/classify_text/requirements.txt | 2 +- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index b6929a06..a6cd6d6c 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==0.30.0 +google-cloud-language==0.31.0 numpy==1.13.3 diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 39d8fe98..5a82efc9 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.30.0 +google-cloud-language==0.31.0 diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index 39d8fe98..5a82efc9 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.30.0 +google-cloud-language==0.31.0 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 39d8fe98..5a82efc9 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.30.0 +google-cloud-language==0.31.0 From 5a9d437545eaeb9f58ec8bd8d2e3560cca01db64 Mon Sep 17 00:00:00 2001 From: Andrew Gorcester Date: Thu, 9 Nov 2017 09:28:41 -0800 Subject: [PATCH 119/209] Fix linter issue w/ snippets --- samples/snippets/cloud-client/v1/snippets.py | 1 - 1 file changed, 1 deletion(-) diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index e13fc7dd..2b754ace 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -27,7 +27,6 @@ from google.cloud import language from google.cloud.language import enums from google.cloud.language import types - import six From 37f52974483a1a6e383aba9c2954f19988ddbbda Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 9 Nov 2017 14:45:13 -0800 Subject: [PATCH 120/209] Auto-update dependencies. 
[(#1208)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1208) --- samples/snippets/classify_text/requirements.txt | 2 +- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index a6cd6d6c..701a5342 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==0.31.0 +google-cloud-language==1.0.0 numpy==1.13.3 diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 5a82efc9..b5848a34 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.31.0 +google-cloud-language==1.0.0 diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index 5a82efc9..b5848a34 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.31.0 +google-cloud-language==1.0.0 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 5a82efc9..b5848a34 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==0.31.0 +google-cloud-language==1.0.0 From e6a519660bb60c75e2235bcdf6ceee9bd7830533 Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Mon, 13 Nov 2017 09:21:46 -0800 Subject: [PATCH 121/209] Language v1 [(#1202)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1202) * copy classify_test samples and tests to v1 * flake * client library version --- samples/snippets/cloud-client/v1/snippets.py | 53 +++++++++++++++++++ .../snippets/cloud-client/v1/snippets_test.py | 18 +++++++ 2 files changed, 71 insertions(+) diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 2b754ace..30b591a4 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -253,12 +253,61 @@ def entity_sentiment_file(gcs_uri): print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [START def_classify_text] +def classify_text(text): + """Classifies content categories of the provided text.""" + client = language.LanguageServiceClient() + + if isinstance(text, six.binary_type): + text = text.decode('utf-8') + + document = types.Document( + content=text.encode('utf-8'), + type=enums.Document.Type.PLAIN_TEXT) + + categories = client.classify_text(document).categories + + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) +# [END def_classify_text] + + +# [START def_classify_file] +def classify_file(gcs_uri): + """Classifies content categories of the text in a Google Cloud Storage + file. 
+ """ + client = language.LanguageServiceClient() + + document = types.Document( + gcs_content_uri=gcs_uri, + type=enums.Document.Type.PLAIN_TEXT) + + categories = client.classify_text(document).categories + + for category in categories: + print(u'=' * 20) + print(u'{:<16}: {}'.format('name', category.name)) + print(u'{:<16}: {}'.format('confidence', category.confidence)) +# [END def_classify_file] + + if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) subparsers = parser.add_subparsers(dest='command') + classify_text_parser = subparsers.add_parser( + 'classify-text', help=classify_text.__doc__) + classify_text_parser.add_argument('text') + + classify_text_parser = subparsers.add_parser( + 'classify-file', help=classify_file.__doc__) + classify_text_parser.add_argument('gcs_uri') + sentiment_entities_text_parser = subparsers.add_parser( 'sentiment-entities-text', help=entity_sentiment_text.__doc__) sentiment_entities_text_parser.add_argument('text') @@ -309,3 +358,7 @@ def entity_sentiment_file(gcs_uri): entity_sentiment_text(args.text) elif args.command == 'sentiment-entities-file': entity_sentiment_file(args.gcs_uri) + elif args.command == 'classify-text': + classify_text(args.text) + elif args.command == 'classify-file': + classify_file(args.gcs_uri) diff --git a/samples/snippets/cloud-client/v1/snippets_test.py b/samples/snippets/cloud-client/v1/snippets_test.py index 168701dc..27fbee24 100644 --- a/samples/snippets/cloud-client/v1/snippets_test.py +++ b/samples/snippets/cloud-client/v1/snippets_test.py @@ -19,6 +19,7 @@ BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) +LONG_TEST_FILE_URL = 'gs://{}/android_text.txt'.format(BUCKET) def test_sentiment_text(capsys): @@ -77,3 +78,20 @@ def test_sentiment_entities_utf(capsys): 'foo→bar') out, _ = capsys.readouterr() assert 'Begin Offset : 4' in out + + +def test_classify_text(capsys): + snippets.classify_text( + 'Android is a mobile operating system developed by Google, ' + 'based on the Linux kernel and designed primarily for touchscreen ' + 'mobile devices such as smartphones and tablets.') + out, _ = capsys.readouterr() + assert 'name' in out + assert '/Computers & Electronics' in out + + +def test_classify_file(capsys): + snippets.classify_file(LONG_TEST_FILE_URL) + out, _ = capsys.readouterr() + assert 'name' in out + assert '/Computers & Electronics' in out From 03897e3be91b7e382470a1f2f2a8b22755a3d68c Mon Sep 17 00:00:00 2001 From: Yu-Han Liu Date: Tue, 14 Nov 2017 09:16:36 -0800 Subject: [PATCH 122/209] update to use v1 client [(#1216)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1216) * update to use v1 client * set ensure_ascii=False --- .../classify_text/classify_text_tutorial.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/samples/snippets/classify_text/classify_text_tutorial.py b/samples/snippets/classify_text/classify_text_tutorial.py index 5d793f50..1ac9e0ac 100644 --- a/samples/snippets/classify_text/classify_text_tutorial.py +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -27,10 +27,7 @@ import json import os -from google.cloud import language_v1beta2 -from google.cloud.language_v1beta2 import enums -from google.cloud.language_v1beta2 import types - +from google.cloud import language import numpy import six # [END classify_text_tutorial_import] @@ -40,11 +37,11 @@ def classify(text, 
verbose=True): """Classify the input text into categories. """ - language_client = language_v1beta2.LanguageServiceClient() + language_client = language.LanguageServiceClient() - document = types.Document( + document = language.types.Document( content=text, - type=enums.Document.Type.PLAIN_TEXT) + type=language.enums.Document.Type.PLAIN_TEXT) response = language_client.classify_text(document) categories = response.categories @@ -90,7 +87,7 @@ def index(path, index_file): print('Failed to process {}'.format(file_path)) with io.open(index_file, 'w', encoding='utf-8') as f: - f.write(json.dumps(result)) + f.write(json.dumps(result, ensure_ascii=False)) print('Texts indexed in file: {}'.format(index_file)) return result From ede2290093dc92a904393cfbfa978f83d64a77ff Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 15 Nov 2017 12:18:33 -0800 Subject: [PATCH 123/209] Auto-update dependencies. [(#1217)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1217) --- samples/snippets/api/requirements.txt | 4 ++-- samples/snippets/movie_nl/requirements.txt | 4 ++-- samples/snippets/ocr_nl/requirements.txt | 4 ++-- samples/snippets/syntax_triples/requirements.txt | 4 ++-- samples/snippets/tutorial/requirements.txt | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 558e42c2..edd6472f 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 46c981d5..02496079 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 558e42c2..edd6472f 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 558e42c2..edd6472f 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index 558e42c2..edd6472f 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.0 -google-auth-httplib2==0.0.2 +google-auth==1.2.1 +google-auth-httplib2==0.0.3 From 1dfdbed5bd3d2d39f02c62d52bd331bb98bde4da Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 6 Dec 2017 14:27:58 -0800 Subject: [PATCH 124/209] Relax regex in ocr_nl sample test [(#1250)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1250) --- 
samples/snippets/ocr_nl/main_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/ocr_nl/main_test.py b/samples/snippets/ocr_nl/main_test.py index e4bf2b51..5a8f72f2 100755 --- a/samples/snippets/ocr_nl/main_test.py +++ b/samples/snippets/ocr_nl/main_test.py @@ -97,4 +97,4 @@ def test_main(tmpdir, capsys): stdout, _ = capsys.readouterr() - assert re.search(r'google was found with count', stdout) + assert re.search(r'.* found with count', stdout) From a944ec44f42aa83277d402759728bd8c51f5f57e Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 7 Dec 2017 10:34:29 -0800 Subject: [PATCH 125/209] Added "Open in Cloud Shell" buttons to README files [(#1254)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1254) --- samples/snippets/README.md | 5 ++ samples/snippets/api/README.rst | 15 ++++- samples/snippets/api/README.rst.in | 2 + samples/snippets/classify_text/README.rst | 58 +++++++++---------- samples/snippets/classify_text/README.rst.in | 2 + samples/snippets/cloud-client/v1/README.rst | 28 +++++++-- .../snippets/cloud-client/v1/README.rst.in | 2 + .../snippets/cloud-client/v1beta2/README.rst | 21 +++++-- .../cloud-client/v1beta2/README.rst.in | 2 + samples/snippets/movie_nl/README.md | 5 ++ samples/snippets/ocr_nl/README.md | 5 ++ samples/snippets/sentiment/README.md | 5 ++ samples/snippets/syntax_triples/README.md | 5 ++ samples/snippets/tutorial/README.rst | 13 ++++- samples/snippets/tutorial/README.rst.in | 2 + 15 files changed, 123 insertions(+), 47 deletions(-) diff --git a/samples/snippets/README.md b/samples/snippets/README.md index 1e4a6401..d0ba5691 100644 --- a/samples/snippets/README.md +++ b/samples/snippets/README.md @@ -1,5 +1,10 @@ # Google Cloud Natural Language API examples +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/README.md + This directory contains Python examples that use the [Google Cloud Natural Language API](https://cloud.google.com/natural-language/). diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst index e97059a3..7434de16 100644 --- a/samples/snippets/api/README.rst +++ b/samples/snippets/api/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language API Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/api/README.rst + + This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. @@ -54,6 +58,10 @@ Samples Analyze syntax +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/api/analyze.py;language/api/README.rst + + To run this sample: @@ -63,17 +71,18 @@ To run this sample: $ python analyze.py usage: analyze.py [-h] {entities,sentiment,syntax} text - + Analyzes text using the Google Cloud Natural Language API. - + positional arguments: {entities,sentiment,syntax} text - + optional arguments: -h, --help show this help message and exit + .. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/api/README.rst.in b/samples/snippets/api/README.rst.in index 31294fae..f3195edf 100644 --- a/samples/snippets/api/README.rst.in +++ b/samples/snippets/api/README.rst.in @@ -18,3 +18,5 @@ samples: - name: Analyze syntax file: analyze.py show_help: true + +folder: language/api \ No newline at end of file diff --git a/samples/snippets/classify_text/README.rst b/samples/snippets/classify_text/README.rst index 0a61591b..2857e031 100644 --- a/samples/snippets/classify_text/README.rst +++ b/samples/snippets/classify_text/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language API Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/classify_text/README.rst + + This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers. This tutorial demostrates how to use the `classify_text` method to classify content category of text files, and use the result to compare texts by their similarity to each other. See the `tutorial page`_ for details about this sample. @@ -21,39 +25,20 @@ Setup Authentication ++++++++++++++ -Authentication is typically done through `Application Default Credentials`_, -which means you do not have to change the code to authenticate as long as -your environment has credentials. You have a few options for setting up -authentication: - -#. When running locally, use the `Google Cloud SDK`_ - - .. code-block:: bash - - gcloud auth application-default login - - -#. When running on App Engine or Compute Engine, credentials are already - set-up. However, you may need to configure your Compute Engine instance - with `additional scopes`_. - -#. You can create a `Service Account key file`_. This file can be used to - authenticate to Google Cloud Platform services from any environment. To use - the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to - the path to the key file, for example: - - .. code-block:: bash - - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. -.. 
_Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow -.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using -.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started Install Dependencies ++++++++++++++++++++ -#. Install `pip`_ and `virtualenv`_ if you do not already have them. +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. @@ -77,6 +62,10 @@ Samples Classify Text Tutorial +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/classify_text/classify_text_tutorial.py;language/classify_text/README.rst + + To run this sample: @@ -87,9 +76,13 @@ To run this sample: usage: classify_text_tutorial.py [-h] {classify,index,query,query-category} ... - - Using the classify_text method to cluster texts. - + + Using the classify_text method to find content categories of text files, + Then use the content category labels to compare text similarity. + + For more information, see the tutorial page at + https://cloud.google.com/natural-language/docs/classify-text-tutorial. + positional arguments: {classify,index,query,query-category} classify Classify the input text into categories. @@ -101,13 +94,14 @@ To run this sample: the query label. The list of all available labels: https://cloud.google.com/natural- language/docs/categories - + optional arguments: -h, --help show this help message and exit + The client library ------------------------------------------------------------------------------- diff --git a/samples/snippets/classify_text/README.rst.in b/samples/snippets/classify_text/README.rst.in index 42e8f061..14ee6dc9 100644 --- a/samples/snippets/classify_text/README.rst.in +++ b/samples/snippets/classify_text/README.rst.in @@ -24,3 +24,5 @@ samples: show_help: true cloud_client_library: true + +folder: language/classify_text \ No newline at end of file diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index cf4c07d2..2e93e9af 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language API Python Samples =============================================================================== +.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/README.rst + + This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. - See the `migration guide`_ for information about migrating to Python client library v0.26.1. @@ -58,6 +62,10 @@ Samples Quickstart +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/quickstart.py;language/cloud-client/v1/README.rst + + To run this sample: @@ -70,6 +78,10 @@ To run this sample: Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/snippets.py;language/cloud-client/v1/README.rst + + To run this sample: @@ -79,17 +91,20 @@ To run this sample: $ python snippets.py usage: snippets.py [-h] - {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} ... - + This application demonstrates how to perform basic operations with the Google Cloud Natural Language API - + For more information, the documentation at https://cloud.google.com/natural-language/docs. - + positional arguments: - {sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} + classify-text Classifies content categories of the provided text. + classify-file Classifies content categories of the text in a Google + Cloud Storage file. sentiment-entities-text Detects entity sentiment in the provided text. sentiment-entities-file @@ -104,13 +119,14 @@ To run this sample: syntax-text Detects syntax in the text. syntax-file Detects syntax in the file located in Google Cloud Storage. 
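The new ``classify-text`` command listed above wraps the ``classify_text`` method of the ``google-cloud-language`` 1.x client that these v1 snippets are built on. A minimal sketch of that call, assuming the 1.x ``enums``/``types`` module layout and an illustrative ``sample_text`` rather than the repository's own snippet:

.. code-block:: python

    from google.cloud import language
    from google.cloud.language import enums
    from google.cloud.language import types


    def classify(text):
        """Classify the content categories of the provided text."""
        client = language.LanguageServiceClient()
        document = types.Document(
            content=text, type=enums.Document.Type.PLAIN_TEXT
        )
        categories = client.classify_text(document).categories
        for category in categories:
            # Category names look like "/Computers & Electronics/Software";
            # confidence is a score between 0 and 1.
            print(u"Category: {} (confidence {:.2f})".format(
                category.name, category.confidence))


    # classify_text needs a reasonably long input (roughly twenty tokens or
    # more), so the illustrative text is a couple of full sentences.
    sample_text = (
        "Python is an interpreted, high-level programming language. "
        "Its design philosophy emphasizes code readability, and its "
        "ecosystem includes libraries for web development, data analysis, "
        "machine learning and natural language processing."
    )
    classify(sample_text)

The other commands in the list (sentiment, entities, syntax) follow the same pattern: wrap the text in a ``Document`` and call the matching client method (``analyze_sentiment``, ``analyze_entities``, ``analyze_syntax``).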
- + optional arguments: -h, --help show this help message and exit + The client library ------------------------------------------------------------------------------- diff --git a/samples/snippets/cloud-client/v1/README.rst.in b/samples/snippets/cloud-client/v1/README.rst.in index 1b4855fb..06b7ff3e 100644 --- a/samples/snippets/cloud-client/v1/README.rst.in +++ b/samples/snippets/cloud-client/v1/README.rst.in @@ -28,3 +28,5 @@ samples: show_help: true cloud_client_library: true + +folder: language/cloud-client/v1 \ No newline at end of file diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index f2ec309a..aa4ce452 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language API Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/README.rst + + This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. - See the `migration guide`_ for information about migrating to Python client library v0.26.1. @@ -58,6 +62,10 @@ Samples Quickstart +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/quickstart.py;language/cloud-client/v1beta2/README.rst + + To run this sample: @@ -70,6 +78,10 @@ To run this sample: Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/snippets.py;language/cloud-client/v1beta2/README.rst + + To run this sample: @@ -81,13 +93,13 @@ To run this sample: usage: snippets.py [-h] {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} ... - + This application demonstrates how to perform basic operations with the Google Cloud Natural Language API - + For more information, the documentation at https://cloud.google.com/natural-language/docs. 
- + positional arguments: {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} classify-text Classifies content categories of the provided text. @@ -107,13 +119,14 @@ To run this sample: syntax-text Detects syntax in the text. syntax-file Detects syntax in the file located in Google Cloud Storage. - + optional arguments: -h, --help show this help message and exit + The client library ------------------------------------------------------------------------------- diff --git a/samples/snippets/cloud-client/v1beta2/README.rst.in b/samples/snippets/cloud-client/v1beta2/README.rst.in index 1b4855fb..d1166745 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst.in +++ b/samples/snippets/cloud-client/v1beta2/README.rst.in @@ -28,3 +28,5 @@ samples: show_help: true cloud_client_library: true + +folder: language/cloud-client/v1beta2 \ No newline at end of file diff --git a/samples/snippets/movie_nl/README.md b/samples/snippets/movie_nl/README.md index 687a6c40..95c05dbb 100644 --- a/samples/snippets/movie_nl/README.md +++ b/samples/snippets/movie_nl/README.md @@ -1,4 +1,9 @@ # Introduction + +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/movie_nl/README.md This sample is an application of the Google Cloud Platform Natural Language API. It uses the [imdb movie reviews data set](https://www.cs.cornell.edu/people/pabo/movie-review-data/) from [Cornell University](http://www.cs.cornell.edu/) and performs sentiment & entity diff --git a/samples/snippets/ocr_nl/README.md b/samples/snippets/ocr_nl/README.md index 189e9397..a34ff317 100644 --- a/samples/snippets/ocr_nl/README.md +++ b/samples/snippets/ocr_nl/README.md @@ -1,4 +1,9 @@ + +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/ocr_nl/README.md # Using the Cloud Natural Language API to analyze image text found with Cloud Vision This example uses the [Cloud Vision API](https://cloud.google.com/vision/) to diff --git a/samples/snippets/sentiment/README.md b/samples/snippets/sentiment/README.md index 95562993..313817ef 100644 --- a/samples/snippets/sentiment/README.md +++ b/samples/snippets/sentiment/README.md @@ -1,5 +1,10 @@ # Introduction +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/sentiment/README.md + This sample contains the code referenced in the [Sentiment Analysis 
Tutorial](http://cloud.google.com/natural-language/docs/sentiment-tutorial) within the Google Cloud Natural Language API Documentation. A full walkthrough of this sample diff --git a/samples/snippets/syntax_triples/README.md b/samples/snippets/syntax_triples/README.md index 1342ee65..551057e7 100644 --- a/samples/snippets/syntax_triples/README.md +++ b/samples/snippets/syntax_triples/README.md @@ -1,5 +1,10 @@ # Using the Cloud Natural Language API to find subject-verb-object triples in text +[![Open in Cloud Shell][shell_img]][shell_link] + +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/syntax_triples/README.md + This example finds subject-verb-object triples in a given piece of text using syntax analysis capabilities of [Cloud Natural Language API](https://cloud.google.com/natural-language/). diff --git a/samples/snippets/tutorial/README.rst b/samples/snippets/tutorial/README.rst index 651b219c..202381a6 100644 --- a/samples/snippets/tutorial/README.rst +++ b/samples/snippets/tutorial/README.rst @@ -3,6 +3,10 @@ Google Cloud Natural Language Tutorial Python Samples =============================================================================== +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/README.rst + + This directory contains samples for Google Cloud Natural Language Tutorial. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. @@ -54,6 +58,10 @@ Samples Language tutorial +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/tutorial.py;language/tutorial/README.rst + + To run this sample: @@ -63,16 +71,17 @@ To run this sample: $ python tutorial.py usage: tutorial.py [-h] movie_review_filename - + positional arguments: movie_review_filename The filename of the movie review you'd like to analyze. - + optional arguments: -h, --help show this help message and exit + .. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/tutorial/README.rst.in b/samples/snippets/tutorial/README.rst.in index aea593b2..945c701e 100644 --- a/samples/snippets/tutorial/README.rst.in +++ b/samples/snippets/tutorial/README.rst.in @@ -18,3 +18,5 @@ samples: - name: Language tutorial file: tutorial.py show_help: true + +folder: language/tutorial \ No newline at end of file From 0c35b226dd295384144db7410bc90051ea3a7548 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 8 Jan 2018 08:45:17 -0800 Subject: [PATCH 126/209] Auto-update dependencies. [(#1304)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1304) --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 701a5342..76c6d732 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.0.0 -numpy==1.13.3 +numpy==1.14.0 From 5217007d0f0ae49485676e12dae79b91ffa04d40 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 10 Jan 2018 09:07:00 -0800 Subject: [PATCH 127/209] Auto-update dependencies. [(#1309)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1309) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index edd6472f..8bb83f80 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 02496079..82804723 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index edd6472f..8bb83f80 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index edd6472f..8bb83f80 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index edd6472f..8bb83f80 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.4 -google-auth==1.2.1 +google-auth==1.3.0 google-auth-httplib2==0.0.3 From 
47876305447d57ea8ee50373c89335c079f67564 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 1 Feb 2018 22:20:35 -0800 Subject: [PATCH 128/209] Auto-update dependencies. [(#1320)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1320) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 8bb83f80..4bafec30 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 82804723..60f1bb0a 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 8bb83f80..4bafec30 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 8bb83f80..4bafec30 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index 8bb83f80..4bafec30 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.4 +google-api-python-client==1.6.5 google-auth==1.3.0 google-auth-httplib2==0.0.3 From 7b7cbb459aba978b3264de0a77720a4b7b590e3f Mon Sep 17 00:00:00 2001 From: DPE bot Date: Fri, 9 Feb 2018 10:46:48 -0800 Subject: [PATCH 129/209] Auto-update dependencies. 
[(#1355)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1355) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 4bafec30..35784043 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 60f1bb0a..07059ddc 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 4bafec30..35784043 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 4bafec30..35784043 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index 4bafec30..35784043 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.3.0 +google-auth==1.4.0 google-auth-httplib2==0.0.3 From f51d1206c566a6d0003d139c2714670a8d5ca4df Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 26 Feb 2018 09:03:37 -0800 Subject: [PATCH 130/209] Auto-update dependencies. 
[(#1359)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1359) --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/classify_text/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 35784043..500e732f 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 76c6d732..da0faa99 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.0.0 -numpy==1.14.0 +numpy==1.14.1 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 07059ddc..06bc7afa 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 35784043..500e732f 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 35784043..500e732f 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index 35784043..500e732f 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.6.5 -google-auth==1.4.0 +google-auth==1.4.1 google-auth-httplib2==0.0.3 From ef488d3e35a80e81bca1ddd18941248e688e0a48 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 5 Mar 2018 12:28:55 -0800 Subject: [PATCH 131/209] Auto-update dependencies. [(#1377)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1377) * Auto-update dependencies. 
* Update requirements.txt --- samples/snippets/classify_text/requirements.txt | 2 +- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index da0faa99..c858e7a8 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.0.0 +google-cloud-language==1.0.1 numpy==1.14.1 diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index b5848a34..5085e2cd 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.0 +google-cloud-language==1.0.1 diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index b5848a34..5085e2cd 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.0 +google-cloud-language==1.0.1 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index b5848a34..5085e2cd 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.0 +google-cloud-language==1.0.1 From aabcf83bd1e925e904f178ad2bb2a44099368e99 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 13 Mar 2018 09:01:09 -0700 Subject: [PATCH 132/209] Auto-update dependencies. [(#1397)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1397) --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index c858e7a8..5b7339a0 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.0.1 -numpy==1.14.1 +numpy==1.14.2 From a19d05e050537f806ec8c164da94686e0f52f5fa Mon Sep 17 00:00:00 2001 From: DPE bot Date: Mon, 2 Apr 2018 02:51:10 -0700 Subject: [PATCH 133/209] Auto-update dependencies. 
--- samples/snippets/api/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 2 +- samples/snippets/ocr_nl/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 2 +- samples/snippets/tutorial/requirements.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 500e732f..e5f3a6c5 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index 06bc7afa..cbe4d142 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 requests==2.18.4 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index 500e732f..e5f3a6c5 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index 500e732f..e5f3a6c5 100644 --- a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index 500e732f..e5f3a6c5 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.5 +google-api-python-client==1.6.6 google-auth==1.4.1 google-auth-httplib2==0.0.3 From 3f30863dcc393bd974f14a12a776a9c2092b8ade Mon Sep 17 00:00:00 2001 From: chenyumic Date: Fri, 6 Apr 2018 22:57:36 -0700 Subject: [PATCH 134/209] Regenerate the README files and fix the Open in Cloud Shell link for some samples [(#1441)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1441) --- samples/snippets/api/README.rst | 4 ++-- samples/snippets/classify_text/README.rst | 4 ++-- samples/snippets/cloud-client/v1/README.rst | 6 +++--- samples/snippets/cloud-client/v1beta2/README.rst | 6 +++--- samples/snippets/tutorial/README.rst | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst index 7434de16..8ebbe55a 100644 --- a/samples/snippets/api/README.rst +++ b/samples/snippets/api/README.rst @@ -12,7 +12,7 @@ This directory contains samples for Google Cloud Natural Language API. The `Goog -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -59,7 +59,7 @@ Analyze syntax +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/api/analyze.py;language/api/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/api/analyze.py,language/api/README.rst diff --git a/samples/snippets/classify_text/README.rst b/samples/snippets/classify_text/README.rst index 2857e031..2ede54da 100644 --- a/samples/snippets/classify_text/README.rst +++ b/samples/snippets/classify_text/README.rst @@ -16,7 +16,7 @@ This tutorial demostrates how to use the `classify_text` method to classify cont -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -63,7 +63,7 @@ Classify Text Tutorial +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/classify_text/classify_text_tutorial.py;language/classify_text/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/classify_text/classify_text_tutorial.py,language/classify_text/README.rst diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index 2e93e9af..7d727df6 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -16,7 +16,7 @@ This directory contains samples for Google Cloud Natural Language API. The `Goog -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -63,7 +63,7 @@ Quickstart +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/quickstart.py;language/cloud-client/v1/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/quickstart.py,language/cloud-client/v1/README.rst @@ -79,7 +79,7 @@ Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/snippets.py;language/cloud-client/v1/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/snippets.py,language/cloud-client/v1/README.rst diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index aa4ce452..e981c248 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -16,7 +16,7 @@ This directory contains samples for Google Cloud Natural Language API. The `Goog -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -63,7 +63,7 @@ Quickstart +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/quickstart.py;language/cloud-client/v1beta2/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/quickstart.py,language/cloud-client/v1beta2/README.rst @@ -79,7 +79,7 @@ Snippets +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/snippets.py;language/cloud-client/v1beta2/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/snippets.py,language/cloud-client/v1beta2/README.rst diff --git a/samples/snippets/tutorial/README.rst b/samples/snippets/tutorial/README.rst index 202381a6..08e7ee90 100644 --- a/samples/snippets/tutorial/README.rst +++ b/samples/snippets/tutorial/README.rst @@ -12,7 +12,7 @@ This directory contains samples for Google Cloud Natural Language Tutorial. The -.. _Google Cloud Natural Language Tutorial: https://cloud.google.com/natural-language/docs/ +.. _Google Cloud Natural Language Tutorial: https://cloud.google.com/natural-language/docs/ Setup ------------------------------------------------------------------------------- @@ -59,7 +59,7 @@ Language tutorial +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/tutorial.py;language/tutorial/README.rst + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/tutorial.py,language/tutorial/README.rst From ffa5084c88cdd486490b748db3a077ea90f8f74c Mon Sep 17 00:00:00 2001 From: Frank Natividad Date: Thu, 26 Apr 2018 10:26:41 -0700 Subject: [PATCH 135/209] Update READMEs to fix numbering and add git clone [(#1464)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1464) --- samples/snippets/api/README.rst | 10 ++++++++-- samples/snippets/classify_text/README.rst | 10 ++++++++-- samples/snippets/cloud-client/v1/README.rst | 10 ++++++++-- samples/snippets/cloud-client/v1beta2/README.rst | 10 ++++++++-- samples/snippets/tutorial/README.rst | 10 ++++++++-- 5 files changed, 40 insertions(+), 10 deletions(-) diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst index 8ebbe55a..5f4edfd2 100644 --- a/samples/snippets/api/README.rst +++ b/samples/snippets/api/README.rst @@ -31,10 +31,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. 
_Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/samples/snippets/classify_text/README.rst b/samples/snippets/classify_text/README.rst index 2ede54da..a1112f21 100644 --- a/samples/snippets/classify_text/README.rst +++ b/samples/snippets/classify_text/README.rst @@ -35,10 +35,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index 7d727df6..97f79a34 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -35,10 +35,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst index e981c248..03400319 100644 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ b/samples/snippets/cloud-client/v1beta2/README.rst @@ -35,10 +35,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. 
Samples are compatible with Python 2.7 and 3.4+. diff --git a/samples/snippets/tutorial/README.rst b/samples/snippets/tutorial/README.rst index 08e7ee90..3f83c1a2 100644 --- a/samples/snippets/tutorial/README.rst +++ b/samples/snippets/tutorial/README.rst @@ -31,10 +31,16 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup #. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. From c131d9076343f5b27eef28066fc0fc7f918dd201 Mon Sep 17 00:00:00 2001 From: Torry Yang Date: Fri, 20 Jul 2018 16:24:34 -0700 Subject: [PATCH 136/209] automl beta [(#1575)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1575) * automl initial commit * lint * fix import groupings * add requirements.txt * address review comments --- .../automl/automl_natural_language_dataset.py | 297 +++++++++++++ .../automl/automl_natural_language_model.py | 392 ++++++++++++++++++ .../automl/automl_natural_language_predict.py | 85 ++++ samples/snippets/automl/dataset_test.py | 71 ++++ samples/snippets/automl/model_test.py | 80 ++++ samples/snippets/automl/predict_test.py | 31 ++ samples/snippets/automl/requirements.txt | 1 + samples/snippets/automl/resources/test.txt | 1 + 8 files changed, 958 insertions(+) create mode 100755 samples/snippets/automl/automl_natural_language_dataset.py create mode 100755 samples/snippets/automl/automl_natural_language_model.py create mode 100755 samples/snippets/automl/automl_natural_language_predict.py create mode 100644 samples/snippets/automl/dataset_test.py create mode 100644 samples/snippets/automl/model_test.py create mode 100644 samples/snippets/automl/predict_test.py create mode 100644 samples/snippets/automl/requirements.txt create mode 100644 samples/snippets/automl/resources/test.txt diff --git a/samples/snippets/automl/automl_natural_language_dataset.py b/samples/snippets/automl/automl_natural_language_dataset.py new file mode 100755 index 00000000..7793d4a6 --- /dev/null +++ b/samples/snippets/automl/automl_natural_language_dataset.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on Dataset +with the Google AutoML Natural Language API. 
+ +For more information, see the tutorial page at +https://cloud.google.com/natural-language/automl/docs/ +""" + +import argparse +import os + + +def create_dataset(project_id, compute_region, dataset_name, multilabel=False): + """Create a dataset.""" + # [START automl_natural_language_create_dataset] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_name = 'DATASET_NAME_HERE' + # multilabel = True for multilabel or False for multiclass + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # A resource that represents Google Cloud Platform location. + project_location = client.location_path(project_id, compute_region) + + # Classification type is assigned based on multilabel value. + classification_type = "MULTICLASS" + if multilabel: + classification_type = "MULTILABEL" + + # Specify the text classification type for the dataset. + dataset_metadata = {"classification_type": classification_type} + + # Set dataset name and metadata. + my_dataset = { + "display_name": dataset_name, + "text_classification_dataset_metadata": dataset_metadata, + } + + # Create a dataset with the dataset metadata in the region. + dataset = client.create_dataset(project_location, my_dataset) + + # Display the dataset information. + print("Dataset name: {}".format(dataset.name)) + print("Dataset id: {}".format(dataset.name.split("/")[-1])) + print("Dataset display name: {}".format(dataset.display_name)) + print("Text classification dataset metadata:") + print("\t{}".format(dataset.text_classification_dataset_metadata)) + print("Dataset example count: {}".format(dataset.example_count)) + print("Dataset create time:") + print("\tseconds: {}".format(dataset.create_time.seconds)) + print("\tnanos: {}".format(dataset.create_time.nanos)) + + # [END automl_natural_language_create_dataset] + + +def list_datasets(project_id, compute_region, filter_): + """List all datasets.""" + # [START automl_natural_language_list_datasets] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # filter_ = 'filter expression here' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # A resource that represents Google Cloud Platform location. + project_location = client.location_path(project_id, compute_region) + + # List all the datasets available in the region by applying filter. + response = client.list_datasets(project_location, filter_) + + print("List of datasets:") + for dataset in response: + # Display the dataset information. 
+ print("Dataset name: {}".format(dataset.name)) + print("Dataset id: {}".format(dataset.name.split("/")[-1])) + print("Dataset display name: {}".format(dataset.display_name)) + print("Text classification dataset metadata:") + print("\t{}".format(dataset.text_classification_dataset_metadata)) + print("Dataset example count: {}".format(dataset.example_count)) + print("Dataset create time:") + print("\tseconds: {}".format(dataset.create_time.seconds)) + print("\tnanos: {}".format(dataset.create_time.nanos)) + + # [END automl_natural_language_list_datasets] + + +def get_dataset(project_id, compute_region, dataset_id): + """Get the dataset.""" + # [START automl_natural_language_get_dataset] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the dataset + dataset_full_id = client.dataset_path( + project_id, compute_region, dataset_id + ) + + # Get complete detail of the dataset. + dataset = client.get_dataset(dataset_full_id) + + # Display the dataset information. + print("Dataset name: {}".format(dataset.name)) + print("Dataset id: {}".format(dataset.name.split("/")[-1])) + print("Dataset display name: {}".format(dataset.display_name)) + print("Text classification dataset metadata:") + print("\t{}".format(dataset.text_classification_dataset_metadata)) + print("Dataset example count: {}".format(dataset.example_count)) + print("Dataset create time:") + print("\tseconds: {}".format(dataset.create_time.seconds)) + print("\tnanos: {}".format(dataset.create_time.nanos)) + + # [END automl_natural_language_get_dataset] + + +def import_data(project_id, compute_region, dataset_id, path): + """Import labelled items.""" + # [START automl_natural_language_import_data] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + # path = 'gs://path/to/file.csv' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the dataset. + dataset_full_id = client.dataset_path( + project_id, compute_region, dataset_id + ) + + # Get the multiple Google Cloud Storage URIs. + input_uris = path.split(",") + input_config = {"gcs_source": {"input_uris": input_uris}} + + # Import the dataset from the input URI. + response = client.import_data(dataset_full_id, input_config) + + print("Processing import...") + # synchronous check of operation status. + print("Data imported. {}".format(response.result())) + + # [END automl_natural_language_import_data] + + +def export_data(project_id, compute_region, dataset_id, output_uri): + """Export a dataset to a Google Cloud Storage bucket.""" + # [START automl_natural_language_export_data] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + # output_uri: 'gs://location/to/export/data' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the dataset. + dataset_full_id = client.dataset_path( + project_id, compute_region, dataset_id + ) + + # Set the output URI + output_config = {"gcs_destination": {"output_uri_prefix": output_uri}} + + # Export the data to the output URI. 
+ response = client.export_data(dataset_full_id, output_config) + + print("Processing export...") + # synchronous check of operation status. + print("Data exported. {}".format(response.result())) + + # [END automl_natural_language_export_data] + + +def delete_dataset(project_id, compute_region, dataset_id): + """Delete a dataset.""" + # [START automl_natural_language_delete_dataset] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the dataset. + dataset_full_id = client.dataset_path( + project_id, compute_region, dataset_id + ) + + # Delete a dataset. + response = client.delete_dataset(dataset_full_id) + + # synchronous check of operation status. + print("Dataset deleted. {}".format(response.result())) + + # [END automl_natural_language_delete_dataset] + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + subparsers = parser.add_subparsers(dest="command") + + create_dataset_parser = subparsers.add_parser( + "create_dataset", help=create_dataset.__doc__ + ) + create_dataset_parser.add_argument("dataset_name") + create_dataset_parser.add_argument( + "multilabel", nargs="?", choices=["False", "True"], default="False" + ) + + list_datasets_parser = subparsers.add_parser( + "list_datasets", help=list_datasets.__doc__ + ) + list_datasets_parser.add_argument( + "filter_", nargs="?", default="text_classification_dataset_metadata:*" + ) + + get_dataset_parser = subparsers.add_parser( + "get_dataset", help=get_dataset.__doc__ + ) + get_dataset_parser.add_argument("dataset_id") + + import_data_parser = subparsers.add_parser( + "import_data", help=import_data.__doc__ + ) + import_data_parser.add_argument("dataset_id") + import_data_parser.add_argument("path") + + export_data_parser = subparsers.add_parser( + "export_data", help=export_data.__doc__ + ) + export_data_parser.add_argument("dataset_id") + export_data_parser.add_argument("output_uri") + + delete_dataset_parser = subparsers.add_parser( + "delete_dataset", help=delete_dataset.__doc__ + ) + delete_dataset_parser.add_argument("dataset_id") + + project_id = os.environ["PROJECT_ID"] + compute_region = os.environ["REGION_NAME"] + + args = parser.parse_args() + + if args.command == "create_dataset": + multilabel = True if args.multilabel == "True" else False + create_dataset( + project_id, compute_region, args.dataset_name, multilabel + ) + if args.command == "list_datasets": + list_datasets(project_id, compute_region, args.filter_) + if args.command == "get_dataset": + get_dataset(project_id, compute_region, args.dataset_id) + if args.command == "import_data": + import_data(project_id, compute_region, args.dataset_id, args.path) + if args.command == "export_data": + export_data( + project_id, compute_region, args.dataset_id, args.output_uri + ) + if args.command == "delete_dataset": + delete_dataset(project_id, compute_region, args.dataset_id) diff --git a/samples/snippets/automl/automl_natural_language_model.py b/samples/snippets/automl/automl_natural_language_model.py new file mode 100755 index 00000000..84c0d99e --- /dev/null +++ b/samples/snippets/automl/automl_natural_language_model.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on model +with the Google AutoML Natural Language API. + +For more information, see the tutorial page at +https://cloud.google.com/natural-language/automl/docs/ +""" + +import argparse +import os + + +def create_model(project_id, compute_region, dataset_id, model_name): + """Create a model.""" + # [START automl_natural_language_create_model] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # dataset_id = 'DATASET_ID_HERE' + # model_name = 'MODEL_NAME_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # A resource that represents Google Cloud Platform location. + project_location = client.location_path(project_id, compute_region) + + # Set model name and model metadata for the dataset. + my_model = { + "display_name": model_name, + "dataset_id": dataset_id, + "text_classification_model_metadata": {}, + } + + # Create a model with the model metadata in the region. + response = client.create_model(project_location, my_model) + print("Training operation name: {}".format(response.operation.name)) + print("Training started...") + + # [END automl_natural_language_create_model] + + +def get_operation_status(operation_full_id): + """Get operation status.""" + # [START automl_natural_language_get_operation_status] + # TODO(developer): Uncomment and set the following variables + # operation_full_id = + # 'projects//locations//operations/' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the latest state of a long-running operation. + response = client.transport._operations_client.get_operation( + operation_full_id + ) + + print("Operation status: {}".format(response)) + + # [END automl_natural_language_get_operation_status] + + +def list_models(project_id, compute_region, filter_): + """List all models.""" + # [START automl_natural_language_list_models] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # filter_ = 'DATASET_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + from google.cloud.automl_v1beta1 import enums + + client = automl.AutoMlClient() + + # A resource that represents Google Cloud Platform location. + project_location = client.location_path(project_id, compute_region) + + # List all the models available in the region by applying filter. + response = client.list_models(project_location, filter_) + + print("List of models:") + for model in response: + # Retrieve deployment state. + deployment_state = "" + if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: + deployment_state = "deployed" + else: + deployment_state = "undeployed" + + # Display the model information. 
+ print("Model name: {}".format(model.name)) + print("Model id: {}".format(model.name.split("/")[-1])) + print("Model display name: {}".format(model.display_name)) + print("Model create time:") + print("\tseconds: {}".format(model.create_time.seconds)) + print("\tnanos: {}".format(model.create_time.nanos)) + print("Model deployment state: {}".format(deployment_state)) + + # [END automl_natural_language_list_models] + + +def get_model(project_id, compute_region, model_id): + """Get model details.""" + # [START automl_natural_language_get_model] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + from google.cloud.automl_v1beta1 import enums + + client = automl.AutoMlClient() + + # Get the full path of the model. + model_full_id = client.model_path(project_id, compute_region, model_id) + + # Get complete detail of the model. + model = client.get_model(model_full_id) + + # Retrieve deployment state. + deployment_state = "" + if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: + deployment_state = "deployed" + else: + deployment_state = "undeployed" + + # Display the model information. + print("Model name: {}".format(model.name)) + print("Model id: {}".format(model.name.split("/")[-1])) + print("Model display name: {}".format(model.display_name)) + print("Model create time:") + print("\tseconds: {}".format(model.create_time.seconds)) + print("\tnanos: {}".format(model.create_time.nanos)) + print("Model deployment state: {}".format(deployment_state)) + + # [END automl_natural_language_get_model] + + +def list_model_evaluations(project_id, compute_region, model_id, filter_): + """List model evaluations.""" + # [START automl_natural_language_list_model_evaluations] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + # filter_ = 'filter expression here' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the model. + model_full_id = client.model_path(project_id, compute_region, model_id) + + # List all the model evaluations in the model by applying filter. + response = client.list_model_evaluations(model_full_id, filter_) + + print("List of model evaluations:") + for element in response: + print(element) + + # [END automl_natural_language_list_model_evaluations] + + +def get_model_evaluation( + project_id, compute_region, model_id, model_evaluation_id +): + """Get model evaluation.""" + # [START automl_natural_language_get_model_evaluation] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + # model_evaluation_id = 'MODEL_EVALUATION_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the model evaluation. + model_evaluation_full_id = client.model_evaluation_path( + project_id, compute_region, model_id, model_evaluation_id + ) + + # Get complete detail of the model evaluation. 
+ response = client.get_model_evaluation(model_evaluation_full_id) + + print(response) + + # [END automl_natural_language_get_model_evaluation] + + +def display_evaluation(project_id, compute_region, model_id, filter_): + """Display evaluation.""" + # [START automl_natural_language_display_evaluation] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + # filter_ = 'filter expression here' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the model. + model_full_id = client.model_path(project_id, compute_region, model_id) + + # List all the model evaluations in the model by applying filter. + response = client.list_model_evaluations(model_full_id, filter_) + + # Iterate through the results. + for element in response: + # There is evaluation for each class in a model and for overall model. + # Get only the evaluation of overall model. + if not element.annotation_spec_id: + model_evaluation_id = element.name.split("/")[-1] + + # Resource name for the model evaluation. + model_evaluation_full_id = client.model_evaluation_path( + project_id, compute_region, model_id, model_evaluation_id + ) + + # Get a model evaluation. + model_evaluation = client.get_model_evaluation(model_evaluation_full_id) + + class_metrics = model_evaluation.classification_evaluation_metrics + confidence_metrics_entries = class_metrics.confidence_metrics_entry + + # Showing model score based on threshold of 0.5 + for confidence_metrics_entry in confidence_metrics_entries: + if confidence_metrics_entry.confidence_threshold == 0.5: + print("Precision and recall are based on a score threshold of 0.5") + print( + "Model Precision: {}%".format( + round(confidence_metrics_entry.precision * 100, 2) + ) + ) + print( + "Model Recall: {}%".format( + round(confidence_metrics_entry.recall * 100, 2) + ) + ) + print( + "Model F1 score: {}%".format( + round(confidence_metrics_entry.f1_score * 100, 2) + ) + ) + print( + "Model Precision@1: {}%".format( + round(confidence_metrics_entry.precision_at1 * 100, 2) + ) + ) + print( + "Model Recall@1: {}%".format( + round(confidence_metrics_entry.recall_at1 * 100, 2) + ) + ) + print( + "Model F1 score@1: {}%".format( + round(confidence_metrics_entry.f1_score_at1 * 100, 2) + ) + ) + + # [END automl_natural_language_display_evaluation] + + +def delete_model(project_id, compute_region, model_id): + """Delete a model.""" + # [START automl_natural_language_delete_model] + # TODO(developer): Uncomment and set the following variables + # project_id = 'PROJECT_ID_HERE' + # compute_region = 'COMPUTE_REGION_HERE' + # model_id = 'MODEL_ID_HERE' + + from google.cloud import automl_v1beta1 as automl + + client = automl.AutoMlClient() + + # Get the full path of the model. + model_full_id = client.model_path(project_id, compute_region, model_id) + + # Delete a model. + response = client.delete_model(model_full_id) + + # synchronous check of operation status. + print("Model deleted. 
{}".format(response.result()))
+
+    # [END automl_natural_language_delete_model]
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+    subparsers = parser.add_subparsers(dest="command")
+
+    create_model_parser = subparsers.add_parser(
+        "create_model", help=create_model.__doc__
+    )
+    create_model_parser.add_argument("dataset_id")
+    create_model_parser.add_argument("model_name")
+
+    get_operation_status_parser = subparsers.add_parser(
+        "get_operation_status", help=get_operation_status.__doc__
+    )
+    get_operation_status_parser.add_argument("operation_full_id")
+
+    list_models_parser = subparsers.add_parser(
+        "list_models", help=list_models.__doc__
+    )
+    list_models_parser.add_argument("filter_")
+
+    get_model_parser = subparsers.add_parser(
+        "get_model", help=get_model.__doc__
+    )
+    get_model_parser.add_argument("model_id")
+
+    list_model_evaluations_parser = subparsers.add_parser(
+        "list_model_evaluations", help=list_model_evaluations.__doc__
+    )
+    list_model_evaluations_parser.add_argument("model_id")
+    list_model_evaluations_parser.add_argument(
+        "filter_", nargs="?", default=""
+    )
+
+    get_model_evaluation_parser = subparsers.add_parser(
+        "get_model_evaluation", help=get_model_evaluation.__doc__
+    )
+    get_model_evaluation_parser.add_argument("model_id")
+    get_model_evaluation_parser.add_argument("model_evaluation_id")
+
+    display_evaluation_parser = subparsers.add_parser(
+        "display_evaluation", help=display_evaluation.__doc__
+    )
+    display_evaluation_parser.add_argument("model_id")
+    display_evaluation_parser.add_argument("filter_", nargs="?", default="")
+
+    delete_model_parser = subparsers.add_parser(
+        "delete_model", help=delete_model.__doc__
+    )
+    delete_model_parser.add_argument("model_id")
+
+    project_id = os.environ["PROJECT_ID"]
+    compute_region = os.environ["REGION_NAME"]
+
+    args = parser.parse_args()
+
+    if args.command == "create_model":
+        create_model(
+            project_id, compute_region, args.dataset_id, args.model_name
+        )
+    if args.command == "get_operation_status":
+        get_operation_status(args.operation_full_id)
+    if args.command == "list_models":
+        list_models(project_id, compute_region, args.filter_)
+    if args.command == "get_model":
+        get_model(project_id, compute_region, args.model_id)
+    if args.command == "list_model_evaluations":
+        list_model_evaluations(
+            project_id, compute_region, args.model_id, args.filter_
+        )
+    if args.command == "get_model_evaluation":
+        get_model_evaluation(
+            project_id, compute_region, args.model_id, args.model_evaluation_id
+        )
+    if args.command == "display_evaluation":
+        display_evaluation(
+            project_id, compute_region, args.model_id, args.filter_
+        )
+    if args.command == "delete_model":
+        delete_model(project_id, compute_region, args.model_id)
diff --git a/samples/snippets/automl/automl_natural_language_predict.py b/samples/snippets/automl/automl_natural_language_predict.py
new file mode 100755
index 00000000..0c25e373
--- /dev/null
+++ b/samples/snippets/automl/automl_natural_language_predict.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This application demonstrates how to perform basic operations on prediction
+with the Google AutoML Natural Language API.
+
+For more information, see the tutorial page at
+https://cloud.google.com/natural-language/automl/docs/
+"""
+
+import argparse
+import os
+
+
+def predict(project_id, compute_region, model_id, file_path):
+    """Classify the content."""
+    # [START automl_natural_language_predict]
+    # TODO(developer): Uncomment and set the following variables
+    # project_id = 'PROJECT_ID_HERE'
+    # compute_region = 'COMPUTE_REGION_HERE'
+    # model_id = 'MODEL_ID_HERE'
+    # file_path = '/local/path/to/file'
+
+    from google.cloud import automl_v1beta1 as automl
+
+    automl_client = automl.AutoMlClient()
+
+    # Create client for prediction service.
+    prediction_client = automl.PredictionServiceClient()
+
+    # Get the full path of the model.
+    model_full_id = automl_client.model_path(
+        project_id, compute_region, model_id
+    )
+
+    # Read the file content for prediction.
+    with open(file_path, "rb") as content_file:
+        snippet = content_file.read()
+
+    # Set the payload by giving the content and type of the file.
+    payload = {"text_snippet": {"content": snippet, "mime_type": "text/plain"}}
+
+    # params holds additional domain-specific request parameters.
+    # Currently no additional parameters are supported.
+    params = {}
+    response = prediction_client.predict(model_full_id, payload, params)
+    print("Prediction results:")
+    for result in response.payload:
+        print("Predicted class name: {}".format(result.display_name))
+        print("Predicted class score: {}".format(result.classification.score))
+
+    # [END automl_natural_language_predict]
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+    subparsers = parser.add_subparsers(dest="command")
+
+    predict_parser = subparsers.add_parser("predict", help=predict.__doc__)
+    predict_parser.add_argument("model_id")
+    predict_parser.add_argument("file_path")
+
+    project_id = os.environ["PROJECT_ID"]
+    compute_region = os.environ["REGION_NAME"]
+
+    args = parser.parse_args()
+
+    if args.command == "predict":
+        predict(project_id, compute_region, args.model_id, args.file_path)
diff --git a/samples/snippets/automl/dataset_test.py b/samples/snippets/automl/dataset_test.py
new file mode 100644
index 00000000..41a565c8
--- /dev/null
+++ b/samples/snippets/automl/dataset_test.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import os + +import pytest + +import automl_natural_language_dataset + +project_id = os.environ["GCLOUD_PROJECT"] +compute_region = "us-central1" + + +@pytest.mark.slow +def test_dataset_create_import_delete(capsys): + # create dataset + dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + automl_natural_language_dataset.create_dataset( + project_id, compute_region, dataset_name + ) + out, _ = capsys.readouterr() + create_dataset_output = out.splitlines() + assert "Dataset id: " in create_dataset_output[1] + + # import data + dataset_id = create_dataset_output[1].split()[2] + data = "gs://{}-vcm/happiness.csv".format(project_id) + automl_natural_language_dataset.import_data( + project_id, compute_region, dataset_id, data + ) + out, _ = capsys.readouterr() + assert "Data imported." in out + + # delete dataset + automl_natural_language_dataset.delete_dataset( + project_id, compute_region, dataset_id + ) + out, _ = capsys.readouterr() + assert "Dataset deleted." in out + + +def test_dataset_list_get(capsys): + # list datasets + automl_natural_language_dataset.list_datasets( + project_id, compute_region, "" + ) + out, _ = capsys.readouterr() + list_dataset_output = out.splitlines() + assert "Dataset id: " in list_dataset_output[2] + + # get dataset + dataset_id = list_dataset_output[2].split()[2] + automl_natural_language_dataset.get_dataset( + project_id, compute_region, dataset_id + ) + out, _ = capsys.readouterr() + assert "Dataset name: " in out diff --git a/samples/snippets/automl/model_test.py b/samples/snippets/automl/model_test.py new file mode 100644 index 00000000..4e52604e --- /dev/null +++ b/samples/snippets/automl/model_test.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import os + +from google.cloud import automl_v1beta1 as automl + +import automl_natural_language_model + +project_id = os.environ["GCLOUD_PROJECT"] +compute_region = "us-central1" + + +def test_model_create_status_delete(capsys): + # create model + client = automl.AutoMlClient() + model_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + project_location = client.location_path(project_id, compute_region) + my_model = { + "display_name": model_name, + "dataset_id": "2551826603472450019", + "text_classification_model_metadata": {}, + } + response = client.create_model(project_location, my_model) + operation_name = response.operation.name + assert operation_name + + # get operation status + automl_natural_language_model.get_operation_status(operation_name) + out, _ = capsys.readouterr() + assert "Operation status: " in out + + # cancel operation + response.cancel() + + +def test_model_list_get_evaluate(capsys): + # list models + automl_natural_language_model.list_models(project_id, compute_region, "") + out, _ = capsys.readouterr() + list_models_output = out.splitlines() + assert "Model id: " in list_models_output[2] + + # get model + model_id = list_models_output[2].split()[2] + automl_natural_language_model.get_model( + project_id, compute_region, model_id + ) + out, _ = capsys.readouterr() + assert "Model name: " in out + + # list model evaluations + automl_natural_language_model.list_model_evaluations( + project_id, compute_region, model_id, "" + ) + out, _ = capsys.readouterr() + list_evals_output = out.splitlines() + assert "name: " in list_evals_output[1] + + # get model evaluation + model_evaluation_id = list_evals_output[1].split("/")[-1][:-1] + automl_natural_language_model.get_model_evaluation( + project_id, compute_region, model_id, model_evaluation_id + ) + out, _ = capsys.readouterr() + assert "evaluation_metric" in out diff --git a/samples/snippets/automl/predict_test.py b/samples/snippets/automl/predict_test.py new file mode 100644 index 00000000..6cf2c69a --- /dev/null +++ b/samples/snippets/automl/predict_test.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import automl_natural_language_predict + +project_id = os.environ["GCLOUD_PROJECT"] +compute_region = "us-central1" + + +def test_predict(capsys): + model_id = "3472481026502981088" + automl_natural_language_predict.predict( + project_id, compute_region, model_id, "resources/test.txt" + ) + out, _ = capsys.readouterr() + assert "Cheese" in out diff --git a/samples/snippets/automl/requirements.txt b/samples/snippets/automl/requirements.txt new file mode 100644 index 00000000..d1bff72a --- /dev/null +++ b/samples/snippets/automl/requirements.txt @@ -0,0 +1 @@ +google-cloud-automl==0.1.0 diff --git a/samples/snippets/automl/resources/test.txt b/samples/snippets/automl/resources/test.txt new file mode 100644 index 00000000..f0dde24b --- /dev/null +++ b/samples/snippets/automl/resources/test.txt @@ -0,0 +1 @@ +A strong taste of hazlenut and orange From 4ec5165abc4e778201cf246482f14d56a9b37a76 Mon Sep 17 00:00:00 2001 From: Torry Yang Date: Mon, 23 Jul 2018 20:57:31 -0700 Subject: [PATCH 137/209] use lcm instead of vcm [(#1597)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1597) --- samples/snippets/automl/dataset_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/automl/dataset_test.py b/samples/snippets/automl/dataset_test.py index 41a565c8..fe68579f 100644 --- a/samples/snippets/automl/dataset_test.py +++ b/samples/snippets/automl/dataset_test.py @@ -38,7 +38,7 @@ def test_dataset_create_import_delete(capsys): # import data dataset_id = create_dataset_output[1].split()[2] - data = "gs://{}-vcm/happiness.csv".format(project_id) + data = "gs://{}-lcm/happiness.csv".format(project_id) automl_natural_language_dataset.import_data( project_id, compute_region, dataset_id, data ) From 69491fb7e5661233373935dacac9871368b0d9c0 Mon Sep 17 00:00:00 2001 From: Torry Yang Date: Thu, 2 Aug 2018 17:40:16 -0700 Subject: [PATCH 138/209] skip automl model create/delete test [(#1608)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1608) * skip model create/delete test * add skip reason --- samples/snippets/automl/model_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/samples/snippets/automl/model_test.py b/samples/snippets/automl/model_test.py index 4e52604e..8f484d2a 100644 --- a/samples/snippets/automl/model_test.py +++ b/samples/snippets/automl/model_test.py @@ -18,6 +18,7 @@ import os from google.cloud import automl_v1beta1 as automl +import pytest import automl_natural_language_model @@ -25,6 +26,7 @@ compute_region = "us-central1" +@pytest.mark.skip(reason="creates too many models") def test_model_create_status_delete(capsys): # create model client = automl.AutoMlClient() From bae53e82ff0561fde3fbb1db535da250ece1eeaa Mon Sep 17 00:00:00 2001 From: Alix Hamilton Date: Tue, 21 Aug 2018 15:16:31 -0400 Subject: [PATCH 139/209] Language region tag update [(#1643)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1643) --- .../classify_text/classify_text_tutorial.py | 32 +++++----- .../snippets/cloud-client/v1/quickstart.py | 8 +-- samples/snippets/cloud-client/v1/snippets.py | 58 ++++++++++--------- .../snippets/sentiment/sentiment_analysis.py | 18 +++--- 4 files changed, 60 insertions(+), 56 deletions(-) diff --git a/samples/snippets/classify_text/classify_text_tutorial.py b/samples/snippets/classify_text/classify_text_tutorial.py index 1ac9e0ac..2ce388cf 100644 --- 
a/samples/snippets/classify_text/classify_text_tutorial.py +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# [START classify_text_tutorial] +# [START language_classify_text_tutorial] """Using the classify_text method to find content categories of text files, Then use the content category labels to compare text similarity. @@ -21,7 +21,7 @@ https://cloud.google.com/natural-language/docs/classify-text-tutorial. """ -# [START classify_text_tutorial_import] +# [START language_classify_text_tutorial_imports] import argparse import io import json @@ -30,10 +30,10 @@ from google.cloud import language import numpy import six -# [END classify_text_tutorial_import] +# [END language_classify_text_tutorial_imports] -# [START def_classify] +# [START language_classify_text_tutorial_classify] def classify(text, verbose=True): """Classify the input text into categories. """ @@ -61,10 +61,10 @@ def classify(text, verbose=True): print(u'{:<16}: {}'.format('confidence', category.confidence)) return result -# [END def_classify] +# [END language_classify_text_tutorial_classify] -# [START def_index] +# [START language_classify_text_tutorial_index] def index(path, index_file): """Classify each text file in a directory and write the results to the index_file. @@ -91,10 +91,10 @@ def index(path, index_file): print('Texts indexed in file: {}'.format(index_file)) return result -# [END def_index] +# [END language_classify_text_tutorial_index] -# [START def_split_labels] +# [START language_classify_text_tutorial_split_labels] def split_labels(categories): """The category labels are of the form "/a/b/c" up to three levels, for example "/Computers & Electronics/Software", and these labels @@ -121,10 +121,10 @@ def split_labels(categories): _categories[label] = confidence return _categories -# [END def_split_labels] +# [END language_classify_text_tutorial_split_labels] -# [START def_similarity] +# [START language_classify_text_tutorial_similarity] def similarity(categories1, categories2): """Cosine similarity of the categories treated as sparse vectors.""" categories1 = split_labels(categories1) @@ -143,10 +143,10 @@ def similarity(categories1, categories2): dot += confidence * categories2.get(label, 0.0) return dot / (norm1 * norm2) -# [END def_similarity] +# [END language_classify_text_tutorial_similarity] -# [START def_query] +# [START language_classify_text_tutorial_query] def query(index_file, text, n_top=3): """Find the indexed files that are the most similar to the query text. @@ -176,10 +176,10 @@ def query(index_file, text, n_top=3): print('\n') return similarities -# [END def_query] +# [END language_classify_text_tutorial_query] -# [START def_query_category] +# [START language_classify_text_tutorial_query_category] def query_category(index_file, category_string, n_top=3): """Find the indexed files that are the most similar to the query label. 
@@ -211,7 +211,7 @@ def query_category(index_file, category_string, n_top=3): print('\n') return similarities -# [END def_query_category] +# [END language_classify_text_tutorial_query_category] if __name__ == '__main__': @@ -255,4 +255,4 @@ def query_category(index_file, category_string, n_top=3): query(args.index_file, args.text) if args.command == 'query-category': query_category(args.index_file, args.category) -# [END classify_text_tutorial] +# [END language_classify_text_tutorial] diff --git a/samples/snippets/cloud-client/v1/quickstart.py b/samples/snippets/cloud-client/v1/quickstart.py index 3c19e395..7c075a51 100644 --- a/samples/snippets/cloud-client/v1/quickstart.py +++ b/samples/snippets/cloud-client/v1/quickstart.py @@ -18,16 +18,16 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library - # [START migration_import] + # [START language_python_migration_imports] from google.cloud import language from google.cloud.language import enums from google.cloud.language import types - # [END migration_import] + # [END language_python_migration_imports] # Instantiates a client - # [START migration_client] + # [START language_python_migration_client] client = language.LanguageServiceClient() - # [END migration_client] + # [END language_python_migration_client] # The text to analyze text = u'Hello, world!' diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 30b591a4..3b1c02f9 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -30,7 +30,7 @@ import six -# [START def_sentiment_text] +# [START language_sentiment_text] def sentiment_text(text): """Detects sentiment in the text.""" client = language.LanguageServiceClient() @@ -39,12 +39,12 @@ def sentiment_text(text): text = text.decode('utf-8') # Instantiates a plain text document. - # [START migration_document_text] - # [START migration_analyze_sentiment] + # [START language_python_migration_document_text] + # [START language_python_migration_sentiment_text] document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT) - # [END migration_document_text] + # [END language_python_migration_document_text] # Detects sentiment in the document. You can also analyze HTML with: # document.type == enums.Document.Type.HTML @@ -52,21 +52,21 @@ def sentiment_text(text): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) - # [END migration_analyze_sentiment] -# [END def_sentiment_text] + # [END language_python_migration_sentiment_text] +# [END language_sentiment_text] -# [START def_sentiment_file] +# [START language_sentiment_gcs] def sentiment_file(gcs_uri): """Detects sentiment in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() # Instantiates a plain text document. - # [START migration_document_gcs_uri] + # [START language_python_migration_document_gcs] document = types.Document( gcs_content_uri=gcs_uri, type=enums.Document.Type.PLAIN_TEXT) - # [END migration_document_gcs_uri] + # [END language_python_migration_document_gcs] # Detects sentiment in the document. 
You can also analyze HTML with: # document.type == enums.Document.Type.HTML @@ -74,10 +74,10 @@ def sentiment_file(gcs_uri): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) -# [END def_sentiment_file] +# [END language_sentiment_gcs] -# [START def_entities_text] +# [START language_entities_text] def entities_text(text): """Detects entities in the text.""" client = language.LanguageServiceClient() @@ -86,7 +86,7 @@ def entities_text(text): text = text.decode('utf-8') # Instantiates a plain text document. - # [START migration_analyze_entities] + # [START language_python_migration_entities_text] document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT) @@ -107,11 +107,11 @@ def entities_text(text): print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) - # [END migration_analyze_entities] -# [END def_entities_text] + # [END language_python_migration_entities_text] +# [END language_entities_text] -# [START def_entities_file] +# [START language_entities_gcs] def entities_file(gcs_uri): """Detects entities in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() @@ -137,10 +137,10 @@ def entities_file(gcs_uri): print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) -# [END def_entities_file] +# [END language_entities_gcs] -# [START def_syntax_text] +# [START language_syntax_text] def syntax_text(text): """Detects syntax in the text.""" client = language.LanguageServiceClient() @@ -149,7 +149,7 @@ def syntax_text(text): text = text.decode('utf-8') # Instantiates a plain text document. 
- # [START migration_analyze_syntax] + # [START language_python_migration_syntax_text] document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT) @@ -165,11 +165,11 @@ def syntax_text(text): for token in tokens: print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], token.text.content)) - # [END migration_analyze_syntax] -# [END def_syntax_text] + # [END language_python_migration_syntax_text] +# [END language_syntax_text] -# [START def_syntax_file] +# [START language_syntax_gcs] def syntax_file(gcs_uri): """Detects syntax in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() @@ -190,10 +190,10 @@ def syntax_file(gcs_uri): for token in tokens: print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], token.text.content)) -# [END def_syntax_file] +# [END language_syntax_gcs] -# [START def_entity_sentiment_text] +# [START language_entity_sentiment_text] def entity_sentiment_text(text): """Detects entity sentiment in the provided text.""" client = language.LanguageServiceClient() @@ -223,9 +223,10 @@ def entity_sentiment_text(text): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END def_entity_sentiment_text] +# [END language_entity_sentiment_text] +# [START language_entity_sentiment_gcs] def entity_sentiment_file(gcs_uri): """Detects entity sentiment in a Google Cloud Storage file.""" client = language.LanguageServiceClient() @@ -251,9 +252,10 @@ def entity_sentiment_file(gcs_uri): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) +# [END language_entity_sentiment_gcs] -# [START def_classify_text] +# [START language_classify_text] def classify_text(text): """Classifies content categories of the provided text.""" client = language.LanguageServiceClient() @@ -271,10 +273,10 @@ def classify_text(text): print(u'=' * 20) print(u'{:<16}: {}'.format('name', category.name)) print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END def_classify_text] +# [END language_classify_text] -# [START def_classify_file] +# [START language_classify_gcs] def classify_file(gcs_uri): """Classifies content categories of the text in a Google Cloud Storage file. @@ -291,7 +293,7 @@ def classify_file(gcs_uri): print(u'=' * 20) print(u'{:<16}: {}'.format('name', category.name)) print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END def_classify_file] +# [END language_classify_gcs] if __name__ == '__main__': diff --git a/samples/snippets/sentiment/sentiment_analysis.py b/samples/snippets/sentiment/sentiment_analysis.py index 8ac8575b..3b572bc2 100644 --- a/samples/snippets/sentiment/sentiment_analysis.py +++ b/samples/snippets/sentiment/sentiment_analysis.py @@ -11,19 +11,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# [START sentiment_tutorial] +# [START language_sentiment_tutorial] """Demonstrates how to make a simple call to the Natural Language API.""" -# [START sentiment_tutorial_import] +# [START language_sentiment_tutorial_imports] import argparse from google.cloud import language from google.cloud.language import enums from google.cloud.language import types -# [END sentiment_tutorial_import] +# [END language_sentiment_tutorial_imports] -# [START def_print_result] +# [START language_sentiment_tutorial_print_result] def print_result(annotations): score = annotations.document_sentiment.score magnitude = annotations.document_sentiment.magnitude @@ -36,10 +36,10 @@ def print_result(annotations): print('Overall Sentiment: score of {} with magnitude of {}'.format( score, magnitude)) return 0 -# [END def_print_result] +# [END language_sentiment_tutorial_print_result] -# [START def_analyze] +# [START language_sentiment_tutorial_analyze_sentiment] def analyze(movie_review_filename): """Run a sentiment analysis request on text within a passed filename.""" client = language.LanguageServiceClient() @@ -55,9 +55,10 @@ def analyze(movie_review_filename): # Print the results print_result(annotations) -# [END def_analyze] +# [END language_sentiment_tutorial_analyze_sentiment] +# [START language_sentiment_tutorial_run_application] if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, @@ -68,4 +69,5 @@ def analyze(movie_review_filename): args = parser.parse_args() analyze(args.movie_review_filename) -# [END sentiment_tutorial] +# [END language_sentiment_tutorial_run_application] +# [END language_sentiment_tutorial] From 3c043533f3be16197fa5c26857b05cc6be8cee0c Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 28 Aug 2018 11:17:45 -0700 Subject: [PATCH 140/209] Auto-update dependencies. [(#1658)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1658) * Auto-update dependencies. * Rollback appengine/standard/bigquery/. * Rollback appengine/standard/iap/. * Rollback bigtable/metricscaler. * Rolledback appengine/flexible/datastore. * Rollback dataproc/ * Rollback jobs/api_client * Rollback vision/cloud-client. * Rollback functions/ocr/app. * Rollback iot/api-client/end_to_end_example. * Rollback storage/cloud-client. * Rollback kms/api-client. * Rollback dlp/ * Rollback bigquery/cloud-client. * Rollback iot/api-client/manager. * Rollback appengine/flexible/cloudsql_postgresql. 
--- samples/snippets/api/requirements.txt | 4 ++-- samples/snippets/automl/requirements.txt | 2 +- samples/snippets/classify_text/requirements.txt | 4 ++-- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1beta2/requirements.txt | 2 +- samples/snippets/movie_nl/requirements.txt | 6 +++--- samples/snippets/ocr_nl/requirements.txt | 4 ++-- samples/snippets/sentiment/requirements.txt | 2 +- samples/snippets/syntax_triples/requirements.txt | 4 ++-- samples/snippets/tutorial/requirements.txt | 4 ++-- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index e5f3a6c5..5e902918 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/automl/requirements.txt b/samples/snippets/automl/requirements.txt index d1bff72a..9b692618 100644 --- a/samples/snippets/automl/requirements.txt +++ b/samples/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.1.0 +google-cloud-automl==0.1.1 diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 5b7339a0..d045e22d 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.0.1 -numpy==1.14.2 +google-cloud-language==1.0.2 +numpy==1.15.1 diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 5085e2cd..2cbc37eb 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.1 +google-cloud-language==1.0.2 diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt index 5085e2cd..2cbc37eb 100644 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ b/samples/snippets/cloud-client/v1beta2/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.1 +google-cloud-language==1.0.2 diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt index cbe4d142..9718b185 100644 --- a/samples/snippets/movie_nl/requirements.txt +++ b/samples/snippets/movie_nl/requirements.txt @@ -1,4 +1,4 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 -requests==2.18.4 +requests==2.19.1 diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt index e5f3a6c5..5e902918 100644 --- a/samples/snippets/ocr_nl/requirements.txt +++ b/samples/snippets/ocr_nl/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 5085e2cd..2cbc37eb 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.1 +google-cloud-language==1.0.2 diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt index e5f3a6c5..5e902918 100644 --- 
a/samples/snippets/syntax_triples/requirements.txt +++ b/samples/snippets/syntax_triples/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt index e5f3a6c5..5e902918 100644 --- a/samples/snippets/tutorial/requirements.txt +++ b/samples/snippets/tutorial/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.6.6 -google-auth==1.4.1 +google-api-python-client==1.7.4 +google-auth==1.5.1 google-auth-httplib2==0.0.3 From 9cee1be134e63148383ea24636f2acce365e5a86 Mon Sep 17 00:00:00 2001 From: Alix Hamilton Date: Wed, 29 Aug 2018 12:37:06 -0700 Subject: [PATCH 141/209] Update AutoML region tags to use standard product prefixes [(#1669)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1669) --- .../automl/automl_natural_language_dataset.py | 24 +++++++------- .../automl/automl_natural_language_model.py | 32 +++++++++---------- .../automl/automl_natural_language_predict.py | 4 +-- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/samples/snippets/automl/automl_natural_language_dataset.py b/samples/snippets/automl/automl_natural_language_dataset.py index 7793d4a6..df77d542 100755 --- a/samples/snippets/automl/automl_natural_language_dataset.py +++ b/samples/snippets/automl/automl_natural_language_dataset.py @@ -27,7 +27,7 @@ def create_dataset(project_id, compute_region, dataset_name, multilabel=False): """Create a dataset.""" - # [START automl_natural_language_create_dataset] + # [START automl_language_create_dataset] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -69,12 +69,12 @@ def create_dataset(project_id, compute_region, dataset_name, multilabel=False): print("\tseconds: {}".format(dataset.create_time.seconds)) print("\tnanos: {}".format(dataset.create_time.nanos)) - # [END automl_natural_language_create_dataset] + # [END automl_language_create_dataset] def list_datasets(project_id, compute_region, filter_): """List all datasets.""" - # [START automl_natural_language_list_datasets] + # [START automl_language_list_datasets] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -103,12 +103,12 @@ def list_datasets(project_id, compute_region, filter_): print("\tseconds: {}".format(dataset.create_time.seconds)) print("\tnanos: {}".format(dataset.create_time.nanos)) - # [END automl_natural_language_list_datasets] + # [END automl_language_list_datasets] def get_dataset(project_id, compute_region, dataset_id): """Get the dataset.""" - # [START automl_natural_language_get_dataset] + # [START automl_language_get_dataset] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -137,12 +137,12 @@ def get_dataset(project_id, compute_region, dataset_id): print("\tseconds: {}".format(dataset.create_time.seconds)) print("\tnanos: {}".format(dataset.create_time.nanos)) - # [END automl_natural_language_get_dataset] + # [END automl_language_get_dataset] def import_data(project_id, compute_region, dataset_id, path): """Import labelled items.""" - # [START automl_natural_language_import_data] + # [START automl_language_import_data] # TODO(developer): Uncomment and set the 
following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -169,12 +169,12 @@ def import_data(project_id, compute_region, dataset_id, path): # synchronous check of operation status. print("Data imported. {}".format(response.result())) - # [END automl_natural_language_import_data] + # [END automl_language_import_data] def export_data(project_id, compute_region, dataset_id, output_uri): """Export a dataset to a Google Cloud Storage bucket.""" - # [START automl_natural_language_export_data] + # [START automl_language_export_data] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -200,12 +200,12 @@ def export_data(project_id, compute_region, dataset_id, output_uri): # synchronous check of operation status. print("Data exported. {}".format(response.result())) - # [END automl_natural_language_export_data] + # [END automl_language_export_data] def delete_dataset(project_id, compute_region, dataset_id): """Delete a dataset.""" - # [START automl_natural_language_delete_dataset] + # [START automl_language_delete_dataset] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -226,7 +226,7 @@ def delete_dataset(project_id, compute_region, dataset_id): # synchronous check of operation status. print("Dataset deleted. {}".format(response.result())) - # [END automl_natural_language_delete_dataset] + # [END automl_language_delete_dataset] if __name__ == "__main__": diff --git a/samples/snippets/automl/automl_natural_language_model.py b/samples/snippets/automl/automl_natural_language_model.py index 84c0d99e..35472121 100755 --- a/samples/snippets/automl/automl_natural_language_model.py +++ b/samples/snippets/automl/automl_natural_language_model.py @@ -27,7 +27,7 @@ def create_model(project_id, compute_region, dataset_id, model_name): """Create a model.""" - # [START automl_natural_language_create_model] + # [START automl_language_create_model] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -53,12 +53,12 @@ def create_model(project_id, compute_region, dataset_id, model_name): print("Training operation name: {}".format(response.operation.name)) print("Training started...") - # [END automl_natural_language_create_model] + # [END automl_language_create_model] def get_operation_status(operation_full_id): """Get operation status.""" - # [START automl_natural_language_get_operation_status] + # [START automl_language_get_operation_status] # TODO(developer): Uncomment and set the following variables # operation_full_id = # 'projects//locations//operations/' @@ -74,12 +74,12 @@ def get_operation_status(operation_full_id): print("Operation status: {}".format(response)) - # [END automl_natural_language_get_operation_status] + # [END automl_language_get_operation_status] def list_models(project_id, compute_region, filter_): """List all models.""" - # [START automl_natural_language_list_models] + # [START automl_language_list_models] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -114,12 +114,12 @@ def list_models(project_id, compute_region, filter_): print("\tnanos: {}".format(model.create_time.nanos)) print("Model deployment state: {}".format(deployment_state)) - # [END automl_natural_language_list_models] + # [END 
automl_language_list_models] def get_model(project_id, compute_region, model_id): """Get model details.""" - # [START automl_natural_language_get_model] + # [START automl_language_get_model] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -152,12 +152,12 @@ def get_model(project_id, compute_region, model_id): print("\tnanos: {}".format(model.create_time.nanos)) print("Model deployment state: {}".format(deployment_state)) - # [END automl_natural_language_get_model] + # [END automl_language_get_model] def list_model_evaluations(project_id, compute_region, model_id, filter_): """List model evaluations.""" - # [START automl_natural_language_list_model_evaluations] + # [START automl_language_list_model_evaluations] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -178,14 +178,14 @@ def list_model_evaluations(project_id, compute_region, model_id, filter_): for element in response: print(element) - # [END automl_natural_language_list_model_evaluations] + # [END automl_language_list_model_evaluations] def get_model_evaluation( project_id, compute_region, model_id, model_evaluation_id ): """Get model evaluation.""" - # [START automl_natural_language_get_model_evaluation] + # [START automl_language_get_model_evaluation] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -206,12 +206,12 @@ def get_model_evaluation( print(response) - # [END automl_natural_language_get_model_evaluation] + # [END automl_language_get_model_evaluation] def display_evaluation(project_id, compute_region, model_id, filter_): """Display evaluation.""" - # [START automl_natural_language_display_evaluation] + # [START automl_language_display_evaluation] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -281,12 +281,12 @@ def display_evaluation(project_id, compute_region, model_id, filter_): ) ) - # [END automl_natural_language_display_evaluation] + # [END automl_language_display_evaluation] def delete_model(project_id, compute_region, model_id): """Delete a model.""" - # [START automl_natural_language_delete_model] + # [START automl_language_delete_model] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -305,7 +305,7 @@ def delete_model(project_id, compute_region, model_id): # synchronous check of operation status. print("Model deleted. 
{}".format(response.result())) - # [END automl_natural_language_delete_model] + # [END automl_language_delete_model] if __name__ == "__main__": diff --git a/samples/snippets/automl/automl_natural_language_predict.py b/samples/snippets/automl/automl_natural_language_predict.py index 0c25e373..b328c7ae 100755 --- a/samples/snippets/automl/automl_natural_language_predict.py +++ b/samples/snippets/automl/automl_natural_language_predict.py @@ -27,7 +27,7 @@ def predict(project_id, compute_region, model_id, file_path): """Classify the content.""" - # [START automl_natural_language_predict] + # [START automl_language_predict] # TODO(developer): Uncomment and set the following variables # project_id = 'PROJECT_ID_HERE' # compute_region = 'COMPUTE_REGION_HERE' @@ -62,7 +62,7 @@ def predict(project_id, compute_region, model_id, file_path): print("Predicted class name: {}".format(result.display_name)) print("Predicted class score: {}".format(result.classification.score)) - # [END automl_natural_language_predict] + # [END automl_language_predict] if __name__ == "__main__": From ed7a0a6e13b1ab7c3f77654b979ed81cca5658e5 Mon Sep 17 00:00:00 2001 From: Rebecca Taylor Date: Thu, 30 Aug 2018 16:01:18 -0700 Subject: [PATCH 142/209] Add small, generated version of `language_sentiment_text` [(#1660)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1660) * Generated sample: language_sentiment_text FYI generated from the following YAML GAPIC config: sample_value_sets: - id: analyze_sentiment title: "Analyzing Sentiment" description: "Proof of concept for analyzing sentiment" parameters: defaults: - document.type=PLAIN_TEXT - document.content="Your text to analyze, e.g. Hello, world!" attributes: - parameter: document.content sample_argument: true on_success: - define: sentiment=$resp.document_sentiment - print: - "Score: %s" - sentiment.score - print: - "Magnitude: %s" - sentiment.magnitude samples: standalone: - calling_forms: ".*" value_sets: analyze_sentiment region_tag: language_sentiment_text * Add requirements.txt (not currently generated) * Add test for language_sentiment_text (not currently generated) * Move language_python_migration_document_text Move language_python_migration_document_text so it uses a different snippet in preparation for deprecation of existing language_sentiment_text sample * Rename generated snippets so filename == region tag * Fix test for generated code sample (file rename to match region tag) * Update Copyright year to 2018 in new hand-written file * Fix lint errors of #language_sentiment_text test * Regenerate #language_sentiment_text to fix lint errors (updated Python sample template) * Binary string support in samples! 
From PR https://github.com/googleapis/gapic-generator/pull/2272 --- samples/snippets/cloud-client/v1/snippets.py | 4 +- .../v1/language_sentiment_text.py | 61 +++++++++++++++++++ .../v1/language_sentiment_text_test.py | 28 +++++++++ .../generated-samples/v1/requirements.txt | 1 + 4 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 samples/snippets/generated-samples/v1/language_sentiment_text.py create mode 100644 samples/snippets/generated-samples/v1/language_sentiment_text_test.py create mode 100644 samples/snippets/generated-samples/v1/requirements.txt diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 3b1c02f9..a41c7cb3 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -39,12 +39,10 @@ def sentiment_text(text): text = text.decode('utf-8') # Instantiates a plain text document. - # [START language_python_migration_document_text] # [START language_python_migration_sentiment_text] document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT) - # [END language_python_migration_document_text] # Detects sentiment in the document. You can also analyze HTML with: # document.type == enums.Document.Type.HTML @@ -87,9 +85,11 @@ def entities_text(text): # Instantiates a plain text document. # [START language_python_migration_entities_text] + # [START language_python_migration_document_text] document = types.Document( content=text, type=enums.Document.Type.PLAIN_TEXT) + # [END language_python_migration_document_text] # Detects entities in the document. You can also analyze HTML with: # document.type == enums.Document.Type.HTML diff --git a/samples/snippets/generated-samples/v1/language_sentiment_text.py b/samples/snippets/generated-samples/v1/language_sentiment_text.py new file mode 100644 index 00000000..d99f5d09 --- /dev/null +++ b/samples/snippets/generated-samples/v1/language_sentiment_text.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DO NOT EDIT! This is a generated sample ("Request", "analyze_sentiment") + +# To install the latest published package dependency, execute the following: +# pip install google-cloud-language + +import sys + +# [START language_sentiment_text] + +from google.cloud import language_v1 +from google.cloud.language_v1 import enums +import six + + +def sample_analyze_sentiment(content): + # [START language_sentiment_text_core] + + client = language_v1.LanguageServiceClient() + + # content = 'Your text to analyze, e.g. Hello, world!' 
+ + if isinstance(content, six.binary_type): + content = content.decode('utf-8') + + type_ = enums.Document.Type.PLAIN_TEXT + document = {'type': type_, 'content': content} + + response = client.analyze_sentiment(document) + sentiment = response.document_sentiment + print('Score: {}'.format(sentiment.score)) + print('Magnitude: {}'.format(sentiment.magnitude)) + + # [END language_sentiment_text_core] + + +# [END language_sentiment_text] + + +def main(): + # FIXME: Convert argv from strings to the correct types. + sample_analyze_sentiment(*sys.argv[1:]) + + +if __name__ == '__main__': + main() diff --git a/samples/snippets/generated-samples/v1/language_sentiment_text_test.py b/samples/snippets/generated-samples/v1/language_sentiment_text_test.py new file mode 100644 index 00000000..e1876da2 --- /dev/null +++ b/samples/snippets/generated-samples/v1/language_sentiment_text_test.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import language_sentiment_text + + +def test_analyze_sentiment_text_positive(capsys): + language_sentiment_text.sample_analyze_sentiment('Happy Happy Joy Joy') + out, _ = capsys.readouterr() + assert 'Score: 0.' in out + + +def test_analyze_sentiment_text_negative(capsys): + language_sentiment_text.sample_analyze_sentiment('Angry Angry Sad Sad') + out, _ = capsys.readouterr() + assert 'Score: -0.' in out diff --git a/samples/snippets/generated-samples/v1/requirements.txt b/samples/snippets/generated-samples/v1/requirements.txt new file mode 100644 index 00000000..2cbc37eb --- /dev/null +++ b/samples/snippets/generated-samples/v1/requirements.txt @@ -0,0 +1 @@ +google-cloud-language==1.0.2 From 92e409d00f88a07c85694c41e8d73e507f6abc7c Mon Sep 17 00:00:00 2001 From: Rebecca Taylor Date: Mon, 15 Oct 2018 13:53:04 -0700 Subject: [PATCH 143/209] Access Display Names of enum fields via enum object [(#1738)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1738) * Get display name of enums using IntEnum Requires updating google-cloud-language to 1.1.0 * Add note about gs://demomaker for video test files * Get display name of enums using IntEnum * Get display name of enums using IntEnum * Revert "Add note about gs://demomaker for video test files" This reverts commit 39d9bfff03201f7c6dcb38fee3856dd537ab4b62. 
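The pattern this change adopts can be illustrated with a minimal, standalone sketch (assuming google-cloud-language>=1.1.0, where the generated enums are IntEnum subclasses; the sample sentence is the one used in the repository's own tests):

```python
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types

client = language.LanguageServiceClient()

document = types.Document(
    content='President Obama is speaking at the White House.',
    type=enums.Document.Type.PLAIN_TEXT)

entities = client.analyze_entities(document).entities

for entity in entities:
    # entity.type is a plain integer on the protobuf message; wrapping it in
    # the IntEnum yields a member whose .name is the human-readable label,
    # e.g. 'PERSON', instead of indexing into a hand-maintained tuple.
    entity_type = enums.Entity.Type(entity.type)
    print(u'{:<16}: {}'.format('name', entity.name))
    print(u'{:<16}: {}'.format('type', entity_type.name))
```

The same IntEnum lookup applies to enums.PartOfSpeech.Tag in the syntax samples changed below.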
--- .../snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/cloud-client/v1/snippets.py | 28 ++++++------------- 2 files changed, 9 insertions(+), 21 deletions(-) diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 2cbc37eb..7029093e 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.2 +google-cloud-language==1.1.0 diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index a41c7cb3..826c28c5 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -95,14 +95,11 @@ def entities_text(text): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities - # entity types from enums.Entity.Type - entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', - 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') - for entity in entities: + entity_type = enums.Entity.Type(entity.type) print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type[entity.type])) + print(u'{:<16}: {}'.format('type', entity_type.name)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', @@ -125,14 +122,11 @@ def entities_file(gcs_uri): # document.type == enums.Document.Type.HTML entities = client.analyze_entities(document).entities - # entity types from enums.Entity.Type - entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', - 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') - for entity in entities: + entity_type = enums.Entity.Type(entity.type) print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type[entity.type])) + print(u'{:<16}: {}'.format('type', entity_type.name)) print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', @@ -158,12 +152,9 @@ def syntax_text(text): # document.type == enums.Document.Type.HTML tokens = client.analyze_syntax(document).tokens - # part-of-speech tags from enums.PartOfSpeech.Tag - pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', - 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') - for token in tokens: - print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) + print(u'{}: {}'.format(part_of_speech_tag.name, token.text.content)) # [END language_python_migration_syntax_text] # [END language_syntax_text] @@ -183,12 +174,9 @@ def syntax_file(gcs_uri): # document.type == enums.Document.Type.HTML tokens = client.analyze_syntax(document).tokens - # part-of-speech tags from enums.PartOfSpeech.Tag - pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', - 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') - for token in tokens: - print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], + part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) + print(u'{}: {}'.format(part_of_speech_tag.name, token.text.content)) # [END language_syntax_gcs] From 9b3ecba39f5b9ca7e4137c0f27cb505d45ecaf66 Mon Sep 17 00:00:00 2001 From: Alix Hamilton Date: Tue, 16 Oct 2018 15:50:57 -0700 Subject: [PATCH 144/209] Remove unused and outdated Natural 
Language samples [(#1715)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1715) * remove unused beta entity sentiment samples * remove unused beta samples * remove v1beta2 directory * remove outdated unused tutorial * removes remaining googleapiclient.discovery tutorials --- samples/snippets/README.md | 11 - .../snippets/cloud-client/v1beta2/README.rst | 151 -------- .../cloud-client/v1beta2/README.rst.in | 32 -- .../cloud-client/v1beta2/quickstart.py | 43 --- .../cloud-client/v1beta2/quickstart_test.py | 22 -- .../cloud-client/v1beta2/requirements.txt | 1 - .../v1beta2/resources/android_text.txt | 1 - .../cloud-client/v1beta2/resources/text.txt | 1 - .../snippets/cloud-client/v1beta2/snippets.py | 346 ----------------- .../cloud-client/v1beta2/snippets_test.py | 106 ------ samples/snippets/movie_nl/README.md | 157 -------- samples/snippets/movie_nl/main.py | 334 ----------------- samples/snippets/movie_nl/main_test.py | 130 ------- samples/snippets/movie_nl/requirements.txt | 4 - samples/snippets/ocr_nl/README.md | 232 ------------ samples/snippets/ocr_nl/main.py | 354 ------------------ samples/snippets/ocr_nl/main_test.py | 100 ----- samples/snippets/ocr_nl/requirements.txt | 3 - samples/snippets/syntax_triples/README.md | 96 ----- samples/snippets/syntax_triples/main.py | 172 --------- samples/snippets/syntax_triples/main_test.py | 53 --- .../snippets/syntax_triples/requirements.txt | 3 - .../resources/obama_wikipedia.txt | 1 - samples/snippets/tutorial/README.rst | 93 ----- samples/snippets/tutorial/README.rst.in | 22 -- samples/snippets/tutorial/requirements.txt | 3 - .../tutorial/reviews/bladerunner-mixed.txt | 19 - .../tutorial/reviews/bladerunner-neg.txt | 3 - .../tutorial/reviews/bladerunner-neutral.txt | 2 - .../tutorial/reviews/bladerunner-pos.txt | 10 - samples/snippets/tutorial/tutorial.py | 69 ---- samples/snippets/tutorial/tutorial_test.py | 51 --- 32 files changed, 2625 deletions(-) delete mode 100644 samples/snippets/cloud-client/v1beta2/README.rst delete mode 100644 samples/snippets/cloud-client/v1beta2/README.rst.in delete mode 100644 samples/snippets/cloud-client/v1beta2/quickstart.py delete mode 100644 samples/snippets/cloud-client/v1beta2/quickstart_test.py delete mode 100644 samples/snippets/cloud-client/v1beta2/requirements.txt delete mode 100644 samples/snippets/cloud-client/v1beta2/resources/android_text.txt delete mode 100644 samples/snippets/cloud-client/v1beta2/resources/text.txt delete mode 100644 samples/snippets/cloud-client/v1beta2/snippets.py delete mode 100644 samples/snippets/cloud-client/v1beta2/snippets_test.py delete mode 100644 samples/snippets/movie_nl/README.md delete mode 100644 samples/snippets/movie_nl/main.py delete mode 100644 samples/snippets/movie_nl/main_test.py delete mode 100644 samples/snippets/movie_nl/requirements.txt delete mode 100644 samples/snippets/ocr_nl/README.md delete mode 100755 samples/snippets/ocr_nl/main.py delete mode 100755 samples/snippets/ocr_nl/main_test.py delete mode 100644 samples/snippets/ocr_nl/requirements.txt delete mode 100644 samples/snippets/syntax_triples/README.md delete mode 100644 samples/snippets/syntax_triples/main.py delete mode 100755 samples/snippets/syntax_triples/main_test.py delete mode 100644 samples/snippets/syntax_triples/requirements.txt delete mode 100644 samples/snippets/syntax_triples/resources/obama_wikipedia.txt delete mode 100644 samples/snippets/tutorial/README.rst delete mode 100644 samples/snippets/tutorial/README.rst.in 
delete mode 100644 samples/snippets/tutorial/requirements.txt delete mode 100644 samples/snippets/tutorial/reviews/bladerunner-mixed.txt delete mode 100644 samples/snippets/tutorial/reviews/bladerunner-neg.txt delete mode 100644 samples/snippets/tutorial/reviews/bladerunner-neutral.txt delete mode 100644 samples/snippets/tutorial/reviews/bladerunner-pos.txt delete mode 100644 samples/snippets/tutorial/tutorial.py delete mode 100644 samples/snippets/tutorial/tutorial_test.py diff --git a/samples/snippets/README.md b/samples/snippets/README.md index d0ba5691..5689d7c2 100644 --- a/samples/snippets/README.md +++ b/samples/snippets/README.md @@ -10,17 +10,6 @@ This directory contains Python examples that use the - [api](api) has a simple command line tool that shows off the API's features. -- [movie_nl](movie_nl) combines sentiment and entity analysis to come up with -actors/directors who are the most and least popular in the imdb movie reviews. - -- [ocr_nl](ocr_nl) uses the [Cloud Vision API](https://cloud.google.com/vision/) -to extract text from images, then uses the NL API to extract entity information -from those texts, and stores the extracted information in a database in support -of further analysis and correlation. - - [sentiment](sentiment) contains the [Sentiment Analysis Tutorial](https://cloud.google.com/natural-language/docs/sentiment-tutorial) code as used within the documentation. - -- [syntax_triples](syntax_triples) uses syntax analysis to find -subject-verb-object triples in a given piece of text. diff --git a/samples/snippets/cloud-client/v1beta2/README.rst b/samples/snippets/cloud-client/v1beta2/README.rst deleted file mode 100644 index 03400319..00000000 --- a/samples/snippets/cloud-client/v1beta2/README.rst +++ /dev/null @@ -1,151 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Natural Language API Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/README.rst - - -This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. - -- See the `migration guide`_ for information about migrating to Python client library v0.26.1. - -.. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration - - - - -.. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. 
_Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Quickstart -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/quickstart.py,language/cloud-client/v1beta2/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python quickstart.py - - -Snippets -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1beta2/snippets.py,language/cloud-client/v1beta2/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python snippets.py - - usage: snippets.py [-h] - {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} - ... - - This application demonstrates how to perform basic operations with the - Google Cloud Natural Language API - - For more information, the documentation at - https://cloud.google.com/natural-language/docs. - - positional arguments: - {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} - classify-text Classifies content categories of the provided text. - classify-file Classifies content categories of the text in a Google - Cloud Storage file. - sentiment-entities-text - Detects entity sentiment in the provided text. - sentiment-entities-file - Detects entity sentiment in a Google Cloud Storage - file. - sentiment-text Detects sentiment in the text. - sentiment-file Detects sentiment in the file located in Google Cloud - Storage. 
- entities-text Detects entities in the text. - entities-file Detects entities in the file located in Google Cloud - Storage. - syntax-text Detects syntax in the text. - syntax-file Detects syntax in the file located in Google Cloud - Storage. - - optional arguments: - -h, --help show this help message and exit - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/cloud-client/v1beta2/README.rst.in b/samples/snippets/cloud-client/v1beta2/README.rst.in deleted file mode 100644 index d1166745..00000000 --- a/samples/snippets/cloud-client/v1beta2/README.rst.in +++ /dev/null @@ -1,32 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Natural Language API - short_name: Cloud Natural Language API - url: https://cloud.google.com/natural-language/docs/ - description: > - The `Google Cloud Natural Language API`_ provides natural language - understanding technologies to developers, including sentiment analysis, - entity recognition, and syntax analysis. This API is part of the larger - Cloud Machine Learning API. - - - - See the `migration guide`_ for information about migrating to Python client library v0.26.1. - - - .. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration - -setup: -- auth -- install_deps - -samples: -- name: Quickstart - file: quickstart.py -- name: Snippets - file: snippets.py - show_help: true - -cloud_client_library: true - -folder: language/cloud-client/v1beta2 \ No newline at end of file diff --git a/samples/snippets/cloud-client/v1beta2/quickstart.py b/samples/snippets/cloud-client/v1beta2/quickstart.py deleted file mode 100644 index b19d11b7..00000000 --- a/samples/snippets/cloud-client/v1beta2/quickstart.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -def run_quickstart(): - # [START language_quickstart] - # Imports the Google Cloud client library - from google.cloud import language_v1beta2 - from google.cloud.language_v1beta2 import enums - from google.cloud.language_v1beta2 import types - - # Instantiates a client with the v1beta2 version - client = language_v1beta2.LanguageServiceClient() - - # The text to analyze - text = u'Hallo Welt!' - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT, - language='de') - # Detects the sentiment of the text - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Text: {}'.format(text)) - print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) - # [END language_quickstart] - - -if __name__ == '__main__': - run_quickstart() diff --git a/samples/snippets/cloud-client/v1beta2/quickstart_test.py b/samples/snippets/cloud-client/v1beta2/quickstart_test.py deleted file mode 100644 index 839faae2..00000000 --- a/samples/snippets/cloud-client/v1beta2/quickstart_test.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import quickstart - - -def test_quickstart(capsys): - quickstart.run_quickstart() - out, _ = capsys.readouterr() - assert 'Sentiment' in out diff --git a/samples/snippets/cloud-client/v1beta2/requirements.txt b/samples/snippets/cloud-client/v1beta2/requirements.txt deleted file mode 100644 index 2cbc37eb..00000000 --- a/samples/snippets/cloud-client/v1beta2/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -google-cloud-language==1.0.2 diff --git a/samples/snippets/cloud-client/v1beta2/resources/android_text.txt b/samples/snippets/cloud-client/v1beta2/resources/android_text.txt deleted file mode 100644 index c05c452d..00000000 --- a/samples/snippets/cloud-client/v1beta2/resources/android_text.txt +++ /dev/null @@ -1 +0,0 @@ -Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets. diff --git a/samples/snippets/cloud-client/v1beta2/resources/text.txt b/samples/snippets/cloud-client/v1beta2/resources/text.txt deleted file mode 100644 index 97a1cea0..00000000 --- a/samples/snippets/cloud-client/v1beta2/resources/text.txt +++ /dev/null @@ -1 +0,0 @@ -President Obama is speaking at the White House. \ No newline at end of file diff --git a/samples/snippets/cloud-client/v1beta2/snippets.py b/samples/snippets/cloud-client/v1beta2/snippets.py deleted file mode 100644 index abf16ada..00000000 --- a/samples/snippets/cloud-client/v1beta2/snippets.py +++ /dev/null @@ -1,346 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2016 Google, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations with the -Google Cloud Natural Language API - -For more information, the documentation at -https://cloud.google.com/natural-language/docs. -""" - -import argparse -import sys - -# [START beta_import] -from google.cloud import language_v1beta2 -from google.cloud.language_v1beta2 import enums -from google.cloud.language_v1beta2 import types -# [END beta_import] -import six - - -def sentiment_text(text): - """Detects sentiment in the text.""" - client = language_v1beta2.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) - - -def sentiment_file(gcs_uri): - """Detects sentiment in the file located in Google Cloud Storage.""" - client = language_v1beta2.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) - - -def entities_text(text): - """Detects entities in the text.""" - client = language_v1beta2.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects entities in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - entities = client.analyze_entities(document).entities - - # entity types from enums.Entity.Type - entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', - 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') - - for entity in entities: - print('=' * 20) - print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type[entity.type])) - print(u'{:<16}: {}'.format('metadata', entity.metadata)) - print(u'{:<16}: {}'.format('salience', entity.salience)) - print(u'{:<16}: {}'.format('wikipedia_url', - entity.metadata.get('wikipedia_url', '-'))) - - -def entities_file(gcs_uri): - """Detects entities in the file located in Google Cloud Storage.""" - client = language_v1beta2.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. 
You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - entities = client.analyze_entities(document).entities - - # entity types from enums.Entity.Type - entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', - 'EVENT', 'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER') - - for entity in entities: - print('=' * 20) - print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type[entity.type])) - print(u'{:<16}: {}'.format('metadata', entity.metadata)) - print(u'{:<16}: {}'.format('salience', entity.salience)) - print(u'{:<16}: {}'.format('wikipedia_url', - entity.metadata.get('wikipedia_url', '-'))) - - -# [START def_entity_sentiment_text] -def entity_sentiment_text(text): - """Detects entity sentiment in the provided text.""" - client = language_v1beta2.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - document = types.Document( - content=text.encode('utf-8'), - type=enums.Document.Type.PLAIN_TEXT) - - # Detect and send native Python encoding to receive correct word offsets. - encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print('Mentions: ') - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END def_entity_sentiment_text] - - -def entity_sentiment_file(gcs_uri): - """Detects entity sentiment in a Google Cloud Storage file.""" - client = language_v1beta2.LanguageServiceClient() - - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detect and send native Python encoding to receive correct word offsets. - encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) - - -def syntax_text(text): - """Detects syntax in the text.""" - client = language_v1beta2.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects syntax in the document. 
You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - tokens = client.analyze_syntax(document).tokens - - # part-of-speech tags from enums.PartOfSpeech.Tag - pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', - 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') - - for token in tokens: - print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], - token.text.content)) - - -def syntax_file(gcs_uri): - """Detects syntax in the file located in Google Cloud Storage.""" - client = language_v1beta2.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects syntax in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - tokens = client.analyze_syntax(document).tokens - - # part-of-speech tags from enums.PartOfSpeech.Tag - pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', - 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX') - - for token in tokens: - print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag], - token.text.content)) - - -# [START def_classify_text] -def classify_text(text): - """Classifies content categories of the provided text.""" - # [START beta_client] - client = language_v1beta2.LanguageServiceClient() - # [END beta_client] - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - document = types.Document( - content=text.encode('utf-8'), - type=enums.Document.Type.PLAIN_TEXT) - - categories = client.classify_text(document).categories - - for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('name', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END def_classify_text] - - -# [START def_classify_file] -def classify_file(gcs_uri): - """Classifies content categories of the text in a Google Cloud Storage - file. 
- """ - client = language_v1beta2.LanguageServiceClient() - - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - categories = client.classify_text(document).categories - - for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('name', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END def_classify_file] - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - subparsers = parser.add_subparsers(dest='command') - - classify_text_parser = subparsers.add_parser( - 'classify-text', help=classify_text.__doc__) - classify_text_parser.add_argument('text') - - classify_text_parser = subparsers.add_parser( - 'classify-file', help=classify_file.__doc__) - classify_text_parser.add_argument('gcs_uri') - - sentiment_entities_text_parser = subparsers.add_parser( - 'sentiment-entities-text', help=entity_sentiment_text.__doc__) - sentiment_entities_text_parser.add_argument('text') - - sentiment_entities_file_parser = subparsers.add_parser( - 'sentiment-entities-file', help=entity_sentiment_file.__doc__) - sentiment_entities_file_parser.add_argument('gcs_uri') - - sentiment_text_parser = subparsers.add_parser( - 'sentiment-text', help=sentiment_text.__doc__) - sentiment_text_parser.add_argument('text') - - sentiment_file_parser = subparsers.add_parser( - 'sentiment-file', help=sentiment_file.__doc__) - sentiment_file_parser.add_argument('gcs_uri') - - entities_text_parser = subparsers.add_parser( - 'entities-text', help=entities_text.__doc__) - entities_text_parser.add_argument('text') - - entities_file_parser = subparsers.add_parser( - 'entities-file', help=entities_file.__doc__) - entities_file_parser.add_argument('gcs_uri') - - syntax_text_parser = subparsers.add_parser( - 'syntax-text', help=syntax_text.__doc__) - syntax_text_parser.add_argument('text') - - syntax_file_parser = subparsers.add_parser( - 'syntax-file', help=syntax_file.__doc__) - syntax_file_parser.add_argument('gcs_uri') - - args = parser.parse_args() - - if args.command == 'sentiment-text': - sentiment_text(args.text) - elif args.command == 'sentiment-file': - sentiment_file(args.gcs_uri) - elif args.command == 'entities-text': - entities_text(args.text) - elif args.command == 'entities-file': - entities_file(args.gcs_uri) - elif args.command == 'syntax-text': - syntax_text(args.text) - elif args.command == 'syntax-file': - syntax_file(args.gcs_uri) - elif args.command == 'sentiment-entities-text': - entity_sentiment_text(args.text) - elif args.command == 'sentiment-entities-file': - entity_sentiment_file(args.gcs_uri) - elif args.command == 'classify-text': - classify_text(args.text) - elif args.command == 'classify-file': - classify_file(args.gcs_uri) diff --git a/samples/snippets/cloud-client/v1beta2/snippets_test.py b/samples/snippets/cloud-client/v1beta2/snippets_test.py deleted file mode 100644 index 5924ffb4..00000000 --- a/samples/snippets/cloud-client/v1beta2/snippets_test.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 Google, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import snippets - -BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] -TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) -LONG_TEST_FILE_URL = 'gs://{}/android_text.txt'.format(BUCKET) - - -def test_sentiment_text(capsys): - snippets.sentiment_text('President Obama is speaking at the White House.') - out, _ = capsys.readouterr() - assert 'Score: 0' in out - - -def test_sentiment_utf(capsys): - snippets.sentiment_text( - u'1er site d\'information. Les articles du journal et toute l\'' + - u'actualité en continu : International, France, Société, Economie, ' + - u'Culture, Environnement') - out, _ = capsys.readouterr() - assert 'Score: 0' in out - - -def test_sentiment_file(capsys): - snippets.sentiment_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'Score: 0' in out - - -def test_entities_text(capsys): - snippets.entities_text('President Obama is speaking at the White House.') - out, _ = capsys.readouterr() - assert 'name' in out - assert ': Obama' in out - - -def test_entities_file(capsys): - snippets.entities_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'name' in out - assert ': Obama' in out - - -def test_syntax_text(capsys): - snippets.syntax_text('President Obama is speaking at the White House.') - out, _ = capsys.readouterr() - assert 'NOUN: President' in out - - -def test_syntax_file(capsys): - snippets.syntax_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'NOUN: President' in out - - -def test_sentiment_entities_text(capsys): - snippets.entity_sentiment_text( - 'President Obama is speaking at the White House.') - out, _ = capsys.readouterr() - assert 'Content : White House' in out - - -def test_sentiment_entities_file(capsys): - snippets.entity_sentiment_file(TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'Content : White House' in out - - -def test_sentiment_entities_utf(capsys): - snippets.entity_sentiment_text( - 'foo→bar') - out, _ = capsys.readouterr() - assert 'Begin Offset : 4' in out - - -def test_classify_text(capsys): - snippets.classify_text( - 'Android is a mobile operating system developed by Google, ' - 'based on the Linux kernel and designed primarily for touchscreen ' - 'mobile devices such as smartphones and tablets.') - out, _ = capsys.readouterr() - assert 'name' in out - assert '/Computers & Electronics' in out - - -def test_classify_file(capsys): - snippets.classify_file(LONG_TEST_FILE_URL) - out, _ = capsys.readouterr() - assert 'name' in out - assert '/Computers & Electronics' in out diff --git a/samples/snippets/movie_nl/README.md b/samples/snippets/movie_nl/README.md deleted file mode 100644 index 95c05dbb..00000000 --- a/samples/snippets/movie_nl/README.md +++ /dev/null @@ -1,157 +0,0 @@ -# Introduction - -[![Open in Cloud Shell][shell_img]][shell_link] - -[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png -[shell_link]: 
https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/movie_nl/README.md -This sample is an application of the Google Cloud Platform Natural Language API. -It uses the [imdb movie reviews data set](https://www.cs.cornell.edu/people/pabo/movie-review-data/) -from [Cornell University](http://www.cs.cornell.edu/) and performs sentiment & entity -analysis on it. It combines the capabilities of sentiment analysis and entity recognition -to come up with actors/directors who are the most and least popular. - -### Set Up to Authenticate With Your Project's Credentials - -Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) -steps in the Quickstart doc to create a project and enable the -Cloud Natural Language API. Following those steps, make sure that you -[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), -and export the following environment variable: - -``` -export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json -``` - -**Note:** If you get an error saying your API hasn't been enabled, make sure -that you have correctly set this environment variable, and that the project that -you got the service account from has the Natural Language API enabled. - -## How it works -This sample uses the Natural Language API to annotate the input text. The -movie review document is broken into sentences using the `extract_syntax` feature. -Each sentence is sent to the API for sentiment analysis. The positive and negative -sentiment values are combined to come up with a single overall sentiment of the -movie document. - -In addition to the sentiment, the program also extracts the entities of type -`PERSON`, who are the actors in the movie (including the director and anyone -important). These entities are assigned the sentiment value of the document to -come up with the most and least popular actors/directors. - -### Movie document -We define a movie document as a set of reviews. These reviews are individual -sentences and we use the NL API to extract the sentences from the document. See -an example movie document below. - -``` - Sample review sentence 1. Sample review sentence 2. Sample review sentence 3. -``` - -### Sentences and Sentiment -Each sentence from the above document is assigned a sentiment as below. - -``` - Sample review sentence 1 => Sentiment 1 - Sample review sentence 2 => Sentiment 2 - Sample review sentence 3 => Sentiment 3 -``` - -### Sentiment computation -The final sentiment is computed by simply adding the sentence sentiments. - -``` - Total Sentiment = Sentiment 1 + Sentiment 2 + Sentiment 3 -``` - - -### Entity extraction and Sentiment assignment -Entities with type `PERSON` are extracted from the movie document using the NL -API. Since these entities are mentioned in their respective movie document, -they are associated with the document sentiment. 
- -``` - Document 1 => Sentiment 1 - - Person 1 - Person 2 - Person 3 - - Document 2 => Sentiment 2 - - Person 2 - Person 4 - Person 5 -``` - -Based on the above data we can calculate the sentiment associated with Person 2: - -``` - Person 2 => (Sentiment 1 + Sentiment 2) -``` - -## Movie Data Set -We have used the Cornell Movie Review data as our input. Please follow the instructions below to download and extract the data. - -### Download Instructions - -``` - $ curl -O http://www.cs.cornell.edu/people/pabo/movie-review-data/mix20_rand700_tokens.zip - $ unzip mix20_rand700_tokens.zip -``` - -## Command Line Usage -In order to use the movie analyzer, follow the instructions below. (Note that the `--sample` parameter below runs the script on -fewer documents, and can be omitted to run it on the entire corpus) - -### Install Dependencies - -Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. - -Then, install dependencies by running the following pip command: - -``` -$ pip install -r requirements.txt -``` -### How to Run - -``` -$ python main.py analyze --inp "tokens/*/*" \ - --sout sentiment.json \ - --eout entity.json \ - --sample 5 -``` - -You should see the log file `movie.log` created. - -## Output Data -The program produces sentiment and entity output in json format. For example: - -### Sentiment Output -``` - { - "doc_id": "cv310_tok-16557.txt", - "sentiment": 3.099, - "label": -1 - } -``` - -### Entity Output - -``` - { - "name": "Sean Patrick Flanery", - "wiki_url": "http://en.wikipedia.org/wiki/Sean_Patrick_Flanery", - "sentiment": 3.099 - } -``` - -### Entity Output Sorting -In order to sort and rank the entities generated, use the same `main.py` script. For example, -this will print the top 5 actors with negative sentiment: - -``` -$ python main.py rank --entity_input entity.json \ - --sentiment neg \ - --reverse True \ - --sample 5 -``` diff --git a/samples/snippets/movie_nl/main.py b/samples/snippets/movie_nl/main.py deleted file mode 100644 index 06be1c9c..00000000 --- a/samples/snippets/movie_nl/main.py +++ /dev/null @@ -1,334 +0,0 @@ -# Copyright 2016 Google, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import codecs -import glob -import json -import logging -import os - -import googleapiclient.discovery -from googleapiclient.errors import HttpError -import requests - - -def analyze_document(service, document): - """Analyze the document and get the distribution of sentiments and - the movie name.""" - logging.info('Analyzing {}'.format(document.doc_id)) - - sentiments, entities = document.extract_sentiment_entities(service) - return sentiments, entities - - -def get_request_body(text, syntax=True, entities=True, sentiment=True): - """Creates the body of the request to the language api in - order to get an appropriate api response.""" - body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'features': { - 'extract_syntax': syntax, - 'extract_entities': entities, - 'extract_document_sentiment': sentiment, - }, - 'encoding_type': 'UTF32' - } - - return body - - -class Document(object): - """Document class captures a single document of movie reviews.""" - - def __init__(self, text, doc_id, doc_path): - self.text = text - self.doc_id = doc_id - self.doc_path = doc_path - self.sentiment_entity_pair = None - self.label = None - - def extract_sentiment_entities(self, service): - """Extract the sentences in a document.""" - - if self.sentiment_entity_pair is not None: - return self.sentence_entity_pair - - docs = service.documents() - request_body = get_request_body( - self.text, - syntax=False, - entities=True, - sentiment=True) - request = docs.annotateText(body=request_body) - - ent_list = [] - - response = request.execute() - entities = response.get('entities', []) - documentSentiment = response.get('documentSentiment', {}) - - for entity in entities: - ent_type = entity.get('type') - wiki_url = entity.get('metadata', {}).get('wikipedia_url') - - if ent_type == 'PERSON' and wiki_url is not None: - ent_list.append(wiki_url) - - self.sentiment_entity_pair = (documentSentiment, ent_list) - - return self.sentiment_entity_pair - - -def to_sentiment_json(doc_id, sent, label): - """Convert the sentiment info to json. - - Args: - doc_id: Document id - sent: Overall Sentiment for the document - label: Actual label +1, 0, -1 for the document - - Returns: - String json representation of the input - - """ - json_doc = {} - - json_doc['doc_id'] = doc_id - json_doc['sentiment'] = float('%.3f' % sent) - json_doc['label'] = label - - return json.dumps(json_doc) - - -def get_wiki_title(wiki_url): - """Get the wikipedia page title for a given wikipedia URL. - - Args: - wiki_url: Wikipedia URL e.g., http://en.wikipedia.org/wiki/Sean_Connery - - Returns: - Wikipedia canonical name e.g., Sean Connery - - """ - try: - content = requests.get(wiki_url).text - return content.split('title')[1].split('-')[0].split('>')[1].strip() - except KeyError: - return os.path.basename(wiki_url).replace('_', ' ') - - -def to_entity_json(entity, entity_sentiment, entity_frequency): - """Convert entities and their associated sentiment to json. 
- - Args: - entity: Wikipedia entity name - entity_sentiment: Sentiment associated with the entity - entity_frequency: Frequency of the entity in the corpus - - Returns: - Json string representation of input - - """ - json_doc = {} - - avg_sentiment = float(entity_sentiment) / float(entity_frequency) - - json_doc['wiki_url'] = entity - json_doc['name'] = get_wiki_title(entity) - json_doc['sentiment'] = float('%.3f' % entity_sentiment) - json_doc['avg_sentiment'] = float('%.3f' % avg_sentiment) - - return json.dumps(json_doc) - - -def get_sentiment_entities(service, document): - """Compute the overall sentiment volume in the document. - - Args: - service: Client to Google Natural Language API - document: Movie review document (See Document object) - - Returns: - Tuple of total sentiment and entities found in the document - - """ - - sentiments, entities = analyze_document(service, document) - score = sentiments.get('score') - - return (score, entities) - - -def get_sentiment_label(sentiment): - """Return the sentiment label based on the sentiment quantity.""" - if sentiment < 0: - return -1 - elif sentiment > 0: - return 1 - else: - return 0 - - -def process_movie_reviews(service, reader, sentiment_writer, entity_writer): - """Perform some sentiment math and come up with movie review.""" - collected_entities = {} - - for document in reader: - try: - sentiment_total, entities = get_sentiment_entities( - service, document) - except HttpError as e: - logging.error('Error process_movie_reviews {}'.format(e.content)) - continue - - document.label = get_sentiment_label(sentiment_total) - - sentiment_writer.write( - to_sentiment_json( - document.doc_id, - sentiment_total, - document.label - ) - ) - - sentiment_writer.write('\n') - - for ent in entities: - ent_sent, frequency = collected_entities.get(ent, (0, 0)) - ent_sent += sentiment_total - frequency += 1 - - collected_entities[ent] = (ent_sent, frequency) - - for entity, sentiment_frequency in collected_entities.items(): - entity_writer.write(to_entity_json(entity, sentiment_frequency[0], - sentiment_frequency[1])) - entity_writer.write('\n') - - sentiment_writer.flush() - entity_writer.flush() - - -def document_generator(dir_path_pattern, count=None): - """Generator for the input movie documents. 
- - Args: - dir_path_pattern: Input dir pattern e.g., "foo/bar/*/*" - count: Number of documents to read else everything if None - - Returns: - Generator which contains Document (See above) - - """ - for running_count, item in enumerate(glob.iglob(dir_path_pattern)): - if count and running_count >= count: - raise StopIteration() - - doc_id = os.path.basename(item) - - with codecs.open(item, encoding='utf-8') as f: - try: - text = f.read() - except UnicodeDecodeError: - continue - - yield Document(text, doc_id, item) - - -def rank_entities(reader, sentiment=None, topn=None, reverse_bool=False): - """Rank the entities (actors) based on their sentiment - assigned from the movie.""" - - items = [] - for item in reader: - json_item = json.loads(item) - sent = json_item.get('sentiment') - entity_item = (sent, json_item) - - if sentiment: - if sentiment == 'pos' and sent > 0: - items.append(entity_item) - elif sentiment == 'neg' and sent < 0: - items.append(entity_item) - else: - items.append(entity_item) - - items.sort(reverse=reverse_bool) - items = [json.dumps(item[1]) for item in items] - - print('\n'.join(items[:topn])) - - -def analyze(input_dir, sentiment_writer, entity_writer, sample, log_file): - """Analyze the document for sentiment and entities""" - - # Create logger settings - logging.basicConfig(filename=log_file, level=logging.DEBUG) - - # Create a Google Service object - service = googleapiclient.discovery.build('language', 'v1') - - reader = document_generator(input_dir, sample) - - # Process the movie documents - process_movie_reviews(service, reader, sentiment_writer, entity_writer) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - - subparsers = parser.add_subparsers(dest='command') - - rank_parser = subparsers.add_parser('rank') - - rank_parser.add_argument( - '--entity_input', help='location of entity input') - rank_parser.add_argument( - '--sentiment', help='filter sentiment as "neg" or "pos"') - rank_parser.add_argument( - '--reverse', help='reverse the order of the items', type=bool, - default=False - ) - rank_parser.add_argument( - '--sample', help='number of top items to process', type=int, - default=None - ) - - analyze_parser = subparsers.add_parser('analyze') - - analyze_parser.add_argument( - '--inp', help='location of the input', required=True) - analyze_parser.add_argument( - '--sout', help='location of the sentiment output', required=True) - analyze_parser.add_argument( - '--eout', help='location of the entity output', required=True) - analyze_parser.add_argument( - '--sample', help='number of top items to process', type=int) - analyze_parser.add_argument('--log_file', default='movie.log') - - args = parser.parse_args() - - if args.command == 'analyze': - with open(args.sout, 'w') as sout, open(args.eout, 'w') as eout: - analyze(args.inp, sout, eout, args.sample, args.log_file) - elif args.command == 'rank': - with open(args.entity_input, 'r') as entity_input: - rank_entities( - entity_input, args.sentiment, args.sample, args.reverse) diff --git a/samples/snippets/movie_nl/main_test.py b/samples/snippets/movie_nl/main_test.py deleted file mode 100644 index 7e33cefd..00000000 --- a/samples/snippets/movie_nl/main_test.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2016 Google, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json - -import googleapiclient.discovery -import six - -import main - - -def test_get_request_body(): - text = 'hello world' - body = main.get_request_body(text, syntax=True, entities=True, - sentiment=False) - assert body.get('document').get('content') == text - - assert body.get('features').get('extract_syntax') is True - assert body.get('features').get('extract_entities') is True - assert body.get('features').get('extract_document_sentiment') is False - - -def test_get_sentiment_label(): - assert main.get_sentiment_label(20.50) == 1 - assert main.get_sentiment_label(-42.34) == -1 - - -def test_to_sentiment_json(): - doc_id = '12345' - sentiment = 23.344564 - label = 1 - - sentiment_json = json.loads( - main.to_sentiment_json(doc_id, sentiment, label) - ) - - assert sentiment_json.get('doc_id') == doc_id - assert sentiment_json.get('sentiment') == 23.345 - assert sentiment_json.get('label') == label - - -def test_process_movie_reviews(): - service = googleapiclient.discovery.build('language', 'v1') - - doc1 = main.Document('Top Gun was awesome and Tom Cruise rocked!', 'doc1', - 'doc1') - doc2 = main.Document('Tom Cruise is a great actor.', 'doc2', 'doc2') - - reader = [doc1, doc2] - swriter = six.StringIO() - ewriter = six.StringIO() - - main.process_movie_reviews(service, reader, swriter, ewriter) - - sentiments = swriter.getvalue().strip().split('\n') - entities = ewriter.getvalue().strip().split('\n') - - sentiments = [json.loads(sentiment) for sentiment in sentiments] - entities = [json.loads(entity) for entity in entities] - - # assert sentiments - assert sentiments[0].get('sentiment') > 0 - assert sentiments[0].get('label') == 1 - - assert sentiments[1].get('sentiment') > 0 - assert sentiments[1].get('label') == 1 - - # assert entities - assert len(entities) == 1 - assert entities[0].get('name') == 'Tom Cruise' - assert (entities[0].get('wiki_url') == - 'https://en.wikipedia.org/wiki/Tom_Cruise') - assert entities[0].get('sentiment') > 0 - - -def test_rank_positive_entities(capsys): - reader = [ - ('{"avg_sentiment": -12.0, ' - '"name": "Patrick Macnee", "sentiment": -12.0}'), - ('{"avg_sentiment": 5.0, ' - '"name": "Paul Rudd", "sentiment": 5.0}'), - ('{"avg_sentiment": -5.0, ' - '"name": "Martha Plimpton", "sentiment": -5.0}'), - ('{"avg_sentiment": 7.0, ' - '"name": "Lucy (2014 film)", "sentiment": 7.0}') - ] - - main.rank_entities(reader, 'pos', topn=1, reverse_bool=False) - out, err = capsys.readouterr() - - expected = ('{"avg_sentiment": 5.0, ' - '"name": "Paul Rudd", "sentiment": 5.0}') - - expected = ''.join(sorted(expected)) - out = ''.join(sorted(out.strip())) - assert out == expected - - -def test_rank_negative_entities(capsys): - reader = [ - ('{"avg_sentiment": -12.0, ' - '"name": "Patrick Macnee", "sentiment": -12.0}'), - ('{"avg_sentiment": 5.0, ' - '"name": "Paul Rudd", "sentiment": 5.0}'), - ('{"avg_sentiment": -5.0, ' - '"name": "Martha Plimpton", "sentiment": -5.0}'), - ('{"avg_sentiment": 7.0, ' - '"name": "Lucy (2014 film)", "sentiment": 7.0}') - ] - - 
main.rank_entities(reader, 'neg', topn=1, reverse_bool=True) - out, err = capsys.readouterr() - - expected = ('{"avg_sentiment": -5.0, ' - '"name": "Martha Plimpton", "sentiment": -5.0}') - - expected = ''.join(sorted(expected)) - out = ''.join(sorted(out.strip())) - assert out == expected diff --git a/samples/snippets/movie_nl/requirements.txt b/samples/snippets/movie_nl/requirements.txt deleted file mode 100644 index 9718b185..00000000 --- a/samples/snippets/movie_nl/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -google-api-python-client==1.7.4 -google-auth==1.5.1 -google-auth-httplib2==0.0.3 -requests==2.19.1 diff --git a/samples/snippets/ocr_nl/README.md b/samples/snippets/ocr_nl/README.md deleted file mode 100644 index a34ff317..00000000 --- a/samples/snippets/ocr_nl/README.md +++ /dev/null @@ -1,232 +0,0 @@ - - -[![Open in Cloud Shell][shell_img]][shell_link] - -[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png -[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/ocr_nl/README.md -# Using the Cloud Natural Language API to analyze image text found with Cloud Vision - -This example uses the [Cloud Vision API](https://cloud.google.com/vision/) to -detect text in images, then analyzes that text using the [Cloud NL (Natural -Language) API](https://cloud.google.com/natural-language/) to detect -[entities](https://cloud.google.com/natural-language/docs/basics#entity_analysis) -in the text. It stores the detected entity -information in an [sqlite3](https://www.sqlite.org) database, which may then be -queried. - -(This kind of analysis can be useful with scans of brochures and fliers, -invoices, and other types of company documents... or maybe just organizing your -memes). - -After the example script has analyzed a directory of images, it outputs some -information on the images' entities to STDOUT. You can also further query -the generated sqlite3 database. - -## Setup - -### Install sqlite3 as necessary - -The example requires that sqlite3 be installed. Most likely, sqlite3 is already -installed for you on your machine, but if not, you can find it -[here](https://www.sqlite.org/download.html). - -### Set Up to Authenticate With Your Project's Credentials - -* Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project) -steps in the Quickstart doc to create a project and enable the -Cloud Natural Language API. 
-* Following those steps, make sure that you [Set Up a Service - Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account), - and export the following environment variable: - - ``` - export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json - ``` -* This sample also requires that you [enable the Cloud Vision - API](https://console.cloud.google.com/apis/api/vision.googleapis.com/overview?project=_) - -## Running the example - -Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed. - -To run the example, install the necessary libraries using pip: - -```sh -$ pip install -r requirements.txt -``` - -You must also be set up to authenticate with the Cloud APIs using your -project's service account credentials, as described above. - -Then, run the script on a directory of images to do the analysis, E.g.: - -```sh -$ python main.py --input_directory= -``` - -You can try this on a sample directory of images: - -```sh -$ curl -O http://storage.googleapis.com/python-docs-samples-tests/language/ocr_nl-images.zip -$ unzip ocr_nl-images.zip -$ python main.py --input_directory=images/ -``` - -## A walkthrough of the example and its results - -Let's take a look at what the example generates when run on the `images/` -sample directory, and how it does it. - -The script looks at each image file in the given directory, and uses the Vision -API's text detection capabilities (OCR) to find any text in each image. It -passes that info to the NL API, and asks it to detect [entities](xxx) in the -discovered text, then stores this information in a queryable database. - -To keep things simple, we're just passing to the NL API all the text found in a -given image, in one string. Note that sometimes this string can include -misinterpreted characters (if the image text was not very clear), or list words -"out of order" from how a human would interpret them. So, the text that is -actually passed to the NL API might not be quite what you would have predicted -with your human eyeballs. - -The Entity information returned by the NL API includes *type*, *name*, *salience*, -information about where in the text the given entity was found, and detected -language. It may also include *metadata*, including a link to a Wikipedia URL -that the NL API believes this entity maps to. See the -[documentation](https://cloud.google.com/natural-language/docs/) and the [API -reference pages](https://cloud.google.com/natural-language/reference/rest/v1beta1/Entity) -for more information about `Entity` fields. - -For example, if the NL API was given the sentence: - -``` -"Holmes and Watson walked over to the cafe." 
-``` - -it would return a response something like the following: - -``` -{ - "entities": [{ - "salience": 0.51629782, - "mentions": [{ - "text": { - "content": "Holmes", - "beginOffset": 0 - }}], - "type": "PERSON", - "name": "Holmes", - "metadata": { - "wikipedia_url": "http://en.wikipedia.org/wiki/Sherlock_Holmes" - }}, - { - "salience": 0.22334209, - "mentions": [{ - "text": { - "content": "Watson", - "beginOffset": 11 - }}], - "type": "PERSON", - "name": "Watson", - "metadata": { - "wikipedia_url": "http://en.wikipedia.org/wiki/Dr._Watson" - }}], - "language": "en" -} -``` - -Note that the NL API determined from context that "Holmes" was referring to -'Sherlock Holmes', even though the name "Sherlock" was not included. - -Note also that not all nouns in a given sentence are detected as Entities. An -Entity represents a phrase in the text that is a known entity, such as a person, -an organization, or location. The generic mention of a 'cafe' is not treated as -an entity in this sense. - -For each image file, we store its detected entity information (if any) in an -sqlite3 database. - -### Querying for information about the detected entities - -Once the detected entity information from all the images is stored in the -sqlite3 database, we can run some queries to do some interesting analysis. The -script runs a couple of such example query sets and outputs the result to STDOUT. - -The first set of queries outputs information about the top 15 most frequent -entity names found in the images, and the second outputs information about the -top 15 most frequent Wikipedia URLs found. - -For example, with the sample image set, note that the name 'Sherlock Holmes' is -found three times, but entities associated with the URL -http://en.wikipedia.org/wiki/Sherlock_Holmes are found four times; one of the -entity names was only "Holmes", but the NL API detected from context that it -referred to Sherlock Holmes. Similarly, you can see that mentions of 'Hive' and -'Spark' mapped correctly – given their context – to the URLs of those Apache -products. - -``` -----entity: http://en.wikipedia.org/wiki/Apache_Hive was found with count 1 -Found in file images/IMG_20160621_133020.jpg, detected as type OTHER, with - locale en. -names(s): set([u'hive']) -salience measure(s): set([0.0023808887]) -``` - -Similarly, 'Elizabeth' (in screencaps of text from "Pride and Prejudice") is -correctly mapped to http://en.wikipedia.org/wiki/Elizabeth_Bennet because of the -context of the surrounding text. - -``` -----entity: http://en.wikipedia.org/wiki/Elizabeth_Bennet was found with count 2 -Found in file images/Screenshot 2016-06-19 11.51.50.png, detected as type PERSON, with - locale en. -Found in file images/Screenshot 2016-06-19 12.08.30.png, detected as type PERSON, with - locale en. -names(s): set([u'elizabeth']) -salience measure(s): set([0.34601286, 0.0016268975]) -``` - -## Further queries to the sqlite3 database - -When the script runs, it makes a couple of example queries to the database -containing the entity information returned from the NL API. You can make further -queries on that database by starting up sqlite3 from the command line, and -passing it the name of the database file generated by running the example. 
This -file will be in the same directory, and have `entities` as a prefix, with the -timestamp appended. (If you have run the example more than once, a new database -file will be created each time). - -Run sqlite3 as follows (using the name of your own database file): - -```sh -$ sqlite3 entities1466518508.db -``` - -You'll see something like this: - -``` -SQLite version 3.8.10.2 2015-05-20 18:17:19 -Enter ".help" for usage hints. -sqlite> -``` - -From this prompt, you can make any queries on the data that you want. E.g., -start with something like: - -``` -sqlite> select * from entities limit 20; -``` - -Or, try this to see in which images the most entities were detected: - -``` -sqlite> select filename, count(filename) from entities group by filename; -``` - -You can do more complex queries to get further information about the entities -that have been discovered in your images. E.g., you might want to investigate -which of the entities are most commonly found together in the same image. See -the [SQLite documentation](https://www.sqlite.org/docs.html) for more -information. - - diff --git a/samples/snippets/ocr_nl/main.py b/samples/snippets/ocr_nl/main.py deleted file mode 100755 index db156054..00000000 --- a/samples/snippets/ocr_nl/main.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This example uses the Google Cloud Vision API to detect text in images, then -analyzes that text using the Google Cloud Natural Language API to detect -entities in the text. It stores the detected entity information in an sqlite3 -database, which may then be queried. - -After this script has analyzed a directory of images, it outputs some -information on the images' entities to STDOUT. You can also further query -the generated sqlite3 database; see the README for more information. 
- -Run the script on a directory of images to do the analysis, E.g.: - $ python main.py --input_directory= - -You can try this on a sample directory of images: - $ curl -O http://storage.googleapis.com/python-docs-samples-tests/language/ocr_nl-images.zip - $ unzip ocr_nl-images.zip - $ python main.py --input_directory=images/ - -""" # noqa - -import argparse -import base64 -import contextlib -import logging -import os -import sqlite3 -import sys -import time - -import googleapiclient.discovery -import googleapiclient.errors - -BATCH_SIZE = 10 - - -class VisionApi(object): - """Construct and use the Cloud Vision API service.""" - - def __init__(self): - self.service = googleapiclient.discovery.build('vision', 'v1') - - def detect_text(self, input_filenames, num_retries=3, max_results=6): - """Uses the Vision API to detect text in the given file.""" - batch_request = [] - for filename in input_filenames: - request = { - 'image': {}, - 'features': [{ - 'type': 'TEXT_DETECTION', - 'maxResults': max_results, - }] - } - - # Accept both files in cloud storage, as well as local files. - if filename.startswith('gs://'): - request['image']['source'] = { - 'gcsImageUri': filename - } - else: - with open(filename, 'rb') as image_file: - request['image']['content'] = base64.b64encode( - image_file.read()).decode('UTF-8') - - batch_request.append(request) - - request = self.service.images().annotate( - body={'requests': batch_request}) - - try: - responses = request.execute(num_retries=num_retries) - if 'responses' not in responses: - return {} - - text_response = {} - for filename, response in zip( - input_filenames, responses['responses']): - - if 'error' in response: - logging.error('API Error for {}: {}'.format( - filename, - response['error'].get('message', ''))) - continue - - text_response[filename] = response.get('textAnnotations', []) - - return text_response - - except googleapiclient.errors.HttpError as e: - logging.error('Http Error for {}: {}'.format(filename, e)) - except KeyError as e2: - logging.error('Key error: {}'.format(e2)) - - -class TextAnalyzer(object): - """Construct and use the Google Natural Language API service.""" - - def __init__(self, db_filename=None): - self.service = googleapiclient.discovery.build('language', 'v1') - - # This list will store the entity information gleaned from the - # image files. - self.entity_info = [] - - # This is the filename of the sqlite3 database to save to - self.db_filename = db_filename or 'entities{}.db'.format( - int(time.time())) - - def _get_native_encoding_type(self): - """Returns the encoding type that matches Python's native strings.""" - if sys.maxunicode == 65535: - return 'UTF16' - else: - return 'UTF32' - - def nl_detect(self, text): - """Use the Natural Language API to analyze the given text string.""" - # We're only requesting 'entity' information from the Natural Language - # API at this time. 
- body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'encodingType': self._get_native_encoding_type(), - } - entities = [] - try: - request = self.service.documents().analyzeEntities(body=body) - response = request.execute() - entities = response['entities'] - except googleapiclient.errors.HttpError as e: - logging.error('Http Error: %s' % e) - except KeyError as e2: - logging.error('Key error: %s' % e2) - return entities - - def add_entities(self, filename, locale, document): - """Apply the Natural Language API to the document, and collect the - detected entities.""" - - # Apply the Natural Language API to the document. - entities = self.nl_detect(document) - self.extract_and_save_entity_info(entities, locale, filename) - - def extract_entity_info(self, entity): - """Extract information about an entity.""" - type = entity['type'] - name = entity['name'].lower() - metadata = entity['metadata'] - salience = entity['salience'] - wiki_url = metadata.get('wikipedia_url', None) - return (type, name, salience, wiki_url) - - def extract_and_save_entity_info(self, entities, locale, filename): - for entity in entities: - type, name, salience, wiki_url = self.extract_entity_info(entity) - # Because this is a small example, we're using a list to hold - # all the entity information, then we'll insert it into the - # database all at once when we've processed all the files. - # For a larger data set, you would want to write to the database - # in batches. - self.entity_info.append( - (locale, type, name, salience, wiki_url, filename)) - - def write_entity_info_to_db(self): - """Store the info gleaned about the entities in the text, via the - Natural Language API, in an sqlite3 database table, and then print out - some simple analytics. - """ - logging.info('Saving entity info to the sqlite3 database.') - # Create the db. - with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: - with conn as cursor: - # Create table - cursor.execute( - 'CREATE TABLE if not exists entities (locale text, ' - 'type text, name text, salience real, wiki_url text, ' - 'filename text)') - with conn as cursor: - # Load all the data - cursor.executemany( - 'INSERT INTO entities VALUES (?,?,?,?,?,?)', - self.entity_info) - - def output_entity_data(self): - """Output some info about the entities by querying the generated - sqlite3 database. - """ - - with contextlib.closing(sqlite3.connect(self.db_filename)) as conn: - - # This query finds the number of times each entity name was - # detected, in descending order by count, and returns information - # about the first 15 names, including the files in which they were - # found, their detected 'salience' and language (locale), and the - # wikipedia urls (if any) associated with them. 
- print('\n==============\nTop 15 most frequent entity names:') - - cursor = conn.cursor() - results = cursor.execute( - 'select name, count(name) as wc from entities ' - 'group by name order by wc desc limit 15;') - - for item in results: - cursor2 = conn.cursor() - print(u'\n----Name: {} was found with count {}'.format(*item)) - results2 = cursor2.execute( - 'SELECT name, type, filename, locale, wiki_url, salience ' - 'FROM entities WHERE name=?', (item[0],)) - urls = set() - for elt in results2: - print(('Found in file {}, detected as type {}, with\n' - ' locale {} and salience {}.').format( - elt[2], elt[1], elt[3], elt[5])) - if elt[4]: - urls.add(elt[4]) - if urls: - print('url(s): {}'.format(urls)) - - # This query finds the number of times each wikipedia url was - # detected, in descending order by count, and returns information - # about the first 15 urls, including the files in which they were - # found and the names and 'salience' with which they were - # associated. - print('\n==============\nTop 15 most frequent Wikipedia URLs:') - c = conn.cursor() - results = c.execute( - 'select wiki_url, count(wiki_url) as wc from entities ' - 'group by wiki_url order by wc desc limit 15;') - - for item in results: - cursor2 = conn.cursor() - print('\n----entity: {} was found with count {}'.format(*item)) - results2 = cursor2.execute( - 'SELECT name, type, filename, locale, salience ' - 'FROM entities WHERE wiki_url=?', (item[0],)) - names = set() - salience = set() - for elt in results2: - print(('Found in file {}, detected as type {}, with\n' - ' locale {}.').format(elt[2], elt[1], elt[3])) - names.add(elt[0]) - salience.add(elt[4]) - print('names(s): {}'.format(names)) - print('salience measure(s): {}'.format(salience)) - - -def extract_description(texts): - """Returns text annotations as a single string""" - document = [] - - for text in texts: - try: - document.append(text['description']) - locale = text['locale'] - # Process only the first entry, which contains all - # text detected. - break - except KeyError as e: - logging.error('KeyError: %s\n%s' % (e, text)) - return (locale, ' '.join(document)) - - -def extract_descriptions(input_filename, texts, text_analyzer): - """Gets the text that was detected in the image.""" - if texts: - locale, document = extract_description(texts) - text_analyzer.add_entities(input_filename, locale, document) - sys.stdout.write('.') # Output a progress indicator. - sys.stdout.flush() - elif texts == []: - print('%s had no discernible text.' % input_filename) - - -def get_text_from_files(vision, input_filenames, text_analyzer): - """Call the Vision API on a file and index the results.""" - texts = vision.detect_text(input_filenames) - if texts: - for filename, text in texts.items(): - extract_descriptions(filename, text, text_analyzer) - - -def batch(list_to_batch, batch_size=BATCH_SIZE): - """Group a list into batches of size batch_size. - - >>> tuple(batch([1, 2, 3, 4, 5], batch_size=2)) - ((1, 2), (3, 4), (5)) - """ - for i in range(0, len(list_to_batch), batch_size): - yield tuple(list_to_batch[i:i + batch_size]) - - -def main(input_dir, db_filename=None): - """Walk through all the image files in the given directory, extracting any - text from them and feeding that text to the Natural Language API for - analysis. 
- """ - # Create a client object for the Vision API - vision_api_client = VisionApi() - # Create an object to analyze our text using the Natural Language API - text_analyzer = TextAnalyzer(db_filename) - - if input_dir: - allfileslist = [] - # Recursively construct a list of all the files in the given input - # directory. - for folder, subs, files in os.walk(input_dir): - for filename in files: - allfileslist.append(os.path.join(folder, filename)) - - # Analyze the text in the files using the Vision and Natural Language - # APIs. - for filenames in batch(allfileslist, batch_size=1): - get_text_from_files(vision_api_client, filenames, text_analyzer) - - # Save the result to a database, then run some queries on the database, - # with output to STDOUT. - text_analyzer.write_entity_info_to_db() - - # now, print some information about the entities detected. - text_analyzer.output_entity_data() - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description='Detects text in the images in the given directory.') - parser.add_argument( - '--input_directory', - help='The image directory you\'d like to detect text in. If left ' - 'unspecified, the --db specified will be queried without being ' - 'updated.') - parser.add_argument( - '--db', help='The filename to use for the sqlite3 database.') - args = parser.parse_args() - - if not (args.input_directory or args.db): - parser.error('Either --input_directory or --db must be specified.') - - main(args.input_directory, args.db) diff --git a/samples/snippets/ocr_nl/main_test.py b/samples/snippets/ocr_nl/main_test.py deleted file mode 100755 index 5a8f72f2..00000000 --- a/samples/snippets/ocr_nl/main_test.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import re -import zipfile - -import requests - -import main - -BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] -TEST_IMAGE_URI = 'gs://{}/language/image8.png'.format(BUCKET) -OCR_IMAGES_URI = 'http://storage.googleapis.com/{}/{}'.format( - BUCKET, 'language/ocr_nl-images-small.zip') - - -def test_batch_empty(): - for batch_size in range(1, 10): - assert len( - list(main.batch([], batch_size=batch_size))) == 0 - - -def test_batch_single(): - for batch_size in range(1, 10): - batched = tuple(main.batch([1], batch_size=batch_size)) - assert batched == ((1,),) - - -def test_single_image_returns_text(): - vision_api_client = main.VisionApi() - - image_path = TEST_IMAGE_URI - texts = vision_api_client.detect_text([image_path]) - - assert image_path in texts - _, document = main.extract_description(texts[image_path]) - assert "daughter" in document - assert "Bennet" in document - assert "hat" in document - - -def test_single_nonimage_returns_error(): - vision_api_client = main.VisionApi() - texts = vision_api_client.detect_text(['README.md']) - assert "README.md" not in texts - - -def test_text_returns_entities(): - text = "Holmes and Watson walked to the cafe." - text_analyzer = main.TextAnalyzer() - entities = text_analyzer.nl_detect(text) - assert entities - etype, ename, salience, wurl = text_analyzer.extract_entity_info( - entities[0]) - assert ename == 'holmes' - - -def test_entities_list(): - vision_api_client = main.VisionApi() - image_path = TEST_IMAGE_URI - texts = vision_api_client.detect_text([image_path]) - locale, document = main.extract_description(texts[image_path]) - text_analyzer = main.TextAnalyzer() - entities = text_analyzer.nl_detect(document) - assert entities - etype, ename, salience, wurl = text_analyzer.extract_entity_info( - entities[0]) - assert ename == 'bennet' - - -def test_main(tmpdir, capsys): - images_path = str(tmpdir.mkdir('images')) - - # First, pull down some test data - response = requests.get(OCR_IMAGES_URI) - images_file = tmpdir.join('images.zip') - images_file.write_binary(response.content) - - # Extract it to the image directory - with zipfile.ZipFile(str(images_file)) as zfile: - zfile.extractall(images_path) - - main.main(images_path, str(tmpdir.join('ocr_nl.db'))) - - stdout, _ = capsys.readouterr() - - assert re.search(r'.* found with count', stdout) diff --git a/samples/snippets/ocr_nl/requirements.txt b/samples/snippets/ocr_nl/requirements.txt deleted file mode 100644 index 5e902918..00000000 --- a/samples/snippets/ocr_nl/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -google-api-python-client==1.7.4 -google-auth==1.5.1 -google-auth-httplib2==0.0.3 diff --git a/samples/snippets/syntax_triples/README.md b/samples/snippets/syntax_triples/README.md deleted file mode 100644 index 551057e7..00000000 --- a/samples/snippets/syntax_triples/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# Using the Cloud Natural Language API to find subject-verb-object triples in text - -[![Open in Cloud Shell][shell_img]][shell_link] - -[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png -[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/syntax_triples/README.md - -This example finds subject-verb-object triples in a given piece of text using -syntax analysis capabilities of -[Cloud Natural 
Language API](https://cloud.google.com/natural-language/).
-To do this, it calls the extractSyntax feature of the API
-and uses the dependency parse tree and part-of-speech tags in the response
-to build the subject-verb-object triples. The results are printed to STDOUT.
-This type of analysis can be considered as the
-first step towards an information extraction task.
-
-## Set Up to Authenticate With Your Project's Credentials
-
-Please follow the [Set Up Your Project](https://cloud.google.com/natural-language/docs/getting-started#set_up_your_project)
-steps in the Quickstart doc to create a project and enable the
-Cloud Natural Language API. Following those steps, make sure that you
-[Set Up a Service Account](https://cloud.google.com/natural-language/docs/common/auth#set_up_a_service_account),
-and export the following environment variable:
-
-```
-export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json
-```
-
-## Running the example
-
-Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed.
-
-To run the example, install the necessary libraries using pip:
-
-```
-$ pip install -r requirements.txt
-```
-You must also be set up to authenticate with the Cloud APIs using your
-project's service account credentials, as described above.
-
-Then, run the script on a file containing the text that you wish to analyze.
-The text must be encoded in UTF8 or ASCII:
-
-```
-$ python main.py 
-```
-
-Try this on a sample text in the resources directory:
-
-```
-$ python main.py resources/obama_wikipedia.txt
-```
-
-## A walkthrough of the example and its results
-
-Let's take a look at what the example generates when run on the
-`obama_wikipedia.txt` sample file, and how it does it.
-
-The goal is to find all subject-verb-object
-triples in the text. The example first sends the text to the Cloud Natural
-Language API to perform extractSyntax analysis. Then, using part-of-speech tags,
- it finds all the verbs in the text. For each verb, it uses the dependency
-parse tree information to find all the dependent tokens.
-
-For example, given the following sentence in the `obama_wikipedia.txt` file:
-
-```
-"He began his presidential campaign in 2007"
-```
-The example finds the verb `began`, and `He`, `campaign`, and `in` as its
-dependencies. Then the script enumerates the dependencies for each verb and
-finds all the subjects and objects. For the sentence above, the found subject
-and object are `He` and `campaign`.
-
-The next step is to complete each subject and object token by adding their
-dependencies to them. For example, in the sentence above, `his` and
-`presidential` are dependent tokens for `campaign`. This is done using the
-dependency parse tree, similar to verb dependencies as explained above. The
-final result is the (`He`, `began`, `his presidential campaign`) triple for
-the example sentence above.
-
-The script performs this analysis for the entire text and prints the result.
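
In outline, the verb-centred search just described can be sketched in a few lines of
Python. This is a condensed illustration, not the exact code in `main.py`; it assumes
`tokens` is the token list from the API's `annotateText` response, with the
`partOfSpeech` and `dependencyEdge` fields documented in the NL API reference.

```python
def find_svo_triples(tokens):
    # Index each token's dependents by the index of its head token.
    dependents = {}
    for i, token in enumerate(tokens):
        head = token['dependencyEdge']['headTokenIndex']
        if i != head:
            dependents.setdefault(head, []).append(i)

    # For every verb, pair its NSUBJ dependents with its DOBJ dependents.
    for head, token in enumerate(tokens):
        if token['partOfSpeech']['tag'] != 'VERB':
            continue
        children = dependents.get(head, [])
        subjects = [i for i in children
                    if tokens[i]['dependencyEdge']['label'] == 'NSUBJ']
        objects = [i for i in children
                   if tokens[i]['dependencyEdge']['label'] == 'DOBJ']
        for subject in subjects:
            for obj in objects:
                yield (subject, head, obj)
```

The full script additionally expands each subject and object index to the complete
phrase headed by that token before printing the triple.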
-For the `obama_wikipedia.txt` file, the result is the following: - -```sh -+------------------------------+------------+------------------------------+ -| Obama | received | national attention | -+------------------------------+------------+------------------------------+ -| He | began | his presidential campaign | -+------------------------------+------------+------------------------------+ -| he | won | sufficient delegates in the | -| | | Democratic Party primaries | -+------------------------------+------------+------------------------------+ -| He | defeated | Republican nominee John | -| | | McCain | -``` diff --git a/samples/snippets/syntax_triples/main.py b/samples/snippets/syntax_triples/main.py deleted file mode 100644 index bbe23866..00000000 --- a/samples/snippets/syntax_triples/main.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This example finds subject-verb-object triples in a given piece of text using -the syntax analysis capabilities of Cloud Natural Language API. The triples are -printed to STDOUT. This can be considered as the first step towards an -information extraction task. - -Run the script on a file containing the text that you wish to analyze. -The text must be encoded in UTF8 or ASCII: - $ python main.py - -Try this on a sample text in the resources directory: - $ python main.py resources/obama_wikipedia.txt -""" - -import argparse -import sys -import textwrap - -import googleapiclient.discovery - - -def dependents(tokens, head_index): - """Returns an ordered list of the token indices of the dependents for - the given head.""" - # Create head->dependency index. - head_to_deps = {} - for i, token in enumerate(tokens): - head = token['dependencyEdge']['headTokenIndex'] - if i != head: - head_to_deps.setdefault(head, []).append(i) - return head_to_deps.get(head_index, ()) - - -def phrase_text_for_head(tokens, text, head_index): - """Returns the entire phrase containing the head token - and its dependents. - """ - begin, end = phrase_extent_for_head(tokens, head_index) - return text[begin:end] - - -def phrase_extent_for_head(tokens, head_index): - """Returns the begin and end offsets for the entire phrase - containing the head token and its dependents. - """ - begin = tokens[head_index]['text']['beginOffset'] - end = begin + len(tokens[head_index]['text']['content']) - for child in dependents(tokens, head_index): - child_begin, child_end = phrase_extent_for_head(tokens, child) - begin = min(begin, child_begin) - end = max(end, child_end) - return (begin, end) - - -def analyze_syntax(text): - """Use the NL API to analyze the given text string, and returns the - response from the API. Requests an encodingType that matches - the encoding used natively by Python. Raises an - errors.HTTPError if there is a connection problem. 
- """ - service = googleapiclient.discovery.build('language', 'v1beta1') - body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'features': { - 'extract_syntax': True, - }, - 'encodingType': get_native_encoding_type(), - } - request = service.documents().annotateText(body=body) - return request.execute() - - -def get_native_encoding_type(): - """Returns the encoding type that matches Python's native strings.""" - if sys.maxunicode == 65535: - return 'UTF16' - else: - return 'UTF32' - - -def find_triples(tokens, - left_dependency_label='NSUBJ', - head_part_of_speech='VERB', - right_dependency_label='DOBJ'): - """Generator function that searches the given tokens - with the given part of speech tag, that have dependencies - with the given labels. For each such head found, yields a tuple - (left_dependent, head, right_dependent), where each element of the - tuple is an index into the tokens array. - """ - for head, token in enumerate(tokens): - if token['partOfSpeech']['tag'] == head_part_of_speech: - children = dependents(tokens, head) - left_deps = [] - right_deps = [] - for child in children: - child_token = tokens[child] - child_dep_label = child_token['dependencyEdge']['label'] - if child_dep_label == left_dependency_label: - left_deps.append(child) - elif child_dep_label == right_dependency_label: - right_deps.append(child) - for left_dep in left_deps: - for right_dep in right_deps: - yield (left_dep, head, right_dep) - - -def show_triple(tokens, text, triple): - """Prints the given triple (left, head, right). For left and right, - the entire phrase headed by each token is shown. For head, only - the head token itself is shown. - - """ - nsubj, verb, dobj = triple - - # Extract the text for each element of the triple. - nsubj_text = phrase_text_for_head(tokens, text, nsubj) - verb_text = tokens[verb]['text']['content'] - dobj_text = phrase_text_for_head(tokens, text, dobj) - - # Pretty-print the triple. - left = textwrap.wrap(nsubj_text, width=28) - mid = textwrap.wrap(verb_text, width=10) - right = textwrap.wrap(dobj_text, width=28) - print('+' + 30 * '-' + '+' + 12 * '-' + '+' + 30 * '-' + '+') - for l, m, r in zip(left, mid, right): - print('| {:<28s} | {:<10s} | {:<28s} |'.format( - l or '', m or '', r or '')) - - -def main(text_file): - # Extracts subject-verb-object triples from the given text file, - # and print each one. - - # Read the input file. - text = open(text_file, 'rb').read().decode('utf8') - - analysis = analyze_syntax(text) - tokens = analysis.get('tokens', []) - - for triple in find_triples(tokens): - show_triple(tokens, text, triple) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'text_file', - help='A file containing the document to process. ' - 'Should be encoded in UTF8 or ASCII') - args = parser.parse_args() - main(args.text_file) diff --git a/samples/snippets/syntax_triples/main_test.py b/samples/snippets/syntax_triples/main_test.py deleted file mode 100755 index 6aa87818..00000000 --- a/samples/snippets/syntax_triples/main_test.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import re - -import main - -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') - - -def test_dependents(): - text = "I am eating a delicious banana" - analysis = main.analyze_syntax(text) - tokens = analysis.get('tokens', []) - assert [0, 1, 5] == main.dependents(tokens, 2) - assert [3, 4] == main.dependents(tokens, 5) - - -def test_phrase_text_for_head(): - text = "A small collection of words" - analysis = main.analyze_syntax(text) - tokens = analysis.get('tokens', []) - assert "words" == main.phrase_text_for_head(tokens, text, 4) - - -def test_find_triples(): - text = "President Obama won the noble prize" - analysis = main.analyze_syntax(text) - tokens = analysis.get('tokens', []) - triples = main.find_triples(tokens) - for triple in triples: - assert (1, 2, 5) == triple - - -def test_obama_example(capsys): - main.main(os.path.join(RESOURCES, 'obama_wikipedia.txt')) - stdout, _ = capsys.readouterr() - lines = stdout.split('\n') - assert re.match( - r'.*Obama\b.*\| received\b.*\| national attention\b', - lines[1]) diff --git a/samples/snippets/syntax_triples/requirements.txt b/samples/snippets/syntax_triples/requirements.txt deleted file mode 100644 index 5e902918..00000000 --- a/samples/snippets/syntax_triples/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -google-api-python-client==1.7.4 -google-auth==1.5.1 -google-auth-httplib2==0.0.3 diff --git a/samples/snippets/syntax_triples/resources/obama_wikipedia.txt b/samples/snippets/syntax_triples/resources/obama_wikipedia.txt deleted file mode 100644 index 1e89d4ab..00000000 --- a/samples/snippets/syntax_triples/resources/obama_wikipedia.txt +++ /dev/null @@ -1 +0,0 @@ -In 2004, Obama received national attention during his campaign to represent Illinois in the United States Senate with his victory in the March Democratic Party primary, his keynote address at the Democratic National Convention in July, and his election to the Senate in November. He began his presidential campaign in 2007 and, after a close primary campaign against Hillary Clinton in 2008, he won sufficient delegates in the Democratic Party primaries to receive the presidential nomination. He then defeated Republican nominee John McCain in the general election, and was inaugurated as president on January 20, 2009. Nine months after his inauguration, Obama was named the 2009 Nobel Peace Prize laureate. diff --git a/samples/snippets/tutorial/README.rst b/samples/snippets/tutorial/README.rst deleted file mode 100644 index 3f83c1a2..00000000 --- a/samples/snippets/tutorial/README.rst +++ /dev/null @@ -1,93 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Natural Language Tutorial Python Samples -=============================================================================== - -.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/README.rst - - -This directory contains samples for Google Cloud Natural Language Tutorial. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API. - - - - -.. _Google Cloud Natural Language Tutorial: https://cloud.google.com/natural-language/docs/ - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Language tutorial -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/tutorial/tutorial.py,language/tutorial/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python tutorial.py - - usage: tutorial.py [-h] movie_review_filename - - positional arguments: - movie_review_filename - The filename of the movie review you'd like to - analyze. - - optional arguments: - -h, --help show this help message and exit - - - - - -.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/tutorial/README.rst.in b/samples/snippets/tutorial/README.rst.in deleted file mode 100644 index 945c701e..00000000 --- a/samples/snippets/tutorial/README.rst.in +++ /dev/null @@ -1,22 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Natural Language Tutorial - short_name: Cloud Natural Language Tutorial - url: https://cloud.google.com/natural-language/docs/ - description: > - The `Google Cloud Natural Language API`_ provides natural language - understanding technologies to developers, including sentiment analysis, - entity recognition, and syntax analysis. This API is part of the larger - Cloud Machine Learning API. - -setup: -- auth -- install_deps - -samples: -- name: Language tutorial - file: tutorial.py - show_help: true - -folder: language/tutorial \ No newline at end of file diff --git a/samples/snippets/tutorial/requirements.txt b/samples/snippets/tutorial/requirements.txt deleted file mode 100644 index 5e902918..00000000 --- a/samples/snippets/tutorial/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -google-api-python-client==1.7.4 -google-auth==1.5.1 -google-auth-httplib2==0.0.3 diff --git a/samples/snippets/tutorial/reviews/bladerunner-mixed.txt b/samples/snippets/tutorial/reviews/bladerunner-mixed.txt deleted file mode 100644 index 3b520b65..00000000 --- a/samples/snippets/tutorial/reviews/bladerunner-mixed.txt +++ /dev/null @@ -1,19 +0,0 @@ -I really wanted to love 'Bladerunner' but ultimately I couldn't get -myself to appreciate it fully. However, you may like it if you're into -science fiction, especially if you're interested in the philosophical -exploration of what it means to be human or machine. Some of the gizmos -like the flying cars and the Vouight-Kampff machine (which seemed very -steampunk), were quite cool. - -I did find the plot pretty slow and but the dialogue and action sequences -were good. Unlike most science fiction films, this one was mostly quiet, and -not all that much happened, except during the last 15 minutes. I didn't -understand why a unicorn was in the movie. The visual effects were fantastic, -however, and the musical score and overall mood was quite interesting. -A futurist Los Angeles that was both highly polished and also falling apart -reminded me of 'Outland.' Certainly, the style of the film made up for -many of its pedantic plot holes. - -If you want your sci-fi to be lasers and spaceships, 'Bladerunner' may -disappoint you. But if you want it to make you think, this movie may -be worth the money. \ No newline at end of file diff --git a/samples/snippets/tutorial/reviews/bladerunner-neg.txt b/samples/snippets/tutorial/reviews/bladerunner-neg.txt deleted file mode 100644 index dbef7627..00000000 --- a/samples/snippets/tutorial/reviews/bladerunner-neg.txt +++ /dev/null @@ -1,3 +0,0 @@ -What was Hollywood thinking with this movie! I hated, -hated, hated it. BORING! I went afterwards and demanded my money back. -They refused. \ No newline at end of file diff --git a/samples/snippets/tutorial/reviews/bladerunner-neutral.txt b/samples/snippets/tutorial/reviews/bladerunner-neutral.txt deleted file mode 100644 index 60556e60..00000000 --- a/samples/snippets/tutorial/reviews/bladerunner-neutral.txt +++ /dev/null @@ -1,2 +0,0 @@ -I neither liked nor disliked this movie. Parts were interesting, but -overall I was left wanting more. 
The acting was pretty good. \ No newline at end of file diff --git a/samples/snippets/tutorial/reviews/bladerunner-pos.txt b/samples/snippets/tutorial/reviews/bladerunner-pos.txt deleted file mode 100644 index a7faf815..00000000 --- a/samples/snippets/tutorial/reviews/bladerunner-pos.txt +++ /dev/null @@ -1,10 +0,0 @@ -`Bladerunner` is often touted as one of the best science fiction films ever -made. Indeed, it satisfies many of the requisites for good sci-fi: a future -world with flying cars and humanoid robots attempting to rebel against their -creators. But more than anything, `Bladerunner` is a fantastic exploration -of the nature of what it means to be human. If we create robots which can -think, will they become human? And if they do, what makes us unique? Indeed, -how can we be sure we're not human in any case? `Bladerunner` explored -these issues before such movies as `The Matrix,' and did so intelligently. -The visual effects and score by Vangelis set the mood. See this movie -in a dark theatre to appreciate it fully. Highly recommended! \ No newline at end of file diff --git a/samples/snippets/tutorial/tutorial.py b/samples/snippets/tutorial/tutorial.py deleted file mode 100644 index 5d14b223..00000000 --- a/samples/snippets/tutorial/tutorial.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2016 Google, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# [START full_tutorial_script] -# [START import_libraries] -import argparse -import io - -import googleapiclient.discovery -# [END import_libraries] - - -def print_sentiment(filename): - """Prints sentiment analysis on a given file contents.""" - # [START authenticating_to_the_api] - service = googleapiclient.discovery.build('language', 'v1') - # [END authenticating_to_the_api] - - # [START constructing_the_request] - with io.open(filename, 'r') as review_file: - review_file_contents = review_file.read() - - service_request = service.documents().analyzeSentiment( - body={ - 'document': { - 'type': 'PLAIN_TEXT', - 'content': review_file_contents, - } - } - ) - response = service_request.execute() - # [END constructing_the_request] - - # [START parsing_the_response] - score = response['documentSentiment']['score'] - magnitude = response['documentSentiment']['magnitude'] - - for n, sentence in enumerate(response['sentences']): - sentence_sentiment = sentence['sentiment']['score'] - print('Sentence {} has a sentiment score of {}'.format(n, - sentence_sentiment)) - - print('Overall Sentiment: score of {} with magnitude of {}'.format( - score, magnitude)) - # [END parsing_the_response] - - -# [START running_your_application] -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument( - 'movie_review_filename', - help='The filename of the movie review you\'d like to analyze.') - args = parser.parse_args() - print_sentiment(args.movie_review_filename) -# [END running_your_application] -# [END full_tutorial_script] diff --git a/samples/snippets/tutorial/tutorial_test.py b/samples/snippets/tutorial/tutorial_test.py deleted file mode 100644 index 065076fb..00000000 --- a/samples/snippets/tutorial/tutorial_test.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2016, Google, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import re - -import tutorial - - -def test_neutral(capsys): - tutorial.print_sentiment('reviews/bladerunner-neutral.txt') - out, _ = capsys.readouterr() - assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) - assert re.search( - r'Overall Sentiment: score of -?[0-2]\.?[0-9]? with ' - r'magnitude of [0-1]\.?[0-9]?', out, re.I) - - -def test_pos(capsys): - tutorial.print_sentiment('reviews/bladerunner-pos.txt') - out, _ = capsys.readouterr() - assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) - assert re.search( - r'Overall Sentiment: score of [0-9]\.?[0-9]? with ' - r'magnitude of [0-9]\.?[0-9]?', out, re.I) - - -def test_neg(capsys): - tutorial.print_sentiment('reviews/bladerunner-neg.txt') - out, _ = capsys.readouterr() - assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) - assert re.search( - r'Overall Sentiment: score of -[0-9]\.?[0-9]? 
with ' - r'magnitude of [2-7]\.?[0-9]?', out, re.I) - - -def test_mixed(capsys): - tutorial.print_sentiment('reviews/bladerunner-mixed.txt') - out, _ = capsys.readouterr() - assert re.search(r'Sentence \d has a sentiment score of \d', out, re.I) - assert re.search( - r'Overall Sentiment: score of -?[0-9]\.?[0-9]? with ' - r'magnitude of [3-6]\.?[0-9]?', out, re.I) From 951e844c61928e7ff8b1ea4102182bb9c940ebb5 Mon Sep 17 00:00:00 2001 From: Charles Engelke Date: Fri, 19 Oct 2018 15:21:41 -0700 Subject: [PATCH 145/209] Fixed name of model [(#1779)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1779) * Fixed name of model * update model ids --- samples/snippets/automl/predict_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/automl/predict_test.py b/samples/snippets/automl/predict_test.py index 6cf2c69a..f511302d 100644 --- a/samples/snippets/automl/predict_test.py +++ b/samples/snippets/automl/predict_test.py @@ -23,7 +23,7 @@ def test_predict(capsys): - model_id = "3472481026502981088" + model_id = "TCN3472481026502981088" automl_natural_language_predict.predict( project_id, compute_region, model_id, "resources/test.txt" ) From 2f5369323e4d9ee205934b7ea3c1f80a5514aee9 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 20 Nov 2018 15:40:29 -0800 Subject: [PATCH 146/209] Auto-update dependencies. [(#1846)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1846) ACK, merging. --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/classify_text/requirements.txt | 4 ++-- samples/snippets/generated-samples/v1/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 5e902918..a1a63f75 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.7.4 -google-auth==1.5.1 +google-auth==1.6.1 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index d045e22d..330f8f98 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.0.2 -numpy==1.15.1 +google-cloud-language==1.1.0 +numpy==1.15.4 diff --git a/samples/snippets/generated-samples/v1/requirements.txt b/samples/snippets/generated-samples/v1/requirements.txt index 2cbc37eb..7029093e 100644 --- a/samples/snippets/generated-samples/v1/requirements.txt +++ b/samples/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.2 +google-cloud-language==1.1.0 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 2cbc37eb..7029093e 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.0.2 +google-cloud-language==1.1.0 From f4582b1bed3c3637571844b6c7890eaa139c6c6b Mon Sep 17 00:00:00 2001 From: Shahin Date: Tue, 4 Dec 2018 15:39:10 -0800 Subject: [PATCH 147/209] Moved the imports and region tags inside the functions [(#1891)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1891) * Moved the imports and region tags inside the functions * Removed the 
unnecessary imports * Added the missing import (six) to the functions * Removed the extra whitespaces * Changes based on Alix's comments. - Sample files no longer have input arguments - Input texts and uri's are hard coded - unit tests are modified accordingly * Remove extra whitespace * Removed extra whitespaces * Removed unused import * Removed the extra + signs --- samples/snippets/cloud-client/v1/snippets.py | 174 +++++++++++------- .../snippets/cloud-client/v1/snippets_test.py | 43 ++--- 2 files changed, 120 insertions(+), 97 deletions(-) diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 826c28c5..6ccfaf16 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2016 Google, Inc. +# Copyright 2018 Google, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,15 +24,16 @@ import argparse import sys -from google.cloud import language -from google.cloud.language import enums -from google.cloud.language import types -import six +def sentiment_text(): + # [START language_sentiment_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + text = 'Hello, world!' -# [START language_sentiment_text] -def sentiment_text(text): - """Detects sentiment in the text.""" client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -51,12 +52,17 @@ def sentiment_text(text): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) # [END language_python_migration_sentiment_text] -# [END language_sentiment_text] + # [END language_sentiment_text] + + +def sentiment_file(): + # [START language_sentiment_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + gcs_uri = 'gs://cloud-samples-data/language/hello.txt' -# [START language_sentiment_gcs] -def sentiment_file(gcs_uri): - """Detects sentiment in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() # Instantiates a plain text document. @@ -72,12 +78,18 @@ def sentiment_file(gcs_uri): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) -# [END language_sentiment_gcs] + # [END language_sentiment_gcs] + + +def entities_text(): + # [START language_entities_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + text = 'President Kennedy spoke at the White House.' 
-# [START language_entities_text] -def entities_text(text): - """Detects entities in the text.""" client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -105,12 +117,17 @@ def entities_text(text): print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) # [END language_python_migration_entities_text] -# [END language_entities_text] + # [END language_entities_text] -# [START language_entities_gcs] -def entities_file(gcs_uri): - """Detects entities in the file located in Google Cloud Storage.""" +def entities_file(): + # [START language_entities_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + gcs_uri = 'gs://cloud-samples-data/language/president.txt' + client = language.LanguageServiceClient() # Instantiates a plain text document. @@ -131,12 +148,18 @@ def entities_file(gcs_uri): print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) -# [END language_entities_gcs] + # [END language_entities_gcs] + +def syntax_text(): + # [START language_syntax_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + text = 'President Kennedy spoke at the White House.' -# [START language_syntax_text] -def syntax_text(text): - """Detects syntax in the text.""" client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -157,12 +180,17 @@ def syntax_text(text): print(u'{}: {}'.format(part_of_speech_tag.name, token.text.content)) # [END language_python_migration_syntax_text] -# [END language_syntax_text] + # [END language_syntax_text] + +def syntax_file(): + # [START language_syntax_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + gcs_uri = 'gs://cloud-samples-data/language/president.txt' -# [START language_syntax_gcs] -def syntax_file(gcs_uri): - """Detects syntax in the file located in Google Cloud Storage.""" client = language.LanguageServiceClient() # Instantiates a plain text document. @@ -178,12 +206,18 @@ def syntax_file(gcs_uri): part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) print(u'{}: {}'.format(part_of_speech_tag.name, token.text.content)) -# [END language_syntax_gcs] + # [END language_syntax_gcs] + + +def entity_sentiment_text(): + # [START language_entity_sentiment_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + text = 'President Kennedy spoke at the White House.' 
-# [START language_entity_sentiment_text] -def entity_sentiment_text(text): - """Detects entity sentiment in the provided text.""" client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -211,12 +245,17 @@ def entity_sentiment_text(text): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END language_entity_sentiment_text] + # [END language_entity_sentiment_text] + + +def entity_sentiment_file(): + # [START language_entity_sentiment_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + gcs_uri = 'gs://cloud-samples-data/language/president.txt' -# [START language_entity_sentiment_gcs] -def entity_sentiment_file(gcs_uri): - """Detects entity sentiment in a Google Cloud Storage file.""" client = language.LanguageServiceClient() document = types.Document( @@ -240,12 +279,20 @@ def entity_sentiment_file(gcs_uri): print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment)) -# [END language_entity_sentiment_gcs] + # [END language_entity_sentiment_gcs] -# [START language_classify_text] -def classify_text(text): - """Classifies content categories of the provided text.""" +def classify_text(): + # [START language_classify_text] + import six + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + + text = 'Android is a mobile operating system developed by Google, ' \ + 'based on the Linux kernel and designed primarily for ' \ + 'touchscreen mobile devices such as smartphones and tablets.' + client = language.LanguageServiceClient() if isinstance(text, six.binary_type): @@ -261,14 +308,17 @@ def classify_text(text): print(u'=' * 20) print(u'{:<16}: {}'.format('name', category.name)) print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END language_classify_text] + # [END language_classify_text] + + +def classify_file(): + # [START language_classify_gcs] + from google.cloud import language + from google.cloud.language import enums + from google.cloud.language import types + gcs_uri = 'gs://cloud-samples-data/language/android.txt' -# [START language_classify_gcs] -def classify_file(gcs_uri): - """Classifies content categories of the text in a Google Cloud Storage - file. 
- """ client = language.LanguageServiceClient() document = types.Document( @@ -281,7 +331,7 @@ def classify_file(gcs_uri): print(u'=' * 20) print(u'{:<16}: {}'.format('name', category.name)) print(u'{:<16}: {}'.format('confidence', category.confidence)) -# [END language_classify_gcs] + # [END language_classify_gcs] if __name__ == '__main__': @@ -292,63 +342,53 @@ def classify_file(gcs_uri): classify_text_parser = subparsers.add_parser( 'classify-text', help=classify_text.__doc__) - classify_text_parser.add_argument('text') classify_text_parser = subparsers.add_parser( 'classify-file', help=classify_file.__doc__) - classify_text_parser.add_argument('gcs_uri') sentiment_entities_text_parser = subparsers.add_parser( 'sentiment-entities-text', help=entity_sentiment_text.__doc__) - sentiment_entities_text_parser.add_argument('text') sentiment_entities_file_parser = subparsers.add_parser( 'sentiment-entities-file', help=entity_sentiment_file.__doc__) - sentiment_entities_file_parser.add_argument('gcs_uri') sentiment_text_parser = subparsers.add_parser( 'sentiment-text', help=sentiment_text.__doc__) - sentiment_text_parser.add_argument('text') sentiment_file_parser = subparsers.add_parser( 'sentiment-file', help=sentiment_file.__doc__) - sentiment_file_parser.add_argument('gcs_uri') entities_text_parser = subparsers.add_parser( 'entities-text', help=entities_text.__doc__) - entities_text_parser.add_argument('text') entities_file_parser = subparsers.add_parser( 'entities-file', help=entities_file.__doc__) - entities_file_parser.add_argument('gcs_uri') syntax_text_parser = subparsers.add_parser( 'syntax-text', help=syntax_text.__doc__) - syntax_text_parser.add_argument('text') syntax_file_parser = subparsers.add_parser( 'syntax-file', help=syntax_file.__doc__) - syntax_file_parser.add_argument('gcs_uri') args = parser.parse_args() if args.command == 'sentiment-text': - sentiment_text(args.text) + sentiment_text() elif args.command == 'sentiment-file': - sentiment_file(args.gcs_uri) + sentiment_file() elif args.command == 'entities-text': - entities_text(args.text) + entities_text() elif args.command == 'entities-file': - entities_file(args.gcs_uri) + entities_file() elif args.command == 'syntax-text': - syntax_text(args.text) + syntax_text() elif args.command == 'syntax-file': - syntax_file(args.gcs_uri) + syntax_file() elif args.command == 'sentiment-entities-text': - entity_sentiment_text(args.text) + entity_sentiment_text() elif args.command == 'sentiment-entities-file': - entity_sentiment_file(args.gcs_uri) + entity_sentiment_file() elif args.command == 'classify-text': - classify_text(args.text) + classify_text() elif args.command == 'classify-file': - classify_file(args.gcs_uri) + classify_file() diff --git a/samples/snippets/cloud-client/v1/snippets_test.py b/samples/snippets/cloud-client/v1/snippets_test.py index 27fbee24..ef09b1a1 100644 --- a/samples/snippets/cloud-client/v1/snippets_test.py +++ b/samples/snippets/cloud-client/v1/snippets_test.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2017 Google, Inc. +# Copyright 2018 Google, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,85 +13,68 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os - import snippets -BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] -TEST_FILE_URL = 'gs://{}/text.txt'.format(BUCKET) -LONG_TEST_FILE_URL = 'gs://{}/android_text.txt'.format(BUCKET) - def test_sentiment_text(capsys): - snippets.sentiment_text('No! God please, no!') + snippets.sentiment_text() out, _ = capsys.readouterr() assert 'Score: ' in out def test_sentiment_file(capsys): - snippets.sentiment_file(TEST_FILE_URL) + snippets.sentiment_file() out, _ = capsys.readouterr() assert 'Score: ' in out def test_entities_text(capsys): - snippets.entities_text('President Obama is speaking at the White House.') + snippets.entities_text() out, _ = capsys.readouterr() assert 'name' in out - assert ': Obama' in out + assert ': Kennedy' in out def test_entities_file(capsys): - snippets.entities_file(TEST_FILE_URL) + snippets.entities_file() out, _ = capsys.readouterr() assert 'name' in out - assert ': Obama' in out + assert ': Kennedy' in out def test_syntax_text(capsys): - snippets.syntax_text('President Obama is speaking at the White House.') + snippets.syntax_text() out, _ = capsys.readouterr() assert 'NOUN: President' in out def test_syntax_file(capsys): - snippets.syntax_file(TEST_FILE_URL) + snippets.syntax_file() out, _ = capsys.readouterr() assert 'NOUN: President' in out def test_sentiment_entities_text(capsys): - snippets.entity_sentiment_text( - 'President Obama is speaking at the White House.') + snippets.entity_sentiment_text() out, _ = capsys.readouterr() assert 'Content : White House' in out def test_sentiment_entities_file(capsys): - snippets.entity_sentiment_file(TEST_FILE_URL) + snippets.entity_sentiment_file() out, _ = capsys.readouterr() assert 'Content : White House' in out -def test_sentiment_entities_utf(capsys): - snippets.entity_sentiment_text( - 'foo→bar') - out, _ = capsys.readouterr() - assert 'Begin Offset : 4' in out - - def test_classify_text(capsys): - snippets.classify_text( - 'Android is a mobile operating system developed by Google, ' - 'based on the Linux kernel and designed primarily for touchscreen ' - 'mobile devices such as smartphones and tablets.') + snippets.classify_text() out, _ = capsys.readouterr() assert 'name' in out assert '/Computers & Electronics' in out def test_classify_file(capsys): - snippets.classify_file(LONG_TEST_FILE_URL) + snippets.classify_file() out, _ = capsys.readouterr() assert 'name' in out assert '/Computers & Electronics' in out From 02eaf3bf7a3b3abb21e1bee6f957754a9eea536a Mon Sep 17 00:00:00 2001 From: Andrew Ferlitsch Date: Thu, 6 Dec 2018 15:55:38 -0800 Subject: [PATCH 148/209] Fix decode [(#1911)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1911) * fix decode problem * fix decode * fix decode --- samples/snippets/cloud-client/v1/snippets.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 6ccfaf16..39712c8b 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -27,7 +27,6 @@ def sentiment_text(): # [START language_sentiment_text] - import six from google.cloud import language from google.cloud.language import enums from google.cloud.language import types @@ -36,8 +35,10 @@ def sentiment_text(): client = language.LanguageServiceClient() - if isinstance(text, six.binary_type): + try: text = text.decode('utf-8') + except AttributeError: + pass # Instantiates a plain text document. 
# [START language_python_migration_sentiment_text] From 018bbc59cebf576c702a71811577b7a59054706b Mon Sep 17 00:00:00 2001 From: Noah Negrey Date: Fri, 7 Dec 2018 09:27:27 -0800 Subject: [PATCH 149/209] Update sample output [(#1893)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1893) * Update sample output * Update snippets.py * Update snippets.py --- samples/snippets/cloud-client/v1/snippets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py index 39712c8b..a95110a2 100644 --- a/samples/snippets/cloud-client/v1/snippets.py +++ b/samples/snippets/cloud-client/v1/snippets.py @@ -113,10 +113,10 @@ def entities_text(): print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) print(u'{:<16}: {}'.format('type', entity_type.name)) - print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) + print(u'{:<16}: {}'.format('mid', entity.metadata.get('mid', '-'))) # [END language_python_migration_entities_text] # [END language_entities_text] @@ -145,10 +145,10 @@ def entities_file(): print('=' * 20) print(u'{:<16}: {}'.format('name', entity.name)) print(u'{:<16}: {}'.format('type', entity_type.name)) - print(u'{:<16}: {}'.format('metadata', entity.metadata)) print(u'{:<16}: {}'.format('salience', entity.salience)) print(u'{:<16}: {}'.format('wikipedia_url', entity.metadata.get('wikipedia_url', '-'))) + print(u'{:<16}: {}'.format('mid', entity.metadata.get('mid', '-'))) # [END language_entities_gcs] From 2cd29276674facd125aa25c6670f24770ff69317 Mon Sep 17 00:00:00 2001 From: DPEBot Date: Wed, 6 Feb 2019 12:06:35 -0800 Subject: [PATCH 150/209] Auto-update dependencies. [(#1980)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1980) * Auto-update dependencies. 
* Update requirements.txt * Update requirements.txt --- samples/snippets/api/requirements.txt | 4 ++-- samples/snippets/automl/requirements.txt | 2 +- samples/snippets/classify_text/requirements.txt | 4 ++-- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/generated-samples/v1/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index a1a63f75..7e4359ce 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.7.4 -google-auth==1.6.1 +google-api-python-client==1.7.8 +google-auth==1.6.2 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/automl/requirements.txt b/samples/snippets/automl/requirements.txt index 9b692618..db96c599 100644 --- a/samples/snippets/automl/requirements.txt +++ b/samples/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.1.1 +google-cloud-automl==0.1.2 diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 330f8f98..8c31e571 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.1.0 -numpy==1.15.4 +google-cloud-language==1.1.1 +numpy==1.16.1 diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 7029093e..257f81db 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.0 +google-cloud-language==1.1.1 diff --git a/samples/snippets/generated-samples/v1/requirements.txt b/samples/snippets/generated-samples/v1/requirements.txt index 7029093e..257f81db 100644 --- a/samples/snippets/generated-samples/v1/requirements.txt +++ b/samples/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.0 +google-cloud-language==1.1.1 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 7029093e..257f81db 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.0 +google-cloud-language==1.1.1 From d7fd411e3d9f7a43c651cd5b8c146ad8309d9fff Mon Sep 17 00:00:00 2001 From: Charles Engelke Date: Mon, 29 Apr 2019 16:44:43 -0700 Subject: [PATCH 151/209] Update requirements.txt [(#2128)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2128) --- samples/snippets/automl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/automl/requirements.txt b/samples/snippets/automl/requirements.txt index db96c599..ebc8794c 100644 --- a/samples/snippets/automl/requirements.txt +++ b/samples/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.1.2 +google-cloud-automl==0.2.0 From cef71e38eebefc336e35899f48119e852b9684e2 Mon Sep 17 00:00:00 2001 From: Gus Class Date: Tue, 8 Oct 2019 09:53:32 -0700 Subject: [PATCH 152/209] Adds split updates for Firebase ... 
opencensus [(#2438)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2438) --- samples/snippets/api/requirements.txt | 4 ++-- samples/snippets/automl/requirements.txt | 2 +- samples/snippets/classify_text/requirements.txt | 4 ++-- samples/snippets/cloud-client/v1/requirements.txt | 2 +- samples/snippets/generated-samples/v1/requirements.txt | 2 +- samples/snippets/sentiment/requirements.txt | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 7e4359ce..81808120 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.7.8 -google-auth==1.6.2 +google-api-python-client==1.7.11 +google-auth==1.6.3 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/automl/requirements.txt b/samples/snippets/automl/requirements.txt index ebc8794c..6693c241 100644 --- a/samples/snippets/automl/requirements.txt +++ b/samples/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.2.0 +google-cloud-automl==0.5.0 diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 8c31e571..b5558c7c 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-language==1.1.1 -numpy==1.16.1 +google-cloud-language==1.3.0 +numpy==1.17.2 diff --git a/samples/snippets/cloud-client/v1/requirements.txt b/samples/snippets/cloud-client/v1/requirements.txt index 257f81db..0c011f54 100644 --- a/samples/snippets/cloud-client/v1/requirements.txt +++ b/samples/snippets/cloud-client/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.1 +google-cloud-language==1.3.0 diff --git a/samples/snippets/generated-samples/v1/requirements.txt b/samples/snippets/generated-samples/v1/requirements.txt index 257f81db..0c011f54 100644 --- a/samples/snippets/generated-samples/v1/requirements.txt +++ b/samples/snippets/generated-samples/v1/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.1 +google-cloud-language==1.3.0 diff --git a/samples/snippets/sentiment/requirements.txt b/samples/snippets/sentiment/requirements.txt index 257f81db..0c011f54 100644 --- a/samples/snippets/sentiment/requirements.txt +++ b/samples/snippets/sentiment/requirements.txt @@ -1 +1 @@ -google-cloud-language==1.1.1 +google-cloud-language==1.3.0 From 61eebd1a16236d630cd716c98b79f35ee02b4666 Mon Sep 17 00:00:00 2001 From: Noah Negrey Date: Fri, 15 Nov 2019 15:15:24 -0700 Subject: [PATCH 153/209] Add Set Endpoint Samples [(#2497)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2497) * Add Set Endpoint Samples * Add additional test result option * Sample Request update * Add filter_ --- .../snippets/cloud-client/v1/set_endpoint.py | 40 +++++++++++++++++++ .../cloud-client/v1/set_endpoint_test.py | 22 ++++++++++ 2 files changed, 62 insertions(+) create mode 100644 samples/snippets/cloud-client/v1/set_endpoint.py create mode 100644 samples/snippets/cloud-client/v1/set_endpoint_test.py diff --git a/samples/snippets/cloud-client/v1/set_endpoint.py b/samples/snippets/cloud-client/v1/set_endpoint.py new file mode 100644 index 00000000..abc6f180 --- /dev/null +++ b/samples/snippets/cloud-client/v1/set_endpoint.py @@ -0,0 +1,40 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
+# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def set_endpoint(): + """Change your endpoint""" + # [START language_set_endpoint] + # Imports the Google Cloud client library + from google.cloud import language + + client_options = {'api_endpoint': 'eu-language.googleapis.com:443'} + + # Instantiates a client + client = language.LanguageServiceClient(client_options=client_options) + # [END language_set_endpoint] + + # The text to analyze + document = language.types.Document( + content='Hello, world!', + type=language.enums.Document.Type.PLAIN_TEXT) + + # Detects the sentiment of the text + sentiment = client.analyze_sentiment(document=document).document_sentiment + + print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) + + +if __name__ == '__main__': + set_endpoint() diff --git a/samples/snippets/cloud-client/v1/set_endpoint_test.py b/samples/snippets/cloud-client/v1/set_endpoint_test.py new file mode 100644 index 00000000..7e124c36 --- /dev/null +++ b/samples/snippets/cloud-client/v1/set_endpoint_test.py @@ -0,0 +1,22 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import set_endpoint + + +def test_set_endpoint(capsys): + set_endpoint.set_endpoint() + + out, _ = capsys.readouterr() + assert 'Sentiment' in out From d508aa612b58ac6b12c4c0449711e6ff4175b9a7 Mon Sep 17 00:00:00 2001 From: DPEBot Date: Fri, 20 Dec 2019 17:41:38 -0800 Subject: [PATCH 154/209] Auto-update dependencies. [(#2005)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2005) * Auto-update dependencies. * Revert update of appengine/flexible/datastore. 
* revert update of appengine/flexible/scipy * revert update of bigquery/bqml * revert update of bigquery/cloud-client * revert update of bigquery/datalab-migration * revert update of bigtable/quickstart * revert update of compute/api * revert update of container_registry/container_analysis * revert update of dataflow/run_template * revert update of datastore/cloud-ndb * revert update of dialogflow/cloud-client * revert update of dlp * revert update of functions/imagemagick * revert update of functions/ocr/app * revert update of healthcare/api-client/fhir * revert update of iam/api-client * revert update of iot/api-client/gcs_file_to_device * revert update of iot/api-client/mqtt_example * revert update of language/automl * revert update of run/image-processing * revert update of vision/automl * revert update testing/requirements.txt * revert update of vision/cloud-client/detect * revert update of vision/cloud-client/product_search * revert update of jobs/v2/api_client * revert update of jobs/v3/api_client * revert update of opencensus * revert update of translate/cloud-client * revert update to speech/cloud-client Co-authored-by: Kurtis Van Gent <31518063+kurtisvg@users.noreply.github.com> Co-authored-by: Doug Mahugh --- samples/snippets/api/requirements.txt | 2 +- samples/snippets/classify_text/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 81808120..0237dc05 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.7.11 -google-auth==1.6.3 +google-auth==1.10.0 google-auth-httplib2==0.0.3 diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index b5558c7c..8a441e71 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.17.2 +numpy==1.17.4 From f8a8e8e10ee04937ca93da929dcc7c9ae534e854 Mon Sep 17 00:00:00 2001 From: "Leah E. Cole" <6719667+leahecole@users.noreply.github.com> Date: Thu, 5 Mar 2020 14:22:12 -0800 Subject: [PATCH 155/209] chore(deps): update dependency google-auth to v1.11.2 [(#2724)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2724) Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 0237dc05..c27ca15e 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.7.11 -google-auth==1.10.0 +google-auth==1.11.2 google-auth-httplib2==0.0.3 From 6fb8ad3ecd64a70767faa7e662c0b1ee081ef831 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 6 Mar 2020 19:04:23 +0100 Subject: [PATCH 156/209] Update dependency google-cloud-automl to v0.10.0 [(#3033)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3033) Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Co-authored-by: Leah E. 
Cole <6719667+leahecole@users.noreply.github.com> --- samples/snippets/automl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/automl/requirements.txt b/samples/snippets/automl/requirements.txt index 6693c241..eb3be761 100644 --- a/samples/snippets/automl/requirements.txt +++ b/samples/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.5.0 +google-cloud-automl==0.10.0 From 35d222c1db07940cca4f07af6b609e27a0272c67 Mon Sep 17 00:00:00 2001 From: Jonathan Simon Date: Tue, 10 Mar 2020 12:48:04 -0700 Subject: [PATCH 157/209] Remove unused region_tag comment. [(#3075)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3075) --- .../snippets/generated-samples/v1/language_sentiment_text.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/samples/snippets/generated-samples/v1/language_sentiment_text.py b/samples/snippets/generated-samples/v1/language_sentiment_text.py index d99f5d09..10d17970 100644 --- a/samples/snippets/generated-samples/v1/language_sentiment_text.py +++ b/samples/snippets/generated-samples/v1/language_sentiment_text.py @@ -29,7 +29,6 @@ def sample_analyze_sentiment(content): - # [START language_sentiment_text_core] client = language_v1.LanguageServiceClient() @@ -46,8 +45,6 @@ def sample_analyze_sentiment(content): print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) - # [END language_sentiment_text_core] - # [END language_sentiment_text] From 6600269a2fc3c94a7216a45edd665755aa509ee2 Mon Sep 17 00:00:00 2001 From: Jonathan Simon Date: Tue, 10 Mar 2020 13:00:04 -0700 Subject: [PATCH 158/209] Remove Natural Language samples not included docs. [(#3074)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3074) Remove Natural Language samples that are no longer included in product documentation. The samples used for documentation are now located in https://github.com/googleapis/python-language/tree/master/samples/v1 and thus have replaced the samples in this repo. --- samples/snippets/cloud-client/v1/README.rst | 56 +-- .../snippets/cloud-client/v1/README.rst.in | 2 - samples/snippets/cloud-client/v1/snippets.py | 395 ------------------ .../snippets/cloud-client/v1/snippets_test.py | 80 ---- 4 files changed, 2 insertions(+), 531 deletions(-) delete mode 100644 samples/snippets/cloud-client/v1/snippets.py delete mode 100644 samples/snippets/cloud-client/v1/snippets_test.py diff --git a/samples/snippets/cloud-client/v1/README.rst b/samples/snippets/cloud-client/v1/README.rst index 97f79a34..e0d71946 100644 --- a/samples/snippets/cloud-client/v1/README.rst +++ b/samples/snippets/cloud-client/v1/README.rst @@ -53,7 +53,7 @@ Install Dependencies $ virtualenv env $ source env/bin/activate -#. Install the dependencies needed to run the samples. +#. Install the dependencies needed to run the sample. .. code-block:: bash @@ -62,7 +62,7 @@ Install Dependencies .. _pip: https://pip.pypa.io/ .. _virtualenv: https://virtualenv.pypa.io/ -Samples +Sample ------------------------------------------------------------------------------- Quickstart @@ -81,58 +81,6 @@ To run this sample: $ python quickstart.py -Snippets -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=language/cloud-client/v1/snippets.py,language/cloud-client/v1/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python snippets.py - - usage: snippets.py [-h] - {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} - ... - - This application demonstrates how to perform basic operations with the - Google Cloud Natural Language API - - For more information, the documentation at - https://cloud.google.com/natural-language/docs. - - positional arguments: - {classify-text,classify-file,sentiment-entities-text,sentiment-entities-file,sentiment-text,sentiment-file,entities-text,entities-file,syntax-text,syntax-file} - classify-text Classifies content categories of the provided text. - classify-file Classifies content categories of the text in a Google - Cloud Storage file. - sentiment-entities-text - Detects entity sentiment in the provided text. - sentiment-entities-file - Detects entity sentiment in a Google Cloud Storage - file. - sentiment-text Detects sentiment in the text. - sentiment-file Detects sentiment in the file located in Google Cloud - Storage. - entities-text Detects entities in the text. - entities-file Detects entities in the file located in Google Cloud - Storage. - syntax-text Detects syntax in the text. - syntax-file Detects syntax in the file located in Google Cloud - Storage. - - optional arguments: - -h, --help show this help message and exit - - - - - The client library ------------------------------------------------------------------------------- diff --git a/samples/snippets/cloud-client/v1/README.rst.in b/samples/snippets/cloud-client/v1/README.rst.in index 06b7ff3e..9bf38dbf 100644 --- a/samples/snippets/cloud-client/v1/README.rst.in +++ b/samples/snippets/cloud-client/v1/README.rst.in @@ -23,8 +23,6 @@ setup: samples: - name: Quickstart file: quickstart.py -- name: Snippets - file: snippets.py show_help: true cloud_client_library: true diff --git a/samples/snippets/cloud-client/v1/snippets.py b/samples/snippets/cloud-client/v1/snippets.py deleted file mode 100644 index a95110a2..00000000 --- a/samples/snippets/cloud-client/v1/snippets.py +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations with the -Google Cloud Natural Language API - -For more information, the documentation at -https://cloud.google.com/natural-language/docs. 
-""" - -import argparse -import sys - - -def sentiment_text(): - # [START language_sentiment_text] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'Hello, world!' - - client = language.LanguageServiceClient() - - try: - text = text.decode('utf-8') - except AttributeError: - pass - - # Instantiates a plain text document. - # [START language_python_migration_sentiment_text] - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) - # [END language_python_migration_sentiment_text] - # [END language_sentiment_text] - - -def sentiment_file(): - # [START language_sentiment_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/hello.txt' - - client = language.LanguageServiceClient() - - # Instantiates a plain text document. - # [START language_python_migration_document_gcs] - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - # [END language_python_migration_document_gcs] - - # Detects sentiment in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - sentiment = client.analyze_sentiment(document).document_sentiment - - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) - # [END language_sentiment_gcs] - - -def entities_text(): - # [START language_entities_text] - import six - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'President Kennedy spoke at the White House.' - - client = language.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - # [START language_python_migration_entities_text] - # [START language_python_migration_document_text] - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - # [END language_python_migration_document_text] - - # Detects entities in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - entities = client.analyze_entities(document).entities - - for entity in entities: - entity_type = enums.Entity.Type(entity.type) - print('=' * 20) - print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type.name)) - print(u'{:<16}: {}'.format('salience', entity.salience)) - print(u'{:<16}: {}'.format('wikipedia_url', - entity.metadata.get('wikipedia_url', '-'))) - print(u'{:<16}: {}'.format('mid', entity.metadata.get('mid', '-'))) - # [END language_python_migration_entities_text] - # [END language_entities_text] - - -def entities_file(): - # [START language_entities_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/president.txt' - - client = language.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects sentiment in the document. 
You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - entities = client.analyze_entities(document).entities - - for entity in entities: - entity_type = enums.Entity.Type(entity.type) - print('=' * 20) - print(u'{:<16}: {}'.format('name', entity.name)) - print(u'{:<16}: {}'.format('type', entity_type.name)) - print(u'{:<16}: {}'.format('salience', entity.salience)) - print(u'{:<16}: {}'.format('wikipedia_url', - entity.metadata.get('wikipedia_url', '-'))) - print(u'{:<16}: {}'.format('mid', entity.metadata.get('mid', '-'))) - # [END language_entities_gcs] - - -def syntax_text(): - # [START language_syntax_text] - import six - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'President Kennedy spoke at the White House.' - - client = language.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - # Instantiates a plain text document. - # [START language_python_migration_syntax_text] - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects syntax in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - tokens = client.analyze_syntax(document).tokens - - for token in tokens: - part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) - print(u'{}: {}'.format(part_of_speech_tag.name, - token.text.content)) - # [END language_python_migration_syntax_text] - # [END language_syntax_text] - - -def syntax_file(): - # [START language_syntax_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/president.txt' - - client = language.LanguageServiceClient() - - # Instantiates a plain text document. - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detects syntax in the document. You can also analyze HTML with: - # document.type == enums.Document.Type.HTML - tokens = client.analyze_syntax(document).tokens - - for token in tokens: - part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag) - print(u'{}: {}'.format(part_of_speech_tag.name, - token.text.content)) - # [END language_syntax_gcs] - - -def entity_sentiment_text(): - # [START language_entity_sentiment_text] - import six - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'President Kennedy spoke at the White House.' - - client = language.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - document = types.Document( - content=text.encode('utf-8'), - type=enums.Document.Type.PLAIN_TEXT) - - # Detect and send native Python encoding to receive correct word offsets. 
- encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print('Mentions: ') - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) - # [END language_entity_sentiment_text] - - -def entity_sentiment_file(): - # [START language_entity_sentiment_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/president.txt' - - client = language.LanguageServiceClient() - - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - # Detect and send native Python encoding to receive correct word offsets. - encoding = enums.EncodingType.UTF32 - if sys.maxunicode == 65535: - encoding = enums.EncodingType.UTF16 - - result = client.analyze_entity_sentiment(document, encoding) - - for entity in result.entities: - print(u'Name: "{}"'.format(entity.name)) - for mention in entity.mentions: - print(u' Begin Offset : {}'.format(mention.text.begin_offset)) - print(u' Content : {}'.format(mention.text.content)) - print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) - print(u' Sentiment : {}'.format(mention.sentiment.score)) - print(u' Type : {}'.format(mention.type)) - print(u'Salience: {}'.format(entity.salience)) - print(u'Sentiment: {}\n'.format(entity.sentiment)) - # [END language_entity_sentiment_gcs] - - -def classify_text(): - # [START language_classify_text] - import six - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - text = 'Android is a mobile operating system developed by Google, ' \ - 'based on the Linux kernel and designed primarily for ' \ - 'touchscreen mobile devices such as smartphones and tablets.' 
- - client = language.LanguageServiceClient() - - if isinstance(text, six.binary_type): - text = text.decode('utf-8') - - document = types.Document( - content=text.encode('utf-8'), - type=enums.Document.Type.PLAIN_TEXT) - - categories = client.classify_text(document).categories - - for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('name', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) - # [END language_classify_text] - - -def classify_file(): - # [START language_classify_gcs] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types - - gcs_uri = 'gs://cloud-samples-data/language/android.txt' - - client = language.LanguageServiceClient() - - document = types.Document( - gcs_content_uri=gcs_uri, - type=enums.Document.Type.PLAIN_TEXT) - - categories = client.classify_text(document).categories - - for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('name', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) - # [END language_classify_gcs] - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - subparsers = parser.add_subparsers(dest='command') - - classify_text_parser = subparsers.add_parser( - 'classify-text', help=classify_text.__doc__) - - classify_text_parser = subparsers.add_parser( - 'classify-file', help=classify_file.__doc__) - - sentiment_entities_text_parser = subparsers.add_parser( - 'sentiment-entities-text', help=entity_sentiment_text.__doc__) - - sentiment_entities_file_parser = subparsers.add_parser( - 'sentiment-entities-file', help=entity_sentiment_file.__doc__) - - sentiment_text_parser = subparsers.add_parser( - 'sentiment-text', help=sentiment_text.__doc__) - - sentiment_file_parser = subparsers.add_parser( - 'sentiment-file', help=sentiment_file.__doc__) - - entities_text_parser = subparsers.add_parser( - 'entities-text', help=entities_text.__doc__) - - entities_file_parser = subparsers.add_parser( - 'entities-file', help=entities_file.__doc__) - - syntax_text_parser = subparsers.add_parser( - 'syntax-text', help=syntax_text.__doc__) - - syntax_file_parser = subparsers.add_parser( - 'syntax-file', help=syntax_file.__doc__) - - args = parser.parse_args() - - if args.command == 'sentiment-text': - sentiment_text() - elif args.command == 'sentiment-file': - sentiment_file() - elif args.command == 'entities-text': - entities_text() - elif args.command == 'entities-file': - entities_file() - elif args.command == 'syntax-text': - syntax_text() - elif args.command == 'syntax-file': - syntax_file() - elif args.command == 'sentiment-entities-text': - entity_sentiment_text() - elif args.command == 'sentiment-entities-file': - entity_sentiment_file() - elif args.command == 'classify-text': - classify_text() - elif args.command == 'classify-file': - classify_file() diff --git a/samples/snippets/cloud-client/v1/snippets_test.py b/samples/snippets/cloud-client/v1/snippets_test.py deleted file mode 100644 index ef09b1a1..00000000 --- a/samples/snippets/cloud-client/v1/snippets_test.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 Google, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import snippets - - -def test_sentiment_text(capsys): - snippets.sentiment_text() - out, _ = capsys.readouterr() - assert 'Score: ' in out - - -def test_sentiment_file(capsys): - snippets.sentiment_file() - out, _ = capsys.readouterr() - assert 'Score: ' in out - - -def test_entities_text(capsys): - snippets.entities_text() - out, _ = capsys.readouterr() - assert 'name' in out - assert ': Kennedy' in out - - -def test_entities_file(capsys): - snippets.entities_file() - out, _ = capsys.readouterr() - assert 'name' in out - assert ': Kennedy' in out - - -def test_syntax_text(capsys): - snippets.syntax_text() - out, _ = capsys.readouterr() - assert 'NOUN: President' in out - - -def test_syntax_file(capsys): - snippets.syntax_file() - out, _ = capsys.readouterr() - assert 'NOUN: President' in out - - -def test_sentiment_entities_text(capsys): - snippets.entity_sentiment_text() - out, _ = capsys.readouterr() - assert 'Content : White House' in out - - -def test_sentiment_entities_file(capsys): - snippets.entity_sentiment_file() - out, _ = capsys.readouterr() - assert 'Content : White House' in out - - -def test_classify_text(capsys): - snippets.classify_text() - out, _ = capsys.readouterr() - assert 'name' in out - assert '/Computers & Electronics' in out - - -def test_classify_file(capsys): - snippets.classify_file() - out, _ = capsys.readouterr() - assert 'name' in out - assert '/Computers & Electronics' in out From 3b4011bdcb6610a8fbb24194040812c4987bbc09 Mon Sep 17 00:00:00 2001 From: Noah Negrey Date: Fri, 13 Mar 2020 14:24:50 -0600 Subject: [PATCH 159/209] langauge: fix old automl tests [(#3089)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3089) Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> --- samples/snippets/automl/dataset_test.py | 30 ++++++++++++++++++------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/samples/snippets/automl/dataset_test.py b/samples/snippets/automl/dataset_test.py index fe68579f..94e5e5d0 100644 --- a/samples/snippets/automl/dataset_test.py +++ b/samples/snippets/automl/dataset_test.py @@ -35,15 +35,7 @@ def test_dataset_create_import_delete(capsys): out, _ = capsys.readouterr() create_dataset_output = out.splitlines() assert "Dataset id: " in create_dataset_output[1] - - # import data dataset_id = create_dataset_output[1].split()[2] - data = "gs://{}-lcm/happiness.csv".format(project_id) - automl_natural_language_dataset.import_data( - project_id, compute_region, dataset_id, data - ) - out, _ = capsys.readouterr() - assert "Data imported." in out # delete dataset automl_natural_language_dataset.delete_dataset( @@ -53,6 +45,28 @@ def test_dataset_create_import_delete(capsys): assert "Dataset deleted." in out +def test_import_data(capsys): + # As importing a dataset can take a long time and only four operations can + # be run on a dataset at once. Try to import into a nonexistent dataset and + # confirm that the dataset was not found, but other elements of the request + # were valid. 
+ try: + data = "gs://{}-lcm/happiness.csv".format(project_id) + automl_natural_language_dataset.import_data( + project_id, compute_region, "TEN0000000000000000000", data + ) + out, _ = capsys.readouterr() + assert ( + "Dataset doesn't exist or is inaccessible for use with AutoMl." + in out + ) + except Exception as e: + assert ( + "Dataset doesn't exist or is inaccessible for use with AutoMl." + in e.message + ) + + def test_dataset_list_get(capsys): # list datasets automl_natural_language_dataset.list_datasets( From dcccba85e0936d535cfa54c69df7e965d20e1379 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 30 Mar 2020 21:10:18 +0200 Subject: [PATCH 160/209] chore(deps): update dependency numpy to v1.18.2 [(#3181)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3181) --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 8a441e71..bdbf1b3a 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.17.4 +numpy==1.18.2 From a71bdd705c4441b56599f186f1daacb6444c1311 Mon Sep 17 00:00:00 2001 From: Kurtis Van Gent <31518063+kurtisvg@users.noreply.github.com> Date: Wed, 1 Apr 2020 19:11:50 -0700 Subject: [PATCH 161/209] Simplify noxfile setup. [(#2806)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2806) * chore(deps): update dependency requests to v2.23.0 * Simplify noxfile and add version control. * Configure appengine/standard to only test Python 2.7. * Update Kokokro configs to match noxfile. * Add requirements-test to each folder. * Remove Py2 versions from everything execept appengine/standard. * Remove conftest.py. * Remove appengine/standard/conftest.py * Remove 'no-sucess-flaky-report' from pytest.ini. * Add GAE SDK back to appengine/standard tests. * Fix typo. * Roll pytest to python 2 version. * Add a bunch of testing requirements. * Remove typo. * Add appengine lib directory back in. * Add some additional requirements. * Fix issue with flake8 args. * Even more requirements. * Readd appengine conftest.py. * Add a few more requirements. * Even more Appengine requirements. * Add webtest for appengine/standard/mailgun. * Add some additional requirements. * Add workaround for issue with mailjet-rest. * Add responses for appengine/standard/mailjet. 
Co-authored-by: Renovate Bot --- samples/snippets/api/requirements-test.txt | 1 + samples/snippets/automl/requirements-test.txt | 1 + samples/snippets/classify_text/requirements-test.txt | 1 + samples/snippets/cloud-client/v1/requirements-test.txt | 1 + samples/snippets/generated-samples/v1/requirements-test.txt | 1 + samples/snippets/sentiment/requirements-test.txt | 1 + 6 files changed, 6 insertions(+) create mode 100644 samples/snippets/api/requirements-test.txt create mode 100644 samples/snippets/automl/requirements-test.txt create mode 100644 samples/snippets/classify_text/requirements-test.txt create mode 100644 samples/snippets/cloud-client/v1/requirements-test.txt create mode 100644 samples/snippets/generated-samples/v1/requirements-test.txt create mode 100644 samples/snippets/sentiment/requirements-test.txt diff --git a/samples/snippets/api/requirements-test.txt b/samples/snippets/api/requirements-test.txt new file mode 100644 index 00000000..781d4326 --- /dev/null +++ b/samples/snippets/api/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/samples/snippets/automl/requirements-test.txt b/samples/snippets/automl/requirements-test.txt new file mode 100644 index 00000000..781d4326 --- /dev/null +++ b/samples/snippets/automl/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/samples/snippets/classify_text/requirements-test.txt b/samples/snippets/classify_text/requirements-test.txt new file mode 100644 index 00000000..781d4326 --- /dev/null +++ b/samples/snippets/classify_text/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/samples/snippets/cloud-client/v1/requirements-test.txt b/samples/snippets/cloud-client/v1/requirements-test.txt new file mode 100644 index 00000000..781d4326 --- /dev/null +++ b/samples/snippets/cloud-client/v1/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/samples/snippets/generated-samples/v1/requirements-test.txt b/samples/snippets/generated-samples/v1/requirements-test.txt new file mode 100644 index 00000000..781d4326 --- /dev/null +++ b/samples/snippets/generated-samples/v1/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/samples/snippets/sentiment/requirements-test.txt b/samples/snippets/sentiment/requirements-test.txt new file mode 100644 index 00000000..781d4326 --- /dev/null +++ b/samples/snippets/sentiment/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 From 019e4570312270760a2400e070232354ec10add5 Mon Sep 17 00:00:00 2001 From: Jonathan Simon Date: Fri, 10 Apr 2020 14:14:14 -0700 Subject: [PATCH 162/209] Remove Language sample unused region_tag comments [(#3078)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3078) Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> --- samples/snippets/classify_text/classify_text_tutorial.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/samples/snippets/classify_text/classify_text_tutorial.py b/samples/snippets/classify_text/classify_text_tutorial.py index 2ce388cf..d193e62e 100644 --- a/samples/snippets/classify_text/classify_text_tutorial.py +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# [START language_classify_text_tutorial] """Using the classify_text method to find content categories of text files, Then use the content category labels to compare text similarity. @@ -94,7 +93,6 @@ def index(path, index_file): # [END language_classify_text_tutorial_index] -# [START language_classify_text_tutorial_split_labels] def split_labels(categories): """The category labels are of the form "/a/b/c" up to three levels, for example "/Computers & Electronics/Software", and these labels @@ -121,10 +119,8 @@ def split_labels(categories): _categories[label] = confidence return _categories -# [END language_classify_text_tutorial_split_labels] -# [START language_classify_text_tutorial_similarity] def similarity(categories1, categories2): """Cosine similarity of the categories treated as sparse vectors.""" categories1 = split_labels(categories1) @@ -143,7 +139,6 @@ def similarity(categories1, categories2): dot += confidence * categories2.get(label, 0.0) return dot / (norm1 * norm2) -# [END language_classify_text_tutorial_similarity] # [START language_classify_text_tutorial_query] @@ -255,4 +250,3 @@ def query_category(index_file, category_string, n_top=3): query(args.index_file, args.text) if args.command == 'query-category': query_category(args.index_file, args.category) -# [END language_classify_text_tutorial] From a92e5c3dcb85290406a8fbbd334da0660a716031 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 17 Apr 2020 03:09:45 +0200 Subject: [PATCH 163/209] Update dependency google-auth to v1.14.0 [(#3148)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3148) Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index c27ca15e..04c20c7a 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.7.11 -google-auth==1.11.2 +google-auth==1.14.0 google-auth-httplib2==0.0.3 From bb6ab773d88016f47b66917e023c7cc0ddbccd3e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 17 Apr 2020 03:44:09 +0200 Subject: [PATCH 164/209] chore(deps): update dependency google-api-python-client to v1.8.0 [(#3100)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3100) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-api-python-client](https://togithub.com/google/google-api-python-client) | minor | `==1.7.11` -> `==1.8.0` | --- ### Release Notes
google/google-api-python-client

### [`v1.8.0`](https://togithub.com/google/google-api-python-client/releases/v1.8.0)

[Compare Source](https://togithub.com/google/google-api-python-client/compare/v1.7.12...v1.8.0)

Release to support API endpoint override.

New Features

- Add api endpoint override. ([#829](https://togithub.com/googleapis/google-api-python-client/pull/829))

Implementation Changes

- Don't set http.redirect_codes if the attr doesn't exist and allow more httplib2 versions. ([#841](https://togithub.com/googleapis/google-api-python-client/pull/841))

### [`v1.7.12`](https://togithub.com/google/google-api-python-client/releases/v1.7.12)

[Compare Source](https://togithub.com/google/google-api-python-client/compare/v1.7.11...v1.7.12)

Bugfix release

Implementation Changes

- Look for field 'detail' in error message. ([#739](https://togithub.com/googleapis/google-api-python-client/pull/739))
- Exclude 308s from httplib2 redirect codes list ([#813](https://togithub.com/googleapis/google-api-python-client/pull/813))

Documentation

- Remove oauth2client from docs ([#738](https://togithub.com/googleapis/google-api-python-client/pull/738))
- Fix typo. ([#745](https://togithub.com/googleapis/google-api-python-client/pull/745))
- Remove compatibility badges. ([#746](https://togithub.com/googleapis/google-api-python-client/pull/746))
- Fix TypeError: search_analytics_api_sample.py [#732](https://togithub.com/google/google-api-python-client/issues/732) ([#742](https://togithub.com/googleapis/google-api-python-client/pull/742))
- Correct response access ([#750](https://togithub.com/googleapis/google-api-python-client/pull/750))
- Fix link to API explorer ([#760](https://togithub.com/googleapis/google-api-python-client/pull/760))
- Fix argument typo in oauth2 code example ([#763](https://togithub.com/googleapis/google-api-python-client/pull/763))
- Recommend install with virtualenv ([#768](https://togithub.com/googleapis/google-api-python-client/pull/768))
- Fix capitalization in docs/README.md ([#770](https://togithub.com/googleapis/google-api-python-client/pull/770))
- Remove compatibility badges ([#796](https://togithub.com/googleapis/google-api-python-client/pull/796))
- Remove mentions of pycrypto ([#799](https://togithub.com/googleapis/google-api-python-client/pull/799))
- Fix typo in model.py
- Add note about Google Ads library ([#814](https://togithub.com/googleapis/google-api-python-client/pull/814))

Internal / Testing Changes

- Blacken ([#772](https://togithub.com/googleapis/google-api-python-client/pull/722))
- Move kokoro configs ([#832](https://togithub.com/googleapis/google-api-python-client/pull/832))
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 04c20c7a..46afe12b 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.7.11 +google-api-python-client==1.8.0 google-auth==1.14.0 google-auth-httplib2==0.0.3 From 0ce2d9b98a107d40318f324f488bf66b27e012c3 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 20 Apr 2020 18:55:50 +0200 Subject: [PATCH 165/209] chore(deps): update dependency numpy to v1.18.3 [(#3441)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3441) --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index bdbf1b3a..80d612f8 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.18.2 +numpy==1.18.3 From 16a7f8a060fbd8d5a80597c88df8b488b324e3cc Mon Sep 17 00:00:00 2001 From: Anthony Date: Thu, 23 Apr 2020 21:14:40 -0700 Subject: [PATCH 166/209] remove samples/tests that aren't on devsite, incl. localized docs [(#3423)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3423) Co-authored-by: Takashi Matsuo --- .../automl/automl_natural_language_dataset.py | 297 ------------- .../automl/automl_natural_language_model.py | 392 ------------------ .../automl/automl_natural_language_predict.py | 85 ---- samples/snippets/automl/dataset_test.py | 85 ---- samples/snippets/automl/model_test.py | 82 ---- samples/snippets/automl/predict_test.py | 31 -- 6 files changed, 972 deletions(-) delete mode 100755 samples/snippets/automl/automl_natural_language_dataset.py delete mode 100755 samples/snippets/automl/automl_natural_language_model.py delete mode 100755 samples/snippets/automl/automl_natural_language_predict.py delete mode 100644 samples/snippets/automl/dataset_test.py delete mode 100644 samples/snippets/automl/model_test.py delete mode 100644 samples/snippets/automl/predict_test.py diff --git a/samples/snippets/automl/automl_natural_language_dataset.py b/samples/snippets/automl/automl_natural_language_dataset.py deleted file mode 100755 index df77d542..00000000 --- a/samples/snippets/automl/automl_natural_language_dataset.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations on Dataset -with the Google AutoML Natural Language API. - -For more information, see the tutorial page at -https://cloud.google.com/natural-language/automl/docs/ -""" - -import argparse -import os - - -def create_dataset(project_id, compute_region, dataset_name, multilabel=False): - """Create a dataset.""" - # [START automl_language_create_dataset] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_name = 'DATASET_NAME_HERE' - # multilabel = True for multilabel or False for multiclass - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) - - # Classification type is assigned based on multilabel value. - classification_type = "MULTICLASS" - if multilabel: - classification_type = "MULTILABEL" - - # Specify the text classification type for the dataset. - dataset_metadata = {"classification_type": classification_type} - - # Set dataset name and metadata. - my_dataset = { - "display_name": dataset_name, - "text_classification_dataset_metadata": dataset_metadata, - } - - # Create a dataset with the dataset metadata in the region. - dataset = client.create_dataset(project_location, my_dataset) - - # Display the dataset information. - print("Dataset name: {}".format(dataset.name)) - print("Dataset id: {}".format(dataset.name.split("/")[-1])) - print("Dataset display name: {}".format(dataset.display_name)) - print("Text classification dataset metadata:") - print("\t{}".format(dataset.text_classification_dataset_metadata)) - print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) - - # [END automl_language_create_dataset] - - -def list_datasets(project_id, compute_region, filter_): - """List all datasets.""" - # [START automl_language_list_datasets] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # filter_ = 'filter expression here' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) - - # List all the datasets available in the region by applying filter. - response = client.list_datasets(project_location, filter_) - - print("List of datasets:") - for dataset in response: - # Display the dataset information. 
- print("Dataset name: {}".format(dataset.name)) - print("Dataset id: {}".format(dataset.name.split("/")[-1])) - print("Dataset display name: {}".format(dataset.display_name)) - print("Text classification dataset metadata:") - print("\t{}".format(dataset.text_classification_dataset_metadata)) - print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) - - # [END automl_language_list_datasets] - - -def get_dataset(project_id, compute_region, dataset_id): - """Get the dataset.""" - # [START automl_language_get_dataset] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the dataset - dataset_full_id = client.dataset_path( - project_id, compute_region, dataset_id - ) - - # Get complete detail of the dataset. - dataset = client.get_dataset(dataset_full_id) - - # Display the dataset information. - print("Dataset name: {}".format(dataset.name)) - print("Dataset id: {}".format(dataset.name.split("/")[-1])) - print("Dataset display name: {}".format(dataset.display_name)) - print("Text classification dataset metadata:") - print("\t{}".format(dataset.text_classification_dataset_metadata)) - print("Dataset example count: {}".format(dataset.example_count)) - print("Dataset create time:") - print("\tseconds: {}".format(dataset.create_time.seconds)) - print("\tnanos: {}".format(dataset.create_time.nanos)) - - # [END automl_language_get_dataset] - - -def import_data(project_id, compute_region, dataset_id, path): - """Import labelled items.""" - # [START automl_language_import_data] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - # path = 'gs://path/to/file.csv' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the dataset. - dataset_full_id = client.dataset_path( - project_id, compute_region, dataset_id - ) - - # Get the multiple Google Cloud Storage URIs. - input_uris = path.split(",") - input_config = {"gcs_source": {"input_uris": input_uris}} - - # Import the dataset from the input URI. - response = client.import_data(dataset_full_id, input_config) - - print("Processing import...") - # synchronous check of operation status. - print("Data imported. {}".format(response.result())) - - # [END automl_language_import_data] - - -def export_data(project_id, compute_region, dataset_id, output_uri): - """Export a dataset to a Google Cloud Storage bucket.""" - # [START automl_language_export_data] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - # output_uri: 'gs://location/to/export/data' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the dataset. - dataset_full_id = client.dataset_path( - project_id, compute_region, dataset_id - ) - - # Set the output URI - output_config = {"gcs_destination": {"output_uri_prefix": output_uri}} - - # Export the data to the output URI. 
- response = client.export_data(dataset_full_id, output_config) - - print("Processing export...") - # synchronous check of operation status. - print("Data exported. {}".format(response.result())) - - # [END automl_language_export_data] - - -def delete_dataset(project_id, compute_region, dataset_id): - """Delete a dataset.""" - # [START automl_language_delete_dataset] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the dataset. - dataset_full_id = client.dataset_path( - project_id, compute_region, dataset_id - ) - - # Delete a dataset. - response = client.delete_dataset(dataset_full_id) - - # synchronous check of operation status. - print("Dataset deleted. {}".format(response.result())) - - # [END automl_language_delete_dataset] - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - subparsers = parser.add_subparsers(dest="command") - - create_dataset_parser = subparsers.add_parser( - "create_dataset", help=create_dataset.__doc__ - ) - create_dataset_parser.add_argument("dataset_name") - create_dataset_parser.add_argument( - "multilabel", nargs="?", choices=["False", "True"], default="False" - ) - - list_datasets_parser = subparsers.add_parser( - "list_datasets", help=list_datasets.__doc__ - ) - list_datasets_parser.add_argument( - "filter_", nargs="?", default="text_classification_dataset_metadata:*" - ) - - get_dataset_parser = subparsers.add_parser( - "get_dataset", help=get_dataset.__doc__ - ) - get_dataset_parser.add_argument("dataset_id") - - import_data_parser = subparsers.add_parser( - "import_data", help=import_data.__doc__ - ) - import_data_parser.add_argument("dataset_id") - import_data_parser.add_argument("path") - - export_data_parser = subparsers.add_parser( - "export_data", help=export_data.__doc__ - ) - export_data_parser.add_argument("dataset_id") - export_data_parser.add_argument("output_uri") - - delete_dataset_parser = subparsers.add_parser( - "delete_dataset", help=delete_dataset.__doc__ - ) - delete_dataset_parser.add_argument("dataset_id") - - project_id = os.environ["PROJECT_ID"] - compute_region = os.environ["REGION_NAME"] - - args = parser.parse_args() - - if args.command == "create_dataset": - multilabel = True if args.multilabel == "True" else False - create_dataset( - project_id, compute_region, args.dataset_name, multilabel - ) - if args.command == "list_datasets": - list_datasets(project_id, compute_region, args.filter_) - if args.command == "get_dataset": - get_dataset(project_id, compute_region, args.dataset_id) - if args.command == "import_data": - import_data(project_id, compute_region, args.dataset_id, args.path) - if args.command == "export_data": - export_data( - project_id, compute_region, args.dataset_id, args.output_uri - ) - if args.command == "delete_dataset": - delete_dataset(project_id, compute_region, args.dataset_id) diff --git a/samples/snippets/automl/automl_natural_language_model.py b/samples/snippets/automl/automl_natural_language_model.py deleted file mode 100755 index 35472121..00000000 --- a/samples/snippets/automl/automl_natural_language_model.py +++ /dev/null @@ -1,392 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); 
-# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations on model -with the Google AutoML Natural Language API. - -For more information, see the tutorial page at -https://cloud.google.com/natural-language/automl/docs/ -""" - -import argparse -import os - - -def create_model(project_id, compute_region, dataset_id, model_name): - """Create a model.""" - # [START automl_language_create_model] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # dataset_id = 'DATASET_ID_HERE' - # model_name = 'MODEL_NAME_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) - - # Set model name and model metadata for the dataset. - my_model = { - "display_name": model_name, - "dataset_id": dataset_id, - "text_classification_model_metadata": {}, - } - - # Create a model with the model metadata in the region. - response = client.create_model(project_location, my_model) - print("Training operation name: {}".format(response.operation.name)) - print("Training started...") - - # [END automl_language_create_model] - - -def get_operation_status(operation_full_id): - """Get operation status.""" - # [START automl_language_get_operation_status] - # TODO(developer): Uncomment and set the following variables - # operation_full_id = - # 'projects//locations//operations/' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the latest state of a long-running operation. - response = client.transport._operations_client.get_operation( - operation_full_id - ) - - print("Operation status: {}".format(response)) - - # [END automl_language_get_operation_status] - - -def list_models(project_id, compute_region, filter_): - """List all models.""" - # [START automl_language_list_models] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # filter_ = 'DATASET_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - from google.cloud.automl_v1beta1 import enums - - client = automl.AutoMlClient() - - # A resource that represents Google Cloud Platform location. - project_location = client.location_path(project_id, compute_region) - - # List all the models available in the region by applying filter. - response = client.list_models(project_location, filter_) - - print("List of models:") - for model in response: - # Retrieve deployment state. - deployment_state = "" - if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: - deployment_state = "deployed" - else: - deployment_state = "undeployed" - - # Display the model information. 
- print("Model name: {}".format(model.name)) - print("Model id: {}".format(model.name.split("/")[-1])) - print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) - print("Model deployment state: {}".format(deployment_state)) - - # [END automl_language_list_models] - - -def get_model(project_id, compute_region, model_id): - """Get model details.""" - # [START automl_language_get_model] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - from google.cloud.automl_v1beta1 import enums - - client = automl.AutoMlClient() - - # Get the full path of the model. - model_full_id = client.model_path(project_id, compute_region, model_id) - - # Get complete detail of the model. - model = client.get_model(model_full_id) - - # Retrieve deployment state. - deployment_state = "" - if model.deployment_state == enums.Model.DeploymentState.DEPLOYED: - deployment_state = "deployed" - else: - deployment_state = "undeployed" - - # Display the model information. - print("Model name: {}".format(model.name)) - print("Model id: {}".format(model.name.split("/")[-1])) - print("Model display name: {}".format(model.display_name)) - print("Model create time:") - print("\tseconds: {}".format(model.create_time.seconds)) - print("\tnanos: {}".format(model.create_time.nanos)) - print("Model deployment state: {}".format(deployment_state)) - - # [END automl_language_get_model] - - -def list_model_evaluations(project_id, compute_region, model_id, filter_): - """List model evaluations.""" - # [START automl_language_list_model_evaluations] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - # filter_ = 'filter expression here' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the model. - model_full_id = client.model_path(project_id, compute_region, model_id) - - # List all the model evaluations in the model by applying filter. - response = client.list_model_evaluations(model_full_id, filter_) - - print("List of model evaluations:") - for element in response: - print(element) - - # [END automl_language_list_model_evaluations] - - -def get_model_evaluation( - project_id, compute_region, model_id, model_evaluation_id -): - """Get model evaluation.""" - # [START automl_language_get_model_evaluation] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - # model_evaluation_id = 'MODEL_EVALUATION_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the model evaluation. - model_evaluation_full_id = client.model_evaluation_path( - project_id, compute_region, model_id, model_evaluation_id - ) - - # Get complete detail of the model evaluation. 
- response = client.get_model_evaluation(model_evaluation_full_id) - - print(response) - - # [END automl_language_get_model_evaluation] - - -def display_evaluation(project_id, compute_region, model_id, filter_): - """Display evaluation.""" - # [START automl_language_display_evaluation] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - # filter_ = 'filter expression here' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the model. - model_full_id = client.model_path(project_id, compute_region, model_id) - - # List all the model evaluations in the model by applying filter. - response = client.list_model_evaluations(model_full_id, filter_) - - # Iterate through the results. - for element in response: - # There is evaluation for each class in a model and for overall model. - # Get only the evaluation of overall model. - if not element.annotation_spec_id: - model_evaluation_id = element.name.split("/")[-1] - - # Resource name for the model evaluation. - model_evaluation_full_id = client.model_evaluation_path( - project_id, compute_region, model_id, model_evaluation_id - ) - - # Get a model evaluation. - model_evaluation = client.get_model_evaluation(model_evaluation_full_id) - - class_metrics = model_evaluation.classification_evaluation_metrics - confidence_metrics_entries = class_metrics.confidence_metrics_entry - - # Showing model score based on threshold of 0.5 - for confidence_metrics_entry in confidence_metrics_entries: - if confidence_metrics_entry.confidence_threshold == 0.5: - print("Precision and recall are based on a score threshold of 0.5") - print( - "Model Precision: {}%".format( - round(confidence_metrics_entry.precision * 100, 2) - ) - ) - print( - "Model Recall: {}%".format( - round(confidence_metrics_entry.recall * 100, 2) - ) - ) - print( - "Model F1 score: {}%".format( - round(confidence_metrics_entry.f1_score * 100, 2) - ) - ) - print( - "Model Precision@1: {}%".format( - round(confidence_metrics_entry.precision_at1 * 100, 2) - ) - ) - print( - "Model Recall@1: {}%".format( - round(confidence_metrics_entry.recall_at1 * 100, 2) - ) - ) - print( - "Model F1 score@1: {}%".format( - round(confidence_metrics_entry.f1_score_at1 * 100, 2) - ) - ) - - # [END automl_language_display_evaluation] - - -def delete_model(project_id, compute_region, model_id): - """Delete a model.""" - # [START automl_language_delete_model] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - - from google.cloud import automl_v1beta1 as automl - - client = automl.AutoMlClient() - - # Get the full path of the model. - model_full_id = client.model_path(project_id, compute_region, model_id) - - # Delete a model. - response = client.delete_model(model_full_id) - - # synchronous check of operation status. - print("Model deleted. 
{}".format(response.result())) - - # [END automl_language_delete_model] - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - subparsers = parser.add_subparsers(dest="command") - - create_model_parser = subparsers.add_parser( - "create_model", help=create_model.__doc__ - ) - create_model_parser.add_argument("dataset_id") - create_model_parser.add_argument("model_name") - - get_operation_status_parser = subparsers.add_parser( - "get_operation_status", help=get_operation_status.__doc__ - ) - get_operation_status_parser.add_argument("operation_full_id") - - list_models_parser = subparsers.add_parser( - "list_models", help=list_models.__doc__ - ) - list_models_parser.add_argument("filter_") - - get_model_parser = subparsers.add_parser( - "get_model", help=get_model_evaluation.__doc__ - ) - get_model_parser.add_argument("model_id") - - list_model_evaluations_parser = subparsers.add_parser( - "list_model_evaluations", help=list_model_evaluations.__doc__ - ) - list_model_evaluations_parser.add_argument("model_id") - list_model_evaluations_parser.add_argument( - "filter_", nargs="?", default="" - ) - - get_model_evaluation_parser = subparsers.add_parser( - "get_model_evaluation", help=get_model_evaluation.__doc__ - ) - get_model_evaluation_parser.add_argument("model_id") - get_model_evaluation_parser.add_argument("model_evaluation_id") - - display_evaluation_parser = subparsers.add_parser( - "display_evaluation", help=display_evaluation.__doc__ - ) - display_evaluation_parser.add_argument("model_id") - display_evaluation_parser.add_argument("filter_", nargs="?", default="") - - delete_model_parser = subparsers.add_parser( - "delete_model", help=delete_model.__doc__ - ) - delete_model_parser.add_argument("model_id") - - project_id = os.environ["PROJECT_ID"] - compute_region = os.environ["REGION_NAME"] - - args = parser.parse_args() - - if args.command == "create_model": - create_model( - project_id, compute_region, args.dataset_id, args.model_name - ) - if args.command == "get_operation_status": - get_operation_status(args.operation_full_id) - if args.command == "list_models": - list_models(project_id, compute_region, args.filter_) - if args.command == "get_model": - get_model(project_id, compute_region, args.model_id) - if args.command == "list_model_evaluations": - list_model_evaluations( - project_id, compute_region, args.model_id, args.filter_ - ) - if args.command == "get_model_evaluation": - get_model_evaluation( - project_id, compute_region, args.model_id, args.model_evaluation_id - ) - if args.command == "display_evaluation": - display_evaluation( - project_id, compute_region, args.model_id, args.filter_ - ) - if args.command == "delete_model": - delete_model(project_id, compute_region, args.model_id) diff --git a/samples/snippets/automl/automl_natural_language_predict.py b/samples/snippets/automl/automl_natural_language_predict.py deleted file mode 100755 index b328c7ae..00000000 --- a/samples/snippets/automl/automl_natural_language_predict.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This application demonstrates how to perform basic operations on prediction -with the Google AutoML Natural Language API. - -For more information, see the tutorial page at -https://cloud.google.com/natural-language/automl/docs/ -""" - -import argparse -import os - - -def predict(project_id, compute_region, model_id, file_path): - """Classify the content.""" - # [START automl_language_predict] - # TODO(developer): Uncomment and set the following variables - # project_id = 'PROJECT_ID_HERE' - # compute_region = 'COMPUTE_REGION_HERE' - # model_id = 'MODEL_ID_HERE' - # file_path = '/local/path/to/file' - - from google.cloud import automl_v1beta1 as automl - - automl_client = automl.AutoMlClient() - - # Create client for prediction service. - prediction_client = automl.PredictionServiceClient() - - # Get the full path of the model. - model_full_id = automl_client.model_path( - project_id, compute_region, model_id - ) - - # Read the file content for prediction. - with open(file_path, "rb") as content_file: - snippet = content_file.read() - - # Set the payload by giving the content and type of the file. - payload = {"text_snippet": {"content": snippet, "mime_type": "text/plain"}} - - # params is additional domain-specific parameters. - # currently there is no additional parameters supported. - params = {} - response = prediction_client.predict(model_full_id, payload, params) - print("Prediction results:") - for result in response.payload: - print("Predicted class name: {}".format(result.display_name)) - print("Predicted class score: {}".format(result.classification.score)) - - # [END automl_language_predict] - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - subparsers = parser.add_subparsers(dest="command") - - predict_parser = subparsers.add_parser("predict", help=predict.__doc__) - predict_parser.add_argument("model_id") - predict_parser.add_argument("file_path") - - project_id = os.environ["PROJECT_ID"] - compute_region = os.environ["REGION_NAME"] - - args = parser.parse_args() - - if args.command == "predict": - predict(project_id, compute_region, args.model_id, args.file_path) diff --git a/samples/snippets/automl/dataset_test.py b/samples/snippets/automl/dataset_test.py deleted file mode 100644 index 94e5e5d0..00000000 --- a/samples/snippets/automl/dataset_test.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import os - -import pytest - -import automl_natural_language_dataset - -project_id = os.environ["GCLOUD_PROJECT"] -compute_region = "us-central1" - - -@pytest.mark.slow -def test_dataset_create_import_delete(capsys): - # create dataset - dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") - automl_natural_language_dataset.create_dataset( - project_id, compute_region, dataset_name - ) - out, _ = capsys.readouterr() - create_dataset_output = out.splitlines() - assert "Dataset id: " in create_dataset_output[1] - dataset_id = create_dataset_output[1].split()[2] - - # delete dataset - automl_natural_language_dataset.delete_dataset( - project_id, compute_region, dataset_id - ) - out, _ = capsys.readouterr() - assert "Dataset deleted." in out - - -def test_import_data(capsys): - # As importing a dataset can take a long time and only four operations can - # be run on a dataset at once. Try to import into a nonexistent dataset and - # confirm that the dataset was not found, but other elements of the request - # were valid. - try: - data = "gs://{}-lcm/happiness.csv".format(project_id) - automl_natural_language_dataset.import_data( - project_id, compute_region, "TEN0000000000000000000", data - ) - out, _ = capsys.readouterr() - assert ( - "Dataset doesn't exist or is inaccessible for use with AutoMl." - in out - ) - except Exception as e: - assert ( - "Dataset doesn't exist or is inaccessible for use with AutoMl." - in e.message - ) - - -def test_dataset_list_get(capsys): - # list datasets - automl_natural_language_dataset.list_datasets( - project_id, compute_region, "" - ) - out, _ = capsys.readouterr() - list_dataset_output = out.splitlines() - assert "Dataset id: " in list_dataset_output[2] - - # get dataset - dataset_id = list_dataset_output[2].split()[2] - automl_natural_language_dataset.get_dataset( - project_id, compute_region, dataset_id - ) - out, _ = capsys.readouterr() - assert "Dataset name: " in out diff --git a/samples/snippets/automl/model_test.py b/samples/snippets/automl/model_test.py deleted file mode 100644 index 8f484d2a..00000000 --- a/samples/snippets/automl/model_test.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import os - -from google.cloud import automl_v1beta1 as automl -import pytest - -import automl_natural_language_model - -project_id = os.environ["GCLOUD_PROJECT"] -compute_region = "us-central1" - - -@pytest.mark.skip(reason="creates too many models") -def test_model_create_status_delete(capsys): - # create model - client = automl.AutoMlClient() - model_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") - project_location = client.location_path(project_id, compute_region) - my_model = { - "display_name": model_name, - "dataset_id": "2551826603472450019", - "text_classification_model_metadata": {}, - } - response = client.create_model(project_location, my_model) - operation_name = response.operation.name - assert operation_name - - # get operation status - automl_natural_language_model.get_operation_status(operation_name) - out, _ = capsys.readouterr() - assert "Operation status: " in out - - # cancel operation - response.cancel() - - -def test_model_list_get_evaluate(capsys): - # list models - automl_natural_language_model.list_models(project_id, compute_region, "") - out, _ = capsys.readouterr() - list_models_output = out.splitlines() - assert "Model id: " in list_models_output[2] - - # get model - model_id = list_models_output[2].split()[2] - automl_natural_language_model.get_model( - project_id, compute_region, model_id - ) - out, _ = capsys.readouterr() - assert "Model name: " in out - - # list model evaluations - automl_natural_language_model.list_model_evaluations( - project_id, compute_region, model_id, "" - ) - out, _ = capsys.readouterr() - list_evals_output = out.splitlines() - assert "name: " in list_evals_output[1] - - # get model evaluation - model_evaluation_id = list_evals_output[1].split("/")[-1][:-1] - automl_natural_language_model.get_model_evaluation( - project_id, compute_region, model_id, model_evaluation_id - ) - out, _ = capsys.readouterr() - assert "evaluation_metric" in out diff --git a/samples/snippets/automl/predict_test.py b/samples/snippets/automl/predict_test.py deleted file mode 100644 index f511302d..00000000 --- a/samples/snippets/automl/predict_test.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -import automl_natural_language_predict - -project_id = os.environ["GCLOUD_PROJECT"] -compute_region = "us-central1" - - -def test_predict(capsys): - model_id = "TCN3472481026502981088" - automl_natural_language_predict.predict( - project_id, compute_region, model_id, "resources/test.txt" - ) - out, _ = capsys.readouterr() - assert "Cheese" in out From f4e622340147108d0b5f4b2f63eb476650b366fd Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 24 Apr 2020 06:52:24 +0200 Subject: [PATCH 167/209] Update dependency google-api-python-client to v1.8.2 [(#3452)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3452) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-api-python-client](https://togithub.com/google/google-api-python-client) | patch | `==1.8.0` -> `==1.8.2` | | [google-api-python-client](https://togithub.com/google/google-api-python-client) | minor | `==1.7.11` -> `==1.8.2` | --- ### Release Notes
google/google-api-python-client

### [`v1.8.2`](https://togithub.com/google/google-api-python-client/blob/master/CHANGELOG.md#182-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev181v182-2020-04-21)

[Compare Source](https://togithub.com/google/google-api-python-client/compare/v1.8.1...v1.8.2)

### [`v1.8.1`](https://togithub.com/google/google-api-python-client/blob/master/CHANGELOG.md#181-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev180v181-2020-04-20)

[Compare Source](https://togithub.com/google/google-api-python-client/compare/v1.8.0...v1.8.1)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 46afe12b..06ba56f1 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.8.0 +google-api-python-client==1.8.2 google-auth==1.14.0 google-auth-httplib2==0.0.3 From 825124010f54ebcd5270487b070021a662cacc9c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 28 Apr 2020 06:20:12 +0200 Subject: [PATCH 168/209] chore(deps): update dependency google-auth to v1.14.1 [(#3464)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3464) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | patch | `==1.14.0` -> `==1.14.1` | | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | minor | `==1.11.2` -> `==1.14.1` | --- ### Release Notes
googleapis/google-auth-library-python

### [`v1.14.1`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1141-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1140v1141-2020-04-21)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.14.0...v1.14.1)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 06ba56f1..c5ff186f 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.2 -google-auth==1.14.0 +google-auth==1.14.1 google-auth-httplib2==0.0.3 From a7adc008e0a2ca899faeedebb2736258332979d1 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 5 May 2020 00:58:18 +0200 Subject: [PATCH 169/209] chore(deps): update dependency numpy to v1.18.4 [(#3675)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3675) Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Co-authored-by: Takashi Matsuo --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 80d612f8..000eb4bc 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.18.3 +numpy==1.18.4 From d1cb61931859c385ddc606d39ddbb9c448819dd0 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 11 May 2020 22:24:11 +0200 Subject: [PATCH 170/209] chore(deps): update dependency google-auth to v1.14.2 [(#3724)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3724) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | patch | `==1.14.1` -> `==1.14.2` | --- ### Release Notes
googleapis/google-auth-library-python

### [`v1.14.2`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1142-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1141v1142-2020-05-07)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.14.1...v1.14.2)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index c5ff186f..a22972ab 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.2 -google-auth==1.14.1 +google-auth==1.14.2 google-auth-httplib2==0.0.3 From a0146189a13f5287c09e85b85da0fa7c306bff00 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 13 May 2020 08:16:04 +0200 Subject: [PATCH 171/209] chore(deps): update dependency google-auth to v1.14.3 [(#3728)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3728) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | patch | `==1.14.2` -> `==1.14.3` | --- ### Release Notes
googleapis/google-auth-library-python

### [`v1.14.3`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#1143-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1142v1143-2020-05-11)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.14.2...v1.14.3)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [x] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index a22972ab..6dbe6ea0 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.2 -google-auth==1.14.2 +google-auth==1.14.3 google-auth-httplib2==0.0.3 From 828ea632330fe11ec078bb47f3ce6842d76cc072 Mon Sep 17 00:00:00 2001 From: "Leah E. Cole" <6719667+leahecole@users.noreply.github.com> Date: Mon, 18 May 2020 21:32:27 -0700 Subject: [PATCH 172/209] update google-auth to 1.15.0 final part [(#3819)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3819) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 6dbe6ea0..52ccdec8 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.2 -google-auth==1.14.3 +google-auth==1.15.0 google-auth-httplib2==0.0.3 From f11eea15dec307f1d2130d57ece886f11ef82ac3 Mon Sep 17 00:00:00 2001 From: "Leah E. 
Cole" <6719667+leahecole@users.noreply.github.com> Date: Mon, 18 May 2020 22:14:32 -0700 Subject: [PATCH 173/209] update google-api-python-client to 1.8.3 final part [(#3827)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3827) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 52ccdec8..421ee1f3 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.8.2 +google-api-python-client==1.8.3 google-auth==1.15.0 google-auth-httplib2==0.0.3 From cbe4590c1bf8711432fc764ab78391f0917dfd2d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 27 May 2020 18:17:02 +0200 Subject: [PATCH 174/209] chore(deps): update dependency google-api-python-client to v1.8.4 [(#3881)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3881) Co-authored-by: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Co-authored-by: gcf-merge-on-green[bot] <60162190+gcf-merge-on-green[bot]@users.noreply.github.com> --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 421ee1f3..2c0db82b 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.8.3 +google-api-python-client==1.8.4 google-auth==1.15.0 google-auth-httplib2==0.0.3 From 7243c52be02a09e9374afa7df363e52f326e156a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 29 May 2020 00:27:36 +0200 Subject: [PATCH 175/209] chore(deps): update dependency google-auth to v1.16.0 [(#3903)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3903) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 2c0db82b..0673bda3 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.8.4 -google-auth==1.15.0 +google-auth==1.16.0 google-auth-httplib2==0.0.3 From d3b975698fecaf74e31aa1a7cad1c6f479a8f1f9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 2 Jun 2020 21:36:49 +0200 Subject: [PATCH 176/209] chore(deps): update dependency google-api-python-client to v1.9.1 [(#3930)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3930) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 0673bda3..0f619497 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.8.4 +google-api-python-client==1.9.1 google-auth==1.16.0 google-auth-httplib2==0.0.3 From b0771f9ce84eea85d3a5748fbfdbccdbad687f4d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 4 Jun 2020 04:08:31 +0200 Subject: [PATCH 177/209] Update dependency numpy to v1.18.5 [(#3954)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3954) --- 
samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 000eb4bc..787df584 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.18.4 +numpy==1.18.5 From b2b04900672ae6b487f955be69414b2dc8fe64e4 Mon Sep 17 00:00:00 2001 From: "Leah E. Cole" <6719667+leahecole@users.noreply.github.com> Date: Thu, 4 Jun 2020 17:28:57 -0700 Subject: [PATCH 178/209] final update for google-auth [(#3967)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3967) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 0f619497..130c1828 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.1 -google-auth==1.16.0 +google-auth==1.16.1 google-auth-httplib2==0.0.3 From f16d6e1f721effdcb3c2bc19907c7e73efcf1513 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 11 Jun 2020 01:14:10 +0200 Subject: [PATCH 179/209] Update dependency google-api-python-client to v1.9.2 [(#4038)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4038) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 130c1828..db1b542c 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.9.1 +google-api-python-client==1.9.2 google-auth==1.16.1 google-auth-httplib2==0.0.3 From 6c6d54cca892c66f82bde20100304eaf749dbc16 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 11 Jun 2020 21:51:16 +0200 Subject: [PATCH 180/209] Update dependency google-auth to v1.17.0 [(#4058)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4058) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index db1b542c..387355ad 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.2 -google-auth==1.16.1 +google-auth==1.17.0 google-auth-httplib2==0.0.3 From 5a40e01f67e3219029db3a925f9e260137d6fc0b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 12 Jun 2020 02:32:11 +0200 Subject: [PATCH 181/209] chore(deps): update dependency google-auth to v1.17.1 [(#4073)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4073) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 387355ad..e2b7f65e 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.2 -google-auth==1.17.0 +google-auth==1.17.1 google-auth-httplib2==0.0.3 From 677a6e3434c9f29fd573c68ef755c6e4cec86712 Mon Sep 17 00:00:00 2001 From: 
WhiteSource Renovate Date: Fri, 12 Jun 2020 22:53:46 +0200 Subject: [PATCH 182/209] Update dependency google-auth to v1.17.2 [(#4083)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4083) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index e2b7f65e..dd41dc46 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.2 -google-auth==1.17.1 +google-auth==1.17.2 google-auth-httplib2==0.0.3 From 8596f51c96d280b5765fa964c21d27c367921d82 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 12 Jun 2020 23:16:14 +0200 Subject: [PATCH 183/209] Update dependency google-api-python-client to v1.9.3 [(#4057)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4057) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | patch | `==1.9.2` -> `==1.9.3` | --- ### Release Notes
googleapis/google-api-python-client

### [`v1.9.3`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​193-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev192v193-2020-06-10)

[Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v1.9.2...v1.9.3)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index dd41dc46..360c7ed1 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.9.2 +google-api-python-client==1.9.3 google-auth==1.17.2 google-auth-httplib2==0.0.3 From 086d85c83e015fc62db432946a6caf59f2f650de Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 19 Jun 2020 05:34:55 +0200 Subject: [PATCH 184/209] Update dependency google-auth to v1.18.0 [(#4125)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4125) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 360c7ed1..39fd57da 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.3 -google-auth==1.17.2 +google-auth==1.18.0 google-auth-httplib2==0.0.3 From 2a85a94ad6e3ed654f531025ec16f986616e1d6d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 20 Jun 2020 01:16:04 +0200 Subject: [PATCH 185/209] chore(deps): update dependency google-cloud-automl to v1 [(#4127)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4127) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-cloud-automl](https://togithub.com/googleapis/python-automl) | major | `==0.10.0` -> `==1.0.1` | --- ### Release Notes
googleapis/python-automl

### [`v1.0.1`](https://togithub.com/googleapis/python-automl/blob/master/CHANGELOG.md#​101-httpswwwgithubcomgoogleapispython-automlcomparev100v101-2020-06-18)

[Compare Source](https://togithub.com/googleapis/python-automl/compare/v0.10.0...v1.0.1)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/automl/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/automl/requirements.txt b/samples/snippets/automl/requirements.txt index eb3be761..867dfc61 100644 --- a/samples/snippets/automl/requirements.txt +++ b/samples/snippets/automl/requirements.txt @@ -1 +1 @@ -google-cloud-automl==0.10.0 +google-cloud-automl==1.0.1 From c9a2f86b6aa4431547ef9d72701e9452de436226 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 23 Jun 2020 06:02:31 +0200 Subject: [PATCH 186/209] chore(deps): update dependency numpy to v1.19.0 [(#4137)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4137) --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 787df584..c575c64d 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.18.5 +numpy==1.19.0 From a90f2ed86270abdab79cd839207a6b501340fb39 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 9 Jul 2020 02:00:20 +0200 Subject: [PATCH 187/209] Update dependency google-auth-httplib2 to v0.0.4 [(#4255)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4255) Co-authored-by: Takashi Matsuo --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 39fd57da..9ae1b98d 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.3 google-auth==1.18.0 -google-auth-httplib2==0.0.3 +google-auth-httplib2==0.0.4 From 49cb3178815bcaf296a492dafb112e6dbc481c19 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 13 Jul 2020 00:46:30 +0200 Subject: [PATCH 188/209] chore(deps): update dependency pytest to v5.4.3 [(#4279)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4279) * chore(deps): update dependency pytest to v5.4.3 * specify pytest for python 2 in appengine Co-authored-by: Leah Cole --- samples/snippets/api/requirements-test.txt | 2 +- samples/snippets/automl/requirements-test.txt | 2 +- samples/snippets/classify_text/requirements-test.txt | 2 +- samples/snippets/cloud-client/v1/requirements-test.txt | 2 +- samples/snippets/generated-samples/v1/requirements-test.txt | 2 +- samples/snippets/sentiment/requirements-test.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/samples/snippets/api/requirements-test.txt 
b/samples/snippets/api/requirements-test.txt index 781d4326..79738af5 100644 --- a/samples/snippets/api/requirements-test.txt +++ b/samples/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/samples/snippets/automl/requirements-test.txt b/samples/snippets/automl/requirements-test.txt index 781d4326..79738af5 100644 --- a/samples/snippets/automl/requirements-test.txt +++ b/samples/snippets/automl/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/samples/snippets/classify_text/requirements-test.txt b/samples/snippets/classify_text/requirements-test.txt index 781d4326..79738af5 100644 --- a/samples/snippets/classify_text/requirements-test.txt +++ b/samples/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/samples/snippets/cloud-client/v1/requirements-test.txt b/samples/snippets/cloud-client/v1/requirements-test.txt index 781d4326..79738af5 100644 --- a/samples/snippets/cloud-client/v1/requirements-test.txt +++ b/samples/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/samples/snippets/generated-samples/v1/requirements-test.txt b/samples/snippets/generated-samples/v1/requirements-test.txt index 781d4326..79738af5 100644 --- a/samples/snippets/generated-samples/v1/requirements-test.txt +++ b/samples/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 diff --git a/samples/snippets/sentiment/requirements-test.txt b/samples/snippets/sentiment/requirements-test.txt index 781d4326..79738af5 100644 --- a/samples/snippets/sentiment/requirements-test.txt +++ b/samples/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==5.4.3 From e9ed575ecf447a8d3d890e3d7c63a03e2e93d69d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 13 Jul 2020 22:20:34 +0200 Subject: [PATCH 189/209] chore(deps): update dependency google-auth to v1.19.0 [(#4293)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4293) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 9ae1b98d..b6220764 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.9.3 -google-auth==1.18.0 +google-auth==1.19.0 google-auth-httplib2==0.0.4 From e1bcc8040d54540ce27d8fa6d932c248b39aeb23 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 16 Jul 2020 23:24:07 +0200 Subject: [PATCH 190/209] Update dependency google-api-python-client to v1.10.0 [(#4302)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4302) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-api-python-client](https://togithub.com/googleapis/google-api-python-client) | minor | `==1.9.3` -> `==1.10.0` | --- ### Release Notes
googleapis/google-api-python-client ### [`v1.10.0`](https://togithub.com/googleapis/google-api-python-client/blob/master/CHANGELOG.md#​1100-httpswwwgithubcomgoogleapisgoogle-api-python-clientcomparev193v1100-2020-07-15) [Compare Source](https://togithub.com/googleapis/google-api-python-client/compare/v1.9.3...v1.10.0) ##### Features - allow to use 'six.moves.collections_abc.Mapping' in 'client_options.from_dict()' ([#​943](https://www.github.com/googleapis/google-api-python-client/issues/943)) ([21af37b](https://www.github.com/googleapis/google-api-python-client/commit/21af37b11ea2d6a89b3df484e1b2fa1d12849510)) - Build universal wheels ([#​948](https://www.github.com/googleapis/google-api-python-client/issues/948)) ([3e28a1e](https://www.github.com/googleapis/google-api-python-client/commit/3e28a1e0d47f829182cd92f37475ab91fa5e4afc)) - discovery supports retries ([#​967](https://www.github.com/googleapis/google-api-python-client/issues/967)) ([f3348f9](https://www.github.com/googleapis/google-api-python-client/commit/f3348f98bf91a88a28bf61b12b95e391cc3be1ff)), closes [#​848](https://www.github.com/googleapis/google-api-python-client/issues/848) ##### Documentation - consolidating and updating the Contribution Guide ([#​964](https://www.github.com/googleapis/google-api-python-client/issues/964)) ([63f97f3](https://www.github.com/googleapis/google-api-python-client/commit/63f97f37daee37a725eb05df3097b20d5d4eaaf0)), closes [#​963](https://www.github.com/googleapis/google-api-python-client/issues/963) ##### [1.9.3](https://www.github.com/googleapis/google-api-python-client/compare/v1.9.2...v1.9.3) (2020-06-10) ##### Bug Fixes - update GOOGLE_API_USE_MTLS values ([#​940](https://www.github.com/googleapis/google-api-python-client/issues/940)) ([19908ed](https://www.github.com/googleapis/google-api-python-client/commit/19908edcd8a3df1db41e34100acc1f15c3c99397)) ##### [1.9.2](https://www.github.com/googleapis/google-api-python-client/compare/v1.9.1...v1.9.2) (2020-06-04) ##### Bug Fixes - bump api-core version ([#​936](https://www.github.com/googleapis/google-api-python-client/issues/936)) ([ee53b3b](https://www.github.com/googleapis/google-api-python-client/commit/ee53b3b32a050874ba4cfb491fb384f94682c824)) ##### [1.9.1](https://www.github.com/googleapis/google-api-python-client/compare/v1.9.0...v1.9.1) (2020-06-02) ##### Bug Fixes - fix python-api-core dependency issue ([#​931](https://www.github.com/googleapis/google-api-python-client/issues/931)) ([42028ed](https://www.github.com/googleapis/google-api-python-client/commit/42028ed2b2be47f85b70eb813185264f1f573d01))
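For context on what these client bumps feed into: the samples pinned by samples/snippets/api/requirements.txt call the Natural Language REST API through google-api-python-client's discovery layer. Below is a minimal sketch of that call path, not part of this patch series; it assumes Application Default Credentials are configured and the Cloud Natural Language API is enabled, and it uses the long-standing `num_retries` option on `execute()` rather than asserting anything about the discovery-level retry feature noted in the release notes above.

```python
# Minimal sketch (illustrative only, not from these patches): calling the
# Natural Language REST API via google-api-python-client, in the style of
# the samples under samples/snippets/api. Assumes Application Default
# Credentials and that the Cloud Natural Language API is enabled.
from googleapiclient import discovery

service = discovery.build("language", "v1")

body = {
    "document": {"type": "PLAIN_TEXT", "content": "Hello, world!"},
    "encodingType": "UTF8",
}

# num_retries is the per-request retry knob on execute(); it retries
# transient server errors with backoff.
response = service.documents().analyzeSentiment(body=body).execute(num_retries=3)
print(response["documentSentiment"]["score"])
```

The 1.9.x to 1.10.0 bumps tracked in these patches do not change this calling pattern; only the pins in requirements.txt move.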
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index b6220764..1545b727 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.9.3 +google-api-python-client==1.10.0 google-auth==1.19.0 google-auth-httplib2==0.0.4 From e0b1f6ec476e58b8a1aa305946732ba38d98e2e6 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 17 Jul 2020 19:02:17 +0200 Subject: [PATCH 191/209] chore(deps): update dependency google-auth to v1.19.1 [(#4304)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4304) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 1545b727..2b8bcacd 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.0 -google-auth==1.19.0 +google-auth==1.19.1 google-auth-httplib2==0.0.4 From eaee9e5177373ad5b0bb996872e7604a3682fca6 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 18 Jul 2020 02:48:10 +0200 Subject: [PATCH 192/209] chore(deps): update dependency google-auth to v1.19.2 [(#4321)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4321) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-auth](https://togithub.com/googleapis/google-auth-library-python) | patch | `==1.19.1` -> `==1.19.2` | --- ### Release Notes
googleapis/google-auth-library-python

### [`v1.19.2`](https://togithub.com/googleapis/google-auth-library-python/blob/master/CHANGELOG.md#​1192-httpswwwgithubcomgoogleapisgoogle-auth-library-pythoncomparev1191v1192-2020-07-17)

[Compare Source](https://togithub.com/googleapis/google-auth-library-python/compare/v1.19.1...v1.19.2)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Never, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#GoogleCloudPlatform/python-docs-samples). --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 2b8bcacd..5f63ebcb 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.0 -google-auth==1.19.1 +google-auth==1.19.2 google-auth-httplib2==0.0.4 From 7abd900b867c0628ed00702a5700fc42429139a8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 21 Jul 2020 23:48:04 +0200 Subject: [PATCH 193/209] chore(deps): update dependency numpy to v1.19.1 [(#4351)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4351) --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index c575c64d..7ff166cc 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.19.0 +numpy==1.19.1 From 9ffe086d7073393cc968e87368211ad8798f2776 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 28 Jul 2020 22:36:14 +0200 Subject: [PATCH 194/209] Update dependency google-auth to v1.20.0 [(#4387)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4387) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 5f63ebcb..4f9d9005 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.0 -google-auth==1.19.2 +google-auth==1.20.0 google-auth-httplib2==0.0.4 From 4d0dcf2ee66456b9c17eb91495bb252b977ce7d8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 1 Aug 2020 21:51:00 +0200 Subject: [PATCH 195/209] Update dependency pytest to v6 [(#4390)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4390) --- samples/snippets/api/requirements-test.txt | 2 +- samples/snippets/automl/requirements-test.txt | 2 +- samples/snippets/classify_text/requirements-test.txt | 2 +- samples/snippets/cloud-client/v1/requirements-test.txt | 2 +- samples/snippets/generated-samples/v1/requirements-test.txt | 2 +- samples/snippets/sentiment/requirements-test.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/samples/snippets/api/requirements-test.txt b/samples/snippets/api/requirements-test.txt index 79738af5..7e460c8c 100644 --- a/samples/snippets/api/requirements-test.txt +++ 
b/samples/snippets/api/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/samples/snippets/automl/requirements-test.txt b/samples/snippets/automl/requirements-test.txt index 79738af5..7e460c8c 100644 --- a/samples/snippets/automl/requirements-test.txt +++ b/samples/snippets/automl/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/samples/snippets/classify_text/requirements-test.txt b/samples/snippets/classify_text/requirements-test.txt index 79738af5..7e460c8c 100644 --- a/samples/snippets/classify_text/requirements-test.txt +++ b/samples/snippets/classify_text/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/samples/snippets/cloud-client/v1/requirements-test.txt b/samples/snippets/cloud-client/v1/requirements-test.txt index 79738af5..7e460c8c 100644 --- a/samples/snippets/cloud-client/v1/requirements-test.txt +++ b/samples/snippets/cloud-client/v1/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/samples/snippets/generated-samples/v1/requirements-test.txt b/samples/snippets/generated-samples/v1/requirements-test.txt index 79738af5..7e460c8c 100644 --- a/samples/snippets/generated-samples/v1/requirements-test.txt +++ b/samples/snippets/generated-samples/v1/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 diff --git a/samples/snippets/sentiment/requirements-test.txt b/samples/snippets/sentiment/requirements-test.txt index 79738af5..7e460c8c 100644 --- a/samples/snippets/sentiment/requirements-test.txt +++ b/samples/snippets/sentiment/requirements-test.txt @@ -1 +1 @@ -pytest==5.4.3 +pytest==6.0.1 From e83bc93fb3dba4da599b4a504b35002ff4d982e4 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 7 Aug 2020 03:36:31 +0200 Subject: [PATCH 196/209] chore(deps): update dependency google-auth to v1.20.1 [(#4452)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4452) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 4f9d9005..41f4cf40 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.0 -google-auth==1.20.0 +google-auth==1.20.1 google-auth-httplib2==0.0.4 From bba46b28ffad7ee42a8028548564eecdc6cd5f37 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 27 Aug 2020 06:12:53 +0200 Subject: [PATCH 197/209] chore(deps): update dependency google-api-python-client to v1.10.1 [(#4557)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4557) * chore(deps): update dependency google-api-python-client to v1.10.1 * Update requirements.txt Co-authored-by: Takashi Matsuo --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 41f4cf40..6e149081 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.10.0 +google-api-python-client==1.10.1 google-auth==1.20.1 google-auth-httplib2==0.0.4 From f5886c444597f17507301cf8d4967bb017a4e502 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 28 Aug 2020 01:17:31 +0200 Subject: [PATCH 198/209] Update dependency google-auth to v1.21.0 
[(#4588)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4588) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 6e149081..e25b0a8b 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.10.1 -google-auth==1.20.1 +google-auth==1.21.0 google-auth-httplib2==0.0.4 From 2cba88366de05d5259bf99cb4edc2f724539e41c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 28 Aug 2020 07:21:42 +0200 Subject: [PATCH 199/209] Update dependency google-api-python-client to v1.11.0 [(#4587)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4587) Co-authored-by: Takashi Matsuo --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index e25b0a8b..b5124a50 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.10.1 +google-api-python-client==1.11.0 google-auth==1.21.0 google-auth-httplib2==0.0.4 From acac48350efdee3a934598c15950c412f54bb3d9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 10 Sep 2020 00:57:06 +0200 Subject: [PATCH 200/209] chore(deps): update dependency google-auth to v1.21.1 [(#4634)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4634) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index b5124a50..7858cee7 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.11.0 -google-auth==1.21.0 +google-auth==1.21.1 google-auth-httplib2==0.0.4 From 3688563317f77ca57056dc29988ef00e0c51ffbc Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 15 Sep 2020 00:59:19 +0200 Subject: [PATCH 201/209] chore(deps): update dependency numpy to v1.19.2 [(#4662)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4662) --- samples/snippets/classify_text/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/classify_text/requirements.txt b/samples/snippets/classify_text/requirements.txt index 7ff166cc..de040ee0 100644 --- a/samples/snippets/classify_text/requirements.txt +++ b/samples/snippets/classify_text/requirements.txt @@ -1,2 +1,2 @@ google-cloud-language==1.3.0 -numpy==1.19.1 +numpy==1.19.2 From aa5b91c90e47d75b980595d1f5b97caaa3c359b5 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 18 Sep 2020 18:30:21 +0200 Subject: [PATCH 202/209] chore(deps): update dependency google-auth to v1.21.2 [(#4684)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4684) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 7858cee7..85a462c8 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ 
google-api-python-client==1.11.0 -google-auth==1.21.1 +google-auth==1.21.2 google-auth-httplib2==0.0.4 From 54b298d930c0a669ad08817b18e7f5e0ddcd60b4 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Fri, 18 Sep 2020 13:46:06 -0600 Subject: [PATCH 203/209] chore: delete automl samples [(#4696)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4696) --- samples/snippets/automl/requirements-test.txt | 1 - samples/snippets/automl/requirements.txt | 1 - samples/snippets/automl/resources/test.txt | 1 - 3 files changed, 3 deletions(-) delete mode 100644 samples/snippets/automl/requirements-test.txt delete mode 100644 samples/snippets/automl/requirements.txt delete mode 100644 samples/snippets/automl/resources/test.txt diff --git a/samples/snippets/automl/requirements-test.txt b/samples/snippets/automl/requirements-test.txt deleted file mode 100644 index 7e460c8c..00000000 --- a/samples/snippets/automl/requirements-test.txt +++ /dev/null @@ -1 +0,0 @@ -pytest==6.0.1 diff --git a/samples/snippets/automl/requirements.txt b/samples/snippets/automl/requirements.txt deleted file mode 100644 index 867dfc61..00000000 --- a/samples/snippets/automl/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -google-cloud-automl==1.0.1 diff --git a/samples/snippets/automl/resources/test.txt b/samples/snippets/automl/resources/test.txt deleted file mode 100644 index f0dde24b..00000000 --- a/samples/snippets/automl/resources/test.txt +++ /dev/null @@ -1 +0,0 @@ -A strong taste of hazlenut and orange From 7227561deeb0104c32f39754460ca51bf68409f9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 22 Sep 2020 22:11:39 +0200 Subject: [PATCH 204/209] chore(deps): update dependency google-api-python-client to v1.12.1 [(#4674)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4674) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 85a462c8..f739af51 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.11.0 +google-api-python-client==1.12.1 google-auth==1.21.2 google-auth-httplib2==0.0.4 From c6d3b76dba7756481b5fc11a6bffec2318d0dc4d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 23 Sep 2020 22:31:01 +0200 Subject: [PATCH 205/209] chore(deps): update dependency google-auth to v1.21.3 [(#4754)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4754) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index f739af51..572132c4 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ google-api-python-client==1.12.1 -google-auth==1.21.2 +google-auth==1.21.3 google-auth-httplib2==0.0.4 From c0b81f1a01243684314d2f05e1ed7bce020d03d2 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 24 Sep 2020 22:42:49 +0200 Subject: [PATCH 206/209] chore(deps): update dependency google-api-python-client to v1.12.2 [(#4751)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4751) --- samples/snippets/api/requirements.txt | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/samples/snippets/api/requirements.txt b/samples/snippets/api/requirements.txt index 572132c4..72a261b1 100644 --- a/samples/snippets/api/requirements.txt +++ b/samples/snippets/api/requirements.txt @@ -1,3 +1,3 @@ -google-api-python-client==1.12.1 +google-api-python-client==1.12.2 google-auth==1.21.3 google-auth-httplib2==0.0.4 From 8be89edfeac45bce555aa8bf3d1279833c49d36f Mon Sep 17 00:00:00 2001 From: Harikumar Devandla Date: Sun, 27 Sep 2020 11:36:58 -0700 Subject: [PATCH 207/209] chore: update templates --- .coveragerc | 5 +- .github/CODEOWNERS | 2 +- .github/snippet-bot.yml | 0 .gitignore | 3 +- .kokoro/build.sh | 8 +- .kokoro/docker/docs/Dockerfile | 98 ++++ .kokoro/docker/docs/fetch_gpg_keys.sh | 45 ++ .kokoro/docs/common.cfg | 21 +- .kokoro/docs/docs-presubmit.cfg | 17 + .kokoro/populate-secrets.sh | 43 ++ .kokoro/publish-docs.sh | 39 +- .kokoro/release/common.cfg | 50 +- .kokoro/trampoline.sh | 15 +- .kokoro/trampoline_v2.sh | 487 ++++++++++++++++++ .trampolinerc | 51 ++ CONTRIBUTING.rst | 19 - docs/_templates/layout.html | 4 +- docs/conf.py | 14 +- samples/AUTHORING_GUIDE.md | 1 + samples/CONTRIBUTING.md | 1 + samples/snippets/api/analyze.py | 54 +- samples/snippets/api/analyze_test.py | 242 +++++---- samples/snippets/api/noxfile.py | 222 ++++++++ .../classify_text/classify_text_tutorial.py | 118 +++-- .../classify_text_tutorial_test.py | 35 +- samples/snippets/classify_text/noxfile.py | 222 ++++++++ samples/snippets/cloud-client/v1/noxfile.py | 222 ++++++++ .../snippets/cloud-client/v1/quickstart.py | 13 +- .../cloud-client/v1/quickstart_test.py | 2 +- .../snippets/cloud-client/v1/set_endpoint.py | 10 +- .../cloud-client/v1/set_endpoint_test.py | 2 +- .../v1/language_sentiment_text.py | 10 +- .../v1/language_sentiment_text_test.py | 8 +- .../snippets/generated-samples/v1/noxfile.py | 222 ++++++++ samples/snippets/sentiment/noxfile.py | 222 ++++++++ .../snippets/sentiment/sentiment_analysis.py | 32 +- .../sentiment/sentiment_analysis_test.py | 22 +- scripts/decrypt-secrets.sh | 15 +- synth.metadata | 16 +- synth.py | 12 +- 40 files changed, 2266 insertions(+), 358 deletions(-) create mode 100644 .github/snippet-bot.yml create mode 100644 .kokoro/docker/docs/Dockerfile create mode 100755 .kokoro/docker/docs/fetch_gpg_keys.sh create mode 100644 .kokoro/docs/docs-presubmit.cfg create mode 100755 .kokoro/populate-secrets.sh create mode 100755 .kokoro/trampoline_v2.sh create mode 100644 .trampolinerc create mode 100644 samples/AUTHORING_GUIDE.md create mode 100644 samples/CONTRIBUTING.md create mode 100644 samples/snippets/api/noxfile.py create mode 100644 samples/snippets/classify_text/noxfile.py create mode 100644 samples/snippets/cloud-client/v1/noxfile.py create mode 100644 samples/snippets/generated-samples/v1/noxfile.py create mode 100644 samples/snippets/sentiment/noxfile.py diff --git a/.coveragerc b/.coveragerc index dd39c854..0d8e6297 100644 --- a/.coveragerc +++ b/.coveragerc @@ -17,6 +17,8 @@ # Generated by synthtool. DO NOT EDIT! 
[run] branch = True +omit = + google/cloud/__init__.py [report] fail_under = 100 @@ -32,4 +34,5 @@ omit = */gapic/*.py */proto/*.py */core/*.py - */site-packages/*.py \ No newline at end of file + */site-packages/*.py + google/cloud/__init__.py diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 30c3973a..c5faf09e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -8,4 +8,4 @@ * @googleapis/yoshi-python # The python-samples-reviewers team is the default owner for samples changes -/samples/ @googleapis/python-samples-owners \ No newline at end of file +/samples/ @telpirion @sirtorry @googleapis/python-samples-owners diff --git a/.github/snippet-bot.yml b/.github/snippet-bot.yml new file mode 100644 index 00000000..e69de29b diff --git a/.gitignore b/.gitignore index b87e1ed5..b9daa52f 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,7 @@ pip-log.txt # Built documentation docs/_build bigquery/docs/generated +docs.metadata # Virtual environment env/ @@ -57,4 +58,4 @@ system_tests/local_test_setup # Make sure a generated file isn't accidentally committed. pylintrc -pylintrc.test \ No newline at end of file +pylintrc.test diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 9a5a6e61..56b72c82 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -36,4 +36,10 @@ python3.6 -m pip uninstall --yes --quiet nox-automation python3.6 -m pip install --upgrade --quiet nox python3.6 -m nox --version -python3.6 -m nox +# If NOX_SESSION is set, it only runs the specified session, +# otherwise run all the sessions. +if [[ -n "${NOX_SESSION:-}" ]]; then + python3.6 -m nox -s "${NOX_SESSION:-}" +else + python3.6 -m nox +fi diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile new file mode 100644 index 00000000..412b0b56 --- /dev/null +++ b/.kokoro/docker/docs/Dockerfile @@ -0,0 +1,98 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ubuntu:20.04 + +ENV DEBIAN_FRONTEND noninteractive + +# Ensure local Python is preferred over distribution Python. +ENV PATH /usr/local/bin:$PATH + +# Install dependencies. +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + apt-transport-https \ + build-essential \ + ca-certificates \ + curl \ + dirmngr \ + git \ + gpg-agent \ + graphviz \ + libbz2-dev \ + libdb5.3-dev \ + libexpat1-dev \ + libffi-dev \ + liblzma-dev \ + libreadline-dev \ + libsnappy-dev \ + libssl-dev \ + libsqlite3-dev \ + portaudio19-dev \ + redis-server \ + software-properties-common \ + ssh \ + sudo \ + tcl \ + tcl-dev \ + tk \ + tk-dev \ + uuid-dev \ + wget \ + zlib1g-dev \ + && add-apt-repository universe \ + && apt-get update \ + && apt-get -y install jq \ + && apt-get clean autoclean \ + && apt-get autoremove -y \ + && rm -rf /var/lib/apt/lists/* \ + && rm -f /var/cache/apt/archives/*.deb + + +COPY fetch_gpg_keys.sh /tmp +# Install the desired versions of Python. 
+RUN set -ex \ + && export GNUPGHOME="$(mktemp -d)" \ + && echo "disable-ipv6" >> "${GNUPGHOME}/dirmngr.conf" \ + && /tmp/fetch_gpg_keys.sh \ + && for PYTHON_VERSION in 3.7.8 3.8.5; do \ + wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \ + && wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \ + && gpg --batch --verify python-${PYTHON_VERSION}.tar.xz.asc python-${PYTHON_VERSION}.tar.xz \ + && rm -r python-${PYTHON_VERSION}.tar.xz.asc \ + && mkdir -p /usr/src/python-${PYTHON_VERSION} \ + && tar -xJC /usr/src/python-${PYTHON_VERSION} --strip-components=1 -f python-${PYTHON_VERSION}.tar.xz \ + && rm python-${PYTHON_VERSION}.tar.xz \ + && cd /usr/src/python-${PYTHON_VERSION} \ + && ./configure \ + --enable-shared \ + # This works only on Python 2.7 and throws a warning on every other + # version, but seems otherwise harmless. + --enable-unicode=ucs4 \ + --with-system-ffi \ + --without-ensurepip \ + && make -j$(nproc) \ + && make install \ + && ldconfig \ + ; done \ + && rm -rf "${GNUPGHOME}" \ + && rm -rf /usr/src/python* \ + && rm -rf ~/.cache/ + +RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ + && python3.7 /tmp/get-pip.py \ + && python3.8 /tmp/get-pip.py \ + && rm /tmp/get-pip.py + +CMD ["python3.7"] diff --git a/.kokoro/docker/docs/fetch_gpg_keys.sh b/.kokoro/docker/docs/fetch_gpg_keys.sh new file mode 100755 index 00000000..d653dd86 --- /dev/null +++ b/.kokoro/docker/docs/fetch_gpg_keys.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A script to fetch gpg keys with retry. +# Avoid jinja parsing the file. +# + +function retry { + if [[ "${#}" -le 1 ]]; then + echo "Usage: ${0} retry_count commands.." + exit 1 + fi + local retries=${1} + local command="${@:2}" + until [[ "${retries}" -le 0 ]]; do + $command && return 0 + if [[ $? -ne 0 ]]; then + echo "command failed, retrying" + ((retries--)) + fi + done + return 1 +} + +# 3.6.9, 3.7.5 (Ned Deily) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D + +# 3.8.0 (Łukasz Langa) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + E3FF2839C048B25C084DEBE9B26995E310250568 + +# diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg index 277799cb..573dc985 100644 --- a/.kokoro/docs/common.cfg +++ b/.kokoro/docs/common.cfg @@ -11,12 +11,12 @@ action { gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. 
-build_file: "python-language/.kokoro/trampoline.sh" +build_file: "python-language/.kokoro/trampoline_v2.sh" # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" + value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs" } env_vars: { key: "TRAMPOLINE_BUILD_FILE" @@ -28,6 +28,23 @@ env_vars: { value: "docs-staging" } +env_vars: { + key: "V2_STAGING_BUCKET" + value: "docs-staging-v2-staging" +} + +# It will upload the docker image after successful builds. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "true" +} + +# It will always build the docker image. +env_vars: { + key: "TRAMPOLINE_DOCKERFILE" + value: ".kokoro/docker/docs/Dockerfile" +} + # Fetch the token needed for reporting release status to GitHub before_action { fetch_keystore { diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg new file mode 100644 index 00000000..11181078 --- /dev/null +++ b/.kokoro/docs/docs-presubmit.cfg @@ -0,0 +1,17 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "STAGING_BUCKET" + value: "gcloud-python-test" +} + +env_vars: { + key: "V2_STAGING_BUCKET" + value: "gcloud-python-test" +} + +# We only upload the image in the main `docs` build. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "false" +} diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh new file mode 100755 index 00000000..f5251425 --- /dev/null +++ b/.kokoro/populate-secrets.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright 2020 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} +function msg { println "$*" >&2 ;} +function println { printf '%s\n' "$(now) $*" ;} + + +# Populates requested secrets set in SECRET_MANAGER_KEYS from service account: +# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com +SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" +msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" +mkdir -p ${SECRET_LOCATION} +for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") +do + msg "Retrieving secret ${key}" + docker run --entrypoint=gcloud \ + --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \ + gcr.io/google.com/cloudsdktool/cloud-sdk \ + secrets versions access latest \ + --project cloud-devrel-kokoro-resources \ + --secret ${key} > \ + "${SECRET_LOCATION}/${key}" + if [[ $? == 0 ]]; then + msg "Secret written to ${SECRET_LOCATION}/${key}" + else + msg "Error retrieving secret ${key}" + fi +done diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index becf302b..8acb14e8 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -18,26 +18,16 @@ set -eo pipefail # Disable buffering, so that the logs stream through. 
export PYTHONUNBUFFERED=1 -cd github/python-language - -# Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --user --upgrade --quiet nox +python3 -m nox --version # build docs nox -s docs -python3 -m pip install gcp-docuploader - -# install a json parser -sudo apt-get update -sudo apt-get -y install software-properties-common -sudo add-apt-repository universe -sudo apt-get update -sudo apt-get -y install jq +python3 -m pip install --user gcp-docuploader # create metadata python3 -m docuploader create-metadata \ @@ -52,4 +42,23 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" + + +# docfx yaml files +nox -s docfx + +# create metadata. +python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg index b54eed24..a64d706f 100644 --- a/.kokoro/release/common.cfg +++ b/.kokoro/release/common.cfg @@ -23,42 +23,18 @@ env_vars: { value: "github/python-language/.kokoro/release.sh" } -# Fetch the token needed for reporting release status to GitHub -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "yoshi-automation-github-key" - } - } -} - -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google_cloud_pypi_password" - } - } -} - -# Fetch magictoken to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "releasetool-magictoken" - } - } +# Fetch PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google_cloud_pypi_password" + } + } } -# Fetch api key to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "magic-github-proxy-api-key" - } - } -} +# Tokens needed to report release status back to GitHub +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" +} \ No newline at end of file diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh index e8c4251f..f39236e9 100755 --- a/.kokoro/trampoline.sh +++ b/.kokoro/trampoline.sh @@ -15,9 +15,14 @@ set -eo pipefail -python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$? +# Always run the cleanup script, regardless of the success of bouncing into +# the container. 
+function cleanup() { + chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + echo "cleanup"; +} +trap cleanup EXIT -chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh -${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true - -exit ${ret_code} +$(dirname $0)/populate-secrets.sh # Secret Manager secrets. +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" \ No newline at end of file diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh new file mode 100755 index 00000000..719bcd5b --- /dev/null +++ b/.kokoro/trampoline_v2.sh @@ -0,0 +1,487 @@ +#!/usr/bin/env bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# trampoline_v2.sh +# +# This script does 3 things. +# +# 1. Prepare the Docker image for the test +# 2. Run the Docker with appropriate flags to run the test +# 3. Upload the newly built Docker image +# +# in a way that is somewhat compatible with trampoline_v1. +# +# To run this script, first download few files from gcs to /dev/shm. +# (/dev/shm is passed into the container as KOKORO_GFILE_DIR). +# +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm +# +# Then run the script. +# .kokoro/trampoline_v2.sh +# +# These environment variables are required: +# TRAMPOLINE_IMAGE: The docker image to use. +# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. +# +# You can optionally change these environment variables: +# TRAMPOLINE_IMAGE_UPLOAD: +# (true|false): Whether to upload the Docker image after the +# successful builds. +# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. +# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. +# Defaults to /workspace. +# Potentially there are some repo specific envvars in .trampolinerc in +# the project root. + + +set -euo pipefail + +TRAMPOLINE_VERSION="2.0.5" + +if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then + readonly IO_COLOR_RED="$(tput setaf 1)" + readonly IO_COLOR_GREEN="$(tput setaf 2)" + readonly IO_COLOR_YELLOW="$(tput setaf 3)" + readonly IO_COLOR_RESET="$(tput sgr0)" +else + readonly IO_COLOR_RED="" + readonly IO_COLOR_GREEN="" + readonly IO_COLOR_YELLOW="" + readonly IO_COLOR_RESET="" +fi + +function function_exists { + [ $(LC_ALL=C type -t $1)"" == "function" ] +} + +# Logs a message using the given color. The first argument must be one +# of the IO_COLOR_* variables defined above, such as +# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the +# given color. The log message will also have an RFC-3339 timestamp +# prepended (in UTC). You can disable the color output by setting +# TERM=vt100. 
+function log_impl() { + local color="$1" + shift + local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" + echo "================================================================" + echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" + echo "================================================================" +} + +# Logs the given message with normal coloring and a timestamp. +function log() { + log_impl "${IO_COLOR_RESET}" "$@" +} + +# Logs the given message in green with a timestamp. +function log_green() { + log_impl "${IO_COLOR_GREEN}" "$@" +} + +# Logs the given message in yellow with a timestamp. +function log_yellow() { + log_impl "${IO_COLOR_YELLOW}" "$@" +} + +# Logs the given message in red with a timestamp. +function log_red() { + log_impl "${IO_COLOR_RED}" "$@" +} + +readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) +readonly tmphome="${tmpdir}/h" +mkdir -p "${tmphome}" + +function cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +RUNNING_IN_CI="${RUNNING_IN_CI:-false}" + +# The workspace in the container, defaults to /workspace. +TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" + +pass_down_envvars=( + # TRAMPOLINE_V2 variables. + # Tells scripts whether they are running as part of CI or not. + "RUNNING_IN_CI" + # Indicates which CI system we're in. + "TRAMPOLINE_CI" + # Indicates the version of the script. + "TRAMPOLINE_VERSION" +) + +log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" + +# Detect which CI systems we're in. If we're in any of the CI systems +# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be +# the name of the CI system. Both envvars will be passing down to the +# container for telling which CI system we're in. +if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then + # descriptive env var for indicating it's on CI. + RUNNING_IN_CI="true" + TRAMPOLINE_CI="kokoro" + if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then + if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then + log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." + exit 1 + fi + # This service account will be activated later. + TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" + else + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + gcloud auth list + fi + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet + fi + pass_down_envvars+=( + # KOKORO dynamic variables. 
+ "KOKORO_BUILD_NUMBER" + "KOKORO_BUILD_ID" + "KOKORO_JOB_NAME" + "KOKORO_GIT_COMMIT" + "KOKORO_GITHUB_COMMIT" + "KOKORO_GITHUB_PULL_REQUEST_NUMBER" + "KOKORO_GITHUB_PULL_REQUEST_COMMIT" + # For Build Cop Bot + "KOKORO_GITHUB_COMMIT_URL" + "KOKORO_GITHUB_PULL_REQUEST_URL" + ) +elif [[ "${TRAVIS:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="travis" + pass_down_envvars+=( + "TRAVIS_BRANCH" + "TRAVIS_BUILD_ID" + "TRAVIS_BUILD_NUMBER" + "TRAVIS_BUILD_WEB_URL" + "TRAVIS_COMMIT" + "TRAVIS_COMMIT_MESSAGE" + "TRAVIS_COMMIT_RANGE" + "TRAVIS_JOB_NAME" + "TRAVIS_JOB_NUMBER" + "TRAVIS_JOB_WEB_URL" + "TRAVIS_PULL_REQUEST" + "TRAVIS_PULL_REQUEST_BRANCH" + "TRAVIS_PULL_REQUEST_SHA" + "TRAVIS_PULL_REQUEST_SLUG" + "TRAVIS_REPO_SLUG" + "TRAVIS_SECURE_ENV_VARS" + "TRAVIS_TAG" + ) +elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="github-workflow" + pass_down_envvars+=( + "GITHUB_WORKFLOW" + "GITHUB_RUN_ID" + "GITHUB_RUN_NUMBER" + "GITHUB_ACTION" + "GITHUB_ACTIONS" + "GITHUB_ACTOR" + "GITHUB_REPOSITORY" + "GITHUB_EVENT_NAME" + "GITHUB_EVENT_PATH" + "GITHUB_SHA" + "GITHUB_REF" + "GITHUB_HEAD_REF" + "GITHUB_BASE_REF" + ) +elif [[ "${CIRCLECI:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="circleci" + pass_down_envvars+=( + "CIRCLE_BRANCH" + "CIRCLE_BUILD_NUM" + "CIRCLE_BUILD_URL" + "CIRCLE_COMPARE_URL" + "CIRCLE_JOB" + "CIRCLE_NODE_INDEX" + "CIRCLE_NODE_TOTAL" + "CIRCLE_PREVIOUS_BUILD_NUM" + "CIRCLE_PROJECT_REPONAME" + "CIRCLE_PROJECT_USERNAME" + "CIRCLE_REPOSITORY_URL" + "CIRCLE_SHA1" + "CIRCLE_STAGE" + "CIRCLE_USERNAME" + "CIRCLE_WORKFLOW_ID" + "CIRCLE_WORKFLOW_JOB_ID" + "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" + "CIRCLE_WORKFLOW_WORKSPACE_ID" + ) +fi + +# Configure the service account for pulling the docker image. +function repo_root() { + local dir="$1" + while [[ ! -d "${dir}/.git" ]]; do + dir="$(dirname "$dir")" + done + echo "${dir}" +} + +# Detect the project root. In CI builds, we assume the script is in +# the git tree and traverse from there, otherwise, traverse from `pwd` +# to find `.git` directory. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + PROGRAM_PATH="$(realpath "$0")" + PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" + PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" +else + PROJECT_ROOT="$(repo_root $(pwd))" +fi + +log_yellow "Changing to the project root: ${PROJECT_ROOT}." +cd "${PROJECT_ROOT}" + +# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need +# to use this environment variable in `PROJECT_ROOT`. +if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then + + mkdir -p "${tmpdir}/gcloud" + gcloud_config_dir="${tmpdir}/gcloud" + + log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." + export CLOUDSDK_CONFIG="${gcloud_config_dir}" + + log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." + gcloud auth activate-service-account \ + --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet +fi + +required_envvars=( + # The basic trampoline configurations. + "TRAMPOLINE_IMAGE" + "TRAMPOLINE_BUILD_FILE" +) + +if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then + source "${PROJECT_ROOT}/.trampolinerc" +fi + +log_yellow "Checking environment variables." +for e in "${required_envvars[@]}" +do + if [[ -z "${!e:-}" ]]; then + log "Missing ${e} env var. Aborting." + exit 1 + fi +done + +# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 +# script: e.g. 
"github/repo-name/.kokoro/run_tests.sh" +TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" +log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" + +# ignore error on docker operations and test execution +set +e + +log_yellow "Preparing Docker image." +# We only download the docker image in CI builds. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + # Download the docker image specified by `TRAMPOLINE_IMAGE` + + # We may want to add --max-concurrent-downloads flag. + + log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." + if docker pull "${TRAMPOLINE_IMAGE}"; then + log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="true" + else + log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="false" + fi +else + # For local run, check if we have the image. + if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then + has_image="true" + else + has_image="false" + fi +fi + + +# The default user for a Docker container has uid 0 (root). To avoid +# creating root-owned files in the build directory we tell docker to +# use the current user ID. +user_uid="$(id -u)" +user_gid="$(id -g)" +user_name="$(id -un)" + +# To allow docker in docker, we add the user to the docker group in +# the host os. +docker_gid=$(cut -d: -f3 < <(getent group docker)) + +update_cache="false" +if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then + # Build the Docker image from the source. + context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") + docker_build_flags=( + "-f" "${TRAMPOLINE_DOCKERFILE}" + "-t" "${TRAMPOLINE_IMAGE}" + "--build-arg" "UID=${user_uid}" + "--build-arg" "USERNAME=${user_name}" + ) + if [[ "${has_image}" == "true" ]]; then + docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") + fi + + log_yellow "Start building the docker image." + if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then + echo "docker build" "${docker_build_flags[@]}" "${context_dir}" + fi + + # ON CI systems, we want to suppress docker build logs, only + # output the logs when it fails. + if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + if docker build "${docker_build_flags[@]}" "${context_dir}" \ + > "${tmpdir}/docker_build.log" 2>&1; then + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + cat "${tmpdir}/docker_build.log" + fi + + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + log_yellow "Dumping the build logs:" + cat "${tmpdir}/docker_build.log" + exit 1 + fi + else + if docker build "${docker_build_flags[@]}" "${context_dir}"; then + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + exit 1 + fi + fi +else + if [[ "${has_image}" != "true" ]]; then + log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." + exit 1 + fi +fi + +# We use an array for the flags so they are easier to document. +docker_flags=( + # Remove the container after it exists. + "--rm" + + # Use the host network. + "--network=host" + + # Run in priviledged mode. We are not using docker for sandboxing or + # isolation, just for packaging our dev tools. + "--privileged" + + # Run the docker script with the user id. Because the docker image gets to + # write in ${PWD} you typically want this to be your user id. + # To allow docker in docker, we need to use docker gid on the host. + "--user" "${user_uid}:${docker_gid}" + + # Pass down the USER. 
+ "--env" "USER=${user_name}" + + # Mount the project directory inside the Docker container. + "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" + "--workdir" "${TRAMPOLINE_WORKSPACE}" + "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" + + # Mount the temporary home directory. + "--volume" "${tmphome}:/h" + "--env" "HOME=/h" + + # Allow docker in docker. + "--volume" "/var/run/docker.sock:/var/run/docker.sock" + + # Mount the /tmp so that docker in docker can mount the files + # there correctly. + "--volume" "/tmp:/tmp" + # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR + # TODO(tmatsuo): This part is not portable. + "--env" "TRAMPOLINE_SECRET_DIR=/secrets" + "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" + "--env" "KOKORO_GFILE_DIR=/secrets/gfile" + "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" + "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" +) + +# Add an option for nicer output if the build gets a tty. +if [[ -t 0 ]]; then + docker_flags+=("-it") +fi + +# Passing down env vars +for e in "${pass_down_envvars[@]}" +do + if [[ -n "${!e:-}" ]]; then + docker_flags+=("--env" "${e}=${!e}") + fi +done + +# If arguments are given, all arguments will become the commands run +# in the container, otherwise run TRAMPOLINE_BUILD_FILE. +if [[ $# -ge 1 ]]; then + log_yellow "Running the given commands '" "${@:1}" "' in the container." + readonly commands=("${@:1}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" +else + log_yellow "Running the tests in a Docker container." + docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" +fi + + +test_retval=$? + +if [[ ${test_retval} -eq 0 ]]; then + log_green "Build finished with ${test_retval}" +else + log_red "Build finished with ${test_retval}" +fi + +# Only upload it when the test passes. +if [[ "${update_cache}" == "true" ]] && \ + [[ $test_retval == 0 ]] && \ + [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then + log_yellow "Uploading the Docker image." + if docker push "${TRAMPOLINE_IMAGE}"; then + log_green "Finished uploading the Docker image." + else + log_red "Failed uploading the Docker image." + fi + # Call trampoline_after_upload_hook if it's defined. + if function_exists trampoline_after_upload_hook; then + trampoline_after_upload_hook + fi + +fi + +exit "${test_retval}" diff --git a/.trampolinerc b/.trampolinerc new file mode 100644 index 00000000..995ee291 --- /dev/null +++ b/.trampolinerc @@ -0,0 +1,51 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Template for .trampolinerc + +# Add required env vars here. 
+required_envvars+=( + "STAGING_BUCKET" + "V2_STAGING_BUCKET" +) + +# Add env vars which are passed down into the container here. +pass_down_envvars+=( + "STAGING_BUCKET" + "V2_STAGING_BUCKET" +) + +# Prevent unintentional override on the default image. +if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \ + [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image." + exit 1 +fi + +# Define the default value if it makes sense. +if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then + TRAMPOLINE_IMAGE_UPLOAD="" +fi + +if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + TRAMPOLINE_IMAGE="" +fi + +if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then + TRAMPOLINE_DOCKERFILE="" +fi + +if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then + TRAMPOLINE_BUILD_FILE="" +fi diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 56875c06..d7730567 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -80,25 +80,6 @@ We use `nox `__ to instrument our tests. .. nox: https://pypi.org/project/nox/ -Note on Editable Installs / Develop Mode -======================================== - -- As mentioned previously, using ``setuptools`` in `develop mode`_ - or a ``pip`` `editable install`_ is not possible with this - library. This is because this library uses `namespace packages`_. - For context see `Issue #2316`_ and the relevant `PyPA issue`_. - - Since ``editable`` / ``develop`` mode can't be used, packages - need to be installed directly. Hence your changes to the source - tree don't get incorporated into the **already installed** - package. - -.. _namespace packages: https://www.python.org/dev/peps/pep-0420/ -.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316 -.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12 -.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode -.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs - ***************************************** I'm getting weird errors... Can you help? ***************************************** diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index 228529ef..6316a537 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -21,8 +21,8 @@
- On January 1, 2020 this library will no longer support Python 2 on the latest released version. - Previously released library versions will continue to be available. For more information please + As of January 1, 2020 this library no longer supports Python 2 on the latest released version. + Library versions released prior to that date will continue to be available. For more information please visit Python 2 support on Google Cloud.
{% block body %} {% endblock %} diff --git a/docs/conf.py b/docs/conf.py index 1fdbd3b5..d23820ed 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,12 +20,16 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) +# For plugins that can not read conf.py. +# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85 +sys.path.insert(0, os.path.abspath(".")) + __version__ = "" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" +needs_sphinx = "1.5.5" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -35,6 +39,7 @@ "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", + "sphinx.ext.doctest", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", @@ -90,7 +95,12 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ["_build"] +exclude_patterns = [ + "_build", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/snippets/README.rst", +] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/samples/AUTHORING_GUIDE.md b/samples/AUTHORING_GUIDE.md new file mode 100644 index 00000000..55c97b32 --- /dev/null +++ b/samples/AUTHORING_GUIDE.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md \ No newline at end of file diff --git a/samples/CONTRIBUTING.md b/samples/CONTRIBUTING.md new file mode 100644 index 00000000..34c882b6 --- /dev/null +++ b/samples/CONTRIBUTING.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md \ No newline at end of file diff --git a/samples/snippets/api/analyze.py b/samples/snippets/api/analyze.py index a1e702b1..be865226 100644 --- a/samples/snippets/api/analyze.py +++ b/samples/snippets/api/analyze.py @@ -26,21 +26,18 @@ def get_native_encoding_type(): """Returns the encoding type that matches Python's native strings.""" if sys.maxunicode == 65535: - return 'UTF16' + return "UTF16" else: - return 'UTF32' + return "UTF32" -def analyze_entities(text, encoding='UTF32'): +def analyze_entities(text, encoding="UTF32"): body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'encoding_type': encoding, + "document": {"type": "PLAIN_TEXT", "content": text}, + "encoding_type": encoding, } - service = googleapiclient.discovery.build('language', 'v1') + service = googleapiclient.discovery.build("language", "v1") request = service.documents().analyzeEntities(body=body) response = request.execute() @@ -48,16 +45,13 @@ def analyze_entities(text, encoding='UTF32'): return response -def analyze_sentiment(text, encoding='UTF32'): +def analyze_sentiment(text, encoding="UTF32"): body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'encoding_type': encoding + "document": {"type": "PLAIN_TEXT", "content": text}, + "encoding_type": encoding, } - service = googleapiclient.discovery.build('language', 'v1') + service = googleapiclient.discovery.build("language", "v1") request = service.documents().analyzeSentiment(body=body) response = request.execute() @@ -65,16 
+59,13 @@ def analyze_sentiment(text, encoding='UTF32'): return response -def analyze_syntax(text, encoding='UTF32'): +def analyze_syntax(text, encoding="UTF32"): body = { - 'document': { - 'type': 'PLAIN_TEXT', - 'content': text, - }, - 'encoding_type': encoding + "document": {"type": "PLAIN_TEXT", "content": text}, + "encoding_type": encoding, } - service = googleapiclient.discovery.build('language', 'v1') + service = googleapiclient.discovery.build("language", "v1") request = service.documents().analyzeSyntax(body=body) response = request.execute() @@ -82,21 +73,20 @@ def analyze_syntax(text, encoding='UTF32'): return response -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('command', choices=[ - 'entities', 'sentiment', 'syntax']) - parser.add_argument('text') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("command", choices=["entities", "sentiment", "syntax"]) + parser.add_argument("text") args = parser.parse_args() - if args.command == 'entities': + if args.command == "entities": result = analyze_entities(args.text, get_native_encoding_type()) - elif args.command == 'sentiment': + elif args.command == "sentiment": result = analyze_sentiment(args.text, get_native_encoding_type()) - elif args.command == 'syntax': + elif args.command == "syntax": result = analyze_syntax(args.text, get_native_encoding_type()) print(json.dumps(result, indent=2)) diff --git a/samples/snippets/api/analyze_test.py b/samples/snippets/api/analyze_test.py index 08852c33..b4a0db67 100644 --- a/samples/snippets/api/analyze_test.py +++ b/samples/snippets/api/analyze_test.py @@ -18,46 +18,51 @@ def test_analyze_entities(): result = analyze.analyze_entities( - 'Tom Sawyer is a book written by a guy known as Mark Twain.') + "Tom Sawyer is a book written by a guy known as Mark Twain." + ) - assert result['language'] == 'en' - entities = result['entities'] + assert result["language"] == "en" + entities = result["entities"] assert len(entities) subject = entities[0] - assert subject['type'] == 'PERSON' - assert subject['name'].startswith('Tom') + assert subject["type"] == "PERSON" + assert subject["name"].startswith("Tom") def test_analyze_sentiment(capsys): - result = analyze.analyze_sentiment( - 'your face is really ugly and i hate it.') + result = analyze.analyze_sentiment("your face is really ugly and i hate it.") - sentiment = result['documentSentiment'] - assert sentiment['score'] < 0 - assert sentiment['magnitude'] < 1 + sentiment = result["documentSentiment"] + assert sentiment["score"] < 0 + assert sentiment["magnitude"] < 1 result = analyze.analyze_sentiment( - 'cheerio, mate - I greatly admire the pallor of your visage, and your ' - 'angle of repose leaves little room for improvement.') + "cheerio, mate - I greatly admire the pallor of your visage, and your " + "angle of repose leaves little room for improvement." + ) - sentiment = result['documentSentiment'] - assert sentiment['score'] > 0 - assert sentiment['magnitude'] < 1 + sentiment = result["documentSentiment"] + assert sentiment["score"] > 0 + assert sentiment["magnitude"] < 1 def test_analyze_syntax(capsys): - result = analyze.analyze_syntax(textwrap.dedent(u'''\ + result = analyze.analyze_syntax( + textwrap.dedent( + u"""\ Keep away from people who try to belittle your ambitions. 
Small people always do that, but the really great make you feel that you, too, can become great. - - Mark Twain''')) + - Mark Twain""" + ) + ) - assert len(result['tokens']) - first_token = result['tokens'][0] - assert first_token['text']['content'] == 'Keep' - assert first_token['partOfSpeech']['tag'] == 'VERB' - assert len(result['sentences']) > 1 - assert result['language'] == 'en' + assert len(result["tokens"]) + first_token = result["tokens"][0] + assert first_token["text"]["content"] == "Keep" + assert first_token["partOfSpeech"]["tag"] == "VERB" + assert len(result["sentences"]) > 1 + assert result["language"] == "en" def test_analyze_syntax_utf8(): @@ -67,38 +72,43 @@ def test_analyze_syntax_utf8(): bits. The offsets we get should be the index of the first byte of the character. """ - test_string = u'a \u00e3 \u0201 \U0001f636 b' - byte_array = test_string.encode('utf8') - result = analyze.analyze_syntax(test_string, encoding='UTF8') - tokens = result['tokens'] - - assert tokens[0]['text']['content'] == 'a' - offset = tokens[0]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset+1].decode('utf8') == - tokens[0]['text']['content']) - - assert tokens[1]['text']['content'] == u'\u00e3' - offset = tokens[1]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset+2].decode('utf8') == - tokens[1]['text']['content']) - - assert tokens[2]['text']['content'] == u'\u0201' - offset = tokens[2]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset+2].decode('utf8') == - tokens[2]['text']['content']) - - assert tokens[3]['text']['content'] == u'\U0001f636' - offset = tokens[3]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset+4].decode('utf8') == - tokens[3]['text']['content']) + test_string = u"a \u00e3 \u0201 \U0001f636 b" + byte_array = test_string.encode("utf8") + result = analyze.analyze_syntax(test_string, encoding="UTF8") + tokens = result["tokens"] + + assert tokens[0]["text"]["content"] == "a" + offset = tokens[0]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 1].decode("utf8") == tokens[0]["text"]["content"] + ) + + assert tokens[1]["text"]["content"] == u"\u00e3" + offset = tokens[1]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 2].decode("utf8") == tokens[1]["text"]["content"] + ) + + assert tokens[2]["text"]["content"] == u"\u0201" + offset = tokens[2]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 2].decode("utf8") == tokens[2]["text"]["content"] + ) + + assert tokens[3]["text"]["content"] == u"\U0001f636" + offset = tokens[3]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 4].decode("utf8") == tokens[3]["text"]["content"] + ) # This demonstrates that the offset takes into account the variable-length # characters before the target token. - assert tokens[4]['text']['content'] == u'b' - offset = tokens[4]['text'].get('beginOffset', 0) + assert tokens[4]["text"]["content"] == u"b" + offset = tokens[4]["text"].get("beginOffset", 0) # 'b' is only one byte long - assert (byte_array[offset:offset+1].decode('utf8') == - tokens[4]['text']['content']) + assert ( + byte_array[offset : offset + 1].decode("utf8") == tokens[4]["text"]["content"] + ) def test_analyze_syntax_utf16(): @@ -108,53 +118,58 @@ def test_analyze_syntax_utf16(): bits. The returned offsets will be the index of the first 2-byte character of the token. 
""" - test_string = u'a \u00e3 \u0201 \U0001f636 b' - byte_array = test_string.encode('utf16') + test_string = u"a \u00e3 \u0201 \U0001f636 b" + byte_array = test_string.encode("utf16") # Remove the byte order marker, which the offsets don't account for byte_array = byte_array[2:] - result = analyze.analyze_syntax(test_string, encoding='UTF16') - tokens = result['tokens'] + result = analyze.analyze_syntax(test_string, encoding="UTF16") + tokens = result["tokens"] - assert tokens[0]['text']['content'] == 'a' + assert tokens[0]["text"]["content"] == "a" # The offset is an offset into an array where each entry is 16 bits. Since # we have an 8-bit array, the offsets should be doubled to index into our # array. - offset = 2 * tokens[0]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset + 2].decode('utf16') == - tokens[0]['text']['content']) + offset = 2 * tokens[0]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 2].decode("utf16") == tokens[0]["text"]["content"] + ) - assert tokens[1]['text']['content'] == u'\u00e3' - offset = 2 * tokens[1]['text'].get('beginOffset', 0) + assert tokens[1]["text"]["content"] == u"\u00e3" + offset = 2 * tokens[1]["text"].get("beginOffset", 0) # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so # slice out 2 bytes starting from the offset. Then interpret the bytes as # utf16 for comparison. - assert (byte_array[offset:offset + 2].decode('utf16') == - tokens[1]['text']['content']) + assert ( + byte_array[offset : offset + 2].decode("utf16") == tokens[1]["text"]["content"] + ) - assert tokens[2]['text']['content'] == u'\u0201' - offset = 2 * tokens[2]['text'].get('beginOffset', 0) + assert tokens[2]["text"]["content"] == u"\u0201" + offset = 2 * tokens[2]["text"].get("beginOffset", 0) # A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so # slice out 2 bytes starting from the offset. Then interpret the bytes as # utf16 for comparison. - assert (byte_array[offset:offset + 2].decode('utf16') == - tokens[2]['text']['content']) + assert ( + byte_array[offset : offset + 2].decode("utf16") == tokens[2]["text"]["content"] + ) - assert tokens[3]['text']['content'] == u'\U0001f636' - offset = 2 * tokens[3]['text'].get('beginOffset', 0) + assert tokens[3]["text"]["content"] == u"\U0001f636" + offset = 2 * tokens[3]["text"].get("beginOffset", 0) # A UTF16 character with a high codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret those bytes as # utf16 for comparison. - assert (byte_array[offset:offset + 4].decode('utf16') == - tokens[3]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf16") == tokens[3]["text"]["content"] + ) # This demonstrates that the offset takes into account the variable-length # characters before the target token. - assert tokens[4]['text']['content'] == u'b' - offset = 2 * tokens[4]['text'].get('beginOffset', 0) + assert tokens[4]["text"]["content"] == u"b" + offset = 2 * tokens[4]["text"].get("beginOffset", 0) # Even though 'b' is only one byte long, utf16 still encodes it using 16 # bits - assert (byte_array[offset:offset + 2].decode('utf16') == - tokens[4]['text']['content']) + assert ( + byte_array[offset : offset + 2].decode("utf16") == tokens[4]["text"]["content"] + ) def test_annotate_text_utf32(): @@ -178,53 +193,58 @@ def test_annotate_text_utf32(): unicode object with the raw offset returned by the api (ie without multiplying it by 4, as it is below). 
""" - test_string = u'a \u00e3 \u0201 \U0001f636 b' - byte_array = test_string.encode('utf32') + test_string = u"a \u00e3 \u0201 \U0001f636 b" + byte_array = test_string.encode("utf32") # Remove the byte order marker, which the offsets don't account for byte_array = byte_array[4:] - result = analyze.analyze_syntax(test_string, encoding='UTF32') - tokens = result['tokens'] + result = analyze.analyze_syntax(test_string, encoding="UTF32") + tokens = result["tokens"] - assert tokens[0]['text']['content'] == 'a' + assert tokens[0]["text"]["content"] == "a" # The offset is an offset into an array where each entry is 32 bits. Since # we have an 8-bit array, the offsets should be quadrupled to index into # our array. - offset = 4 * tokens[0]['text'].get('beginOffset', 0) - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[0]['text']['content']) + offset = 4 * tokens[0]["text"].get("beginOffset", 0) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[0]["text"]["content"] + ) - assert tokens[1]['text']['content'] == u'\u00e3' - offset = 4 * tokens[1]['text'].get('beginOffset', 0) + assert tokens[1]["text"]["content"] == u"\u00e3" + offset = 4 * tokens[1]["text"].get("beginOffset", 0) # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret the bytes as # utf32 for comparison. - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[1]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[1]["text"]["content"] + ) - assert tokens[2]['text']['content'] == u'\u0201' - offset = 4 * tokens[2]['text'].get('beginOffset', 0) + assert tokens[2]["text"]["content"] == u"\u0201" + offset = 4 * tokens[2]["text"].get("beginOffset", 0) # A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret the bytes as # utf32 for comparison. - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[2]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[2]["text"]["content"] + ) - assert tokens[3]['text']['content'] == u'\U0001f636' - offset = 4 * tokens[3]['text'].get('beginOffset', 0) + assert tokens[3]["text"]["content"] == u"\U0001f636" + offset = 4 * tokens[3]["text"].get("beginOffset", 0) # A UTF32 character with a high codepoint is 32 bits (4 bytes) long, so # slice out 4 bytes starting from the offset. Then interpret those bytes as # utf32 for comparison. - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[3]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[3]["text"]["content"] + ) # This demonstrates that the offset takes into account the variable-length # characters before the target token. - assert tokens[4]['text']['content'] == u'b' - offset = 4 * tokens[4]['text'].get('beginOffset', 0) + assert tokens[4]["text"]["content"] == u"b" + offset = 4 * tokens[4]["text"].get("beginOffset", 0) # Even though 'b' is only one byte long, utf32 still encodes it using 32 # bits - assert (byte_array[offset:offset + 4].decode('utf32') == - tokens[4]['text']['content']) + assert ( + byte_array[offset : offset + 4].decode("utf32") == tokens[4]["text"]["content"] + ) def test_annotate_text_utf32_directly_index_into_unicode(): @@ -233,21 +253,21 @@ def test_annotate_text_utf32_directly_index_into_unicode(): See the explanation for test_annotate_text_utf32. 
Essentially, indexing into a utf32 array is equivalent to indexing into a python unicode object. """ - test_string = u'a \u00e3 \u0201 \U0001f636 b' - result = analyze.analyze_syntax(test_string, encoding='UTF32') - tokens = result['tokens'] + test_string = u"a \u00e3 \u0201 \U0001f636 b" + result = analyze.analyze_syntax(test_string, encoding="UTF32") + tokens = result["tokens"] - assert tokens[0]['text']['content'] == 'a' - offset = tokens[0]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[0]['text']['content'] + assert tokens[0]["text"]["content"] == "a" + offset = tokens[0]["text"].get("beginOffset", 0) + assert test_string[offset] == tokens[0]["text"]["content"] - assert tokens[1]['text']['content'] == u'\u00e3' - offset = tokens[1]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[1]['text']['content'] + assert tokens[1]["text"]["content"] == u"\u00e3" + offset = tokens[1]["text"].get("beginOffset", 0) + assert test_string[offset] == tokens[1]["text"]["content"] - assert tokens[2]['text']['content'] == u'\u0201' - offset = tokens[2]['text'].get('beginOffset', 0) - assert test_string[offset] == tokens[2]['text']['content'] + assert tokens[2]["text"]["content"] == u"\u0201" + offset = tokens[2]["text"].get("beginOffset", 0) + assert test_string[offset] == tokens[2]["text"]["content"] # Temporarily disabled # assert tokens[3]['text']['content'] == u'\U0001f636' diff --git a/samples/snippets/api/noxfile.py b/samples/snippets/api/noxfile.py new file mode 100644 index 00000000..5660f08b --- /dev/null +++ b/samples/snippets/api/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. 
+ "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/classify_text/classify_text_tutorial.py b/samples/snippets/classify_text/classify_text_tutorial.py index d193e62e..fcd5008b 100644 --- a/samples/snippets/classify_text/classify_text_tutorial.py +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -29,6 +29,7 @@ from google.cloud import language import numpy import six + # [END language_classify_text_tutorial_imports] @@ -39,8 +40,8 @@ def classify(text, verbose=True): language_client = language.LanguageServiceClient() document = language.types.Document( - content=text, - type=language.enums.Document.Type.PLAIN_TEXT) + content=text, type=language.enums.Document.Type.PLAIN_TEXT + ) response = language_client.classify_text(document) categories = response.categories @@ -55,11 +56,13 @@ def classify(text, verbose=True): if verbose: print(text) for category in categories: - print(u'=' * 20) - print(u'{:<16}: {}'.format('category', category.name)) - print(u'{:<16}: {}'.format('confidence', category.confidence)) + print(u"=" * 20) + print(u"{:<16}: {}".format("category", category.name)) + print(u"{:<16}: {}".format("confidence", category.confidence)) return result + + # [END language_classify_text_tutorial_classify] @@ -77,19 +80,21 @@ def index(path, index_file): continue try: - with io.open(file_path, 'r') as f: + with io.open(file_path, "r") as f: text = f.read() categories = classify(text, verbose=False) result[filename] = categories except Exception: - print('Failed to process {}'.format(file_path)) + print("Failed to process {}".format(file_path)) - with io.open(index_file, 'w', encoding='utf-8') as f: + with io.open(index_file, "w", encoding="utf-8") as f: f.write(json.dumps(result, ensure_ascii=False)) - print('Texts indexed in file: {}'.format(index_file)) + print("Texts indexed in file: {}".format(index_file)) return result + + # [END language_classify_text_tutorial_index] @@ -114,7 +119,7 @@ def split_labels(categories): """ _categories = {} for name, confidence in six.iteritems(categories): - labels = [label for label in name.split('/') if label] + labels = [label for label in name.split("/") if label] for label in 
labels: _categories[label] = confidence @@ -147,7 +152,7 @@ def query(index_file, text, n_top=3): the query text. """ - with io.open(index_file, 'r') as f: + with io.open(index_file, "r") as f: index = json.load(f) # Get the categories of the query text. @@ -155,22 +160,23 @@ def query(index_file, text, n_top=3): similarities = [] for filename, categories in six.iteritems(index): - similarities.append( - (filename, similarity(query_categories, categories))) + similarities.append((filename, similarity(query_categories, categories))) similarities = sorted(similarities, key=lambda p: p[1], reverse=True) - print('=' * 20) - print('Query: {}\n'.format(text)) + print("=" * 20) + print("Query: {}\n".format(text)) for category, confidence in six.iteritems(query_categories): - print('\tCategory: {}, confidence: {}'.format(category, confidence)) - print('\nMost similar {} indexed texts:'.format(n_top)) + print("\tCategory: {}, confidence: {}".format(category, confidence)) + print("\nMost similar {} indexed texts:".format(n_top)) for filename, sim in similarities[:n_top]: - print('\tFilename: {}'.format(filename)) - print('\tSimilarity: {}'.format(sim)) - print('\n') + print("\tFilename: {}".format(filename)) + print("\tSimilarity: {}".format(sim)) + print("\n") return similarities + + # [END language_classify_text_tutorial_query] @@ -183,7 +189,7 @@ def query_category(index_file, category_string, n_top=3): https://cloud.google.com/natural-language/docs/categories """ - with io.open(index_file, 'r') as f: + with io.open(index_file, "r") as f: index = json.load(f) # Make the category_string into a dictionary so that it is @@ -192,61 +198,59 @@ def query_category(index_file, category_string, n_top=3): similarities = [] for filename, categories in six.iteritems(index): - similarities.append( - (filename, similarity(query_categories, categories))) + similarities.append((filename, similarity(query_categories, categories))) similarities = sorted(similarities, key=lambda p: p[1], reverse=True) - print('=' * 20) - print('Query: {}\n'.format(category_string)) - print('\nMost similar {} indexed texts:'.format(n_top)) + print("=" * 20) + print("Query: {}\n".format(category_string)) + print("\nMost similar {} indexed texts:".format(n_top)) for filename, sim in similarities[:n_top]: - print('\tFilename: {}'.format(filename)) - print('\tSimilarity: {}'.format(sim)) - print('\n') + print("\tFilename: {}".format(filename)) + print("\tSimilarity: {}".format(sim)) + print("\n") return similarities + + # [END language_classify_text_tutorial_query_category] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - subparsers = parser.add_subparsers(dest='command') - classify_parser = subparsers.add_parser( - 'classify', help=classify.__doc__) + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + subparsers = parser.add_subparsers(dest="command") + classify_parser = subparsers.add_parser("classify", help=classify.__doc__) classify_parser.add_argument( - 'text', help='The text to be classified. ' - 'The text needs to have at least 20 tokens.') - index_parser = subparsers.add_parser( - 'index', help=index.__doc__) + "text", + help="The text to be classified. 
" "The text needs to have at least 20 tokens.", + ) + index_parser = subparsers.add_parser("index", help=index.__doc__) index_parser.add_argument( - 'path', help='The directory that contains ' - 'text files to be indexed.') + "path", help="The directory that contains " "text files to be indexed." + ) index_parser.add_argument( - '--index_file', help='Filename for the output JSON.', - default='index.json') - query_parser = subparsers.add_parser( - 'query', help=query.__doc__) - query_parser.add_argument( - 'index_file', help='Path to the index JSON file.') - query_parser.add_argument( - 'text', help='Query text.') + "--index_file", help="Filename for the output JSON.", default="index.json" + ) + query_parser = subparsers.add_parser("query", help=query.__doc__) + query_parser.add_argument("index_file", help="Path to the index JSON file.") + query_parser.add_argument("text", help="Query text.") query_category_parser = subparsers.add_parser( - 'query-category', help=query_category.__doc__) - query_category_parser.add_argument( - 'index_file', help='Path to the index JSON file.') + "query-category", help=query_category.__doc__ + ) query_category_parser.add_argument( - 'category', help='Query category.') + "index_file", help="Path to the index JSON file." + ) + query_category_parser.add_argument("category", help="Query category.") args = parser.parse_args() - if args.command == 'classify': + if args.command == "classify": classify(args.text) - if args.command == 'index': + if args.command == "index": index(args.path, args.index_file) - if args.command == 'query': + if args.command == "query": query(args.index_file, args.text) - if args.command == 'query-category': + if args.command == "query-category": query_category(args.index_file, args.category) diff --git a/samples/snippets/classify_text/classify_text_tutorial_test.py b/samples/snippets/classify_text/classify_text_tutorial_test.py index 28de0562..5e821129 100644 --- a/samples/snippets/classify_text/classify_text_tutorial_test.py +++ b/samples/snippets/classify_text/classify_text_tutorial_test.py @@ -18,37 +18,37 @@ import classify_text_tutorial -OUTPUT = 'index.json' -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +OUTPUT = "index.json" +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") QUERY_TEXT = """Google Home enables users to speak voice commands to interact with services through the Home\'s intelligent personal assistant called Google Assistant. 
A large number of services, both in-house and third-party, are integrated, allowing users to listen to music, look at videos or photos, or receive news updates entirely by voice.""" -QUERY_CATEGORY = '/Computers & Electronics/Software' +QUERY_CATEGORY = "/Computers & Electronics/Software" -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def index_file(tmpdir_factory): - temp_file = tmpdir_factory.mktemp('tmp').join(OUTPUT) + temp_file = tmpdir_factory.mktemp("tmp").join(OUTPUT) temp_out = temp_file.strpath - classify_text_tutorial.index(os.path.join(RESOURCES, 'texts'), temp_out) + classify_text_tutorial.index(os.path.join(RESOURCES, "texts"), temp_out) return temp_file def test_classify(capsys): - with open(os.path.join(RESOURCES, 'query_text1.txt'), 'r') as f: + with open(os.path.join(RESOURCES, "query_text1.txt"), "r") as f: text = f.read() classify_text_tutorial.classify(text) out, err = capsys.readouterr() - assert 'category' in out + assert "category" in out def test_index(capsys, tmpdir): - temp_dir = tmpdir.mkdir('tmp') + temp_dir = tmpdir.mkdir("tmp") temp_out = temp_dir.join(OUTPUT).strpath - classify_text_tutorial.index(os.path.join(RESOURCES, 'texts'), temp_out) + classify_text_tutorial.index(os.path.join(RESOURCES, "texts"), temp_out) out, err = capsys.readouterr() assert OUTPUT in out @@ -61,7 +61,7 @@ def test_query_text(capsys, index_file): classify_text_tutorial.query(temp_out, QUERY_TEXT) out, err = capsys.readouterr() - assert 'Filename: cloud_computing.txt' in out + assert "Filename: cloud_computing.txt" in out def test_query_category(capsys, index_file): @@ -70,22 +70,21 @@ def test_query_category(capsys, index_file): classify_text_tutorial.query_category(temp_out, QUERY_CATEGORY) out, err = capsys.readouterr() - assert 'Filename: cloud_computing.txt' in out + assert "Filename: cloud_computing.txt" in out def test_split_labels(): - categories = {'/a/b/c': 1.0} - split_categories = {'a': 1.0, 'b': 1.0, 'c': 1.0} + categories = {"/a/b/c": 1.0} + split_categories = {"a": 1.0, "b": 1.0, "c": 1.0} assert classify_text_tutorial.split_labels(categories) == split_categories def test_similarity(): empty_categories = {} - categories1 = {'/a/b/c': 1.0, '/d/e': 1.0} - categories2 = {'/a/b': 1.0} + categories1 = {"/a/b/c": 1.0, "/d/e": 1.0} + categories2 = {"/a/b": 1.0} - assert classify_text_tutorial.similarity( - empty_categories, categories1) == 0.0 + assert classify_text_tutorial.similarity(empty_categories, categories1) == 0.0 assert classify_text_tutorial.similarity(categories1, categories1) > 0.99 assert classify_text_tutorial.similarity(categories1, categories2) > 0 assert classify_text_tutorial.similarity(categories1, categories2) < 1 diff --git a/samples/snippets/classify_text/noxfile.py b/samples/snippets/classify_text/noxfile.py new file mode 100644 index 00000000..5660f08b --- /dev/null +++ b/samples/snippets/classify_text/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/cloud-client/v1/noxfile.py b/samples/snippets/cloud-client/v1/noxfile.py new file mode 100644 index 00000000..5660f08b --- /dev/null +++ b/samples/snippets/cloud-client/v1/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/cloud-client/v1/quickstart.py b/samples/snippets/cloud-client/v1/quickstart.py index 7c075a51..2cf46437 100644 --- a/samples/snippets/cloud-client/v1/quickstart.py +++ b/samples/snippets/cloud-client/v1/quickstart.py @@ -22,6 +22,7 @@ def run_quickstart(): from google.cloud import language from google.cloud.language import enums from google.cloud.language import types + # [END language_python_migration_imports] # Instantiates a client @@ -30,18 +31,16 @@ def run_quickstart(): # [END language_python_migration_client] # The text to analyze - text = u'Hello, world!' - document = types.Document( - content=text, - type=enums.Document.Type.PLAIN_TEXT) + text = u"Hello, world!" + document = types.Document(content=text, type=enums.Document.Type.PLAIN_TEXT) # Detects the sentiment of the text sentiment = client.analyze_sentiment(document=document).document_sentiment - print('Text: {}'.format(text)) - print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) + print("Text: {}".format(text)) + print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) # [END language_quickstart] -if __name__ == '__main__': +if __name__ == "__main__": run_quickstart() diff --git a/samples/snippets/cloud-client/v1/quickstart_test.py b/samples/snippets/cloud-client/v1/quickstart_test.py index bd9954c8..59b44da8 100644 --- a/samples/snippets/cloud-client/v1/quickstart_test.py +++ b/samples/snippets/cloud-client/v1/quickstart_test.py @@ -19,4 +19,4 @@ def test_quickstart(capsys): quickstart.run_quickstart() out, _ = capsys.readouterr() - assert 'Sentiment' in out + assert "Sentiment" in out diff --git a/samples/snippets/cloud-client/v1/set_endpoint.py b/samples/snippets/cloud-client/v1/set_endpoint.py index abc6f180..340d5180 100644 --- a/samples/snippets/cloud-client/v1/set_endpoint.py +++ b/samples/snippets/cloud-client/v1/set_endpoint.py @@ -19,7 +19,7 @@ def set_endpoint(): # Imports the Google Cloud client library from google.cloud import language - client_options = {'api_endpoint': 'eu-language.googleapis.com:443'} + client_options = {"api_endpoint": "eu-language.googleapis.com:443"} # Instantiates a client client = language.LanguageServiceClient(client_options=client_options) @@ -27,14 +27,14 @@ def set_endpoint(): # The text to analyze document = language.types.Document( - content='Hello, world!', - type=language.enums.Document.Type.PLAIN_TEXT) + content="Hello, world!", type=language.enums.Document.Type.PLAIN_TEXT + ) # Detects the sentiment of the text sentiment = client.analyze_sentiment(document=document).document_sentiment - print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude)) + print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) -if __name__ == '__main__': +if __name__ == "__main__": 
set_endpoint() diff --git a/samples/snippets/cloud-client/v1/set_endpoint_test.py b/samples/snippets/cloud-client/v1/set_endpoint_test.py index 7e124c36..817748b1 100644 --- a/samples/snippets/cloud-client/v1/set_endpoint_test.py +++ b/samples/snippets/cloud-client/v1/set_endpoint_test.py @@ -19,4 +19,4 @@ def test_set_endpoint(capsys): set_endpoint.set_endpoint() out, _ = capsys.readouterr() - assert 'Sentiment' in out + assert "Sentiment" in out diff --git a/samples/snippets/generated-samples/v1/language_sentiment_text.py b/samples/snippets/generated-samples/v1/language_sentiment_text.py index 10d17970..c28a3665 100644 --- a/samples/snippets/generated-samples/v1/language_sentiment_text.py +++ b/samples/snippets/generated-samples/v1/language_sentiment_text.py @@ -35,15 +35,15 @@ def sample_analyze_sentiment(content): # content = 'Your text to analyze, e.g. Hello, world!' if isinstance(content, six.binary_type): - content = content.decode('utf-8') + content = content.decode("utf-8") type_ = enums.Document.Type.PLAIN_TEXT - document = {'type': type_, 'content': content} + document = {"type": type_, "content": content} response = client.analyze_sentiment(document) sentiment = response.document_sentiment - print('Score: {}'.format(sentiment.score)) - print('Magnitude: {}'.format(sentiment.magnitude)) + print("Score: {}".format(sentiment.score)) + print("Magnitude: {}".format(sentiment.magnitude)) # [END language_sentiment_text] @@ -54,5 +54,5 @@ def main(): sample_analyze_sentiment(*sys.argv[1:]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/samples/snippets/generated-samples/v1/language_sentiment_text_test.py b/samples/snippets/generated-samples/v1/language_sentiment_text_test.py index e1876da2..fd89f626 100644 --- a/samples/snippets/generated-samples/v1/language_sentiment_text_test.py +++ b/samples/snippets/generated-samples/v1/language_sentiment_text_test.py @@ -17,12 +17,12 @@ def test_analyze_sentiment_text_positive(capsys): - language_sentiment_text.sample_analyze_sentiment('Happy Happy Joy Joy') + language_sentiment_text.sample_analyze_sentiment("Happy Happy Joy Joy") out, _ = capsys.readouterr() - assert 'Score: 0.' in out + assert "Score: 0." in out def test_analyze_sentiment_text_negative(capsys): - language_sentiment_text.sample_analyze_sentiment('Angry Angry Sad Sad') + language_sentiment_text.sample_analyze_sentiment("Angry Angry Sad Sad") out, _ = capsys.readouterr() - assert 'Score: -0.' in out + assert "Score: -0." in out diff --git a/samples/snippets/generated-samples/v1/noxfile.py b/samples/snippets/generated-samples/v1/noxfile.py new file mode 100644 index 00000000..5660f08b --- /dev/null +++ b/samples/snippets/generated-samples/v1/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/sentiment/noxfile.py b/samples/snippets/sentiment/noxfile.py new file mode 100644 index 00000000..5660f08b --- /dev/null +++ b/samples/snippets/sentiment/noxfile.py @@ -0,0 +1,222 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": ["2.7"], + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/sentiment/sentiment_analysis.py b/samples/snippets/sentiment/sentiment_analysis.py index 3b572bc2..aef7a658 100644 --- a/samples/snippets/sentiment/sentiment_analysis.py +++ b/samples/snippets/sentiment/sentiment_analysis.py @@ -20,6 +20,7 @@ from google.cloud import language from google.cloud.language import enums from google.cloud.language import types + # [END language_sentiment_tutorial_imports] @@ -30,12 +31,16 @@ def print_result(annotations): for index, sentence in enumerate(annotations.sentences): sentence_sentiment = sentence.sentiment.score - print('Sentence {} has a sentiment score of {}'.format( - index, sentence_sentiment)) + print( + "Sentence {} has a sentiment score of {}".format(index, sentence_sentiment) + ) - print('Overall Sentiment: score of {} with magnitude of {}'.format( - score, magnitude)) + print( + "Overall Sentiment: score of {} with magnitude of {}".format(score, magnitude) + ) return 0 + + # [END language_sentiment_tutorial_print_result] @@ -44,28 +49,29 @@ def analyze(movie_review_filename): """Run a sentiment analysis request on text within a passed filename.""" client = language.LanguageServiceClient() - with open(movie_review_filename, 'r') as review_file: + with open(movie_review_filename, "r") as review_file: # Instantiates a plain text document. 
content = review_file.read() - document = types.Document( - content=content, - type=enums.Document.Type.PLAIN_TEXT) + document = types.Document(content=content, type=enums.Document.Type.PLAIN_TEXT) annotations = client.analyze_sentiment(document=document) # Print the results print_result(annotations) + + # [END language_sentiment_tutorial_analyze_sentiment] # [START language_sentiment_tutorial_run_application] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) parser.add_argument( - 'movie_review_filename', - help='The filename of the movie review you\'d like to analyze.') + "movie_review_filename", + help="The filename of the movie review you'd like to analyze.", + ) args = parser.parse_args() analyze(args.movie_review_filename) diff --git a/samples/snippets/sentiment/sentiment_analysis_test.py b/samples/snippets/sentiment/sentiment_analysis_test.py index 05d28ab2..845e842f 100644 --- a/samples/snippets/sentiment/sentiment_analysis_test.py +++ b/samples/snippets/sentiment/sentiment_analysis_test.py @@ -16,35 +16,35 @@ from sentiment_analysis import analyze -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_pos(capsys): - analyze(os.path.join(RESOURCES, 'pos.txt')) + analyze(os.path.join(RESOURCES, "pos.txt")) out, err = capsys.readouterr() - score = float(re.search('score of (.+?) with', out).group(1)) - magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + score = float(re.search("score of (.+?) with", out).group(1)) + magnitude = float(re.search("magnitude of (.+?)", out).group(1)) assert score * magnitude > 0 def test_neg(capsys): - analyze(os.path.join(RESOURCES, 'neg.txt')) + analyze(os.path.join(RESOURCES, "neg.txt")) out, err = capsys.readouterr() - score = float(re.search('score of (.+?) with', out).group(1)) - magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + score = float(re.search("score of (.+?) with", out).group(1)) + magnitude = float(re.search("magnitude of (.+?)", out).group(1)) assert score * magnitude < 0 def test_mixed(capsys): - analyze(os.path.join(RESOURCES, 'mixed.txt')) + analyze(os.path.join(RESOURCES, "mixed.txt")) out, err = capsys.readouterr() - score = float(re.search('score of (.+?) with', out).group(1)) + score = float(re.search("score of (.+?) with", out).group(1)) assert score <= 0.3 assert score >= -0.3 def test_neutral(capsys): - analyze(os.path.join(RESOURCES, 'neutral.txt')) + analyze(os.path.join(RESOURCES, "neutral.txt")) out, err = capsys.readouterr() - magnitude = float(re.search('magnitude of (.+?)', out).group(1)) + magnitude = float(re.search("magnitude of (.+?)", out).group(1)) assert magnitude <= 2.0 diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh index ff599eb2..21f6d2a2 100755 --- a/scripts/decrypt-secrets.sh +++ b/scripts/decrypt-secrets.sh @@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" ) # Work from the project root. cd $ROOT +# Prevent it from overriding files. +# We recommend that sample authors use their own service account files and cloud project. +# In that case, they are supposed to prepare these files by themselves. +if [[ -f "testing/test-env.sh" ]] || \ + [[ -f "testing/service-account.json" ]] || \ + [[ -f "testing/client-secrets.json" ]]; then + echo "One or more target files exist, aborting." 
+ exit 1 +fi + # Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + --project="${PROJECT_ID}" \ > testing/test-env.sh gcloud secrets versions access latest \ --secret="python-docs-samples-service-account" \ + --project="${PROJECT_ID}" \ > testing/service-account.json gcloud secrets versions access latest \ --secret="python-docs-samples-client-secrets" \ - > testing/client-secrets.json \ No newline at end of file + --project="${PROJECT_ID}" \ + > testing/client-secrets.json diff --git a/synth.metadata b/synth.metadata index f631bc79..1da2f3fc 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,23 +3,29 @@ { "git": { "name": ".", - "remote": "https://github.com/googleapis/python-language.git", - "sha": "3db1fa4508b7891733d362e81d3e20fe8e71b996" + "remote": "git@github.com:googleapis/python-language.git", + "sha": "2084dc18f3f495ceb753e4131ca616c17b25cf86" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "fa4d2ecd0804eb92f27a65fe65ce2a554a361b93", - "internalRef": "317110673" + "sha": "b7f574bddb451d81aa222dad7dcecf3477cb97ed" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "cd522c3b4dde821766d95c80ae5aeb43d7a41170" + "sha": "da29da32b3a988457b49ae290112b74f14b713cc" + } + }, + { + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "da29da32b3a988457b49ae290112b74f14b713cc" } } ], diff --git a/synth.py b/synth.py index e77efc01..ee783601 100644 --- a/synth.py +++ b/synth.py @@ -16,6 +16,7 @@ import synthtool as s from synthtool import gcp +from synthtool.languages import python gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() @@ -42,7 +43,8 @@ # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=100) +templated_files = common.py_library(unit_cov_level=97, cov_level=100, samples=True) + s.move(templated_files, excludes=['noxfile.py']) s.replace("google/cloud/**/language_service_pb2.py", @@ -62,3 +64,11 @@ s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') s.shell.run(["nox", "-s", "blacken"], hide_output=False) + +# ---------------------------------------------------------------------------- +# Samples templates +# ---------------------------------------------------------------------------- + +python.py_samples(skip_readmes=True) + +s.shell.run(["nox", "-s", "blacken"], hide_output=False) \ No newline at end of file From b408b1431194d8e1373b5d986d476add639f7e87 Mon Sep 17 00:00:00 2001 From: hkdevandla <60490673+hkdevandla@users.noreply.github.com> Date: Fri, 16 Oct 2020 14:17:33 -0700 Subject: [PATCH 208/209] feat: Migrate API to use python micro-generator (#41) * migrate API to use micro-generator * migrate API to use micro-generator * update * doc changes * add samples * add samples * add samples and readme * Update README.md * Update README.md * Update UPGRADING.md file * update synth.py Co-authored-by: arithmetic1728 --- .coveragerc | 14 +- .kokoro/samples/python3.6/common.cfg | 6 + 
.kokoro/samples/python3.7/common.cfg | 6 + .kokoro/samples/python3.8/common.cfg | 6 + README.rst | 10 +- UPGRADING.md | 140 ++ docs/UPGRADING.md | 1 + docs/api.rst | 18 +- docs/conf.py | 2 +- docs/gapic/v1/api.rst | 6 - docs/gapic/v1/types.rst | 5 - docs/gapic/v1beta2/api.rst | 6 - docs/gapic/v1beta2/types.rst | 5 - docs/language_v1/services.rst | 6 + docs/language_v1/types.rst | 5 + docs/language_v1beta2/services.rst | 6 + docs/language_v1beta2/types.rst | 5 + google/cloud/language/__init__.py | 78 + google/cloud/language/py.typed | 2 + google/cloud/language_v1/__init__.py | 66 +- google/cloud/language_v1/py.typed | 2 + .../cloud/language_v1/services}/__init__.py | 5 +- .../services/language_service}/__init__.py | 18 +- .../services/language_service/async_client.py | 602 ++++++ .../services/language_service/client.py | 714 +++++++ .../language_service/transports/__init__.py | 36 + .../language_service/transports/base.py | 263 +++ .../language_service/transports/grpc.py | 415 ++++ .../transports/grpc_asyncio.py | 418 ++++ google/cloud/language_v1/types/__init__.py | 67 + .../language_v1/types/language_service.py | 879 ++++++++ google/cloud/language_v1beta2/__init__.py | 66 +- google/cloud/language_v1beta2/py.typed | 2 + .../language_v1beta2/services}/__init__.py | 14 +- .../services/language_service/__init__.py} | 17 +- .../services/language_service/async_client.py | 603 ++++++ .../services/language_service/client.py | 715 +++++++ .../language_service/transports/__init__.py | 36 + .../language_service/transports/base.py | 263 +++ .../language_service/transports/grpc.py | 415 ++++ .../transports/grpc_asyncio.py | 418 ++++ .../cloud/language_v1beta2/types/__init__.py | 67 + .../types/language_service.py | 880 ++++++++ mypy.ini | 3 + noxfile.py | 93 +- samples/snippets/api/README.rst | 4 + samples/snippets/api/analyze_test.py | 3 +- samples/snippets/api/noxfile.py | 26 +- samples/snippets/classify_text/README.rst | 4 + .../classify_text/classify_text_tutorial.py | 10 +- samples/snippets/classify_text/noxfile.py | 26 +- samples/snippets/cloud-client/v1/noxfile.py | 26 +- .../snippets/cloud-client/v1/quickstart.py | 10 +- .../snippets/cloud-client/v1/set_endpoint.py | 10 +- .../v1/language_sentiment_text.py | 7 +- .../snippets/generated-samples/v1/noxfile.py | 26 +- samples/snippets/sentiment/noxfile.py | 26 +- .../snippets/sentiment/sentiment_analysis.py | 10 +- samples/v1/language_classify_gcs.py | 6 +- samples/v1/language_classify_text.py | 6 +- samples/v1/language_entities_gcs.py | 12 +- samples/v1/language_entities_text.py | 12 +- samples/v1/language_entity_sentiment_gcs.py | 12 +- samples/v1/language_entity_sentiment_text.py | 12 +- samples/v1/language_sentiment_gcs.py | 8 +- samples/v1/language_sentiment_text.py | 8 +- samples/v1/language_syntax_gcs.py | 16 +- samples/v1/language_syntax_text.py | 16 +- scripts/fixup_language_v1_keywords.py | 183 ++ scripts/fixup_language_v1beta2_keywords.py | 183 ++ setup.py | 19 +- synth.metadata | 15 +- synth.py | 27 +- tests/__init__.py | 0 .../v1/test_system_language_service_v1.py | 31 - .../test_system_language_service_v1beta2.py | 32 - tests/unit/gapic/language_v1/__init__.py | 1 + .../language_v1/test_language_service.py | 1771 ++++++++++++++++ tests/unit/gapic/language_v1beta2/__init__.py | 1 + .../language_v1beta2/test_language_service.py | 1773 +++++++++++++++++ .../v1/test_language_service_client_v1.py | 310 --- .../test_language_service_client_v1beta2.py | 310 --- 82 files changed, 11359 insertions(+), 997 deletions(-) create mode 100644 
UPGRADING.md create mode 120000 docs/UPGRADING.md delete mode 100644 docs/gapic/v1/api.rst delete mode 100644 docs/gapic/v1/types.rst delete mode 100644 docs/gapic/v1beta2/api.rst delete mode 100644 docs/gapic/v1beta2/types.rst create mode 100644 docs/language_v1/services.rst create mode 100644 docs/language_v1/types.rst create mode 100644 docs/language_v1beta2/services.rst create mode 100644 docs/language_v1beta2/types.rst create mode 100644 google/cloud/language/__init__.py create mode 100644 google/cloud/language/py.typed create mode 100644 google/cloud/language_v1/py.typed rename {tests/unit => google/cloud/language_v1/services}/__init__.py (90%) rename google/cloud/{ => language_v1/services/language_service}/__init__.py (70%) create mode 100644 google/cloud/language_v1/services/language_service/async_client.py create mode 100644 google/cloud/language_v1/services/language_service/client.py create mode 100644 google/cloud/language_v1/services/language_service/transports/__init__.py create mode 100644 google/cloud/language_v1/services/language_service/transports/base.py create mode 100644 google/cloud/language_v1/services/language_service/transports/grpc.py create mode 100644 google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py create mode 100644 google/cloud/language_v1/types/__init__.py create mode 100644 google/cloud/language_v1/types/language_service.py create mode 100644 google/cloud/language_v1beta2/py.typed rename google/{ => cloud/language_v1beta2/services}/__init__.py (73%) rename google/cloud/{language.py => language_v1beta2/services/language_service/__init__.py} (65%) create mode 100644 google/cloud/language_v1beta2/services/language_service/async_client.py create mode 100644 google/cloud/language_v1beta2/services/language_service/client.py create mode 100644 google/cloud/language_v1beta2/services/language_service/transports/__init__.py create mode 100644 google/cloud/language_v1beta2/services/language_service/transports/base.py create mode 100644 google/cloud/language_v1beta2/services/language_service/transports/grpc.py create mode 100644 google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py create mode 100644 google/cloud/language_v1beta2/types/__init__.py create mode 100644 google/cloud/language_v1beta2/types/language_service.py create mode 100644 mypy.ini create mode 100644 scripts/fixup_language_v1_keywords.py create mode 100644 scripts/fixup_language_v1beta2_keywords.py delete mode 100644 tests/__init__.py delete mode 100644 tests/system/gapic/v1/test_system_language_service_v1.py delete mode 100644 tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py create mode 100644 tests/unit/gapic/language_v1/__init__.py create mode 100644 tests/unit/gapic/language_v1/test_language_service.py create mode 100644 tests/unit/gapic/language_v1beta2/__init__.py create mode 100644 tests/unit/gapic/language_v1beta2/test_language_service.py delete mode 100644 tests/unit/gapic/v1/test_language_service_client_v1.py delete mode 100644 tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py diff --git a/.coveragerc b/.coveragerc index 0d8e6297..fff276ec 100644 --- a/.coveragerc +++ b/.coveragerc @@ -23,16 +23,14 @@ omit = [report] fail_under = 100 show_missing = True +omit = google/cloud/language/__init__.py exclude_lines = # Re-enable the standard pragma pragma: NO COVER # Ignore debug-only repr def __repr__ - # Ignore abstract methods - raise NotImplementedError -omit = - */gapic/*.py - */proto/*.py - */core/*.py - 
*/site-packages/*.py - google/cloud/__init__.py + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. + except pkg_resources.DistributionNotFound diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg index e70b6034..aa92ff73 100644 --- a/.kokoro/samples/python3.6/common.cfg +++ b/.kokoro/samples/python3.6/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.6" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py36" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-language/.kokoro/test-samples.sh" diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg index 4d745031..aa64611c 100644 --- a/.kokoro/samples/python3.7/common.cfg +++ b/.kokoro/samples/python3.7/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.7" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py37" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-language/.kokoro/test-samples.sh" diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg index bf242e12..8fd22150 100644 --- a/.kokoro/samples/python3.8/common.cfg +++ b/.kokoro/samples/python3.8/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.8" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py38" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-language/.kokoro/test-samples.sh" diff --git a/README.rst b/README.rst index 48558dd7..96dd41fc 100644 --- a/README.rst +++ b/README.rst @@ -62,11 +62,13 @@ dependencies. Supported Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 +Python >= 3.6 -Deprecated Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. +Unsupported Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Python == 2.7. + +The last version of this library compatible with Python 2.7 is google-cloud-language=1.3.0 Mac/Linux diff --git a/UPGRADING.md b/UPGRADING.md new file mode 100644 index 00000000..61fdb3f6 --- /dev/null +++ b/UPGRADING.md @@ -0,0 +1,140 @@ +# 2.0.0 Migration Guide + +The 2.0 release of the `google-cloud-language` client is a significant upgrade based on a [next-gen code generator](https://github.com/googleapis/gapic-generator-python), and includes substantial interface changes. Existing code written for earlier versions of this library will likely require updates to use this version. This document describes the changes that have been made, and what you need to do to update your usage. + +If you experience issues or have questions, please file an [issue](https://github.com/googleapis/python-language/issues). + +## Supported Python Versions + +> **WARNING**: Breaking change +The 2.0.0 release requires Python 3.6+. + +## Method Calls + +> **WARNING**: Breaking change +Methods expect request objects. We provide a script that will convert most common use cases. +* Install the library + +```py +python3 -m pip install google-cloud-language +``` + +* The script `fixup_language_v1_keywords.py` is shipped with the library. 
It expects +an input directory (with the code to convert) and an empty destination directory. + +```sh +$ fixup_language_v1_keywords.py --input-directory .samples/ --output-directory samples/ +``` + +**Before:** +```py +from google.cloud import language_v1 +language = language_v1.LanguageClient() +return language.analyze_sentiment(document=document).document_sentiment +``` + + +**After:** +```py +from google.cloud import language_v1 +language = language_v1.LanguageServiceClient() +return language.analyze_sentiment(request={'document': document}).document_sentiment +``` + +### More Details + +In `google-cloud-language<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters. + +**Before:** +```py + def analyze_sentiment( + self, + document, + encoding_type=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): +``` + +In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a parameter is required or optional. + +Some methods have additional keyword only parameters. The available parameters depend on the `google.api.method_signature` annotation specified by the API producer. + + +**After:** +```py + def analyze_sentiment( + self, + request: language_service.AnalyzeSentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSentimentResponse: +``` + +> **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive. +> Passing both will result in an error. +Both of these calls are valid: + +```py +response = client.analyze_sentiment( + request={ + "document": document, + "encoding_type": encoding_type + } +) +``` + +```py +response = client.analyze_sentiment( + document=document, + encoding_type=encoding_type + ) # Make an API request. +``` + +This call is invalid because it mixes `request` with a keyword argument `entry_group`. Executing this code +will result in an error. + +```py +response = client.analyze_sentiment( + request={ + "document": document + }, + encoding_type=encoding_type +) +``` + + + +## Enums and Types + + +> **WARNING**: Breaking change +The submodules `enums` and `types` have been removed. +**Before:** +```py +from google.cloud import language_v1 +document = language_v1.types.Document(content=text, type=language_v1.enums.Document.Type.PLAIN_TEXT) +encoding_type = language_v1.enums.EncodingType.UTF8 +``` + + +**After:** +```py +from google.cloud import language_v1 +document = language_v1.Document(content=text, type_=language_v1.Document.Type.PLAIN_TEXT) +encoding_type = language_v1.EncodingType.UTF8 +``` + +## Project Path Helper Methods + +The project path helper method `project_path` has been removed. Please construct +this path manually. + +```py +project = 'my-project' +project_path = f'projects/{project}' \ No newline at end of file diff --git a/docs/UPGRADING.md b/docs/UPGRADING.md new file mode 120000 index 00000000..01097c8c --- /dev/null +++ b/docs/UPGRADING.md @@ -0,0 +1 @@ +../UPGRADING.md \ No newline at end of file diff --git a/docs/api.rst b/docs/api.rst index 4d714615..8720e9fa 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -7,8 +7,8 @@ API. By default, you will get ``v1``, the latest GA version. .. 
toctree:: :maxdepth: 2 - gapic/v1/api - gapic/v1/types + language_v1/services + language_v1/types If you are interested in beta features ahead of the latest GA, you may opt-in to the v1.1 beta, which is spelled ``v1beta2``. In order to do this, @@ -20,8 +20,18 @@ An API and type reference is provided for the v1.1 beta also: .. toctree:: :maxdepth: 2 - gapic/v1beta2/api - gapic/v1beta2/types + language_v1beta2/services + language_v1beta2/types + +Migration Guide +--------------- + +See the guide below for instructions on migrating to the 2.x release of this library. + +.. toctree:: + :maxdepth: 2 + + UPGRADING .. note:: diff --git a/docs/conf.py b/docs/conf.py index d23820ed..33d16cf7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -347,7 +347,7 @@ intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.io/grpc/python/", None), } diff --git a/docs/gapic/v1/api.rst b/docs/gapic/v1/api.rst deleted file mode 100644 index 2c5fd4fd..00000000 --- a/docs/gapic/v1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Natural Language Client API -=========================== - -.. automodule:: google.cloud.language_v1 - :members: - :inherited-members: diff --git a/docs/gapic/v1/types.rst b/docs/gapic/v1/types.rst deleted file mode 100644 index 90d27a4b..00000000 --- a/docs/gapic/v1/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Natural Language Client Types -============================= - -.. automodule:: google.cloud.language_v1.types - :members: diff --git a/docs/gapic/v1beta2/api.rst b/docs/gapic/v1beta2/api.rst deleted file mode 100644 index 330d7e6e..00000000 --- a/docs/gapic/v1beta2/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Natural Language Beta Client API -================================ - -.. automodule:: google.cloud.language_v1beta2 - :members: - :inherited-members: diff --git a/docs/gapic/v1beta2/types.rst b/docs/gapic/v1beta2/types.rst deleted file mode 100644 index d9a7eb17..00000000 --- a/docs/gapic/v1beta2/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Natural Language Beta Client Types -================================== - -.. automodule:: google.cloud.language_v1beta2.types - :members: diff --git a/docs/language_v1/services.rst b/docs/language_v1/services.rst new file mode 100644 index 00000000..e1af1f07 --- /dev/null +++ b/docs/language_v1/services.rst @@ -0,0 +1,6 @@ +Services for Google Cloud Language v1 API +========================================= + +.. automodule:: google.cloud.language_v1.services.language_service + :members: + :inherited-members: diff --git a/docs/language_v1/types.rst b/docs/language_v1/types.rst new file mode 100644 index 00000000..befde156 --- /dev/null +++ b/docs/language_v1/types.rst @@ -0,0 +1,5 @@ +Types for Google Cloud Language v1 API +====================================== + +.. 
automodule:: google.cloud.language_v1.types + :members: diff --git a/docs/language_v1beta2/services.rst b/docs/language_v1beta2/services.rst new file mode 100644 index 00000000..275e2e7c --- /dev/null +++ b/docs/language_v1beta2/services.rst @@ -0,0 +1,6 @@ +Services for Google Cloud Language v1beta2 API +============================================== + +.. automodule:: google.cloud.language_v1beta2.services.language_service + :members: + :inherited-members: diff --git a/docs/language_v1beta2/types.rst b/docs/language_v1beta2/types.rst new file mode 100644 index 00000000..5a1c2284 --- /dev/null +++ b/docs/language_v1beta2/types.rst @@ -0,0 +1,5 @@ +Types for Google Cloud Language v1beta2 API +=========================================== + +.. automodule:: google.cloud.language_v1beta2.types + :members: diff --git a/google/cloud/language/__init__.py b/google/cloud/language/__init__.py new file mode 100644 index 00000000..4426b53c --- /dev/null +++ b/google/cloud/language/__init__.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.cloud.language_v1.services.language_service.async_client import ( + LanguageServiceAsyncClient, +) +from google.cloud.language_v1.services.language_service.client import ( + LanguageServiceClient, +) +from google.cloud.language_v1.types.language_service import AnalyzeEntitiesRequest +from google.cloud.language_v1.types.language_service import AnalyzeEntitiesResponse +from google.cloud.language_v1.types.language_service import ( + AnalyzeEntitySentimentRequest, +) +from google.cloud.language_v1.types.language_service import ( + AnalyzeEntitySentimentResponse, +) +from google.cloud.language_v1.types.language_service import AnalyzeSentimentRequest +from google.cloud.language_v1.types.language_service import AnalyzeSentimentResponse +from google.cloud.language_v1.types.language_service import AnalyzeSyntaxRequest +from google.cloud.language_v1.types.language_service import AnalyzeSyntaxResponse +from google.cloud.language_v1.types.language_service import AnnotateTextRequest +from google.cloud.language_v1.types.language_service import AnnotateTextResponse +from google.cloud.language_v1.types.language_service import ClassificationCategory +from google.cloud.language_v1.types.language_service import ClassifyTextRequest +from google.cloud.language_v1.types.language_service import ClassifyTextResponse +from google.cloud.language_v1.types.language_service import DependencyEdge +from google.cloud.language_v1.types.language_service import Document +from google.cloud.language_v1.types.language_service import EncodingType +from google.cloud.language_v1.types.language_service import Entity +from google.cloud.language_v1.types.language_service import EntityMention +from google.cloud.language_v1.types.language_service import PartOfSpeech +from google.cloud.language_v1.types.language_service import Sentence +from google.cloud.language_v1.types.language_service 
import Sentiment +from google.cloud.language_v1.types.language_service import TextSpan +from google.cloud.language_v1.types.language_service import Token + +__all__ = ( + "AnalyzeEntitiesRequest", + "AnalyzeEntitiesResponse", + "AnalyzeEntitySentimentRequest", + "AnalyzeEntitySentimentResponse", + "AnalyzeSentimentRequest", + "AnalyzeSentimentResponse", + "AnalyzeSyntaxRequest", + "AnalyzeSyntaxResponse", + "AnnotateTextRequest", + "AnnotateTextResponse", + "ClassificationCategory", + "ClassifyTextRequest", + "ClassifyTextResponse", + "DependencyEdge", + "Document", + "EncodingType", + "Entity", + "EntityMention", + "LanguageServiceAsyncClient", + "LanguageServiceClient", + "PartOfSpeech", + "Sentence", + "Sentiment", + "TextSpan", + "Token", +) diff --git a/google/cloud/language/py.typed b/google/cloud/language/py.typed new file mode 100644 index 00000000..c0acc99a --- /dev/null +++ b/google/cloud/language/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-language package uses inline types. diff --git a/google/cloud/language_v1/__init__.py b/google/cloud/language_v1/__init__.py index a44fe4c9..ba3826be 100644 --- a/google/cloud/language_v1/__init__.py +++ b/google/cloud/language_v1/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2017, Google LLC All rights reserved. +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,17 +13,57 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -from __future__ import absolute_import - -from google.cloud.language_v1 import types -from google.cloud.language_v1.gapic import enums -from google.cloud.language_v1.gapic import language_service_client - - -class LanguageServiceClient(language_service_client.LanguageServiceClient): - __doc__ = language_service_client.LanguageServiceClient.__doc__ - enums = enums +from .services.language_service import LanguageServiceClient +from .types.language_service import AnalyzeEntitiesRequest +from .types.language_service import AnalyzeEntitiesResponse +from .types.language_service import AnalyzeEntitySentimentRequest +from .types.language_service import AnalyzeEntitySentimentResponse +from .types.language_service import AnalyzeSentimentRequest +from .types.language_service import AnalyzeSentimentResponse +from .types.language_service import AnalyzeSyntaxRequest +from .types.language_service import AnalyzeSyntaxResponse +from .types.language_service import AnnotateTextRequest +from .types.language_service import AnnotateTextResponse +from .types.language_service import ClassificationCategory +from .types.language_service import ClassifyTextRequest +from .types.language_service import ClassifyTextResponse +from .types.language_service import DependencyEdge +from .types.language_service import Document +from .types.language_service import EncodingType +from .types.language_service import Entity +from .types.language_service import EntityMention +from .types.language_service import PartOfSpeech +from .types.language_service import Sentence +from .types.language_service import Sentiment +from .types.language_service import TextSpan +from .types.language_service import Token -__all__ = ("enums", "types", "LanguageServiceClient") +__all__ = ( + "AnalyzeEntitiesRequest", + "AnalyzeEntitiesResponse", + "AnalyzeEntitySentimentRequest", + 
"AnalyzeEntitySentimentResponse", + "AnalyzeSentimentRequest", + "AnalyzeSentimentResponse", + "AnalyzeSyntaxRequest", + "AnalyzeSyntaxResponse", + "AnnotateTextRequest", + "AnnotateTextResponse", + "ClassificationCategory", + "ClassifyTextRequest", + "ClassifyTextResponse", + "DependencyEdge", + "Document", + "EncodingType", + "Entity", + "EntityMention", + "PartOfSpeech", + "Sentence", + "Sentiment", + "TextSpan", + "Token", + "LanguageServiceClient", +) diff --git a/google/cloud/language_v1/py.typed b/google/cloud/language_v1/py.typed new file mode 100644 index 00000000..c0acc99a --- /dev/null +++ b/google/cloud/language_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-language package uses inline types. diff --git a/tests/unit/__init__.py b/google/cloud/language_v1/services/__init__.py similarity index 90% rename from tests/unit/__init__.py rename to google/cloud/language_v1/services/__init__.py index df379f1e..42ffdf2b 100644 --- a/tests/unit/__init__.py +++ b/google/cloud/language_v1/services/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2016 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,3 +13,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# diff --git a/google/cloud/__init__.py b/google/cloud/language_v1/services/language_service/__init__.py similarity index 70% rename from google/cloud/__init__.py rename to google/cloud/language_v1/services/language_service/__init__.py index 0e1bc513..d2aff222 100644 --- a/google/cloud/__init__.py +++ b/google/cloud/language_v1/services/language_service/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2016 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,12 +13,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil +from .client import LanguageServiceClient +from .async_client import LanguageServiceAsyncClient - __path__ = pkgutil.extend_path(__path__, __name__) +__all__ = ( + "LanguageServiceClient", + "LanguageServiceAsyncClient", +) diff --git a/google/cloud/language_v1/services/language_service/async_client.py b/google/cloud/language_v1/services/language_service/async_client.py new file mode 100644 index 00000000..f7a214e9 --- /dev/null +++ b/google/cloud/language_v1/services/language_service/async_client.py @@ -0,0 +1,602 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.language_v1.types import language_service + +from .transports.base import LanguageServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import LanguageServiceGrpcAsyncIOTransport +from .client import LanguageServiceClient + + +class LanguageServiceAsyncClient: + """Provides text analysis operations such as sentiment analysis + and entity recognition. + """ + + _client: LanguageServiceClient + + DEFAULT_ENDPOINT = LanguageServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = LanguageServiceClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = LanguageServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(LanguageServiceClient).get_transport_class, type(LanguageServiceClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, LanguageServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the language service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.LanguageServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = LanguageServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def analyze_sentiment( + self, + request: language_service.AnalyzeSentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSentimentResponse: + r"""Analyzes the sentiment of the provided text. + + Args: + request (:class:`~.language_service.AnalyzeSentimentRequest`): + The request object. The sentiment analysis request + message. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate sentence offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeSentimentResponse: + The sentiment analysis response + message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnalyzeSentimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.analyze_sentiment, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def analyze_entities( + self, + request: language_service.AnalyzeEntitiesRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeEntitiesResponse: + r"""Finds named entities (currently proper names and + common nouns) in the text along with entity types, + salience, mentions for each entity, and other + properties. + + Args: + request (:class:`~.language_service.AnalyzeEntitiesRequest`): + The request object. The entity analysis request message. + document (:class:`~.language_service.Document`): + Input document. 
+ This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeEntitiesResponse: + The entity analysis response message. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnalyzeEntitiesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.analyze_entities, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def analyze_entity_sentiment( + self, + request: language_service.AnalyzeEntitySentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeEntitySentimentResponse: + r"""Finds entities, similar to + [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] + in the text and analyzes sentiment associated with each entity + and its mentions. + + Args: + request (:class:`~.language_service.AnalyzeEntitySentimentRequest`): + The request object. The entity-level sentiment analysis + request message. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.language_service.AnalyzeEntitySentimentResponse: + The entity-level sentiment analysis + response message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnalyzeEntitySentimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.analyze_entity_sentiment, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def analyze_syntax( + self, + request: language_service.AnalyzeSyntaxRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSyntaxResponse: + r"""Analyzes the syntax of the text and provides sentence + boundaries and tokenization along with part of speech + tags, dependency trees, and other properties. + + Args: + request (:class:`~.language_service.AnalyzeSyntaxRequest`): + The request object. The syntax analysis request message. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeSyntaxResponse: + The syntax analysis response message. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnalyzeSyntaxRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.analyze_syntax, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def classify_text( + self, + request: language_service.ClassifyTextRequest = None, + *, + document: language_service.Document = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ClassifyTextResponse: + r"""Classifies a document into categories. + + Args: + request (:class:`~.language_service.ClassifyTextRequest`): + The request object. The document classification request + message. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.ClassifyTextResponse: + The document classification response + message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.ClassifyTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.classify_text, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def annotate_text( + self, + request: language_service.AnnotateTextRequest = None, + *, + document: language_service.Document = None, + features: language_service.AnnotateTextRequest.Features = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnnotateTextResponse: + r"""A convenience method that provides all the features + that analyzeSentiment, analyzeEntities, and + analyzeSyntax provide in one call. 
+ + Args: + request (:class:`~.language_service.AnnotateTextRequest`): + The request object. The request message for the text + annotation API, which can perform multiple analysis + types (sentiment, entities, and syntax) in one call. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + features (:class:`~.language_service.AnnotateTextRequest.Features`): + The enabled features. + This corresponds to the ``features`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnnotateTextResponse: + The text annotations response + message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, features, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnnotateTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if features is not None: + request.features = features + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.annotate_text, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-language",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("LanguageServiceAsyncClient",) diff --git a/google/cloud/language_v1/services/language_service/client.py b/google/cloud/language_v1/services/language_service/client.py new file mode 100644 index 00000000..1084acd3 --- /dev/null +++ b/google/cloud/language_v1/services/language_service/client.py @@ -0,0 +1,714 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.language_v1.types import language_service + +from .transports.base import LanguageServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import LanguageServiceGrpcTransport +from .transports.grpc_asyncio import LanguageServiceGrpcAsyncIOTransport + + +class LanguageServiceClientMeta(type): + """Metaclass for the LanguageService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[LanguageServiceTransport]] + _transport_registry["grpc"] = LanguageServiceGrpcTransport + _transport_registry["grpc_asyncio"] = LanguageServiceGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[LanguageServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class LanguageServiceClient(metaclass=LanguageServiceClientMeta): + """Provides text analysis operations such as sentiment analysis + and entity recognition. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
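+ # Four capturing groups (name, mtls, sandbox, googledomain), unpacked below via m.groups().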
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "language.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, LanguageServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the language service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.LanguageServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
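+ # Summary of the resolution below: GOOGLE_API_USE_CLIENT_CERTIFICATE ("true"/"false")
+ # decides whether a client certificate is loaded (from client_options.client_cert_source
+ # or the default SslCredentials); client_options.api_endpoint, when set, always wins,
+ # otherwise GOOGLE_API_USE_MTLS_ENDPOINT ("never"/"always"/"auto") selects between
+ # DEFAULT_ENDPOINT and DEFAULT_MTLS_ENDPOINT, with "auto" using the mTLS endpoint only
+ # when a client certificate was found.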
+ use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, LanguageServiceTransport): + # transport is a LanguageServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def analyze_sentiment( + self, + request: language_service.AnalyzeSentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSentimentResponse: + r"""Analyzes the sentiment of the provided text. + + Args: + request (:class:`~.language_service.AnalyzeSentimentRequest`): + The request object. The sentiment analysis request + message. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate sentence offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeSentimentResponse: + The sentiment analysis response + message. + + """ + # Create or coerce a protobuf request object. 
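+ # Calling contract, illustrated (doc and req below are example names only):
+ #   client.analyze_sentiment(document=doc)              -> OK, flattened arguments
+ #   client.analyze_sentiment(request=req)               -> OK, request object
+ #   client.analyze_sentiment(request=req, document=doc) -> ValueError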
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnalyzeSentimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.AnalyzeSentimentRequest): + request = language_service.AnalyzeSentimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.analyze_sentiment] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def analyze_entities( + self, + request: language_service.AnalyzeEntitiesRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeEntitiesResponse: + r"""Finds named entities (currently proper names and + common nouns) in the text along with entity types, + salience, mentions for each entity, and other + properties. + + Args: + request (:class:`~.language_service.AnalyzeEntitiesRequest`): + The request object. The entity analysis request message. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeEntitiesResponse: + The entity analysis response message. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnalyzeEntitiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, language_service.AnalyzeEntitiesRequest): + request = language_service.AnalyzeEntitiesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.analyze_entities] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def analyze_entity_sentiment( + self, + request: language_service.AnalyzeEntitySentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeEntitySentimentResponse: + r"""Finds entities, similar to + [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] + in the text and analyzes sentiment associated with each entity + and its mentions. + + Args: + request (:class:`~.language_service.AnalyzeEntitySentimentRequest`): + The request object. The entity-level sentiment analysis + request message. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeEntitySentimentResponse: + The entity-level sentiment analysis + response message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnalyzeEntitySentimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.AnalyzeEntitySentimentRequest): + request = language_service.AnalyzeEntitySentimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.analyze_entity_sentiment] + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def analyze_syntax( + self, + request: language_service.AnalyzeSyntaxRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSyntaxResponse: + r"""Analyzes the syntax of the text and provides sentence + boundaries and tokenization along with part of speech + tags, dependency trees, and other properties. + + Args: + request (:class:`~.language_service.AnalyzeSyntaxRequest`): + The request object. The syntax analysis request message. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeSyntaxResponse: + The syntax analysis response message. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnalyzeSyntaxRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.AnalyzeSyntaxRequest): + request = language_service.AnalyzeSyntaxRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.analyze_syntax] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def classify_text( + self, + request: language_service.ClassifyTextRequest = None, + *, + document: language_service.Document = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ClassifyTextResponse: + r"""Classifies a document into categories. + + Args: + request (:class:`~.language_service.ClassifyTextRequest`): + The request object. The document classification request + message. + document (:class:`~.language_service.Document`): + Input document. 
+ This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.ClassifyTextResponse: + The document classification response + message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.ClassifyTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.ClassifyTextRequest): + request = language_service.ClassifyTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.classify_text] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def annotate_text( + self, + request: language_service.AnnotateTextRequest = None, + *, + document: language_service.Document = None, + features: language_service.AnnotateTextRequest.Features = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnnotateTextResponse: + r"""A convenience method that provides all the features + that analyzeSentiment, analyzeEntities, and + analyzeSyntax provide in one call. + + Args: + request (:class:`~.language_service.AnnotateTextRequest`): + The request object. The request message for the text + annotation API, which can perform multiple analysis + types (sentiment, entities, and syntax) in one call. + document (:class:`~.language_service.Document`): + Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + features (:class:`~.language_service.AnnotateTextRequest.Features`): + The enabled features. + This corresponds to the ``features`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnnotateTextResponse: + The text annotations response + message. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, features, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnnotateTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.AnnotateTextRequest): + request = language_service.AnnotateTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if features is not None: + request.features = features + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.annotate_text] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-language",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("LanguageServiceClient",) diff --git a/google/cloud/language_v1/services/language_service/transports/__init__.py b/google/cloud/language_v1/services/language_service/transports/__init__.py new file mode 100644 index 00000000..22069335 --- /dev/null +++ b/google/cloud/language_v1/services/language_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import LanguageServiceTransport +from .grpc import LanguageServiceGrpcTransport +from .grpc_asyncio import LanguageServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[LanguageServiceTransport]] +_transport_registry["grpc"] = LanguageServiceGrpcTransport +_transport_registry["grpc_asyncio"] = LanguageServiceGrpcAsyncIOTransport + + +__all__ = ( + "LanguageServiceTransport", + "LanguageServiceGrpcTransport", + "LanguageServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/language_v1/services/language_service/transports/base.py b/google/cloud/language_v1/services/language_service/transports/base.py new file mode 100644 index 00000000..79ed44e8 --- /dev/null +++ b/google/cloud/language_v1/services/language_service/transports/base.py @@ -0,0 +1,263 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.language_v1.types import language_service + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-language",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class LanguageServiceTransport(abc.ABC): + """Abstract transport class for LanguageService.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ) + + def __init__( + self, + *, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.analyze_sentiment: gapic_v1.method.wrap_method( + self.analyze_sentiment, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.analyze_entities: gapic_v1.method.wrap_method( + self.analyze_entities, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.analyze_entity_sentiment: gapic_v1.method.wrap_method( + self.analyze_entity_sentiment, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.analyze_syntax: gapic_v1.method.wrap_method( + self.analyze_syntax, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.classify_text: gapic_v1.method.wrap_method( + self.classify_text, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.annotate_text: gapic_v1.method.wrap_method( + self.annotate_text, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + } + + @property + def analyze_sentiment( + self, + ) -> typing.Callable[ + [language_service.AnalyzeSentimentRequest], + typing.Union[ + language_service.AnalyzeSentimentResponse, + typing.Awaitable[language_service.AnalyzeSentimentResponse], + ], + ]: + raise NotImplementedError() + + @property + def analyze_entities( + self, + ) -> typing.Callable[ + [language_service.AnalyzeEntitiesRequest], + typing.Union[ + language_service.AnalyzeEntitiesResponse, + typing.Awaitable[language_service.AnalyzeEntitiesResponse], + ], + ]: + raise 
NotImplementedError() + + @property + def analyze_entity_sentiment( + self, + ) -> typing.Callable[ + [language_service.AnalyzeEntitySentimentRequest], + typing.Union[ + language_service.AnalyzeEntitySentimentResponse, + typing.Awaitable[language_service.AnalyzeEntitySentimentResponse], + ], + ]: + raise NotImplementedError() + + @property + def analyze_syntax( + self, + ) -> typing.Callable[ + [language_service.AnalyzeSyntaxRequest], + typing.Union[ + language_service.AnalyzeSyntaxResponse, + typing.Awaitable[language_service.AnalyzeSyntaxResponse], + ], + ]: + raise NotImplementedError() + + @property + def classify_text( + self, + ) -> typing.Callable[ + [language_service.ClassifyTextRequest], + typing.Union[ + language_service.ClassifyTextResponse, + typing.Awaitable[language_service.ClassifyTextResponse], + ], + ]: + raise NotImplementedError() + + @property + def annotate_text( + self, + ) -> typing.Callable[ + [language_service.AnnotateTextRequest], + typing.Union[ + language_service.AnnotateTextResponse, + typing.Awaitable[language_service.AnnotateTextResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("LanguageServiceTransport",) diff --git a/google/cloud/language_v1/services/language_service/transports/grpc.py b/google/cloud/language_v1/services/language_service/transports/grpc.py new file mode 100644 index 00000000..73608a10 --- /dev/null +++ b/google/cloud/language_v1/services/language_service/transports/grpc.py @@ -0,0 +1,415 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.language_v1.types import language_service + +from .base import LanguageServiceTransport, DEFAULT_CLIENT_INFO + + +class LanguageServiceGrpcTransport(LanguageServiceTransport): + """gRPC backend transport for LanguageService. + + Provides text analysis operations such as sentiment analysis + and entity recognition. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. 
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def analyze_sentiment( + self, + ) -> Callable[ + [language_service.AnalyzeSentimentRequest], + language_service.AnalyzeSentimentResponse, + ]: + r"""Return a callable for the analyze sentiment method over gRPC. + + Analyzes the sentiment of the provided text. + + Returns: + Callable[[~.AnalyzeSentimentRequest], + ~.AnalyzeSentimentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
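+        # A hypothetical caller holding a transport instance could invoke
+        # the returned multicallable directly, for example:
+        #
+        #     request = language_service.AnalyzeSentimentRequest(
+        #         document=language_service.Document(
+        #             content="Hello, world!",
+        #             type_=language_service.Document.Type.PLAIN_TEXT,
+        #         )
+        #     )
+        #     response = transport.analyze_sentiment(request)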
+ if "analyze_sentiment" not in self._stubs: + self._stubs["analyze_sentiment"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnalyzeSentiment", + request_serializer=language_service.AnalyzeSentimentRequest.serialize, + response_deserializer=language_service.AnalyzeSentimentResponse.deserialize, + ) + return self._stubs["analyze_sentiment"] + + @property + def analyze_entities( + self, + ) -> Callable[ + [language_service.AnalyzeEntitiesRequest], + language_service.AnalyzeEntitiesResponse, + ]: + r"""Return a callable for the analyze entities method over gRPC. + + Finds named entities (currently proper names and + common nouns) in the text along with entity types, + salience, mentions for each entity, and other + properties. + + Returns: + Callable[[~.AnalyzeEntitiesRequest], + ~.AnalyzeEntitiesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_entities" not in self._stubs: + self._stubs["analyze_entities"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnalyzeEntities", + request_serializer=language_service.AnalyzeEntitiesRequest.serialize, + response_deserializer=language_service.AnalyzeEntitiesResponse.deserialize, + ) + return self._stubs["analyze_entities"] + + @property + def analyze_entity_sentiment( + self, + ) -> Callable[ + [language_service.AnalyzeEntitySentimentRequest], + language_service.AnalyzeEntitySentimentResponse, + ]: + r"""Return a callable for the analyze entity sentiment method over gRPC. + + Finds entities, similar to + [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] + in the text and analyzes sentiment associated with each entity + and its mentions. + + Returns: + Callable[[~.AnalyzeEntitySentimentRequest], + ~.AnalyzeEntitySentimentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_entity_sentiment" not in self._stubs: + self._stubs["analyze_entity_sentiment"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnalyzeEntitySentiment", + request_serializer=language_service.AnalyzeEntitySentimentRequest.serialize, + response_deserializer=language_service.AnalyzeEntitySentimentResponse.deserialize, + ) + return self._stubs["analyze_entity_sentiment"] + + @property + def analyze_syntax( + self, + ) -> Callable[ + [language_service.AnalyzeSyntaxRequest], language_service.AnalyzeSyntaxResponse + ]: + r"""Return a callable for the analyze syntax method over gRPC. + + Analyzes the syntax of the text and provides sentence + boundaries and tokenization along with part of speech + tags, dependency trees, and other properties. + + Returns: + Callable[[~.AnalyzeSyntaxRequest], + ~.AnalyzeSyntaxResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
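+        # Every RPC on this transport follows the same lazy-registration
+        # pattern: the stub is created on first access, cached in
+        # self._stubs under the method's short name, and bound to the
+        # fully-qualified method path of
+        # google.cloud.language.v1.LanguageService.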
+ if "analyze_syntax" not in self._stubs: + self._stubs["analyze_syntax"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnalyzeSyntax", + request_serializer=language_service.AnalyzeSyntaxRequest.serialize, + response_deserializer=language_service.AnalyzeSyntaxResponse.deserialize, + ) + return self._stubs["analyze_syntax"] + + @property + def classify_text( + self, + ) -> Callable[ + [language_service.ClassifyTextRequest], language_service.ClassifyTextResponse + ]: + r"""Return a callable for the classify text method over gRPC. + + Classifies a document into categories. + + Returns: + Callable[[~.ClassifyTextRequest], + ~.ClassifyTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "classify_text" not in self._stubs: + self._stubs["classify_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/ClassifyText", + request_serializer=language_service.ClassifyTextRequest.serialize, + response_deserializer=language_service.ClassifyTextResponse.deserialize, + ) + return self._stubs["classify_text"] + + @property + def annotate_text( + self, + ) -> Callable[ + [language_service.AnnotateTextRequest], language_service.AnnotateTextResponse + ]: + r"""Return a callable for the annotate text method over gRPC. + + A convenience method that provides all the features + that analyzeSentiment, analyzeEntities, and + analyzeSyntax provide in one call. + + Returns: + Callable[[~.AnnotateTextRequest], + ~.AnnotateTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "annotate_text" not in self._stubs: + self._stubs["annotate_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnnotateText", + request_serializer=language_service.AnnotateTextRequest.serialize, + response_deserializer=language_service.AnnotateTextResponse.deserialize, + ) + return self._stubs["annotate_text"] + + +__all__ = ("LanguageServiceGrpcTransport",) diff --git a/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py b/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..b55e8c8b --- /dev/null +++ b/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.language_v1.types import language_service + +from .base import LanguageServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import LanguageServiceGrpcTransport + + +class LanguageServiceGrpcAsyncIOTransport(LanguageServiceTransport): + """gRPC AsyncIO backend transport for LanguageService. + + Provides text analysis operations such as sentiment analysis + and entity recognition. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. 
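+            # ssl_channel_credentials may be None on this path, in which
+            # case the channel helper supplies its default SSL channel
+            # credentials for the target host.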
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def analyze_sentiment( + self, + ) -> Callable[ + [language_service.AnalyzeSentimentRequest], + Awaitable[language_service.AnalyzeSentimentResponse], + ]: + r"""Return a callable for the analyze sentiment method over gRPC. + + Analyzes the sentiment of the provided text. + + Returns: + Callable[[~.AnalyzeSentimentRequest], + Awaitable[~.AnalyzeSentimentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_sentiment" not in self._stubs: + self._stubs["analyze_sentiment"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnalyzeSentiment", + request_serializer=language_service.AnalyzeSentimentRequest.serialize, + response_deserializer=language_service.AnalyzeSentimentResponse.deserialize, + ) + return self._stubs["analyze_sentiment"] + + @property + def analyze_entities( + self, + ) -> Callable[ + [language_service.AnalyzeEntitiesRequest], + Awaitable[language_service.AnalyzeEntitiesResponse], + ]: + r"""Return a callable for the analyze entities method over gRPC. + + Finds named entities (currently proper names and + common nouns) in the text along with entity types, + salience, mentions for each entity, and other + properties. + + Returns: + Callable[[~.AnalyzeEntitiesRequest], + Awaitable[~.AnalyzeEntitiesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_entities" not in self._stubs: + self._stubs["analyze_entities"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnalyzeEntities", + request_serializer=language_service.AnalyzeEntitiesRequest.serialize, + response_deserializer=language_service.AnalyzeEntitiesResponse.deserialize, + ) + return self._stubs["analyze_entities"] + + @property + def analyze_entity_sentiment( + self, + ) -> Callable[ + [language_service.AnalyzeEntitySentimentRequest], + Awaitable[language_service.AnalyzeEntitySentimentResponse], + ]: + r"""Return a callable for the analyze entity sentiment method over gRPC. + + Finds entities, similar to + [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] + in the text and analyzes sentiment associated with each entity + and its mentions. 
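+
+        The aggregate sentiment for each entity is surfaced on
+        ``Entity.sentiment``, and the sentiment for each individual mention
+        on ``EntityMention.sentiment``, in the response.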
+ + Returns: + Callable[[~.AnalyzeEntitySentimentRequest], + Awaitable[~.AnalyzeEntitySentimentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_entity_sentiment" not in self._stubs: + self._stubs["analyze_entity_sentiment"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnalyzeEntitySentiment", + request_serializer=language_service.AnalyzeEntitySentimentRequest.serialize, + response_deserializer=language_service.AnalyzeEntitySentimentResponse.deserialize, + ) + return self._stubs["analyze_entity_sentiment"] + + @property + def analyze_syntax( + self, + ) -> Callable[ + [language_service.AnalyzeSyntaxRequest], + Awaitable[language_service.AnalyzeSyntaxResponse], + ]: + r"""Return a callable for the analyze syntax method over gRPC. + + Analyzes the syntax of the text and provides sentence + boundaries and tokenization along with part of speech + tags, dependency trees, and other properties. + + Returns: + Callable[[~.AnalyzeSyntaxRequest], + Awaitable[~.AnalyzeSyntaxResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_syntax" not in self._stubs: + self._stubs["analyze_syntax"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnalyzeSyntax", + request_serializer=language_service.AnalyzeSyntaxRequest.serialize, + response_deserializer=language_service.AnalyzeSyntaxResponse.deserialize, + ) + return self._stubs["analyze_syntax"] + + @property + def classify_text( + self, + ) -> Callable[ + [language_service.ClassifyTextRequest], + Awaitable[language_service.ClassifyTextResponse], + ]: + r"""Return a callable for the classify text method over gRPC. + + Classifies a document into categories. + + Returns: + Callable[[~.ClassifyTextRequest], + Awaitable[~.ClassifyTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "classify_text" not in self._stubs: + self._stubs["classify_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/ClassifyText", + request_serializer=language_service.ClassifyTextRequest.serialize, + response_deserializer=language_service.ClassifyTextResponse.deserialize, + ) + return self._stubs["classify_text"] + + @property + def annotate_text( + self, + ) -> Callable[ + [language_service.AnnotateTextRequest], + Awaitable[language_service.AnnotateTextResponse], + ]: + r"""Return a callable for the annotate text method over gRPC. + + A convenience method that provides all the features + that analyzeSentiment, analyzeEntities, and + analyzeSyntax provide in one call. + + Returns: + Callable[[~.AnnotateTextRequest], + Awaitable[~.AnnotateTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
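+        # Because this is the asyncio transport, the returned callable
+        # produces an awaitable call object that resolves to the
+        # deserialized response message.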
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "annotate_text" not in self._stubs: + self._stubs["annotate_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/AnnotateText", + request_serializer=language_service.AnnotateTextRequest.serialize, + response_deserializer=language_service.AnnotateTextResponse.deserialize, + ) + return self._stubs["annotate_text"] + + +__all__ = ("LanguageServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/language_v1/types/__init__.py b/google/cloud/language_v1/types/__init__.py new file mode 100644 index 00000000..f44df83e --- /dev/null +++ b/google/cloud/language_v1/types/__init__.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .language_service import ( + Document, + Sentence, + Entity, + Token, + Sentiment, + PartOfSpeech, + DependencyEdge, + EntityMention, + TextSpan, + ClassificationCategory, + AnalyzeSentimentRequest, + AnalyzeSentimentResponse, + AnalyzeEntitySentimentRequest, + AnalyzeEntitySentimentResponse, + AnalyzeEntitiesRequest, + AnalyzeEntitiesResponse, + AnalyzeSyntaxRequest, + AnalyzeSyntaxResponse, + ClassifyTextRequest, + ClassifyTextResponse, + AnnotateTextRequest, + AnnotateTextResponse, +) + + +__all__ = ( + "Document", + "Sentence", + "Entity", + "Token", + "Sentiment", + "PartOfSpeech", + "DependencyEdge", + "EntityMention", + "TextSpan", + "ClassificationCategory", + "AnalyzeSentimentRequest", + "AnalyzeSentimentResponse", + "AnalyzeEntitySentimentRequest", + "AnalyzeEntitySentimentResponse", + "AnalyzeEntitiesRequest", + "AnalyzeEntitiesResponse", + "AnalyzeSyntaxRequest", + "AnalyzeSyntaxResponse", + "ClassifyTextRequest", + "ClassifyTextResponse", + "AnnotateTextRequest", + "AnnotateTextResponse", +) diff --git a/google/cloud/language_v1/types/language_service.py b/google/cloud/language_v1/types/language_service.py new file mode 100644 index 00000000..10664a54 --- /dev/null +++ b/google/cloud/language_v1/types/language_service.py @@ -0,0 +1,879 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.language.v1", + manifest={ + "EncodingType", + "Document", + "Sentence", + "Entity", + "Token", + "Sentiment", + "PartOfSpeech", + "DependencyEdge", + "EntityMention", + "TextSpan", + "ClassificationCategory", + "AnalyzeSentimentRequest", + "AnalyzeSentimentResponse", + "AnalyzeEntitySentimentRequest", + "AnalyzeEntitySentimentResponse", + "AnalyzeEntitiesRequest", + "AnalyzeEntitiesResponse", + "AnalyzeSyntaxRequest", + "AnalyzeSyntaxResponse", + "ClassifyTextRequest", + "ClassifyTextResponse", + "AnnotateTextRequest", + "AnnotateTextResponse", + }, +) + + +class EncodingType(proto.Enum): + r"""Represents the text encoding that the caller uses to process the + output. Providing an ``EncodingType`` is recommended because the API + provides the beginning offsets for various outputs, such as tokens + and mentions, and languages that natively use different text + encodings may access offsets differently. + """ + NONE = 0 + UTF8 = 1 + UTF16 = 2 + UTF32 = 3 + + +class Document(proto.Message): + r"""Represents the input to API methods. + + Attributes: + type_ (~.language_service.Document.Type): + Required. If the type is not set or is ``TYPE_UNSPECIFIED``, + returns an ``INVALID_ARGUMENT`` error. + content (str): + The content of the input in string format. + Cloud audit logging exempt since it is based on + user data. + gcs_content_uri (str): + The Google Cloud Storage URI where the file content is + located. This URI must be of the form: + gs://bucket_name/object_name. For more details, see + https://cloud.google.com/storage/docs/reference-uris. NOTE: + Cloud Storage object versioning is not supported. + language (str): + The language of the document (if not specified, the language + is automatically detected). Both ISO and BCP-47 language + codes are accepted. `Language + Support `__ + lists currently supported languages for each API method. If + the language (either specified by the caller or + automatically detected) is not supported by the called API + method, an ``INVALID_ARGUMENT`` error is returned. + """ + + class Type(proto.Enum): + r"""The document types enum.""" + TYPE_UNSPECIFIED = 0 + PLAIN_TEXT = 1 + HTML = 2 + + type_ = proto.Field(proto.ENUM, number=1, enum=Type,) + + content = proto.Field(proto.STRING, number=2, oneof="source") + + gcs_content_uri = proto.Field(proto.STRING, number=3, oneof="source") + + language = proto.Field(proto.STRING, number=4) + + +class Sentence(proto.Message): + r"""Represents a sentence in the input document. + + Attributes: + text (~.language_service.TextSpan): + The sentence text. + sentiment (~.language_service.Sentiment): + For calls to [AnalyzeSentiment][] or if + [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment] + is set to true, this field will contain the sentiment for + the sentence. + """ + + text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",) + + sentiment = proto.Field(proto.MESSAGE, number=2, message="Sentiment",) + + +class Entity(proto.Message): + r"""Represents a phrase in the text that is a known entity, such + as a person, an organization, or location. The API associates + information, such as salience and mentions, with entities. + + Attributes: + name (str): + The representative name for the entity. + type_ (~.language_service.Entity.Type): + The entity type. 
+ metadata (Sequence[~.language_service.Entity.MetadataEntry]): + Metadata associated with the entity. + + For most entity types, the metadata is a Wikipedia URL + (``wikipedia_url``) and Knowledge Graph MID (``mid``), if + they are available. For the metadata associated with other + entity types, see the Type table below. + salience (float): + The salience score associated with the entity in the [0, + 1.0] range. + + The salience score for an entity provides information about + the importance or centrality of that entity to the entire + document text. Scores closer to 0 are less salient, while + scores closer to 1.0 are highly salient. + mentions (Sequence[~.language_service.EntityMention]): + The mentions of this entity in the input + document. The API currently supports proper noun + mentions. + sentiment (~.language_service.Sentiment): + For calls to [AnalyzeEntitySentiment][] or if + [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] + is set to true, this field will contain the aggregate + sentiment expressed for this entity in the provided + document. + """ + + class Type(proto.Enum): + r"""The type of the entity. For most entity types, the associated + metadata is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph + MID (``mid``). The table below lists the associated fields for + entities that have different metadata. + """ + UNKNOWN = 0 + PERSON = 1 + LOCATION = 2 + ORGANIZATION = 3 + EVENT = 4 + WORK_OF_ART = 5 + CONSUMER_GOOD = 6 + OTHER = 7 + PHONE_NUMBER = 9 + ADDRESS = 10 + DATE = 11 + NUMBER = 12 + PRICE = 13 + + name = proto.Field(proto.STRING, number=1) + + type_ = proto.Field(proto.ENUM, number=2, enum=Type,) + + metadata = proto.MapField(proto.STRING, proto.STRING, number=3) + + salience = proto.Field(proto.FLOAT, number=4) + + mentions = proto.RepeatedField(proto.MESSAGE, number=5, message="EntityMention",) + + sentiment = proto.Field(proto.MESSAGE, number=6, message="Sentiment",) + + +class Token(proto.Message): + r"""Represents the smallest syntactic building block of the text. + + Attributes: + text (~.language_service.TextSpan): + The token text. + part_of_speech (~.language_service.PartOfSpeech): + Parts of speech tag for this token. + dependency_edge (~.language_service.DependencyEdge): + Dependency tree parse for this token. + lemma (str): + `Lemma `__ + of the token. + """ + + text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",) + + part_of_speech = proto.Field(proto.MESSAGE, number=2, message="PartOfSpeech",) + + dependency_edge = proto.Field(proto.MESSAGE, number=3, message="DependencyEdge",) + + lemma = proto.Field(proto.STRING, number=4) + + +class Sentiment(proto.Message): + r"""Represents the feeling associated with the entire text or + entities in the text. + + Attributes: + magnitude (float): + A non-negative number in the [0, +inf) range, which + represents the absolute magnitude of sentiment regardless of + score (positive or negative). + score (float): + Sentiment score between -1.0 (negative + sentiment) and 1.0 (positive sentiment). + """ + + magnitude = proto.Field(proto.FLOAT, number=2) + + score = proto.Field(proto.FLOAT, number=3) + + +class PartOfSpeech(proto.Message): + r"""Represents part of speech information for a token. 
Parts of speech + are as defined in + http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf + + Attributes: + tag (~.language_service.PartOfSpeech.Tag): + The part of speech tag. + aspect (~.language_service.PartOfSpeech.Aspect): + The grammatical aspect. + case (~.language_service.PartOfSpeech.Case): + The grammatical case. + form (~.language_service.PartOfSpeech.Form): + The grammatical form. + gender (~.language_service.PartOfSpeech.Gender): + The grammatical gender. + mood (~.language_service.PartOfSpeech.Mood): + The grammatical mood. + number (~.language_service.PartOfSpeech.Number): + The grammatical number. + person (~.language_service.PartOfSpeech.Person): + The grammatical person. + proper (~.language_service.PartOfSpeech.Proper): + The grammatical properness. + reciprocity (~.language_service.PartOfSpeech.Reciprocity): + The grammatical reciprocity. + tense (~.language_service.PartOfSpeech.Tense): + The grammatical tense. + voice (~.language_service.PartOfSpeech.Voice): + The grammatical voice. + """ + + class Tag(proto.Enum): + r"""The part of speech tags enum.""" + UNKNOWN = 0 + ADJ = 1 + ADP = 2 + ADV = 3 + CONJ = 4 + DET = 5 + NOUN = 6 + NUM = 7 + PRON = 8 + PRT = 9 + PUNCT = 10 + VERB = 11 + X = 12 + AFFIX = 13 + + class Aspect(proto.Enum): + r"""The characteristic of a verb that expresses time flow during + an event. + """ + ASPECT_UNKNOWN = 0 + PERFECTIVE = 1 + IMPERFECTIVE = 2 + PROGRESSIVE = 3 + + class Case(proto.Enum): + r"""The grammatical function performed by a noun or pronoun in a + phrase, clause, or sentence. In some languages, other parts of + speech, such as adjective and determiner, take case inflection + in agreement with the noun. + """ + CASE_UNKNOWN = 0 + ACCUSATIVE = 1 + ADVERBIAL = 2 + COMPLEMENTIVE = 3 + DATIVE = 4 + GENITIVE = 5 + INSTRUMENTAL = 6 + LOCATIVE = 7 + NOMINATIVE = 8 + OBLIQUE = 9 + PARTITIVE = 10 + PREPOSITIONAL = 11 + REFLEXIVE_CASE = 12 + RELATIVE_CASE = 13 + VOCATIVE = 14 + + class Form(proto.Enum): + r"""Depending on the language, Form can be categorizing different + forms of verbs, adjectives, adverbs, etc. For example, + categorizing inflected endings of verbs and adjectives or + distinguishing between short and long forms of adjectives and + participles + """ + FORM_UNKNOWN = 0 + ADNOMIAL = 1 + AUXILIARY = 2 + COMPLEMENTIZER = 3 + FINAL_ENDING = 4 + GERUND = 5 + REALIS = 6 + IRREALIS = 7 + SHORT = 8 + LONG = 9 + ORDER = 10 + SPECIFIC = 11 + + class Gender(proto.Enum): + r"""Gender classes of nouns reflected in the behaviour of + associated words. + """ + GENDER_UNKNOWN = 0 + FEMININE = 1 + MASCULINE = 2 + NEUTER = 3 + + class Mood(proto.Enum): + r"""The grammatical feature of verbs, used for showing modality + and attitude. + """ + MOOD_UNKNOWN = 0 + CONDITIONAL_MOOD = 1 + IMPERATIVE = 2 + INDICATIVE = 3 + INTERROGATIVE = 4 + JUSSIVE = 5 + SUBJUNCTIVE = 6 + + class Number(proto.Enum): + r"""Count distinctions.""" + NUMBER_UNKNOWN = 0 + SINGULAR = 1 + PLURAL = 2 + DUAL = 3 + + class Person(proto.Enum): + r"""The distinction between the speaker, second person, third + person, etc. 
+ """ + PERSON_UNKNOWN = 0 + FIRST = 1 + SECOND = 2 + THIRD = 3 + REFLEXIVE_PERSON = 4 + + class Proper(proto.Enum): + r"""This category shows if the token is part of a proper name.""" + PROPER_UNKNOWN = 0 + PROPER = 1 + NOT_PROPER = 2 + + class Reciprocity(proto.Enum): + r"""Reciprocal features of a pronoun.""" + RECIPROCITY_UNKNOWN = 0 + RECIPROCAL = 1 + NON_RECIPROCAL = 2 + + class Tense(proto.Enum): + r"""Time reference.""" + TENSE_UNKNOWN = 0 + CONDITIONAL_TENSE = 1 + FUTURE = 2 + PAST = 3 + PRESENT = 4 + IMPERFECT = 5 + PLUPERFECT = 6 + + class Voice(proto.Enum): + r"""The relationship between the action that a verb expresses and + the participants identified by its arguments. + """ + VOICE_UNKNOWN = 0 + ACTIVE = 1 + CAUSATIVE = 2 + PASSIVE = 3 + + tag = proto.Field(proto.ENUM, number=1, enum=Tag,) + + aspect = proto.Field(proto.ENUM, number=2, enum=Aspect,) + + case = proto.Field(proto.ENUM, number=3, enum=Case,) + + form = proto.Field(proto.ENUM, number=4, enum=Form,) + + gender = proto.Field(proto.ENUM, number=5, enum=Gender,) + + mood = proto.Field(proto.ENUM, number=6, enum=Mood,) + + number = proto.Field(proto.ENUM, number=7, enum=Number,) + + person = proto.Field(proto.ENUM, number=8, enum=Person,) + + proper = proto.Field(proto.ENUM, number=9, enum=Proper,) + + reciprocity = proto.Field(proto.ENUM, number=10, enum=Reciprocity,) + + tense = proto.Field(proto.ENUM, number=11, enum=Tense,) + + voice = proto.Field(proto.ENUM, number=12, enum=Voice,) + + +class DependencyEdge(proto.Message): + r"""Represents dependency parse tree information for a token. + (For more information on dependency labels, see + http://www.aclweb.org/anthology/P13-2017 + + Attributes: + head_token_index (int): + Represents the head of this token in the dependency tree. + This is the index of the token which has an arc going to + this token. The index is the position of the token in the + array of tokens returned by the API method. If this token is + a root token, then the ``head_token_index`` is its own + index. + label (~.language_service.DependencyEdge.Label): + The parse label for the token. + """ + + class Label(proto.Enum): + r"""The parse label enum for the token.""" + UNKNOWN = 0 + ABBREV = 1 + ACOMP = 2 + ADVCL = 3 + ADVMOD = 4 + AMOD = 5 + APPOS = 6 + ATTR = 7 + AUX = 8 + AUXPASS = 9 + CC = 10 + CCOMP = 11 + CONJ = 12 + CSUBJ = 13 + CSUBJPASS = 14 + DEP = 15 + DET = 16 + DISCOURSE = 17 + DOBJ = 18 + EXPL = 19 + GOESWITH = 20 + IOBJ = 21 + MARK = 22 + MWE = 23 + MWV = 24 + NEG = 25 + NN = 26 + NPADVMOD = 27 + NSUBJ = 28 + NSUBJPASS = 29 + NUM = 30 + NUMBER = 31 + P = 32 + PARATAXIS = 33 + PARTMOD = 34 + PCOMP = 35 + POBJ = 36 + POSS = 37 + POSTNEG = 38 + PRECOMP = 39 + PRECONJ = 40 + PREDET = 41 + PREF = 42 + PREP = 43 + PRONL = 44 + PRT = 45 + PS = 46 + QUANTMOD = 47 + RCMOD = 48 + RCMODREL = 49 + RDROP = 50 + REF = 51 + REMNANT = 52 + REPARANDUM = 53 + ROOT = 54 + SNUM = 55 + SUFF = 56 + TMOD = 57 + TOPIC = 58 + VMOD = 59 + VOCATIVE = 60 + XCOMP = 61 + SUFFIX = 62 + TITLE = 63 + ADVPHMOD = 64 + AUXCAUS = 65 + AUXVV = 66 + DTMOD = 67 + FOREIGN = 68 + KW = 69 + LIST = 70 + NOMC = 71 + NOMCSUBJ = 72 + NOMCSUBJPASS = 73 + NUMC = 74 + COP = 75 + DISLOCATED = 76 + ASP = 77 + GMOD = 78 + GOBJ = 79 + INFMOD = 80 + MES = 81 + NCOMP = 82 + + head_token_index = proto.Field(proto.INT32, number=1) + + label = proto.Field(proto.ENUM, number=2, enum=Label,) + + +class EntityMention(proto.Message): + r"""Represents a mention for an entity in the text. 
Currently, + proper noun mentions are supported. + + Attributes: + text (~.language_service.TextSpan): + The mention text. + type_ (~.language_service.EntityMention.Type): + The type of the entity mention. + sentiment (~.language_service.Sentiment): + For calls to [AnalyzeEntitySentiment][] or if + [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] + is set to true, this field will contain the sentiment + expressed for this mention of the entity in the provided + document. + """ + + class Type(proto.Enum): + r"""The supported types of mentions.""" + TYPE_UNKNOWN = 0 + PROPER = 1 + COMMON = 2 + + text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",) + + type_ = proto.Field(proto.ENUM, number=2, enum=Type,) + + sentiment = proto.Field(proto.MESSAGE, number=3, message=Sentiment,) + + +class TextSpan(proto.Message): + r"""Represents an output piece of text. + + Attributes: + content (str): + The content of the output text. + begin_offset (int): + The API calculates the beginning offset of the content in + the original document according to the + [EncodingType][google.cloud.language.v1.EncodingType] + specified in the API request. + """ + + content = proto.Field(proto.STRING, number=1) + + begin_offset = proto.Field(proto.INT32, number=2) + + +class ClassificationCategory(proto.Message): + r"""Represents a category returned from the text classifier. + + Attributes: + name (str): + The name of the category representing the document, from the + `predefined + taxonomy `__. + confidence (float): + The classifier's confidence of the category. + Number represents how certain the classifier is + that this category represents the given text. + """ + + name = proto.Field(proto.STRING, number=1) + + confidence = proto.Field(proto.FLOAT, number=2) + + +class AnalyzeSentimentRequest(proto.Message): + r"""The sentiment analysis request message. + + Attributes: + document (~.language_service.Document): + Input document. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate sentence offsets. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) + + +class AnalyzeSentimentResponse(proto.Message): + r"""The sentiment analysis response message. + + Attributes: + document_sentiment (~.language_service.Sentiment): + The overall sentiment of the input document. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1.Document.language] + field for more details. + sentences (Sequence[~.language_service.Sentence]): + The sentiment for all the sentences in the + document. + """ + + document_sentiment = proto.Field(proto.MESSAGE, number=1, message=Sentiment,) + + language = proto.Field(proto.STRING, number=2) + + sentences = proto.RepeatedField(proto.MESSAGE, number=3, message=Sentence,) + + +class AnalyzeEntitySentimentRequest(proto.Message): + r"""The entity-level sentiment analysis request message. + + Attributes: + document (~.language_service.Document): + Input document. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate offsets. 
+ """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) + + +class AnalyzeEntitySentimentResponse(proto.Message): + r"""The entity-level sentiment analysis response message. + + Attributes: + entities (Sequence[~.language_service.Entity]): + The recognized entities in the input document + with associated sentiments. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1.Document.language] + field for more details. + """ + + entities = proto.RepeatedField(proto.MESSAGE, number=1, message=Entity,) + + language = proto.Field(proto.STRING, number=2) + + +class AnalyzeEntitiesRequest(proto.Message): + r"""The entity analysis request message. + + Attributes: + document (~.language_service.Document): + Input document. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate offsets. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) + + +class AnalyzeEntitiesResponse(proto.Message): + r"""The entity analysis response message. + + Attributes: + entities (Sequence[~.language_service.Entity]): + The recognized entities in the input + document. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1.Document.language] + field for more details. + """ + + entities = proto.RepeatedField(proto.MESSAGE, number=1, message=Entity,) + + language = proto.Field(proto.STRING, number=2) + + +class AnalyzeSyntaxRequest(proto.Message): + r"""The syntax analysis request message. + + Attributes: + document (~.language_service.Document): + Input document. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate offsets. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) + + +class AnalyzeSyntaxResponse(proto.Message): + r"""The syntax analysis response message. + + Attributes: + sentences (Sequence[~.language_service.Sentence]): + Sentences in the input document. + tokens (Sequence[~.language_service.Token]): + Tokens, along with their syntactic + information, in the input document. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1.Document.language] + field for more details. + """ + + sentences = proto.RepeatedField(proto.MESSAGE, number=1, message=Sentence,) + + tokens = proto.RepeatedField(proto.MESSAGE, number=2, message=Token,) + + language = proto.Field(proto.STRING, number=3) + + +class ClassifyTextRequest(proto.Message): + r"""The document classification request message. + + Attributes: + document (~.language_service.Document): + Input document. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + +class ClassifyTextResponse(proto.Message): + r"""The document classification response message. 
+ + Attributes: + categories (Sequence[~.language_service.ClassificationCategory]): + Categories representing the input document. + """ + + categories = proto.RepeatedField( + proto.MESSAGE, number=1, message=ClassificationCategory, + ) + + +class AnnotateTextRequest(proto.Message): + r"""The request message for the text annotation API, which can + perform multiple analysis types (sentiment, entities, and + syntax) in one call. + + Attributes: + document (~.language_service.Document): + Input document. + features (~.language_service.AnnotateTextRequest.Features): + The enabled features. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate offsets. + """ + + class Features(proto.Message): + r"""All available features for sentiment, syntax, and semantic + analysis. Setting each one to true will enable that specific + analysis for the input. + + Attributes: + extract_syntax (bool): + Extract syntax information. + extract_entities (bool): + Extract entities. + extract_document_sentiment (bool): + Extract document-level sentiment. + extract_entity_sentiment (bool): + Extract entities and their associated + sentiment. + classify_text (bool): + Classify the full document into categories. + """ + + extract_syntax = proto.Field(proto.BOOL, number=1) + + extract_entities = proto.Field(proto.BOOL, number=2) + + extract_document_sentiment = proto.Field(proto.BOOL, number=3) + + extract_entity_sentiment = proto.Field(proto.BOOL, number=4) + + classify_text = proto.Field(proto.BOOL, number=6) + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + features = proto.Field(proto.MESSAGE, number=2, message=Features,) + + encoding_type = proto.Field(proto.ENUM, number=3, enum="EncodingType",) + + +class AnnotateTextResponse(proto.Message): + r"""The text annotations response message. + + Attributes: + sentences (Sequence[~.language_service.Sentence]): + Sentences in the input document. Populated if the user + enables + [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax]. + tokens (Sequence[~.language_service.Token]): + Tokens, along with their syntactic information, in the input + document. Populated if the user enables + [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax]. + entities (Sequence[~.language_service.Entity]): + Entities, along with their semantic information, in the + input document. Populated if the user enables + [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entities]. + document_sentiment (~.language_service.Sentiment): + The overall sentiment for the document. Populated if the + user enables + [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment]. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1.Document.language] + field for more details. + categories (Sequence[~.language_service.ClassificationCategory]): + Categories identified in the input document. 
+ """ + + sentences = proto.RepeatedField(proto.MESSAGE, number=1, message=Sentence,) + + tokens = proto.RepeatedField(proto.MESSAGE, number=2, message=Token,) + + entities = proto.RepeatedField(proto.MESSAGE, number=3, message=Entity,) + + document_sentiment = proto.Field(proto.MESSAGE, number=4, message=Sentiment,) + + language = proto.Field(proto.STRING, number=5) + + categories = proto.RepeatedField( + proto.MESSAGE, number=6, message=ClassificationCategory, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/language_v1beta2/__init__.py b/google/cloud/language_v1beta2/__init__.py index d980c998..ba3826be 100644 --- a/google/cloud/language_v1beta2/__init__.py +++ b/google/cloud/language_v1beta2/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2017, Google LLC All rights reserved. +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,17 +13,57 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -from __future__ import absolute_import - -from google.cloud.language_v1beta2 import types -from google.cloud.language_v1beta2.gapic import enums -from google.cloud.language_v1beta2.gapic import language_service_client - - -class LanguageServiceClient(language_service_client.LanguageServiceClient): - __doc__ = language_service_client.LanguageServiceClient.__doc__ - enums = enums +from .services.language_service import LanguageServiceClient +from .types.language_service import AnalyzeEntitiesRequest +from .types.language_service import AnalyzeEntitiesResponse +from .types.language_service import AnalyzeEntitySentimentRequest +from .types.language_service import AnalyzeEntitySentimentResponse +from .types.language_service import AnalyzeSentimentRequest +from .types.language_service import AnalyzeSentimentResponse +from .types.language_service import AnalyzeSyntaxRequest +from .types.language_service import AnalyzeSyntaxResponse +from .types.language_service import AnnotateTextRequest +from .types.language_service import AnnotateTextResponse +from .types.language_service import ClassificationCategory +from .types.language_service import ClassifyTextRequest +from .types.language_service import ClassifyTextResponse +from .types.language_service import DependencyEdge +from .types.language_service import Document +from .types.language_service import EncodingType +from .types.language_service import Entity +from .types.language_service import EntityMention +from .types.language_service import PartOfSpeech +from .types.language_service import Sentence +from .types.language_service import Sentiment +from .types.language_service import TextSpan +from .types.language_service import Token -__all__ = ("enums", "types", "LanguageServiceClient") +__all__ = ( + "AnalyzeEntitiesRequest", + "AnalyzeEntitiesResponse", + "AnalyzeEntitySentimentRequest", + "AnalyzeEntitySentimentResponse", + "AnalyzeSentimentRequest", + "AnalyzeSentimentResponse", + "AnalyzeSyntaxRequest", + "AnalyzeSyntaxResponse", + "AnnotateTextRequest", + "AnnotateTextResponse", + "ClassificationCategory", + "ClassifyTextRequest", + "ClassifyTextResponse", + "DependencyEdge", + "Document", + "EncodingType", + "Entity", + "EntityMention", + "PartOfSpeech", + "Sentence", + "Sentiment", + "TextSpan", + "Token", + "LanguageServiceClient", +) diff --git 
a/google/cloud/language_v1beta2/py.typed b/google/cloud/language_v1beta2/py.typed new file mode 100644 index 00000000..c0acc99a --- /dev/null +++ b/google/cloud/language_v1beta2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-language package uses inline types. diff --git a/google/__init__.py b/google/cloud/language_v1beta2/services/__init__.py similarity index 73% rename from google/__init__.py rename to google/cloud/language_v1beta2/services/__init__.py index 0e1bc513..42ffdf2b 100644 --- a/google/__init__.py +++ b/google/cloud/language_v1beta2/services/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2016 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,12 +13,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) +# diff --git a/google/cloud/language.py b/google/cloud/language_v1beta2/services/language_service/__init__.py similarity index 65% rename from google/cloud/language.py rename to google/cloud/language_v1beta2/services/language_service/__init__.py index 624bd119..d2aff222 100644 --- a/google/cloud/language.py +++ b/google/cloud/language_v1beta2/services/language_service/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2017, Google LLC All rights reserved. +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,11 +13,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -from __future__ import absolute_import - -from google.cloud.language_v1 import LanguageServiceClient -from google.cloud.language_v1 import enums -from google.cloud.language_v1 import types +from .client import LanguageServiceClient +from .async_client import LanguageServiceAsyncClient -__all__ = ("enums", "types", "LanguageServiceClient") +__all__ = ( + "LanguageServiceClient", + "LanguageServiceAsyncClient", +) diff --git a/google/cloud/language_v1beta2/services/language_service/async_client.py b/google/cloud/language_v1beta2/services/language_service/async_client.py new file mode 100644 index 00000000..0c2f1c99 --- /dev/null +++ b/google/cloud/language_v1beta2/services/language_service/async_client.py @@ -0,0 +1,603 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.language_v1beta2.types import language_service + +from .transports.base import LanguageServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import LanguageServiceGrpcAsyncIOTransport +from .client import LanguageServiceClient + + +class LanguageServiceAsyncClient: + """Provides text analysis operations such as sentiment analysis + and entity recognition. + """ + + _client: LanguageServiceClient + + DEFAULT_ENDPOINT = LanguageServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = LanguageServiceClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = LanguageServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(LanguageServiceClient).get_transport_class, type(LanguageServiceClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, LanguageServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the language service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.LanguageServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = LanguageServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def analyze_sentiment( + self, + request: language_service.AnalyzeSentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSentimentResponse: + r"""Analyzes the sentiment of the provided text. + + Args: + request (:class:`~.language_service.AnalyzeSentimentRequest`): + The request object. The sentiment analysis request + message. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate sentence offsets for the + sentence sentiment. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeSentimentResponse: + The sentiment analysis response + message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnalyzeSentimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.analyze_sentiment, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def analyze_entities( + self, + request: language_service.AnalyzeEntitiesRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeEntitiesResponse: + r"""Finds named entities (currently proper names and + common nouns) in the text along with entity types, + salience, mentions for each entity, and other + properties. + + Args: + request (:class:`~.language_service.AnalyzeEntitiesRequest`): + The request object. The entity analysis request message. 
+ document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeEntitiesResponse: + The entity analysis response message. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnalyzeEntitiesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.analyze_entities, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def analyze_entity_sentiment( + self, + request: language_service.AnalyzeEntitySentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeEntitySentimentResponse: + r"""Finds entities, similar to + [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] + in the text and analyzes sentiment associated with each entity + and its mentions. + + Args: + request (:class:`~.language_service.AnalyzeEntitySentimentRequest`): + The request object. The entity-level sentiment analysis + request message. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.language_service.AnalyzeEntitySentimentResponse: + The entity-level sentiment analysis + response message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnalyzeEntitySentimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.analyze_entity_sentiment, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def analyze_syntax( + self, + request: language_service.AnalyzeSyntaxRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSyntaxResponse: + r"""Analyzes the syntax of the text and provides sentence + boundaries and tokenization along with part-of-speech + tags, dependency trees, and other properties. + + Args: + request (:class:`~.language_service.AnalyzeSyntaxRequest`): + The request object. The syntax analysis request message. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeSyntaxResponse: + The syntax analysis response message. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnalyzeSyntaxRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.analyze_syntax, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def classify_text( + self, + request: language_service.ClassifyTextRequest = None, + *, + document: language_service.Document = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ClassifyTextResponse: + r"""Classifies a document into categories. + + Args: + request (:class:`~.language_service.ClassifyTextRequest`): + The request object. The document classification request + message. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.ClassifyTextResponse: + The document classification response + message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.ClassifyTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.classify_text, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def annotate_text( + self, + request: language_service.AnnotateTextRequest = None, + *, + document: language_service.Document = None, + features: language_service.AnnotateTextRequest.Features = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnnotateTextResponse: + r"""A convenience method that provides all syntax, + sentiment, entity, and classification features in one + call. 
+ + Args: + request (:class:`~.language_service.AnnotateTextRequest`): + The request object. The request message for the text + annotation API, which can perform multiple analysis + types (sentiment, entities, and syntax) in one call. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + features (:class:`~.language_service.AnnotateTextRequest.Features`): + Required. The enabled features. + This corresponds to the ``features`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnnotateTextResponse: + The text annotations response + message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([document, features, encoding_type]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.AnnotateTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if features is not None: + request.features = features + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.annotate_text, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-language",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("LanguageServiceAsyncClient",) diff --git a/google/cloud/language_v1beta2/services/language_service/client.py b/google/cloud/language_v1beta2/services/language_service/client.py new file mode 100644 index 00000000..c2d85031 --- /dev/null +++ b/google/cloud/language_v1beta2/services/language_service/client.py @@ -0,0 +1,715 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.cloud.language_v1beta2.types import language_service
+
+from .transports.base import LanguageServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import LanguageServiceGrpcTransport
+from .transports.grpc_asyncio import LanguageServiceGrpcAsyncIOTransport
+
+
+class LanguageServiceClientMeta(type):
+    """Metaclass for the LanguageService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[LanguageServiceTransport]]
+    _transport_registry["grpc"] = LanguageServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = LanguageServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls, label: str = None,) -> Type[LanguageServiceTransport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class LanguageServiceClient(metaclass=LanguageServiceClientMeta):
+    """Provides text analysis operations such as sentiment analysis
+    and entity recognition.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "language.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, LanguageServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the language service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.LanguageServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, LanguageServiceTransport): + # transport is a LanguageServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def analyze_sentiment( + self, + request: language_service.AnalyzeSentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSentimentResponse: + r"""Analyzes the sentiment of the provided text. + + Args: + request (:class:`~.language_service.AnalyzeSentimentRequest`): + The request object. The sentiment analysis request + message. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate sentence offsets for the + sentence sentiment. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeSentimentResponse: + The sentiment analysis response + message. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnalyzeSentimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.AnalyzeSentimentRequest): + request = language_service.AnalyzeSentimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.analyze_sentiment] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def analyze_entities( + self, + request: language_service.AnalyzeEntitiesRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeEntitiesResponse: + r"""Finds named entities (currently proper names and + common nouns) in the text along with entity types, + salience, mentions for each entity, and other + properties. + + Args: + request (:class:`~.language_service.AnalyzeEntitiesRequest`): + The request object. The entity analysis request message. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeEntitiesResponse: + The entity analysis response message. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnalyzeEntitiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, language_service.AnalyzeEntitiesRequest): + request = language_service.AnalyzeEntitiesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.analyze_entities] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def analyze_entity_sentiment( + self, + request: language_service.AnalyzeEntitySentimentRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeEntitySentimentResponse: + r"""Finds entities, similar to + [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] + in the text and analyzes sentiment associated with each entity + and its mentions. + + Args: + request (:class:`~.language_service.AnalyzeEntitySentimentRequest`): + The request object. The entity-level sentiment analysis + request message. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeEntitySentimentResponse: + The entity-level sentiment analysis + response message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnalyzeEntitySentimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.AnalyzeEntitySentimentRequest): + request = language_service.AnalyzeEntitySentimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.analyze_entity_sentiment] + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def analyze_syntax( + self, + request: language_service.AnalyzeSyntaxRequest = None, + *, + document: language_service.Document = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnalyzeSyntaxResponse: + r"""Analyzes the syntax of the text and provides sentence + boundaries and tokenization along with part-of-speech + tags, dependency trees, and other properties. + + Args: + request (:class:`~.language_service.AnalyzeSyntaxRequest`): + The request object. The syntax analysis request message. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnalyzeSyntaxResponse: + The syntax analysis response message. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnalyzeSyntaxRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.AnalyzeSyntaxRequest): + request = language_service.AnalyzeSyntaxRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.analyze_syntax] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def classify_text( + self, + request: language_service.ClassifyTextRequest = None, + *, + document: language_service.Document = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ClassifyTextResponse: + r"""Classifies a document into categories. + + Args: + request (:class:`~.language_service.ClassifyTextRequest`): + The request object. The document classification request + message. + document (:class:`~.language_service.Document`): + Required. Input document. 
+ This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.ClassifyTextResponse: + The document classification response + message. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.ClassifyTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.ClassifyTextRequest): + request = language_service.ClassifyTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.classify_text] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def annotate_text( + self, + request: language_service.AnnotateTextRequest = None, + *, + document: language_service.Document = None, + features: language_service.AnnotateTextRequest.Features = None, + encoding_type: language_service.EncodingType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.AnnotateTextResponse: + r"""A convenience method that provides all syntax, + sentiment, entity, and classification features in one + call. + + Args: + request (:class:`~.language_service.AnnotateTextRequest`): + The request object. The request message for the text + annotation API, which can perform multiple analysis + types (sentiment, entities, and syntax) in one call. + document (:class:`~.language_service.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + features (:class:`~.language_service.AnnotateTextRequest.Features`): + Required. The enabled features. + This corresponds to the ``features`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + encoding_type (:class:`~.language_service.EncodingType`): + The encoding type used by the API to + calculate offsets. + This corresponds to the ``encoding_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.AnnotateTextResponse: + The text annotations response + message. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document, features, encoding_type]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.AnnotateTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.AnnotateTextRequest): + request = language_service.AnnotateTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if document is not None: + request.document = document + if features is not None: + request.features = features + if encoding_type is not None: + request.encoding_type = encoding_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.annotate_text] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-language",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("LanguageServiceClient",) diff --git a/google/cloud/language_v1beta2/services/language_service/transports/__init__.py b/google/cloud/language_v1beta2/services/language_service/transports/__init__.py new file mode 100644 index 00000000..22069335 --- /dev/null +++ b/google/cloud/language_v1beta2/services/language_service/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import LanguageServiceTransport +from .grpc import LanguageServiceGrpcTransport +from .grpc_asyncio import LanguageServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[LanguageServiceTransport]] +_transport_registry["grpc"] = LanguageServiceGrpcTransport +_transport_registry["grpc_asyncio"] = LanguageServiceGrpcAsyncIOTransport + + +__all__ = ( + "LanguageServiceTransport", + "LanguageServiceGrpcTransport", + "LanguageServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/language_v1beta2/services/language_service/transports/base.py b/google/cloud/language_v1beta2/services/language_service/transports/base.py new file mode 100644 index 00000000..aa6eb5d0 --- /dev/null +++ b/google/cloud/language_v1beta2/services/language_service/transports/base.py @@ -0,0 +1,263 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.language_v1beta2.types import language_service + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-language",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class LanguageServiceTransport(abc.ABC): + """Abstract transport class for LanguageService.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ) + + def __init__( + self, + *, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.analyze_sentiment: gapic_v1.method.wrap_method( + self.analyze_sentiment, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.analyze_entities: gapic_v1.method.wrap_method( + self.analyze_entities, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.analyze_entity_sentiment: gapic_v1.method.wrap_method( + self.analyze_entity_sentiment, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.analyze_syntax: gapic_v1.method.wrap_method( + self.analyze_syntax, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.classify_text: gapic_v1.method.wrap_method( + self.classify_text, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.annotate_text: gapic_v1.method.wrap_method( + self.annotate_text, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=client_info, + ), + } + + @property + def analyze_sentiment( + self, + ) -> typing.Callable[ + [language_service.AnalyzeSentimentRequest], + typing.Union[ + language_service.AnalyzeSentimentResponse, + typing.Awaitable[language_service.AnalyzeSentimentResponse], + ], + ]: + raise NotImplementedError() + + @property + def analyze_entities( + self, + ) -> typing.Callable[ + [language_service.AnalyzeEntitiesRequest], + typing.Union[ + language_service.AnalyzeEntitiesResponse, + typing.Awaitable[language_service.AnalyzeEntitiesResponse], + ], + ]: + raise 
NotImplementedError() + + @property + def analyze_entity_sentiment( + self, + ) -> typing.Callable[ + [language_service.AnalyzeEntitySentimentRequest], + typing.Union[ + language_service.AnalyzeEntitySentimentResponse, + typing.Awaitable[language_service.AnalyzeEntitySentimentResponse], + ], + ]: + raise NotImplementedError() + + @property + def analyze_syntax( + self, + ) -> typing.Callable[ + [language_service.AnalyzeSyntaxRequest], + typing.Union[ + language_service.AnalyzeSyntaxResponse, + typing.Awaitable[language_service.AnalyzeSyntaxResponse], + ], + ]: + raise NotImplementedError() + + @property + def classify_text( + self, + ) -> typing.Callable[ + [language_service.ClassifyTextRequest], + typing.Union[ + language_service.ClassifyTextResponse, + typing.Awaitable[language_service.ClassifyTextResponse], + ], + ]: + raise NotImplementedError() + + @property + def annotate_text( + self, + ) -> typing.Callable[ + [language_service.AnnotateTextRequest], + typing.Union[ + language_service.AnnotateTextResponse, + typing.Awaitable[language_service.AnnotateTextResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("LanguageServiceTransport",) diff --git a/google/cloud/language_v1beta2/services/language_service/transports/grpc.py b/google/cloud/language_v1beta2/services/language_service/transports/grpc.py new file mode 100644 index 00000000..dd734bc0 --- /dev/null +++ b/google/cloud/language_v1beta2/services/language_service/transports/grpc.py @@ -0,0 +1,415 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.language_v1beta2.types import language_service + +from .base import LanguageServiceTransport, DEFAULT_CLIENT_INFO + + +class LanguageServiceGrpcTransport(LanguageServiceTransport): + """gRPC backend transport for LanguageService. + + Provides text analysis operations such as sentiment analysis + and entity recognition. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. 
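+            # The ssl_credentials built above (from the caller's client certificate or application default SSL credentials) are what make this a mutual TLS channel.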
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def analyze_sentiment( + self, + ) -> Callable[ + [language_service.AnalyzeSentimentRequest], + language_service.AnalyzeSentimentResponse, + ]: + r"""Return a callable for the analyze sentiment method over gRPC. + + Analyzes the sentiment of the provided text. + + Returns: + Callable[[~.AnalyzeSentimentRequest], + ~.AnalyzeSentimentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
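+        # The stub is created on first access and cached in self._stubs, so later accesses reuse the same gRPC callable.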
+ if "analyze_sentiment" not in self._stubs: + self._stubs["analyze_sentiment"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment", + request_serializer=language_service.AnalyzeSentimentRequest.serialize, + response_deserializer=language_service.AnalyzeSentimentResponse.deserialize, + ) + return self._stubs["analyze_sentiment"] + + @property + def analyze_entities( + self, + ) -> Callable[ + [language_service.AnalyzeEntitiesRequest], + language_service.AnalyzeEntitiesResponse, + ]: + r"""Return a callable for the analyze entities method over gRPC. + + Finds named entities (currently proper names and + common nouns) in the text along with entity types, + salience, mentions for each entity, and other + properties. + + Returns: + Callable[[~.AnalyzeEntitiesRequest], + ~.AnalyzeEntitiesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_entities" not in self._stubs: + self._stubs["analyze_entities"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities", + request_serializer=language_service.AnalyzeEntitiesRequest.serialize, + response_deserializer=language_service.AnalyzeEntitiesResponse.deserialize, + ) + return self._stubs["analyze_entities"] + + @property + def analyze_entity_sentiment( + self, + ) -> Callable[ + [language_service.AnalyzeEntitySentimentRequest], + language_service.AnalyzeEntitySentimentResponse, + ]: + r"""Return a callable for the analyze entity sentiment method over gRPC. + + Finds entities, similar to + [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] + in the text and analyzes sentiment associated with each entity + and its mentions. + + Returns: + Callable[[~.AnalyzeEntitySentimentRequest], + ~.AnalyzeEntitySentimentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_entity_sentiment" not in self._stubs: + self._stubs["analyze_entity_sentiment"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment", + request_serializer=language_service.AnalyzeEntitySentimentRequest.serialize, + response_deserializer=language_service.AnalyzeEntitySentimentResponse.deserialize, + ) + return self._stubs["analyze_entity_sentiment"] + + @property + def analyze_syntax( + self, + ) -> Callable[ + [language_service.AnalyzeSyntaxRequest], language_service.AnalyzeSyntaxResponse + ]: + r"""Return a callable for the analyze syntax method over gRPC. + + Analyzes the syntax of the text and provides sentence + boundaries and tokenization along with part-of-speech + tags, dependency trees, and other properties. + + Returns: + Callable[[~.AnalyzeSyntaxRequest], + ~.AnalyzeSyntaxResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "analyze_syntax" not in self._stubs: + self._stubs["analyze_syntax"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax", + request_serializer=language_service.AnalyzeSyntaxRequest.serialize, + response_deserializer=language_service.AnalyzeSyntaxResponse.deserialize, + ) + return self._stubs["analyze_syntax"] + + @property + def classify_text( + self, + ) -> Callable[ + [language_service.ClassifyTextRequest], language_service.ClassifyTextResponse + ]: + r"""Return a callable for the classify text method over gRPC. + + Classifies a document into categories. + + Returns: + Callable[[~.ClassifyTextRequest], + ~.ClassifyTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "classify_text" not in self._stubs: + self._stubs["classify_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/ClassifyText", + request_serializer=language_service.ClassifyTextRequest.serialize, + response_deserializer=language_service.ClassifyTextResponse.deserialize, + ) + return self._stubs["classify_text"] + + @property + def annotate_text( + self, + ) -> Callable[ + [language_service.AnnotateTextRequest], language_service.AnnotateTextResponse + ]: + r"""Return a callable for the annotate text method over gRPC. + + A convenience method that provides all syntax, + sentiment, entity, and classification features in one + call. + + Returns: + Callable[[~.AnnotateTextRequest], + ~.AnnotateTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "annotate_text" not in self._stubs: + self._stubs["annotate_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnnotateText", + request_serializer=language_service.AnnotateTextRequest.serialize, + response_deserializer=language_service.AnnotateTextResponse.deserialize, + ) + return self._stubs["annotate_text"] + + +__all__ = ("LanguageServiceGrpcTransport",) diff --git a/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py b/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..7898ec3f --- /dev/null +++ b/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.language_v1beta2.types import language_service + +from .base import LanguageServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import LanguageServiceGrpcTransport + + +class LanguageServiceGrpcAsyncIOTransport(LanguageServiceTransport): + """gRPC AsyncIO backend transport for LanguageService. + + Provides text analysis operations such as sentiment analysis + and entity recognition. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "language.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. 
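+            # Any ssl_channel_credentials supplied by the caller are forwarded to channel creation here.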
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def analyze_sentiment( + self, + ) -> Callable[ + [language_service.AnalyzeSentimentRequest], + Awaitable[language_service.AnalyzeSentimentResponse], + ]: + r"""Return a callable for the analyze sentiment method over gRPC. + + Analyzes the sentiment of the provided text. + + Returns: + Callable[[~.AnalyzeSentimentRequest], + Awaitable[~.AnalyzeSentimentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_sentiment" not in self._stubs: + self._stubs["analyze_sentiment"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment", + request_serializer=language_service.AnalyzeSentimentRequest.serialize, + response_deserializer=language_service.AnalyzeSentimentResponse.deserialize, + ) + return self._stubs["analyze_sentiment"] + + @property + def analyze_entities( + self, + ) -> Callable[ + [language_service.AnalyzeEntitiesRequest], + Awaitable[language_service.AnalyzeEntitiesResponse], + ]: + r"""Return a callable for the analyze entities method over gRPC. + + Finds named entities (currently proper names and + common nouns) in the text along with entity types, + salience, mentions for each entity, and other + properties. + + Returns: + Callable[[~.AnalyzeEntitiesRequest], + Awaitable[~.AnalyzeEntitiesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_entities" not in self._stubs: + self._stubs["analyze_entities"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities", + request_serializer=language_service.AnalyzeEntitiesRequest.serialize, + response_deserializer=language_service.AnalyzeEntitiesResponse.deserialize, + ) + return self._stubs["analyze_entities"] + + @property + def analyze_entity_sentiment( + self, + ) -> Callable[ + [language_service.AnalyzeEntitySentimentRequest], + Awaitable[language_service.AnalyzeEntitySentimentResponse], + ]: + r"""Return a callable for the analyze entity sentiment method over gRPC. + + Finds entities, similar to + [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] + in the text and analyzes sentiment associated with each entity + and its mentions. 
+ + Returns: + Callable[[~.AnalyzeEntitySentimentRequest], + Awaitable[~.AnalyzeEntitySentimentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_entity_sentiment" not in self._stubs: + self._stubs["analyze_entity_sentiment"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment", + request_serializer=language_service.AnalyzeEntitySentimentRequest.serialize, + response_deserializer=language_service.AnalyzeEntitySentimentResponse.deserialize, + ) + return self._stubs["analyze_entity_sentiment"] + + @property + def analyze_syntax( + self, + ) -> Callable[ + [language_service.AnalyzeSyntaxRequest], + Awaitable[language_service.AnalyzeSyntaxResponse], + ]: + r"""Return a callable for the analyze syntax method over gRPC. + + Analyzes the syntax of the text and provides sentence + boundaries and tokenization along with part-of-speech + tags, dependency trees, and other properties. + + Returns: + Callable[[~.AnalyzeSyntaxRequest], + Awaitable[~.AnalyzeSyntaxResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "analyze_syntax" not in self._stubs: + self._stubs["analyze_syntax"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax", + request_serializer=language_service.AnalyzeSyntaxRequest.serialize, + response_deserializer=language_service.AnalyzeSyntaxResponse.deserialize, + ) + return self._stubs["analyze_syntax"] + + @property + def classify_text( + self, + ) -> Callable[ + [language_service.ClassifyTextRequest], + Awaitable[language_service.ClassifyTextResponse], + ]: + r"""Return a callable for the classify text method over gRPC. + + Classifies a document into categories. + + Returns: + Callable[[~.ClassifyTextRequest], + Awaitable[~.ClassifyTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "classify_text" not in self._stubs: + self._stubs["classify_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/ClassifyText", + request_serializer=language_service.ClassifyTextRequest.serialize, + response_deserializer=language_service.ClassifyTextResponse.deserialize, + ) + return self._stubs["classify_text"] + + @property + def annotate_text( + self, + ) -> Callable[ + [language_service.AnnotateTextRequest], + Awaitable[language_service.AnnotateTextResponse], + ]: + r"""Return a callable for the annotate text method over gRPC. + + A convenience method that provides all syntax, + sentiment, entity, and classification features in one + call. + + Returns: + Callable[[~.AnnotateTextRequest], + Awaitable[~.AnnotateTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "annotate_text" not in self._stubs: + self._stubs["annotate_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/AnnotateText", + request_serializer=language_service.AnnotateTextRequest.serialize, + response_deserializer=language_service.AnnotateTextResponse.deserialize, + ) + return self._stubs["annotate_text"] + + +__all__ = ("LanguageServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/language_v1beta2/types/__init__.py b/google/cloud/language_v1beta2/types/__init__.py new file mode 100644 index 00000000..f44df83e --- /dev/null +++ b/google/cloud/language_v1beta2/types/__init__.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .language_service import ( + Document, + Sentence, + Entity, + Token, + Sentiment, + PartOfSpeech, + DependencyEdge, + EntityMention, + TextSpan, + ClassificationCategory, + AnalyzeSentimentRequest, + AnalyzeSentimentResponse, + AnalyzeEntitySentimentRequest, + AnalyzeEntitySentimentResponse, + AnalyzeEntitiesRequest, + AnalyzeEntitiesResponse, + AnalyzeSyntaxRequest, + AnalyzeSyntaxResponse, + ClassifyTextRequest, + ClassifyTextResponse, + AnnotateTextRequest, + AnnotateTextResponse, +) + + +__all__ = ( + "Document", + "Sentence", + "Entity", + "Token", + "Sentiment", + "PartOfSpeech", + "DependencyEdge", + "EntityMention", + "TextSpan", + "ClassificationCategory", + "AnalyzeSentimentRequest", + "AnalyzeSentimentResponse", + "AnalyzeEntitySentimentRequest", + "AnalyzeEntitySentimentResponse", + "AnalyzeEntitiesRequest", + "AnalyzeEntitiesResponse", + "AnalyzeSyntaxRequest", + "AnalyzeSyntaxResponse", + "ClassifyTextRequest", + "ClassifyTextResponse", + "AnnotateTextRequest", + "AnnotateTextResponse", +) diff --git a/google/cloud/language_v1beta2/types/language_service.py b/google/cloud/language_v1beta2/types/language_service.py new file mode 100644 index 00000000..411dd8ee --- /dev/null +++ b/google/cloud/language_v1beta2/types/language_service.py @@ -0,0 +1,880 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.language.v1beta2", + manifest={ + "EncodingType", + "Document", + "Sentence", + "Entity", + "Token", + "Sentiment", + "PartOfSpeech", + "DependencyEdge", + "EntityMention", + "TextSpan", + "ClassificationCategory", + "AnalyzeSentimentRequest", + "AnalyzeSentimentResponse", + "AnalyzeEntitySentimentRequest", + "AnalyzeEntitySentimentResponse", + "AnalyzeEntitiesRequest", + "AnalyzeEntitiesResponse", + "AnalyzeSyntaxRequest", + "AnalyzeSyntaxResponse", + "ClassifyTextRequest", + "ClassifyTextResponse", + "AnnotateTextRequest", + "AnnotateTextResponse", + }, +) + + +class EncodingType(proto.Enum): + r"""Represents the text encoding that the caller uses to process the + output. Providing an ``EncodingType`` is recommended because the API + provides the beginning offsets for various outputs, such as tokens + and mentions, and languages that natively use different text + encodings may access offsets differently. + """ + NONE = 0 + UTF8 = 1 + UTF16 = 2 + UTF32 = 3 + + +class Document(proto.Message): + r"""Represents the input to API methods. + + Attributes: + type_ (~.language_service.Document.Type): + Required. If the type is not set or is ``TYPE_UNSPECIFIED``, + returns an ``INVALID_ARGUMENT`` error. + content (str): + The content of the input in string format. + Cloud audit logging exempt since it is based on + user data. + gcs_content_uri (str): + The Google Cloud Storage URI where the file content is + located. This URI must be of the form: + gs://bucket_name/object_name. For more details, see + https://cloud.google.com/storage/docs/reference-uris. NOTE: + Cloud Storage object versioning is not supported. + language (str): + The language of the document (if not specified, the language + is automatically detected). Both ISO and BCP-47 language + codes are accepted. `Language + Support `__ + lists currently supported languages for each API method. If + the language (either specified by the caller or + automatically detected) is not supported by the called API + method, an ``INVALID_ARGUMENT`` error is returned. + """ + + class Type(proto.Enum): + r"""The document types enum.""" + TYPE_UNSPECIFIED = 0 + PLAIN_TEXT = 1 + HTML = 2 + + type_ = proto.Field(proto.ENUM, number=1, enum=Type,) + + content = proto.Field(proto.STRING, number=2, oneof="source") + + gcs_content_uri = proto.Field(proto.STRING, number=3, oneof="source") + + language = proto.Field(proto.STRING, number=4) + + +class Sentence(proto.Message): + r"""Represents a sentence in the input document. + + Attributes: + text (~.language_service.TextSpan): + The sentence text. + sentiment (~.language_service.Sentiment): + For calls to [AnalyzeSentiment][] or if + [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment] + is set to true, this field will contain the sentiment for + the sentence. + """ + + text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",) + + sentiment = proto.Field(proto.MESSAGE, number=2, message="Sentiment",) + + +class Entity(proto.Message): + r"""Represents a phrase in the text that is a known entity, such + as a person, an organization, or location. The API associates + information, such as salience and mentions, with entities. + + Attributes: + name (str): + The representative name for the entity. + type_ (~.language_service.Entity.Type): + The entity type. 
+ metadata (Sequence[~.language_service.Entity.MetadataEntry]): + Metadata associated with the entity. + + For most entity types, the metadata is a Wikipedia URL + (``wikipedia_url``) and Knowledge Graph MID (``mid``), if + they are available. For the metadata associated with other + entity types, see the Type table below. + salience (float): + The salience score associated with the entity in the [0, + 1.0] range. + + The salience score for an entity provides information about + the importance or centrality of that entity to the entire + document text. Scores closer to 0 are less salient, while + scores closer to 1.0 are highly salient. + mentions (Sequence[~.language_service.EntityMention]): + The mentions of this entity in the input + document. The API currently supports proper noun + mentions. + sentiment (~.language_service.Sentiment): + For calls to [AnalyzeEntitySentiment][] or if + [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] + is set to true, this field will contain the aggregate + sentiment expressed for this entity in the provided + document. + """ + + class Type(proto.Enum): + r"""The type of the entity. For most entity types, the associated + metadata is a Wikipedia URL (``wikipedia_url``) and Knowledge Graph + MID (``mid``). The table below lists the associated fields for + entities that have different metadata. + """ + UNKNOWN = 0 + PERSON = 1 + LOCATION = 2 + ORGANIZATION = 3 + EVENT = 4 + WORK_OF_ART = 5 + CONSUMER_GOOD = 6 + OTHER = 7 + PHONE_NUMBER = 9 + ADDRESS = 10 + DATE = 11 + NUMBER = 12 + PRICE = 13 + + name = proto.Field(proto.STRING, number=1) + + type_ = proto.Field(proto.ENUM, number=2, enum=Type,) + + metadata = proto.MapField(proto.STRING, proto.STRING, number=3) + + salience = proto.Field(proto.FLOAT, number=4) + + mentions = proto.RepeatedField(proto.MESSAGE, number=5, message="EntityMention",) + + sentiment = proto.Field(proto.MESSAGE, number=6, message="Sentiment",) + + +class Token(proto.Message): + r"""Represents the smallest syntactic building block of the text. + + Attributes: + text (~.language_service.TextSpan): + The token text. + part_of_speech (~.language_service.PartOfSpeech): + Parts of speech tag for this token. + dependency_edge (~.language_service.DependencyEdge): + Dependency tree parse for this token. + lemma (str): + `Lemma `__ + of the token. + """ + + text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",) + + part_of_speech = proto.Field(proto.MESSAGE, number=2, message="PartOfSpeech",) + + dependency_edge = proto.Field(proto.MESSAGE, number=3, message="DependencyEdge",) + + lemma = proto.Field(proto.STRING, number=4) + + +class Sentiment(proto.Message): + r"""Represents the feeling associated with the entire text or + entities in the text. + Next ID: 6 + + Attributes: + magnitude (float): + A non-negative number in the [0, +inf) range, which + represents the absolute magnitude of sentiment regardless of + score (positive or negative). + score (float): + Sentiment score between -1.0 (negative + sentiment) and 1.0 (positive sentiment). + """ + + magnitude = proto.Field(proto.FLOAT, number=2) + + score = proto.Field(proto.FLOAT, number=3) + + +class PartOfSpeech(proto.Message): + r"""Represents part of speech information for a token. + + Attributes: + tag (~.language_service.PartOfSpeech.Tag): + The part of speech tag. + aspect (~.language_service.PartOfSpeech.Aspect): + The grammatical aspect. 
+ case (~.language_service.PartOfSpeech.Case): + The grammatical case. + form (~.language_service.PartOfSpeech.Form): + The grammatical form. + gender (~.language_service.PartOfSpeech.Gender): + The grammatical gender. + mood (~.language_service.PartOfSpeech.Mood): + The grammatical mood. + number (~.language_service.PartOfSpeech.Number): + The grammatical number. + person (~.language_service.PartOfSpeech.Person): + The grammatical person. + proper (~.language_service.PartOfSpeech.Proper): + The grammatical properness. + reciprocity (~.language_service.PartOfSpeech.Reciprocity): + The grammatical reciprocity. + tense (~.language_service.PartOfSpeech.Tense): + The grammatical tense. + voice (~.language_service.PartOfSpeech.Voice): + The grammatical voice. + """ + + class Tag(proto.Enum): + r"""The part of speech tags enum.""" + UNKNOWN = 0 + ADJ = 1 + ADP = 2 + ADV = 3 + CONJ = 4 + DET = 5 + NOUN = 6 + NUM = 7 + PRON = 8 + PRT = 9 + PUNCT = 10 + VERB = 11 + X = 12 + AFFIX = 13 + + class Aspect(proto.Enum): + r"""The characteristic of a verb that expresses time flow during + an event. + """ + ASPECT_UNKNOWN = 0 + PERFECTIVE = 1 + IMPERFECTIVE = 2 + PROGRESSIVE = 3 + + class Case(proto.Enum): + r"""The grammatical function performed by a noun or pronoun in a + phrase, clause, or sentence. In some languages, other parts of + speech, such as adjective and determiner, take case inflection + in agreement with the noun. + """ + CASE_UNKNOWN = 0 + ACCUSATIVE = 1 + ADVERBIAL = 2 + COMPLEMENTIVE = 3 + DATIVE = 4 + GENITIVE = 5 + INSTRUMENTAL = 6 + LOCATIVE = 7 + NOMINATIVE = 8 + OBLIQUE = 9 + PARTITIVE = 10 + PREPOSITIONAL = 11 + REFLEXIVE_CASE = 12 + RELATIVE_CASE = 13 + VOCATIVE = 14 + + class Form(proto.Enum): + r"""Depending on the language, Form can be categorizing different + forms of verbs, adjectives, adverbs, etc. For example, + categorizing inflected endings of verbs and adjectives or + distinguishing between short and long forms of adjectives and + participles + """ + FORM_UNKNOWN = 0 + ADNOMIAL = 1 + AUXILIARY = 2 + COMPLEMENTIZER = 3 + FINAL_ENDING = 4 + GERUND = 5 + REALIS = 6 + IRREALIS = 7 + SHORT = 8 + LONG = 9 + ORDER = 10 + SPECIFIC = 11 + + class Gender(proto.Enum): + r"""Gender classes of nouns reflected in the behaviour of + associated words. + """ + GENDER_UNKNOWN = 0 + FEMININE = 1 + MASCULINE = 2 + NEUTER = 3 + + class Mood(proto.Enum): + r"""The grammatical feature of verbs, used for showing modality + and attitude. + """ + MOOD_UNKNOWN = 0 + CONDITIONAL_MOOD = 1 + IMPERATIVE = 2 + INDICATIVE = 3 + INTERROGATIVE = 4 + JUSSIVE = 5 + SUBJUNCTIVE = 6 + + class Number(proto.Enum): + r"""Count distinctions.""" + NUMBER_UNKNOWN = 0 + SINGULAR = 1 + PLURAL = 2 + DUAL = 3 + + class Person(proto.Enum): + r"""The distinction between the speaker, second person, third + person, etc. + """ + PERSON_UNKNOWN = 0 + FIRST = 1 + SECOND = 2 + THIRD = 3 + REFLEXIVE_PERSON = 4 + + class Proper(proto.Enum): + r"""This category shows if the token is part of a proper name.""" + PROPER_UNKNOWN = 0 + PROPER = 1 + NOT_PROPER = 2 + + class Reciprocity(proto.Enum): + r"""Reciprocal features of a pronoun.""" + RECIPROCITY_UNKNOWN = 0 + RECIPROCAL = 1 + NON_RECIPROCAL = 2 + + class Tense(proto.Enum): + r"""Time reference.""" + TENSE_UNKNOWN = 0 + CONDITIONAL_TENSE = 1 + FUTURE = 2 + PAST = 3 + PRESENT = 4 + IMPERFECT = 5 + PLUPERFECT = 6 + + class Voice(proto.Enum): + r"""The relationship between the action that a verb expresses and + the participants identified by its arguments. 
+ """ + VOICE_UNKNOWN = 0 + ACTIVE = 1 + CAUSATIVE = 2 + PASSIVE = 3 + + tag = proto.Field(proto.ENUM, number=1, enum=Tag,) + + aspect = proto.Field(proto.ENUM, number=2, enum=Aspect,) + + case = proto.Field(proto.ENUM, number=3, enum=Case,) + + form = proto.Field(proto.ENUM, number=4, enum=Form,) + + gender = proto.Field(proto.ENUM, number=5, enum=Gender,) + + mood = proto.Field(proto.ENUM, number=6, enum=Mood,) + + number = proto.Field(proto.ENUM, number=7, enum=Number,) + + person = proto.Field(proto.ENUM, number=8, enum=Person,) + + proper = proto.Field(proto.ENUM, number=9, enum=Proper,) + + reciprocity = proto.Field(proto.ENUM, number=10, enum=Reciprocity,) + + tense = proto.Field(proto.ENUM, number=11, enum=Tense,) + + voice = proto.Field(proto.ENUM, number=12, enum=Voice,) + + +class DependencyEdge(proto.Message): + r"""Represents dependency parse tree information for a token. + + Attributes: + head_token_index (int): + Represents the head of this token in the dependency tree. + This is the index of the token which has an arc going to + this token. The index is the position of the token in the + array of tokens returned by the API method. If this token is + a root token, then the ``head_token_index`` is its own + index. + label (~.language_service.DependencyEdge.Label): + The parse label for the token. + """ + + class Label(proto.Enum): + r"""The parse label enum for the token.""" + UNKNOWN = 0 + ABBREV = 1 + ACOMP = 2 + ADVCL = 3 + ADVMOD = 4 + AMOD = 5 + APPOS = 6 + ATTR = 7 + AUX = 8 + AUXPASS = 9 + CC = 10 + CCOMP = 11 + CONJ = 12 + CSUBJ = 13 + CSUBJPASS = 14 + DEP = 15 + DET = 16 + DISCOURSE = 17 + DOBJ = 18 + EXPL = 19 + GOESWITH = 20 + IOBJ = 21 + MARK = 22 + MWE = 23 + MWV = 24 + NEG = 25 + NN = 26 + NPADVMOD = 27 + NSUBJ = 28 + NSUBJPASS = 29 + NUM = 30 + NUMBER = 31 + P = 32 + PARATAXIS = 33 + PARTMOD = 34 + PCOMP = 35 + POBJ = 36 + POSS = 37 + POSTNEG = 38 + PRECOMP = 39 + PRECONJ = 40 + PREDET = 41 + PREF = 42 + PREP = 43 + PRONL = 44 + PRT = 45 + PS = 46 + QUANTMOD = 47 + RCMOD = 48 + RCMODREL = 49 + RDROP = 50 + REF = 51 + REMNANT = 52 + REPARANDUM = 53 + ROOT = 54 + SNUM = 55 + SUFF = 56 + TMOD = 57 + TOPIC = 58 + VMOD = 59 + VOCATIVE = 60 + XCOMP = 61 + SUFFIX = 62 + TITLE = 63 + ADVPHMOD = 64 + AUXCAUS = 65 + AUXVV = 66 + DTMOD = 67 + FOREIGN = 68 + KW = 69 + LIST = 70 + NOMC = 71 + NOMCSUBJ = 72 + NOMCSUBJPASS = 73 + NUMC = 74 + COP = 75 + DISLOCATED = 76 + ASP = 77 + GMOD = 78 + GOBJ = 79 + INFMOD = 80 + MES = 81 + NCOMP = 82 + + head_token_index = proto.Field(proto.INT32, number=1) + + label = proto.Field(proto.ENUM, number=2, enum=Label,) + + +class EntityMention(proto.Message): + r"""Represents a mention for an entity in the text. Currently, + proper noun mentions are supported. + + Attributes: + text (~.language_service.TextSpan): + The mention text. + type_ (~.language_service.EntityMention.Type): + The type of the entity mention. + sentiment (~.language_service.Sentiment): + For calls to [AnalyzeEntitySentiment][] or if + [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] + is set to true, this field will contain the sentiment + expressed for this mention of the entity in the provided + document. 
+ """ + + class Type(proto.Enum): + r"""The supported types of mentions.""" + TYPE_UNKNOWN = 0 + PROPER = 1 + COMMON = 2 + + text = proto.Field(proto.MESSAGE, number=1, message="TextSpan",) + + type_ = proto.Field(proto.ENUM, number=2, enum=Type,) + + sentiment = proto.Field(proto.MESSAGE, number=3, message=Sentiment,) + + +class TextSpan(proto.Message): + r"""Represents an output piece of text. + + Attributes: + content (str): + The content of the output text. + begin_offset (int): + The API calculates the beginning offset of the content in + the original document according to the + [EncodingType][google.cloud.language.v1beta2.EncodingType] + specified in the API request. + """ + + content = proto.Field(proto.STRING, number=1) + + begin_offset = proto.Field(proto.INT32, number=2) + + +class ClassificationCategory(proto.Message): + r"""Represents a category returned from the text classifier. + + Attributes: + name (str): + The name of the category representing the document, from the + `predefined + taxonomy `__. + confidence (float): + The classifier's confidence of the category. + Number represents how certain the classifier is + that this category represents the given text. + """ + + name = proto.Field(proto.STRING, number=1) + + confidence = proto.Field(proto.FLOAT, number=2) + + +class AnalyzeSentimentRequest(proto.Message): + r"""The sentiment analysis request message. + + Attributes: + document (~.language_service.Document): + Required. Input document. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate sentence offsets for the sentence + sentiment. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) + + +class AnalyzeSentimentResponse(proto.Message): + r"""The sentiment analysis response message. + + Attributes: + document_sentiment (~.language_service.Sentiment): + The overall sentiment of the input document. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1beta2.Document.language] + field for more details. + sentences (Sequence[~.language_service.Sentence]): + The sentiment for all the sentences in the + document. + """ + + document_sentiment = proto.Field(proto.MESSAGE, number=1, message=Sentiment,) + + language = proto.Field(proto.STRING, number=2) + + sentences = proto.RepeatedField(proto.MESSAGE, number=3, message=Sentence,) + + +class AnalyzeEntitySentimentRequest(proto.Message): + r"""The entity-level sentiment analysis request message. + + Attributes: + document (~.language_service.Document): + Required. Input document. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate offsets. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) + + +class AnalyzeEntitySentimentResponse(proto.Message): + r"""The entity-level sentiment analysis response message. + + Attributes: + entities (Sequence[~.language_service.Entity]): + The recognized entities in the input document + with associated sentiments. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. 
See + [Document.language][google.cloud.language.v1beta2.Document.language] + field for more details. + """ + + entities = proto.RepeatedField(proto.MESSAGE, number=1, message=Entity,) + + language = proto.Field(proto.STRING, number=2) + + +class AnalyzeEntitiesRequest(proto.Message): + r"""The entity analysis request message. + + Attributes: + document (~.language_service.Document): + Required. Input document. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate offsets. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) + + +class AnalyzeEntitiesResponse(proto.Message): + r"""The entity analysis response message. + + Attributes: + entities (Sequence[~.language_service.Entity]): + The recognized entities in the input + document. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1beta2.Document.language] + field for more details. + """ + + entities = proto.RepeatedField(proto.MESSAGE, number=1, message=Entity,) + + language = proto.Field(proto.STRING, number=2) + + +class AnalyzeSyntaxRequest(proto.Message): + r"""The syntax analysis request message. + + Attributes: + document (~.language_service.Document): + Required. Input document. + encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate offsets. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + encoding_type = proto.Field(proto.ENUM, number=2, enum="EncodingType",) + + +class AnalyzeSyntaxResponse(proto.Message): + r"""The syntax analysis response message. + + Attributes: + sentences (Sequence[~.language_service.Sentence]): + Sentences in the input document. + tokens (Sequence[~.language_service.Token]): + Tokens, along with their syntactic + information, in the input document. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1beta2.Document.language] + field for more details. + """ + + sentences = proto.RepeatedField(proto.MESSAGE, number=1, message=Sentence,) + + tokens = proto.RepeatedField(proto.MESSAGE, number=2, message=Token,) + + language = proto.Field(proto.STRING, number=3) + + +class ClassifyTextRequest(proto.Message): + r"""The document classification request message. + + Attributes: + document (~.language_service.Document): + Required. Input document. + """ + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + +class ClassifyTextResponse(proto.Message): + r"""The document classification response message. + + Attributes: + categories (Sequence[~.language_service.ClassificationCategory]): + Categories representing the input document. + """ + + categories = proto.RepeatedField( + proto.MESSAGE, number=1, message=ClassificationCategory, + ) + + +class AnnotateTextRequest(proto.Message): + r"""The request message for the text annotation API, which can + perform multiple analysis types (sentiment, entities, and + syntax) in one call. + + Attributes: + document (~.language_service.Document): + Required. Input document. + features (~.language_service.AnnotateTextRequest.Features): + Required. The enabled features. 
+ encoding_type (~.language_service.EncodingType): + The encoding type used by the API to + calculate offsets. + """ + + class Features(proto.Message): + r"""All available features for sentiment, syntax, and semantic + analysis. Setting each one to true will enable that specific + analysis for the input. Next ID: 10 + + Attributes: + extract_syntax (bool): + Extract syntax information. + extract_entities (bool): + Extract entities. + extract_document_sentiment (bool): + Extract document-level sentiment. + extract_entity_sentiment (bool): + Extract entities and their associated + sentiment. + classify_text (bool): + Classify the full document into categories. If this is true, + the API will use the default model which classifies into a + `predefined + taxonomy `__. + """ + + extract_syntax = proto.Field(proto.BOOL, number=1) + + extract_entities = proto.Field(proto.BOOL, number=2) + + extract_document_sentiment = proto.Field(proto.BOOL, number=3) + + extract_entity_sentiment = proto.Field(proto.BOOL, number=4) + + classify_text = proto.Field(proto.BOOL, number=6) + + document = proto.Field(proto.MESSAGE, number=1, message=Document,) + + features = proto.Field(proto.MESSAGE, number=2, message=Features,) + + encoding_type = proto.Field(proto.ENUM, number=3, enum="EncodingType",) + + +class AnnotateTextResponse(proto.Message): + r"""The text annotations response message. + + Attributes: + sentences (Sequence[~.language_service.Sentence]): + Sentences in the input document. Populated if the user + enables + [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax]. + tokens (Sequence[~.language_service.Token]): + Tokens, along with their syntactic information, in the input + document. Populated if the user enables + [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax]. + entities (Sequence[~.language_service.Entity]): + Entities, along with their semantic information, in the + input document. Populated if the user enables + [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entities]. + document_sentiment (~.language_service.Sentiment): + The overall sentiment for the document. Populated if the + user enables + [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment]. + language (str): + The language of the text, which will be the same as the + language specified in the request or, if not specified, the + automatically-detected language. See + [Document.language][google.cloud.language.v1beta2.Document.language] + field for more details. + categories (Sequence[~.language_service.ClassificationCategory]): + Categories identified in the input document. 
+ """ + + sentences = proto.RepeatedField(proto.MESSAGE, number=1, message=Sentence,) + + tokens = proto.RepeatedField(proto.MESSAGE, number=2, message=Token,) + + entities = proto.RepeatedField(proto.MESSAGE, number=3, message=Entity,) + + document_sentiment = proto.Field(proto.MESSAGE, number=4, message=Sentiment,) + + language = proto.Field(proto.STRING, number=5) + + categories = proto.RepeatedField( + proto.MESSAGE, number=6, message=ClassificationCategory, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..4505b485 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/noxfile.py b/noxfile.py index 6b4fa3b0..e1a2051c 100644 --- a/noxfile.py +++ b/noxfile.py @@ -23,14 +23,15 @@ import nox -BLACK_VERSION = "black==19.3b0" +BLACK_VERSION = "black==19.10b0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -if os.path.exists("samples"): - BLACK_PATHS.append("samples") +DEFAULT_PYTHON_VERSION = "3.8" +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): """Run linters. @@ -38,7 +39,9 @@ def lint(session): serious code quality issues. """ session.install("flake8", BLACK_VERSION) - session.run("black", "--check", *BLACK_PATHS) + session.run( + "black", "--check", *BLACK_PATHS, + ) session.run("flake8", "google", "tests") @@ -53,10 +56,12 @@ def blacken(session): check the state of the `gcp_ubuntu_config` we use for that Kokoro run. """ session.install(BLACK_VERSION) - session.run("black", *BLACK_PATHS) + session.run( + "black", *BLACK_PATHS, + ) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.install("docutils", "pygments") @@ -65,6 +70,8 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. + session.install("asyncmock", "pytest-asyncio") + session.install("mock", "pytest", "pytest-cov") session.install("-e", ".") @@ -72,6 +79,7 @@ def default(session): session.run( "py.test", "--quiet", + "--cov=google.cloud.language", "--cov=google.cloud", "--cov=tests.unit", "--cov-append", @@ -83,17 +91,21 @@ def default(session): ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python=["2.7", "3.7"]) +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") @@ -109,7 +121,9 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install("mock", "pytest") + session.install( + "mock", "pytest", "google-cloud-testutils", + ) session.install("-e", ".") # Run py.test against the system tests. 
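The unit and system sessions configured above exercise the newly generated v1beta2 surface. For orientation, here is a minimal sketch (not part of this patch) showing how the proto-plus types and the synchronous gRPC transport introduced earlier fit together. It assumes Application Default Credentials are available in the environment; the sample text is invented, and real callers would normally go through the primary client, which also applies the retry/timeout defaults prepared in _prep_wrapped_messages, rather than invoking the raw transport stub directly.

from google.cloud.language_v1beta2.types import language_service
from google.cloud.language_v1beta2.services.language_service.transports.grpc import (
    LanguageServiceGrpcTransport,
)

# Opens a secure channel to language.googleapis.com:443 using Application
# Default Credentials (assumed to be configured in the environment).
transport = LanguageServiceGrpcTransport()

request = language_service.AnalyzeSentimentRequest(
    document=language_service.Document(
        type_=language_service.Document.Type.PLAIN_TEXT,
        content="The generated v1beta2 layer is pleasant to work with.",
    ),
    encoding_type=language_service.EncodingType.UTF8,
)

# The analyze_sentiment property lazily creates and caches a unary-unary
# stub; calling it sends the RPC and returns an AnalyzeSentimentResponse.
response = transport.analyze_sentiment(request)
print(response.document_sentiment.score, response.document_sentiment.magnitude)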
@@ -119,25 +133,7 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python=["3.7"]) -def samples(session): - """Run the samples test suite.""" - # Sanity check: Only run tests if the environment variable is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable") - - samples_path = "samples" - if not os.path.exists(samples_path): - session.skip("Samples not found.") - - session.install("pyyaml") - session.install("sample-tester") - session.install("-e", ".") - - session.run("sample-tester", samples_path, *session.posargs) - - -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def cover(session): """Run the final coverage report. @@ -145,12 +141,12 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=87") + session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def docs(session): """Build the docs for this library.""" @@ -170,3 +166,38 @@ def docs(session): os.path.join("docs", ""), os.path.join("docs", "_build", "html", ""), ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docfx(session): + """Build the docfx yaml files for this library.""" + + session.install("-e", ".") + # sphinx-docfx-yaml supports up to sphinx version 1.5.5. + # https://github.com/docascode/sphinx-docfx-yaml/issues/97 + session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-T", # show full traceback on exception + "-N", # no colors + "-D", + ( + "extensions=sphinx.ext.autodoc," + "sphinx.ext.autosummary," + "docfx_yaml.extension," + "sphinx.ext.intersphinx," + "sphinx.ext.coverage," + "sphinx.ext.napoleon," + "sphinx.ext.todo," + "sphinx.ext.viewcode," + "recommonmark" + ), + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/samples/snippets/api/README.rst b/samples/snippets/api/README.rst index 5f4edfd2..0d9d9451 100644 --- a/samples/snippets/api/README.rst +++ b/samples/snippets/api/README.rst @@ -14,6 +14,10 @@ This directory contains samples for Google Cloud Natural Language API. The `Goog .. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + + + + Setup ------------------------------------------------------------------------------- diff --git a/samples/snippets/api/analyze_test.py b/samples/snippets/api/analyze_test.py index b4a0db67..c797e2e3 100644 --- a/samples/snippets/api/analyze_test.py +++ b/samples/snippets/api/analyze_test.py @@ -37,8 +37,7 @@ def test_analyze_sentiment(capsys): assert sentiment["magnitude"] < 1 result = analyze.analyze_sentiment( - "cheerio, mate - I greatly admire the pallor of your visage, and your " - "angle of repose leaves little room for improvement." + "cheerio, mate - I greatly admire the pallor of your visage, and your angle of repose leaves little room for improvement." 
) sentiment = result["documentSentiment"] diff --git a/samples/snippets/api/noxfile.py b/samples/snippets/api/noxfile.py index 5660f08b..ba55d7ce 100644 --- a/samples/snippets/api/noxfile.py +++ b/samples/snippets/api/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/samples/snippets/classify_text/README.rst b/samples/snippets/classify_text/README.rst index a1112f21..757debb0 100644 --- a/samples/snippets/classify_text/README.rst +++ b/samples/snippets/classify_text/README.rst @@ -18,6 +18,10 @@ This tutorial demostrates how to use the `classify_text` method to classify cont .. _Google Cloud Natural Language API: https://cloud.google.com/natural-language/docs/ + + + + Setup ------------------------------------------------------------------------------- diff --git a/samples/snippets/classify_text/classify_text_tutorial.py b/samples/snippets/classify_text/classify_text_tutorial.py index fcd5008b..9c05b83f 100644 --- a/samples/snippets/classify_text/classify_text_tutorial.py +++ b/samples/snippets/classify_text/classify_text_tutorial.py @@ -26,7 +26,7 @@ import json import os -from google.cloud import language +from google.cloud import language_v1 import numpy import six @@ -37,12 +37,12 @@ def classify(text, verbose=True): """Classify the input text into categories. 
""" - language_client = language.LanguageServiceClient() + language_client = language_v1.LanguageServiceClient() - document = language.types.Document( - content=text, type=language.enums.Document.Type.PLAIN_TEXT + document = language_v1.Document( + content=text, type_=language_v1.Document.Type.PLAIN_TEXT ) - response = language_client.classify_text(document) + response = language_client.classify_text(request={'document': document}) categories = response.categories result = {} diff --git a/samples/snippets/classify_text/noxfile.py b/samples/snippets/classify_text/noxfile.py index 5660f08b..ba55d7ce 100644 --- a/samples/snippets/classify_text/noxfile.py +++ b/samples/snippets/classify_text/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/samples/snippets/cloud-client/v1/noxfile.py b/samples/snippets/cloud-client/v1/noxfile.py index 5660f08b..ba55d7ce 100644 --- a/samples/snippets/cloud-client/v1/noxfile.py +++ b/samples/snippets/cloud-client/v1/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. 
- "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/samples/snippets/cloud-client/v1/quickstart.py b/samples/snippets/cloud-client/v1/quickstart.py index 2cf46437..4c4b06b5 100644 --- a/samples/snippets/cloud-client/v1/quickstart.py +++ b/samples/snippets/cloud-client/v1/quickstart.py @@ -19,23 +19,21 @@ def run_quickstart(): # [START language_quickstart] # Imports the Google Cloud client library # [START language_python_migration_imports] - from google.cloud import language - from google.cloud.language import enums - from google.cloud.language import types + from google.cloud import language_v1 # [END language_python_migration_imports] # Instantiates a client # [START language_python_migration_client] - client = language.LanguageServiceClient() + client = language_v1.LanguageServiceClient() # [END language_python_migration_client] # The text to analyze text = u"Hello, world!" 
- document = types.Document(content=text, type=enums.Document.Type.PLAIN_TEXT) + document = language_v1.Document(content=text, type_=language_v1.Document.Type.PLAIN_TEXT) # Detects the sentiment of the text - sentiment = client.analyze_sentiment(document=document).document_sentiment + sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment print("Text: {}".format(text)) print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) diff --git a/samples/snippets/cloud-client/v1/set_endpoint.py b/samples/snippets/cloud-client/v1/set_endpoint.py index 340d5180..e9ad97d3 100644 --- a/samples/snippets/cloud-client/v1/set_endpoint.py +++ b/samples/snippets/cloud-client/v1/set_endpoint.py @@ -17,21 +17,21 @@ def set_endpoint(): """Change your endpoint""" # [START language_set_endpoint] # Imports the Google Cloud client library - from google.cloud import language + from google.cloud import language_v1 client_options = {"api_endpoint": "eu-language.googleapis.com:443"} # Instantiates a client - client = language.LanguageServiceClient(client_options=client_options) + client = language_v1.LanguageServiceClient(client_options=client_options) # [END language_set_endpoint] # The text to analyze - document = language.types.Document( - content="Hello, world!", type=language.enums.Document.Type.PLAIN_TEXT + document = language_v1.Document( + content="Hello, world!", type_=language_v1.Document.Type.PLAIN_TEXT ) # Detects the sentiment of the text - sentiment = client.analyze_sentiment(document=document).document_sentiment + sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude)) diff --git a/samples/snippets/generated-samples/v1/language_sentiment_text.py b/samples/snippets/generated-samples/v1/language_sentiment_text.py index c28a3665..9f975023 100644 --- a/samples/snippets/generated-samples/v1/language_sentiment_text.py +++ b/samples/snippets/generated-samples/v1/language_sentiment_text.py @@ -24,7 +24,6 @@ # [START language_sentiment_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums import six @@ -37,10 +36,10 @@ def sample_analyze_sentiment(content): if isinstance(content, six.binary_type): content = content.decode("utf-8") - type_ = enums.Document.Type.PLAIN_TEXT - document = {"type": type_, "content": content} + type_ = language_v1.Document.Type.PLAIN_TEXT + document = {"type_": type_, "content": content} - response = client.analyze_sentiment(document) + response = client.analyze_sentiment(request={'document': document}) sentiment = response.document_sentiment print("Score: {}".format(sentiment.score)) print("Magnitude: {}".format(sentiment.magnitude)) diff --git a/samples/snippets/generated-samples/v1/noxfile.py b/samples/snippets/generated-samples/v1/noxfile.py index 5660f08b..ba55d7ce 100644 --- a/samples/snippets/generated-samples/v1/noxfile.py +++ b/samples/snippets/generated-samples/v1/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. 
- "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/samples/snippets/sentiment/noxfile.py b/samples/snippets/sentiment/noxfile.py index 5660f08b..ba55d7ce 100644 --- a/samples/snippets/sentiment/noxfile.py +++ b/samples/snippets/sentiment/noxfile.py @@ -37,22 +37,24 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -67,12 +69,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -81,7 +83,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. 
-IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +138,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) @@ -180,9 +182,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/samples/snippets/sentiment/sentiment_analysis.py b/samples/snippets/sentiment/sentiment_analysis.py index aef7a658..2333bf82 100644 --- a/samples/snippets/sentiment/sentiment_analysis.py +++ b/samples/snippets/sentiment/sentiment_analysis.py @@ -17,9 +17,7 @@ # [START language_sentiment_tutorial_imports] import argparse -from google.cloud import language -from google.cloud.language import enums -from google.cloud.language import types +from google.cloud import language_v1 # [END language_sentiment_tutorial_imports] @@ -47,14 +45,14 @@ def print_result(annotations): # [START language_sentiment_tutorial_analyze_sentiment] def analyze(movie_review_filename): """Run a sentiment analysis request on text within a passed filename.""" - client = language.LanguageServiceClient() + client = language_v1.LanguageServiceClient() with open(movie_review_filename, "r") as review_file: # Instantiates a plain text document. content = review_file.read() - document = types.Document(content=content, type=enums.Document.Type.PLAIN_TEXT) - annotations = client.analyze_sentiment(document=document) + document = language_v1.Document(content=content, type_=language_v1.Document.Type.PLAIN_TEXT) + annotations = client.analyze_sentiment(request={'document': document}) # Print the results print_result(annotations) diff --git a/samples/v1/language_classify_gcs.py b/samples/v1/language_classify_gcs.py index 941640b1..a20789cc 100644 --- a/samples/v1/language_classify_gcs.py +++ b/samples/v1/language_classify_gcs.py @@ -26,8 +26,6 @@ # [START language_classify_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_classify_text(gcs_content_uri): """ @@ -44,7 +42,7 @@ def sample_classify_text(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/classify-entertainment.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -52,7 +50,7 @@ def sample_classify_text(gcs_content_uri): language = "en" document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} - response = client.classify_text(document) + response = client.classify_text(request = {'document': document}) # Loop through classified categories returned from the API for category in response.categories: # Get the name of the category representing the document. 
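The sample rewrites above and below all follow the same migration pattern: enum and message types hang directly off language_v1 (the separate enums module is gone), Document's type field is spelled type_, and client methods take a single request mapping instead of flattened arguments. A condensed sketch of the new calling convention, using classify_text and an arbitrary piece of text:

from google.cloud import language_v1

client = language_v1.LanguageServiceClient()

# Document.type is now spelled type_, and enums hang directly off language_v1.
document = language_v1.Document(
    content="That actor on TV makes movies in Hollywood and also stars in a "
    "variety of popular new TV shows.",
    type_=language_v1.Document.Type.PLAIN_TEXT,
)

# Old surface: response = client.classify_text(document)
# New surface: every non-control argument is wrapped in a single request dict.
response = client.classify_text(request={"document": document})

for category in response.categories:
    print(category.name, category.confidence)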
diff --git a/samples/v1/language_classify_text.py b/samples/v1/language_classify_text.py index 52175f02..ad55d26c 100644 --- a/samples/v1/language_classify_text.py +++ b/samples/v1/language_classify_text.py @@ -26,8 +26,6 @@ # [START language_classify_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_classify_text(text_content): """ @@ -42,7 +40,7 @@ def sample_classify_text(text_content): # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -50,7 +48,7 @@ def sample_classify_text(text_content): language = "en" document = {"content": text_content, "type": type_, "language": language} - response = client.classify_text(document) + response = client.classify_text(request = {'document': document}) # Loop through classified categories returned from the API for category in response.categories: # Get the name of the category representing the document. diff --git a/samples/v1/language_entities_gcs.py b/samples/v1/language_entities_gcs.py index 790592ca..d735e885 100644 --- a/samples/v1/language_entities_gcs.py +++ b/samples/v1/language_entities_gcs.py @@ -26,8 +26,6 @@ # [START language_entities_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_entities(gcs_content_uri): """ @@ -43,7 +41,7 @@ def sample_analyze_entities(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/entity.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -52,14 +50,14 @@ def sample_analyze_entities(gcs_content_uri): document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1..EncodingType.UTF8 - response = client.analyze_entities(document, encoding_type=encoding_type) + response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Loop over the metadata associated with entity. For many known entities, @@ -75,7 +73,7 @@ def sample_analyze_entities(gcs_content_uri): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. 
PROPER for proper noun print( - u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) ) # Get the language of the text, which will be the same as diff --git a/samples/v1/language_entities_text.py b/samples/v1/language_entities_text.py index 464a313d..db2ad9e2 100644 --- a/samples/v1/language_entities_text.py +++ b/samples/v1/language_entities_text.py @@ -26,8 +26,6 @@ # [START language_entities_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_entities(text_content): """ @@ -42,7 +40,7 @@ def sample_analyze_entities(text_content): # text_content = 'California is a state.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -51,16 +49,16 @@ def sample_analyze_entities(text_content): document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entities(document, encoding_type=encoding_type) + response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) @@ -79,7 +77,7 @@ def sample_analyze_entities(text_content): # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) ) # Get the language of the text, which will be the same as diff --git a/samples/v1/language_entity_sentiment_gcs.py b/samples/v1/language_entity_sentiment_gcs.py index 9fafa737..2a4c6ff3 100644 --- a/samples/v1/language_entity_sentiment_gcs.py +++ b/samples/v1/language_entity_sentiment_gcs.py @@ -26,8 +26,6 @@ # [START language_entity_sentiment_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_entity_sentiment(gcs_content_uri): """ @@ -43,7 +41,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/entity-sentiment.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. 
# For list of supported languages: @@ -52,14 +50,14 @@ def sample_analyze_entity_sentiment(gcs_content_uri): document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entity_sentiment(document, encoding_type=encoding_type) + response = client.analyze_entity_sentiment(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Get the aggregate sentiment expressed for this entity in the provided document. @@ -79,7 +77,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. PROPER for proper noun print( - u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) ) # Get the language of the text, which will be the same as diff --git a/samples/v1/language_entity_sentiment_text.py b/samples/v1/language_entity_sentiment_text.py index 9b3d5b8a..20c9dbd8 100644 --- a/samples/v1/language_entity_sentiment_text.py +++ b/samples/v1/language_entity_sentiment_text.py @@ -26,8 +26,6 @@ # [START language_entity_sentiment_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_entity_sentiment(text_content): """ @@ -42,7 +40,7 @@ def sample_analyze_entity_sentiment(text_content): # text_content = 'Grapes are good. Bananas are bad.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -51,14 +49,14 @@ def sample_analyze_entity_sentiment(text_content): document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_entity_sentiment(document, encoding_type=encoding_type) + response = client.analyze_entity_sentiment(request = {'document': document, 'encoding_type': encoding_type}) # Loop through entitites returned from the API for entity in response.entities: print(u"Representative name for the entity: {}".format(entity.name)) # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al - print(u"Entity type: {}".format(enums.Entity.Type(entity.type).name)) + print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name)) # Get the salience score associated with the entity in the [0, 1.0] range print(u"Salience score: {}".format(entity.salience)) # Get the aggregate sentiment expressed for this entity in the provided document. @@ -78,7 +76,7 @@ def sample_analyze_entity_sentiment(text_content): print(u"Mention text: {}".format(mention.text.content)) # Get the mention type, e.g. 
PROPER for proper noun print( - u"Mention type: {}".format(enums.EntityMention.Type(mention.type).name) + u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name) ) # Get the language of the text, which will be the same as diff --git a/samples/v1/language_sentiment_gcs.py b/samples/v1/language_sentiment_gcs.py index 261f2f3e..68839805 100644 --- a/samples/v1/language_sentiment_gcs.py +++ b/samples/v1/language_sentiment_gcs.py @@ -26,8 +26,6 @@ # [START language_sentiment_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_sentiment(gcs_content_uri): """ @@ -43,7 +41,7 @@ def sample_analyze_sentiment(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/sentiment-positive.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -52,9 +50,9 @@ def sample_analyze_sentiment(gcs_content_uri): document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_sentiment(document, encoding_type=encoding_type) + response = client.analyze_sentiment(request = {'document': document, 'encoding_type': encoding_type}) # Get overall sentiment of the input document print(u"Document sentiment score: {}".format(response.document_sentiment.score)) print( diff --git a/samples/v1/language_sentiment_text.py b/samples/v1/language_sentiment_text.py index 12f1e221..0be2b6cf 100644 --- a/samples/v1/language_sentiment_text.py +++ b/samples/v1/language_sentiment_text.py @@ -26,8 +26,6 @@ # [START language_sentiment_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_sentiment(text_content): """ @@ -42,7 +40,7 @@ def sample_analyze_sentiment(text_content): # text_content = 'I am so happy and joyful.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. 
# For list of supported languages: @@ -51,9 +49,9 @@ def sample_analyze_sentiment(text_content): document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_sentiment(document, encoding_type=encoding_type) + response = client.analyze_sentiment(request = {'document': document, 'encoding_type': encoding_type}) # Get overall sentiment of the input document print(u"Document sentiment score: {}".format(response.document_sentiment.score)) print( diff --git a/samples/v1/language_syntax_gcs.py b/samples/v1/language_syntax_gcs.py index 32bf2acb..e04be406 100644 --- a/samples/v1/language_syntax_gcs.py +++ b/samples/v1/language_syntax_gcs.py @@ -26,8 +26,6 @@ # [START language_syntax_gcs] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_syntax(gcs_content_uri): """ @@ -43,7 +41,7 @@ def sample_analyze_syntax(gcs_content_uri): # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -52,9 +50,9 @@ def sample_analyze_syntax(gcs_content_uri): document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_syntax(document, encoding_type=encoding_type) + response = client.analyze_syntax(request = {'document': document, 'encoding_type': encoding_type}) # Loop through tokens returned from the API for token in response.tokens: # Get the text content of this token. Usually a word or punctuation. @@ -70,13 +68,13 @@ def sample_analyze_syntax(gcs_content_uri): # Get the tag, e.g. NOUN, ADJ for Adjective, et al. print( u"Part of Speech tag: {}".format( - enums.PartOfSpeech.Tag(part_of_speech.tag).name + language_v1.PartOfSpeech.Tag(part_of_speech.tag).name ) ) # Get the voice, e.g. ACTIVE or PASSIVE - print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) + print(u"Voice: {}".format(language_v1.PartOfSpeech.Voice(part_of_speech.voice).name)) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. - print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name)) + print(u"Tense: {}".format(language_v1.PartOfSpeech.Tense(part_of_speech.tense).name)) # See API reference for additional Part of Speech information available # Get the lemma of the token. 
Wikipedia lemma description # https://en.wikipedia.org/wiki/Lemma_(morphology) @@ -87,7 +85,7 @@ def sample_analyze_syntax(gcs_content_uri): dependency_edge = token.dependency_edge print(u"Head token index: {}".format(dependency_edge.head_token_index)) print( - u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name) + u"Label: {}".format(language_v1.DependencyEdge.Label(dependency_edge.label).name) ) # Get the language of the text, which will be the same as diff --git a/samples/v1/language_syntax_text.py b/samples/v1/language_syntax_text.py index 29041886..9f37e92c 100644 --- a/samples/v1/language_syntax_text.py +++ b/samples/v1/language_syntax_text.py @@ -26,8 +26,6 @@ # [START language_syntax_text] from google.cloud import language_v1 -from google.cloud.language_v1 import enums - def sample_analyze_syntax(text_content): """ @@ -42,7 +40,7 @@ def sample_analyze_syntax(text_content): # text_content = 'This is a short sentence.' # Available types: PLAIN_TEXT, HTML - type_ = enums.Document.Type.PLAIN_TEXT + type_ = language_v1.Document.Type.PLAIN_TEXT # Optional. If not specified, the language is automatically detected. # For list of supported languages: @@ -51,9 +49,9 @@ def sample_analyze_syntax(text_content): document = {"content": text_content, "type": type_, "language": language} # Available values: NONE, UTF8, UTF16, UTF32 - encoding_type = enums.EncodingType.UTF8 + encoding_type = language_v1.EncodingType.UTF8 - response = client.analyze_syntax(document, encoding_type=encoding_type) + response = client.analyze_syntax(request = {'document': document, 'encoding_type': encoding_type}) # Loop through tokens returned from the API for token in response.tokens: # Get the text content of this token. Usually a word or punctuation. @@ -69,13 +67,13 @@ def sample_analyze_syntax(text_content): # Get the tag, e.g. NOUN, ADJ for Adjective, et al. print( u"Part of Speech tag: {}".format( - enums.PartOfSpeech.Tag(part_of_speech.tag).name + language_v1.PartOfSpeech.Tag(part_of_speech.tag).name ) ) # Get the voice, e.g. ACTIVE or PASSIVE - print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name)) + print(u"Voice: {}".format(language_v1.PartOfSpeech.Voice(part_of_speech.voice).name)) # Get the tense, e.g. PAST, FUTURE, PRESENT, et al. - print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name)) + print(u"Tense: {}".format(language_v1.PartOfSpeech.Tense(part_of_speech.tense).name)) # See API reference for additional Part of Speech information available # Get the lemma of the token. Wikipedia lemma description # https://en.wikipedia.org/wiki/Lemma_(morphology) @@ -86,7 +84,7 @@ def sample_analyze_syntax(text_content): dependency_edge = token.dependency_edge print(u"Head token index: {}".format(dependency_edge.head_token_index)) print( - u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name) + u"Label: {}".format(language_v1.DependencyEdge.Label(dependency_edge.label).name) ) # Get the language of the text, which will be the same as diff --git a/scripts/fixup_language_v1_keywords.py b/scripts/fixup_language_v1_keywords.py new file mode 100644 index 00000000..c7c107ce --- /dev/null +++ b/scripts/fixup_language_v1_keywords.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class languageCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'analyze_entities': ('document', 'encoding_type', ), + 'analyze_entity_sentiment': ('document', 'encoding_type', ), + 'analyze_sentiment': ('document', 'encoding_type', ), + 'analyze_syntax': ('document', 'encoding_type', ), + 'annotate_text': ('document', 'features', 'encoding_type', ), + 'classify_text': ('document', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=languageCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. 
+ updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the language client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/scripts/fixup_language_v1beta2_keywords.py b/scripts/fixup_language_v1beta2_keywords.py new file mode 100644 index 00000000..c7c107ce --- /dev/null +++ b/scripts/fixup_language_v1beta2_keywords.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class languageCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'analyze_entities': ('document', 'encoding_type', ), + 'analyze_entity_sentiment': ('document', 'encoding_type', ), + 'analyze_sentiment': ('document', 'encoding_type', ), + 'analyze_syntax': ('document', 'encoding_type', ), + 'annotate_text': ('document', 'features', 'encoding_type', ), + 'classify_text': ('document', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=languageCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the language client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. 
+ +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/setup.py b/setup.py index 2c11a7b5..2020b3c6 100644 --- a/setup.py +++ b/setup.py @@ -29,8 +29,9 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", - 'enum34;python_version<"3.4"', + "google-api-core[grpc] >= 1.22.2, < 2.0.0dev", + "proto-plus >= 1.4.0", + "libcst >= 0.2.5", ] extras = {} @@ -46,7 +47,9 @@ # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ - package for package in setuptools.find_packages() if package.startswith("google") + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") ] # Determine which namespaces are needed. 
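The fixup_language_v1_keywords.py and fixup_language_v1beta2_keywords.py scripts above (registered under scripts= in the setup.py hunk that follows) use libcst to rewrite flattened client calls into the request= form. A hedged sketch of the transformation they perform; the sample code and the directory names in the invocation are illustrative only:

# Per the argparse options above, the tool is run as, e.g.:
#   python scripts/fixup_language_v1_keywords.py -d my_project/ -o my_project_fixed/
# (the output directory must already exist and be empty).

from google.cloud import language_v1

client = language_v1.LanguageServiceClient()
document = {"content": "Hello, world!", "type_": language_v1.Document.Type.PLAIN_TEXT}

# Before fixup:
#   client.analyze_sentiment(document, encoding_type=language_v1.EncodingType.UTF8, timeout=30)
# After fixup: control parameters (retry, timeout, metadata) stay as keywords,
# everything else is folded into the request dict.
response = client.analyze_sentiment(
    request={"document": document, "encoding_type": language_v1.EncodingType.UTF8},
    timeout=30,
)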
@@ -69,12 +72,10 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Operating System :: OS Independent", "Topic :: Internet", ], @@ -83,7 +84,11 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.6", + scripts=[ + "scripts/fixup_language_v1_keywords.py", + "scripts/fixup_language_v1beta2_keywords.py", + ], include_package_data=True, zip_safe=False, ) diff --git a/synth.metadata b/synth.metadata index 1da2f3fc..bc28899b 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,29 +3,22 @@ { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-language.git", - "sha": "2084dc18f3f495ceb753e4131ca616c17b25cf86" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "b7f574bddb451d81aa222dad7dcecf3477cb97ed" + "remote": "git@github.com:/googleapis/python-language.git", + "sha": "cde50983b6d45fd0b2348eeb552404b391403bc6" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "da29da32b3a988457b49ae290112b74f14b713cc" + "sha": "0c868d49b8e05bc1f299bc773df9eb4ef9ed96e9" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "da29da32b3a988457b49ae290112b74f14b713cc" + "sha": "0c868d49b8e05bc1f299bc773df9eb4ef9ed96e9" } } ], diff --git a/synth.py b/synth.py index ee783601..d1aec55f 100644 --- a/synth.py +++ b/synth.py @@ -33,35 +33,14 @@ bazel_target=f"//google/cloud/language/{version}:language-{version}-py", include_protos=True, ) - - s.move(library / f"google/cloud/language_{version}/proto") - s.move(library / f"google/cloud/language_{version}/gapic") - s.move(library / f"tests/unit/gapic/{version}") - s.move(library / f"tests/system/gapic/{version}") - s.move(library / f"samples") + s.move(library, excludes=["docs/index.rst", "README.rst", "setup.py"]) # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=100, samples=True) - -s.move(templated_files, excludes=['noxfile.py']) - -s.replace("google/cloud/**/language_service_pb2.py", -'''__doc__ = """################################################################ - # - - Represents the input to API methods.''', -'''__doc__="""Represents the input to API methods.''' -) -s.replace( - f"google/cloud/**/gapic/language_service_client.py", - r"types\.EncodingType", - "enums.EncodingType", -) +templated_files = common.py_library(cov_level=99, samples=True, microgenerator=True,) -# TODO(busunkim): Use latest sphinx after microgenerator transition -s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') +s.move(templated_files, excludes=['.coveragerc']) s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/tests/__init__.py b/tests/__init__.py deleted 
file mode 100644 index e69de29b..00000000 diff --git a/tests/system/gapic/v1/test_system_language_service_v1.py b/tests/system/gapic/v1/test_system_language_service_v1.py deleted file mode 100644 index e54b9339..00000000 --- a/tests/system/gapic/v1/test_system_language_service_v1.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import time - -from google.cloud import language_v1 -from google.cloud.language_v1 import enums -from google.cloud.language_v1.proto import language_service_pb2 - - -class TestSystemLanguageService(object): - def test_analyze_sentiment(self): - - client = language_v1.LanguageServiceClient() - content = "Hello, world!" - type_ = enums.Document.Type.PLAIN_TEXT - document = {"content": content, "type": type_} - response = client.analyze_sentiment(document) diff --git a/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py b/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py deleted file mode 100644 index 81edf7d6..00000000 --- a/tests/system/gapic/v1beta2/test_system_language_service_v1beta2.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import time - -from google.cloud import language_v1beta2 -from google.cloud.language_v1beta2 import enums -from google.cloud.language_v1beta2.proto import language_service_pb2 - - -class TestSystemLanguageService(object): - def test_analyze_sentiment(self): - - client = language_v1beta2.LanguageServiceClient() - content = "Hello, world!" 
- type_ = enums.Document.Type.PLAIN_TEXT - document = {"content": content, "type": type_} - encoding_type = enums.EncodingType.NONE - response = client.analyze_sentiment(document, encoding_type=encoding_type) diff --git a/tests/unit/gapic/language_v1/__init__.py b/tests/unit/gapic/language_v1/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/tests/unit/gapic/language_v1/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/language_v1/test_language_service.py b/tests/unit/gapic/language_v1/test_language_service.py new file mode 100644 index 00000000..6ccbebf7 --- /dev/null +++ b/tests/unit/gapic/language_v1/test_language_service.py @@ -0,0 +1,1771 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.language_v1.services.language_service import ( + LanguageServiceAsyncClient, +) +from google.cloud.language_v1.services.language_service import LanguageServiceClient +from google.cloud.language_v1.services.language_service import transports +from google.cloud.language_v1.types import language_service +from google.oauth2 import service_account + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert LanguageServiceClient._get_default_mtls_endpoint(None) is None + assert ( + LanguageServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + LanguageServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + LanguageServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + LanguageServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + LanguageServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [LanguageServiceClient, LanguageServiceAsyncClient] +) +def test_language_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "language.googleapis.com:443" + + +def test_language_service_client_get_transport_class(): + transport = LanguageServiceClient.get_transport_class() + assert transport == transports.LanguageServiceGrpcTransport + + transport = LanguageServiceClient.get_transport_class("grpc") + assert transport == transports.LanguageServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LanguageServiceClient, transports.LanguageServiceGrpcTransport, "grpc"), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + LanguageServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LanguageServiceClient), +) +@mock.patch.object( + LanguageServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LanguageServiceAsyncClient), +) +def test_language_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(LanguageServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(LanguageServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + LanguageServiceClient, + transports.LanguageServiceGrpcTransport, + "grpc", + "true", + ), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + LanguageServiceClient, + transports.LanguageServiceGrpcTransport, + "grpc", + "false", + ), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + LanguageServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LanguageServiceClient), +) +@mock.patch.object( + LanguageServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LanguageServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_language_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LanguageServiceClient, transports.LanguageServiceGrpcTransport, "grpc"), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_language_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LanguageServiceClient, transports.LanguageServiceGrpcTransport, "grpc"), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_language_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_language_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.language_v1.services.language_service.transports.LanguageServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = LanguageServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_analyze_sentiment( + transport: str = "grpc", request_type=language_service.AnalyzeSentimentRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSentimentResponse( + language="language_value", + ) + + response = client.analyze_sentiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnalyzeSentimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeSentimentResponse) + + assert response.language == "language_value" + + +def test_analyze_sentiment_from_dict(): + test_analyze_sentiment(request_type=dict) + + +@pytest.mark.asyncio +async def test_analyze_sentiment_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnalyzeSentimentRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeSentimentResponse(language="language_value",) + ) + + response = await client.analyze_sentiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.AnalyzeSentimentResponse) + + assert response.language == "language_value" + + +def test_analyze_sentiment_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSentimentResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.analyze_sentiment( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_analyze_sentiment_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.analyze_sentiment( + language_service.AnalyzeSentimentRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_analyze_sentiment_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSentimentResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeSentimentResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.analyze_sentiment( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_analyze_sentiment_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.analyze_sentiment( + language_service.AnalyzeSentimentRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_analyze_entities( + transport: str = "grpc", request_type=language_service.AnalyzeEntitiesRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_entities), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitiesResponse( + language="language_value", + ) + + response = client.analyze_entities(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnalyzeEntitiesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitiesResponse) + + assert response.language == "language_value" + + +def test_analyze_entities_from_dict(): + test_analyze_entities(request_type=dict) + + +@pytest.mark.asyncio +async def test_analyze_entities_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnalyzeEntitiesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_entities), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeEntitiesResponse(language="language_value",) + ) + + response = await client.analyze_entities(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitiesResponse) + + assert response.language == "language_value" + + +def test_analyze_entities_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_entities), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitiesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.analyze_entities( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_analyze_entities_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.analyze_entities( + language_service.AnalyzeEntitiesRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_analyze_entities_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_entities), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitiesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeEntitiesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.analyze_entities( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_analyze_entities_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.analyze_entities( + language_service.AnalyzeEntitiesRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_analyze_entity_sentiment( + transport: str = "grpc", request_type=language_service.AnalyzeEntitySentimentRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_entity_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitySentimentResponse( + language="language_value", + ) + + response = client.analyze_entity_sentiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnalyzeEntitySentimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitySentimentResponse) + + assert response.language == "language_value" + + +def test_analyze_entity_sentiment_from_dict(): + test_analyze_entity_sentiment(request_type=dict) + + +@pytest.mark.asyncio +async def test_analyze_entity_sentiment_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnalyzeEntitySentimentRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_entity_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeEntitySentimentResponse(language="language_value",) + ) + + response = await client.analyze_entity_sentiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitySentimentResponse) + + assert response.language == "language_value" + + +def test_analyze_entity_sentiment_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_entity_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitySentimentResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.analyze_entity_sentiment( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_analyze_entity_sentiment_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.analyze_entity_sentiment( + language_service.AnalyzeEntitySentimentRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_analyze_entity_sentiment_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.analyze_entity_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitySentimentResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeEntitySentimentResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.analyze_entity_sentiment( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_analyze_entity_sentiment_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.analyze_entity_sentiment( + language_service.AnalyzeEntitySentimentRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_analyze_syntax( + transport: str = "grpc", request_type=language_service.AnalyzeSyntaxRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.analyze_syntax), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSyntaxResponse( + language="language_value", + ) + + response = client.analyze_syntax(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnalyzeSyntaxRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeSyntaxResponse) + + assert response.language == "language_value" + + +def test_analyze_syntax_from_dict(): + test_analyze_syntax(request_type=dict) + + +@pytest.mark.asyncio +async def test_analyze_syntax_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnalyzeSyntaxRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_syntax), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeSyntaxResponse(language="language_value",) + ) + + response = await client.analyze_syntax(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeSyntaxResponse) + + assert response.language == "language_value" + + +def test_analyze_syntax_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.analyze_syntax), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSyntaxResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.analyze_syntax( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_analyze_syntax_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.analyze_syntax( + language_service.AnalyzeSyntaxRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_analyze_syntax_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_syntax), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSyntaxResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeSyntaxResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.analyze_syntax( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_analyze_syntax_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.analyze_syntax( + language_service.AnalyzeSyntaxRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_classify_text( + transport: str = "grpc", request_type=language_service.ClassifyTextRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.classify_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ClassifyTextResponse() + + response = client.classify_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.ClassifyTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ClassifyTextResponse) + + +def test_classify_text_from_dict(): + test_classify_text(request_type=dict) + + +@pytest.mark.asyncio +async def test_classify_text_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.ClassifyTextRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.classify_text), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.ClassifyTextResponse() + ) + + response = await client.classify_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ClassifyTextResponse) + + +def test_classify_text_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.classify_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ClassifyTextResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.classify_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + +def test_classify_text_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.classify_text( + language_service.ClassifyTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + +@pytest.mark.asyncio +async def test_classify_text_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.classify_text), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ClassifyTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.ClassifyTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.classify_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + +@pytest.mark.asyncio +async def test_classify_text_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.classify_text( + language_service.ClassifyTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + +def test_annotate_text( + transport: str = "grpc", request_type=language_service.AnnotateTextRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.annotate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnnotateTextResponse( + language="language_value", + ) + + response = client.annotate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnnotateTextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.AnnotateTextResponse) + + assert response.language == "language_value" + + +def test_annotate_text_from_dict(): + test_annotate_text(request_type=dict) + + +@pytest.mark.asyncio +async def test_annotate_text_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnnotateTextRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.annotate_text), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnnotateTextResponse(language="language_value",) + ) + + response = await client.annotate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnnotateTextResponse) + + assert response.language == "language_value" + + +def test_annotate_text_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.annotate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnnotateTextResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.annotate_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + features=language_service.AnnotateTextRequest.Features(extract_syntax=True), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].features == language_service.AnnotateTextRequest.Features( + extract_syntax=True + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_annotate_text_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.annotate_text( + language_service.AnnotateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + features=language_service.AnnotateTextRequest.Features(extract_syntax=True), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_annotate_text_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.annotate_text), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = language_service.AnnotateTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnnotateTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.annotate_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + features=language_service.AnnotateTextRequest.Features(extract_syntax=True), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].features == language_service.AnnotateTextRequest.Features( + extract_syntax=True + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_annotate_text_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.annotate_text( + language_service.AnnotateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + features=language_service.AnnotateTextRequest.Features(extract_syntax=True), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LanguageServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LanguageServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = LanguageServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.LanguageServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LanguageServiceGrpcTransport, + transports.LanguageServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.LanguageServiceGrpcTransport,) + + +def test_language_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.LanguageServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_language_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.language_v1.services.language_service.transports.LanguageServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.LanguageServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "analyze_sentiment", + "analyze_entities", + "analyze_entity_sentiment", + "analyze_syntax", + "classify_text", + "annotate_text", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_language_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.language_v1.services.language_service.transports.LanguageServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.LanguageServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + quota_project_id="octopus", + ) + + +def test_language_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.language_v1.services.language_service.transports.LanguageServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.LanguageServiceTransport() + adc.assert_called_once() + + +def test_language_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + LanguageServiceClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + quota_project_id=None, + ) + + +def test_language_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.LanguageServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + quota_project_id="octopus", + ) + + +def test_language_service_host_no_port(): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="language.googleapis.com" + ), + ) + assert client._transport._host == "language.googleapis.com:443" + + +def test_language_service_host_with_port(): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="language.googleapis.com:8000" + ), + ) + assert client._transport._host == "language.googleapis.com:8000" + + +def test_language_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.LanguageServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_language_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
+ transport = transports.LanguageServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LanguageServiceGrpcTransport, + transports.LanguageServiceGrpcAsyncIOTransport, + ], +) +def test_language_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LanguageServiceGrpcTransport, + transports.LanguageServiceGrpcAsyncIOTransport, + ], +) +def test_language_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.LanguageServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.LanguageServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = 
LanguageServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/language_v1beta2/__init__.py b/tests/unit/gapic/language_v1beta2/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/tests/unit/gapic/language_v1beta2/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/language_v1beta2/test_language_service.py b/tests/unit/gapic/language_v1beta2/test_language_service.py new file mode 100644 index 00000000..5b27952c --- /dev/null +++ b/tests/unit/gapic/language_v1beta2/test_language_service.py @@ -0,0 +1,1773 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.language_v1beta2.services.language_service import ( + LanguageServiceAsyncClient, +) +from google.cloud.language_v1beta2.services.language_service import ( + LanguageServiceClient, +) +from google.cloud.language_v1beta2.services.language_service import transports +from google.cloud.language_v1beta2.types import language_service +from google.oauth2 import service_account + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert LanguageServiceClient._get_default_mtls_endpoint(None) is None + assert ( + LanguageServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + LanguageServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + LanguageServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + LanguageServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + LanguageServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [LanguageServiceClient, LanguageServiceAsyncClient] +) +def test_language_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "language.googleapis.com:443" + + +def test_language_service_client_get_transport_class(): + transport = LanguageServiceClient.get_transport_class() + assert transport == transports.LanguageServiceGrpcTransport + + transport = LanguageServiceClient.get_transport_class("grpc") + assert transport == transports.LanguageServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LanguageServiceClient, transports.LanguageServiceGrpcTransport, "grpc"), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + LanguageServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LanguageServiceClient), +) +@mock.patch.object( + LanguageServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LanguageServiceAsyncClient), +) +def test_language_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(LanguageServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(LanguageServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + LanguageServiceClient, + transports.LanguageServiceGrpcTransport, + "grpc", + "true", + ), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + LanguageServiceClient, + transports.LanguageServiceGrpcTransport, + "grpc", + "false", + ), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + LanguageServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LanguageServiceClient), +) +@mock.patch.object( + LanguageServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LanguageServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_language_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LanguageServiceClient, transports.LanguageServiceGrpcTransport, "grpc"), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_language_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LanguageServiceClient, transports.LanguageServiceGrpcTransport, "grpc"), + ( + LanguageServiceAsyncClient, + transports.LanguageServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_language_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_language_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.language_v1beta2.services.language_service.transports.LanguageServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = LanguageServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_analyze_sentiment( + transport: str = "grpc", request_type=language_service.AnalyzeSentimentRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSentimentResponse( + language="language_value", + ) + + response = client.analyze_sentiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnalyzeSentimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeSentimentResponse) + + assert response.language == "language_value" + + +def test_analyze_sentiment_from_dict(): + test_analyze_sentiment(request_type=dict) + + +@pytest.mark.asyncio +async def test_analyze_sentiment_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnalyzeSentimentRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeSentimentResponse(language="language_value",) + ) + + response = await client.analyze_sentiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.AnalyzeSentimentResponse) + + assert response.language == "language_value" + + +def test_analyze_sentiment_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSentimentResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.analyze_sentiment( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_analyze_sentiment_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.analyze_sentiment( + language_service.AnalyzeSentimentRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_analyze_sentiment_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSentimentResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeSentimentResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.analyze_sentiment( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_analyze_sentiment_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.analyze_sentiment( + language_service.AnalyzeSentimentRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_analyze_entities( + transport: str = "grpc", request_type=language_service.AnalyzeEntitiesRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_entities), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitiesResponse( + language="language_value", + ) + + response = client.analyze_entities(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnalyzeEntitiesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitiesResponse) + + assert response.language == "language_value" + + +def test_analyze_entities_from_dict(): + test_analyze_entities(request_type=dict) + + +@pytest.mark.asyncio +async def test_analyze_entities_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnalyzeEntitiesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_entities), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeEntitiesResponse(language="language_value",) + ) + + response = await client.analyze_entities(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitiesResponse) + + assert response.language == "language_value" + + +def test_analyze_entities_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_entities), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitiesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.analyze_entities( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_analyze_entities_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.analyze_entities( + language_service.AnalyzeEntitiesRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_analyze_entities_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_entities), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitiesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeEntitiesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.analyze_entities( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_analyze_entities_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.analyze_entities( + language_service.AnalyzeEntitiesRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_analyze_entity_sentiment( + transport: str = "grpc", request_type=language_service.AnalyzeEntitySentimentRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_entity_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitySentimentResponse( + language="language_value", + ) + + response = client.analyze_entity_sentiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnalyzeEntitySentimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitySentimentResponse) + + assert response.language == "language_value" + + +def test_analyze_entity_sentiment_from_dict(): + test_analyze_entity_sentiment(request_type=dict) + + +@pytest.mark.asyncio +async def test_analyze_entity_sentiment_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnalyzeEntitySentimentRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_entity_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeEntitySentimentResponse(language="language_value",) + ) + + response = await client.analyze_entity_sentiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeEntitySentimentResponse) + + assert response.language == "language_value" + + +def test_analyze_entity_sentiment_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.analyze_entity_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitySentimentResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.analyze_entity_sentiment( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_analyze_entity_sentiment_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.analyze_entity_sentiment( + language_service.AnalyzeEntitySentimentRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_analyze_entity_sentiment_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._client._transport.analyze_entity_sentiment), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeEntitySentimentResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeEntitySentimentResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.analyze_entity_sentiment( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_analyze_entity_sentiment_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.analyze_entity_sentiment( + language_service.AnalyzeEntitySentimentRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_analyze_syntax( + transport: str = "grpc", request_type=language_service.AnalyzeSyntaxRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.analyze_syntax), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSyntaxResponse( + language="language_value", + ) + + response = client.analyze_syntax(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnalyzeSyntaxRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeSyntaxResponse) + + assert response.language == "language_value" + + +def test_analyze_syntax_from_dict(): + test_analyze_syntax(request_type=dict) + + +@pytest.mark.asyncio +async def test_analyze_syntax_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnalyzeSyntaxRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_syntax), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeSyntaxResponse(language="language_value",) + ) + + response = await client.analyze_syntax(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnalyzeSyntaxResponse) + + assert response.language == "language_value" + + +def test_analyze_syntax_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.analyze_syntax), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSyntaxResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.analyze_syntax( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_analyze_syntax_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.analyze_syntax( + language_service.AnalyzeSyntaxRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_analyze_syntax_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.analyze_syntax), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnalyzeSyntaxResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnalyzeSyntaxResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.analyze_syntax( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_analyze_syntax_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.analyze_syntax( + language_service.AnalyzeSyntaxRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_classify_text( + transport: str = "grpc", request_type=language_service.ClassifyTextRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.classify_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ClassifyTextResponse() + + response = client.classify_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.ClassifyTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ClassifyTextResponse) + + +def test_classify_text_from_dict(): + test_classify_text(request_type=dict) + + +@pytest.mark.asyncio +async def test_classify_text_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.ClassifyTextRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.classify_text), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.ClassifyTextResponse() + ) + + response = await client.classify_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ClassifyTextResponse) + + +def test_classify_text_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.classify_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ClassifyTextResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.classify_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + +def test_classify_text_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.classify_text( + language_service.ClassifyTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + +@pytest.mark.asyncio +async def test_classify_text_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.classify_text), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ClassifyTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.ClassifyTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.classify_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + +@pytest.mark.asyncio +async def test_classify_text_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.classify_text( + language_service.ClassifyTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + +def test_annotate_text( + transport: str = "grpc", request_type=language_service.AnnotateTextRequest +): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.annotate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnnotateTextResponse( + language="language_value", + ) + + response = client.annotate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == language_service.AnnotateTextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.AnnotateTextResponse) + + assert response.language == "language_value" + + +def test_annotate_text_from_dict(): + test_annotate_text(request_type=dict) + + +@pytest.mark.asyncio +async def test_annotate_text_async(transport: str = "grpc_asyncio"): + client = LanguageServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = language_service.AnnotateTextRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.annotate_text), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnnotateTextResponse(language="language_value",) + ) + + response = await client.annotate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.AnnotateTextResponse) + + assert response.language == "language_value" + + +def test_annotate_text_flattened(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.annotate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.AnnotateTextResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.annotate_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + features=language_service.AnnotateTextRequest.Features(extract_syntax=True), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].features == language_service.AnnotateTextRequest.Features( + extract_syntax=True + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +def test_annotate_text_flattened_error(): + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.annotate_text( + language_service.AnnotateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + features=language_service.AnnotateTextRequest.Features(extract_syntax=True), + encoding_type=language_service.EncodingType.UTF8, + ) + + +@pytest.mark.asyncio +async def test_annotate_text_flattened_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.annotate_text), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = language_service.AnnotateTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.AnnotateTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.annotate_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + features=language_service.AnnotateTextRequest.Features(extract_syntax=True), + encoding_type=language_service.EncodingType.UTF8, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].document == language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + + assert args[0].features == language_service.AnnotateTextRequest.Features( + extract_syntax=True + ) + + assert args[0].encoding_type == language_service.EncodingType.UTF8 + + +@pytest.mark.asyncio +async def test_annotate_text_flattened_error_async(): + client = LanguageServiceAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.annotate_text( + language_service.AnnotateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + features=language_service.AnnotateTextRequest.Features(extract_syntax=True), + encoding_type=language_service.EncodingType.UTF8, + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LanguageServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LanguageServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = LanguageServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.LanguageServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.LanguageServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LanguageServiceGrpcTransport, + transports.LanguageServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = LanguageServiceClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.LanguageServiceGrpcTransport,) + + +def test_language_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.LanguageServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_language_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.language_v1beta2.services.language_service.transports.LanguageServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.LanguageServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "analyze_sentiment", + "analyze_entities", + "analyze_entity_sentiment", + "analyze_syntax", + "classify_text", + "annotate_text", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_language_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.language_v1beta2.services.language_service.transports.LanguageServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.LanguageServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + quota_project_id="octopus", + ) + + +def test_language_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.language_v1beta2.services.language_service.transports.LanguageServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.LanguageServiceTransport() + adc.assert_called_once() + + +def test_language_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + LanguageServiceClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + quota_project_id=None, + ) + + +def test_language_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.LanguageServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + quota_project_id="octopus", + ) + + +def test_language_service_host_no_port(): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="language.googleapis.com" + ), + ) + assert client._transport._host == "language.googleapis.com:443" + + +def test_language_service_host_with_port(): + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="language.googleapis.com:8000" + ), + ) + assert client._transport._host == "language.googleapis.com:8000" + + +def test_language_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.LanguageServiceGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_language_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. 
+ transport = transports.LanguageServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LanguageServiceGrpcTransport, + transports.LanguageServiceGrpcAsyncIOTransport, + ], +) +def test_language_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LanguageServiceGrpcTransport, + transports.LanguageServiceGrpcAsyncIOTransport, + ], +) +def test_language_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-language", + "https://www.googleapis.com/auth/cloud-platform", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.LanguageServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = LanguageServiceClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.LanguageServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = 
LanguageServiceClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/v1/test_language_service_client_v1.py b/tests/unit/gapic/v1/test_language_service_client_v1.py deleted file mode 100644 index 8d8362ab..00000000 --- a/tests/unit/gapic/v1/test_language_service_client_v1.py +++ /dev/null @@ -1,310 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import language_v1 -from google.cloud.language_v1.proto import language_service_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestLanguageServiceClient(object): - def test_analyze_sentiment(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnalyzeSentimentResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.analyze_sentiment(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnalyzeSentimentRequest( - document=document - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_analyze_sentiment_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.analyze_sentiment(document) - - def test_analyze_entities(self): - # Setup Expected Response - language = 
"language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnalyzeEntitiesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.analyze_entities(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnalyzeEntitiesRequest( - document=document - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_analyze_entities_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.analyze_entities(document) - - def test_analyze_entity_sentiment(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnalyzeEntitySentimentResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.analyze_entity_sentiment(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnalyzeEntitySentimentRequest( - document=document - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_analyze_entity_sentiment_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.analyze_entity_sentiment(document) - - def test_analyze_syntax(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnalyzeSyntaxResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.analyze_syntax(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnalyzeSyntaxRequest(document=document) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_analyze_syntax_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with 
patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.analyze_syntax(document) - - def test_classify_text(self): - # Setup Expected Response - expected_response = {} - expected_response = language_service_pb2.ClassifyTextResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.classify_text(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.ClassifyTextRequest(document=document) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_classify_text_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.classify_text(document) - - def test_annotate_text(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnnotateTextResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup Request - document = {} - features = {} - - response = client.annotate_text(document, features) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnnotateTextRequest( - document=document, features=features - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_annotate_text_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1.LanguageServiceClient() - - # Setup request - document = {} - features = {} - - with pytest.raises(CustomException): - client.annotate_text(document, features) diff --git a/tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py deleted file mode 100644 index 548357be..00000000 --- a/tests/unit/gapic/v1beta2/test_language_service_client_v1beta2.py +++ /dev/null @@ -1,310 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import language_v1beta2 -from google.cloud.language_v1beta2.proto import language_service_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestLanguageServiceClient(object): - def test_analyze_sentiment(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnalyzeSentimentResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.analyze_sentiment(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnalyzeSentimentRequest( - document=document - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_analyze_sentiment_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.analyze_sentiment(document) - - def test_analyze_entities(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnalyzeEntitiesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.analyze_entities(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = 
language_service_pb2.AnalyzeEntitiesRequest( - document=document - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_analyze_entities_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.analyze_entities(document) - - def test_analyze_entity_sentiment(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnalyzeEntitySentimentResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.analyze_entity_sentiment(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnalyzeEntitySentimentRequest( - document=document - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_analyze_entity_sentiment_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.analyze_entity_sentiment(document) - - def test_analyze_syntax(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnalyzeSyntaxResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.analyze_syntax(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnalyzeSyntaxRequest(document=document) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_analyze_syntax_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.analyze_syntax(document) - - def test_classify_text(self): - # Setup Expected Response - expected_response = {} - expected_response = language_service_pb2.ClassifyTextResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup Request - document = {} - - response = client.classify_text(document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.ClassifyTextRequest(document=document) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_classify_text_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup request - document = {} - - with pytest.raises(CustomException): - client.classify_text(document) - - def test_annotate_text(self): - # Setup Expected Response - language = "language-1613589672" - expected_response = {"language": language} - expected_response = language_service_pb2.AnnotateTextResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup Request - document = {} - features = {} - - response = client.annotate_text(document, features) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = language_service_pb2.AnnotateTextRequest( - document=document, features=features - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_annotate_text_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = language_v1beta2.LanguageServiceClient() - - # Setup request - document = {} - features = {} - - with pytest.raises(CustomException): - client.annotate_text(document, features) From ecae8cd29d54a2d4444dd64521a08942e0626d50 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 19 Oct 2020 11:20:33 -0600 Subject: [PATCH 209/209] chore: release 2.0.0 (#42) --- CHANGELOG.md | 20 ++++++++++++++++++++ setup.py | 4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50cd9793..7b5b2403 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,26 @@ [1]: https://pypi.org/project/google-cloud-language/#history +## [2.0.0](https://www.github.com/googleapis/python-language/compare/v1.3.0...v2.0.0) (2020-10-16) + + +### Features + +* Migrate API to use python micro-generator ([#41](https://www.github.com/googleapis/python-language/issues/41)) ([b408b14](https://www.github.com/googleapis/python-language/commit/b408b1431194d8e1373b5d986d476add639f7e87)) + + +### Documentation + +* add multiprocessing note ([#26](https://www.github.com/googleapis/python-language/issues/26)) ([a489102](https://www.github.com/googleapis/python-language/commit/a489102ca0f5ab302ec8974728a52065f2ea8857)) +* add spacing for 
readability ([#22](https://www.github.com/googleapis/python-language/issues/22)) ([7dff809](https://www.github.com/googleapis/python-language/commit/7dff809b94b5a1d001aeb1e7763dbbe624865600)) +* fix small typo ([#5](https://www.github.com/googleapis/python-language/issues/5)) ([7a9d4dd](https://www.github.com/googleapis/python-language/commit/7a9d4ddf676f2a77e1bd83e02b8d7987a72c6525)) +* **language:** change docstring formatting; bump copyright year to 2020 (via synth) ([#10234](https://www.github.com/googleapis/python-language/issues/10234)) ([b68b216](https://www.github.com/googleapis/python-language/commit/b68b2166d8e4d81a7e51e701f8facdfd7fb82a26)) +* **language:** edit hyphenation of "part-of-speech" (via synth) ([#9954](https://www.github.com/googleapis/python-language/issues/9954)) ([6246ef9](https://www.github.com/googleapis/python-language/commit/6246ef904871405334c0b3bd6c2490b79ffe56fa)) +* **language:** fixes typo in Natural Language samples ([#10134](https://www.github.com/googleapis/python-language/issues/10134)) ([223d614](https://www.github.com/googleapis/python-language/commit/223d6140145dcf5c48af206212db58a062a7937b)) +* add python 2 sunset banner to documentation ([#9036](https://www.github.com/googleapis/python-language/issues/9036)) ([1fe4105](https://www.github.com/googleapis/python-language/commit/1fe4105e078f84f1d4ea713550c26bdf91096d4a)) +* fix intersphinx reference to requests ([#9294](https://www.github.com/googleapis/python-language/issues/9294)) ([e97a0ae](https://www.github.com/googleapis/python-language/commit/e97a0ae6c2e3a26afc9b3af7d91118ac3c0aa1f7)) +* Remove CI for gh-pages, use googleapis.dev for api_core refs. ([#9085](https://www.github.com/googleapis/python-language/issues/9085)) ([6b15df6](https://www.github.com/googleapis/python-language/commit/6b15df6091378ed444642fc813d49d8bbbb6365d)) + ## 1.3.0 07-24-2019 16:44 PDT diff --git a/setup.py b/setup.py index 2020b3c6..b0bac6b2 100644 --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-language" description = "Google Cloud Natural Language API client library" -version = "1.3.0" +version = "2.0.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' @@ -30,7 +30,7 @@ release_status = "Development Status :: 5 - Production/Stable" dependencies = [ "google-api-core[grpc] >= 1.22.2, < 2.0.0dev", - "proto-plus >= 1.4.0", + "proto-plus >= 1.10.0", "libcst >= 0.2.5", ] extras = {}
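For reference, the 2.0.0 release recorded above replaces the hand-written GAPIC clients with microgenerator-based clients like the ones exercised by the new tests earlier in this series. Below is a minimal usage sketch of that surface, assuming application default credentials are available; it mirrors the flattened keyword-argument call style shown in the tests (the document content and the printed field are illustrative only, not taken from the patch):

    # Sketch only: mirrors the flattened call style used in the generated tests above.
    from google.cloud import language_v1 as language

    # Client construction picks up application default credentials (an assumption here).
    client = language.LanguageServiceClient()

    # Documents are built as proto-plus messages; note the trailing underscore on `type_`.
    document = language.Document(
        content="Hello, world!",
        type_=language.Document.Type.PLAIN_TEXT,
    )

    # Flattened fields (document, encoding_type) are passed as keyword arguments.
    response = client.analyze_sentiment(
        document=document,
        encoding_type=language.EncodingType.UTF8,
    )
    print(response.document_sentiment.score)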