Skip to content

Commit

Permalink
Update python samples and readme (#31226)
Browse files Browse the repository at this point in the history
* update samples

* update all

* update again

* fix typo

* fix typo

* update

* fix
  • Loading branch information
zhaiyutong authored Jul 21, 2023
1 parent 4f883d3 commit 446c172
Show file tree
Hide file tree
Showing 6 changed files with 29 additions and 25 deletions.
17 changes: 9 additions & 8 deletions sdk/contentsafety/azure-ai-contentsafety/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,13 @@

## Getting started

### Prequisites
### Prerequisites

- Python 3.7 or later is required to use this package.
- You need an [Azure subscription][azure_sub] to use this package.
- An existing [Azure AI Content Safety][contentsafety_overview] instance.

### Installating the package
### Install the package

```bash
pip install azure-ai-contentsafety
Expand Down Expand Up @@ -114,19 +114,16 @@ Please refer to [sample data](https://github.com/Azure/azure-sdk-for-python/tree
from azure.ai.contentsafety import ContentSafetyClient
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from azure.ai.contentsafety.models import AnalyzeTextOptions, TextCategory
from azure.ai.contentsafety.models import AnalyzeTextOptions

key = os.environ["CONTENT_SAFETY_KEY"]
endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]
text_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "./sample_data/text.txt"))

# Create a Content Safety client
client = ContentSafetyClient(endpoint, AzureKeyCredential(key))

# Read sample data
with open(text_path) as f:
# Build request
request = AnalyzeTextOptions(text=f.readline(), categories=[TextCategory.HATE, TextCategory.SELF_HARM])
# Construct a request
request = AnalyzeTextOptions(text="You are an idiot")

# Analyze text
try:
Expand All @@ -144,6 +141,10 @@ Please refer to [sample data](https://github.com/Azure/azure-sdk-for-python/tree
print(f"Hate severity: {response.hate_result.severity}")
if response.self_harm_result:
print(f"SelfHarm severity: {response.self_harm_result.severity}")
if response.sexual_result:
print(f"Sexual severity: {response.sexual_result.severity}")
if response.violence_result:
print(f"Violence severity: {response.violence_result.severity}")
```

<!-- END SNIPPET -->
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,4 +51,4 @@ def analyze_image():


if __name__ == "__main__":
analyze_image()
analyze_image()
Original file line number Diff line number Diff line change
Expand Up @@ -56,4 +56,4 @@ async def main():

if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.run_until_complete(main())
Original file line number Diff line number Diff line change
Expand Up @@ -6,26 +6,24 @@
# license information.
# --------------------------------------------------------------------------


def analyze_text():
# [START analyze_text]

import os
from azure.ai.contentsafety import ContentSafetyClient
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from azure.ai.contentsafety.models import AnalyzeTextOptions, TextCategory
from azure.ai.contentsafety.models import AnalyzeTextOptions

key = os.environ["CONTENT_SAFETY_KEY"]
endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]
text_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "./sample_data/text.txt"))

# Create a Content Safety client
client = ContentSafetyClient(endpoint, AzureKeyCredential(key))

# Read sample data
with open(text_path) as f:
# Build request
request = AnalyzeTextOptions(text=f.readline(), categories=[TextCategory.HATE, TextCategory.SELF_HARM])
# Construct a request
request = AnalyzeTextOptions(text="You are an idiot")

# Analyze text
try:
Expand All @@ -43,8 +41,12 @@ def analyze_text():
print(f"Hate severity: {response.hate_result.severity}")
if response.self_harm_result:
print(f"SelfHarm severity: {response.self_harm_result.severity}")
if response.sexual_result:
print(f"Sexual severity: {response.sexual_result.severity}")
if response.violence_result:
print(f"Violence severity: {response.violence_result.severity}")

# [END analyze_text]

if __name__ == "__main__":
analyze_text()
analyze_text()
Original file line number Diff line number Diff line change
Expand Up @@ -14,20 +14,17 @@ async def analyze_text_async():
from azure.ai.contentsafety.aio import ContentSafetyClient
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from azure.ai.contentsafety.models import AnalyzeTextOptions, TextCategory
from azure.ai.contentsafety.models import AnalyzeTextOptions

key = os.environ["CONTENT_SAFETY_KEY"]
endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]
text_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "./sample_data/text.txt"))

# Create a Content Safety client
client = ContentSafetyClient(endpoint, AzureKeyCredential(key))

async with client:
# Read sample data
with open(text_path) as f:
# Build request
request = AnalyzeTextOptions(text=f.readline(), categories=[TextCategory.HATE, TextCategory.SELF_HARM])
# Construct a request
request = AnalyzeTextOptions(text="You are an idiot")

# Analyze text
try:
Expand All @@ -45,6 +42,10 @@ async def analyze_text_async():
print(f"Hate severity: {response.hate_result.severity}")
if response.self_harm_result:
print(f"SelfHarm severity: {response.self_harm_result.severity}")
if response.sexual_result:
print(f"Sexual severity: {response.sexual_result.severity}")
if response.violence_result:
print(f"Violence severity: {response.violence_result.severity}")

# [END analyze_text_async]

Expand All @@ -53,4 +54,4 @@ async def main():

if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.run_until_complete(main())
Original file line number Diff line number Diff line change
Expand Up @@ -349,4 +349,4 @@ def delete_blocklist():
list_block_items()
get_block_item()
remove_block_items()
delete_blocklist()
delete_blocklist()

0 comments on commit 446c172

Please sign in to comment.