diff --git a/sdk/contentsafety/azure-ai-contentsafety/README.md b/sdk/contentsafety/azure-ai-contentsafety/README.md
index 24943c8d3331..da2f87c38f93 100644
--- a/sdk/contentsafety/azure-ai-contentsafety/README.md
+++ b/sdk/contentsafety/azure-ai-contentsafety/README.md
@@ -4,13 +4,13 @@
 ## Getting started
 
-### Prequisites
+### Prerequisites
 
 - Python 3.7 or later is required to use this package.
 - You need an [Azure subscription][azure_sub] to use this package.
 - An existing [Azure AI Content Safety][contentsafety_overview] instance.
 
-### Installating the package
+### Install the package
 
 ```bash
 pip install azure-ai-contentsafety
 ```
@@ -114,19 +114,16 @@ Please refer to [sample data](https://github.com/Azure/azure-sdk-for-python/tree
 from azure.ai.contentsafety import ContentSafetyClient
 from azure.core.credentials import AzureKeyCredential
 from azure.core.exceptions import HttpResponseError
-from azure.ai.contentsafety.models import AnalyzeTextOptions, TextCategory
+from azure.ai.contentsafety.models import AnalyzeTextOptions
 
 key = os.environ["CONTENT_SAFETY_KEY"]
 endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]
-text_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "./sample_data/text.txt"))
 
 # Create an Content Safety client
 client = ContentSafetyClient(endpoint, AzureKeyCredential(key))
 
-# Read sample data
-with open(text_path) as f:
-    # Build request
-    request = AnalyzeTextOptions(text=f.readline(), categories=[TextCategory.HATE, TextCategory.SELF_HARM])
+# Construct a request
+request = AnalyzeTextOptions(text="You are an idiot")
 
 # Analyze text
 try:
@@ -144,6 +141,10 @@ Please refer to [sample data](https://github.com/Azure/azure-sdk-for-python/tree
     print(f"Hate severity: {response.hate_result.severity}")
 if response.self_harm_result:
     print(f"SelfHarm severity: {response.self_harm_result.severity}")
+if response.sexual_result:
+    print(f"Sexual severity: {response.sexual_result.severity}")
+if response.violence_result:
+    print(f"Violence severity: {response.violence_result.severity}")
 ```
diff --git a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_image.py b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_image.py
index 99361744994c..2901b3a63d47 100644
--- a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_image.py
+++ b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_image.py
@@ -51,4 +51,4 @@ def analyze_image():
 
 
 if __name__ == "__main__":
-    analyze_image()
+    analyze_image()
\ No newline at end of file
diff --git a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_image_async.py b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_image_async.py
index 08420cd0b3a1..c53674fae41e 100644
--- a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_image_async.py
+++ b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_image_async.py
@@ -56,4 +56,4 @@ async def main():
 
 if __name__ == "__main__":
     loop = asyncio.get_event_loop()
-    loop.run_until_complete(main())
+    loop.run_until_complete(main())
\ No newline at end of file
diff --git a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_text.py b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_text.py
index b7d3908c8f18..bfc0717c4fac 100644
--- a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_text.py
+++ b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_text.py
@@ -6,6 +6,7 @@
 # license information.
 # --------------------------------------------------------------------------
 
+
 def analyze_text():
     # [START analyze_text]
     import os
@@ -13,19 +14,16 @@ def analyze_text():
     from azure.ai.contentsafety import ContentSafetyClient
     from azure.core.credentials import AzureKeyCredential
     from azure.core.exceptions import HttpResponseError
-    from azure.ai.contentsafety.models import AnalyzeTextOptions, TextCategory
+    from azure.ai.contentsafety.models import AnalyzeTextOptions
 
     key = os.environ["CONTENT_SAFETY_KEY"]
     endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]
-    text_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "./sample_data/text.txt"))
 
     # Create an Content Safety client
     client = ContentSafetyClient(endpoint, AzureKeyCredential(key))
 
-    # Read sample data
-    with open(text_path) as f:
-        # Build request
-        request = AnalyzeTextOptions(text=f.readline(), categories=[TextCategory.HATE, TextCategory.SELF_HARM])
+    # Construct a request
+    request = AnalyzeTextOptions(text="You are an idiot")
 
     # Analyze text
     try:
@@ -43,8 +41,12 @@ def analyze_text():
         print(f"Hate severity: {response.hate_result.severity}")
     if response.self_harm_result:
         print(f"SelfHarm severity: {response.self_harm_result.severity}")
+    if response.sexual_result:
+        print(f"Sexual severity: {response.sexual_result.severity}")
+    if response.violence_result:
+        print(f"Violence severity: {response.violence_result.severity}")
     # [END analyze_text]
 
 
 if __name__ == "__main__":
-    analyze_text()
+    analyze_text()
\ No newline at end of file
diff --git a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_text_async.py b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_text_async.py
index 94be3662ea01..445d9bde8308 100644
--- a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_text_async.py
+++ b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_analyze_text_async.py
@@ -14,20 +14,17 @@ async def analyze_text_async():
     from azure.ai.contentsafety.aio import ContentSafetyClient
     from azure.core.credentials import AzureKeyCredential
     from azure.core.exceptions import HttpResponseError
-    from azure.ai.contentsafety.models import AnalyzeTextOptions, TextCategory
+    from azure.ai.contentsafety.models import AnalyzeTextOptions
 
     key = os.environ["CONTENT_SAFETY_KEY"]
     endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]
-    text_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "./sample_data/text.txt"))
 
     # Create an Content Safety client
     client = ContentSafetyClient(endpoint, AzureKeyCredential(key))
 
     async with client:
-        # Read sample data
-        with open(text_path) as f:
-            # Build request
-            request = AnalyzeTextOptions(text=f.readline(), categories=[TextCategory.HATE, TextCategory.SELF_HARM])
+        # Construct a request
+        request = AnalyzeTextOptions(text="You are an idiot")
 
         # Analyze text
         try:
@@ -45,6 +42,10 @@ async def analyze_text_async():
             print(f"Hate severity: {response.hate_result.severity}")
         if response.self_harm_result:
             print(f"SelfHarm severity: {response.self_harm_result.severity}")
+        if response.sexual_result:
+            print(f"Sexual severity: {response.sexual_result.severity}")
+        if response.violence_result:
+            print(f"Violence severity: {response.violence_result.severity}")
 
     # [END analyze_text_async]
@@ -53,4 +54,4 @@ async def main():
 
 if __name__ == "__main__":
     loop = asyncio.get_event_loop()
-    loop.run_until_complete(main())
+    loop.run_until_complete(main())
\ No newline at end of file
diff --git a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_manage_blocklist.py b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_manage_blocklist.py
index 8d06e9b78b6e..1e8a108de823 100644
--- a/sdk/contentsafety/azure-ai-contentsafety/samples/sample_manage_blocklist.py
+++ b/sdk/contentsafety/azure-ai-contentsafety/samples/sample_manage_blocklist.py
@@ -349,4 +349,4 @@ def delete_blocklist():
     list_block_items()
     get_block_item()
     remove_block_items()
-    delete_blocklist()
+    delete_blocklist()
\ No newline at end of file