```python
import os
from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential

# Set your Azure Cognitive Services endpoint and key
endpoint = "YOUR_ENDPOINT"
key = "YOUR_KEY"

# Instantiate a Text Analytics client
text_analytics_client = TextAnalyticsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

# Text for analysis
documents = [
    "I love the weather today! It makes me so happy.",
    "The movie was not good. I did not enjoy it at all.",
    "Microsoft is a technology company based in Redmond, Washington.",
    "The new product launch was a success. Customers are excited about it.",
]

# Perform sentiment analysis; the call returns one result per input document
response = text_analytics_client.analyze_sentiment(documents=documents)
for idx, doc in enumerate(response):
    print("Document: {}".format(documents[idx]))
    print("Sentiment: {}".format(doc.sentiment))
    print()
```
```python
# Perform entity recognition
response = text_analytics_client.recognize_entities(documents=documents)
for idx, doc in enumerate(response):
    print("Document: {}".format(documents[idx]))
    print("Entities:")
    for entity in doc.entities:
        print("\tText: {}, Type: {}".format(entity.text, entity.category))
    print()

# Perform key phrase extraction
response = text_analytics_client.extract_key_phrases(documents=documents)
for idx, doc in enumerate(response):
    print("Document: {}".format(documents[idx]))
    print("Key Phrases:")
    for phrase in doc.key_phrases:
        print("\t{}".format(phrase))
    print()

# Additional text for analysis including multiple languages, dates, and names
additional_documents = [
    "今天的天气真好!",
    "La película fue excelente. ¡Me encantó!",
    "The meeting is scheduled for February 28th.",
    "John Smith will be attending the conference next week.",
]

# Perform sentiment analysis for the additional documents.
# The input is mixed-language, so a single language hint such as language="es"
# would mislabel most documents; pass per-document language hints
# (e.g. {"id": ..., "language": "es", "text": ...}) or let the service default.
response = text_analytics_client.analyze_sentiment(documents=additional_documents)
for idx, doc in enumerate(response):
    print("Document: {}".format(additional_documents[idx]))
    print("Sentiment: {}".format(doc.sentiment))
    print()

# Perform entity recognition for the additional documents
response = text_analytics_client.recognize_entities(documents=additional_documents)
for idx, doc in enumerate(response):
    print("Document: {}".format(additional_documents[idx]))
    print("Entities:")
    for entity in doc.entities:
        print("\tText: {}, Type: {}".format(entity.text, entity.category))
    print()

# Perform key phrase extraction for the additional documents
response = text_analytics_client.extract_key_phrases(documents=additional_documents)
for idx, doc in enumerate(response):
    print("Document: {}".format(additional_documents[idx]))
    print("Key Phrases:")
    for phrase in doc.key_phrases:
        print("\t{}".format(phrase))
    print()

# Perform date recognition for the additional documents.
# General entity recognition is used here, since dates fall under the
# NER model's "DateTime" category.
response = text_analytics_client.recognize_entities(documents=additional_documents)
for idx, doc in enumerate(response):
    print("Document: {}".format(additional_documents[idx]))
    print("Dates:")
    for entity in doc.entities:
        if entity.category == "DateTime":
            print("\tDate: {}".format(entity.text))
    print()

# Perform name recognition for the additional documents.
# The PII recognizer's category for people is "Person" (not "PersonName").
response = text_analytics_client.recognize_pii_entities(documents=additional_documents)
for idx, doc in enumerate(response):
    print("Document: {}".format(additional_documents[idx]))
    print("PII Entities:")
    for entity in doc.entities:
        if entity.category == "Person":
            print("\tName: {}".format(entity.text))
    print()
```
```python
from azure.cognitiveservices.language.luis.authoring import LUISAuthoringClient
from azure.cognitiveservices.language.luis.authoring.models import ApplicationCreateObject
from msrest.authentication import CognitiveServicesCredentials

# Set up the LUIS authoring client
authoring_key = "YOUR_LUIS_AUTHORING_KEY"
authoring_endpoint = "YOUR_LUIS_AUTHORING_ENDPOINT"
authoring_client = LUISAuthoringClient(authoring_endpoint, CognitiveServicesCredentials(authoring_key))

# Define and create a new LUIS application
app_name = "MyChatbotApp"
app_description = "Custom language model for a chatbot"
app_version = "1.0"
culture = "en-us"
app_definition = ApplicationCreateObject(
    name=app_name, description=app_description, culture=culture, initial_version_id=app_version
)
# apps.add returns the new application's ID (a GUID string)
app_id = authoring_client.apps.add(app_definition)

# Define intents, utterances, and entities
intents = ["Greeting", "Search", "Weather"]
utterances = [
    {"text": "Hello", "intent": "Greeting"},
    {"text": "What's the weather like today?", "intent": "Weather"},
    {"text": "Find a restaurant nearby", "intent": "Search"},
]
entities = []

# Add intents, utterances, and entities to the LUIS application
for intent in intents:
    authoring_client.model.add_intent(app_id, app_version, name=intent)
for utterance in utterances:
    authoring_client.examples.add(
        app_id, app_version, {"text": utterance["text"], "intent_name": utterance["intent"]}
    )
for entity in entities:
    authoring_client.model.add_entity(app_id, app_version, name=entity)

# Train and publish the LUIS application
# (training is asynchronous; a real application should poll
# authoring_client.train.get_status(app_id, app_version) before publishing)
authoring_client.train.train_version(app_id, app_version)
authoring_client.apps.publish(app_id, app_version, is_staging=False)
print("LUIS application published successfully!")
```
When extending the sample code to include LUIS language understanding, you can add the following section, which demonstrates how to use the LUIS API to parse user input and obtain the corresponding intent and entities:
```python
from azure.cognitiveservices.language.luis.runtime import LUISRuntimeClient
from msrest.authentication import CognitiveServicesCredentials

# Set up the LUIS runtime client
runtime_key = "YOUR_LUIS_RUNTIME_KEY"
runtime_endpoint = "YOUR_LUIS_RUNTIME_ENDPOINT"
runtime_client = LUISRuntimeClient(runtime_endpoint, CognitiveServicesCredentials(runtime_key))

# Define user input
user_input = "What's the weather like tomorrow in New York?"

# Call LUIS (v3 prediction API) to predict user intent and entities,
# using the app_id returned when the application was created above
prediction_response = runtime_client.prediction.get_slot_prediction(
    app_id, "Production", {"query": user_input}
)

# Extract intent and entities from the prediction response
top_intent = prediction_response.prediction.top_intent
entities = prediction_response.prediction.entities  # dict of entity name -> extracted values

print("Predicted Intent: {}".format(top_intent))
print("Entities:")
for entity_name, values in entities.items():
    print("\t- Type: {}, Value: {}".format(entity_name, values))
```
This code shows how to use the LUIS Runtime API to parse user input and obtain the predicted intent and entities. By calling runtime_client.prediction.get_slot_prediction, you send the user's input to LUIS and receive a prediction result. In this example, we print the predicted intent and entities so that you can process this information further and take the appropriate action based on the user's intent.

By integrating this code with the earlier samples, you can build a more complete application that combines a custom language model (created with the LUIS Authoring API) with the ability to interpret user input using that model (via the LUIS Runtime API). Such an application can understand the user's intent and context more intelligently, providing a more personalized and efficient interaction experience. For example, the application can branch on the predicted intent, as sketched below.
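The following is a minimal, hypothetical dispatcher for the three intents defined earlier (Greeting, Search, Weather). The branching logic and reply strings are illustrative placeholders, not part of any SDK:

```python
# Minimal, hypothetical intent dispatcher for the intents defined above.
# The replies here are placeholders; a real bot would call out to the
# relevant service (e.g. a weather API) in each branch.
def dispatch(top_intent, entities):
    if top_intent == "Greeting":
        return "Hello! How can I help you today?"
    elif top_intent == "Weather":
        # Extracted entities (e.g. a date or location) could refine the lookup here.
        return "Let me check the weather for you. Entities: {}".format(entities)
    elif top_intent == "Search":
        return "Searching nearby places..."
    else:
        return "Sorry, I didn't understand that."

print(dispatch(top_intent, entities))
```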
```python
import azure.cognitiveservices.speech as speechsdk

# Set up the speech config
speech_key = "YOUR_SPEECH_KEY"
service_region = "YOUR_SERVICE_REGION"
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)

# Set up the speech recognizer
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)

print("Say something...")

# Start speech recognition (listens once, using the default microphone)
result = speech_recognizer.recognize_once()

# Get recognized text
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
    print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
    print("No speech could be recognized")
elif result.reason == speechsdk.ResultReason.Canceled:
    cancellation_details = result.cancellation_details
    print("Speech recognition canceled: {}".format(cancellation_details.reason))

# Set up the speech synthesizer
speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)

# Synthesize text to speech
text_to_speak = "Hello! How can I help you today?"
result = speech_synthesizer.speak_text_async(text_to_speak).get()

if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
    print("Speech synthesized to audio.")
elif result.reason == speechsdk.ResultReason.Canceled:
    cancellation_details = result.cancellation_details
    print("Speech synthesis canceled: {}".format(cancellation_details.reason))
```
```python
import azure.cognitiveservices.speech as speechsdk

# Set up the speech config for speech recognition
speech_key = "YOUR_SPEECH_KEY"
service_region = "YOUR_SERVICE_REGION"
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)

# Set up the speech recognizer
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)

print("Listening...")

# Start speech recognition
result = speech_recognizer.recognize_once()

# Get recognized text (user_input stays None if recognition fails)
user_input = None
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
    user_input = result.text
    print("Recognized: {}".format(user_input))
elif result.reason == speechsdk.ResultReason.NoMatch:
    print("No speech could be recognized")
elif result.reason == speechsdk.ResultReason.Canceled:
    cancellation_details = result.cancellation_details
    print("Speech recognition canceled: {}".format(cancellation_details.reason))

# Only synthesize a reply if something was actually recognized
if user_input:
    # Set up the speech synthesizer for text-to-speech
    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)

    # Define the response text and synthesize it to speech
    response_text = "You said: {}".format(user_input)
    result = speech_synthesizer.speak_text_async(response_text).get()

    if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
        print("Speech synthesized to audio.")
    elif result.reason == speechsdk.ResultReason.Canceled:
        cancellation_details = result.cancellation_details
        print("Speech synthesis canceled: {}".format(cancellation_details.reason))
```
```python
from azure.cognitiveservices.knowledge.qnamaker import QnAMakerClient
from azure.cognitiveservices.knowledge.qnamaker.models import QueryDTO
from msrest.authentication import CognitiveServicesCredentials

# Set up the QnA Maker client
subscription_key = "YOUR_SUBSCRIPTION_KEY"
endpoint = "YOUR_QNA_MAKER_ENDPOINT"
kb_id = "YOUR_KNOWLEDGE_BASE_ID"
client = QnAMakerClient(endpoint, CognitiveServicesCredentials(subscription_key))

# Define a function to query the QnA Maker knowledge base
def query_knowledge_base(question):
    query = QueryDTO(question=question)
    # Note: depending on the SDK version, queries may instead go through a
    # separate QnAMakerRuntimeClient pointed at the knowledge base's runtime endpoint.
    response = client.runtime.generate_answer(kb_id, query)
    if response.answers:
        return response.answers[0].answer
    else:
        return "Sorry, I don't have an answer to that question."

# Example usage
question = "What is the capital of France?"
answer = query_knowledge_base(question)
print("Question: {}".format(question))
print("Answer: {}".format(answer))
```
```python
from azure.cognitiveservices.knowledge.qnamaker import QnAMakerClient
from azure.cognitiveservices.knowledge.qnamaker.models import QueryDTO, MetadataDTO
from msrest.authentication import CognitiveServicesCredentials

# Set up the QnA Maker client
subscription_key = "YOUR_SUBSCRIPTION_KEY"
endpoint = "YOUR_QNA_MAKER_ENDPOINT"
kb_id = "YOUR_KNOWLEDGE_BASE_ID"
client = QnAMakerClient(endpoint, CognitiveServicesCredentials(subscription_key))

# Define a function to query the QnA Maker knowledge base with language support.
# This assumes the knowledge base tags its answers with a "language" metadata value;
# the filter goes through QueryDTO's strict_filters (QueryDTO has no metadata_filter field).
def query_knowledge_base_multilanguage(question, language="en"):
    query = QueryDTO(
        question=question,
        top=1,
        strict_filters=[MetadataDTO(name="language", value=language)],
    )
    response = client.runtime.generate_answer(kb_id, query)
    if response.answers:
        return response.answers[0].answer
    else:
        return "Sorry, I don't have an answer to that question in the specified language."

# Example usage with multiple languages
question = "What is the weather like today?"
answer_en = query_knowledge_base_multilanguage(question, "en")
answer_es = query_knowledge_base_multilanguage(question, "es")
print("Question: {}".format(question))
print("Answer (English): {}".format(answer_en))
print("Answer (Spanish): {}".format(answer_es))
```
When using natural language generation to let a chatbot reply to user messages in a natural way, you can consider the following sample code, which sketches how a reply-generation step could be wired around the Azure Text Analytics client. Note that Text Analytics itself does not expose a text-generation action, so the "generate_answer" action below is a hypothetical placeholder:
```python
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, TextDocumentInput

# Set up the Text Analytics client
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
credential = AzureKeyCredential(key)
text_analytics_client = TextAnalyticsClient(endpoint, credential)

# Define a function to generate a natural language response.
# NOTE: "generate_answer" is a hypothetical action used for illustration only;
# begin_analyze_actions actually takes action objects (e.g. RecognizeEntitiesAction)
# and offers no text-generation action. The response handling below follows the
# same hypothetical shape; a real implementation would call a generation service.
def generate_response(input_text):
    documents = [TextDocumentInput(id="1", text=input_text)]
    response = text_analytics_client.begin_analyze_actions(documents, actions=["generate_answer"])
    result = response.result()
    if result.answers:
        return result.answers[0].generated_answer.text
    else:
        return "I'm sorry, I couldn't generate a response for that input."

# Example usage
input_text = "What are the top tourist attractions in Paris?"
response = generate_response(input_text)
print("User Input: {}".format(input_text))
print("Bot Response: {}".format(response))
```
In this code, we set up the Azure Text Analytics client to drive the reply step. First, the key and endpoint of the Text Analytics service are read and the client is created. Then a generate_response function is defined that accepts the user's input text and returns a natural-language reply (with the generation action itself stubbed out as a placeholder, as noted above).
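To keep the conversation going, generate_response can be wrapped in a simple read-reply loop. The sketch below is a minimal, hypothetical driver around the function defined above; the "quit" exit command is an arbitrary choice, not part of any SDK:

```python
# Minimal chat loop around generate_response (hypothetical driver code).
def chat():
    print("Type 'quit' to exit.")
    while True:
        user_text = input("You: ")
        if user_text.strip().lower() == "quit":
            break
        # Each turn is answered independently; no conversation state is kept.
        print("Bot: {}".format(generate_response(user_text)))

chat()
```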
```python
import os
import spacy
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, TextDocumentInput

# Load the spaCy model for NLP tasks
nlp = spacy.load("en_core_web_sm")

# Set up the Text Analytics client
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
credential = AzureKeyCredential(key)
text_analytics_client = TextAnalyticsClient(endpoint, credential)

# Define a function to generate a natural language response with enhanced features.
# As above, the "generate_answer" action is a hypothetical placeholder for a
# generation step; Text Analytics does not provide one.
def generate_enhanced_response(input_text):
    # Perform NLP tasks with spaCy
    doc = nlp(input_text)

    # Extract entities like dates and names
    dates = [ent.text for ent in doc.ents if ent.label_ == "DATE"]
    names = [ent.text for ent in doc.ents if ent.label_ == "PERSON"]

    # Prepare text for the Text Analytics API
    documents = [TextDocumentInput(id="1", text=input_text)]

    # Use the (placeholder) generation action to produce an answer
    response = text_analytics_client.begin_analyze_actions(documents, actions=["generate_answer"])
    result = response.result()
    if result.answers:
        generated_answer = result.answers[0].generated_answer.text
        # Incorporate the extracted entities into the response
        return f"{generated_answer}. Dates mentioned: {dates}. Names mentioned: {names}"
    else:
        return "I'm sorry, I couldn't generate a response for that input."

# Example usage
input_text = "Who is the president of the United States and when was he born?"
response = generate_enhanced_response(input_text)
print("User Input: {}".format(input_text))
print("Bot Response: {}".format(response))
```
```python
import os
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

# Set up the Face client
key = os.environ["AZURE_FACE_KEY"]
endpoint = os.environ["AZURE_FACE_ENDPOINT"]
credentials = CognitiveServicesCredentials(key)
face_client = FaceClient(endpoint, credentials)

# Define a function to detect emotions from a given image URL
def detect_emotions(image_url):
    detected_faces = face_client.face.detect_with_url(url=image_url, return_face_attributes=["emotion"])
    if detected_faces:
        # Return the emotion scores for the first detected face
        emotions = detected_faces[0].face_attributes.emotion
        return emotions
    else:
        return "No faces detected in the image."

# Example usage
image_url = "URL_TO_USER_IMAGE"
emotions = detect_emotions(image_url)
print("Emotions detected in the image:")
print(emotions)
```
```python
import os
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

# Set up the Face client
key = os.environ["AZURE_FACE_KEY"]
endpoint = os.environ["AZURE_FACE_ENDPOINT"]
credentials = CognitiveServicesCredentials(key)
face_client = FaceClient(endpoint, credentials)

# Define a function to detect faces and extract facial features from a given image URL.
# Returns a list of attribute dicts, or an empty list if no faces are found.
def detect_faces_and_features(image_url):
    detected_faces = face_client.face.detect_with_url(
        url=image_url,
        return_face_attributes=["age", "gender", "facialHair", "emotion"],
    )
    face_data = []
    for face in detected_faces:
        face_data.append({
            "age": face.face_attributes.age,
            "gender": face.face_attributes.gender,
            "facial_hair": face.face_attributes.facial_hair,
            "emotion": face.face_attributes.emotion,
        })
    return face_data

# Example usage
image_url = "URL_TO_IMAGE_WITH_FACES"
faces_data = detect_faces_and_features(image_url)
if faces_data:
    print("Facial features detected in the image:")
    for idx, face_data in enumerate(faces_data):
        print(f"Face {idx + 1}: {face_data}")
else:
    print("No faces detected in the image.")
```
```python
import os
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

# Set up the Computer Vision client
key = os.environ["AZURE_CV_KEY"]
endpoint = os.environ["AZURE_CV_ENDPOINT"]
credentials = CognitiveServicesCredentials(key)
computervision_client = ComputerVisionClient(endpoint, credentials)

# Define a function to analyze an image from a given URL.
# Returns the list of detected categories (possibly empty).
def analyze_image(image_url):
    image_analysis = computervision_client.analyze_image(
        image_url,
        visual_features=[VisualFeatureTypes.categories, VisualFeatureTypes.tags, VisualFeatureTypes.description],
    )
    return image_analysis.categories or []

# Example usage
image_url = "URL_TO_IMAGE"
image_categories = analyze_image(image_url)
if image_categories:
    print("Categories detected in the image:")
    for category in image_categories:
        print(category.name)
else:
    print("No categories detected in the image.")
```
```python
import os
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

# Set up the Computer Vision client
key = os.environ["AZURE_CV_KEY"]
endpoint = os.environ["AZURE_CV_ENDPOINT"]
credentials = CognitiveServicesCredentials(key)
computervision_client = ComputerVisionClient(endpoint, credentials)

# Define a function to describe an image from a given URL
def describe_image(image_url):
    image_description = computervision_client.describe_image(image_url)
    if image_description.captions:
        return image_description.captions[0].text
    else:
        return "No description available for the image."

# Define a function to analyze the objects and scene-level categories in an image.
# The Computer Vision API has no dedicated "scenes" feature; its category taxonomy
# (e.g. "outdoor_mountain") is used here as the closest scene-level signal.
def analyze_image_objects(image_url):
    image_analysis = computervision_client.analyze_image(
        image_url,
        visual_features=[VisualFeatureTypes.objects, VisualFeatureTypes.categories],
    )
    objects = [obj.object_property for obj in image_analysis.objects]
    scenes = [category.name for category in image_analysis.categories]
    return objects, scenes

# Example usage
image_url = "URL_TO_IMAGE"
image_description = describe_image(image_url)
print("Description of the image:")
print(image_description)

objects, scenes = analyze_image_objects(image_url)
print("Objects detected in the image:")
print(objects)
print("Scene categories detected in the image:")
print(scenes)
```
```python
import os
import requests
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

# Set up the Computer Vision client
key = os.environ["AZURE_CV_KEY"]
endpoint = os.environ["AZURE_CV_ENDPOINT"]
credentials = CognitiveServicesCredentials(key)
computervision_client = ComputerVisionClient(endpoint, credentials)

# Define a function to analyze an image from a given URL
def analyze_image(image_url):
    image_analysis = computervision_client.analyze_image(image_url, visual_features=[VisualFeatureTypes.description])
    if image_analysis.description.captions:
        return image_analysis.description.captions[0].text
    else:
        return "No description available for the image."

# Define a function to get location information based on latitude and longitude
# (LocationIQ reverse geocoding)
def get_location_info(latitude, longitude):
    api_key = "YOUR_LOCATION_API_KEY"
    url = f"https://api.locationiq.com/v1/reverse.php?key={api_key}&lat={latitude}&lon={longitude}&format=json"
    response = requests.get(url)
    location_data = response.json()
    return location_data["display_name"]

# Define a function to get weather information based on location (OpenWeatherMap).
# Note: display_name is a full address string; in practice you may need to pass
# just the city name in OpenWeatherMap's q parameter.
def get_weather_info(location):
    api_key = "YOUR_WEATHER_API_KEY"
    url = f"https://api.openweathermap.org/data/2.5/weather?q={location}&appid={api_key}&units=metric"
    response = requests.get(url)
    weather_data = response.json()
    return weather_data["weather"][0]["description"], weather_data["main"]["temp"]

# Example usage
image_url = "URL_TO_IMAGE"
image_description = analyze_image(image_url)
print("Description of the image:")
print(image_description)

latitude = 40.7128    # Example latitude (New York City)
longitude = -74.0060  # Example longitude
location = get_location_info(latitude, longitude)
print("Location information:")
print(location)

weather_description, temperature = get_weather_info(location)
print("Weather information:")
print("Description:", weather_description)
print("Temperature:", temperature, "°C")
```