Skip to content

Commit c637e99

Browse files
committed
Fix CI lint
1 parent 452eb11 commit c637e99

File tree

2 files changed

+87
-91
lines changed
  • .github/workflows
  • training_and_examples/mobile_apps/emotional_dialogue/web_server

2 files changed

+87
-91
lines changed

.github/workflows/lint.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ jobs:
2727
run: echo "PYTHONPATH=$(pwd)" >> $GITHUB_ENV
2828

2929
- name: Run Black
30-
run: black --check .
30+
run: black --check --diff .
3131

3232
- name: Run isort
3333
run: isort --check-only --diff .
Lines changed: 86 additions & 90 deletions
Original file line numberDiff line numberDiff line change
@@ -1,134 +1,130 @@
1-
import os
21
import io
3-
import sys
4-
from subprocess import call
5-
from flask_cors import CORS
6-
from flask import Flask, jsonify, send_file, Response
7-
from flask import request
82
import json
3+
import os
4+
import sys
95
import uuid
10-
import requests
6+
from subprocess import call
117

8+
import requests
129
import urllib3
10+
from flask import Flask, Response, jsonify, request, send_file
11+
from flask_cors import CORS
12+
1313
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
1414

1515
app = Flask(__name__)
1616
CORS(app)
1717

@app.route("/")
def status_check():
    """Health-check endpoint: confirms the web server is reachable."""
    status_message = "Server is running"
    return status_message
2222

2323

if True:
    import os

    import httpx
    from openai import OpenAI

    # Placeholder credential setup. Use setdefault so a real key already
    # supplied via the environment is NOT clobbered by the empty placeholder.
    # Put your OpenAI credentials here, or replace this block to use your LLM.
    os.environ.setdefault("OPENAI_API_KEY", "")
    os.environ.setdefault("OPENAI_PROXY_URL", "")

    # Route traffic through an HTTP proxy only when one is configured.
    proxy_url = os.environ.get("OPENAI_PROXY_URL")
    client = (
        OpenAI()
        if not proxy_url
        else OpenAI(http_client=httpx.Client(proxy=proxy_url))
    )
    print(client)

    model_name = "gpt-4o"
44+
3745
def get_completion(content):
    """Send a single user message to the chat model and return the reply text."""
    user_message = {"role": "user", "content": content}
    completion = client.chat.completions.create(
        model=model_name,
        messages=[user_message],
        temperature=0,
    )
    return completion.choices[0].message.content
4450

4551
def get_completion_from_messages(messages):
    """Run a full chat history through the model and return the reply text."""
    completion = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=0,
    )
    return completion.choices[0].message.content
# Emotional personas the multi-agent pipeline cycles through, plus the
# prompt templates used to steer each model call.
emotions = ["happy", "sad", "fear", "angry"]
emotion_prompt = "What is the {} one-paragraph response to "
additional_emotion_prompt = "What is the {} one-paragraph response to the initial question?"
aggregation_prompt = "Aggregate the answers to form a final response "
user_emotion_prompt = ". Take into account that I am {} now."


def get_emotion_prompt(emotion, prompt):
    """Prefix *prompt* with the instruction asking for an *emotion*-flavoured answer."""
    prefix = emotion_prompt.format(emotion)
    return prefix + prompt
67+
6268

def get_additional_emotion_prompt(emotion):
    """Return the follow-up prompt asking for an *emotion*-flavoured answer
    to the conversation's initial question."""
    follow_up = additional_emotion_prompt.format(emotion)
    return follow_up
6571

72+
def get_aggregation_prompt(user_emotion):
    """Build the final aggregation instruction, personalised to the user's emotion.

    A "neutral" user gets the bare aggregation prompt; any other emotion
    appends the take-my-mood-into-account suffix.
    """
    if user_emotion == "neutral":
        return aggregation_prompt
    return aggregation_prompt + user_emotion_prompt.format(user_emotion)
79+
80+
def process_multiple_emotional_agents2(question, user_emotion="neutral"):
    """Ask the model the question once per emotional persona, then aggregate.

    The first persona gets the emotion-prefixed question; later personas are
    asked for their take on the same initial question. The shared message
    history accumulates every exchange so the final aggregation call can see
    all of the per-emotion answers.

    Returns a dict mapping each emotion (plus "Summary") to the model's reply.
    """
    ai_answers = {}
    messages = []
    for index, emotion in enumerate(emotions):
        if index == 0:
            next_prompt = get_emotion_prompt(emotion, question)
        else:
            next_prompt = get_additional_emotion_prompt(emotion)
        messages.append({"role": "user", "content": next_prompt})
        assistant = get_completion_from_messages(messages)
        messages.append({"role": "assistant", "content": assistant})
        print(emotion, assistant, "\n\n")
        ai_answers[emotion] = assistant
        # time.sleep(delay)

    messages.append({"role": "user", "content": get_aggregation_prompt(user_emotion)})
    assistant = get_completion_from_messages(messages)
    print("Summary:", assistant)
    ai_answers["Summary"] = assistant
    return ai_answers
    # ai_answer="Summary (user emotion "+user_emotion+"): "+assistant
@app.route("/insideout", methods=["POST"])
def process_insideout_request():
    """POST endpoint: run the multi-emotion agent pipeline for a question.

    Expects a JSON body with "question" and "userEmotion" keys; returns the
    per-emotion answers (plus "Summary") as a JSON object.
    """
    content = request.json
    print(content)
    # photo = request.files['photo'].read()
    answers = process_multiple_emotional_agents2(
        content["question"], content["userEmotion"]
    )
    return Response(
        response=json.dumps(answers), status=200, mimetype="application/json"
    )
@app.route("/single", methods=["POST"])
def process_single_request():
    """POST endpoint: one emotion-conditioned completion for a question.

    Expects a JSON body with "question" and "userEmotion" keys; returns a
    JSON object with a single "Summary" key.
    """
    content = request.json
    # print(content)
    prompt = get_emotion_prompt(content["userEmotion"], content["question"])
    # if user_emotion!='neutral':
    #     prompt+=user_emotion_prompt.format(user_emotion)
    answer = get_completion(prompt)
    print(prompt, "\n", answer)
    return Response(
        response=json.dumps({"Summary": answer}),
        status=200,
        mimetype="application/json",
    )
127+
128+
129+
if __name__ == "__main__":
    # Bind to all interfaces so the companion mobile app can reach the server.
    app.run(host="0.0.0.0", port=5050)

0 commit comments

Comments
 (0)