import json
import os

import httpx
import urllib3
from flask import Flask, Response, request
from flask_cors import CORS
from openai import OpenAI

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

app = Flask(__name__)
CORS(app)


@app.route("/")
def status_check():
    return "Server is running"


# Configure the OpenAI client: add your OpenAI credentials here, or replace
# this block to use your own LLM.
os.environ["OPENAI_API_KEY"] = ""
os.environ["OPENAI_PROXY_URL"] = ""

proxy_url = os.environ.get("OPENAI_PROXY_URL")
client = (
    OpenAI()
    if proxy_url is None or proxy_url == ""
    else OpenAI(http_client=httpx.Client(proxy=proxy_url))
)
print(client)
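
# Hypothetical example: OPENAI_PROXY_URL="http://127.0.0.1:3128" would route all
# OpenAI API traffic through that proxy via the httpx client above.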


model_name = "gpt-4o"


def get_completion(content):
    # One-shot completion for a single user prompt.
    response = client.chat.completions.create(
        model=model_name, messages=[{"role": "user", "content": content}], temperature=0
    )
    return response.choices[0].message.content


def get_completion_from_messages(messages):
    # Completion over a full conversation history (a list of role/content dicts).
    response = client.chat.completions.create(
        model=model_name, messages=messages, temperature=0
    )
    return response.choices[0].message.content
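
# Usage sketch (hypothetical prompt):
#   get_completion("Why is the sky blue? Answer in one paragraph.")
# returns the reply text; get_completion_from_messages does the same for a full
# message history.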


emotions = ["happy", "sad", "fear", "angry"]
emotion_prompt = "What is the {} one-paragraph response to "
additional_emotion_prompt = "What is the {} one-paragraph response to the initial question?"
aggregation_prompt = "Aggregate the answers to form a final response "
user_emotion_prompt = ". Take into account that I am {} now."


def get_emotion_prompt(emotion, prompt):
    return emotion_prompt.format(emotion) + prompt


def get_additional_emotion_prompt(emotion):
    return additional_emotion_prompt.format(emotion)


def get_aggregation_prompt(user_emotion):
    return (
        aggregation_prompt
        if user_emotion == "neutral"
        else aggregation_prompt + user_emotion_prompt.format(user_emotion)
    )


def process_multiple_emotional_agents2(question, user_emotion="neutral"):
    # Query the model once per emotion, carrying the conversation forward so each
    # "agent" sees the earlier answers, then ask for an aggregated final response
    # tuned to the user's current emotion.
    ai_answers = {}
    messages = []
    for i, emotion in enumerate(emotions):
        if i == 0:
            messages.append({"role": "user", "content": get_emotion_prompt(emotion, question)})
        else:
            messages.append({"role": "user", "content": get_additional_emotion_prompt(emotion)})
        assistant = get_completion_from_messages(messages)
        messages.append({"role": "assistant", "content": assistant})
        print(emotion, assistant, "\n\n")
        ai_answers[emotion] = assistant
        # time.sleep(delay)

    messages.append({"role": "user", "content": get_aggregation_prompt(user_emotion)})
    assistant = get_completion_from_messages(messages)
    print("Summary:", assistant)
    ai_answers["Summary"] = assistant
    return ai_answers
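
# The returned dict maps each emotion, plus "Summary", to a one-paragraph answer,
# e.g. {"happy": "...", "sad": "...", "fear": "...", "angry": "...", "Summary": "..."}.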


@app.route("/insideout", methods=["POST"])
def process_insideout_request():
    content = request.json
    print(content)
    question = content["question"]
    user_emotion = content["userEmotion"]
    ai_answers = process_multiple_emotional_agents2(question, user_emotion)
    return Response(response=json.dumps(ai_answers), status=200, mimetype="application/json")
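
# Hypothetical request (payload keys match the ones read above):
#   curl -X POST http://localhost:5050/insideout \
#        -H "Content-Type: application/json" \
#        -d '{"question": "How should I plan my week?", "userEmotion": "sad"}'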


@app.route("/single", methods=["POST"])
def process_single_request():
    content = request.json
    # print(content)
    question = content["question"]
    user_emotion = content["userEmotion"]
    prompt = get_emotion_prompt(user_emotion, question)
    # if user_emotion!='neutral':
    #     prompt+=user_emotion_prompt.format(user_emotion)
    assistant = get_completion(prompt)
    print(prompt, "\n", assistant)
    ai_answers = {"Summary": assistant}
    return Response(response=json.dumps(ai_answers), status=200, mimetype="application/json")
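
# Hypothetical request for the single-agent endpoint:
#   curl -X POST http://localhost:5050/single \
#        -H "Content-Type: application/json" \
#        -d '{"question": "How should I plan my week?", "userEmotion": "angry"}'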


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5050)