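"""Polisplexity Telegram bot.

Wires python-telegram-bot handlers to OpenAI: /start, /help and /dream_city
commands, a location handler, and free-text handlers that reply through the
Chat Completions and Assistants APIs and share a "dream city" link.
"""
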
import asyncio
import json
import logging
import os
import random

from openai import OpenAI
from telegram import ForceReply, Update
from telegram.ext import Application, CommandHandler, ContextTypes, MessageHandler, filters

# Enable logging
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
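
# Credentials are read from the environment instead of being hard-coded below.
# The variable names OPENAI_API_KEY and TELEGRAM_BOT_TOKEN are assumptions;
# adjust them to match your deployment.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
TELEGRAM_BOT_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "")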


async def dream_city_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Send a message with a link to the random city generator."""
    # Construct the message (ES: "Discover your dream city! Click the link to explore
    # a randomly generated city that could reflect your urban aspirations.")
    message = (
        "¡Descubre la ciudad de tus sueños! Haz clic en el enlace para explorar una ciudad "
        "generada aleatoriamente que podría reflejar tus aspiraciones urbanas: "
        "https://app.polisplexity.tech/city/digital/twin/dream/?innovation=30&technology=30&science=40"
    )
    # Send the message
    await update.message.reply_text(message)


async def start(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Send a message when the command /start is issued."""
    user = update.effective_user
    await update.message.reply_html(
        rf"Hi {user.mention_html()}!",
        reply_markup=ForceReply(selective=True),
    )


async def help_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Send a message when the command /help is issued."""
    user = update.effective_user
    location = update.message.location  # None unless the message itself carries a location
    # mention_html() produces HTML, so reply_html is used instead of reply_text
    await update.message.reply_html(f"Help! {user.mention_html()} in {location}")


async def handle_location(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Respond to a location message."""
    location = update.message.location

    if location:
        # Extract latitude and longitude
        latitude = location.latitude
        longitude = location.longitude

        # You can now use the latitude and longitude for your bot's purposes
        await update.message.reply_text(
            f"Thanks for sharing your location! Latitude: {latitude}, Longitude: {longitude}"
        )
    else:
        # Respond if no location is found in the message
        await update.message.reply_text("Please share your location.")


async def assistant(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Respond to a user message using the OpenAI Assistants API."""
    bot_response = "Thinking..."
    try:
        user_message = update.message.text
        client = OpenAI(api_key=OPENAI_API_KEY)  # note: this client's calls are blocking

        neurolitiks_assistant = client.beta.assistants.retrieve("asst_LcyxtrwgxgdHoVwSsuTUf5Ec")

        my_thread = client.beta.threads.create()

        my_thread_message = client.beta.threads.messages.create(
            thread_id=my_thread.id,
            role="user",
            content=user_message,
        )

        my_run = client.beta.threads.runs.create(
            thread_id=my_thread.id,
            assistant_id=neurolitiks_assistant.id,
        )

        # Periodically retrieve the run to check whether it has completed
        while my_run.status in ["queued", "in_progress"]:
            keep_retrieving_run = client.beta.threads.runs.retrieve(
                thread_id=my_thread.id,
                run_id=my_run.id,
            )
            logger.debug(keep_retrieving_run)

            if keep_retrieving_run.status == "completed":
                # Retrieve the messages added by the assistant to the thread
                all_messages = client.beta.threads.messages.list(thread_id=my_thread.id)
                bot_response = all_messages.data[0].content[0].text.value
                break
            elif keep_retrieving_run.status in ("queued", "in_progress"):
                # Still running: wait briefly before polling again to avoid a busy loop
                await asyncio.sleep(0.5)
            else:
                # Failed, cancelled, expired, or requires_action
                bot_response += ' ' + keep_retrieving_run.status
                break

        if bot_response:
            await update.message.reply_text(bot_response)
        else:
            # In case the response is empty or only whitespace
            await update.message.reply_text("I'm not sure how to respond to that. 🤔")
    except Exception as e:
        logger.error(f"Error while processing the message: {e}")
        # Send a default response in case of an error
        await update.message.reply_text(f"Oops, I encountered an issue. Please try again later. 😓 {e}")


def prepare_persona_feedback(user_query):
    # Example function to fetch and format feedback based on the user query.
    # This could query a database or API for more dynamic data.
    persona_feedback = {
        "Other Citizens": "Will analyze and propose community-driven improvements.",
        "Technology Makers": "Will analyze integration with technologies, and will say which technologies and how.",
        "Scientific Innovators": "Will analyze from the urbanist perspective first, and then will recommend what scientific or urban models to use.",
    }
    return persona_feedback


def generate_system_prompt(persona_feedback):
    return (f"Based on the urban improvement proposal, seek feedback from various perspectives: "
            f"Other Citizens suggest: {persona_feedback['Other Citizens']}; "
            f"Technology Makers recommend: {persona_feedback['Technology Makers']}; "
            f"Scientific Innovators advise: {persona_feedback['Scientific Innovators']}. "
            "Evaluate and compile these suggestions to enhance the proposal.")
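

# Composition note: prepare_persona_feedback() supplies the three persona
# viewpoints and generate_system_prompt() folds them into a single
# system-message string; respond() below builds that string, although the
# chat completion it issues currently uses its own hard-coded system message.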


async def respond(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Respond to a user message using OpenAI."""
    bot_response = "Thinking..."
    try:
        user_query = update.message.text
        client = OpenAI(api_key=OPENAI_API_KEY)

        # Prepare persona feedback and the derived system prompt
        # (note: the completion call below uses its own system message instead).
        persona_feedback = prepare_persona_feedback(user_query)
        system_prompt = generate_system_prompt(persona_feedback)

        openai_response = client.chat.completions.create(
            model="gpt-3.5-turbo-0125",
            messages=[
                {"role": "system", "content": "Act as a citizen technology advisor. You will receive enquiries from citizens and will advise on what technologies they could use to resolve them."},
                {"role": "user", "content": user_query},
            ],
        )

        bot_response = openai_response.choices[0].message.content

        # Send the main response
        if bot_response:
            await update.message.reply_text(bot_response)
        else:
            await update.message.reply_text("I'm not sure how to respond to that. 🤔")

        # Generate random percentages for the URL
        innovation = random.randint(1, 100)
        technology = random.randint(1, 100)
        science = random.randint(1, 100)

        # Prepare the promotional message (ES: "Discover your dream city! Click the link
        # to explore a mathematical city generated in VR that could reflect your urban aspirations.")
        dream_city_link = (
            f"¡Descubre la ciudad de tus sueños! Haz clic en el enlace para explorar una ciudad matemática generada en VR "
            f"que podría reflejar tus aspiraciones urbanas: https://app.polisplexity.tech/city/digital/twin/dream/?"
            f"innovation={innovation}&technology={technology}&science={science}"
        )

        # Send the promotional message in a separate follow-up message
        await update.message.reply_text(dream_city_link)

    except Exception as e:
        logger.error(f"Error while processing the message: {e}")
        await update.message.reply_text("Oops, I encountered an issue. Please try again later. 😓")


def main() -> None:
    """Start the bot."""
    application = Application.builder().token(TELEGRAM_BOT_TOKEN).build()

    application.add_handler(CommandHandler("start", start))
    application.add_handler(CommandHandler("help", help_command))
    application.add_handler(CommandHandler("dream_city", dream_city_command))

    # Triggers on messages containing "mi ciudad de los sueños" ("my dream city")
    debate_filter_private = filters.ChatType.PRIVATE & filters.Regex(r'\bmi ciudad de los sueños\b.*:\s*')
    application.add_handler(MessageHandler(debate_filter_private, dream_city_command))
    # Restrict the catch-all private handler to non-command text so that
    # location messages still reach handle_location below.
    application.add_handler(MessageHandler(filters.ChatType.PRIVATE & filters.TEXT & ~filters.COMMAND, respond))

    # The bot's username, without '@'
    bot_username = 'PolisplexityBot'
    mention_pattern = fr'@{bot_username}\b'

    # Use the regex filter to check for mentions of the bot
    mention_filter = filters.Regex(mention_pattern)

    debate_filter_groups = filters.ChatType.GROUPS & filters.Regex(r'\bmi ciudad de los sueños\b.*:\s*')
    application.add_handler(MessageHandler(mention_filter & debate_filter_groups, dream_city_command))
    application.add_handler(MessageHandler(mention_filter & filters.ChatType.GROUPS, respond))

    # application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, respond))
    application.add_handler(MessageHandler(filters.LOCATION, handle_location))

    application.run_polling(allowed_updates=Update.ALL_TYPES)


if __name__ == "__main__":
    main()
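
# Usage sketch (environment variable names are the assumptions defined above;
# the file name below is illustrative):
#   export OPENAI_API_KEY="sk-..."
#   export TELEGRAM_BOT_TOKEN="123456:ABC..."
#   python bot.py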