Adding voice handler for trash report
All checks were successful
continuous-integration/drone/push Build is passing

Ekaropolus 2025-07-07 02:51:18 -06:00
parent ef7bf27e14
commit 30c6af7f8e


@@ -1,9 +1,13 @@
+import os
 import json
 import logging
+import openai
 from telegram import Update, Bot
 from django.http import JsonResponse
 from django.views.decorators.csrf import csrf_exempt
 from asgiref.sync import sync_to_async
 from .models import TelegramBot
 from pxy_langchain.services import LangchainAIService
 from .handlers import (
@@ -15,6 +19,9 @@ from .handlers import (

 logger = logging.getLogger(__name__)

+# Configure the OpenAI API key
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
 # -------------------------------
 # 🛠 Modular local handlers inside views.py
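
Note: the module-level openai.api_key assignment above is the configuration style of the pre-1.0 openai package, which is what this diff appears to assume is installed. If the project were later moved to openai>=1.0 (an assumption, not something this commit does), the equivalent setup is a client object rather than a module-level key; a minimal sketch:

import os
from openai import OpenAI  # openai>=1.0 only (assumed, not what this commit targets)

# The client reads OPENAI_API_KEY from the environment by default;
# passing it explicitly mirrors the os.getenv() call in the diff above.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))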
@@ -26,15 +33,6 @@ async def handle_location_message(update):
         return True
     return False

-async def handle_voice_or_general_message(update):
-    if update.message.voice:
-        file_id = update.message.voice.file_id
-        await update.message.reply_text(
-            f"🎙 Recibí tu mensaje de voz con ID {file_id}. Pronto calcularé tu CO₂."
-        )
-        return True
-    return False
-
 async def dispatch_citizen_commands(update, text):
     if text == "/start":
@@ -92,6 +90,27 @@ async def dispatch_private_commands(update, text):
         return True

+
+# -------------------------------
+# 🛠 Voice transcription helper
+# -------------------------------
+async def transcribe_with_whisper(update, bot):
+    # 1. Download the voice file from Telegram
+    tg_file = await sync_to_async(bot.get_file)(update.message.voice.file_id)
+    download_path = f"/tmp/{update.message.voice.file_id}.ogg"
+    await sync_to_async(tg_file.download)(download_path)
+
+    # 2. Send the audio to OpenAI's Whisper API
+    with open(download_path, "rb") as audio:
+        transcript = openai.Audio.transcribe(
+            model="whisper-1",
+            file=audio,
+            response_format="text",
+            language="es"
+        )
+    return transcript.strip()
+
 # -------------------------------
 # 🌐 Main webhook
 # -------------------------------
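
Note: transcribe_with_whisper targets the synchronous python-telegram-bot file API (hence the sync_to_async wrappers) and the pre-1.0 openai.Audio.transcribe call. A sketch of the same helper under openai>=1.0 and python-telegram-bot v20+, both assumptions about dependency versions rather than what this commit ships:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

async def transcribe_with_whisper_v1(update, bot):
    # PTB v20+ exposes native async file methods, so sync_to_async is not needed.
    tg_file = await bot.get_file(update.message.voice.file_id)
    download_path = f"/tmp/{update.message.voice.file_id}.ogg"
    await tg_file.download_to_drive(download_path)

    # With response_format="text" the 1.x SDK returns a plain string.
    with open(download_path, "rb") as audio:
        return client.audio.transcriptions.create(
            model="whisper-1",
            file=audio,
            response_format="text",
            language="es",
        ).strip()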
@@ -101,58 +120,65 @@ async def telegram_webhook(request, bot_name):
     try:
         logger.info(f"Webhook called for bot: {bot_name}")

+        # Load the bot configuration
         try:
-            bot_instance = await sync_to_async(TelegramBot.objects.get)(name=bot_name, is_active=True)
-            logger.info(f"Loaded bot configuration: {bot_instance}")
+            bot_instance = await sync_to_async(TelegramBot.objects.get)(
+                name=bot_name, is_active=True
+            )
         except TelegramBot.DoesNotExist:
             logger.error(f"Bot '{bot_name}' not found or inactive.")
             return JsonResponse({"error": f"Bot '{bot_name}' not found."}, status=400)

         if not bot_instance.assistant:
-            logger.error(f"No assistant configured for bot '{bot_name}'.")
             return JsonResponse({"error": "Assistant not configured."}, status=400)

-        if request.method == "POST":
+        if request.method != "POST":
+            return JsonResponse({"error": "Invalid request method"}, status=400)
+
+        # Decode the Telegram payload
         try:
-            request_body = json.loads(request.body.decode("utf-8"))
-            update = Update.de_json(request_body, Bot(token=bot_instance.token))
-            logger.info(f"Update received: {update}")
+            payload = json.loads(request.body.decode("utf-8"))
+            update = Update.de_json(payload, Bot(token=bot_instance.token))
         except json.JSONDecodeError as e:
             logger.error(f"Failed to decode JSON: {e}")
             return JsonResponse({"error": "Invalid JSON payload"}, status=400)

-        if update.message:
-            text = update.message.text or ""
+        if not update.message:
+            return JsonResponse({"status": "no message"})

-            # Handle location always first
+        # 1) Geolocation
         if await handle_location_message(update):
             return JsonResponse({"status": "ok"})

-            # Handle voice or general simple responses next
-            if await handle_voice_or_general_message(update):
+        # 2) Voice: transcribe and call report_trash
+        if update.message.voice:
+            bot = Bot(token=bot_instance.token)
+            transcript = await transcribe_with_whisper(update, bot)
+            if not transcript:
+                await update.message.reply_text(
+                    "No pude entender tu mensaje de voz. Intenta de nuevo."
+                )
+                return JsonResponse({"status": "ok"})
+            update.message.text = transcript
+            await report_trash(update)
             return JsonResponse({"status": "ok"})

-            # Then dispatch commands per bot
-            if bot_name == "PepeBasuritaCoinsBot":
-                if await dispatch_citizen_commands(update, text):
+        # 3) Per-bot text commands
+        text = update.message.text or ""
+        if bot_name == "PepeBasuritaCoinsBot" and await dispatch_citizen_commands(update, text):
             return JsonResponse({"status": "ok"})
-            elif bot_name == "PepeCamioncitoBot":
-                if await dispatch_city_commands(update, text):
+        if bot_name == "PepeCamioncitoBot" and await dispatch_city_commands(update, text):
             return JsonResponse({"status": "ok"})
-            elif bot_name == "PepeMotitoBot":
-                if await dispatch_private_commands(update, text):
+        if bot_name == "PepeMotitoBot" and await dispatch_private_commands(update, text):
             return JsonResponse({"status": "ok"})

-            # Otherwise fallback to LLM
+        # 4) Fall back to the LLM
         assistant_instance = await sync_to_async(LangchainAIService)(bot_instance.assistant)
         bot_response = await sync_to_async(assistant_instance.generate_response)(text)
         await update.message.reply_text(bot_response)
         return JsonResponse({"status": "ok"})

-        logger.warning("Received non-POST request")
-        return JsonResponse({"error": "Invalid request method"}, status=400)
-
     except Exception as e:
         logger.error(f"Error in webhook: {e}")
         return JsonResponse({"error": f"Unexpected error: {str(e)}"}, status=500)