Adding voice handler for trash report
All checks were successful
continuous-integration/drone/push Build is passing
This commit is contained in:
parent
ef7bf27e14
commit
30c6af7f8e
@@ -1,9 +1,13 @@
import os
import json
import logging

import openai
from telegram import Update, Bot
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from asgiref.sync import sync_to_async

from .models import TelegramBot
from pxy_langchain.services import LangchainAIService
from .handlers import (
@@ -15,6 +19,9 @@ from .handlers import (

logger = logging.getLogger(__name__)

# Configure your OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")


# -------------------------------
# 🛠 Modular local handlers inside views.py
@@ -26,15 +33,6 @@ async def handle_location_message(update):
        return True
    return False

async def handle_voice_or_general_message(update):
    if update.message.voice:
        file_id = update.message.voice.file_id
        await update.message.reply_text(
            f"🎙 Recibí tu mensaje de voz con ID {file_id}. Pronto calcularé tu CO₂."
        )
        return True
    return False


async def dispatch_citizen_commands(update, text):
    if text == "/start":
@@ -92,6 +90,27 @@ async def dispatch_private_commands(update, text):
        return True


# -------------------------------
# 🛠 Voice transcription helper
# -------------------------------

async def transcribe_with_whisper(update, bot):
    # 1. Download the voice file from Telegram
    tg_file = await sync_to_async(bot.get_file)(update.message.voice.file_id)
    download_path = f"/tmp/{update.message.voice.file_id}.ogg"
    await sync_to_async(tg_file.download)(download_path)

    # 2. Send the audio to OpenAI's Whisper API
    with open(download_path, "rb") as audio:
        transcript = openai.Audio.transcribe(
            model="whisper-1",
            file=audio,
            response_format="text",
            language="es"
        )
    return transcript.strip()

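Note on library versions: the helper above relies on the pre-1.0 openai SDK (module-level openai.api_key plus openai.Audio.transcribe) and appears to target the synchronous python-telegram-bot API, wrapping bot.get_file and tg_file.download in sync_to_async. As a rough sketch only, the same step under openai>=1.0 would look like the variant below; the name transcribe_with_whisper_v1 and the OpenAI client setup are illustrative assumptions, not part of this commit.

from asgiref.sync import sync_to_async
from openai import OpenAI  # openai>=1.0 client style (assumption; this commit uses the legacy module-level API)

client = OpenAI()  # picks up OPENAI_API_KEY from the environment

async def transcribe_with_whisper_v1(update, bot):
    # Same Telegram download step as in the committed helper
    tg_file = await sync_to_async(bot.get_file)(update.message.voice.file_id)
    download_path = f"/tmp/{update.message.voice.file_id}.ogg"
    await sync_to_async(tg_file.download)(download_path)

    # openai>=1.0 replaces openai.Audio.transcribe with client.audio.transcriptions.create;
    # with response_format="text" the call returns a plain string.
    with open(download_path, "rb") as audio:
        transcript = await sync_to_async(client.audio.transcriptions.create)(
            model="whisper-1",
            file=audio,
            response_format="text",
            language="es",
        )
    return transcript.strip()
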
# -------------------------------
# 🌐 Main webhook
# -------------------------------
@@ -101,57 +120,64 @@ async def telegram_webhook(request, bot_name):
    try:
        logger.info(f"Webhook called for bot: {bot_name}")

        # Load the bot configuration
        try:
            bot_instance = await sync_to_async(TelegramBot.objects.get)(name=bot_name, is_active=True)
            logger.info(f"Loaded bot configuration: {bot_instance}")
            bot_instance = await sync_to_async(TelegramBot.objects.get)(
                name=bot_name, is_active=True
            )
        except TelegramBot.DoesNotExist:
            logger.error(f"Bot '{bot_name}' not found or inactive.")
            return JsonResponse({"error": f"Bot '{bot_name}' not found."}, status=400)

        if not bot_instance.assistant:
            logger.error(f"No assistant configured for bot '{bot_name}'.")
            return JsonResponse({"error": "Assistant not configured."}, status=400)

        if request.method == "POST":
            try:
                request_body = json.loads(request.body.decode("utf-8"))
                update = Update.de_json(request_body, Bot(token=bot_instance.token))
                logger.info(f"Update received: {update}")
            except json.JSONDecodeError as e:
                logger.error(f"Failed to decode JSON: {e}")
                return JsonResponse({"error": "Invalid JSON payload"}, status=400)
        if request.method != "POST":
            return JsonResponse({"error": "Invalid request method"}, status=400)

            if update.message:
                text = update.message.text or ""
        # Decode the Telegram payload
        try:
            payload = json.loads(request.body.decode("utf-8"))
            update = Update.de_json(payload, Bot(token=bot_instance.token))
        except json.JSONDecodeError as e:
            logger.error(f"Failed to decode JSON: {e}")
            return JsonResponse({"error": "Invalid JSON payload"}, status=400)

                # Handle location always first
                if await handle_location_message(update):
                    return JsonResponse({"status": "ok"})

                # Handle voice or general simple responses next
                if await handle_voice_or_general_message(update):
                    return JsonResponse({"status": "ok"})

                # Then dispatch commands per bot
                if bot_name == "PepeBasuritaCoinsBot":
                    if await dispatch_citizen_commands(update, text):
                        return JsonResponse({"status": "ok"})
                elif bot_name == "PepeCamioncitoBot":
                    if await dispatch_city_commands(update, text):
                        return JsonResponse({"status": "ok"})
                elif bot_name == "PepeMotitoBot":
                    if await dispatch_private_commands(update, text):
                        return JsonResponse({"status": "ok"})

                # Otherwise fallback to LLM
                assistant_instance = await sync_to_async(LangchainAIService)(bot_instance.assistant)
                bot_response = await sync_to_async(assistant_instance.generate_response)(text)
                await update.message.reply_text(bot_response)
        if not update.message:
            return JsonResponse({"status": "no message"})

        # 1) Geolocation
        if await handle_location_message(update):
            return JsonResponse({"status": "ok"})

        logger.warning("Received non-POST request")
        return JsonResponse({"error": "Invalid request method"}, status=400)
        # 2) Voice: transcribe and call report_trash
        if update.message.voice:
            bot = Bot(token=bot_instance.token)
            transcript = await transcribe_with_whisper(update, bot)
            if not transcript:
                await update.message.reply_text(
                    "No pude entender tu mensaje de voz. Intenta de nuevo."
                )
                return JsonResponse({"status": "ok"})
            update.message.text = transcript
            await report_trash(update)
            return JsonResponse({"status": "ok"})

        # 3) Text commands per bot
        text = update.message.text or ""
        if bot_name == "PepeBasuritaCoinsBot" and await dispatch_citizen_commands(update, text):
            return JsonResponse({"status": "ok"})
        if bot_name == "PepeCamioncitoBot" and await dispatch_city_commands(update, text):
            return JsonResponse({"status": "ok"})
        if bot_name == "PepeMotitoBot" and await dispatch_private_commands(update, text):
            return JsonResponse({"status": "ok"})

        # 4) Fallback to LLM
        assistant_instance = await sync_to_async(LangchainAIService)(bot_instance.assistant)
        bot_response = await sync_to_async(assistant_instance.generate_response)(text)
        await update.message.reply_text(bot_response)

        return JsonResponse({"status": "ok"})

    except Exception as e:
        logger.error(f"Error in webhook: {e}")
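The diff only touches views.py, so for orientation this is roughly how the webhook view is usually wired into Django routing; the module name urls.py and the "telegram/webhook/..." path are assumptions for illustration, not something this commit shows.

# urls.py (illustrative sketch; the actual route is not part of this diff)
from django.urls import path
from . import views

urlpatterns = [
    # bot_name in the URL selects the TelegramBot row loaded inside telegram_webhook
    path("telegram/webhook/<str:bot_name>/", views.telegram_webhook, name="telegram-webhook"),
]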
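Telegram only starts POSTing voice and text updates to this view once the bot's webhook points at it, so a one-off registration along these lines is usually needed. This is a sketch under assumptions: public_base_url is a placeholder for the deployment's HTTPS origin, and the path mirrors the routing sketch above.

# One-off webhook registration sketch (synchronous python-telegram-bot Bot;
# in python-telegram-bot v20+ set_webhook is a coroutine and would need await).
from telegram import Bot

def register_webhook(bot_row, public_base_url):
    bot = Bot(token=bot_row.token)
    # Telegram will POST updates (text, voice, location) to this URL.
    bot.set_webhook(url=f"{public_base_url}/telegram/webhook/{bot_row.name}/")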