diff --git a/pxy_bots/views.py b/pxy_bots/views.py
index 9e93023..9d658fa 100644
--- a/pxy_bots/views.py
+++ b/pxy_bots/views.py
@@ -2,7 +2,7 @@
 import os
 import json
 import logging
-import openai
+from openai import OpenAI
 from telegram import Update, Bot
 from django.http import JsonResponse
 from django.views.decorators.csrf import csrf_exempt
@@ -19,9 +19,8 @@ from .handlers import (
 logger = logging.getLogger(__name__)
 
-# Configura tu API Key de OpenAI
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
+# Configura tu cliente de OpenAI
+client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 
 # -------------------------------
 # 🛠 Modular local handlers inside views.py
 # -------------------------------
@@ -91,27 +90,24 @@ async def dispatch_private_commands(update, text):
 
 
 # -------------------------------
-# 🛠 Voice transcription helper
+# 🛠 Voice transcription helper (Whisper via OpenAI client)
 # -------------------------------
 async def transcribe_with_whisper(update, bot):
     # 1. Descarga el archivo de voz desde Telegram
     tg_file = await bot.get_file(update.message.voice.file_id)
     download_path = f"/tmp/{update.message.voice.file_id}.ogg"
-    # En python-telegram-bot v21: download_to_drive es coroutine
     await tg_file.download_to_drive(download_path)
 
-    # 2. Envía el audio a la API Whisper de OpenAI
-    with open(download_path, "rb") as audio:
-        transcript = openai.Audio.transcribe(
-            model="whisper-1",
-            file=audio,
+    # 2. Envía el audio a la API de OpenAI para transcripción
+    with open(download_path, "rb") as audio_file:
+        transcript = client.audio.transcriptions.create(
+            model="gpt-4o-transcribe",  # o "whisper-1"
+            file=audio_file,
             response_format="text",
             language="es"
         )
-    return transcript.strip()
+    # NOTE: with response_format="text" the v1 SDK returns a plain str; keep the original .strip()
+    return (transcript if isinstance(transcript, str) else transcript.text).strip()
 
-
-# -------------------------------
-# 🌐 Main webhook
 # -------------------------------
 @csrf_exempt