# pxy_openai/assistants.py
import logging
import os
import tempfile
import time

import requests  # NEW
from typing import Optional  # NEW

from .client import OpenAIClient
from .models import OpenAIAssistant as OpenAIAssistantModel

logger = logging.getLogger(__name__)


class OpenAIAssistant:
    """
    OpenAI Assistant for handling AI interactions (chat + voice).
    """

    def __init__(self, name):
        try:
            self.config = OpenAIAssistantModel.objects.get(name=name)
            self.client = OpenAIClient(self.config.api_key).get_client()
        except OpenAIAssistantModel.DoesNotExist:
            raise ValueError(f"Assistant '{name}' not found in the database.")

    # ---------- NEW: Whisper helpers ----------
    def transcribe_file(self, path: str, language: Optional[str] = "es") -> str:
        """
        Transcribe a local audio file using Whisper. Returns plain text.

        Supports both the new OpenAI SDK (client.audio.transcriptions.create)
        and the legacy module-level API (openai.Audio.transcribe).
        """
        try:
            # New SDK path (openai>=1.0)
            if hasattr(self.client, "audio") and hasattr(self.client.audio, "transcriptions"):
                with open(path, "rb") as f:
                    tx = self.client.audio.transcriptions.create(
                        model="whisper-1",
                        file=f,
                        response_format="text",
                        language=language or None,
                    )
                return tx.strip() if isinstance(tx, str) else str(tx)

            # Legacy SDK fallback (openai<1.0)
            with open(path, "rb") as f:
                tx = self.client.Audio.transcribe(  # type: ignore[attr-defined]
                    model="whisper-1",
                    file=f,
                    response_format="text",
                    language=language or None,
                )
            return tx.strip() if isinstance(tx, str) else str(tx)
        except Exception as e:
            logger.error(f"Whisper transcription error: {e}")
            raise

    def transcribe_telegram(self, bot_token: str, file_id: str, language: Optional[str] = "es") -> str:
        """
        Download a Telegram voice/audio file by file_id and transcribe it.
        """
        # 1) getFile: resolve the file_id to a server-side file path
        r = requests.get(
            f"https://api.telegram.org/bot{bot_token}/getFile",
            params={"file_id": file_id},
            timeout=10,
        )
        r.raise_for_status()
        file_path = r.json()["result"]["file_path"]

        # 2) Download the actual bytes into a temporary file
        url = f"https://api.telegram.org/file/bot{bot_token}/{file_path}"
        suffix = os.path.splitext(file_path)[1] or ".oga"
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
            resp = requests.get(url, timeout=30)
            resp.raise_for_status()
            tmp.write(resp.content)
            local_path = tmp.name

        # 3) Transcribe, then clean up the temporary file
        try:
            return self.transcribe_file(local_path, language=language)
        finally:
            os.remove(local_path)

    # ---------- existing chat/agents methods ----------
    def chat_completion(self, user_message):
        """Single-turn chat completion using the assistant's description as the system prompt."""
        try:
            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": self.config.description},
                    {"role": "user", "content": user_message},
                ],
            )
            return response.choices[0].message.content
        except Exception as e:
            logger.error(f"Error in chat completion: {e}")
            return f"Error in chat completion: {e}"

    def agent_workflow(self, user_message):
        """Run the message through the Assistants API (thread + run) and return the reply."""
        try:
            if not self.config.assistant_id:
                raise ValueError(f"Assistant '{self.config.name}' does not have an associated assistant ID.")

            assistant = self.client.beta.assistants.retrieve(self.config.assistant_id)
            thread = self.client.beta.threads.create()
            self.client.beta.threads.messages.create(thread_id=thread.id, role="user", content=user_message)
            run = self.client.beta.threads.runs.create(thread_id=thread.id, assistant_id=assistant.id)

            # Poll until the run leaves the queued/in_progress states
            while run.status in ["queued", "in_progress"]:
                time.sleep(1)
                run = self.client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)

            if run.status == "completed":
                messages = self.client.beta.threads.messages.list(thread_id=thread.id)
                return messages.data[0].content[0].text.value
            return "Unexpected error: Workflow did not complete."
        except Exception as e:
            logger.error(f"Error in agent workflow: {e}")
            return f"Error in agent workflow: {e}"

    def handle_message(self, user_message):
        """Route to the Assistants workflow for special assistants, else plain chat completion."""
        if self.config.is_special_assistant():
            return self.agent_workflow(user_message)
        return self.chat_completion(user_message)
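

# --- Usage sketch (illustrative only; not wired into the app) -----------------
# A minimal example of how a Telegram webhook handler might route text vs. voice
# updates through this class. The assistant name "default", the
# TELEGRAM_BOT_TOKEN environment variable, and the shape of `update` are
# assumptions made for this sketch; they are not defined elsewhere in the repo.
def _example_handle_telegram_update(update: dict) -> str:
    """Answer a minimal Telegram update dict, routing voice notes through Whisper."""
    import os  # local import so the sketch stays self-contained

    assistant = OpenAIAssistant("default")  # assumed to exist in OpenAIAssistantModel
    message = update.get("message", {})
    voice = message.get("voice") or message.get("audio")
    if voice:
        # Voice note: download via the Bot API, transcribe, then answer as text.
        text = assistant.transcribe_telegram(
            bot_token=os.environ["TELEGRAM_BOT_TOKEN"],
            file_id=voice["file_id"],
            language="es",
        )
    else:
        text = message.get("text", "")
    return assistant.handle_message(text)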