Ekaropolus be3481a707
All checks were successful
continuous-integration/drone/push Build is passing
API for langchain r1
2025-09-17 14:48:20 -06:00

86 lines
2.8 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# pxy_langchain/api/views.py
import sys
import json
import logging
from typing import Optional
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from pxy_langchain.models import AIAssistant
from pxy_langchain.services import LangchainAIService
# We read the bot->assistant mapping from pxy_bots, so bots can decide which assistant to use.
from pxy_bots.models import TelegramBot
logger = logging.getLogger(__name__)
def _get_assistant_for_req(req: dict) -> Optional[AIAssistant]:
    """
    Resolve the AIAssistant for an incoming req.v1 envelope.

    Resolution order:
    1) Via TelegramBot: req['bot']['username'] matched against
       TelegramBot.name first, then TelegramBot.username.
    2) Fallback: an explicit req['context']['assistant_name'].

    Returns None when neither lookup yields an assistant.
    """
    bot_block = req.get("bot") or {}
    bot_username = (bot_block.get("username") or "").strip()
    if bot_username:
        # req.v1 is built from Bot.name, so try that column before .username.
        bot = TelegramBot.objects.filter(name=bot_username).first()
        if bot is None:
            bot = TelegramBot.objects.filter(username=bot_username).first()
        if bot and bot.assistant_id:
            return bot.assistant
    # Optional explicit override carried in the request context.
    context = req.get("context") or {}
    explicit_name = (context.get("assistant_name") or "").strip()
    if explicit_name:
        return AIAssistant.objects.filter(name=explicit_name).first()
    return None
@csrf_exempt
def chat(request):
    """
    POST /api/langchain/chat

    Body: req.v1 (canonical envelope produced by pxy_bots).
    Returns: render.v1 (a single text message with the LLM answer),
    or a JSON error payload with ok=False on failure.
    """
    if request.method != "POST":
        return HttpResponse(status=405)
    try:
        env = json.loads(request.body.decode("utf-8") or "{}")
    except (UnicodeDecodeError, json.JSONDecodeError):
        # Narrowed from a bare `except Exception` so genuine programming
        # errors are not silently reported as bad client input.
        return JsonResponse({"ok": False, "error": "invalid_json"}, status=400)
    # A valid-JSON non-object body (e.g. `[]` or `"hi"`) would crash on
    # env.get(...) below and surface as a 500 — reject it up front.
    if not isinstance(env, dict):
        return JsonResponse({"ok": False, "error": "invalid_json"}, status=400)
    assistant = _get_assistant_for_req(env)
    if not assistant:
        return JsonResponse({"ok": False, "error": "assistant_not_found"}, status=400)
    # Pull user text (or caption) from req.v1.
    inp = env.get("input") or {}
    user_text = (inp.get("text") or inp.get("caption") or "").strip()
    # If nothing to say, keep it explicit rather than calling the LLM.
    if not user_text:
        return JsonResponse({
            "schema_version": "render.v1",
            "messages": [{"type": "text", "text": "No text received."}]
        })
    try:
        svc = LangchainAIService(assistant)
        answer = svc.generate_response(user_text)  # synchronous call
    except Exception:
        # Boundary handler: log the full traceback, return an opaque error
        # class name (no internal details) to the caller.
        logger.exception("langchain.chat.error")
        return JsonResponse({"ok": False, "error": f"llm_error:{sys.exc_info()[0].__name__}"}, status=500)
    # Minimal render.v1 envelope.
    spec = {
        "schema_version": "render.v1",
        "messages": [
            {"type": "text", "text": str(answer)}
        ]
    }
    return JsonResponse(spec)