API for langchain r1

Ekaropolus 2025-09-17 14:48:20 -06:00
parent c48e191fc8
commit be3481a707
3 changed files with 93 additions and 0 deletions


@@ -46,6 +46,7 @@ urlpatterns = [
    path("share/", include("pxy_dashboard.share_urls")),  # ← NEW
    path("api/", include("pxy_bots.api.urls")),
    path("api/langchain/", include("pxy_langchain.api.urls")),

pxy_langchain/api/urls.py

@@ -0,0 +1,7 @@
# pxy_langchain/api/urls.py
from django.urls import path
from .views import chat

urlpatterns = [
    path("chat", chat, name="langchain_chat_api"),
]
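
With both includes in place, the project prefix "api/langchain/" and the app route "chat" compose to POST /api/langchain/chat. A quick sanity check; this is a sketch, assuming the include carries no URL namespace:

# Sketch: the named route should resolve to the path the views.py docstring advertises.
from django.urls import reverse

assert reverse("langchain_chat_api") == "/api/langchain/chat"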

pxy_langchain/api/views.py

@@ -0,0 +1,85 @@
# pxy_langchain/api/views.py
import json
import logging
from typing import Optional

from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt

from pxy_langchain.models import AIAssistant
from pxy_langchain.services import LangchainAIService

# We read the bot->assistant mapping from pxy_bots, so bots can decide which assistant to use.
from pxy_bots.models import TelegramBot

logger = logging.getLogger(__name__)


def _get_assistant_for_req(req: dict) -> Optional[AIAssistant]:
    """
    Resolve which AIAssistant to use:
      1) Try via TelegramBot (req['bot']['username'] matches TelegramBot.name or .username).
      2) Fallback: optional req['context']['assistant_name'].
    """
    bot_username = (((req.get("bot") or {}).get("username")) or "").strip()
    if bot_username:
        # Try Bot.name first (that's how your req.v1 is built), then .username
        bot = (TelegramBot.objects.filter(name=bot_username).first() or
               TelegramBot.objects.filter(username=bot_username).first())
        if bot and bot.assistant_id:
            return bot.assistant

    # Fallback: explicit assistant name (optional)
    ctx = req.get("context") or {}
    assistant_name = (ctx.get("assistant_name") or "").strip()
    if assistant_name:
        return AIAssistant.objects.filter(name=assistant_name).first()

    return None


@csrf_exempt
def chat(request):
    """
    POST /api/langchain/chat
    Body: req.v1 (canonical envelope produced by pxy_bots)
    Returns: render.v1 (text message with the LLM answer)
    """
    if request.method != "POST":
        return HttpResponse(status=405)

    try:
        env = json.loads(request.body.decode("utf-8") or "{}")
    except Exception:
        return JsonResponse({"ok": False, "error": "invalid_json"}, status=400)

    assistant = _get_assistant_for_req(env)
    if not assistant:
        return JsonResponse({"ok": False, "error": "assistant_not_found"}, status=400)

    # Pull user text (or caption) from req.v1
    inp = env.get("input") or {}
    user_text = (inp.get("text") or inp.get("caption") or "").strip()

    # If nothing to say, keep it explicit
    if not user_text:
        return JsonResponse({
            "schema_version": "render.v1",
            "messages": [{"type": "text", "text": "No text received."}],
        })

    try:
        svc = LangchainAIService(assistant)
        answer = svc.generate_response(user_text)  # synchronous call
    except Exception as e:
        logger.exception("langchain.chat.error")
        return JsonResponse({"ok": False, "error": f"llm_error:{e.__class__.__name__}"}, status=500)

    # Minimal render.v1
    spec = {
        "schema_version": "render.v1",
        "messages": [
            {"type": "text", "text": str(answer)},
        ],
    }
    return JsonResponse(spec)
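
For reference, a minimal client call against the new endpoint. This is a sketch: the envelope keys mirror what the view reads (bot.username, input.text/caption, context.assistant_name), but the values ("my_bot", "default", the localhost URL) are illustrative assumptions, not canonical req.v1 content.

# Sketch of posting a req.v1 envelope; all values below are placeholders.
import requests

env = {
    "schema_version": "req.v1",
    "bot": {"username": "my_bot"},              # matched against TelegramBot.name, then .username
    "input": {"text": "Hello"},                 # the view also accepts "caption" for media posts
    "context": {"assistant_name": "default"},   # optional fallback when no bot mapping exists
}
resp = requests.post("http://localhost:8000/api/langchain/chat", json=env)
print(resp.status_code, resp.json())
# 200 {"schema_version": "render.v1", "messages": [{"type": "text", "text": "..."}]}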