From be3481a7079076d144ac31679e0c2f6fcd7d2bdd Mon Sep 17 00:00:00 2001
From: Ekaropolus
Date: Wed, 17 Sep 2025 14:48:20 -0600
Subject: [PATCH] API for langchain r1

---
 polisplexity/urls.py       |  1 +
 pxy_langchain/api/urls.py  |  7 ++++
 pxy_langchain/api/views.py | 85 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 93 insertions(+)
 create mode 100644 pxy_langchain/api/urls.py
 create mode 100644 pxy_langchain/api/views.py

diff --git a/polisplexity/urls.py b/polisplexity/urls.py
index 65f47ec..f365ffd 100644
--- a/polisplexity/urls.py
+++ b/polisplexity/urls.py
@@ -46,6 +46,7 @@ urlpatterns = [
     path("share/", include("pxy_dashboard.share_urls")), # ← NEW
     path("api/", include("pxy_bots.api.urls")),
+    path("api/langchain/", include("pxy_langchain.api.urls")),
diff --git a/pxy_langchain/api/urls.py b/pxy_langchain/api/urls.py
new file mode 100644
index 0000000..63a418a
--- /dev/null
+++ b/pxy_langchain/api/urls.py
@@ -0,0 +1,7 @@
+# pxy_langchain/api/urls.py
+from django.urls import path
+from .views import chat
+
+urlpatterns = [
+    path("chat", chat, name="langchain_chat_api"),
+]
diff --git a/pxy_langchain/api/views.py b/pxy_langchain/api/views.py
new file mode 100644
index 0000000..f6d8afb
--- /dev/null
+++ b/pxy_langchain/api/views.py
@@ -0,0 +1,85 @@
+# pxy_langchain/api/views.py
+import json
+import logging
+from typing import Optional
+
+from django.http import JsonResponse, HttpResponse
+from django.views.decorators.csrf import csrf_exempt
+
+from pxy_langchain.models import AIAssistant
+from pxy_langchain.services import LangchainAIService
+
+# We read the bot->assistant mapping from pxy_bots, so bots can decide which assistant to use.
+from pxy_bots.models import TelegramBot
+
+logger = logging.getLogger(__name__)
+
+
+def _get_assistant_for_req(req: dict) -> Optional[AIAssistant]:
+    """
+    Resolve which AIAssistant to use:
+    1) Try via TelegramBot (req['bot']['username'] matches TelegramBot.name or .username).
+    2) Fallback: optional req['context']['assistant_name'].
+    """
+    bot_username = (((req.get("bot") or {}).get("username")) or "").strip()
+    if bot_username:
+        # Try Bot.name first (that’s how the req.v1 envelope is built), then .username
+        bot = (TelegramBot.objects.filter(name=bot_username).first() or
+               TelegramBot.objects.filter(username=bot_username).first())
+        if bot and bot.assistant_id:
+            return bot.assistant
+
+    # Fallback: explicit assistant name (optional)
+    ctx = req.get("context") or {}
+    assistant_name = (ctx.get("assistant_name") or "").strip()
+    if assistant_name:
+        return AIAssistant.objects.filter(name=assistant_name).first()
+
+    return None
+
+
+@csrf_exempt
+def chat(request):
+    """
+    POST /api/langchain/chat
+    Body: req.v1 (canonical envelope produced by pxy_bots)
+    Returns: render.v1 (text message with the LLM answer)
+    """
+    if request.method != "POST":
+        return HttpResponse(status=405)
+
+    try:
+        env = json.loads(request.body.decode("utf-8") or "{}")
+    except Exception:
+        return JsonResponse({"ok": False, "error": "invalid_json"}, status=400)
+
+    assistant = _get_assistant_for_req(env)
+    if not assistant:
+        return JsonResponse({"ok": False, "error": "assistant_not_found"}, status=400)
+
+    # Pull user text (or caption) from req.v1
+    inp = env.get("input") or {}
+    user_text = (inp.get("text") or inp.get("caption") or "").strip()
+
+    # No usable text in the envelope: reply with an explicit notice instead of calling the LLM
+    if not user_text:
+        return JsonResponse({
+            "schema_version": "render.v1",
+            "messages": [{"type": "text", "text": "No text received."}]
+        })
+
+    try:
+        svc = LangchainAIService(assistant)
+        answer = svc.generate_response(user_text)  # synchronous call
+    except Exception as e:
+        logger.exception("langchain.chat.error")
+        return JsonResponse({"ok": False, "error": f"llm_error:{e.__class__.__name__}"}, status=500)
+
+    # Minimal render.v1
+    spec = {
+        "schema_version": "render.v1",
+        "messages": [
+            {"type": "text", "text": str(answer)}
+        ]
+    }
+    return JsonResponse(spec)
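Testing note (not part of the commit): a minimal sketch of how the new endpoint could be exercised once this patch is applied. The envelope below sets only the req.v1 fields the view actually reads (bot.username, input.text/caption, and the optional context.assistant_name fallback); the bot name, prompt text, and local URL are placeholders, and the requests package is assumed to be available in the dev environment.

# try_langchain_chat.py -- run against a local `python manage.py runserver`
import requests

envelope = {
    # Only the keys read by pxy_langchain.api.views.chat; the full req.v1
    # envelope produced by pxy_bots may carry additional fields.
    "bot": {"username": "my_city_bot"},            # must match TelegramBot.name or .username
    "input": {"text": "Hello from the API test"},  # input.caption works as a fallback
    # "context": {"assistant_name": "default"},    # optional explicit AIAssistant.name
}

resp = requests.post(
    "http://localhost:8000/api/langchain/chat",
    json=envelope,
    timeout=60,
)
print(resp.status_code)
print(resp.json())
# Expected success shape:
# {"schema_version": "render.v1", "messages": [{"type": "text", "text": "..."}]}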