"""
AI Assistant service — orchestrates LLM conversation with tool use.

Flow:
1. User sends message
2. Build system prompt with SPBU context
3. Send to LLM provider (Gemini or Groq)
4. If LLM returns tool_calls → execute tools → send results back to LLM
5. Return final text response
"""

from __future__ import annotations

import json
import logging
from typing import Any

from sqlalchemy.ext.asyncio import AsyncSession

from app.assistant.providers import get_fallback_provider, get_provider
from app.assistant.tools import TOOL_DEFINITIONS, execute_tool

logger = logging.getLogger(__name__)

SYSTEM_PROMPT = """Kamu adalah asisten AI untuk SPBU Manager, sistem manajemen operasional pompa bensin (SPBU).

Kamu membantu pemilik/pengelola SPBU untuk:
- Melihat data penjualan, stok, expenses, penyetoran
- Memantau status operasional harian
- Mengecek anomali dan masalah
- Menjawab pertanyaan tentang konfigurasi SPBU

Aturan:
- Jawab dalam Bahasa Indonesia yang ramah dan profesional
- Gunakan tools yang tersedia untuk mendapat data terkini — jangan menebak angka
- Jika data tidak tersedia, sampaikan dengan jelas
- Format angka: gunakan pemisah ribuan titik dan desimal koma (contoh: 12.450,5 liter, Rp 187.450.000)
- Jika user bertanya tanpa menyebut tanggal, asumsikan hari ini
- Berikan insight singkat setelah menampilkan data jika ada hal yang perlu diperhatikan
"""

MAX_TOOL_ROUNDS = 3  # prevent infinite loops


async def chat(
    db: AsyncSession,
    spbu_id: int,
    user_message: str,
    conversation_history: list[dict[str, str]] | None = None,
) -> dict[str, Any]:
    """
    Process a user message and return the AI response.

    Tries the primary provider first; on any failure retries once with the
    fallback provider (if configured) using a *clean* message history and an
    empty tools log, so the report never mixes state from the failed run.

    Args:
        db: Async session used by tool execution.
        spbu_id: Station whose data the tools query.
        user_message: The user's latest message.
        conversation_history: Prior turns as {"role", "content"} dicts;
            never mutated (copied internally).

    Returns:
        {
            "response": str,      # final assistant text
            "tools_used": [str],  # tools executed by the successful run
            "provider": str,      # provider name, or "none" on total failure
        }
    """
    unavailable: dict[str, Any] = {
        "response": "Maaf, asisten sedang tidak tersedia. Silakan coba lagi nanti.",
        "tools_used": [],
        "provider": "none",
    }

    provider = get_provider()
    tools_used: list[str] = []

    # Immutable snapshot of the starting history; each provider attempt gets
    # its own working copy because _run_conversation appends to it.
    base_messages = list(conversation_history or [])
    base_messages.append({"role": "user", "content": user_message})

    try:
        result = await _run_conversation(
            db, spbu_id, provider, list(base_messages), tools_used
        )
    except Exception as e:
        logger.warning("Primary provider (%s) failed: %s", provider.name(), e)
        fallback = get_fallback_provider(provider)
        if fallback is None:
            return unavailable

        logger.info("Switching to fallback provider: %s", fallback.name())
        # Discard tool names recorded by the failed primary attempt so the
        # report reflects only the run that produced the final answer.
        tools_used.clear()
        try:
            result = await _run_conversation(
                db, spbu_id, fallback, list(base_messages), tools_used
            )
        except Exception:
            # logger.exception captures the traceback automatically.
            logger.exception("Fallback provider also failed")
            return unavailable
        provider = fallback

    return {
        "response": result,
        "tools_used": tools_used,
        "provider": provider.name(),
    }


async def _run_conversation(
    db: AsyncSession,
    spbu_id: int,
    provider,
    messages: list[dict[str, str]],
    tools_used: list[str],
) -> str:
    """
    Run the LLM conversation loop, executing requested tools between rounds.

    Mutates `messages` (appends assistant turns and tool-result turns) and
    `tools_used` (appends the name of every tool actually executed).

    Returns the final assistant text, or a generic apology (Indonesian) if
    the round budget runs out without a plain-text answer.

    Raises:
        Whatever `provider.chat` raises — callers handle provider failure.
    """
    content = ""  # defensive: guarantees the post-loop return is always bound

    for round_num in range(MAX_TOOL_ROUNDS + 1):
        llm_response = await provider.chat(
            messages=messages,
            tools=TOOL_DEFINITIONS,
            system_prompt=SYSTEM_PROMPT,
        )

        content = llm_response.get("content", "")
        tool_calls = llm_response.get("tool_calls", [])

        # Plain-text answer: we are done.
        if not tool_calls:
            return content

        if round_num == MAX_TOOL_ROUNDS:
            # Round budget exhausted: there is no round left to feed tool
            # results back to the LLM, so executing them now would be wasted
            # work and would list tools whose output was never used.
            break

        # Execute tool calls; individual failures become error payloads so
        # one bad tool doesn't abort the whole round.
        tool_results = []
        for tc in tool_calls:
            tool_name = tc["name"]
            tool_args = tc.get("arguments", {})
            tools_used.append(tool_name)

            try:
                result = await execute_tool(db, spbu_id, tool_name, tool_args)
            except Exception as e:
                result = {"error": str(e)}

            tool_results.append({
                "tool": tool_name,
                "result": result,
            })

        # Keep any interim assistant text in the history.
        if content:
            messages.append({"role": "assistant", "content": content})

        # Feed tool results back as a user message — the simplest approach
        # that works identically across providers (no native tool-role turn).
        # default=str makes dates/Decimals JSON-serializable.
        results_text = "\n".join(
            f"[Hasil {tr['tool']}]: {json.dumps(tr['result'], ensure_ascii=False, default=str)}"
            for tr in tool_results
        )
        messages.append({
            "role": "user",
            "content": f"Berikut hasil dari tools yang kamu panggil:\n{results_text}\n\nBerikan jawaban berdasarkan data di atas.",
        })

    # Rounds exhausted: return the last text we got, or a generic apology.
    return content or "Maaf, terjadi kesalahan dalam memproses permintaan."
