import asyncio
import base64
import hashlib
import hmac
import json
import os
import sqlite3
import time
from contextlib import asynccontextmanager

import httpx
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.tools.mcp import MCPTools
from agno.tools.function import Function
from fastapi import FastAPI, Request
from fastapi.responses import FileResponse, JSONResponse
from fastapi.routing import APIRoute
from fastapi.staticfiles import StaticFiles

import fathom_service


# --- Slack cache (SQLite) ---
_SLACK_CACHE_DB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "slack_cache.db")
_TEAM_ID = lambda: os.getenv("SLACK_TEAM_ID", "")


def _init_cache() -> None:
    """Create the Slack cache tables (channels, users) if they don't exist.

    Idempotent: uses CREATE TABLE IF NOT EXISTS, so it is safe to call
    repeatedly (it is also used to self-heal after OperationalError).
    """
    conn = sqlite3.connect(_SLACK_CACHE_DB)
    try:
        conn.executescript("""
            CREATE TABLE IF NOT EXISTS channels (
                id TEXT PRIMARY KEY, name TEXT, is_private INTEGER, is_member INTEGER,
                num_members INTEGER, synced_at REAL
            );
            CREATE TABLE IF NOT EXISTS users (
                id TEXT PRIMARY KEY, real_name TEXT, display_name TEXT, synced_at REAL
            );
        """)
    finally:
        # Always release the connection, even if the DDL fails.
        conn.close()


# Make sure the cache schema exists as soon as this module is imported.
_init_cache()


# Only these table names may be interpolated into SQL below.
_CACHE_TABLES = frozenset({"channels", "users"})


def _cache_has_data(table: str) -> bool:
    """Return True if the given cache table exists and contains at least one row.

    If the table is missing (e.g. the DB file was deleted), the schema is
    rebuilt and False is returned. Raises ValueError for any table name
    outside the known cache schema, since the name is interpolated directly
    into the SQL string and must never be attacker-controlled.
    """
    if table not in _CACHE_TABLES:
        raise ValueError(f"Unknown cache table: {table!r}")
    conn = sqlite3.connect(_SLACK_CACHE_DB)
    try:
        row = conn.execute(f"SELECT COUNT(*) FROM {table}").fetchone()
        return row[0] > 0
    except sqlite3.OperationalError:
        # Table missing — recreate the schema and report "no data yet".
        _init_cache()
        return False
    finally:
        conn.close()


# --- Slack API helpers ---
_slack_headers = lambda: {"Authorization": f"Bearer {os.getenv('SLACK_BOT_TOKEN', '')}"}


def _slack_link(channel_id: str, channel_name: str) -> str:
    """Return a markdown link for a channel that opens in the Slack app."""
    client_url = f"https://app.slack.com/client/{_TEAM_ID()}/{channel_id}"
    return f"[#{channel_name}]({client_url})"


def _slack_msg_link(channel_id: str, msg_ts: str, channel_name: str = "") -> str:
    """Generate a link to a specific Slack message."""
    ts_clean = msg_ts.replace(".", "")
    label = f"#{channel_name}" if channel_name else "view"
    return f"[{label}](https://app.slack.com/archives/{channel_id}/p{ts_clean})"


# --- Slack tools ---
def slack_list_channels(limit: int = 100) -> str:
    """List all Slack channels (public AND private). Uses cache if available — call slack_resync to refresh."""
    # No cache yet: go to the Slack API and populate it.
    if not _cache_has_data("channels"):
        return _fetch_and_cache_channels(limit)
    conn = sqlite3.connect(_SLACK_CACHE_DB)
    rows = conn.execute(
        "SELECT id, name, is_private, is_member, num_members FROM channels ORDER BY name LIMIT ?",
        (limit,),
    ).fetchall()
    conn.close()
    channels = [
        {"id": cid, "name": name, "is_private": bool(private), "is_member": bool(member), "num_members": members}
        for cid, name, private, member, members in rows
    ]
    return json.dumps({"ok": True, "count": len(channels), "source": "cache", "channels": channels})


def _fetch_and_cache_channels(limit: int = 200) -> str:
    """Fetch all non-archived channels from Slack, cache them, and return JSON.

    Pages through conversations.list (public + private) until the cursor is
    exhausted or `limit` channels have been collected, then replaces the
    channel cache wholesale. On a Slack API error, returns the raw error
    payload as JSON without touching the cache.
    """
    _init_cache()
    channels, cursor = [], None
    while True:
        params = {"types": "public_channel,private_channel", "limit": min(limit, 200), "exclude_archived": "true"}
        if cursor:
            params["cursor"] = cursor
        # timeout=15 for consistency with the other Slack HTTP calls in this module.
        data = httpx.get("https://slack.com/api/conversations.list", headers=_slack_headers(), params=params, timeout=15).json()
        if not data.get("ok"):
            return json.dumps(data)
        for c in data.get("channels", []):
            channels.append((c["id"], c["name"], int(c.get("is_private", False)), int(c.get("is_member", False)), c.get("num_members", 0), time.time()))
        cursor = data.get("response_metadata", {}).get("next_cursor")
        if not cursor or len(channels) >= limit:
            break
    conn = sqlite3.connect(_SLACK_CACHE_DB)
    try:
        conn.execute("DELETE FROM channels")
        conn.executemany("INSERT OR REPLACE INTO channels VALUES (?,?,?,?,?,?)", channels)
        conn.commit()
    finally:
        # Close even if the rewrite fails, so the DB file isn't held open.
        conn.close()
    result = [{"id": c[0], "name": c[1], "is_private": bool(c[2]), "is_member": bool(c[3]), "num_members": c[4]} for c in channels]
    return json.dumps({"ok": True, "count": len(result), "source": "fresh", "channels": result})


def slack_get_users() -> str:
    """Get all workspace users. Uses cache if available — call slack_resync to refresh."""
    # Empty cache: fetch from the Slack API and populate it.
    if not _cache_has_data("users"):
        return _fetch_and_cache_users()
    conn = sqlite3.connect(_SLACK_CACHE_DB)
    rows = conn.execute("SELECT id, real_name, display_name FROM users ORDER BY real_name").fetchall()
    conn.close()
    cached = [{"id": uid, "real_name": real, "display_name": display} for uid, real, display in rows]
    return json.dumps({"ok": True, "source": "cache", "users": cached})


def _fetch_and_cache_users() -> str:
    """Fetch all active, non-bot users from Slack, cache them, and return JSON.

    Pages through users.list, skipping deleted accounts and bots, then
    replaces the user cache wholesale. On a Slack API error, returns the raw
    error payload as JSON without touching the cache.
    """
    _init_cache()
    users, cursor = [], None
    while True:
        params = {"limit": 200}
        if cursor:
            params["cursor"] = cursor
        # timeout=15 for consistency with the other Slack HTTP calls in this module.
        data = httpx.get("https://slack.com/api/users.list", headers=_slack_headers(), params=params, timeout=15).json()
        if not data.get("ok"):
            return json.dumps(data)
        for u in data.get("members", []):
            if u.get("deleted") or u.get("is_bot"):
                continue
            profile = u.get("profile", {})
            # Prefer the profile real_name; fall back to the top-level field.
            users.append((u["id"], profile.get("real_name", u.get("real_name", "")), profile.get("display_name", ""), time.time()))
        cursor = data.get("response_metadata", {}).get("next_cursor")
        if not cursor:
            break
    conn = sqlite3.connect(_SLACK_CACHE_DB)
    try:
        conn.execute("DELETE FROM users")
        conn.executemany("INSERT OR REPLACE INTO users VALUES (?,?,?,?)", users)
        conn.commit()
    finally:
        # Close even if the rewrite fails, so the DB file isn't held open.
        conn.close()
    result = [{"id": u[0], "real_name": u[1], "display_name": u[2]} for u in users]
    return json.dumps({"ok": True, "source": "fresh", "users": result})


def slack_resync() -> str:
    """Clear the channel and user cache, then re-fetch everything from Slack. Use when the user asks to resync or when data seems stale."""
    conn = sqlite3.connect(_SLACK_CACHE_DB)
    try:
        conn.execute("DELETE FROM channels")
        conn.execute("DELETE FROM users")
        conn.commit()
    finally:
        conn.close()
    ch_result = json.loads(_fetch_and_cache_channels(200))
    usr_result = json.loads(_fetch_and_cache_users())
    # Propagate failure from either fetch — the original always reported ok=True
    # even when Slack returned an error payload.
    return json.dumps({
        "ok": bool(ch_result.get("ok")) and bool(usr_result.get("ok")),
        "channels_synced": ch_result.get("count", 0),
        "users_synced": len(usr_result.get("users", [])),
    })


def slack_join_channel(channel_id: str) -> str:
    """Join a public Slack channel by its ID. Call this before reading history if is_member is false for a public channel."""
    resp = httpx.post(
        "https://slack.com/api/conversations.join",
        headers={**_slack_headers(), "Content-Type": "application/json"},
        json={"channel": channel_id},
        timeout=15,  # consistent with the other Slack HTTP calls in this module
    )
    # Only flip is_member in the cache when Slack confirms the join succeeded.
    # The original updated the cache unconditionally, which could mark channels
    # as joined after a failed API call.
    try:
        joined = bool(resp.json().get("ok"))
    except ValueError:
        joined = False  # non-JSON response — treat as failure
    if joined:
        conn = sqlite3.connect(_SLACK_CACHE_DB)
        try:
            conn.execute("UPDATE channels SET is_member = 1 WHERE id = ?", (channel_id,))
            conn.commit()
        finally:
            conn.close()
    # Return the raw response body, as before.
    return resp.text


def slack_get_channel_history(channel_id: str, limit: int = 20) -> str:
    """Get recent messages from a Slack channel. Returns messages with ts, user, text, reply_count, thread_ts, and a slack_link to open the channel."""
    data = httpx.get("https://slack.com/api/conversations.history", headers=_slack_headers(), params={"channel": channel_id, "limit": limit}).json()
    if not data.get("ok"):
        return json.dumps(data)

    # Prefer the cached human-readable channel name; fall back to the raw ID.
    conn = sqlite3.connect(_SLACK_CACHE_DB)
    name_row = conn.execute("SELECT name FROM channels WHERE id = ?", (channel_id,)).fetchone()
    conn.close()
    channel_name = name_row[0] if name_row else channel_id

    formatted = []
    for raw in data.get("messages", []):
        ts = raw.get("ts", "")
        entry = {
            "ts": ts,
            "user": raw.get("user", ""),
            "text": raw.get("text", ""),
            "type": raw.get("subtype", "message"),
            # Archive links use the ts with the dot removed, prefixed with 'p'.
            "link": f"https://app.slack.com/archives/{channel_id}/p{ts.replace('.', '')}",
        }
        if raw.get("reply_count", 0) > 0:
            entry["reply_count"] = raw["reply_count"]
            entry["thread_ts"] = raw.get("thread_ts", ts)
        formatted.append(entry)

    return json.dumps({
        "ok": True,
        "channel": channel_id,
        "channel_name": channel_name,
        "channel_link": f"https://app.slack.com/client/{_TEAM_ID()}/{channel_id}",
        "messages": formatted,
    })


def slack_get_thread_replies(channel_id: str, thread_ts: str, limit: int = 20) -> str:
    """Get replies in a Slack thread. Provide the channel_id and the thread_ts of the parent message."""
    query = {"channel": channel_id, "ts": thread_ts, "limit": limit}
    data = httpx.get("https://slack.com/api/conversations.replies", headers=_slack_headers(), params=query).json()
    if not data.get("ok"):
        return json.dumps(data)
    reply_items = []
    for message in data.get("messages", []):
        reply_items.append({"ts": message.get("ts"), "user": message.get("user", ""), "text": message.get("text", "")})
    # Archive links use the ts with the dot removed, prefixed with 'p'.
    permalink = f"https://app.slack.com/archives/{channel_id}/p{thread_ts.replace('.', '')}"
    return json.dumps({
        "ok": True,
        "thread_ts": thread_ts,
        "thread_link": permalink,
        "replies": reply_items,
    })


def slack_post_message(channel_id: str, text: str, thread_ts: str = "") -> str:
    """Post a message to a Slack channel. Optionally reply in a thread by providing thread_ts."""
    body = {"channel": channel_id, "text": text}
    if thread_ts:
        body["thread_ts"] = thread_ts
    headers = {**_slack_headers(), "Content-Type": "application/json"}
    resp = httpx.post("https://slack.com/api/chat.postMessage", headers=headers, json=body)
    return resp.text


# Tool registry handed to the Slack agent below; each Function wraps one of
# the plain-Python tool callables above (the Function name is what the model
# sees when choosing a tool).
SLACK_TOOLS = [
    Function(name="slack_list_channels", entrypoint=slack_list_channels),
    Function(name="slack_join_channel", entrypoint=slack_join_channel),
    Function(name="slack_get_channel_history", entrypoint=slack_get_channel_history),
    Function(name="slack_get_thread_replies", entrypoint=slack_get_thread_replies),
    Function(name="slack_get_users", entrypoint=slack_get_users),
    Function(name="slack_post_message", entrypoint=slack_post_message),
    Function(name="slack_resync", entrypoint=slack_resync),
]


def _lookup_slack_user_id_by_email(email: str) -> str | None:
    """Look up a Slack user ID by email: API first, then the local cache.

    The cache fallback matches the email's local part (before '@') against
    cached real/display names. This is a heuristic — it can miss or, for
    ambiguous names, return the wrong user. Returns None when no match.
    """
    # Try Slack API first.
    try:
        resp = httpx.get("https://slack.com/api/users.lookupByEmail",
                         headers=_slack_headers(), params={"email": email}, timeout=15)
        data = resp.json()
        if data.get("ok"):
            return data["user"]["id"]
    except Exception:
        pass  # best-effort: fall through to the local cache
    # Fallback: search local Slack user cache by matching name/email heuristic
    try:
        conn = sqlite3.connect(_SLACK_CACHE_DB)
        conn.row_factory = sqlite3.Row
        # email prefix match (e.g. "shirsendu" from "shirsendu@codebuddy.co")
        prefix = email.split("@")[0].lower()
        try:
            rows = conn.execute("SELECT id, real_name, display_name FROM users").fetchall()
        finally:
            # Close even if the query fails (the original leaked the connection).
            conn.close()
        for r in rows:
            real = (r["real_name"] or "").lower()
            display = (r["display_name"] or "").lower()
            # `real` and `display` are already lower-cased above.
            if prefix in real.replace(" ", "") or prefix == display:
                return r["id"]
    except Exception:
        pass  # best-effort: treat any cache problem as "not found"
    return None


def slack_dm_by_email(email: str, text: str) -> str:
    """Open a DM with a user by email and send them a message."""
    user_id = _lookup_slack_user_id_by_email(email)
    if not user_id:
        return json.dumps({"ok": False, "error": f"User lookup failed for {email}: not found via API or cache"})
    # conversations.open returns the (possibly pre-existing) DM channel.
    open_resp = httpx.post(
        "https://slack.com/api/conversations.open",
        headers={**_slack_headers(), "Content-Type": "application/json"},
        json={"users": user_id},
        timeout=15,
    )
    open_data = open_resp.json()
    if not open_data.get("ok"):
        return json.dumps({"ok": False, "error": f"DM open failed: {open_data.get('error', 'unknown')}"})
    return slack_post_message(open_data["channel"]["id"], text)


# --- ClickUp API helpers ---
_CLICKUP_BASE = "https://api.clickup.com/api/v2"
_clickup_headers = lambda: {"Authorization": os.getenv("CLICKUP_API_TOKEN", ""), "Content-Type": "application/json"}
_clickup_team_id = lambda: os.getenv("CLICKUP_TEAM_ID", "")


def clickup_get_spaces() -> str:
    """List all spaces in the workspace. Returns space names and IDs."""
    url = f"{_CLICKUP_BASE}/team/{_clickup_team_id()}/space"
    payload = httpx.get(url, headers=_clickup_headers(), params={"archived": "false"}).json()
    spaces = []
    for space in payload.get("spaces", []):
        spaces.append({"id": space["id"], "name": space["name"]})
    return json.dumps({"ok": True, "spaces": spaces})


def clickup_get_folders(space_id: str) -> str:
    """List all folders in a space. Provide space_id (get it from clickup_get_spaces)."""
    url = f"{_CLICKUP_BASE}/space/{space_id}/folder"
    payload = httpx.get(url, headers=_clickup_headers(), params={"archived": "false"}).json()
    folders = []
    for folder in payload.get("folders", []):
        folders.append({"id": folder["id"], "name": folder["name"]})
    return json.dumps({"ok": True, "space_id": space_id, "folders": folders})


def clickup_get_lists(folder_id: str) -> str:
    """List all lists in a folder. Provide folder_id (get it from clickup_get_folders)."""
    url = f"{_CLICKUP_BASE}/folder/{folder_id}/list"
    payload = httpx.get(url, headers=_clickup_headers(), params={"archived": "false"}).json()
    # `item` instead of the ambiguous single-letter name used elsewhere.
    lists = [{"id": item["id"], "name": item["name"]} for item in payload.get("lists", [])]
    return json.dumps({"ok": True, "folder_id": folder_id, "lists": lists})


def clickup_get_folderless_lists(space_id: str) -> str:
    """List all lists that are directly in a space (not inside any folder). Provide space_id."""
    url = f"{_CLICKUP_BASE}/space/{space_id}/list"
    payload = httpx.get(url, headers=_clickup_headers(), params={"archived": "false"}).json()
    lists = [{"id": item["id"], "name": item["name"]} for item in payload.get("lists", [])]
    return json.dumps({"ok": True, "space_id": space_id, "lists": lists})


def clickup_get_tasks(list_id: str, assignee_email: str = "", include_closed: bool = False) -> str:
    """Get tasks from a list. Optionally filter by assignee email. Set include_closed=True to include completed tasks."""
    query = {"archived": "false", "include_closed": str(include_closed).lower()}
    payload = httpx.get(f"{_CLICKUP_BASE}/list/{list_id}/task", headers=_clickup_headers(), params=query).json()
    tasks = []
    for task in payload.get("tasks", []):
        people = [{"username": a.get("username", ""), "email": a.get("email", "")} for a in task.get("assignees", [])]
        # Assignee filtering is done client-side on the fetched payload.
        if assignee_email and all(p["email"] != assignee_email for p in people):
            continue
        tasks.append({
            "id": task["id"],
            "name": task["name"],
            "status": task.get("status", {}).get("status", ""),
            "priority": (task.get("priority") or {}).get("priority", "none"),
            "assignees": people,
            "due_date": task.get("due_date"),
            "url": task.get("url", ""),
        })
    return json.dumps({"ok": True, "list_id": list_id, "count": len(tasks), "tasks": tasks})


def clickup_search_tasks(query: str, include_closed: bool = False) -> str:
    """Search for tasks by name across the entire workspace. Use this when you don't know which list a task is in."""
    search_params = {"name": query, "include_closed": str(include_closed).lower()}
    payload = httpx.get(f"{_CLICKUP_BASE}/team/{_clickup_team_id()}/task", headers=_clickup_headers(), params=search_params).json()
    results = []
    for task in payload.get("tasks", []):
        results.append({
            "id": task["id"],
            "name": task["name"],
            "status": task.get("status", {}).get("status", ""),
            "priority": (task.get("priority") or {}).get("priority", "none"),
            "assignees": [{"username": a.get("username", ""), "email": a.get("email", "")} for a in task.get("assignees", [])],
            "list": task.get("list", {}).get("name", ""),
            "folder": task.get("folder", {}).get("name", ""),
            "space": task.get("space", {}).get("name", ""),
            "due_date": task.get("due_date"),
            "url": task.get("url", ""),
        })
    return json.dumps({"ok": True, "count": len(results), "tasks": results})


def clickup_get_task(task_id: str) -> str:
    """Get full details of a single task by its ID."""
    task = httpx.get(f"{_CLICKUP_BASE}/task/{task_id}", headers=_clickup_headers()).json()
    # ClickUp error payloads carry an "err" key instead of task fields.
    if "err" in task:
        return json.dumps({"ok": False, "error": task.get("err", "Unknown error")})
    detail = {
        "id": task["id"],
        "name": task["name"],
        "description": task.get("description", ""),
        "status": task.get("status", {}).get("status", ""),
        "priority": (task.get("priority") or {}).get("priority", "none"),
        "assignees": [{"username": a.get("username", ""), "email": a.get("email", "")} for a in task.get("assignees", [])],
        "due_date": task.get("due_date"),
        "date_created": task.get("date_created"),
        "list": task.get("list", {}).get("name", ""),
        "folder": task.get("folder", {}).get("name", ""),
        "space": task.get("space", {}).get("name", ""),
        "url": task.get("url", ""),
        "tags": [tag.get("name", "") for tag in task.get("tags", [])],
    }
    return json.dumps({"ok": True, "task": detail})


def clickup_create_task(list_id: str, name: str, description: str = "", assignee_email: str = "", priority: int = 0, due_date: str = "") -> str:
    """Create a task in a list. priority: 1=urgent, 2=high, 3=normal, 4=low, 0=none. due_date: Unix timestamp in ms."""
    body = {"name": name}
    if description:
        body["description"] = description
    if priority:
        body["priority"] = priority
    if due_date:
        body["due_date"] = due_date
    if assignee_email:
        # Resolve the email to a ClickUp user ID via the team member list.
        team_info = httpx.get(f"{_CLICKUP_BASE}/team/{_clickup_team_id()}", headers=_clickup_headers()).json()
        members = team_info.get("team", {}).get("members", [])
        match = next((m for m in members if m.get("user", {}).get("email") == assignee_email), None)
        if match is not None:
            body["assignees"] = [match["user"]["id"]]
    created = httpx.post(f"{_CLICKUP_BASE}/list/{list_id}/task", headers=_clickup_headers(), json=body).json()
    if "err" in created:
        return json.dumps({"ok": False, "error": created.get("err", "Unknown error")})
    return json.dumps({"ok": True, "task_id": created.get("id"), "url": created.get("url", "")})


def clickup_update_task(task_id: str, name: str = "", description: str = "", status: str = "", priority: int = -1, due_date: str = "") -> str:
    """Update a task. Only provide fields you want to change. priority: 1=urgent, 2=high, 3=normal, 4=low, 0=none, -1=don't change."""
    changes = {}
    if name:
        changes["name"] = name
    if description:
        changes["description"] = description
    if status:
        changes["status"] = status
    # priority 0 ("none") is a valid value, so only -1 means "leave unchanged".
    if priority >= 0:
        changes["priority"] = priority
    if due_date:
        changes["due_date"] = due_date
    if not changes:
        return json.dumps({"ok": False, "error": "No fields to update"})
    updated = httpx.put(f"{_CLICKUP_BASE}/task/{task_id}", headers=_clickup_headers(), json=changes).json()
    if "err" in updated:
        return json.dumps({"ok": False, "error": updated.get("err", "Unknown error")})
    return json.dumps({"ok": True, "task_id": updated.get("id"), "url": updated.get("url", "")})


def clickup_get_task_comments(task_id: str) -> str:
    """Get comments on a task."""
    payload = httpx.get(f"{_CLICKUP_BASE}/task/{task_id}/comment", headers=_clickup_headers()).json()
    comments = []
    for entry in payload.get("comments", []):
        comments.append({
            "id": entry.get("id"),
            "text": entry.get("comment_text", ""),
            "user": entry.get("user", {}).get("username", ""),
            "date": entry.get("date"),
        })
    return json.dumps({"ok": True, "comments": comments})


def clickup_add_task_comment(task_id: str, comment_text: str) -> str:
    """Add a comment to a task."""
    reply = httpx.post(f"{_CLICKUP_BASE}/task/{task_id}/comment", headers=_clickup_headers(), json={"comment_text": comment_text}).json()
    if "err" not in reply:
        return json.dumps({"ok": True})
    return json.dumps({"ok": False, "error": reply.get("err", "Unknown error")})


def _clickup_resolve_project_lists(name_lower: str, team_id: str, headers: dict) -> tuple[list, str]:
    """Resolve a lowercase project name to (list_ids, human-readable location).

    Search order within each space: the space name itself, then folder names,
    then folderless list names. The first level that yields any list IDs wins
    and the search stops; a space-name match collects every list in the space.
    """
    params = {"archived": "false"}
    matched_list_ids = []
    matched_location = ""
    spaces_resp = httpx.get(f"{_CLICKUP_BASE}/team/{team_id}/space", headers=headers, params=params).json()
    for space in spaces_resp.get("spaces", []):
        space_name = space.get("name", "")
        space_id = space["id"]

        if name_lower in space_name.lower():
            # Whole space matches: gather lists from every folder plus folderless lists.
            matched_location = f"space:{space_name}"
            folders_resp = httpx.get(f"{_CLICKUP_BASE}/space/{space_id}/folder", headers=headers, params=params).json()
            for folder in folders_resp.get("folders", []):
                lists_resp = httpx.get(f"{_CLICKUP_BASE}/folder/{folder['id']}/list", headers=headers, params=params).json()
                matched_list_ids.extend(lst["id"] for lst in lists_resp.get("lists", []))
            fl_resp = httpx.get(f"{_CLICKUP_BASE}/space/{space_id}/list", headers=headers, params=params).json()
            matched_list_ids.extend(lst["id"] for lst in fl_resp.get("lists", []))
            break

        # Check folders within this space.
        folders_resp = httpx.get(f"{_CLICKUP_BASE}/space/{space_id}/folder", headers=headers, params=params).json()
        for folder in folders_resp.get("folders", []):
            if name_lower in folder.get("name", "").lower():
                matched_location = f"folder:{folder['name']} (in space:{space_name})"
                lists_resp = httpx.get(f"{_CLICKUP_BASE}/folder/{folder['id']}/list", headers=headers, params=params).json()
                matched_list_ids.extend(lst["id"] for lst in lists_resp.get("lists", []))
                break
        if matched_list_ids:
            break

        # Check folderless lists; all matches in this space are collected.
        fl_resp = httpx.get(f"{_CLICKUP_BASE}/space/{space_id}/list", headers=headers, params=params).json()
        for lst in fl_resp.get("lists", []):
            if name_lower in lst.get("name", "").lower():
                matched_location = f"list:{lst['name']} (in space:{space_name})"
                matched_list_ids.append(lst["id"])
        if matched_list_ids:
            break
    return matched_list_ids, matched_location


def _clickup_due_date_window(due_date: str) -> tuple:
    """Convert a YYYY-MM-DD string to a [start, end) UTC window in epoch ms.

    Returns (None, None) when due_date is empty or unparseable, matching the
    original behavior of silently ignoring bad dates.
    """
    from datetime import datetime, timezone

    if not due_date:
        return None, None
    try:
        dt = datetime.strptime(due_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
    except ValueError:
        return None, None
    start_ms = int(dt.timestamp() * 1000)
    return start_ms, start_ms + 86400000  # +24 hours


def clickup_get_project_tasks(project_name: str, due_date: str = "", include_closed: bool = False, include_subtasks: bool = True) -> str:
    """Get ALL tasks under a project/folder/space by name. Resolves the name through the hierarchy automatically.
    This is the BEST tool when the user mentions a project name like 'Intelbuddy' — it finds the matching
    space/folder, gets all lists inside it, and fetches all tasks from every list in one go.
    Optionally filter by due_date (format: YYYY-MM-DD). Set include_subtasks=True to also fetch subtasks."""
    team_id = _clickup_team_id()
    headers = _clickup_headers()

    # Step 1: resolve the name to a set of list IDs.
    matched_list_ids, matched_location = _clickup_resolve_project_lists(project_name.lower(), team_id, headers)
    if not matched_list_ids:
        return json.dumps({"ok": False, "error": f"Could not find project/folder/list matching '{project_name}'"})

    # Step 2: fetch all tasks from the matched lists.
    due_start_ms, due_end_ms = _clickup_due_date_window(due_date)
    all_tasks = []
    for list_id in matched_list_ids:
        params = {"archived": "false", "include_closed": str(include_closed).lower()}
        if include_subtasks:
            params["subtasks"] = "true"
        # `is not None` (not truthiness) so a window starting at epoch 0 still filters.
        if due_start_ms is not None:
            # -1 so the start timestamp itself is included — assumes ClickUp's
            # due_date_gt is exclusive (matches the original code's offset).
            params["due_date_gt"] = str(due_start_ms - 1)
            params["due_date_lt"] = str(due_end_ms)
        resp = httpx.get(f"{_CLICKUP_BASE}/list/{list_id}/task", headers=headers, params=params).json()
        for t in resp.get("tasks", []):
            all_tasks.append({
                "id": t["id"], "name": t["name"],
                "status": t.get("status", {}).get("status", ""),
                "priority": (t.get("priority") or {}).get("priority", "none"),
                "assignees": [{"username": a.get("username", ""), "email": a.get("email", "")} for a in t.get("assignees", [])],
                "list": t.get("list", {}).get("name", ""),
                "due_date": t.get("due_date"),
                "url": t.get("url", ""),
                "parent": t.get("parent"),
            })

    return json.dumps({"ok": True, "project": matched_location, "lists_searched": len(matched_list_ids), "count": len(all_tasks), "tasks": all_tasks})


def clickup_get_members() -> str:
    """Get all workspace members with their names and emails. Useful for resolving assignees."""
    payload = httpx.get(f"{_CLICKUP_BASE}/team/{_clickup_team_id()}", headers=_clickup_headers()).json()
    members = []
    for entry in payload.get("team", {}).get("members", []):
        user = entry.get("user", {})
        members.append({"id": user.get("id"), "username": user.get("username", ""), "email": user.get("email", "")})
    return json.dumps({"ok": True, "members": members})


# Tool registry handed to the ClickUp agent below. clickup_get_project_tasks
# is listed first; the agent's instructions steer the model toward it as the
# primary entry point when a project name is mentioned.
CLICKUP_TOOLS = [
    Function(name="clickup_get_project_tasks", entrypoint=clickup_get_project_tasks),
    Function(name="clickup_get_spaces", entrypoint=clickup_get_spaces),
    Function(name="clickup_get_folders", entrypoint=clickup_get_folders),
    Function(name="clickup_get_lists", entrypoint=clickup_get_lists),
    Function(name="clickup_get_folderless_lists", entrypoint=clickup_get_folderless_lists),
    Function(name="clickup_get_tasks", entrypoint=clickup_get_tasks),
    Function(name="clickup_search_tasks", entrypoint=clickup_search_tasks),
    Function(name="clickup_get_task", entrypoint=clickup_get_task),
    Function(name="clickup_create_task", entrypoint=clickup_create_task),
    Function(name="clickup_update_task", entrypoint=clickup_update_task),
    Function(name="clickup_get_task_comments", entrypoint=clickup_get_task_comments),
    Function(name="clickup_add_task_comment", entrypoint=clickup_add_task_comment),
    Function(name="clickup_get_members", entrypoint=clickup_get_members),
]

# --- MCP Tool instances (connected at startup) ---
# Remote MCP server exposing the Agno documentation; wired into the
# "Agno Assist" agent's tools below.
agno_docs_mcp = MCPTools(url="https://docs.agno.com/mcp")


# --- Agent: Agno Assist ---
# Q&A agent over the Agno framework docs, backed by the MCP server above.
agno_assist = Agent(
    name="Agno Assist",
    id="agno-assist",
    model=OpenAIChat(id="gpt-4o"),
    instructions=[
        "You are Agno Assist, a helpful AI assistant with access to Agno documentation.",
        "Answer questions about the Agno framework clearly and concisely.",
    ],
    db=SqliteDb(db_file="agno.db"),  # same session DB is shared by all agents in this file
    tools=[agno_docs_mcp],
    add_datetime_to_context=True,
    add_history_to_context=True,
    num_history_runs=3,
    markdown=True,
)

# --- Agent: ClickUp Assistant ---
# Task-management agent over the CLICKUP_TOOLS defined above. The lengthy
# instruction block exists to bias the model toward the one-shot
# clickup_get_project_tasks tool instead of chaining hierarchy lookups.
clickup_assistant = Agent(
    name="ClickUp Assistant",
    id="clickup-assistant",
    model=OpenAIChat(id="gpt-4o"),
    instructions=[
        "You are a ClickUp assistant. Help the user manage their ClickUp workspace.",
        "Always use your available tools to fetch real data from ClickUp. Never guess or make up information.",
        "",
        "CRITICAL — TOOL SELECTION GUIDE (follow this STRICTLY):",
        "",
        "When the user mentions a PROJECT/FOLDER/SPACE name (e.g. 'Intelbuddy', 'Dragon', 'LAST MILE'):",
        "  => Use clickup_get_project_tasks(project_name, due_date, include_closed, include_subtasks)",
        "  This resolves the name through the hierarchy AND fetches ALL tasks in ONE call.",
        "  It supports due_date filtering (YYYY-MM-DD format) so you get exactly what's needed.",
        "  Example: clickup_get_project_tasks('Intelbuddy', due_date='2026-03-11')",
        "",
        "When the user asks about a specific TASK by name:",
        "  => Use clickup_search_tasks(query) to find it by name across the workspace.",
        "",
        "Only use the step-by-step hierarchy tools (get_spaces > get_folders > get_lists > get_tasks)",
        "when you need to browse/explore the workspace structure, NOT when the user already told you the project name.",
        "",
        "Use clickup_get_members() to resolve email addresses to user IDs for assignee filtering.",
        "Always confirm before performing destructive actions (deleting tasks, etc.).",
    ],
    db=SqliteDb(db_file="agno.db"),  # same session DB is shared by all agents in this file
    tools=CLICKUP_TOOLS,
    add_datetime_to_context=True,
    add_history_to_context=True,
    num_history_runs=3,
    markdown=True,
)

# --- Agent: Slack Assistant ---
# Inbox-triage agent over the SLACK_TOOLS defined above. The instructions
# encode a fixed catch-up workflow (cached channel/user lookups, then history,
# then threads) and a priority rubric for summarizing messages.
slack_assistant = Agent(
    name="Slack Assistant",
    id="slack-assistant",
    model=OpenAIChat(id="gpt-4o"),
    instructions=[
        "You are a Slack assistant that helps the user stay on top of their Slack workspace.",
        "Your main job is to provide a concise summary of unread messages and thread replies.",
        "For each message or thread, assign a priority level:",
        "  - 🔴 HIGH: Direct mentions, urgent requests, blockers, escalations, or action items with deadlines.",
        "  - 🟡 MEDIUM: Team discussions you're part of, FYIs that need acknowledgement, or pending decisions.",
        "  - 🟢 LOW: General chatter, social messages, announcements with no action needed.",
        "When summarizing, use this format for each item:",
        "  **[PRIORITY] #channel-name** — Summary of the message/thread (sender, key point, action needed if any).",
        "Group messages by priority (HIGH first, then MEDIUM, then LOW).",
        "Always fetch real data using your tools. Never guess or fabricate messages.",
        "Channels and users are CACHED locally. slack_list_channels and slack_get_users return instantly from cache after the first call.",
        "If the user asks to resync or refresh, call slack_resync to clear cache and re-fetch from Slack.",
        "When catching up or checking unreads, follow this workflow:",
        "  1. Call slack_list_channels (fast, cached) to get all public + private channels.",
        "  2. Filter to channels where is_member is true. For public channels where is_member is false, call slack_join_channel first.",
        "  3. Call slack_get_channel_history for each member channel (use limit=10-20).",
        "  4. For messages with reply_count > 0, call slack_get_thread_replies to get thread context.",
        "  5. Call slack_get_users (fast, cached) once to resolve user IDs to real names.",
        "  6. Present the summary grouped by priority.",
        "IMPORTANT: When linking to channels or messages, use the 'channel_link' and 'link' fields from tool results as markdown links.",
        "Format channel references as clickable links: [#channel-name](channel_link_url).",
        "If a channel returns an error, skip it and move on.",
    ],
    db=SqliteDb(db_file="agno.db"),  # same session DB is shared by all agents in this file
    tools=SLACK_TOOLS,
    add_datetime_to_context=True,
    add_history_to_context=True,
    num_history_runs=3,
    markdown=True,
)

# --- Fathom MOM generation ---
# System prompt for the minutes-of-meeting generator. NOTE: the {title}/{date}/
# {attendees} placeholders are filled in by the model, not by str.format —
# generate_mom() sends this string verbatim as the system message.
_MOM_SYSTEM_PROMPT = """You are a professional meeting minutes generator. Given a meeting transcript, produce a well-structured Minutes of Meeting (MOM) document.

Format the MOM as follows:

## Minutes of Meeting: {title}

**Date:** {date}
**Attendees:** {attendees}

### Key Discussion Points
- Bullet points of main topics discussed

### Decisions Made
- List of decisions reached during the meeting

### Action Items
- [ ] Action item with owner (if identifiable) and deadline (if mentioned)

### Next Steps / Follow-ups
- Any follow-up items or upcoming meetings mentioned

Keep it concise, professional, and actionable. Focus on extracting the most important information."""


def _openai_chat(messages: list[dict], temperature: float = 0.3, timeout: int = 120, response_format: dict | None = None) -> str:
    """Call the OpenAI chat-completions API and return the reply text.

    Args:
        messages: Chat messages in OpenAI format ({"role": ..., "content": ...}).
        temperature: Sampling temperature passed through to the API.
        timeout: Per-request timeout in seconds.
        response_format: Optional response_format payload, e.g. {"type": "json_object"}.

    Returns:
        The content of the first choice of the completion.

    Raises:
        httpx.HTTPStatusError: For any non-429 HTTP error, or for a 429 that
            persists through all retries.

    Retries with exponential backoff (5s, 10s, 20s) on HTTP 429, then makes a
    final attempt and lets the error propagate.
    """
    payload = {"model": "gpt-4o", "messages": messages, "temperature": temperature}
    if response_format:
        payload["response_format"] = response_format
    headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY', '')}", "Content-Type": "application/json"}

    # Context manager ensures the connection pool is released — the previous
    # version leaked the httpx.Client on every call.
    with httpx.Client(timeout=timeout) as client:
        for attempt in range(4):
            resp = client.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
            # Back off and retry on rate limits, except on the final attempt
            # (which falls through to raise_for_status below).
            if resp.status_code == 429 and attempt < 3:
                wait = min(2 ** attempt * 5, 30)  # 5s, 10s, 20s
                print(f"[OpenAI] Rate limited, retrying in {wait}s (attempt {attempt + 1}/3)")
                time.sleep(wait)
                continue
            resp.raise_for_status()
            return resp.json()["choices"][0]["message"]["content"]
    raise RuntimeError("unreachable: loop always returns or raises")


def generate_mom(title: str, transcript_text: str, attendees: str) -> str:
    """Produce a Minutes-of-Meeting document for a transcript via OpenAI."""
    user_content = (
        f"Meeting Title: {title}\n"
        f"Attendees: {attendees}\n\n"
        f"Transcript:\n{transcript_text}"
    )
    messages = [
        {"role": "system", "content": _MOM_SYSTEM_PROMPT},
        {"role": "user", "content": user_content},
    ]
    return _openai_chat(messages)


# --- Fathom tools (for the agent) ---
def fathom_list_recent_meetings() -> str:
    """List recent meetings from Fathom with their recording IDs and titles."""
    try:
        # Fathom payloads vary in field names, so fall back across aliases.
        summaries = [
            {
                "recording_id": str(m.get("recording_id", m.get("id", ""))),
                "title": m.get("title", m.get("meeting_title", "Untitled")),
                "date": m.get("created_at", m.get("date", "")),
                "url": m.get("url", ""),
            }
            for m in fathom_service.fathom_list_meetings()[:20]
        ]
        return json.dumps({"ok": True, "meetings": summaries})
    except Exception as e:
        return json.dumps({"ok": False, "error": str(e)})


def fathom_get_all_moms() -> str:
    """Return every stored MOM from the database as a JSON string."""
    stored = fathom_service.get_all_moms()
    payload = {"ok": True, "count": len(stored), "moms": stored}
    return json.dumps(payload)


def fathom_get_mom_detail(mom_id: int) -> str:
    """Fetch a single MOM by its database ID; JSON error if it doesn't exist."""
    record = fathom_service.get_mom_by_id(mom_id)
    if not record:
        return json.dumps({"ok": False, "error": "MOM not found"})
    return json.dumps({"ok": True, "mom": record})


def fathom_generate_mom_for_meeting(recording_id: str) -> str:
    """Manually trigger MOM generation for a specific meeting by its recording ID.

    Returns a JSON string: {"ok": true, ...} on success (or when a completed
    MOM already exists), {"ok": false, "error": ...} on any failure. All
    errors are returned as JSON rather than raised, matching the convention
    of the other fathom_* tool functions.
    """
    try:
        # Skip regeneration if a completed MOM is already stored. (Kept inside
        # the try so DB errors surface as a JSON error, not an exception.)
        existing = fathom_service.get_mom_by_recording_id(recording_id)
        if existing and existing.get("status") == "completed":
            return json.dumps({"ok": True, "message": "MOM already exists", "mom": existing})

        # Fathom payloads are inconsistent about the ID field, so match both.
        meetings = fathom_service.fathom_list_meetings()
        meeting = next((m for m in meetings if str(m.get("recording_id", "")) == recording_id or str(m.get("id", "")) == recording_id), None)
        if not meeting:
            return json.dumps({"ok": False, "error": "Meeting not found in Fathom"})

        title = meeting.get("title", meeting.get("meeting_title", "Untitled Meeting"))
        meeting_date = meeting.get("created_at", meeting.get("date", ""))
        share_url = meeting.get("share_url", "")

        transcript = fathom_service.fathom_get_transcript(recording_id)
        if not transcript:
            return json.dumps({"ok": False, "error": "No transcript available yet"})

        # Derive metadata (attendees, type, project) before generating text.
        transcript_text = fathom_service.format_transcript(transcript)
        attendees = fathom_service.extract_attendees_from_meeting(meeting, transcript)
        attendees_str = ", ".join(attendees)
        meeting_type = fathom_service.classify_meeting_type(meeting, transcript)
        projects = [p["name"] for p in fathom_service.get_all_projects()]
        project_name = fathom_service.guess_project_name(title, projects)

        mom_text = generate_mom(title, transcript_text, attendees_str)
        fathom_service.save_mom(recording_id, title, meeting_date, attendees_str, transcript_text, mom_text,
                                share_url=share_url, project_name=project_name, meeting_type=meeting_type)

        # DM the finished MOM to the configured user (best-effort inside
        # notify_mom_to_slack itself).
        notify_mom_to_slack(title, meeting_date, attendees_str, mom_text)
        return json.dumps({"ok": True, "message": "MOM generated successfully", "title": title})
    except Exception as e:
        return json.dumps({"ok": False, "error": str(e)})


def fathom_poll_new_meetings() -> str:
    """Check for new meetings and auto-generate MOMs for any that have transcripts ready."""
    try:
        newly_processed = fathom_service.poll_and_process(generate_mom)
        # DM each successfully generated MOM to the configured user.
        for entry in newly_processed:
            record = fathom_service.get_mom_by_recording_id(entry["recording_id"])
            if not record or record.get("status") != "completed":
                continue
            notify_mom_to_slack(record["title"], record["meeting_date"],
                                record.get("attendees", ""), record.get("mom_text", ""))
        return json.dumps({"ok": True, "processed": len(newly_processed), "meetings": newly_processed})
    except Exception as e:
        return json.dumps({"ok": False, "error": str(e)})


# Tool registry for the MOM agent; each wrapper's __name__ doubles as the
# tool name referenced in the agent instructions.
FATHOM_TOOLS = [
    Function(name=tool_fn.__name__, entrypoint=tool_fn)
    for tool_fn in (
        fathom_list_recent_meetings,
        fathom_get_all_moms,
        fathom_get_mom_detail,
        fathom_generate_mom_for_meeting,
        fathom_poll_new_meetings,
    )
]

# --- Agent: Fathom MOM Assistant ---
# User-facing MOM agent (registered with AgentOS further down). Shares the
# agno.db SQLite history store with the other assistants and replays the
# previous 3 runs as conversational context.
fathom_assistant = Agent(
    name="MOM Agent",
    id="fathom-mom",
    model=OpenAIChat(id="gpt-4o"),
    instructions=[
        "You are a Meeting Minutes (MOM) assistant powered by Fathom.",
        "You help the user view, search, and generate Minutes of Meeting from their recorded calls.",
        "Available actions:",
        "  - fathom_list_recent_meetings: List recent meetings from Fathom",
        "  - fathom_get_all_moms: Get all stored MOMs",
        "  - fathom_get_mom_detail(mom_id): Get full MOM details",
        "  - fathom_generate_mom_for_meeting(recording_id): Generate MOM for a specific meeting",
        "  - fathom_poll_new_meetings: Check for new meetings and auto-generate MOMs",
        "When asked to show MOMs, use fathom_get_all_moms and present them in a clean, organized way.",
        "When asked to generate a MOM, first list meetings so the user can pick one, then generate.",
        "Always present MOMs in a well-formatted markdown structure.",
    ],
    db=SqliteDb(db_file="agno.db"),
    tools=FATHOM_TOOLS,
    add_datetime_to_context=True,
    add_history_to_context=True,
    num_history_runs=3,
    markdown=True,
)

# --- Internal Agent: MOM Notifier (not exposed in sidebar) ---
# Recipient of MOM notification DMs; falls back to a hard-coded seed address.
_MOM_NOTIFY_EMAIL = os.getenv("SEED_USER_EMAIL", "shirsendu@codebuddy.co")

# Internal single-purpose agent: formats a MOM and DMs it via slack_dm_by_email.
# Deliberately NOT registered with AgentOS, so it never appears in the UI; it
# is driven programmatically by notify_mom_to_slack().
mom_notifier = Agent(
    name="MOM Notifier",
    id="mom-notifier",
    model=OpenAIChat(id="gpt-4o"),
    instructions=[
        "You are an internal notification agent. Your ONLY job is to send a Meeting MOM to a user via Slack DM.",
        "When you receive a MOM, format it nicely and send it using slack_dm_by_email.",
        f"Always send to: {_MOM_NOTIFY_EMAIL}",
        "The message should include the meeting title, date, attendees, and the full MOM text.",
        "Use Slack mrkdwn formatting: *bold* for headers, bullet points with •, and keep it readable.",
        "Do NOT summarize or shorten the MOM — send it in full.",
    ],
    tools=[Function(name="slack_dm_by_email", entrypoint=slack_dm_by_email)],
    markdown=True,
)


def notify_mom_to_slack(title: str, meeting_date: str, attendees: str, mom_text: str):
    """Send a completed MOM to the configured user via Slack DM using the MOM Notifier agent.

    Failures are logged and swallowed — notification is best-effort and must
    never break the MOM generation pipeline that calls it.
    """
    header = (
        f"*📋 New MOM Generated*\n\n"
        f"*Meeting:* {title}\n"
        f"*Date:* {meeting_date}\n"
        f"*Attendees:* {attendees}\n\n"
    )
    message = header + ("—" * 40) + "\n\n" + mom_text
    try:
        mom_notifier.run(f"Send this MOM via Slack DM to {_MOM_NOTIFY_EMAIL}:\n\n{message}")
        print(f"[MOM Notifier] Sent MOM for '{title}' to {_MOM_NOTIFY_EMAIL}")
    except Exception as e:
        print(f"[MOM Notifier] Failed to notify: {e}")


# --- AgentOS ---
# Register the user-facing agents (mom_notifier is deliberately excluded so it
# stays internal). get_app() yields the FastAPI app that the routes below extend.
agent_os = AgentOS(
    agents=[fathom_assistant, clickup_assistant, slack_assistant, agno_assist],
    tracing=True,
)
app = agent_os.get_app()


# --- Fathom REST API endpoints (for frontend MOM page) ---
@app.get("/api/moms")
async def api_get_moms():
    """Get all MOMs for the frontend list page."""
    return JSONResponse({"ok": True, "moms": fathom_service.get_all_moms()})


# NOTE: /api/moms/search must be registered BEFORE /api/moms/{mom_id} to avoid route conflicts
@app.post("/api/moms/search")
async def api_mom_search(request: Request):
    """Semantic search across all MOMs using LLM to understand context.

    Request body: {"query": "..."}. Returns {"ok", "answer", "results"} where
    results are the full MOM records whose IDs the model judged relevant.
    """
    body = await request.json()
    query = body.get("query", "").strip()
    if not query:
        return JSONResponse({"ok": False, "error": "No query"}, status_code=400)

    moms = fathom_service.get_all_moms()
    if not moms:
        return JSONResponse({"ok": True, "results": [], "answer": "No MOMs found."})

    # Build a compact summary of all MOMs for the LLM (500-char preview each)
    mom_summaries = [
        f"[ID:{m['id']}] Title: {m.get('title','')} | Date: {m.get('meeting_date','')} | Attendees: {m.get('attendees','')} | Preview: {(m.get('mom_text','') or '')[:500]}"
        for m in moms
    ]
    context = "\n---\n".join(mom_summaries)

    try:
        raw = _openai_chat([
            {"role": "system", "content": f"""You are a search assistant for Meeting Minutes (MOMs).
Given the user's query, find the most relevant MOMs from the list below and explain why they match.

Return your response as JSON with this structure:
{{"answer": "brief natural language answer", "matched_ids": [list of MOM IDs that match]}}

Only return MOM IDs that are actually relevant. If nothing matches, return empty matched_ids.

Available MOMs:
{context}"""},
            {"role": "user", "content": query},
        ], temperature=0.2, timeout=60, response_format={"type": "json_object"})
        result = json.loads(raw)
        # The model sometimes returns IDs as strings ("12"); normalize to ints
        # so the membership check against the integer DB ids doesn't silently
        # drop matches. Non-numeric entries are ignored.
        matched_ids = set()
        for mid in result.get("matched_ids", []):
            try:
                matched_ids.add(int(mid))
            except (TypeError, ValueError):
                continue
        matched_moms = [m for m in moms if m["id"] in matched_ids]
        return JSONResponse({"ok": True, "answer": result.get("answer", ""), "results": matched_moms})
    except Exception as e:
        return JSONResponse({"ok": False, "error": str(e)}, status_code=500)


@app.get("/api/moms/{mom_id}")
async def api_get_mom(mom_id: int):
    """Get a single MOM by ID."""
    record = fathom_service.get_mom_by_id(mom_id)
    if record:
        return JSONResponse({"ok": True, "mom": record})
    return JSONResponse({"ok": False, "error": "Not found"}, status_code=404)


@app.patch("/api/moms/{mom_id}")
async def api_update_mom(mom_id: int, request: Request):
    """Update a MOM field (project_name, meeting_type, mom_text)."""
    payload = await request.json()
    field = payload.get("field", "")
    value = payload.get("value", "")
    if not field:
        return JSONResponse({"ok": False, "error": "Missing field"}, status_code=400)
    # update_mom_field returns False for fields it refuses to update.
    if fathom_service.update_mom_field(mom_id, field, value):
        return JSONResponse({"ok": True})
    return JSONResponse({"ok": False, "error": f"Invalid field: {field}"}, status_code=400)


@app.post("/api/fathom/poll")
async def api_fathom_poll():
    """Manually trigger polling for new meetings."""
    try:
        results = fathom_service.poll_and_process(generate_mom)
        # Notify via Slack DM for each MOM that finished successfully.
        for item in results:
            record = fathom_service.get_mom_by_recording_id(item["recording_id"])
            if not record or record.get("status") != "completed":
                continue
            notify_mom_to_slack(record["title"], record["meeting_date"],
                                record.get("attendees", ""), record.get("mom_text", ""))
        return JSONResponse({"ok": True, "processed": len(results), "meetings": results})
    except Exception as e:
        return JSONResponse({"ok": False, "error": str(e)}, status_code=500)


# --- Project endpoints ---
@app.get("/api/projects")
async def api_get_projects():
    """List all projects for the frontend."""
    return JSONResponse({"ok": True, "projects": fathom_service.get_all_projects()})


@app.post("/api/projects")
async def api_create_project(request: Request):
    """Create a project from {"name": ...} in the request body."""
    payload = await request.json()
    project_name = payload.get("name", "").strip()
    if not project_name:
        return JSONResponse({"ok": False, "error": "Name required"}, status_code=400)
    # create_project's own result dict is passed straight through.
    return JSONResponse(fathom_service.create_project(project_name))


@app.delete("/api/projects/{project_id}")
async def api_delete_project(project_id: int):
    """Delete a project by ID.

    Always reports ok — delete_project's return value is ignored, so deleting
    an unknown ID is indistinguishable from success.
    """
    fathom_service.delete_project(project_id)
    return JSONResponse({"ok": True})


# --- MOM Chat endpoint (per-MOM conversation to edit/customize) ---
@app.post("/api/moms/{mom_id}/chat")
async def api_mom_chat(mom_id: int, request: Request):
    """Chat with a MOM to customize/edit it. Uses the MOM content + transcript as context.

    Request body: {"message": "..."}. If the model's reply contains an
    <updated_mom>...</updated_mom> section, the stored MOM text is replaced
    with that section and the tags are stripped from the reply shown to the
    user. Response: {"ok": true, "reply": str, "updated_mom": str | None},
    where updated_mom is non-None only when the MOM was rewritten.
    """
    mom = fathom_service.get_mom_by_id(mom_id)
    if not mom:
        return JSONResponse({"ok": False, "error": "MOM not found"}, status_code=404)

    body = await request.json()
    user_message = body.get("message", "")
    if not user_message:
        return JSONResponse({"ok": False, "error": "No message"}, status_code=400)

    try:
        # Transcript is truncated to 8000 chars to bound the prompt size.
        reply = _openai_chat([
            {"role": "system", "content": f"""You are an assistant helping customize Meeting Minutes (MOM).
You have access to the original transcript and the current MOM. The user may ask you to:
- Edit or rewrite sections of the MOM
- Extract specific information from the transcript
- Summarize differently or add more detail
- Answer questions about what was discussed

If the user asks to update/rewrite the MOM, return the FULL updated MOM text wrapped in <updated_mom>...</updated_mom> tags.
Otherwise just answer the question normally.

Current MOM:
{mom.get('mom_text', '')}

Original Transcript (excerpt):
{(mom.get('transcript', '') or '')[:8000]}"""},
            {"role": "user", "content": user_message},
        ])

        # Check if the reply contains an updated MOM
        updated_mom = None
        if "<updated_mom>" in reply and "</updated_mom>" in reply:
            # Extract the first <updated_mom> section and persist it before
            # building the user-visible reply.
            start = reply.index("<updated_mom>") + len("<updated_mom>")
            end = reply.index("</updated_mom>")
            updated_mom = reply[start:end].strip()
            fathom_service.update_mom_field(mom_id, "mom_text", updated_mom)
            # Clean reply to not include raw tags
            reply = reply[:reply.index("<updated_mom>")] + reply[reply.index("</updated_mom>") + len("</updated_mom>"):]
            reply = reply.strip()
            if not reply:
                reply = "MOM has been updated successfully."

        return JSONResponse({"ok": True, "reply": reply, "updated_mom": updated_mom})
    except Exception as e:
        return JSONResponse({"ok": False, "error": str(e)}, status_code=500)


def _verify_fathom_webhook(request: Request, raw_body: bytes) -> bool:
    """Verify Fathom webhook signature using HMAC-SHA256.

    Uses the Standard-Webhooks-style scheme: the signed content is
    "{webhook-id}.{webhook-timestamp}.{body}" and the signature header holds
    one or more space-separated "v1,<base64-sig>" entries.

    Args:
        request: Incoming request; only the webhook-* headers are read.
        raw_body: The exact raw request body bytes (must not be re-serialized).

    Returns:
        True if the signature is valid, or if FATHOM_WEBHOOK_SECRET is unset
        (verification deliberately skipped). False for any missing/stale
        header, malformed secret/body, or signature mismatch — malformed
        input is never raised, so the caller can return 401 instead of 500.
    """
    secret = os.getenv("FATHOM_WEBHOOK_SECRET", "")
    if not secret:
        return True  # skip verification if no secret configured

    webhook_id = request.headers.get("webhook-id", "")
    webhook_ts = request.headers.get("webhook-timestamp", "")
    webhook_sig = request.headers.get("webhook-signature", "")

    if not webhook_id or not webhook_ts or not webhook_sig:
        return False

    # Reject timestamps older than 5 minutes to prevent replay attacks
    try:
        ts_int = int(webhook_ts)
        if abs(time.time() - ts_int) > 300:
            return False
    except ValueError:
        return False

    # Fathom signature format: "v1,<base64-signature>"
    # Signed content: "{webhook_id}.{webhook_timestamp}.{body}"
    # Secret may be prefixed with "whsec_" — strip it for the HMAC key.
    try:
        signed_content = f"{webhook_id}.{webhook_ts}.{raw_body.decode('utf-8')}"
        secret_bytes = base64.b64decode(secret.removeprefix("whsec_"))
    except ValueError:
        # Covers non-UTF-8 bodies (UnicodeDecodeError) and bad base64 secrets
        # (binascii.Error) — both are ValueError subclasses. Previously these
        # raised and surfaced as a 500 instead of a signature failure.
        return False

    expected_sig = base64.b64encode(
        hmac.new(secret_bytes, signed_content.encode("utf-8"), hashlib.sha256).digest()
    ).decode("utf-8")

    # webhook-signature can contain multiple "v1,<sig>" entries separated by spaces
    for sig_entry in webhook_sig.split(" "):
        parts = sig_entry.split(",", 1)
        if len(parts) == 2 and parts[0] == "v1":
            # Constant-time compare to avoid timing side channels.
            if hmac.compare_digest(expected_sig, parts[1]):
                return True
    return False


@app.post("/api/fathom/webhook")
async def api_fathom_webhook(request: Request):
    """Webhook endpoint for Fathom to push meeting_content_ready events.

    Flow: verify HMAC signature → dedupe on recording_id → obtain transcript
    (from payload or API) → generate and persist the MOM → Slack-DM it.
    Returns 401 on a bad signature, 400 without a recording_id, 500 on any
    other failure.
    """
    try:
        # Read the raw bytes first: signature verification needs the exact body.
        raw_body = await request.body()

        if not _verify_fathom_webhook(request, raw_body):
            return JSONResponse({"ok": False, "error": "Invalid signature"}, status_code=401)

        body = json.loads(raw_body)
        # Payload field names vary; accept both "recording_id" and "id".
        recording_id = body.get("recording_id", body.get("id", ""))
        if not recording_id:
            return JSONResponse({"ok": False, "error": "No recording_id"}, status_code=400)

        # Dedupe: webhooks can be redelivered.
        # NOTE(review): _is_processed is a private fathom_service helper —
        # consider exposing a public wrapper.
        if fathom_service._is_processed(recording_id):
            return JSONResponse({"ok": True, "message": "Already processed"})

        title = body.get("title", body.get("name", "Untitled Meeting"))
        meeting_date = body.get("created_at", body.get("date", ""))
        share_url = body.get("share_url", "")
        meeting_type = fathom_service.classify_meeting_type(body, [])
        projects = [p["name"] for p in fathom_service.get_all_projects()]
        project_name = fathom_service.guess_project_name(title, projects)

        # Fetch transcript (may be included in webhook payload)
        transcript = body.get("transcript")
        if not transcript:
            transcript = fathom_service.fathom_get_transcript(recording_id)

        if not transcript:
            # Save a "pending" placeholder — presumably picked up again by
            # poll_and_process later; confirm it retries pending rows.
            fathom_service.save_mom(recording_id, title, meeting_date, "", "", "Transcript not ready",
                                    status="pending", share_url=share_url, project_name=project_name, meeting_type=meeting_type)
            return JSONResponse({"ok": True, "message": "Queued — transcript not ready yet"})

        transcript_text = fathom_service.format_transcript(transcript)
        attendees = fathom_service.extract_attendees(transcript)
        attendees_str = ", ".join(attendees)

        mom_text = generate_mom(title, transcript_text, attendees_str)
        fathom_service.save_mom(recording_id, title, meeting_date, attendees_str, transcript_text, mom_text,
                                share_url=share_url, project_name=project_name, meeting_type=meeting_type)

        notify_mom_to_slack(title, meeting_date, attendees_str, mom_text)
        return JSONResponse({"ok": True, "message": "MOM generated"})
    except Exception as e:
        return JSONResponse({"ok": False, "error": str(e)}, status_code=500)


# --- Background polling task ---
# Seconds between Fathom polls; override via the FATHOM_POLL_INTERVAL env var.
_POLL_INTERVAL = int(os.getenv("FATHOM_POLL_INTERVAL", "300"))  # default 5 minutes


async def _fathom_poll_loop():
    """Background task that polls Fathom for new meetings every FATHOM_POLL_INTERVAL seconds."""
    print("[Fathom Poll] Background poller started")
    while True:
        # Only do work when an API key is configured; otherwise just idle.
        if os.getenv("FATHOM_API_KEY"):
            try:
                print("[Fathom Poll] Checking for new meetings...")
                new_moms = fathom_service.poll_and_process(generate_mom)
                if not new_moms:
                    print("[Fathom Poll] No new meetings found")
                else:
                    print(f"[Fathom Poll] Generated {len(new_moms)} new MOM(s): {[m['title'] for m in new_moms]}")
                    for entry in new_moms:
                        record = fathom_service.get_mom_by_recording_id(entry["recording_id"])
                        if record and record.get("status") == "completed":
                            notify_mom_to_slack(record["title"], record["meeting_date"], record.get("attendees", ""), record.get("mom_text", ""))
            except Exception as e:
                # Never let one failed poll kill the loop.
                import traceback
                print(f"[Fathom Poll] Error: {e}")
                traceback.print_exc()
        await asyncio.sleep(_POLL_INTERVAL)


# Keep a reference to the background task so it doesn't get garbage collected
# (the event loop holds only weak references to tasks); set in startup_mcp().
_poll_task = None

# --- Connect MCP tools at startup, disconnect at shutdown ---
# NOTE(review): @app.on_event is deprecated in newer FastAPI in favor of
# lifespan handlers — consider migrating on the next framework upgrade.
@app.on_event("startup")
async def startup_mcp():
    """Connect the Agno-docs MCP server and launch the Fathom poll loop."""
    global _poll_task
    # _connect() is a private MCPTools method — presumably needed because the
    # tools are used outside an async context manager; verify on agno upgrades.
    await agno_docs_mcp._connect()
    print(f"Agno Docs MCP: {len(agno_docs_mcp.functions)} tools loaded")
    print(f"ClickUp Tools: {len(CLICKUP_TOOLS)} tools loaded")
    print(f"Fathom Tools: {len(FATHOM_TOOLS)} tools loaded")
    # Start background polling if API key is set
    if os.getenv("FATHOM_API_KEY"):
        _poll_task = asyncio.create_task(_fathom_poll_loop())
        print(f"Fathom polling started (every {_POLL_INTERVAL}s)")
    else:
        print("Fathom polling skipped — FATHOM_API_KEY not set")


@app.on_event("shutdown")
async def shutdown_mcp():
    """Close the MCP connection on shutdown.

    NOTE(review): _poll_task is never cancelled here; it only stops when the
    event loop is torn down — consider an explicit cancel.
    """
    await agno_docs_mcp.close()


# --- Serve the frontend ---
@app.get("/", include_in_schema=False)
async def serve_ui():
    """Serve the frontend single-page app (hidden from the OpenAPI schema)."""
    return FileResponse("static/index.html")


# Move our UI route to the top so it takes priority over AgentOS's "/"
_ui_route_index = next(
    (i for i, rt in enumerate(app.routes)
     if isinstance(rt, APIRoute) and rt.path == "/" and rt.name == "serve_ui"),
    None,
)
if _ui_route_index is not None:
    app.routes.insert(0, app.routes.pop(_ui_route_index))

app.mount("/static", StaticFiles(directory="static"), name="static")
