#!/usr/bin/env python3
"""
Session Watcher — Auto-ingest new sessions from all agent directories.

Monitors known agent session directories and auto-ingests new files
into the Engram vault. Runs as a cron job every 2 hours.

Watched sources:
    ~/.goose/sessions/                          (Goose flat files)
    ~/.local/share/goose/sessions/sessions.db   (Goose SQLite DB)
    ~/.hermes/sessions/                         (Hermes/Rook agent)
    ~/.claude/projects/*/                       (Claude Code JSONL sessions)
    ~/.gemini/tmp/*/chats/                      (Gemini CLI sessions)
    ~/.local/share/opencode/opencode.db         (OpenCode SQLite DB)

Usage:
    python3 session_watcher.py          # Run once (for cron)
    python3 session_watcher.py --watch  # Continuous mode (future)
"""

import json
import logging
import re
import sqlite3
import sys
from datetime import datetime
from pathlib import Path

# Add parent to path for imports
sys.path.insert(0, str(Path(__file__).parent))
from universal_ingest import ENGRAM_ENTITIES, ingest_file

# Every log line is prefixed with "[watcher]" so cron output is easy to grep.
logging.basicConfig(level=logging.INFO, format="[watcher] %(message)s")
logger = logging.getLogger(__name__)

# State lives in the XDG-style data dir when it exists, else the legacy
# repo-local logs/ dir.  NOTE(review): the exists() check runs once at import
# time, so the choice is fixed for the process even if the data dir is
# created later; save_state() will mkdir whichever parent was chosen.
_DATA_LOGS = Path.home() / ".local" / "share" / "engram" / "logs"
_LEGACY_LOGS = Path(__file__).parent.parent / "logs"
STATE_FILE = _DATA_LOGS / "watcher_state.json" if _DATA_LOGS.exists() else _LEGACY_LOGS / "watcher_state.json"

# ---------------------------------------------------------------------------
# Summary generation — extract key facts from conversation turns
# ---------------------------------------------------------------------------

# Topic keywords to detect.
# NOTE: generate_summary() matches these as plain substrings of the
# lowercased transcript, so short keywords can also hit inside longer words
# (e.g. "git" matches "digit"). Keep keywords lowercase.
TOPIC_KEYWORDS = {
    "jetson": ["jetson", "orin", "nano", "jetpack"],
    "engram": ["engram", "memory", "vault", "entity", "entities"],
    "ssh": ["ssh", "sshd", "password auth", "tailscale ssh"],
    "dashboard": ["dashboard", "nerve", "cockpit", "3080", "3081"],
    "security": ["security", "encryption", "luks", "vaultwarden", "sentinel", "auth"],
    "docker": ["docker", "container", "citadel", "sandbox"],
    "model": ["model", "llama", "qwen", "gemma", "nemotron", "openrouter", "nim"],
    "telegram": ["telegram", "bot", "@easy110"],
    "backup": ["backup", "rclone", "google drive", "encrypted"],
    "memory": ["holographic", "memory", "fact", "recall"],
    "git": ["git", "commit", "branch", "merge", "push"],
    "mcp": ["mcp", "tool", "server", "extension"],
    "config": ["config", "setup", "install", "deploy"],
    "test": ["test", "pytest", "verify", "check"],
    "vector": ["vector", "embedding", "semantic", "similarity"],
}


def generate_summary(turns: list[dict], name: str = "") -> tuple[str, list[str]]:
    """Build a searchable markdown summary plus topic tags for a session.

    Topics come from substring-matching TOPIC_KEYWORDS against the first
    500 chars of every turn (lowercased).  The summary lists user messages
    up to a 4000-char budget and assistant replies up to a 2000-char budget.

    Returns (summary_string, topics_list) — at most 10 topics.
    """
    if not turns:
        return (name or "Empty session"), []

    # Lowercased corpus for keyword scanning (first 500 chars of each turn).
    corpus = " ".join(turn.get("content", "")[:500] for turn in turns).lower()
    matched = [
        topic
        for topic, keywords in TOPIC_KEYWORDS.items()
        if any(kw in corpus for kw in keywords)
    ]

    n_user = sum(turn.get("role") == "user" for turn in turns)
    n_asst = sum(turn.get("role") == "assistant" for turn in turns)

    parts = [f"# {name}"] if name else []
    parts.append(f"**Turns:** {n_user} user / {n_asst} assistant\n")

    # User messages first — these are what people search for.
    parts.append("## Key messages\n")
    used = 0
    for turn in turns:
        if turn.get("role") != "user":
            continue
        text = turn.get("content", "").strip()
        if not text:
            continue
        piece = text[:500]  # cap each message to keep the entity small
        if used + piece.__len__() > 4000:
            break
        parts.append(f"- {piece}\n")
        used += len(piece)

    # A few assistant replies for context (short replies < 50 chars skipped).
    parts.append("\n## Assistant highlights\n")
    used = 0
    for turn in turns:
        if turn.get("role") != "assistant":
            continue
        text = turn.get("content", "").strip()
        if not text or len(text) < 50:
            continue
        piece = text[:300]
        if used + len(piece) > 2000:
            break
        parts.append(f"- {piece}\n")
        used += len(piece)

    return "\n".join(parts), matched[:10]


# Goose's hidden SQLite store (sessions + messages tables); see check_goose_db().
GOOSE_DB = Path.home() / ".local" / "share" / "goose" / "sessions" / "sessions.db"


def load_state() -> dict:
    """Return the persisted watcher state, or a fresh default on any failure.

    A missing, unreadable, or corrupt state file silently yields the default
    so a bad state file can never wedge the cron job.
    """
    try:
        return json.loads(STATE_FILE.read_text())
    except Exception:
        return {"last_goose_db_count": 0, "processed_files": []}


def save_state(state: dict):
    """Persist watcher state as pretty-printed JSON, creating parent dirs."""
    payload = json.dumps(state, indent=2)
    STATE_FILE.parent.mkdir(parents=True, exist_ok=True)
    STATE_FILE.write_text(payload)


def check_goose_db(state: dict) -> int:
    """Check for new sessions in Goose's hidden SQLite DB.

    Compares the DB's session count with the last count recorded in *state*
    and ingests only the newest rows.  Each session is written as a markdown
    entity (YAML frontmatter + full transcript) into ENGRAM_ENTITIES.

    Returns the number of sessions ingested (0 if nothing new or on error).
    """
    if not GOOSE_DB.exists():
        return 0

    db = None
    try:
        # Local import: PyYAML is only needed for this source's frontmatter.
        # Hoisted out of the per-session loop (it was re-imported every
        # iteration, along with a redundant `import re`).
        import yaml as _yaml

        db = sqlite3.connect(str(GOOSE_DB))
        count = db.execute("SELECT COUNT(*) FROM sessions").fetchone()[0]
        last_count = state.get("last_goose_db_count", 0)

        if count <= last_count:
            return 0

        # New sessions found — pull the newest (count - last_count) rows.
        new_sessions = db.execute(
            """
            SELECT id, name, created_at, provider_name, model_config_json
            FROM sessions ORDER BY created_at DESC LIMIT ?
        """,
            (count - last_count,),
        ).fetchall()

        ingested = 0
        for sid, name, created_at, provider, model_json in new_sessions:
            messages = db.execute(
                """
                SELECT role, content_json FROM messages
                WHERE session_id = ? AND role IN ('user', 'assistant')
                ORDER BY created_timestamp
            """,
                (sid,),
            ).fetchall()

            if not messages:
                continue

            # content_json holds a list of blocks; keep each block's text.
            turns = []
            for role, content_json in messages:
                try:
                    parts = json.loads(content_json)
                    text = "\n".join(p.get("text", "") if isinstance(p, dict) else str(p) for p in parts if p)
                except Exception:
                    text = str(content_json)
                if text:
                    turns.append({"role": role, "content": text})

            if not turns:
                continue

            model = ""
            try:
                model = json.loads(model_json or "{}").get("model_name", "")
            except Exception:
                pass

            summary, auto_topics = generate_summary(turns, name or sid)

            fm = {
                "name": name or sid,
                "source": f"goose-{provider or 'unknown'}",
                "provider": provider or "unknown",
                "model": model,
                "created_at": created_at,
                "ingested_at": datetime.now().isoformat(),
                "tags": ["goose", provider or "unknown"],
                "topics": auto_topics,
                "summary": summary,
                "related_ids": [],
            }

            body = f"# {name or sid}\n\n"
            for t in turns:
                if t["role"] == "user":
                    body += f"**User:**\n{t['content']}\n\n"
                else:
                    body += f"**Assistant:**\n{t['content']}\n\n"

            fm_yaml = _yaml.dump(fm, default_flow_style=False, allow_unicode=True).strip()
            content = f"---\n{fm_yaml}\n---\n\n{body}"

            # `re` comes from the module-level import.
            safe_name = re.sub(r"[^a-zA-Z0-9_.-]", "_", name or sid)[:60]
            safe_id = re.sub(r"[^a-zA-Z0-9_.-]", "_", sid)
            outfile = ENGRAM_ENTITIES / f"goose_db_{safe_id}_{safe_name}.md"

            # Skip entities already written on a previous run (idempotent re-runs).
            if not outfile.exists():
                outfile.write_text(content, encoding="utf-8")
                ingested += 1

        state["last_goose_db_count"] = count
        logger.info(f"Goose DB: {ingested} new sessions ingested (total: {count})")
        return ingested

    except Exception as e:
        logger.error(f"Goose DB error: {e}")
        return 0
    finally:
        # Always release the connection — the original leaked it whenever an
        # exception fired before the in-line db.close() calls were reached.
        if db is not None:
            db.close()


def _parse_claude_jsonl(path: Path) -> tuple[list[dict], str, str]:
    """Parse a Claude Code JSONL session file.

    Returns (turns, session_id, first_timestamp).
    Each turn is {"role": str, "content": str}.
    """
    turns = []
    session_id = path.stem
    first_ts = ""

    for line in path.read_text(errors="replace").splitlines():
        line = line.strip()
        if not line:
            continue
        try:
            entry = json.loads(line)
        except json.JSONDecodeError:
            continue

        if entry.get("type") in ("user", "assistant"):
            msg = entry.get("message", {})
            content = msg.get("content", "")
            if isinstance(content, list):
                # content can be a list of blocks
                content = " ".join(
                    b.get("text", "") for b in content if isinstance(b, dict)
                )
            if content:
                turns.append({"role": msg.get("role", "unknown"), "content": content})
            if not first_ts and entry.get("timestamp"):
                first_ts = entry["timestamp"]

    return turns, session_id, first_ts


def check_claude_code(state: dict) -> int:
    """Check Claude Code project directories for new session JSONL files.

    Claude Code stores sessions as UUID.jsonl files under
    ~/.claude/projects/<project-slug>/. Subagent files are skipped.

    Files that fail to parse, summarize, or write are NOT marked processed,
    so they are retried on the next run.  Returns the number ingested.
    """
    claude_dir = Path.home() / ".claude" / "projects"
    if not claude_dir.exists():
        return 0

    processed = set(state.get("processed_files", []))
    ingested = 0

    for project_dir in claude_dir.iterdir():
        if not project_dir.is_dir() or project_dir.name.startswith("."):
            continue
        # Extract project name from slug (e.g. "-home-geodesix-Jetson" -> "Jetson")
        project_name = project_dir.name.rsplit("-", 1)[-1] if "-" in project_dir.name else project_dir.name

        for f in project_dir.iterdir():
            if f.suffix != ".jsonl" or f.is_dir():
                continue
            if str(f) in processed:
                continue

            try:
                turns, session_id, first_ts = _parse_claude_jsonl(f)
                if not turns:
                    logger.debug(f"Skipped (no turns): {f.name}")
                    processed.add(str(f))
                    continue

                # Generate summary from conversation
                summary, topics = generate_summary(turns, name=f"claude-code-{project_name}")

                if not summary.strip():
                    logger.warning(f"EMPTY SUMMARY for {f.name} ({len(turns)} turns) — skipping, will retry next run")
                    continue  # Do NOT mark as processed — retry next run

                # Build entity filename: YYYYMMDD_claude_code_session_<proj>_<id8>.md
                short_id = session_id[:8]
                date_str = first_ts[:10].replace("-", "") if first_ts else datetime.now().strftime("%Y%m%d")
                entity_name = f"{date_str}_claude_code_session_{project_name}_{short_id}.md"

                # Write entity (hand-rolled YAML frontmatter; keys are controlled)
                entity_path = ENGRAM_ENTITIES / entity_name
                frontmatter = {
                    "source": "claude-code",
                    "project": project_name,
                    "session_id": session_id,
                    "created_at": first_ts or datetime.now().isoformat(),
                    "topics": topics,
                    "importance": "medium",
                    "type": "session",
                    "turns": len(turns),
                }
                content = "---\n"
                for k, v in frontmatter.items():
                    if isinstance(v, list):
                        content += f"{k}:\n" + "".join(f"  - {item}\n" for item in v)
                    else:
                        content += f"{k}: {v}\n"
                content += "---\n\n"
                content += summary

                # Explicit UTF-8 so transcripts with non-ASCII text don't
                # depend on the locale default (matches the Goose DB writer).
                entity_path.write_text(content, encoding="utf-8")

                # Verify the file was actually written
                if not entity_path.exists() or entity_path.stat().st_size < 100:
                    logger.error(f"WRITE FAILED: {entity_name} — file missing or too small after write")
                    continue  # Do NOT mark as processed — retry next run

                ingested += 1
                processed.add(str(f))
                logger.info(f"Ingested: {f.name} → {entity_name} ({len(turns)} turns, {entity_path.stat().st_size} bytes)")

            except Exception as e:
                logger.error(f"FAILED to ingest {f.name}: {e}")
                # Do NOT mark as processed — will retry next run

    state["processed_files"] = list(processed)
    if ingested:
        logger.info(f"Claude Code: {ingested} new sessions ingested")
    return ingested


def check_directory(dirpath: Path, source: str, state: dict) -> int:
    """Ingest any not-yet-processed session files found directly in *dirpath*.

    Only .json/.jsonl/.md files are considered; hidden files and
    subdirectories are skipped.  Successful and deliberately-skipped files
    are recorded in state["processed_files"]; failures are left unrecorded
    so they are retried on the next run.  Returns the count ingested.
    """
    if not dirpath.exists():
        return 0

    seen = set(state.get("processed_files", []))
    new_files = 0

    for entry in dirpath.iterdir():
        if entry.name.startswith(".") or entry.is_dir():
            continue
        if entry.suffix.lower() not in (".json", ".jsonl", ".md"):
            continue
        key = str(entry)
        if key in seen:
            continue

        try:
            result = ingest_file(entry, ENGRAM_ENTITIES, source_hint=source)
            status = result["status"]
            if status == "ingested":
                new_files += 1
                seen.add(key)
            elif status == "skipped":
                logger.warning(f"Skipped {entry.name}: {result.get('reason', 'unknown')} — will NOT retry")
                seen.add(key)
            else:
                # Failure status: leave unrecorded so the file is retried.
                logger.error(f"FAILED {entry.name}: {result} — will retry next run")
        except Exception as e:
            # Same retry policy for exceptions: do not mark as processed.
            logger.error(f"FAILED to ingest {entry.name}: {e}")

    state["processed_files"] = list(seen)
    if new_files:
        logger.info(f"{dirpath}: {new_files} new files ingested")
    return new_files


# OpenCode's SQLite store (session + message tables); see check_opencode_db().
OPENCODE_DB = Path.home() / ".local" / "share" / "opencode" / "opencode.db"


def check_opencode_db(state: dict) -> int:
    """Check OpenCode SQLite database for new sessions.

    Uses two layers of state: a fast count gate (state["last_opencode_count"])
    to skip scans when nothing changed, and per-session "opencode:<id>" keys
    in state["processed_files"] to skip sessions already ingested.

    Returns the number of sessions ingested (0 if nothing new or on error).
    """
    if not OPENCODE_DB.exists():
        return 0

    db = None
    try:
        db = sqlite3.connect(str(OPENCODE_DB))
        rows = db.execute(
            "SELECT s.id, s.title, s.time_created FROM session s ORDER BY s.time_created"
        ).fetchall()

        if not rows:
            return 0

        last_count = state.get("last_opencode_count", 0)
        if len(rows) <= last_count:
            return 0

        ingested = 0
        retry_pending = False  # True when some session must be revisited next run
        processed = set(state.get("processed_files", []))

        for sid, title, ts in rows:
            key = f"opencode:{sid}"
            if key in processed:
                continue

            # Get messages for this session
            msgs = db.execute(
                "SELECT data FROM message WHERE session_id = ? ORDER BY time_created", (sid,)
            ).fetchall()

            turns = []
            for (data,) in msgs:
                try:
                    msg = json.loads(data) if isinstance(data, str) else data
                    role = msg.get("role", "unknown")
                    content = msg.get("content", "")
                    if isinstance(content, list):
                        content = " ".join(
                            b.get("text", "") for b in content if isinstance(b, dict)
                        )
                    if content:
                        turns.append({"role": role, "content": content})
                except (json.JSONDecodeError, TypeError):
                    continue

            if not turns:
                processed.add(key)
                continue

            summary, topics = generate_summary(turns, name=f"opencode-{title or sid[:8]}")
            if not summary.strip():
                logger.warning(f"EMPTY SUMMARY for OpenCode session {sid[:8]} — will retry")
                retry_pending = True
                continue

            date_str = ts[:10].replace("-", "") if ts else datetime.now().strftime("%Y%m%d")
            safe_title = re.sub(r"[^a-zA-Z0-9_]", "_", (title or "untitled"))[:40]
            entity_name = f"{date_str}_opencode_session_{safe_title}_{sid[:8]}.md"
            entity_path = ENGRAM_ENTITIES / entity_name

            frontmatter = {
                "source": "opencode",
                "session_id": sid,
                "title": title or "untitled",
                "created_at": ts or datetime.now().isoformat(),
                "topics": topics,
                "importance": "medium",
                "type": "session",
                "turns": len(turns),
            }
            content_str = "---\n"
            for k, v in frontmatter.items():
                if isinstance(v, list):
                    content_str += f"{k}:\n" + "".join(f"  - {item}\n" for item in v)
                else:
                    content_str += f"{k}: {v}\n"
            content_str += "---\n\n"
            content_str += summary

            # Explicit UTF-8 so non-ASCII transcripts don't depend on the
            # locale default encoding (matches the other writers).
            entity_path.write_text(content_str, encoding="utf-8")

            if not entity_path.exists() or entity_path.stat().st_size < 100:
                logger.error(f"WRITE FAILED: {entity_name}")
                retry_pending = True
                continue

            ingested += 1
            processed.add(key)
            logger.info(f"Ingested: OpenCode {sid[:8]} → {entity_name} ({len(turns)} turns)")

        # BUG FIX: the count gate used to advance unconditionally, so sessions
        # skipped "for retry" were never actually retried — the early return
        # above blocked all future scans.  Only advance when nothing is pending.
        if not retry_pending:
            state["last_opencode_count"] = len(rows)
        state["processed_files"] = list(processed)

        if ingested:
            logger.info(f"OpenCode: {ingested} new sessions ingested")
        return ingested

    except Exception as e:
        logger.error(f"OpenCode DB error: {e}")
        return 0
    finally:
        # Always release the connection, even when an exception fired mid-loop
        # (the original leaked it on the error path).
        if db is not None:
            db.close()


def main():
    """Run one watcher pass over every known session source, then save state."""
    state = load_state()

    # SQLite-backed Goose sessions, then Claude Code JSONL projects.
    ingested = check_goose_db(state)
    ingested += check_claude_code(state)

    # Flat-file session directories.
    flat_sources = [
        (Path.home() / ".hermes" / "sessions", "hermes"),
        (Path.home() / "goose-sessions", "goose"),
        (Path.home() / "Goose-Sessions", "goose"),
    ]
    for directory, label in flat_sources:
        ingested += check_directory(directory, label, state)

    # Gemini CLI: sessions are in ~/.gemini/tmp/<project>/chats/
    gemini_root = Path.home() / ".gemini" / "tmp"
    if gemini_root.exists():
        for project in gemini_root.iterdir():
            chats = project / "chats"
            if chats.is_dir():
                ingested += check_directory(chats, f"gemini-{project.name}", state)

    # OpenCode: sessions in SQLite at ~/.local/share/opencode/opencode.db
    ingested += check_opencode_db(state)

    save_state(state)

    if ingested:
        logger.info(f"Total: {ingested} new sessions ingested")
    else:
        logger.info("No new sessions found")


if __name__ == "__main__":
    main()
