"""
Hard-stop pipeline retries for one Facebook account.

This is intentionally scoped to:
  organization_auth_account.ad_account_id = 336282307415329
  organization_auth_account.platform = FACEBOOK
  organization_auth_account.id = 4e7e1ec9-8754-459e-b8b7-7620db56f52c

Usage:
  UV_CACHE_DIR=/tmp/uv-cache uv run python manage.py shell < /tmp/hard_stop_fb_account_336282307415329.py

Apply:
  UV_CACHE_DIR=/tmp/uv-cache APPLY=1 uv run python manage.py shell < /tmp/hard_stop_fb_account_336282307415329.py
"""

import os
from datetime import timedelta

from celery import current_app
from django.db import transaction
from django.utils import timezone

from creative_module.models import AdCreativeData, AdIdsCacheData, AdMetricCacheData, AccountSyncLog
from creative_module.types import UpdateStatus
from data_pipeline_module.constants import PLATFORM_BREAKDOWNS, SyncJobType, SyncStatus
from data_pipeline_module.models import BreakdownSyncStatus, PipelineSyncStatus
from jobs.models import Job, JobTask
from organization_auth.constants import UserRoleType
from organization_auth.models import Account, UserAccount


# --- Target account (intentionally hard-coded; see module docstring) ---
AD_ACCOUNT_ID = "336282307415329"
ACCOUNT_ID = "4e7e1ec9-8754-459e-b8b7-7620db56f52c"
PLATFORM = Account.Platform.FACEBOOK

# Safety switches. The module docstring documents `APPLY=1` as an environment
# variable, but these were hard-coded to 1 — which made the advertised "dry
# run" invocation mutate prod. Read them from the environment so mutation and
# revocation must be requested explicitly (`APPLY=1 ... manage.py shell ...`).
APPLY = os.environ.get("APPLY") == "1"
# Revocation still defaults on, matching the previous behavior for apply runs
# (it is only reached when APPLY is set — see the dry-run SystemExit below).
REVOKE_TASKS = os.environ.get("REVOKE_TASKS", "1") == "1"

# Recorded verbatim into last_error / error_message on every row we close out.
REASON = (
    "Hard-stopped manually: Facebook (#200) missing ads_management/ads_read "
    "for ad_account_id=336282307415329."
)


def task_ids(qs, field):
    """Collect the distinct, non-empty values of *field* from a queryset.

    Rows where the field is NULL or the empty string are excluded; the
    surviving values are returned as a set (ready for Celery revocation).
    """
    non_null = qs.exclude(**{field + "__isnull": True})
    non_empty = non_null.exclude(**{field: ""})
    distinct_values = non_empty.values_list(field, flat=True).distinct()
    return set(distinct_values)


def inspect_task_ids(account_id, ad_account_id):
    """Scan live Celery workers for tasks that mention this account.

    Pulls the active/reserved/scheduled snapshots from every worker and
    returns the ids of task payloads whose repr contains either the account
    UUID or the ad account id. Best-effort: any broker/inspect failure is
    reported and yields an empty set.
    """
    matched = set()
    try:
        control = current_app.control.inspect(timeout=5)
        snapshots = {
            "active": control.active() or {},
            "reserved": control.reserved() or {},
            "scheduled": control.scheduled() or {},
        }
    except Exception as exc:
        print(f"inspect failed: {exc}")
        return matched

    needles = (account_id, ad_account_id)
    for state, by_worker in snapshots.items():
        for worker, entries in by_worker.items():
            for entry in entries or []:
                # Scheduled entries nest the request payload one level deeper.
                payload = entry.get("request") if state == "scheduled" else entry
                if not isinstance(payload, dict):
                    continue
                blob = repr(payload)
                if not any(needle in blob for needle in needles):
                    continue
                task_id = payload.get("id")
                if task_id:
                    matched.add(task_id)
                    print(f"inspect matched {state} task {task_id} on {worker}")
    return matched


# --- Resolve the target account. Matching on all three identifiers means the
# script cannot act on the wrong row; Account.DoesNotExist aborts otherwise. ---
account = Account.objects.get(id=ACCOUNT_ID, ad_account_id=AD_ACCOUNT_ID, platform=PLATFORM)
today = timezone.now().date()
# Fall back to a ~2-year lookback when the account never recorded a first sync.
start_date = account.first_synced_date or (today - timedelta(days=730))
# One day past today so dates already "tomorrow" in the account's timezone are
# covered too — presumably why the guard extends forward; TODO confirm.
end_date = today + timedelta(days=1)
all_dates = [start_date + timedelta(days=i) for i in range((end_date - start_date).days + 1)]

# --- Querysets of rows the retry machinery would still pick up. Querysets are
# lazy: counted here for the "before" report and re-counted at the end for the
# "after" report; the *_open ones are also .update()d directly later. ---
pipeline_all = PipelineSyncStatus.objects.filter(account=account, platform=PLATFORM)
pipeline_open = pipeline_all.filter(status__in=[SyncStatus.PENDING.value, SyncStatus.RUNNING.value, SyncStatus.FAILED.value])
breakdown_all = BreakdownSyncStatus.objects.filter(account=account, platform=PLATFORM)
breakdown_open = breakdown_all.filter(status__in=[SyncStatus.PENDING.value, SyncStatus.RUNNING.value, SyncStatus.FAILED.value])

ad_ids_open = AdIdsCacheData.objects.filter(account=account, sync_status__in=[UpdateStatus.PENDING.value, UpdateStatus.FAILED.value])
metric_open = AdMetricCacheData.objects.filter(account=account, sync_status__in=[UpdateStatus.PENDING.value, UpdateStatus.FAILED.value])
creative_open = AdCreativeData.objects.filter(account=account, sync_status__in=[UpdateStatus.PENDING.value, UpdateStatus.FAILED.value])
run_open = AccountSyncLog.objects.filter(
    account=account,
    overall_status__in=[
        AccountSyncLog.OverallStatus.QUEUED,
        AccountSyncLog.OverallStatus.RUNNING,
        AccountSyncLog.OverallStatus.RECONCILING,
    ],
)
job_open = Job.objects.filter(account=account, state__in=[Job.JobState.QUEUED, Job.JobState.RUNNING])
job_task_open = JobTask.objects.filter(
    job__account=account,
    state__in=[JobTask.TaskState.PENDING, JobTask.TaskState.RECEIVED, JobTask.TaskState.STARTED],
)
# Active owner grants — expired later so nothing re-triggers a sync.
active_owner = UserAccount.objects.filter(
    account=account,
    role=UserRoleType.OWNER.value,
    status=UserAccount.UserAccountStatus.ACTIVE,
)

# --- Union of every Celery task id attached to the open rows, plus anything
# the live workers report that mentions this account. ---
to_revoke = set()
if account.task_id:
    to_revoke.add(account.task_id)
to_revoke |= task_ids(pipeline_open, "celery_task_id")
to_revoke |= task_ids(breakdown_open, "celery_task_id")
to_revoke |= task_ids(ad_ids_open, "task_id")
to_revoke |= task_ids(metric_open, "task_id")
to_revoke |= task_ids(creative_open, "task_id")
to_revoke |= task_ids(job_open, "group_id")
to_revoke |= task_ids(job_open, "chord_id")
to_revoke |= task_ids(job_task_open, "task_id")
to_revoke |= inspect_task_ids(ACCOUNT_ID, AD_ACCOUNT_ID)

# --- Pre-mutation report; this is also the entire output of a dry run. ---
print(f"APPLY={APPLY}")
print(f"account={account.id} {account.ad_account_name} {account.platform}")
print(f"date guard range={start_date}..{end_date} ({len(all_dates)} days)")
print(f"active owners={active_owner.count()}")
print(f"pipeline open rows before={pipeline_open.count()}")
print(f"breakdown open rows before={breakdown_open.count()}")
print(f"ad ids cache open rows before={ad_ids_open.count()}")
print(f"ad metric cache open rows before={metric_open.count()}")
print(f"creative open rows before={creative_open.count()}")
print(f"account sync logs open before={run_open.count()}")
print(f"jobs open before={job_open.count()}")
print(f"job tasks open before={job_task_open.count()}")
print(f"task ids to revoke={len(to_revoke)}")
# List at most 100 ids to keep shell output readable.
for task_id in sorted(to_revoke)[:100]:
    print(f"  {task_id}")
if len(to_revoke) > 100:
    print(f"  ... {len(to_revoke) - 100} more")

# Dry run ends here; SystemExit(0) leaves the `manage.py shell` session cleanly.
if not APPLY:
    print("Dry run only. Set APPLY=1 to mutate prod.")
    raise SystemExit(0)

# Single timestamp stamped on every row so the whole stop is attributable.
now = timezone.now()

# All database mutations commit together or not at all.
with transaction.atomic():
    # Expire the owner grant(s) so token checks stop re-enabling syncs.
    active_owner.update(
        status=UserAccount.UserAccountStatus.EXPIRED,
        last_token_check_time=now,
    )
    # Queryset .update() bypasses model save() and its side effects; mark the
    # asset sync failed and detach the account-level Celery task.
    Account.objects.filter(pk=account.pk).update(
        assets_update_status=UpdateStatus.FAILED.value,
        task_id=None,
    )

    # Two-step guard: (1) seed a SKIPPED row for every date in the range —
    # ignore_conflicts leaves pre-existing rows untouched here...
    PipelineSyncStatus.objects.bulk_create(
        [
            PipelineSyncStatus(
                account=account,
                platform=PLATFORM,
                sync_date=sync_date,
                status=SyncStatus.SKIPPED.value,
                job_type=SyncJobType.MANUAL.value,
                account_timezone=account.time_zone or "UTC",
                completed_at=now,
                last_error=REASON,
            )
            for sync_date in all_dates
        ],
        ignore_conflicts=True,
        batch_size=500,
    )
    # ...then (2) force every row in the range, pre-existing included, to
    # SKIPPED and clear any attached Celery task id.
    PipelineSyncStatus.objects.filter(
        account=account,
        platform=PLATFORM,
        sync_date__range=(start_date, end_date),
    ).update(
        status=SyncStatus.SKIPPED.value,
        completed_at=now,
        last_error=REASON,
        celery_task_id=None,
    )

    # Same seed + force-update pattern for the per-breakdown statuses, one row
    # per (date, breakdown_key) pair for this platform.
    breakdown_keys = PLATFORM_BREAKDOWNS.get(PLATFORM, [])
    BreakdownSyncStatus.objects.bulk_create(
        [
            BreakdownSyncStatus(
                account=account,
                platform=PLATFORM,
                sync_date=sync_date,
                breakdown_key=breakdown_key,
                status=SyncStatus.SKIPPED.value,
                job_type=SyncJobType.MANUAL.value,
                completed_at=now,
                last_error=REASON,
            )
            for sync_date in all_dates
            for breakdown_key in breakdown_keys
        ],
        ignore_conflicts=True,
        batch_size=500,
    )
    BreakdownSyncStatus.objects.filter(
        account=account,
        platform=PLATFORM,
        sync_date__range=(start_date, end_date),
    ).update(
        status=SyncStatus.SKIPPED.value,
        completed_at=now,
        last_error=REASON,
        celery_task_id=None,
    )

    # Close out cache/creative rows so the retry pollers skip them.
    ad_ids_open.update(sync_status=UpdateStatus.FAILED.value, error_message=REASON, task_id=None)
    metric_open.update(sync_status=UpdateStatus.FAILED.value, error_message=REASON, task_id=None)
    creative_open.update(
        sync_status=UpdateStatus.FAILED.value,
        error_message=REASON,
        task_id=None,
        creative_sync_retry_count=3,  # presumably the retry cap, exhausting retries — TODO confirm
    )
    # Fail any in-flight sync-log runs across all of their stage fields.
    run_open.update(
        overall_status=AccountSyncLog.OverallStatus.FAILED,
        current_stage=AccountSyncLog.Stage.CLICKHOUSE,
        ad_creative_sync_status=AccountSyncLog.StageStatus.FAILED,
        stats_sync_status=AccountSyncLog.StageStatus.FAILED,
        ai_reports_status=AccountSyncLog.StageStatus.FAILED,
        finished_at=now,
    )
    # And fail any queued/running jobs and their tasks.
    job_open.update(state=Job.JobState.FAILED, finished_at=now)
    job_task_open.update(state=JobTask.TaskState.FAILED, finished_at=now)

# --- Best-effort revocation of live Celery tasks (deliberately outside the
# transaction: broker calls cannot be rolled back anyway). ---
revoked = 0
failed = 0
if REVOKE_TASKS:
    for task_id in sorted(to_revoke):
        try:
            # terminate=True also SIGTERMs tasks that have already started.
            current_app.control.revoke(task_id, terminate=True, signal="SIGTERM")
            revoked += 1
        except Exception as exc:
            # Keep going — one broker hiccup should not abort the sweep.
            failed += 1
            print(f"failed revoke {task_id}: {exc}")

print(f"revoked={revoked} failed={failed}")
# Lazy querysets re-query here; both "after" counts should now be zero.
print(f"pipeline open rows after={pipeline_open.count()}")
print(f"breakdown open rows after={breakdown_open.count()}")
print("hard stop complete")
