#!/usr/bin/env python3
"""
Fetch all datasets from the Transport Data Commons portal via tRPC API.

The TDC portal (portal.transport-data.org) uses a tRPC API, not a standard
CKAN API. This script paginates through all datasets and saves the full
metadata to JSON.

Usage:
    python3 fetch-all-datasets.py

Output:
    ../raw/all-datasets.json  — full dataset metadata (460+ datasets)
    ../raw/facets.json        — search facet metadata
"""

import urllib.request
import urllib.parse
import json
import os

# Destination for raw output files, resolved relative to this script.
OUTPUT_DIR = os.path.join(os.path.dirname(__file__), '..', 'raw')
# Number of datasets requested per page of results.
BATCH_SIZE = 100
# tRPC procedure endpoint for dataset search on the TDC portal.
BASE_URL = "https://portal.transport-data.org/api/trpc/dataset.search"


def build_search_params(offset=0, limit=100, groups=None, tdc_category=None):
    """Construct the batched tRPC input for the dataset.search procedure.

    The portal expects a superjson-style envelope: a ``json`` payload holding
    the actual search filters, plus a ``meta`` section flagging which fields
    the client left undefined.
    """
    json_payload = {
        "offset": offset,
        "limit": limit,
        "endYear": None,
        "startYear": None,
        "modes": None,
        "services": None,
        "sectors": None,
        "private": True,
        "groups": groups or [],
        "data_provider": None,
        "tdc_category": tdc_category,
        "regions": None,
        "countries": None,
        "query": None,
        "sort": "score desc, metadata_modified desc",
        "facetsFields": '["tags","frequency", "organization", "res_format", "metadata_created"]',
    }
    # Fields the web client marks as undefined, in the order it sends them.
    undefined_fields = (
        "endYear", "startYear", "modes", "services", "sectors",
        "data_provider", "tdc_category", "regions", "countries", "query",
    )
    meta = {"values": {name: ["undefined"] for name in undefined_fields}}
    return {"0": {"json": json_payload, "meta": meta}}


def fetch_page(offset, limit):
    """Retrieve one page of search results from the tRPC endpoint.

    Returns the decoded JSON response body (a batched tRPC result list).
    """
    search_input = build_search_params(offset=offset, limit=limit)
    # tRPC batch calls pass the whole input object URL-encoded in the query string.
    encoded_input = urllib.parse.quote(json.dumps(search_input))
    request = urllib.request.Request(
        f"{BASE_URL}?batch=1&input={encoded_input}",
        headers={
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'User-Agent': 'Mozilla/5.0',
            'Referer': 'https://portal.transport-data.org/',
        },
    )
    with urllib.request.urlopen(request, timeout=30) as response:
        return json.load(response)


def main():
    """Paginate through every dataset on the portal, then persist results.

    Writes two files into OUTPUT_DIR:
      - all-datasets.json: concatenated dataset metadata from every page
      - facets.json: search facet metadata captured from the first page
    """
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    all_datasets = []
    offset = 0
    facets = None

    while True:
        data = fetch_page(offset, BATCH_SIZE)
        result = data[0]['result']['data']['json']
        total = result['count']
        datasets = result['datasets']
        all_datasets.extend(datasets)

        # Facets accompany every page; capture them once from the first.
        if offset == 0:
            facets = result.get('search_facets', {})

        print(f"Fetched {len(datasets)} at offset {offset} (total: {len(all_datasets)}/{total})")

        offset += BATCH_SIZE
        if offset >= total:
            break
        # Defensive stop: if the server reports a count larger than it can
        # actually deliver, an empty page would otherwise lead to a string of
        # pointless extra requests before offset catches up with `total`.
        if not datasets:
            print("Warning: empty page before reaching reported count; stopping early.")
            break

    # Save results. Write UTF-8 explicitly (default encoding is
    # platform-dependent) and keep non-ASCII titles human-readable.
    datasets_path = os.path.join(OUTPUT_DIR, 'all-datasets.json')
    with open(datasets_path, 'w', encoding='utf-8') as f:
        json.dump(all_datasets, f, indent=2, ensure_ascii=False)
    print(f"\nSaved {len(all_datasets)} datasets to {datasets_path}")

    if facets:
        facets_path = os.path.join(OUTPUT_DIR, 'facets.json')
        with open(facets_path, 'w', encoding='utf-8') as f:
            json.dump(facets, f, indent=2, ensure_ascii=False)
        print(f"Saved facets to {facets_path}")


if __name__ == '__main__':
    main()
