# type: ignore
# flake8: noqa
import os
import json
import csv
from mm_stats.definitions import logger, DUMP_PATH, DATA_PATH
from mm_stats.auth import PostgresDb


def load_json(path: str) -> dict:
    """
    Load and parse a JSON file.

    Parameters
    ----------
    path : str
        Path of the JSON file to read.

    Returns
    -------
    dict
        Parsed file content.
    """
    with open(path) as fob:
        return json.load(fob)


def save_to_file(var, path: str):
    """
    Serialize a dict or a list of dicts to a JSON file.

    Parameters
    ----------
    var : dict or list
        Dictionary or list of dictionaries to dump to file.
    path : str
        Destination file path (overwritten if it exists).
    """
    with open(path, "w") as fob:
        json.dump(var, fob)
    logger.info(f"File {path} dumped ")


def read_from_file(path) -> dict:
    """
    Load a previously dumped JSON file from disk.

    Parameters
    ----------
    path
        Path of the file to read.

    Returns
    -------
    dict
        Parsed file content.
    """
    with open(path, "r") as handle:
        content = json.load(handle)
    logger.info(f"File {path} loaded ")
    return content


def append_to_file(path: str, json_obj: dict) -> bool:
    """
    Append one JSON-serialized object, followed by a "," and a newline,
    to a file (creating the file if it does not exist).

    Parameters
    ----------
    path : str
        File to append to.
    json_obj : dict
        Object to serialize.

    Returns
    -------
    bool
        Always True.
    """
    serialized = json.dumps(json_obj)
    with open(path, "a") as fob:
        fob.write(serialized)
        fob.write(",\n")
    return True


def create_project_overview_csv() -> bool:
    """
    Create a CSV containing the most important information on HOT TM projects.

    Reads the dumped "projects" file and writes one row per project with a
    fixed set of columns plus a derived "url" column pointing at the
    project's API endpoint.

    Returns
    -------
    bool
        True once the CSV has been written.
    """
    # NOTE(review): relative path — presumably resolved against the cwd or
    # the dump dir; verify against the caller.
    hot_json = read_from_file("projects")

    path_out = os.path.join(DUMP_PATH, "{}.csv".format("projects_sheet"))

    # check if csv already exists, delete if yes
    if os.path.exists(path_out):
        os.remove(path_out)

    fieldnames = [
        "projectId",
        "name",
        "shortDescription",
        "description",
        "campaignTag",
        "organisationTag",
        "changesetComment",
        "created",
        "url",
    ]
    # newline="" is the documented way to open a file for the csv module
    with open(path_out, "w", newline="") as fob:
        w = csv.DictWriter(fob, delimiter=",", quotechar='"', fieldnames=fieldnames)

        w.writeheader()

        for project in hot_json:
            # keep only the whitelisted columns
            row = {key: val for key, val in project.items() if key in fieldnames}
            # derived column: direct API link to the project (set once per
            # row instead of once per matching key as before)
            row["url"] = "https://tasks.hotosm.org/api/v1/project/{}".format(
                project["projectId"]
            )
            w.writerow(row)
    logger.info("Overview CSV written")
    return True


def projects_to_geojson(projects_no_tasks_filename: str, centroid: bool) -> str:
    """
    This function takes a file where every line represents a HOT TM project as
    JSON object and converts it into a consistent geojson, ready to use in any
    GIS enabled software.

    Parameters
    ----------
    centroid : bool
        Flag if centroids shall replace the geometry of the exact boundary of projects
    projects_no_tasks_filename : str
        Path and filename of projects without tasks

    Returns
    -------
    project_gjson : str
        Path and filename of created geojson output

    """
    # list to store all json objects from the input file
    projects = []
    # init dict to dump geojson
    projects_gj = {"type": "FeatureCollection"}
    features = []

    with open(projects_no_tasks_filename, "r") as pfob:
        for line in pfob:
            # each line is terminated with ",\n" (see append_to_file) — strip it
            projects.append(json.loads(line[:-2]))

    # the output path does not depend on the individual project, so determine
    # it once instead of on every loop iteration
    if centroid:
        # shapely is only needed for the centroid calculation — import it once
        # here instead of inside the per-project loop
        from shapely.geometry import shape

        project_gjson = os.path.join(
            DATA_PATH, "{}.geojson".format("projects_centroid")
        )
    else:
        project_gjson = os.path.join(DATA_PATH, "{}.geojson".format("projects"))

    # check if geojson already exists, delete if yes
    if os.path.exists(project_gjson):
        os.remove(project_gjson)

    # loop over projects; enumerate avoids the O(n) projects.index() lookup
    # the previous version performed on every iteration
    for idx, prj in enumerate(projects):

        geometry = prj["areaOfInterest"]

        if centroid:
            # replace the exact project boundary with its centroid point
            centre = shape(geometry).centroid.coords[0]
            geometry = {"type": "Point", "coordinates": [centre[0], centre[1]]}

        # get all properties (aliases prj and mutates it below, which is fine
        # because `projects` is discarded after this loop)
        properties = prj
        # delete key areaOfInterest
        properties.pop("areaOfInterest", None)

        # flatten encapsulated properties from key projectInfo
        project_info = prj["projectInfo"]
        properties["projectInfoLocale"] = project_info["locale"]
        properties["projectInfoName"] = project_info["name"]
        properties["projectInfoShortDescription"] = project_info["shortDescription"]
        properties["projectInfoDescription"] = project_info["description"]
        properties["projectInfoInstructions"] = project_info["instructions"]
        properties["projectInfoPerTaskInstructions"] = project_info[
            "perTaskInstructions"
        ]
        # delete projectInfo
        properties.pop("projectInfo", None)
        # append project as new feature
        features.append(
            {"type": "Feature", "properties": properties, "geometry": geometry}
        )

        if idx % 500 == 0:
            logger.info(
                f"Amount of projects processed: {idx} / {len(projects)}"
            )

    logger.info("All projects processed, dumping to geojson..")
    projects_gj["features"] = features

    with open(project_gjson, "w") as pfob:
        json.dump(projects_gj, pfob)

    logger.info("Created projects geojson")

    return project_gjson


def projects_to_csv(projects_only_filename: str) -> str:
    """
    Convert a file of line-delimited HOT TM project JSON objects into a
    tab-separated CSV.

    Parameters
    ----------
    projects_only_filename : str
        Path of the input file; one JSON project object per line, each line
        terminated with ",\\n" (see append_to_file).

    Returns
    -------
    str
        Path of the created CSV file.
    """
    # keys of the nested "projectInfo" object that get flattened into
    # top-level "projectInfo<Key>" columns
    nested_keys = [
        "locale",
        "name",
        "shortDescription",
        "description",
        "instructions",
        "perTaskInstructions",
    ]

    # load input
    projects = []
    with open(projects_only_filename, "r") as fob_in:
        for line in fob_in:
            line_dict = json.loads(line[:-2])  # strip trailing ",\n"
            # delete geometry
            line_dict.pop("areaOfInterest", None)

            # flatten nested attributes, e.g. locale -> projectInfoLocale
            for key in nested_keys:
                column = "projectInfo" + key[0].upper() + key[1:]
                line_dict[column] = line_dict["projectInfo"][key]
            line_dict.pop("projectInfo", None)

            projects.append(line_dict)

    # create and open output
    projects_csv = os.path.join(DATA_PATH, "{}.csv".format("projects"))

    if os.path.exists(projects_csv):
        os.remove(projects_csv)

    columns = [
        "projectId",
        "projectStatus",
        "projectPriority",
        "aoiBBOX",
        "defaultLocale",
        "mapperLevel",
        "enforceMapperLevel",
        "enforceValidatorRole",
        "enforceRandomTaskSelection",
        "allowNonBeginners",
        "private",
        "entitiesToMap",
        "changesetComment",
        "osmchaFilterId",
        "dueDate",
        "imagery",
        "josmPreset",
        "mappingTypes",
        "campaignTag",
        "organisationTag",
        "licenseId",
        "allowedUsernames",
        "priorityAreas",
        "created",
        "lastUpdated",
        "author",
        "activeMappers",
        "taskCreationMode",
        "mappingEditors",
        "validationEditors",
        "continent",
        "primaryCat",
        "secondaryCat",
        "comments",
        "projectInfoLocale",
        "projectInfoName",
        "projectInfoShortDescription",
        "projectInfoDescription",
        "projectInfoInstructions",
        "projectInfoPerTaskInstructions",
    ]
    # newline="" is the documented way to open a file for the csv module
    with open(projects_csv, "w", newline="") as fob_out:
        # NOTE(review): DictWriter raises ValueError if a project carries a
        # key that is not in `columns` — presumably the TM API schema matches
        # this list exactly; verify if inputs change.
        wr = csv.DictWriter(fob_out, fieldnames=columns, delimiter="\t", quotechar='"')
        wr.writeheader()
        wr.writerows(projects)

    return projects_csv


def tasks_to_geojson(projects_tasks_filename: str) -> str:
    """
    Convert a file of line-delimited project-task JSON objects into a single
    geojson FeatureCollection of tasks.

    Each output feature keeps the task geometry and carries the projectId and
    taskId as properties.

    Parameters
    ----------
    projects_tasks_filename : str
        Path of the input file; one JSON object per line, each line
        terminated with ",\\n".

    Returns
    -------
    str
        Path of the created geojson file.
    """

    # init dict to dump geojson
    tasks_gj = {"type": "FeatureCollection"}
    features = []

    with open(projects_tasks_filename, "r") as fob_in:
        for line in fob_in:
            line_dict = json.loads(line[:-2])  # strip trailing ",\n"
            for feature in line_dict["features"]:
                task = {
                    "type": feature["type"],
                    "geometry": feature["geometry"],
                    "properties": {
                        "projectId": line_dict["projectId"],
                        "taskId": feature["properties"]["taskId"],
                    },
                }
                features.append(task)
    # BUGFIX: the key must be lowercase "features" — RFC 7946 requires it and
    # GIS software ignores a capitalized "Features" member entirely
    tasks_gj["features"] = features

    tasks_gjson = os.path.join(DATA_PATH, "{}.geojson".format("tasks"))

    if os.path.exists(tasks_gjson):
        os.remove(tasks_gjson)

    with open(tasks_gjson, "w") as pfob:
        json.dump(tasks_gj, pfob)

    return tasks_gjson


def country_geojson_to_postgis(path):
    """
    Uploads a geojson containing all country borders to postgres.

    The features are written to an intermediate CSV (name, geometry as a
    GeoJSON string) which is COPYed into data_preparation.low_res_countries
    and then converted to a PostGIS geometry column.

    Parameters:
        path: str
            filename of geojson. Needs to be in dump folder.
    """
    db = PostgresDb()

    data_path = os.path.join(DUMP_PATH, path)
    csv_path = os.path.join(DUMP_PATH, "countries.csv")
    data = load_json(data_path)
    all_names = [feat["properties"]["NAME"] for feat in data["features"]]
    # BUGFIX: serialize geometries as JSON text — csv.writer would otherwise
    # write the Python dict repr (single quotes), which ST_GeomFromGeoJSON
    # cannot parse
    all_geometries = [json.dumps(feat["geometry"]) for feat in data["features"]]
    with open(csv_path, "w", newline="") as f:
        writer = csv.writer(f)
        # BUGFIX: the COPY below uses "WITH CSV HEADER" (skips the first
        # row), so a header row is required — without it the first country
        # was silently dropped
        writer.writerow(["name", "geometry"])
        writer.writerows(zip(all_names, all_geometries))

    sql_insert = """
    DROP TABLE IF EXISTS data_preparation.low_res_countries CASCADE;
    CREATE TABLE data_preparation.low_res_countries(
        name varchar,
        geometry text
    );
    """

    db.query(sql_insert)

    copy_sql = """
        COPY data_preparation.low_res_countries FROM stdin WITH CSV HEADER
        DELIMITER as ',';
        ALTER TABLE data_preparation.low_res_countries ADD geom geometry;
        UPDATE data_preparation.low_res_countries SET geom = ST_SetSRID(ST_GeomFromGeoJSON(geometry), 4326);
    """
    with open(csv_path, "r") as fob:
        db.copy_expert(sql=copy_sql, file=fob)
        logger.info("countries imported into pgsql")