# analysis/scripts/rank_common.py
# -*- coding: utf-8 -*-

"""Shared code to do with ranks

Author: Gertjan van den Burg
Copyright (c) 2020 - The Alan Turing Institute
License: See the LICENSE file.

"""

import json
import sys

import colorama
import numpy as np
import termcolor
from scipy.stats import rankdata

colorama.init()


def load_data(filename):
    """Load a results file in JSON format."""
    with open(filename, "r") as fp:
        return json.load(fp)


def compute_ranks(results, keep_methods=None, higher_better=True):
    """Compute the ranks

    Parameters
    ----------
    results : dict
        Mapping from dataset name to dict, where each dict in turn is a map 
        from method name to a score value.

    keep_methods: list
        Methods to include in the ranks

    higher_better: bool
        Whether a higher or a lower value is considered better

    Returns
    -------
    avg_ranks : dict
        Map from method name to average rank

    all_ranks: dict
        Map from dataset name to dictionary, which is in turn a map from method 
        name to rank for that dataset and that method.

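    Example
    -------
    A minimal illustration (not from the original pipeline; method and
    dataset names are hypothetical):

    >>> results = {
    ...     "dataset_a": {"m1": 0.9, "m2": 0.5},
    ...     "dataset_b": {"m1": 0.7, "m2": 0.8},
    ... }
    >>> avg, per_dset = compute_ranks(results, keep_methods=["m1", "m2"])
    >>> float(avg["m1"]), float(avg["m2"])
    (1.5, 1.5)
    >>> float(per_dset["dataset_a"]["m1"])
    1.0
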
    """
    if keep_methods is None:
        # Default to the sorted union of methods over all datasets
        keep_methods = sorted({m for dset in results for m in results[dset]})

    vec_ranks = []
    all_ranks = {}

    for dset in results:
        methods = sorted(results[dset].keys())
        methods = [m for m in methods if m in keep_methods]
        # Both lists are sorted, so equality means this dataset has a score
        # for exactly the methods we keep.
        assert methods == keep_methods

        # rankdata assigns rank 1 to the smallest value, so negate the
        # scores when a higher score is better.
        if higher_better:
            values = [-results[dset][m] for m in methods]
        else:
            values = [results[dset][m] for m in methods]

        if any(np.isnan(v) for v in values):
            print(
                "Skipping dataset %s because of nans" % dset, file=sys.stderr
            )
            continue

        # Ties receive the average of the ranks they would otherwise occupy
        ranks = rankdata(values, method="average")

        vec_ranks.append(ranks)
        rank_dict = {m: ranks[i] for i, m in enumerate(methods)}

        all_ranks[dset] = rank_dict

    # Every rank vector follows the order of keep_methods (enforced above)
    avg_ranks = np.mean(vec_ranks, axis=0)
    avg_ranks = {m: r for m, r in zip(keep_methods, avg_ranks)}
    return avg_ranks, all_ranks


def warning(msg):
    """Print a warning message to stderr in yellow."""
    termcolor.cprint(msg, "yellow", file=sys.stderr)


def preprocess_data(data, _type):
    """Filter the raw scores so that ranks are well-defined

    Removes methods that have no result on any dataset (and rbocpdms for the
    "best" experiment type), then drops datasets that are missing a result
    for any of the remaining methods.
    """
    methods = sorted({m for dset in data.keys() for m in data[dset].keys()})

    # filter out rbocpdms on "best" (uni or multi)
    if _type == "best":
        warning(
            "\nWarning: Filtering out RBOCPDMS due to insufficient results.\n"
        )
        methods = [m for m in methods if m != "rbocpdms"]

    # filter out methods that have no results on any dataset
    methods_no_result = set()
    for m in methods:
        if all(data[d][m] is None for d in data):
            methods_no_result.add(m)
    if methods_no_result:
        print(
            "\nWarning: Filtering out %r due to no results on any series\n"
            % methods_no_result,
            file=sys.stderr,
        )
        methods = [m for m in methods if not m in methods_no_result]

    # restrict every dataset to the retained methods; missing entries become
    # None and cause the dataset to be dropped below
    data_w_methods = {}
    for dset in data:
        data_w_methods[dset] = {}
        for method in methods:
            data_w_methods[dset][method] = data[dset].get(method)

    # keep only datasets with a result for every retained method
    data_no_missing = {}
    for dset in data_w_methods:
        if any(x is None for x in data_w_methods[dset].values()):
            continue
        data_no_missing[dset] = data_w_methods[dset]
    return data_no_missing, methods
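

if __name__ == "__main__":
    # Illustrative smoke test, not part of the original analysis pipeline:
    # preprocess a small synthetic results dict and rank the remaining
    # methods. All method and dataset names here are hypothetical.
    demo = {
        "series_1": {"bocpd": 0.8, "pelt": 0.6, "rbocpdms": None},
        "series_2": {"bocpd": 0.4, "pelt": 0.9, "rbocpdms": 0.5},
    }
    clean, kept = preprocess_data(demo, "best")
    avg_ranks, all_ranks = compute_ranks(clean, keep_methods=kept)
    print("methods:", kept)
    print("average ranks:", avg_ranks)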