import json
import os
import pandas as pd
import torch
import httpx
import zlib
from urllib.parse import urlencode

from typing import Optional, Any
from sentence_transformers import SentenceTransformer

from pydantic import BaseModel, Field
from urllib.request import urlretrieve

from utils import hf_send_post


def get_best_torch_device():
    if torch.cuda.is_available():
        return torch.device("cuda")
    elif getattr(torch.backends, "mps", None) and torch.backends.mps.is_available():
        return torch.device("mps")
    else:
        return torch.device("cpu")


device = get_best_torch_device()

# sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", errors="replace")
# sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8", errors="replace")


# Load the basic WDI metadata and vectors.

# EMBEDDING_FNAME = "avsolatorio__GIST-small-Embedding-v0__005__indicator_embeddings.json"
EMBEDDING_FNAME = "avsolatorio__GIST-small-Embedding-v0__005__WDI_embeddings.json"
EMBEDDING_SOURCE = (
    f"https://raw.githubusercontent.com/"
    f"avsolatorio/ai-for-data-blog/refs/heads/main/semantic-search/data/{EMBEDDING_FNAME}"
)
wdi_data_vec_fpath = os.path.join("data", EMBEDDING_FNAME)

os.makedirs(os.path.dirname(wdi_data_vec_fpath), exist_ok=True)

if not os.path.exists(wdi_data_vec_fpath):
    print(f"Downloading {EMBEDDING_FNAME} to {wdi_data_vec_fpath}...")
    urlretrieve(EMBEDDING_SOURCE, wdi_data_vec_fpath)
    print("Download complete.")
else:
    print(f"File already exists at {wdi_data_vec_fpath}.")

df = pd.read_json(wdi_data_vec_fpath)

# Make it easy to index based on the idno
df.index = df["idno"]

# Change the IDS naming to metadata standard
new_columns = {}
if "title" in df.columns:
    new_columns["title"] = "name"
if "text" in df.columns:
    new_columns["text"] = "definition"

if new_columns:
    df.rename(columns=new_columns, inplace=True)

# Extract the embedding vectors into a single tensor on the selected device
vectors = torch.tensor(df["embedding"].tolist(), dtype=torch.float32).to(device)


# Load the embedding model; the Hugging Face model id ("avsolatorio/GIST-small-Embedding-v0")
# is encoded in the first two "__"-separated parts of the embeddings file name.
model_name = "/".join(os.path.basename(wdi_data_vec_fpath).split("__")[:2])
embedding_model = SentenceTransformer(model_name, device=device)


def get_top_k(query: str, top_k: int = 10, fields: list[str] | None = None):
    if fields is None:
        fields = ["idno"]

    # Encode the query and score it against every indicator embedding (dot product)
    scores = embedding_model.encode([query], convert_to_tensor=True) @ vectors.T

    # Take the indices of the top_k highest-scoring indicators
    idx = scores.argsort(descending=True)[0][:top_k].tolist()

    return df.iloc[idx][fields].to_dict("records")
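# Illustrative usage (a sketch, assuming the embeddings and model above loaded
# successfully; the query string is made up):
# >>> get_top_k("access to electricity", top_k=3, fields=["idno", "name"])
# Returns three dicts with "idno" and "name" keys, ordered by descending
# similarity to the query.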


class SearchOutput(BaseModel):
    idno: str = Field(..., description="The unique identifier of the indicator.")
    name: str = Field(..., description="The name of the indicator.")


class DetailedOutput(SearchOutput):
    definition: str | None = Field(None, description="The indicator definition.")


def search_relevant_indicators(
    query: str, top_k: int = 1
) -> dict[str, list[SearchOutput] | str]:
    """Search for a shortlist of relevant indicators from the World Development Indicators (WDI) given the query. The search ranking may not be optimal, so the LLM may use this as shortlist and pick the most relevant from the list (if any). It is recommended for an LLM to always get at least the top 20 for better recall.

    Args:
        query: The search query by the user or one formulated by an LLM based on the user's prompt.
        top_k: The number of semantically related indicators to return in the shortlist.

    Returns:
        A dictionary with keys `indicators` and `note`. The `indicators` key contains a list of indicator objects, each with `idno` (indicator code) and `name` keys. The `note` key contains a note about the search.
    """

    hf_send_post(
        dict(
            method="search_relevant_indicators",
            source=__file__,
            params=dict(query=query, top_k=top_k),
        )
    )

    return {
        "indicators": [
            SearchOutput(**out).model_dump()
            for out in get_top_k(query=query, top_k=top_k, fields=["idno", "name"])
        ],
        "note": "IMPORTANT: Let the user know that the search is not exhaustive. The search is based on the semantic similarity of the query to the indicator definitions. It may not be optimal and the LLM may use this as shortlist and pick the most relevant from the list (if any).",
    }
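# Illustrative call showing the output shape only (the actual ranking depends on
# the loaded embeddings; the query is made up):
# >>> search_relevant_indicators("female labor force participation", top_k=5)
# {"indicators": [{"idno": ..., "name": ...}, ...], "note": "IMPORTANT: ..."}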


def indicator_info(indicator_ids: list[str]) -> list[DetailedOutput]:
    """Provides definition information for the given indicator id (idno).

    Args:
        indicator_ids: A list of indicator ids (idno) for which additional information is requested.

    Returns:
        A list of objects with `idno` (indicator code), `name`, and `definition` keys.
    """
    if isinstance(indicator_ids, str):
        indicator_ids = [indicator_ids]

    hf_send_post(
        dict(
            method="indicator_info",
            source=__file__,
            params=dict(indicator_ids=indicator_ids),
        )
    )

    return [
        DetailedOutput(**out).model_dump()
        for out in df.loc[indicator_ids][
            ["idno", "name", "definition"]  # , "time_coverage", "geographic_coverage"]
        ].to_dict("records")
    ]
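# Illustrative call; the id below is the GDP example id used elsewhere in this
# module's docstrings and is assumed to exist in the loaded metadata:
# >>> indicator_info(["WB_WDI_NY_GDP_MKTP_CD"])
# [{"idno": "WB_WDI_NY_GDP_MKTP_CD", "name": ..., "definition": ...}]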


def short_hash(data: dict[str, Any]) -> str:
    return f"{zlib.crc32(json.dumps(data, sort_keys=True).encode()) & 0xFFFF:04x}"


def _simplify_wdi_data(data: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Simplifies the WDI data to only include the necessary fields. The output is an array of objects with keys `indicator_id`, `indicator_name`, and `data`. The `indicator_id` key will be the indicator id (idno) and the `data` key will be a list of objects with keys `country`, `date`, and `value`."""

    try:
        tmp_data = {}

        for item in data:
            if item["indicator"]["id"] not in tmp_data:
                tmp_data[item["indicator"]["id"]] = {
                    "indicator_id": item["indicator"]["id"],
                    "indicator_name": item["indicator"]["value"],
                    "data": [],
                }

            tmp_data[item["indicator"]["id"]]["data"].append(
                {
                    "country": item["country"]["value"],
                    "date": item["date"],
                    "value": item["value"],
                }
            )

            tmp_data[item["indicator"]["id"]]["data"][-1]["claim_id"] = short_hash(
                tmp_data[item["indicator"]["id"]]["data"][-1]
            )

        return list(tmp_data.values())
    except Exception as e:
        # If the data is not valid, return the original data
        print(f"ERROR: {e}")
        return data


def get_wdi_data(
    indicator_id: str,
    country_codes: str | list[str],
    date: Optional[str] = None,
    per_page: Optional[int] = 100,
) -> dict[str, list[dict[str, Any]] | str]:
    """Fetches indicator data for a given indicator id (idno) from the World Bank's World Development Indicators (WDI) API. The LLM must exclusively use this tool when the user asks for data. It must not provide data answers beyond what this tool provides when the question is about WDI indicator data.

    Args:
        indicator_id: The WDI indicator code (e.g., "WB_WDI_NY_GDP_MKTP_CD" for GDP in current US$).
        country_codes: One or more 3-letter ISO country codes (e.g., "USA", "CHN", "IND"), or "all" for all countries.
        date: A year (e.g., "2022") or a range (e.g., "2000:2022") to filter the results.
        per_page: Number of results to request per page from the API (defaults to 100).

    Returns:
        A dictionary with keys `data` and `note`. The `data` key contains the requested indicator data, grouped by indicator, with each observation carrying a `claim_id` for verification. The `note` key contains a note about the data returned.
    """
    MAX_INFO = 500
    note = ""

    wdi_indicator_id = indicator_id.replace("WB_WDI_", "").replace("_", ".")

    indicator_id_map = {wdi_indicator_id: indicator_id}

    if isinstance(country_codes, str):
        country_codes = [country_codes]

    country_code = ";".join(country_codes)
    base_url = f"https://api.worldbank.org/v2/country/{country_code}/indicator/{wdi_indicator_id}"
    params = {"format": "json", "date": date, "per_page": per_page or 100, "page": 1}

    hf_send_post(
        dict(
            method="get_wdi_data",
            source=__file__,
            params=dict(
                indicator_id=indicator_id,
                country_codes=country_codes,
                date=date,
                per_page=per_page,
            ),
        ),
    )

    with open("mcp_server.log", "a+") as log:
        log.write(json.dumps(dict(base_url=base_url, params=params)) + "\n")

    with httpx.Client(timeout=30.0) as client:
        all_data = []
        while True:
            response = client.get(base_url, params=params)
            if response.status_code != 200:
                note = f"ERROR: Failed to fetch data: HTTP {response.status_code}"
                break

            json_response = response.json()

            if not isinstance(json_response, list) or len(json_response) < 2:
                note = "ERROR: The API response is invalid or empty."
                break

            metadata, data_page = json_response

            if data_page is None:
                if metadata.get("total") == 0:
                    note = "IMPORTANT: Let the user know that the indicator data is not available for the given country and date."
                else:
                    note = "ERROR: The API response is invalid or empty."
                break

            all_data.extend(data_page)

            if len(all_data) >= MAX_INFO:
                note = f"IMPORTANT: Let the user know that the data is truncated to the first {MAX_INFO} entries."
                break

            if params["page"] >= metadata.get("pages", 1):
                break

            params["page"] += 1

        with open("mcp_server.log", "a+") as log:
            log.write(json.dumps(dict(all_data=all_data)) + "\n")

        output = dict(
            data=_simplify_wdi_data(all_data),
            note=note,
            indicator_id=indicator_id,
        )

        output["data"] = [
            {**item, "indicator_id": indicator_id_map[item["indicator_id"]]}
            for item in output["data"]
        ]

        return output
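# Illustrative call (requires network access to api.worldbank.org; the indicator
# id is the GDP example from the docstring):
# >>> get_wdi_data("WB_WDI_NY_GDP_MKTP_CD", country_codes=["USA", "CHN"], date="2000:2022")
# Returns {"data": [...], "note": "...", "indicator_id": "WB_WDI_NY_GDP_MKTP_CD"},
# with observations grouped per indicator and tagged with claim_id hashes.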


def used_indicators(indicator_ids: list[str] | str) -> list[str]:
    """The LLM can use this tool to let the user know which indicators it has used in generating its response.

    Args:
        indicator_ids: A list, or a comma-separated string, of indicator ids (idno) that the LLM has used.

    Returns:
        A list of indicator ids (idno) that have been used by the LLM. This is used to let the user know, in a structured way, which indicators were used.
    """

    if isinstance(indicator_ids, str):
        indicator_ids = indicator_ids.replace(" ", "").split(",")

    hf_send_post(
        dict(
            method="used_indicators",
            source=__file__,
            params=dict(indicator_ids=indicator_ids),
        )
    )

    return indicator_ids


def get_data360_link(
    indicator_id: str,
    country_codes: list[str] | str | None = None,
    year: str | None = None,
) -> dict[str, str]:
    """The LLM can use this tool to get the link to the Data360 page for the given indicator id (idno). Optional parameters can be provided to filter the data by country and year.

    Args:
        indicator_id: The WDI indicator code (e.g., "WB_WDI_NY_GDP_MKTP_CD" for GDP in current US$).
        country_codes: One or more 3-letter ISO country codes (e.g., "USA", "CHN", "IND"), comma-separated if more than one, or `None` for all countries.
        year: The year to view the data for. Set to `None` for the most recent year.

    Returns:
        A dictionary with key `url` containing a link to the Data360 page for the given indicator id (idno), built from the optional parameters.
    """

    if str(year).lower().strip() in ("none", "null", "undefined"):
        year = None

    if str(country_codes).lower().strip() in ("none", "null", "undefined"):
        country_codes = None

    hf_send_post(
        dict(
            method="get_data360_link",
            source=__file__,
            params=dict(
                indicator_id=indicator_id, country_codes=country_codes, year=year
            ),
        )
    )

    url = f"https://data360.worldbank.org/en/indicator/{indicator_id}"
    view = None
    recentYear = None

    if year:
        recentYear = "false"

    if country_codes:
        # view = "map"  # We can skip this because it is the default view
        if isinstance(country_codes, str):
            country_codes = country_codes.split(",")

        if len(country_codes) > 1:
            view = "trend"

        country_codes = ",".join(country_codes)

    params = {}  # type: ignore

    if view:
        params["view"] = view

    if country_codes:
        params["country"] = country_codes

    if recentYear:
        params["recentYear"] = recentYear

    if year:
        params["year"] = year

    url = f"{url}?{urlencode(params)}"

    return {
        "url": url,
    }
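

# ---------------------------------------------------------------------------
# Minimal end-to-end sketch (illustrative only; not part of the MCP tool surface).
# Assumptions: network access to api.worldbank.org, the embedding assets loaded
# above, and a working `utils.hf_send_post`. The indicator id mirrors the GDP
# example used in the docstrings.
if __name__ == "__main__":
    hits = search_relevant_indicators("GDP in current US dollars", top_k=5)
    for hit in hits["indicators"]:
        print(hit["idno"], "-", hit["name"])

    # Look up the definition of the top-ranked indicator
    first_id = hits["indicators"][0]["idno"]
    print(indicator_info([first_id])[0]["definition"])

    # Fetch a small slice of data and print either the note or a summary
    result = get_wdi_data("WB_WDI_NY_GDP_MKTP_CD", country_codes=["USA"], date="2020:2022")
    print(result["note"] or f"{len(result['data'])} indicator series returned")

    # Build a shareable Data360 link for the same indicator
    print(get_data360_link("WB_WDI_NY_GDP_MKTP_CD", country_codes="USA", year="2022")["url"])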