Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Introduced QID search in translation. #545

Merged
merged 7 commits into from
Jan 11, 2025
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/scribe_data/cli/get.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ def prompt_user_download_all():
parse_wd_lexeme_dump(
language=language,
wikidata_dump_type=["form"],
data_types=data_types,
data_types="all",
type_output_dir=output_dir,
)
else:
Expand Down
35 changes: 1 addition & 34 deletions src/scribe_data/cli/total.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
from typing import List, Union
from urllib.error import HTTPError

import requests
from SPARQLWrapper import JSON

from scribe_data.utils import (
Expand All @@ -34,6 +33,7 @@
language_metadata,
language_to_qid,
list_all_languages,
check_qid_is_language,
)
from scribe_data.wikidata.wikidata_utils import parse_wd_lexeme_dump, sparql

Expand Down Expand Up @@ -124,39 +124,6 @@ def get_datatype_list(language):
return data_type_metadata


def check_qid_is_language(qid: str):
Copy link
Collaborator Author

@axif0 axif0 Jan 8, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Had to move the `check_qid_is_language` method into `utils.py` to resolve a circular ImportError.

image

"""
Parameters
----------
qid : str
The QID to check Wikidata to see if it's a language and return its English label.

Outputs
-------
str
The English label of the Wikidata language entity.

Raises
------
ValueError
An invalid QID that's not a language has been passed.
"""
api_endpoint = "https://www.wikidata.org/w/rest.php/wikibase/v0"
request_string = f"{api_endpoint}/entities/items/{qid}"

request = requests.get(request_string, timeout=5)
request_result = request.json()

if request_result["statements"]["P31"]:
instance_of_values = request_result["statements"]["P31"]
for val in instance_of_values:
if val["value"]["content"] == "Q34770":
print(f"{request_result['labels']['en']} ({qid}) is a language.\n")
return request_result["labels"]["en"]

raise ValueError("The passed Wikidata QID is not a language.")


# MARK: Print


Expand Down
68 changes: 68 additions & 0 deletions src/scribe_data/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@

import ast
import contextlib
import requests
import json
import os
import re
Expand Down Expand Up @@ -736,3 +737,70 @@ def check_index_exists(index_path: Path, overwrite_all: bool = False) -> bool:
return choice == "Skip process"

return False


def check_qid_is_language(qid: str):
    """
    Check Wikidata to see if a QID is a language and return its English label.

    Parameters
    ----------
    qid : str
        The Wikidata QID to check.

    Returns
    -------
    str
        The English label of the Wikidata language entity.

    Raises
    ------
    ValueError
        An invalid QID that's not a language has been passed.
    """
    api_endpoint = "https://www.wikidata.org/w/rest.php/wikibase/v0"
    request_string = f"{api_endpoint}/entities/items/{qid}"

    request = requests.get(request_string, timeout=5)
    request_result = request.json()

    # P31 is Wikidata's 'instance of' property. Use .get() so that items
    # without any P31 claim fall through to the documented ValueError
    # instead of raising an unhandled KeyError.
    instance_of_values = request_result.get("statements", {}).get("P31", [])
    for val in instance_of_values:
        # Q34770 is the Wikidata entity for 'language'.
        if val["value"]["content"] == "Q34770":
            print(f"{request_result['labels']['en']} ({qid}) is a language.\n")
            return request_result["labels"]["en"]

    raise ValueError("The passed Wikidata QID is not a language.")


def get_language_iso_code(qid: str):
    """
    Get the ISO code of a language given its Wikidata QID.

    Parameters
    ----------
    qid : str
        The Wikidata QID of the language.

    Returns
    -------
    str
        The ISO code of the language.

    Raises
    ------
    ValueError
        An invalid QID that's not a language has been passed.
    KeyError
        The ISO code for the language is not available.
    """

    api_endpoint = f"https://www.wikidata.org/w/api.php?action=wbgetentities&ids={qid}&props=claims&format=json"
    # Timeout added so a stalled connection can't hang the CLI indefinitely
    # (matches the timeout used by check_qid_is_language).
    response = requests.get(api_endpoint, timeout=5)
    data = response.json()
    try:
        # P305 is the Wikidata property holding the language's ISO code claim.
        # NOTE(review): confirm P305 (IETF language tag) is the intended
        # property here rather than P218/P220 (ISO 639-1/639-3).
        return data["entities"][qid]["claims"]["P305"][0]["mainsnak"]["datavalue"][
            "value"
        ]

    except ValueError as err:
        raise ValueError("The passed Wikidata QID is not a language.") from err

    except KeyError as err:
        # Bug fix: the original `return KeyError(...)` handed the exception
        # instance back as a value, contradicting the documented contract
        # and letting callers silently use it (e.g. as a dict key).
        raise KeyError("The ISO code for the language is not available.") from err
139 changes: 25 additions & 114 deletions src/scribe_data/wiktionary/parse_dump.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@
check_index_exists,
data_type_metadata,
language_metadata,
get_language_iso_code,
check_qid_is_language,
)
from tqdm import tqdm

Expand Down Expand Up @@ -81,7 +83,6 @@ def __init__(

# Build map from ISO to full language name.
self.iso_to_name = self._build_iso_mapping()

# For "total" usage.
self.lexical_category_counts = defaultdict(Counter)
self.translation_counts = defaultdict(Counter)
Expand All @@ -101,120 +102,19 @@ def _build_iso_mapping(self) -> dict:
if iso_code := data.get("iso"):
iso_mapping[iso_code] = lang_name

return iso_mapping

# MARK: process total
def _process_lexeme_total(self, lexeme: dict) -> None:
"""
Gather stats if 'total' is in parse_type: how many entries per language & category,
how many translations, etc.
"""
lexicalCategory = lexeme.get("lexicalCategory")
if not lexicalCategory or lexicalCategory not in data_type_metadata.values():
return

category_name = self._category_lookup.get(lexicalCategory)
if not category_name:
return

# Update counters.
lemmas = lexeme.get("lemmas", {})
for lemma in lemmas.values():
lang = lemma.get("language")

if lang in self.iso_to_name:
self.lexical_category_counts[lang][category_name] += 1
translation_count = sum(
len(sense.get("glosses", {})) for sense in lexeme.get("senses", [])
)
self.translation_counts[lang][category_name] += translation_count

break

# MARK: process translations
def _process_lexeme_translations(self, lexeme: dict) -> None:
"""
Process gloss-based translations if 'translations' is in parse_type.
Store them in self.translations_index.
"""
lemmas = lexeme.get("lemmas", {})
qid = lexeme.get("lexicalCategory")

if not (lemmas and qid):
return

category_name = self._category_lookup.get(qid)
if not category_name:
return

# Only store first valid lemma for translations.
for lang_code, lemma_data in lemmas.items():
if lang_code not in self.iso_to_name:
continue

word = lemma_data.get("value", "").lower()
if not word:
continue

# Build translations from sense glosses.
translations = {}
for sense in lexeme.get("senses", []):
for sense_lang_code, gloss in sense.get("glosses", {}).items():
if sense_lang_code in self.iso_to_name:
translations[sense_lang_code] = gloss["value"]
for language in self.target_iso:
if (
language.startswith("Q")
or language.startswith("q")
andrewtavis marked this conversation as resolved.
Show resolved Hide resolved
and language[1:].isdigit()
):
qid_to_lang = check_qid_is_language(language)
if qid_to_lang:
iso_code = get_language_iso_code(language.upper())
iso_mapping[iso_code] = qid_to_lang
print(f"ISO code for {language} is {iso_code}")

if translations:
self.translations_index[word][lang_code][category_name] = translations

break # only handle the first lemma

# MARK: process forms
def _process_lexeme_forms(self, lexeme: dict) -> None:
"""
Process forms for categories in self.data_types if 'form' is in parse_type.
Store them in self.forms_index.
"""
lemmas = lexeme.get("lemmas", {})
lexical_category = lexeme.get("lexicalCategory")

# Skip if category missing or not recognized.
if not lexical_category or lexical_category not in data_type_metadata.values():
return

# Convert Q1084 -> "nouns", etc.
category_name = self._category_lookup.get(lexical_category)
if not category_name:
return

# If the category_name is NOT in our data_types list, skip
# e.g., category_name = "nouns", but user didn't request "nouns" in data_types.
if category_name not in self.data_types:
return

# Process forms.
for lang_code, lemma_data in lemmas.items():
if lang_code not in self.iso_to_name:
continue

word = lemma_data.get("value", "").lower()
if not word:
continue

forms_data = defaultdict(list)
for form in lexeme.get("forms", []):
representations = form.get("representations", {})
grammatical_features = form.get("grammaticalFeatures", [])

for rep_lang, rep_data in representations.items():
if rep_lang == lang_code:
if form_value := rep_data.get("value"):
forms_data[form_value].extend(grammatical_features)

if forms_data:
self.forms_index[word][lang_code][category_name] = dict(forms_data)
self.forms_counts[lang_code][category_name] += len(forms_data)

break # only first valid lemma
return iso_mapping

# MARK: process lines
def process_lines(self, line: str) -> None:
Expand Down Expand Up @@ -385,6 +285,12 @@ def export_translations_json(self, filepath: str, language_iso: str = None) -> N
for word, lang_data in self.translations_index.items()
if language_iso in lang_data
}

# Check if filtered data is empty before saving.
if not filtered:
print(f"No translations found for {language_iso}, skipping export...")
return

self._save_by_language(filtered, filepath, language_iso, "translations")

# MARK: export forms
Expand Down Expand Up @@ -418,6 +324,11 @@ def export_forms_json(
else:
filtered[word] = {language_iso: lang_data[language_iso]}

# Check if filtered data is empty before saving.
if not filtered:
print(f"No forms found for {language_iso}, skipping export...")
return

self._save_by_language(
filtered, filepath, language_iso, data_type or "forms"
)
Expand Down
2 changes: 1 addition & 1 deletion tests/cli/test_get.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ def test_get_all_data_types_for_language_user_says_yes(
mock_parse.assert_called_once_with(
language="English",
wikidata_dump_type=["form"],
data_types=None, # because data_types = [data_type] if provided else None
data_types="all", # because if only language given, data_types is None
type_output_dir="scribe_data_json_export", # default for JSON
)
mock_query_data.assert_not_called()
Expand Down
6 changes: 3 additions & 3 deletions tests/cli/test_total.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,12 @@
from unittest.mock import MagicMock, call, patch

from scribe_data.cli.total import (
check_qid_is_language,
get_datatype_list,
get_qid_by_input,
get_total_lexemes,
total_wrapper,
)
from scribe_data.utils import check_qid_is_language


class TestTotalLexemes(unittest.TestCase):
Expand Down Expand Up @@ -213,7 +213,7 @@ def test_get_datatype_list_no_data_types(self, mock_dir):


class TestCheckQidIsLanguage(unittest.TestCase):
@patch("scribe_data.cli.total.requests.get")
@patch("scribe_data.utils.requests.get")
def test_check_qid_is_language_valid(self, mock_get):
mock_response = MagicMock()
mock_response.json.return_value = {
Expand All @@ -228,7 +228,7 @@ def test_check_qid_is_language_valid(self, mock_get):
self.assertEqual(result, "English")
mock_print.assert_called_once_with("English (Q1860) is a language.\n")

@patch("scribe_data.cli.total.requests.get")
@patch("scribe_data.utils.requests.get")
def test_check_qid_is_language_invalid(self, mock_get):
mock_response = MagicMock()
mock_response.json.return_value = {
Expand Down
Loading