''' Script to segment the md files in the en-usamo, en-tstst, en-tst, and en-jmo folders using regex.

To run:

`python segment_usamo.py`
'''
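
# Expected layout, inferred from the __main__ block below (an assumption, not documented
# elsewhere): inputs are read from "<base>/md/**/*.md" and outputs are written to
# "<base>/segmented/**/*.jsonl" (one JSON object per problem-solution pair per line), where
# <base> is Path(__file__).resolve().parent.parent.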

import json
from pathlib import Path
import warnings

# Silence DeprecationWarnings (presumably the pandas ones triggered by the groupby/apply calls below).
warnings.filterwarnings("ignore", category=DeprecationWarning)

import os
import re

import pandas as pd
from rapidfuzz import fuzz

section_re = re.compile(r"^#{1,2}\s(?:Contents|Problem|§[\d.]+.*)")
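# section_re marks the start of a new markdown section. Illustrative headings it is meant to match
# (assumed input format, not quoted from an actual file): "## Contents", "## Problems",
# "# §1 USAMO 2019/1".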

solution_label_re = re.compile(
    r"^#{1,2}\s§[\d.]+\s[A-Za-z0-9 ]+\s\d{4}/(\d+)(?:,\s.*)?$"
)
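# solution_label_re picks out the per-problem solution headings and captures the problem number,
# e.g. (illustrative, assumed format) "# §2 USAMO 2019/2, proposed by ..." -> group(1) == "2".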

problem_re = re.compile(r"^(\d+)\s?\.\s(.*(?:\n\s+.*)*)")
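# problem_re matches a numbered problem statement and captures its number and text, e.g.
# (illustrative) "1. Let $ABC$ be a triangle ..." -> label "1", text "Let $ABC$ be a triangle ...".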

solution_re = re.compile(r"^#{0,2}\s?Problem statement\b.*$")
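# solution_re flags the "Problem statement" heading that opens each solution section in these
# files; parse() switches into "solution" mode there, keeping the problem number captured from the
# preceding solution_label_re heading.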

pattern_debug = re.compile(
    r"^[【『\\]*.*?\b(First|Second|Third|Fourth|Fifth|Sixth|Seventh|Eighth|Ninth|Tenth|Complex|Inversion|Synthetic|One|Another|Solution)\b.*\b(solution|approach|proof)\b.*",
    re.IGNORECASE
)
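# pattern_debug is a broader catch-all for alternative-solution headers; it is not referenced
# anywhere below and appears to be kept only as a debugging aid.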

solution_split_re1 = re.compile(r"\bSolution\s[1-9]\b")
solution_split_re2 = re.compile(r"\b(First|Second|Third|Fourth|Fifth|Sixth|Seventh|Eighth|Ninth|Synthetic)\b\s+(solution|approach|proof)\b")
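# solution_split_re1/2 detect the start of an alternative solution within a single solution block,
# e.g. (illustrative) "Solution 2" or "Second solution by induction"; parse_solution() splits on them.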

DEBUG = False

special_cases = [
    "【 First short solution, by Jeffrey Kwan. Let $p_{0",
    "II Second longer solution using an invariant. Visu",
    "【 Complex solution (Evan Chen). Toss on the comple",
    "Second (longer) solution. If one does not notice t",
    "『 Second calculation approach (along the lines of ",
    "T Outline of second approach (by convexity, due t",
    "I Inversion solution submitted by Ankan Bhattacha",
    "【 Complex numbers approach with Apollonian circles",
    " A second solution. Both lemmas above admit varia",
    "【 A third remixed solution. We use Lemma I and Lem",
    "【I A fourth remixed solution. We also can combine ",
    "I First grid-based solution. The following solutio",
    "Another short solution. Let $Z$ be on line $B D E$",
    "【 Most common synthetic approach. The solution hin",
    "\\ First \"local\" solution by swapping two points. L",
    "Second general solution by angle chasing. By Rei",
    "Third general solution by Pascal. Extend rays $A",
    "【 Second length solution by tangent lengths. By $t",
    "【 Angle chasing solution. Note that $(B D A)$ and",
    "【 Harmonic solution (mine). Let $T$ be the point o",
    "【 Pascal solution (Zuming Feng). Extend ray $F D$",
    "『 A spiral similarity approach (Hans $\\mathbf{Y u}",
    "ब The author's original solution. Complete isoscel",
    "l Evan's permutation-based solution. Retain the n",
    "I Original proposer's solution. To this end, let's",
    "【 Cartesian coordinates approach with power of a p",
    "【 Cartesian coordinates approach without power of",
    "I III-advised barycentric approach (outline). Use",
    "【 Approach using difference of squares (from autho",
    "【 Divisibility approach (Aharshi Roy). Since $p q-",
    "Solution with Danielle Wang: the answer is that $|",
    "【 Homothety solution (Alex Whatley). Let $G, N, O$",
    "【 Power of a point solution (Zuming Feng, official",
    "【 Solution by Luke Robitaille. Let $Q$ be the seco",
    "ๆ Solution with coaxial circles (Pitchayut Saengru",
    "【 Solution to generalization (Nikolai Beluhov). We",
    "【 Approach by deleting teams (Gopal Goel). Initial",
    "【 Approach by adding colors. For a constructive al",
    "【 Solution using spiral similarity. We will ignore",
    "『 Barycentric solution (by Carl, Krit, Milan). We",
    "I A Menelaus-based approach (Kevin Ren). Let $P$ b",
    "【 Barycentric solution. First, we find the coordin",
    "【 Angle chasing solution (Mason Fang). Obviously $",
    "【 Inversive solution (Kelin Zhu). Invert about $A$",
    "l The one-liner. ",
    " The external power solution. We distinguish betw",
    "Cauchy-Schwarz approach. Apply Titu lemma to get",
    "đ Cauchy-Schwarz approach. The main magical claim ",
    "『 Alternate solution (by proposer). Let $L$ be dia"
]
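# special_cases are literal prefixes of alternative-solution header lines that the split regexes
# above miss (kept verbatim, OCR artifacts such as 【, 『 or stray letters included); they appear to
# have been collected via the DEBUG print in parse_solution below. Any line containing one of them,
# case-insensitively, is treated as the start of a new solution.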


def add_content(current_dict):
    """Flush the accumulated lines into current_dict["problems"] or current_dict["solutions"]."""
    if not current_dict["lines"] or not current_dict["label"]:
        return
    text_str = " ".join(current_dict["lines"]).strip()
    entry = {"label": current_dict["label"]}
    if current_dict["class"] == "problem":
        entry["problem"] = text_str
        current_dict["problems"].append(entry)
    elif current_dict["class"] == "solution":
        entry["solution"] = text_str
        entry["solution_lines"] = current_dict["lines"]
        current_dict["solutions"].append(entry)


def parse(file):
    """Parse one markdown file into a problems dataframe and a solutions dataframe."""
    content = file.read_text(encoding="utf-8")
    current = {
        "label": None,
        "class": None,
        "lines": [],
        "problems": [],
        "solutions": []
    }
    for line in content.splitlines():
        if match := section_re.match(line):
            add_content(current)
            if "problems" in line.lower():
                current["class"] = "problem"
            elif sub_match := solution_label_re.match(line):
                current["class"] = "other"
                current["label"] = sub_match.group(1)
            elif match := solution_re.match(line):
                current["class"] = "solution"
            else:
                current["class"] = "other"
            current["lines"] = []
        elif match := problem_re.match(line):
            if current["class"] == "solution":
                current["lines"].append(line)
            else:
                add_content(current)
                label, text = match.groups()
                current["label"] = label
                current["lines"] = [text]
        else:
            if current["class"] == "solution" or current["class"] == "problem":
                current["lines"].append(line)
    add_content(current)
    problems_df = pd.DataFrame(current["problems"])
    solutions_df = pd.DataFrame(current["solutions"])
    return problems_df, solutions_df


def parse_solution(lines):
    """Parses the lines of one solution block, detects multiple solutions and splits them."""
    solutions = []
    current = []
    for line in lines:
        if match := solution_split_re1.search(line):
            solutions.append(" ".join(current).strip())
            current = [line]
        elif match := solution_split_re2.search(line):
            solutions.append(" ".join(current).strip())
            current = [line]
        elif any(case.lower() in line.lower() for case in special_cases):
            solutions.append(" ".join(current).strip())
            current = [line]
        elif any(case.lower() in line[:50].lower() for case in ["solution", "approach", "proof"]):
            # A line that looks like a solution header but matches none of the known patterns is
            # dropped; with DEBUG enabled it is printed so that it can be added to special_cases.
            if DEBUG:
                if not any(case.lower() in line[:50].lower() for case in ["remark", "proof.", "proof", "approaches", "solutions"]):
                    print(line[:50])
        else:
            current.append(line)
    solutions.append(" ".join(current).strip())
    return solutions
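
# Illustrative example (assumed input, not taken from an actual file):
#   parse_solution([
#       "Problem statement Prove that the answer is 7.",
#       "We give two different arguments.",
#       "First solution by induction. ...",
#       "Second solution by algebra. ...",
#   ])
#   returns
#   ["Problem statement Prove that the answer is 7. We give two different arguments.",
#    "First solution by induction. ...",
#    "Second solution by algebra. ..."]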


def find_mult_solutions(solutions_df):
    """Apply parse_solution to every row and explode rows that contain multiple solutions."""
    solutions_df["solution"] = solutions_df["solution_lines"].apply(lambda v: parse_solution(v))
    solutions_df = solutions_df.drop(columns=["solution_lines"])
    solutions_df = solutions_df.explode('solution', ignore_index=True)
    return solutions_df


def join(problems_df, solutions_df):
    pairs_df = problems_df.merge(solutions_df, on=["label"], how="outer")
    return pairs_df


def clean(pairs_df):
    '''Removes the problem statement from the solution in an approximate way.'''
    def find_closest_char(s, i, char):
        left = s.rfind(char, 0, i)
        right = s.find(char, i)
        if left == -1 and right == -1:
            return None
        elif left == -1:
            return right
        elif right == -1:
            return left
        else:
            return left if abs(i - left) <= abs(i - right) else right

    def remove_approx_match(row, threshold=90):
        problem = row["problem"]
        solution = row["solution"]
        # Guard against unmatched rows left as NaN by the outer merge in join().
        if not isinstance(problem, str) or not isinstance(solution, str):
            return solution
        similarity = fuzz.partial_ratio(problem, solution)
        if similarity >= threshold:
            i = find_closest_char(solution, len(problem), problem[-1])
            if i is not None:
                solution = solution[i+1:]
        return solution

    pairs_df["solution"] = pairs_df.apply(remove_approx_match, axis=1)
    return pairs_df
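
# Illustrative example (assumed data): for problem "Prove that X." and solution
# "Problem statement Prove that X. First solution. ...", partial_ratio is 100, so the solution is
# cut just after the occurrence of "." (the problem's last character) closest to index
# len(problem), leaving " First solution. ...".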


def process_mult_solutions(pairs_df):
    '''In case of multiple solutions, prepend the common text to all solutions.'''
    def prepend_to_solution(group):
        if len(group) == 1:
            return group
        first_row = group.iloc[0]
        comment = f"{first_row['solution']}"
        group = group.iloc[1:].copy()
        group["solution"] = group["solution"].apply(lambda x: f"{comment} {x}")
        return group

    pairs_df = pairs_df.groupby("label", group_keys=False).apply(prepend_to_solution).reset_index(drop=True)
    return pairs_df
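
# Illustrative example (assumed data): if a label's exploded solutions are ["Answer: 7.",
# "First solution ...", "Second solution ..."], the leading fragment "Answer: 7." is treated as
# shared preamble, and the group becomes ["Answer: 7. First solution ...",
# "Answer: 7. Second solution ..."].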


def add_metadata(pairs_df, year, tier, resource_path):
    pairs_df['year'] = year
    pairs_df['tier'] = tier
    pairs_df['exam'] = 'USAMO'
    pairs_df['metadata'] = [{"resource_path": resource_path}] * len(pairs_df)
    pairs_df['problem_type'] = None
    pairs_df.rename(columns={'label': 'problem_label'}, inplace=True)
    return pairs_df[['year', 'tier', 'problem_label', 'problem_type', 'exam', 'problem', 'solution', 'metadata']]


def write_pairs(file_path, pairs_df):
    pairs_df = pairs_df.replace({pd.NA: None, pd.NaT: None, float("nan"): None})
    pairs_dict = pairs_df.to_dict(orient="records")
    output_text = ""
    for pair in pairs_dict:
        output_text += json.dumps(pair, ensure_ascii=False) + "\n"
    file_path.write_text(output_text, encoding="utf-8")


if __name__ == "__main__":
    project_root = Path(__file__).parent.parent.parent

    problem_count = 0
    solution_count = 0

    tier = "T1"
    compet_base_path = Path(__file__).resolve().parent.parent
    compet_md_path = compet_base_path / "md"
    seg_output_path = compet_base_path / "segmented"

    for md_file in compet_md_path.glob("**/*.md"):
        year = re.search(r"\d{4}", md_file.name).group()
        output_file = seg_output_path / md_file.relative_to(compet_md_path).with_suffix(".jsonl")
        output_file.parent.mkdir(parents=True, exist_ok=True)

        problems, solutions = parse(md_file)
        solutions = find_mult_solutions(solutions)
        pairs_df = join(problems, solutions)
        pairs_df = clean(pairs_df)
        pairs_df = process_mult_solutions(pairs_df)
        pairs_df = add_metadata(
            pairs_df, year, tier, output_file.relative_to(project_root).as_posix()
        )
        problem_count += len(problems)
        solution_count += len(pairs_df)
        write_pairs(output_file, pairs_df)

    print(f"problem count: {problem_count}")
    print(f"solution count: {solution_count}")