Dataset Viewer
Auto-converted to Parquet
| Column | Type | Values |
| --- | --- | --- |
| repo | string | lengths 10 to 39 |
| pull_number | int64 | 74 to 29.2k |
| url | string | lengths 37 to 68 |
| instance_id | string | lengths 14 to 45 |
| issue_numbers | string | 26 distinct values |
| base_commit | string | length 40 |
| patch | string | lengths 525 to 15.9k |
| test_patch | string | lengths 606 to 17.8k |
| created_at | timestamp[s] | 2015-03-20 20:39:55 to 2025-01-02 13:53:18 |
| readmes | string | 2 distinct values |
| files | string | lengths 365 to 430k |
| non_py_patch | string | lengths 0 to 7.21k |
| new_components | string | lengths 142 to 6.63k |
| version | string | 37 distinct values |
| FAIL_TO_PASS | string | lengths 13 to 15.6k |
| PASS_TO_PASS | string | lengths 2 to 65k |
| environment_setup_commit | string | length 40 |
| problem_statement | string | lengths 367 to 40.7k |
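These columns map directly onto a Hugging Face `datasets` object. Below is a minimal sketch of loading the data and inspecting one record; the dataset id `microsoft/FEA-Bench` is taken from the README embedded in the example rows and the `test` split name is an assumption, so substitute whatever this viewer actually points to.

```python
# Minimal sketch: load the dataset and inspect one record's fields.
# Assumptions (not confirmed by this page): the dataset id "microsoft/FEA-Bench"
# and the split name "test" are placeholders taken from the embedded README.
from datasets import load_dataset

ds = load_dataset("microsoft/FEA-Bench", split="test")
row = ds[0]

# Small scalar fields
for key in ("repo", "pull_number", "instance_id", "base_commit", "created_at", "version"):
    print(f"{key}: {row[key]}")

# Large string fields (diffs, file contents, JSON blobs) -- print sizes only
for key in ("patch", "test_patch", "files", "problem_statement"):
    print(f"{key}: {len(row[key])} characters")
```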
repo: EleutherAI/lm-evaluation-harness
pull_number: 1566
url: https://github.com/EleutherAI/lm-evaluation-harness/pull/1566
instance_id: EleutherAI__lm-evaluation-harness-1566
issue_numbers: []
base_commit: 49695e8d94c3ab011b7ae8814d809de30b1b1182
diff --git a/lm_eval/__main__.py b/lm_eval/__main__.py index 489c1662d41..18c243d431d 100644 --- a/lm_eval/__main__.py +++ b/lm_eval/__main__.py @@ -53,13 +53,30 @@ def parse_value(item): return items -def parse_eval_args() -> argparse.Namespace: +def check_argument_types(parser: argparse.ArgumentParser): + """ + Check to make sure all CLI args are typed, raises error if not + """ + for action in parser._actions: + if action.dest != "help" and not action.const: + if action.type is None: + raise ValueError( + f"Argument '{action.dest}' doesn't have a type specified." + ) + else: + continue + + +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument("--model", "-m", default="hf", help="Name of model e.g. `hf`") + parser.add_argument( + "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`" + ) parser.add_argument( "--tasks", "-t", default=None, + type=str, metavar="task1,task2", help="To get full list of tasks, use the command lm-eval --tasks list", ) @@ -67,6 +84,7 @@ def parse_eval_args() -> argparse.Namespace: "--model_args", "-a", default="", + type=str, help="Comma separated string arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`", ) parser.add_argument( @@ -164,6 +182,7 @@ def parse_eval_args() -> argparse.Namespace: ) parser.add_argument( "--gen_kwargs", + type=dict, default=None, help=( "String arguments for model generation on greedy_until tasks," @@ -180,6 +199,7 @@ def parse_eval_args() -> argparse.Namespace: ) parser.add_argument( "--wandb_args", + type=str, default="", help="Comma separated string arguments passed to wandb.init, e.g. `project=lm-eval,job_type=eval", ) @@ -209,13 +229,19 @@ def parse_eval_args() -> argparse.Namespace: help="Sets trust_remote_code to True to execute code to create HF Datasets from the Hub", ) + return parser + + +def parse_eval_args(parser: argparse.ArgumentParser) -> argparse.Namespace: + check_argument_types(parser) return parser.parse_args() def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None: if not args: # we allow for args to be passed externally, else we parse them ourselves - args = parse_eval_args() + parser = setup_parser() + args = parse_eval_args(parser) if args.wandb_args: wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args))
diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 00000000000..feaa7340d6a --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,43 @@ +import argparse + +import pytest + +import lm_eval.__main__ + + +def test_cli_parse_error(): + """ + Assert error raised if cli args argument doesn't have type + """ + with pytest.raises(ValueError): + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`" + ) + parser.add_argument( + "--tasks", + "-t", + default=None, + metavar="task1,task2", + help="To get full list of tasks, use the command lm-eval --tasks list", + ) + lm_eval.__main__.check_argument_types(parser) + + +def test_cli_parse_no_error(): + """ + Assert typed arguments are parsed correctly + """ + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`" + ) + parser.add_argument( + "--tasks", + "-t", + type=str, + default=None, + metavar="task1,task2", + help="To get full list of tasks, use the command lm-eval --tasks list", + ) + lm_eval.__main__.check_argument_types(parser)
created_at: 2024-03-12T17:35:39
{"README.md": "<p align=\"center\">\n <a href=\"https://gmago-leway.github.io/fea-bench.github.io/\">\n <img src=\"assets/FEA-Bench-full.png\" style=\"height: 10em\" alt=\"fea-bench\" />\n </a>\n</p>\n\n<p align=\"center\">\n <em>A benchmark that aims to evaluate the capability of implementing new features in the code repositories.</em>\n</p>\n\n<p align=\"center\">\n <a href=\"https://arxiv.org/abs/2503.06680\">\n <img alt=\"paper\" src=\"https://img.shields.io/badge/ArXiv-%23B31B1B?style=for-the-badge&logo=arXiv\">\n </a>\n <a href=\"./LICENSE\">\n <img alt=\"License\" src=\"https://img.shields.io/github/license/SWE-bench/SWE-bench?style=for-the-badge\">\n </a>\n <a href=\"https://gmago-leway.github.io/fea-bench.github.io/\">\n <img alt=\"Leaderboard\" src=\"https://img.shields.io/badge/leaderboard-%F0%9F%8F%86-1?style=for-the-badge\">\n </a>\n <a href=\"https://huggingface.co/datasets/microsoft/FEA-Bench\">\n <img alt=\"dataset\" src=\"https://img.shields.io/badge/Dataset-HF-FFD21E.svg?style=for-the-badge&logo=huggingface&logoColor=FFD21E\">\n </a>\n</p>\n\n---\n\n# Evaluation\n\nThis repository is the official implementation of the paper \"FEA-Bench: A Benchmark for Evaluating Repository-Level Code Generation for Feature Implementation.\" It can be used for baseline evaluation using the prompts mentioned in the paper.\n\nThe repository includes several functionalities, primarily for obtaining the full dataset, running model inference aligned with the paper, and evaluating the results. The complete pipeline is as follows:\n\n## 1. Environment Setup\n\nYou can create a new Python environment and install all dependencies using:\n```bash\npip install -e .\n```\nIf you plan to use VLLM inference, ensure that the installed libraries match your hardware.\n\n## 2. Building the Full Evaluation Dataset\n\nDue to licensing and company policies, we cannot release the full dataset. Our published version ([https://huggingface.co/datasets/microsoft/FEA-Bench](https://huggingface.co/datasets/microsoft/FEA-Bench)) only includes essential attributes, and the remaining content needs to be scraped from GitHub.\n\nTo construct the full FEA-Bench dataset and save it in the `feabench-data` folder, run the following command. Note that you need to replace `GITHUB_TOKEN` with your own GitHub token, which should have read-only access to public repositories:\n```bash\nexport GITHUB_TOKEN=\"xxx\"\n\npython -m feabench.get_dataset \\\n --dataset microsoft/FEA-Bench \\\n --testbed feabench-data/testbed \\\n --lite_ids instances_lite.json \\\n --medium_file feabench-data/FEA-Bench-v1.0-medium.jsonl \\\n --standard_dataset_path feabench-data/FEA-Bench-v1.0-Standard \\\n --oracle_dataset_path feabench-data/FEA-Bench-v1.0-Oracle \\\n --lite_standard_dataset_path feabench-data/FEA-Bench-v1.0-Lite-Standard \\\n --lite_oracle_dataset_path feabench-data/FEA-Bench-v1.0-Lite-Oracle\n```\n\n## 3. Running Model Inference\n\nOur repository only provides inference methods consistent with those in the paper. 
Agentless and other agent-based inferences can use the `FEA-Bench-v1.0-Lite-Standard` dataset constructed in the previous step, which is aligned with the format of SWE-Bench.\n\n### Example of VLLM Inference:\n```bash\nexport MAX_SEQ_LEN=128000\nexport MAX_GEN_LEN=4096\n\nDATASET_PATH=feabench-data/FEA-Bench-v1.0-Oracle\nMODEL_NAME=Qwen/Qwen2.5-Coder-3B-Instruct\nRESULTS_ROOT_DIR=scripts/experiments/results_full\n\nPROMPT_MODE=natural-detailed\npython -m feabench.run_prediction \\\n --dataset_name_or_path $DATASET_PATH \\\n --model_type vllm \\\n --model_name_or_path $MODEL_NAME \\\n --input_text $PROMPT_MODE \\\n --output_dir $RESULTS_ROOT_DIR/$PROMPT_MODE\n```\n\n### Example of OpenAI API-style Inference:\n(DEEPSEEK_TOKENIZER is only required when using DeepSeek model inference)\n```bash\nexport DEEPSEEK_TOKENIZER_PATH=\"xxx\"\nexport OPENAI_API_KEY=\"xxx\"\nexport OPENAI_BASE_URL=\"https://api.deepseek.com\"\n\nDATASET_PATH=feabench-data/FEA-Bench-v1.0-Oracle\nMODEL_NAME=deepseek-chat\nRESULTS_ROOT_DIR=scripts/experiments/results_full\n\nPROMPT_MODE=natural-detailed\npython -m feabench.run_prediction \\\n --dataset_name_or_path $DATASET_PATH \\\n --model_type openai \\\n --model_name_or_path $MODEL_NAME \\\n --input_text $PROMPT_MODE \\\n --output_dir $RESULTS_ROOT_DIR/$PROMPT_MODE \\\n --num_proc 1\n```\n\nAfter running the inference, you should see the output `.jsonl` result files in the specified `output_dir`.\n\n## 4. Running Model Evaluation\n\nOur evaluation process is based on the code provided by SWE-Bench. We have provided a patch file `swe-bench.diff` to include the environment configurations for the task instances we are involved in.\n\nClone the SWE-Bench repository and apply the patch:\n```bash\nmkdir -p evaluator\ncd evaluator\ngit clone https://github.com/SWE-bench/SWE-bench.git\ncd SWE-bench\ngit checkout a0536ee6f9fd5ff88acf17a36a384bf3da3d93d6\ngit apply ../../swe-bench.diff\nconda create --name fea-eval python=3.11\nconda activate fea-eval\npip install -e .\n```\n\nTo verify that the FEA-Bench task instances can run correctly on your machine, you can build a gold result based on the dataset:\n```bash\npython -m feabench.get_gold_results \\\n --dataset_name_or_path feabench-data/FEA-Bench-v1.0-Standard \\\n --save_dir feabench-data/experiments/gold \\\n --file_name Gold__FEABench_v1.0__test.jsonl\n```\n\nThe command to run the evaluation script is as follows (using the gold result constructed above as an example):\n```bash\npython -m swebench.harness.run_evaluation \\\n --dataset_name ../../feabench-data/FEA-Bench-v1.0-Standard \\\n --predictions_path ../../feabench-data/experiments/gold/Gold__FEABench_v1.0__test.jsonl \\\n --max_workers 10 \\\n --cache_level instance \\\n --timeout 900 \\\n --run_id FEABench_v1_Gold\n```\nThe usage is identical to SWE-Bench. You can set the cache level `cache_level` based on your disk size. You should then obtain a result file similar to the following `.json` format:\n```json\n{\n \"total_instances\": 1401,\n \"submitted_instances\": 1401,\n \"completed_instances\": 1401,\n \"resolved_instances\": 1401,\n \"unresolved_instances\": 0,\n \"empty_patch_instances\": 0,\n \"error_instances\": 0,\n ...\n}\n```\n\nCongratulations! You have completed the usage of FEA-Bench. 
If you have any questions, please raise them in the issues.\n\n---\n\nFor more details, please refer to the [FEA-Bench Paper](https://arxiv.org/abs/2503.06680).\nIf you find our work helpful, we would be grateful if you could cite our work.\n```\n@misc{li2025feabenchbenchmarkevaluatingrepositorylevel,\n title={FEA-Bench: A Benchmark for Evaluating Repository-Level Code Generation for Feature Implementation}, \n author={Wei Li and Xin Zhang and Zhongxin Guo and Shaoguang Mao and Wen Luo and Guangyue Peng and Yangyu Huang and Houfeng Wang and Scarlett Li},\n year={2025},\n eprint={2503.06680},\n archivePrefix={arXiv},\n primaryClass={cs.SE},\n url={https://arxiv.org/abs/2503.06680}, \n}\n```\n\n\n\n## Contributing\n\nThis project welcomes contributions and suggestions. Most contributions require you to agree to a\nContributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us\nthe rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.\n\nWhen you submit a pull request, a CLA bot will automatically determine whether you need to provide\na CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions\nprovided by the bot. You will only need to do this once across all repos using our CLA.\n\nThis project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).\nFor more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or\ncontact [[email protected]](mailto:[email protected]) with any additional questions or comments.\n\n## Trademarks\n\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft \ntrademarks or logos is subject to and must follow \n[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).\nUse of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.\nAny use of third-party trademarks or logos are subject to those third-party's policies.\n"}
{"lm_eval/__main__.py": "import argparse\nimport json\nimport logging\nimport os\nimport re\nimport sys\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Union\n\nimport numpy as np\n\nfrom lm_eval import evaluator, utils\nfrom lm_eval.evaluator import request_caching_arg_to_dict\nfrom lm_eval.logging_utils import WandbLogger\nfrom lm_eval.tasks import TaskManager, include_path, initialize_tasks\nfrom lm_eval.utils import make_table, simple_parse_args_string\n\n\nDEFAULT_RESULTS_FILE = \"results.json\"\n\n\ndef _handle_non_serializable(o):\n if isinstance(o, np.int64) or isinstance(o, np.int32):\n return int(o)\n elif isinstance(o, set):\n return list(o)\n else:\n return str(o)\n\n\ndef _int_or_none_list_arg_type(max_len: int, value: str, split_char: str = \",\"):\n def parse_value(item):\n item = item.strip().lower()\n if item == \"none\":\n return None\n try:\n return int(item)\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{item} is not an integer or None\")\n\n items = [parse_value(v) for v in value.split(split_char)]\n num_items = len(items)\n\n if num_items == 1:\n # Makes downstream handling the same for single and multiple values\n items = items * max_len\n elif num_items != max_len:\n raise argparse.ArgumentTypeError(\n f\"Argument requires {max_len} integers or None, separated by '{split_char}'\"\n )\n\n return items\n\n\ndef parse_eval_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"--model\", \"-m\", default=\"hf\", help=\"Name of model e.g. `hf`\")\n parser.add_argument(\n \"--tasks\",\n \"-t\",\n default=None,\n metavar=\"task1,task2\",\n help=\"To get full list of tasks, use the command lm-eval --tasks list\",\n )\n parser.add_argument(\n \"--model_args\",\n \"-a\",\n default=\"\",\n help=\"Comma separated string arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`\",\n )\n parser.add_argument(\n \"--num_fewshot\",\n \"-f\",\n type=int,\n default=None,\n metavar=\"N\",\n help=\"Number of examples in few-shot context\",\n )\n parser.add_argument(\n \"--batch_size\",\n \"-b\",\n type=str,\n default=1,\n metavar=\"auto|auto:N|N\",\n help=\"Acceptable values are 'auto', 'auto:N' or N, where N is an integer. Default 1.\",\n )\n parser.add_argument(\n \"--max_batch_size\",\n type=int,\n default=None,\n metavar=\"N\",\n help=\"Maximal batch size to try with --batch_size auto.\",\n )\n parser.add_argument(\n \"--device\",\n type=str,\n default=None,\n help=\"Device to use (e.g. cuda, cuda:0, cpu).\",\n )\n parser.add_argument(\n \"--output_path\",\n \"-o\",\n default=None,\n type=str,\n metavar=\"DIR|DIR/file.json\",\n help=\"The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.\",\n )\n parser.add_argument(\n \"--limit\",\n \"-L\",\n type=float,\n default=None,\n metavar=\"N|0<N<1\",\n help=\"Limit the number of examples per task. \"\n \"If <1, limit is a percentage of the total number of examples.\",\n )\n parser.add_argument(\n \"--use_cache\",\n \"-c\",\n type=str,\n default=None,\n metavar=\"DIR\",\n help=\"A path to a sqlite db file for caching model responses. `None` if not caching.\",\n )\n parser.add_argument(\n \"--cache_requests\",\n type=str,\n default=None,\n choices=[\"true\", \"refresh\", \"delete\"],\n help=\"Speed up evaluation by caching the building of dataset requests. 
`None` if not caching.\",\n )\n parser.add_argument(\n \"--check_integrity\",\n action=\"store_true\",\n help=\"Whether to run the relevant part of the test suite for the tasks.\",\n )\n parser.add_argument(\n \"--write_out\",\n \"-w\",\n action=\"store_true\",\n default=False,\n help=\"Prints the prompt for the first few documents.\",\n )\n parser.add_argument(\n \"--log_samples\",\n \"-s\",\n action=\"store_true\",\n default=False,\n help=\"If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis. Use with --output_path.\",\n )\n parser.add_argument(\n \"--show_config\",\n action=\"store_true\",\n default=False,\n help=\"If True, shows the the full config of all tasks at the end of the evaluation.\",\n )\n parser.add_argument(\n \"--include_path\",\n type=str,\n default=None,\n metavar=\"DIR\",\n help=\"Additional path to include if there are external tasks to include.\",\n )\n parser.add_argument(\n \"--gen_kwargs\",\n default=None,\n help=(\n \"String arguments for model generation on greedy_until tasks,\"\n \" e.g. `temperature=0,top_k=0,top_p=0`.\"\n ),\n )\n parser.add_argument(\n \"--verbosity\",\n \"-v\",\n type=str.upper,\n default=\"INFO\",\n metavar=\"CRITICAL|ERROR|WARNING|INFO|DEBUG\",\n help=\"Controls the reported logging error level. Set to DEBUG when testing + adding new task configurations for comprehensive log output.\",\n )\n parser.add_argument(\n \"--wandb_args\",\n default=\"\",\n help=\"Comma separated string arguments passed to wandb.init, e.g. `project=lm-eval,job_type=eval\",\n )\n parser.add_argument(\n \"--predict_only\",\n \"-x\",\n action=\"store_true\",\n default=False,\n help=\"Use with --log_samples. Only model outputs will be saved and metrics will not be evaluated.\",\n )\n parser.add_argument(\n \"--seed\",\n type=partial(_int_or_none_list_arg_type, 3),\n default=\"0,1234,1234\", # for backward compatibility\n help=(\n \"Set seed for python's random, numpy and torch.\\n\"\n \"Accepts a comma-separated list of 3 values for python's random, numpy, and torch seeds, respectively, \"\n \"or a single integer to set the same seed for all three.\\n\"\n \"The values are either an integer or 'None' to not set the seed. Default is `0,1234,1234` (for backward compatibility).\\n\"\n \"E.g. `--seed 0,None,8` sets `random.seed(0)` and `torch.manual_seed(8)`. 
Here numpy's seed is not set since the second value is `None`.\\n\"\n \"E.g, `--seed 42` sets all three seeds to 42.\"\n ),\n )\n parser.add_argument(\n \"--trust_remote_code\",\n action=\"store_true\",\n help=\"Sets trust_remote_code to True to execute code to create HF Datasets from the Hub\",\n )\n\n return parser.parse_args()\n\n\ndef cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:\n if not args:\n # we allow for args to be passed externally, else we parse them ourselves\n args = parse_eval_args()\n\n if args.wandb_args:\n wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args))\n\n eval_logger = utils.eval_logger\n eval_logger.setLevel(getattr(logging, f\"{args.verbosity}\"))\n eval_logger.info(f\"Verbosity set to {args.verbosity}\")\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\n if args.predict_only:\n args.log_samples = True\n if (args.log_samples or args.predict_only) and not args.output_path:\n raise ValueError(\n \"Specify --output_path if providing --log_samples or --predict_only\"\n )\n\n initialize_tasks(args.verbosity)\n task_manager = TaskManager(args.verbosity, include_path=args.include_path)\n\n if args.limit:\n eval_logger.warning(\n \" --limit SHOULD ONLY BE USED FOR TESTING.\"\n \"REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\"\n )\n if args.include_path is not None:\n eval_logger.info(f\"Including path: {args.include_path}\")\n include_path(args.include_path)\n\n if args.tasks is None:\n eval_logger.error(\"Need to specify task to evaluate.\")\n sys.exit()\n elif args.tasks == \"list\":\n eval_logger.info(\n \"Available Tasks:\\n - {}\".format(\"\\n - \".join(task_manager.all_tasks))\n )\n sys.exit()\n else:\n if os.path.isdir(args.tasks):\n import glob\n\n task_names = []\n yaml_path = os.path.join(args.tasks, \"*.yaml\")\n for yaml_file in glob.glob(yaml_path):\n config = utils.load_yaml_config(yaml_file)\n task_names.append(config)\n else:\n task_list = args.tasks.split(\",\")\n task_names = task_manager.match_tasks(task_list)\n for task in [task for task in task_list if task not in task_names]:\n if os.path.isfile(task):\n config = utils.load_yaml_config(task)\n task_names.append(config)\n task_missing = [\n task for task in task_list if task not in task_names and \"*\" not in task\n ] # we don't want errors if a wildcard (\"*\") task name was used\n\n if task_missing:\n missing = \", \".join(task_missing)\n eval_logger.error(\n f\"Tasks were not found: {missing}\\n\"\n f\"{utils.SPACING}Try `lm-eval --tasks list` for list of available tasks\",\n )\n raise ValueError(\n f\"Tasks not found: {missing}. Try `lm-eval --tasks list` for list of available tasks, or '--verbosity DEBUG' to troubleshoot task registration issues.\"\n )\n\n if args.output_path:\n path = Path(args.output_path)\n # check if file or 'dir/results.json' exists\n if path.is_file():\n raise FileExistsError(f\"File already exists at {path}\")\n output_path_file = path.joinpath(DEFAULT_RESULTS_FILE)\n if output_path_file.is_file():\n eval_logger.warning(\n f\"File {output_path_file} already exists. 
Results will be overwritten.\"\n )\n # if path json then get parent dir\n elif path.suffix in (\".json\", \".jsonl\"):\n output_path_file = path\n path.parent.mkdir(parents=True, exist_ok=True)\n path = path.parent\n else:\n path.mkdir(parents=True, exist_ok=True)\n\n # Respect user's value passed in via CLI, otherwise default to True and add to comma-separated model args\n if args.trust_remote_code:\n os.environ[\"HF_DATASETS_TRUST_REMOTE_CODE\"] = str(args.trust_remote_code)\n args.model_args = (\n args.model_args\n + f\",trust_remote_code={os.environ['HF_DATASETS_TRUST_REMOTE_CODE']}\"\n )\n\n eval_logger.info(f\"Selected Tasks: {task_names}\")\n eval_logger.info(\"Loading selected tasks...\")\n\n request_caching_args = request_caching_arg_to_dict(\n cache_requests=args.cache_requests\n )\n\n results = evaluator.simple_evaluate(\n model=args.model,\n model_args=args.model_args,\n tasks=task_names,\n num_fewshot=args.num_fewshot,\n batch_size=args.batch_size,\n max_batch_size=args.max_batch_size,\n device=args.device,\n use_cache=args.use_cache,\n limit=args.limit,\n check_integrity=args.check_integrity,\n write_out=args.write_out,\n log_samples=args.log_samples,\n gen_kwargs=args.gen_kwargs,\n task_manager=task_manager,\n verbosity=args.verbosity,\n predict_only=args.predict_only,\n random_seed=args.seed[0],\n numpy_random_seed=args.seed[1],\n torch_random_seed=args.seed[2],\n **request_caching_args,\n )\n\n if results is not None:\n if args.log_samples:\n samples = results.pop(\"samples\")\n dumped = json.dumps(\n results, indent=2, default=_handle_non_serializable, ensure_ascii=False\n )\n if args.show_config:\n print(dumped)\n\n batch_sizes = \",\".join(map(str, results[\"config\"][\"batch_sizes\"]))\n\n # Add W&B logging\n if args.wandb_args:\n try:\n wandb_logger.post_init(results)\n wandb_logger.log_eval_result()\n if args.log_samples:\n wandb_logger.log_eval_samples(samples)\n except Exception as e:\n eval_logger.info(f\"Logging to Weights and Biases failed due to {e}\")\n\n if args.output_path:\n output_path_file.open(\"w\", encoding=\"utf-8\").write(dumped)\n\n if args.log_samples:\n for task_name, config in results[\"configs\"].items():\n output_name = \"{}_{}\".format(\n re.sub(\"/|=\", \"__\", args.model_args), task_name\n )\n filename = path.joinpath(f\"{output_name}.jsonl\")\n samples_dumped = json.dumps(\n samples[task_name],\n indent=2,\n default=_handle_non_serializable,\n ensure_ascii=False,\n )\n filename.write_text(samples_dumped, encoding=\"utf-8\")\n\n print(\n f\"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, \"\n f\"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}\"\n )\n print(make_table(results))\n if \"groups\" in results:\n print(make_table(results, \"groups\"))\n\n if args.wandb_args:\n # Tear down wandb run once all the logging is done.\n wandb_logger.run.finish()\n\n\nif __name__ == \"__main__\":\n cli_evaluate()\n"}
{"lm_eval/__main__.py": [{"type": "function", "name": "check_argument_types", "lines": [56, 67], "signature": "def check_argument_types(parser: argparse.ArgumentParser):", "doc": "Check to make sure all CLI args are typed, raises error if not"}, {"type": "function", "name": "setup_parser", "lines": [70, 232], "signature": "def setup_parser() -> argparse.ArgumentParser:", "doc": ""}]}
version: null
FAIL_TO_PASS: ["tests/test_cli.py::test_cli_parse_error", "tests/test_cli.py::test_cli_parse_no_error"]
PASS_TO_PASS: []
environment_setup_commit: decc533d02222f3b866d9a89263277fe0cc2fcb2
{"first_commit_time": 1710264318.0, "pr_title": "Proposed approach for testing CLI arg parsing", "pr_body": "See discussion here: https://github.com/EleutherAI/lm-evaluation-harness/issues/1518\r\n\r\nHere's an approach to start testing CLI argument parsing:\r\n\r\n1. Separate out setting up the argument parser in `parse_eval_args` into a separate method, `setup_parser` that gets called in `parse_eval_args`\r\n2. Create unit tests that call the parser for each of the command line arguments \r\n3. Adding specific TypeError exceptions at each argument entrypoint in the `cli_evaluate` method\r\n\r\nLet me know what you think about this approach. If it seems reasonable, I'll add the tests for the rest of the methods and exceptions where it's reasonable. \r\n\r\n@LSinev @haileyschoelkopf ", "pr_timeline": [{"time": 1710267420.0, "comment": "Combination of HFArgumentParser from transformers with args setup through dataclass like https://github.com/huggingface/transformers/blob/main/examples/research_projects/wav2vec2/run_asr.py#L343 and the `__post_init__` value check like in video (link with timecode) https://youtu.be/zN4VCb0LbQI?t=592\r\nBut this still may not solve points that follow.\r\n\r\nAs for the current code, testing the parser the way presented seems like testing the argument parser, not the code of this repo module. We put 5 to something that should be a number and it works. In this case it might be useful to check that it always fails if the input is like `--numshots five`. What are the cases, which will fail at new written tests, which will not fail inside ArgumentParser? \r\n\r\nThe `try... except' example here seems to be overreacting to an already solved case \u2014 no prevention of new failures. Some future failures may be prevented (though this hypothesis should be tested by turning on failed code and rechecking) after mypy checks are turned back on (even for tests)."}, {"time": 1710426575.0, "comment": "Thanks for the feedback @LSinev ! You're right that these cases don't necessarily cover what we'd like. After thinking about this and checking the videos and the links, I decided to take a different approach and unit test whether each CLI argument, with the exception of booleans, has a type. \r\n\r\nThat way, if you input one without a type unit tests won't pass and if it's a boolean you'll have to delcare a default anyway. Let me know what you think about this approach. "}, {"time": 1710428411.0, "comment": "This seems to be a much better approach.\r\nBy the way, some boolean cli arguments may be also set like\r\n```\r\n parser.add_argument(\r\n \"--some_boolean_arg\",\r\n type=bool,\r\n default=True,\r\n help=\"do something good\",\r\n action=argparse.BooleanOptionalAction, # type: ignore[attr-defined]\r\n )\r\n```\r\nwhich also adds `--no-some_boolean_arg`. Mentioning this way in case you want check those too.\r\n"}, {"time": 1710431811.0, "comment": "Thanks!\r\n\r\n> parser.add_argument(\r\n> \"--some_boolean_arg\",\r\n> type=bool,\r\n> default=True,\r\n> help=\"do something good\",\r\n> action=argparse.BooleanOptionalAction, # type: ignore[attr-defined]\r\n> )\r\n\r\nI checked these and decided not to add a test for them since we use the `store_true` pattern generally in all our arguments and it makes sense to standardize on this, what do you think? "}, {"time": 1710437845.0, "comment": "Standardization is good for future improvements and development. 
Even more, after reading the documentation I see that `BooleanOptionalAction` is only available since python 3.9, so it is of no use as this repo should support 3.8 as well. But I am not sure if this `store_true` pattern with `default=True` is OK:\r\n ```\r\n parser.add_argument(\r\n \"--trust_remote_code\",\r\n default=True,\r\n action=\"store_true\",\r\n help=\"Sets trust_remote_code to True to execute code to create HF Datasets from the Hub\",\r\n )\r\n```\r\nwith or without this argument, the code is trusted by default. I don't know if this pattern adds an option of `--no-trust_remote_code` (and also if it depends on the Python version)."}, {"time": 1710444571.0, "comment": "The behavior of `store_true` seems somewhat confusing in general. We override to true in the case of the default and respect the user's settings, but if we don't set the default to `True`, then it defaults to `False`, at least in `3.9`: https://gist.github.com/veekaybee/2c8769789a90f219dc83a9e681773000\r\n\r\nIronically this is the [default behavior of the module](https://github.com/python/cpython/blob/c432df6d56f3e02530132321b47dcc7b914a3660/Lib/argparse.py#L1008) \ud83d\ude05 . I figured from that perspective, it was better to explicitly set it (explicit is better than implicit, etc, zen of python) even though we handle it later downstream. I can also check `3.8` if that. helps"}, {"time": 1710448561.0, "comment": "I think, your gist example/test may be more insightful with parsing of same set of args (and also setup when no args is provided) by all three defined parsers.\r\n\r\nI am a bit confused here. As far as I understand now, after this PR (with `default=True` for some boolean `store_true` arguments) merged, calling `lm_eval` with some arguments from commandline, considering `--trust_remote_code` will have effect on datasets invocation like:\r\n\r\n| Command | Trust to remote code state |\r\n|--------|--------|\r\n| (some arguments but no `--trust_remote_code` at all)| `True`|\r\n| `--trust_remote_code` | `True` |\r\n| `--trust_remote_code false` | `False` |\r\n| `--trust_remote_code true` | `True` |\r\n| `--trust_remote_code 0` | `False` |\r\n| `--trust_remote_code 1` | `True` |\r\n\r\nAlso I suppose (in this case) user or any system calling from commandline consider equivalent all typical ways of setting `True` (`1`, `true`, `T`, `True`, `TRUE`, `on`, `On`, `ON`, `Y`, `y`, `yes`, `Yes`, `YES`) and `False` (`0`, `false`, `F`, `False`, `FALSE`, `off`, `Off`, `OFF`, `N`, `n`, `no`, `No`, `NO`). Is this implied somehow, or may be tested also?\r\n\r\nI checked one with ipython 3.8\r\n```\r\nIn [1]: import argparse\r\n\r\nIn [2]: import os\r\n\r\nIn [3]: parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\r\n\r\nIn [4]: parser.add_argument(\r\n ...: \"--trust_remote_code\",\r\n ...: default=True,\r\n ...: action=\"store_true\",\r\n ...: help=\"Sets trust_remote_code to True to execute code to create HF Datasets from the\r\n ...: Hub\",\r\n ...: )\r\nOut[4]: _StoreTrueAction(option_strings=['--trust_remote_code'], dest='trust_remote_code', nargs=0, const=True, default=True, type=None, choices=None, help='Sets trust_remote_code to True to execute code to create HF Datasets from the Hub', metavar=None)\r\n```\r\nand then\r\n```\r\nIn [20]: parser.parse_args([])\r\nOut[20]: Namespace(trust_remote_code=True)\r\n\r\nIn [21]: parser.parse_args(['--trust_remote_code'])\r\nOut[21]: Namespace(trust_remote_code=True)\r\n```\r\nSeems, there is no way to turn off trust in remote code. 
I tried some ways to set false and didn't find any.\r\n\r\nWithout `default=True` I thought is like\r\n| Command | Trust to remote code state |\r\n|--------|--------|\r\n| (some arguments but no `--trust_remote_code` at all)| `False`|\r\n| `--trust_remote_code` | `True` |\r\n\r\nNo confusion for user, just having key/argument set \u2014 turns something on, and no trust to remote code if not specified.\r\n\r\nFound big discussion with many ways to implement (still no pre-commit check which I was actually looking for): https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse"}, {"time": 1710449089.0, "comment": "It seems like you mention, we might want to test this flag specifically - I'll see what I can add from a testing perspective to cover `store_true` flags as they are currently implemented (with the intention of keeping code/behavior changes as minimal as possible) \r\n\r\nBased on the thread you posted, this looks like the easiest and most accepted answer https://stackoverflow.com/a/59579733.\r\n\r\nIn looking at how HF implements this, they take a similar approach:https://github.com/huggingface/transformers/blob/11bbb505c77a1d29370cf16a964cfe73b7a76340/src/transformers/hf_argparser.py#L34C5-L34C19\r\n\r\nso we could go this way too if we wanted. \r\n"}, {"time": 1710449786.0, "comment": "> the easiest and most accepted answer\r\n\r\nIf sorted by highest score, there are more interesting answers.\r\n\r\nLeaving argument like it was before, for me seems the best for now\r\n```\r\n parser.add_argument(\r\n \"--trust_remote_code\",\r\n action=\"store_true\",\r\n help=\"Sets trust_remote_code to True to execute code to create HF Datasets from the Hub\",\r\n )\r\n```\r\nno argument \u2014 no trust, and that's all."}, {"time": 1710450162.0, "comment": "\ud83d\udc4d Works for me, I just changed the two args that take it, but am keeping the ones added for args in cases where action is not `store_true`, such as `model`."}, {"time": 1710505357.0, "comment": "Hi @veekaybee ! This approach looks good to me.\r\n\r\nAnd agree we should leave the `store_true` args as is, as was decided here! The desired behavior is for passing `--trust_remote_code` to set it to True and if not provided to be False otherwise."}, {"time": 1710508492.0, "comment": "Thanks so much both for your discussion and comments! This PR is now ready for review. "}], "issues": {}}
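The patch in the row above splits parser construction into `setup_parser` and adds `check_argument_types`, which walks `parser._actions` and raises if any non-flag argument was registered without an explicit `type`. Below is a self-contained sketch of that check, reproduced from the diff rather than imported from lm-evaluation-harness.

```python
import argparse


def check_argument_types(parser: argparse.ArgumentParser) -> None:
    """Raise if any non-flag CLI argument was added without an explicit type."""
    # Mirrors the helper introduced in the patch above: skip --help and
    # const-carrying flags (store_true/store_const), require a type everywhere else.
    for action in parser._actions:
        if action.dest != "help" and not action.const:
            if action.type is None:
                raise ValueError(
                    f"Argument '{action.dest}' doesn't have a type specified."
                )


parser = argparse.ArgumentParser()
parser.add_argument("--model", "-m", type=str, default="hf")
parser.add_argument("--tasks", "-t", default=None)  # no type= -> should be flagged

try:
    check_argument_types(parser)
except ValueError as err:
    print(err)  # Argument 'tasks' doesn't have a type specified.
```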
repo: Project-MONAI/MONAI
pull_number: 465
url: https://github.com/Project-MONAI/MONAI/pull/465
instance_id: Project-MONAI__MONAI-465
issue_numbers: ["461"]
base_commit: 718d11abb2310ab74321256032a264488a7883b4
diff --git a/docs/source/data.rst b/docs/source/data.rst index 73a6a698bc..d998605bf8 100644 --- a/docs/source/data.rst +++ b/docs/source/data.rst @@ -87,3 +87,7 @@ Utilities .. automodule:: monai.data.utils :members: + +Decathalon DataLoader +~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: monai.data.load_decathalon_datalist diff --git a/monai/data/__init__.py b/monai/data/__init__.py index 46aec8a01d..2cf7515081 100644 --- a/monai/data/__init__.py +++ b/monai/data/__init__.py @@ -19,3 +19,4 @@ from .utils import * from .png_saver import PNGSaver from .png_writer import write_png +from .decathalon_dataloader import load_decathalon_datalist diff --git a/monai/data/decathalon_dataloader.py b/monai/data/decathalon_dataloader.py new file mode 100644 index 0000000000..13c291b938 --- /dev/null +++ b/monai/data/decathalon_dataloader.py @@ -0,0 +1,75 @@ +# Copyright 2020 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import json + + +def _compute_path(base_dir, element): + if isinstance(element, str): + return os.path.normpath(os.path.join(base_dir, element)) + elif isinstance(element, list): + for e in element: + if not isinstance(e, str): + raise ValueError("file path must be a string.") + return [os.path.normpath(os.path.join(base_dir, e)) for e in element] + else: + raise ValueError("file path must be a string or a list of string.") + + +def _append_paths(base_dir, is_segmentation, items): + for item in items: + if not isinstance(item, dict): + raise ValueError("data item must be dict.") + for k, v in item.items(): + if k == "image": + item[k] = _compute_path(base_dir, v) + elif is_segmentation and k == "label": + item[k] = _compute_path(base_dir, v) + return items + + +def load_decathalon_datalist(data_list_file_path, is_segmentation=True, data_list_key="training", base_dir=None): + """Load image/label paths of decathalon challenge from JSON file + + Json file is similar to what you get from http://medicaldecathlon.com/ + Those dataset.json files + + Args: + data_list_file_path (str): the path to the json file of datalist + is_segmentation (bool): whether the datalist is for segmentation task, default is True + data_list_key (str): the key to get a list of dictionary to be used, default is "training" + base_dir (str): the base directory of the dataset, if None, use the datalist directory + + Returns a list of data items, each of which is a dict keyed by element names, for example: + + .. 
code-block:: + + [ + {'image': '/workspace/data/chest_19.nii.gz', 'label': 0}, + {'image': '/workspace/data/chest_31.nii.gz', 'label': 1} + ] + + """ + if not os.path.isfile(data_list_file_path): + raise ValueError(f"data list file {data_list_file_path} does not exist.") + with open(data_list_file_path) as json_file: + json_data = json.load(json_file) + if data_list_key not in json_data: + raise ValueError(f"data list {data_list_key} not specified in '{data_list_file_path}'.") + expected_data = json_data[data_list_key] + if data_list_key == "test": + expected_data = [{"image": i} for i in expected_data] + + if base_dir is None: + base_dir = os.path.dirname(data_list_file_path) + + return _append_paths(base_dir, is_segmentation, expected_data)
diff --git a/tests/test_load_decathalon_datalist.py b/tests/test_load_decathalon_datalist.py new file mode 100644 index 0000000000..4afe151482 --- /dev/null +++ b/tests/test_load_decathalon_datalist.py @@ -0,0 +1,104 @@ +# Copyright 2020 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import os +import json +import shutil +import tempfile +from monai.data import load_decathalon_datalist + + +class TestLoadDecathalonDatalist(unittest.TestCase): + def test_seg_values(self): + tempdir = tempfile.mkdtemp() + test_data = { + "name": "Spleen", + "description": "Spleen Segmentation", + "labels": {"0": "background", "1": "spleen"}, + "training": [ + {"image": "spleen_19.nii.gz", "label": "spleen_19.nii.gz"}, + {"image": "spleen_31.nii.gz", "label": "spleen_31.nii.gz"}, + ], + "test": ["spleen_15.nii.gz", "spleen_23.nii.gz"], + } + json_str = json.dumps(test_data) + file_path = os.path.join(tempdir, "test_data.json") + with open(file_path, "w") as json_file: + json_file.write(json_str) + result = load_decathalon_datalist(file_path, True, "training", tempdir) + self.assertEqual(result[0]["image"], os.path.join(tempdir, "spleen_19.nii.gz")) + self.assertEqual(result[0]["label"], os.path.join(tempdir, "spleen_19.nii.gz")) + shutil.rmtree(tempdir) + + def test_cls_values(self): + tempdir = tempfile.mkdtemp() + test_data = { + "name": "ChestXRay", + "description": "Chest X-ray classification", + "labels": {"0": "background", "1": "chest"}, + "training": [{"image": "chest_19.nii.gz", "label": 0}, {"image": "chest_31.nii.gz", "label": 1}], + "test": ["chest_15.nii.gz", "chest_23.nii.gz"], + } + json_str = json.dumps(test_data) + file_path = os.path.join(tempdir, "test_data.json") + with open(file_path, "w") as json_file: + json_file.write(json_str) + result = load_decathalon_datalist(file_path, False, "training", tempdir) + self.assertEqual(result[0]["image"], os.path.join(tempdir, "chest_19.nii.gz")) + self.assertEqual(result[0]["label"], 0) + shutil.rmtree(tempdir) + + def test_seg_no_basedir(self): + tempdir = tempfile.mkdtemp() + test_data = { + "name": "Spleen", + "description": "Spleen Segmentation", + "labels": {"0": "background", "1": "spleen"}, + "training": [ + { + "image": os.path.join(tempdir, "spleen_19.nii.gz"), + "label": os.path.join(tempdir, "spleen_19.nii.gz"), + }, + { + "image": os.path.join(tempdir, "spleen_31.nii.gz"), + "label": os.path.join(tempdir, "spleen_31.nii.gz"), + }, + ], + "test": [os.path.join(tempdir, "spleen_15.nii.gz"), os.path.join(tempdir, "spleen_23.nii.gz")], + } + json_str = json.dumps(test_data) + file_path = os.path.join(tempdir, "test_data.json") + with open(file_path, "w") as json_file: + json_file.write(json_str) + result = load_decathalon_datalist(file_path, True, "training", None) + self.assertEqual(result[0]["image"], os.path.join(tempdir, "spleen_19.nii.gz")) + self.assertEqual(result[0]["label"], os.path.join(tempdir, "spleen_19.nii.gz")) + + def test_seg_no_labels(self): + tempdir = tempfile.mkdtemp() + test_data = { + "name": 
"Spleen", + "description": "Spleen Segmentation", + "labels": {"0": "background", "1": "spleen"}, + "test": ["spleen_15.nii.gz", "spleen_23.nii.gz"], + } + json_str = json.dumps(test_data) + file_path = os.path.join(tempdir, "test_data.json") + with open(file_path, "w") as json_file: + json_file.write(json_str) + result = load_decathalon_datalist(file_path, True, "test", tempdir) + self.assertEqual(result[0]["image"], os.path.join(tempdir, "spleen_15.nii.gz")) + shutil.rmtree(tempdir) + + +if __name__ == "__main__": + unittest.main()
created_at: 2020-06-01T14:18:19
{"README.md": "<p align=\"center\">\n <a href=\"https://gmago-leway.github.io/fea-bench.github.io/\">\n <img src=\"assets/FEA-Bench-full.png\" style=\"height: 10em\" alt=\"fea-bench\" />\n </a>\n</p>\n\n<p align=\"center\">\n <em>A benchmark that aims to evaluate the capability of implementing new features in the code repositories.</em>\n</p>\n\n<p align=\"center\">\n <a href=\"https://arxiv.org/abs/2503.06680\">\n <img alt=\"paper\" src=\"https://img.shields.io/badge/ArXiv-%23B31B1B?style=for-the-badge&logo=arXiv\">\n </a>\n <a href=\"./LICENSE\">\n <img alt=\"License\" src=\"https://img.shields.io/github/license/SWE-bench/SWE-bench?style=for-the-badge\">\n </a>\n <a href=\"https://gmago-leway.github.io/fea-bench.github.io/\">\n <img alt=\"Leaderboard\" src=\"https://img.shields.io/badge/leaderboard-%F0%9F%8F%86-1?style=for-the-badge\">\n </a>\n <a href=\"https://huggingface.co/datasets/microsoft/FEA-Bench\">\n <img alt=\"dataset\" src=\"https://img.shields.io/badge/Dataset-HF-FFD21E.svg?style=for-the-badge&logo=huggingface&logoColor=FFD21E\">\n </a>\n</p>\n\n---\n\n# Evaluation\n\nThis repository is the official implementation of the paper \"FEA-Bench: A Benchmark for Evaluating Repository-Level Code Generation for Feature Implementation.\" It can be used for baseline evaluation using the prompts mentioned in the paper.\n\nThe repository includes several functionalities, primarily for obtaining the full dataset, running model inference aligned with the paper, and evaluating the results. The complete pipeline is as follows:\n\n## 1. Environment Setup\n\nYou can create a new Python environment and install all dependencies using:\n```bash\npip install -e .\n```\nIf you plan to use VLLM inference, ensure that the installed libraries match your hardware.\n\n## 2. Building the Full Evaluation Dataset\n\nDue to licensing and company policies, we cannot release the full dataset. Our published version ([https://huggingface.co/datasets/microsoft/FEA-Bench](https://huggingface.co/datasets/microsoft/FEA-Bench)) only includes essential attributes, and the remaining content needs to be scraped from GitHub.\n\nTo construct the full FEA-Bench dataset and save it in the `feabench-data` folder, run the following command. Note that you need to replace `GITHUB_TOKEN` with your own GitHub token, which should have read-only access to public repositories:\n```bash\nexport GITHUB_TOKEN=\"xxx\"\n\npython -m feabench.get_dataset \\\n --dataset microsoft/FEA-Bench \\\n --testbed feabench-data/testbed \\\n --lite_ids instances_lite.json \\\n --medium_file feabench-data/FEA-Bench-v1.0-medium.jsonl \\\n --standard_dataset_path feabench-data/FEA-Bench-v1.0-Standard \\\n --oracle_dataset_path feabench-data/FEA-Bench-v1.0-Oracle \\\n --lite_standard_dataset_path feabench-data/FEA-Bench-v1.0-Lite-Standard \\\n --lite_oracle_dataset_path feabench-data/FEA-Bench-v1.0-Lite-Oracle\n```\n\n## 3. Running Model Inference\n\nOur repository only provides inference methods consistent with those in the paper. 
Agentless and other agent-based inferences can use the `FEA-Bench-v1.0-Lite-Standard` dataset constructed in the previous step, which is aligned with the format of SWE-Bench.\n\n### Example of VLLM Inference:\n```bash\nexport MAX_SEQ_LEN=128000\nexport MAX_GEN_LEN=4096\n\nDATASET_PATH=feabench-data/FEA-Bench-v1.0-Oracle\nMODEL_NAME=Qwen/Qwen2.5-Coder-3B-Instruct\nRESULTS_ROOT_DIR=scripts/experiments/results_full\n\nPROMPT_MODE=natural-detailed\npython -m feabench.run_prediction \\\n --dataset_name_or_path $DATASET_PATH \\\n --model_type vllm \\\n --model_name_or_path $MODEL_NAME \\\n --input_text $PROMPT_MODE \\\n --output_dir $RESULTS_ROOT_DIR/$PROMPT_MODE\n```\n\n### Example of OpenAI API-style Inference:\n(DEEPSEEK_TOKENIZER is only required when using DeepSeek model inference)\n```bash\nexport DEEPSEEK_TOKENIZER_PATH=\"xxx\"\nexport OPENAI_API_KEY=\"xxx\"\nexport OPENAI_BASE_URL=\"https://api.deepseek.com\"\n\nDATASET_PATH=feabench-data/FEA-Bench-v1.0-Oracle\nMODEL_NAME=deepseek-chat\nRESULTS_ROOT_DIR=scripts/experiments/results_full\n\nPROMPT_MODE=natural-detailed\npython -m feabench.run_prediction \\\n --dataset_name_or_path $DATASET_PATH \\\n --model_type openai \\\n --model_name_or_path $MODEL_NAME \\\n --input_text $PROMPT_MODE \\\n --output_dir $RESULTS_ROOT_DIR/$PROMPT_MODE \\\n --num_proc 1\n```\n\nAfter running the inference, you should see the output `.jsonl` result files in the specified `output_dir`.\n\n## 4. Running Model Evaluation\n\nOur evaluation process is based on the code provided by SWE-Bench. We have provided a patch file `swe-bench.diff` to include the environment configurations for the task instances we are involved in.\n\nClone the SWE-Bench repository and apply the patch:\n```bash\nmkdir -p evaluator\ncd evaluator\ngit clone https://github.com/SWE-bench/SWE-bench.git\ncd SWE-bench\ngit checkout a0536ee6f9fd5ff88acf17a36a384bf3da3d93d6\ngit apply ../../swe-bench.diff\nconda create --name fea-eval python=3.11\nconda activate fea-eval\npip install -e .\n```\n\nTo verify that the FEA-Bench task instances can run correctly on your machine, you can build a gold result based on the dataset:\n```bash\npython -m feabench.get_gold_results \\\n --dataset_name_or_path feabench-data/FEA-Bench-v1.0-Standard \\\n --save_dir feabench-data/experiments/gold \\\n --file_name Gold__FEABench_v1.0__test.jsonl\n```\n\nThe command to run the evaluation script is as follows (using the gold result constructed above as an example):\n```bash\npython -m swebench.harness.run_evaluation \\\n --dataset_name ../../feabench-data/FEA-Bench-v1.0-Standard \\\n --predictions_path ../../feabench-data/experiments/gold/Gold__FEABench_v1.0__test.jsonl \\\n --max_workers 10 \\\n --cache_level instance \\\n --timeout 900 \\\n --run_id FEABench_v1_Gold\n```\nThe usage is identical to SWE-Bench. You can set the cache level `cache_level` based on your disk size. You should then obtain a result file similar to the following `.json` format:\n```json\n{\n \"total_instances\": 1401,\n \"submitted_instances\": 1401,\n \"completed_instances\": 1401,\n \"resolved_instances\": 1401,\n \"unresolved_instances\": 0,\n \"empty_patch_instances\": 0,\n \"error_instances\": 0,\n ...\n}\n```\n\nCongratulations! You have completed the usage of FEA-Bench. 
If you have any questions, please raise them in the issues.\n\n---\n\nFor more details, please refer to the [FEA-Bench Paper](https://arxiv.org/abs/2503.06680).\nIf you find our work helpful, we would be grateful if you could cite our work.\n```\n@misc{li2025feabenchbenchmarkevaluatingrepositorylevel,\n title={FEA-Bench: A Benchmark for Evaluating Repository-Level Code Generation for Feature Implementation}, \n author={Wei Li and Xin Zhang and Zhongxin Guo and Shaoguang Mao and Wen Luo and Guangyue Peng and Yangyu Huang and Houfeng Wang and Scarlett Li},\n year={2025},\n eprint={2503.06680},\n archivePrefix={arXiv},\n primaryClass={cs.SE},\n url={https://arxiv.org/abs/2503.06680}, \n}\n```\n\n\n\n## Contributing\n\nThis project welcomes contributions and suggestions. Most contributions require you to agree to a\nContributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us\nthe rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.\n\nWhen you submit a pull request, a CLA bot will automatically determine whether you need to provide\na CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions\nprovided by the bot. You will only need to do this once across all repos using our CLA.\n\nThis project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).\nFor more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or\ncontact [[email protected]](mailto:[email protected]) with any additional questions or comments.\n\n## Trademarks\n\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft \ntrademarks or logos is subject to and must follow \n[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).\nUse of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.\nAny use of third-party trademarks or logos are subject to those third-party's policies.\n"}
{"docs/source/data.rst": ":github_url: https://github.com/Project-MONAI/MONAI\n\n.. _data:\n\nData\n====\n\nGeneric Interfaces\n------------------\n.. currentmodule:: monai.data\n\n`Dataset`\n~~~~~~~~~\n.. autoclass:: Dataset\n :members:\n :special-members: __getitem__\n\n`PersistentDataset`\n~~~~~~~~~~~~~~~~~~~\n.. autoclass:: PersistentDataset\n :members:\n :special-members: __getitem__\n\n`CacheDataset`\n~~~~~~~~~~~~~~\n.. autoclass:: CacheDataset\n :members:\n :special-members: __getitem__\n\n`ZipDataset`\n~~~~~~~~~~~~\n.. autoclass:: ZipDataset\n :members:\n :special-members: __getitem__\n\n`ArrayDataset`\n~~~~~~~~~~~~~~\n.. autoclass:: ArrayDataset\n :members:\n :special-members: __getitem__\n\n\nPatch-based dataset\n-------------------\n\n`GridPatchDataset`\n~~~~~~~~~~~~~~~~~~\n.. autoclass:: GridPatchDataset\n :members:\n\n\nNifti format handling\n---------------------\n\nReading\n~~~~~~~\n.. autoclass:: monai.data.NiftiDataset\n :members:\n\nWriting Nifti\n~~~~~~~~~~~~~\n.. autoclass:: monai.data.NiftiSaver\n :members:\n\n.. autofunction:: monai.data.write_nifti\n\n\nPNG format handling\n-------------------\n\nWriting PNG\n~~~~~~~~~~~\n.. autoclass:: monai.data.PNGSaver\n :members:\n\n.. autofunction:: monai.data.write_png\n\n\nSynthetic\n---------\n.. automodule:: monai.data.synthetic\n :members:\n\n\nUtilities\n---------\n.. automodule:: monai.data.utils\n :members:\n\n", "monai/data/__init__.py": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .csv_saver import CSVSaver\nfrom .dataset import Dataset, PersistentDataset, CacheDataset, ZipDataset, ArrayDataset\nfrom .grid_dataset import GridPatchDataset\nfrom .nifti_reader import NiftiDataset\nfrom .nifti_saver import NiftiSaver\nfrom .nifti_writer import write_nifti\nfrom .synthetic import *\nfrom .utils import *\nfrom .png_saver import PNGSaver\nfrom .png_writer import write_png\n", "monai/data/decathalon_dataloader.py": null}
diff --git a/docs/source/data.rst b/docs/source/data.rst index 73a6a698bc..d998605bf8 100644 --- a/docs/source/data.rst +++ b/docs/source/data.rst @@ -87,3 +87,7 @@ Utilities .. automodule:: monai.data.utils :members: + +Decathalon DataLoader +~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: monai.data.load_decathalon_datalist
{"monai/data/decathalon_dataloader.py": [{"type": "function", "name": "_compute_path", "lines": [16, 25], "signature": "def _compute_path(base_dir, element):", "doc": ""}, {"type": "function", "name": "_append_paths", "lines": [28, 37], "signature": "def _append_paths(base_dir, is_segmentation, items):", "doc": ""}, {"type": "function", "name": "load_decathalon_datalist", "lines": [40, 75], "signature": "def load_decathalon_datalist(data_list_file_path, is_segmentation=True, data_list_key=\"training\", base_dir=None):", "doc": "Load image/label paths of decathalon challenge from JSON file\n\nJson file is similar to what you get from http://medicaldecathlon.com/\nThose dataset.json files\n\nArgs:\n data_list_file_path (str): the path to the json file of datalist\n is_segmentation (bool): whether the datalist is for segmentation task, default is True\n data_list_key (str): the key to get a list of dictionary to be used, default is \"training\"\n base_dir (str): the base directory of the dataset, if None, use the datalist directory\n\nReturns a list of data items, each of which is a dict keyed by element names, for example:\n\n.. code-block::\n\n [\n {'image': '/workspace/data/chest_19.nii.gz', 'label': 0}, \n {'image': '/workspace/data/chest_31.nii.gz', 'label': 1}\n ]"}]}
version: null
FAIL_TO_PASS: ["tests/test_load_decathalon_datalist.py::TestLoadDecathalonDatalist::test_cls_values", "tests/test_load_decathalon_datalist.py::TestLoadDecathalonDatalist::test_seg_no_basedir", "tests/test_load_decathalon_datalist.py::TestLoadDecathalonDatalist::test_seg_no_labels", "tests/test_load_decathalon_datalist.py::TestLoadDecathalonDatalist::test_seg_values"]
PASS_TO_PASS: []
environment_setup_commit: e73257caa79309dcce1e93abf1632f4bfd75b11f
{"first_commit_time": 1591001255.0, "pr_title": "461 add support to load decathalon datalist", "pr_body": "Fixes #461 .\r\n\r\n### Description\r\nAs Decathalon challenge dataset is very rich and very popular in medical domain, many researchers and students use Decathalon dataset to learn medical DL skills, and we also have notebooks and examples based on Decathalon dataset.\r\nSo this PR added support to load Decathalon datalist from the JSON config file.\r\nUsers can also convert their own datalist to Decathalon format and use this tool.\r\n\r\n### Status\r\n**Ready**\r\n\r\n### Types of changes\r\n<!--- Put an `x` in all the boxes that apply, and remove the not applicable items -->\r\n- [ ] Bug fix (non-breaking change which fixes an issue)\r\n- [x] Breaking change (fix or new feature that would cause existing functionality to change)\r\n- [x] New tests added to cover the changes\r\n- [x] Docstrings/Documentation updated\r\n", "pr_timeline": [{"time": 1591021110.0, "comment": "/black"}, {"time": 1591022261.0, "comment": "/black"}, {"time": 1591028667.0, "comment": "/black"}, {"time": 1591028697.0, "comment": "Hi @wyli ,\r\n\r\nThanks for your review, I updated according to the comments.\r\nCould you please help review it again?\r\nThanks.\r\n"}, {"time": 1591029057.0, "comment": "/black"}, {"time": 1591050663.0, "comment": "/black"}], "issues": {"461": {"issue_title": "Add utility function to load dataset from JSON file", "issue_body": "**Is your feature request related to a problem? Please describe.**\r\nMost of public datasets has a separate file to store `datalist`, let's add a JSON parser first based on the Decathalon dataset format first.\r\n", "issue_timeline": []}}}
PyThaiNLP/pythainlp
1,054
https://github.com/PyThaiNLP/pythainlp/pull/1054
PyThaiNLP__pythainlp-1054
[]
2252dee57bd7be9503242fa734bf0abc48c5ddf1
diff --git a/docs/api/lm.rst b/docs/api/lm.rst index 471282fd3..063aecb2d 100644 --- a/docs/api/lm.rst +++ b/docs/api/lm.rst @@ -6,4 +6,5 @@ pythainlp.lm Modules ------- +.. autofunction:: calculate_ngram_counts .. autofunction:: remove_repeated_ngrams \ No newline at end of file diff --git a/pythainlp/lm/__init__.py b/pythainlp/lm/__init__.py index f3e43e801..9fe31c161 100644 --- a/pythainlp/lm/__init__.py +++ b/pythainlp/lm/__init__.py @@ -3,6 +3,9 @@ # SPDX-FileType: SOURCE # SPDX-License-Identifier: Apache-2.0 -__all__ = ["remove_repeated_ngrams"] +__all__ = [ + "calculate_ngram_counts", + "remove_repeated_ngrams" +] -from pythainlp.lm.text_util import remove_repeated_ngrams +from pythainlp.lm.text_util import calculate_ngram_counts, remove_repeated_ngrams diff --git a/pythainlp/lm/text_util.py b/pythainlp/lm/text_util.py index 668ded3c5..0d3181d2a 100644 --- a/pythainlp/lm/text_util.py +++ b/pythainlp/lm/text_util.py @@ -4,7 +4,32 @@ # SPDX-License-Identifier: Apache-2.0 # ruff: noqa: C901 -from typing import List +from typing import List, Tuple, Dict + + +def calculate_ngram_counts( + list_words: List[str], + n_min: int = 2, + n_max: int = 4) -> Dict[Tuple[str], int]: + """ + Calculates the counts of n-grams in the list words for the specified range. + + :param List[str] list_words: List of string + :param int n_min: The minimum n-gram size (default: 2). + :param int n_max: The maximum n-gram size (default: 4). + + :return: A dictionary where keys are n-grams and values are their counts. + :rtype: Dict[Tuple[str], int] + """ + + ngram_counts = {} + + for n in range(n_min, n_max + 1): + for i in range(len(list_words) - n + 1): + ngram = tuple(list_words[i:i + n]) + ngram_counts[ngram] = ngram_counts.get(ngram, 0) + 1 + + return ngram_counts def remove_repeated_ngrams(string_list: List[str], n: int = 2) -> List[str]:
diff --git a/tests/core/test_lm.py b/tests/core/test_lm.py index 5d25cc124..9da213d31 100644 --- a/tests/core/test_lm.py +++ b/tests/core/test_lm.py @@ -5,10 +5,23 @@ import unittest -from pythainlp.lm import remove_repeated_ngrams +from pythainlp.lm import calculate_ngram_counts, remove_repeated_ngrams class LMTestCase(unittest.TestCase): + def test_calculate_ngram_counts(self): + self.assertEqual( + calculate_ngram_counts(['1', '2', '3', '4']), + { + ('1', '2'): 1, + ('2', '3'): 1, + ('3', '4'): 1, + ('1', '2', '3'): 1, + ('2', '3', '4'): 1, + ('1', '2', '3', '4'): 1 + } + ) + def test_remove_repeated_ngrams(self): texts = ['เอา', 'เอา', 'แบบ', 'แบบ', 'แบบ', 'ไหน'] self.assertEqual(
2025-01-02T13:53:18
{"README.md": "<p align=\"center\">\n <a href=\"https://gmago-leway.github.io/fea-bench.github.io/\">\n <img src=\"assets/FEA-Bench-full.png\" style=\"height: 10em\" alt=\"fea-bench\" />\n </a>\n</p>\n\n<p align=\"center\">\n <em>A benchmark that aims to evaluate the capability of implementing new features in the code repositories.</em>\n</p>\n\n<p align=\"center\">\n <a href=\"https://arxiv.org/abs/2503.06680\">\n <img alt=\"paper\" src=\"https://img.shields.io/badge/ArXiv-%23B31B1B?style=for-the-badge&logo=arXiv\">\n </a>\n <a href=\"./LICENSE\">\n <img alt=\"License\" src=\"https://img.shields.io/github/license/SWE-bench/SWE-bench?style=for-the-badge\">\n </a>\n <a href=\"https://gmago-leway.github.io/fea-bench.github.io/\">\n <img alt=\"Leaderboard\" src=\"https://img.shields.io/badge/leaderboard-%F0%9F%8F%86-1?style=for-the-badge\">\n </a>\n <a href=\"https://huggingface.co/datasets/microsoft/FEA-Bench\">\n <img alt=\"dataset\" src=\"https://img.shields.io/badge/Dataset-HF-FFD21E.svg?style=for-the-badge&logo=huggingface&logoColor=FFD21E\">\n </a>\n</p>\n\n---\n\n# Evaluation\n\nThis repository is the official implementation of the paper \"FEA-Bench: A Benchmark for Evaluating Repository-Level Code Generation for Feature Implementation.\" It can be used for baseline evaluation using the prompts mentioned in the paper.\n\nThe repository includes several functionalities, primarily for obtaining the full dataset, running model inference aligned with the paper, and evaluating the results. The complete pipeline is as follows:\n\n## 1. Environment Setup\n\nYou can create a new Python environment and install all dependencies using:\n```bash\npip install -e .\n```\nIf you plan to use VLLM inference, ensure that the installed libraries match your hardware.\n\n## 2. Building the Full Evaluation Dataset\n\nDue to licensing and company policies, we cannot release the full dataset. Our published version ([https://huggingface.co/datasets/microsoft/FEA-Bench](https://huggingface.co/datasets/microsoft/FEA-Bench)) only includes essential attributes, and the remaining content needs to be scraped from GitHub.\n\nTo construct the full FEA-Bench dataset and save it in the `feabench-data` folder, run the following command. Note that you need to replace `GITHUB_TOKEN` with your own GitHub token, which should have read-only access to public repositories:\n```bash\nexport GITHUB_TOKEN=\"xxx\"\n\npython -m feabench.get_dataset \\\n --dataset microsoft/FEA-Bench \\\n --testbed feabench-data/testbed \\\n --lite_ids instances_lite.json \\\n --medium_file feabench-data/FEA-Bench-v1.0-medium.jsonl \\\n --standard_dataset_path feabench-data/FEA-Bench-v1.0-Standard \\\n --oracle_dataset_path feabench-data/FEA-Bench-v1.0-Oracle \\\n --lite_standard_dataset_path feabench-data/FEA-Bench-v1.0-Lite-Standard \\\n --lite_oracle_dataset_path feabench-data/FEA-Bench-v1.0-Lite-Oracle\n```\n\n## 3. Running Model Inference\n\nOur repository only provides inference methods consistent with those in the paper. 
Agentless and other agent-based inferences can use the `FEA-Bench-v1.0-Lite-Standard` dataset constructed in the previous step, which is aligned with the format of SWE-Bench.\n\n### Example of VLLM Inference:\n```bash\nexport MAX_SEQ_LEN=128000\nexport MAX_GEN_LEN=4096\n\nDATASET_PATH=feabench-data/FEA-Bench-v1.0-Oracle\nMODEL_NAME=Qwen/Qwen2.5-Coder-3B-Instruct\nRESULTS_ROOT_DIR=scripts/experiments/results_full\n\nPROMPT_MODE=natural-detailed\npython -m feabench.run_prediction \\\n --dataset_name_or_path $DATASET_PATH \\\n --model_type vllm \\\n --model_name_or_path $MODEL_NAME \\\n --input_text $PROMPT_MODE \\\n --output_dir $RESULTS_ROOT_DIR/$PROMPT_MODE\n```\n\n### Example of OpenAI API-style Inference:\n(DEEPSEEK_TOKENIZER is only required when using DeepSeek model inference)\n```bash\nexport DEEPSEEK_TOKENIZER_PATH=\"xxx\"\nexport OPENAI_API_KEY=\"xxx\"\nexport OPENAI_BASE_URL=\"https://api.deepseek.com\"\n\nDATASET_PATH=feabench-data/FEA-Bench-v1.0-Oracle\nMODEL_NAME=deepseek-chat\nRESULTS_ROOT_DIR=scripts/experiments/results_full\n\nPROMPT_MODE=natural-detailed\npython -m feabench.run_prediction \\\n --dataset_name_or_path $DATASET_PATH \\\n --model_type openai \\\n --model_name_or_path $MODEL_NAME \\\n --input_text $PROMPT_MODE \\\n --output_dir $RESULTS_ROOT_DIR/$PROMPT_MODE \\\n --num_proc 1\n```\n\nAfter running the inference, you should see the output `.jsonl` result files in the specified `output_dir`.\n\n## 4. Running Model Evaluation\n\nOur evaluation process is based on the code provided by SWE-Bench. We have provided a patch file `swe-bench.diff` to include the environment configurations for the task instances we are involved in.\n\nClone the SWE-Bench repository and apply the patch:\n```bash\nmkdir -p evaluator\ncd evaluator\ngit clone https://github.com/SWE-bench/SWE-bench.git\ncd SWE-bench\ngit checkout a0536ee6f9fd5ff88acf17a36a384bf3da3d93d6\ngit apply ../../swe-bench.diff\nconda create --name fea-eval python=3.11\nconda activate fea-eval\npip install -e .\n```\n\nTo verify that the FEA-Bench task instances can run correctly on your machine, you can build a gold result based on the dataset:\n```bash\npython -m feabench.get_gold_results \\\n --dataset_name_or_path feabench-data/FEA-Bench-v1.0-Standard \\\n --save_dir feabench-data/experiments/gold \\\n --file_name Gold__FEABench_v1.0__test.jsonl\n```\n\nThe command to run the evaluation script is as follows (using the gold result constructed above as an example):\n```bash\npython -m swebench.harness.run_evaluation \\\n --dataset_name ../../feabench-data/FEA-Bench-v1.0-Standard \\\n --predictions_path ../../feabench-data/experiments/gold/Gold__FEABench_v1.0__test.jsonl \\\n --max_workers 10 \\\n --cache_level instance \\\n --timeout 900 \\\n --run_id FEABench_v1_Gold\n```\nThe usage is identical to SWE-Bench. You can set the cache level `cache_level` based on your disk size. You should then obtain a result file similar to the following `.json` format:\n```json\n{\n \"total_instances\": 1401,\n \"submitted_instances\": 1401,\n \"completed_instances\": 1401,\n \"resolved_instances\": 1401,\n \"unresolved_instances\": 0,\n \"empty_patch_instances\": 0,\n \"error_instances\": 0,\n ...\n}\n```\n\nCongratulations! You have completed the usage of FEA-Bench. 
If you have any questions, please raise them in the issues.\n\n---\n\nFor more details, please refer to the [FEA-Bench Paper](https://arxiv.org/abs/2503.06680).\nIf you find our work helpful, we would be grateful if you could cite our work.\n```\n@misc{li2025feabenchbenchmarkevaluatingrepositorylevel,\n title={FEA-Bench: A Benchmark for Evaluating Repository-Level Code Generation for Feature Implementation}, \n author={Wei Li and Xin Zhang and Zhongxin Guo and Shaoguang Mao and Wen Luo and Guangyue Peng and Yangyu Huang and Houfeng Wang and Scarlett Li},\n year={2025},\n eprint={2503.06680},\n archivePrefix={arXiv},\n primaryClass={cs.SE},\n url={https://arxiv.org/abs/2503.06680}, \n}\n```\n\n\n\n## Contributing\n\nThis project welcomes contributions and suggestions. Most contributions require you to agree to a\nContributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us\nthe rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.\n\nWhen you submit a pull request, a CLA bot will automatically determine whether you need to provide\na CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions\nprovided by the bot. You will only need to do this once across all repos using our CLA.\n\nThis project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).\nFor more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or\ncontact [[email protected]](mailto:[email protected]) with any additional questions or comments.\n\n## Trademarks\n\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft \ntrademarks or logos is subject to and must follow \n[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).\nUse of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.\nAny use of third-party trademarks or logos are subject to those third-party's policies.\n"}
{"docs/api/lm.rst": ".. currentmodule:: pythainlp.lm\n\npythainlp.lm\n============\n\nModules\n-------\n\n.. autofunction:: remove_repeated_ngrams", "pythainlp/lm/__init__.py": "# -*- coding: utf-8 -*-\n# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project\n# SPDX-FileType: SOURCE\n# SPDX-License-Identifier: Apache-2.0\n\n__all__ = [\"remove_repeated_ngrams\"]\n\nfrom pythainlp.lm.text_util import remove_repeated_ngrams\n", "pythainlp/lm/text_util.py": "# -*- coding: utf-8 -*-\n# SPDX-FileCopyrightText: 2016-2025 PyThaiNLP Project\n# SPDX-FileType: SOURCE\n# SPDX-License-Identifier: Apache-2.0\n# ruff: noqa: C901\n\nfrom typing import List\n\n\ndef remove_repeated_ngrams(string_list: List[str], n: int = 2) -> List[str]:\n \"\"\"\n Remove repeated n-grams\n\n :param List[str] string_list: List of string\n :param int n: n-gram size\n :return: List of string\n :rtype: List[str]\n\n :Example:\n ::\n\n from pythainlp.lm import remove_repeated_ngrams\n\n remove_repeated_ngrams(['เอา', 'เอา', 'แบบ', 'ไหน'], n=1)\n # output: ['เอา', 'แบบ', 'ไหน']\n \"\"\"\n if not string_list or n <= 0:\n return string_list\n\n unique_ngrams = set()\n\n output_list = []\n\n for i in range(len(string_list)):\n if i + n <= len(string_list):\n ngram = tuple(string_list[i:i + n])\n\n if ngram not in unique_ngrams:\n unique_ngrams.add(ngram)\n\n if not output_list or output_list[-(n - 1):] != list(ngram[:-1]):\n output_list.extend(ngram)\n else:\n output_list.append(ngram[-1])\n else:\n for char in string_list[i:]:\n if not output_list or output_list[-1] != char:\n output_list.append(char)\n\n return output_list\n"}
diff --git a/docs/api/lm.rst b/docs/api/lm.rst index 471282fd3..063aecb2d 100644 --- a/docs/api/lm.rst +++ b/docs/api/lm.rst @@ -6,4 +6,5 @@ pythainlp.lm Modules ------- +.. autofunction:: calculate_ngram_counts .. autofunction:: remove_repeated_ngrams \ No newline at end of file
{"pythainlp/lm/text_util.py": [{"type": "function", "name": "calculate_ngram_counts", "lines": [10, 32], "signature": "def calculate_ngram_counts( list_words: List[str], n_min: int = 2, n_max: int = 4) -> Dict[Tuple[str], int]:", "doc": "Calculates the counts of n-grams in the list words for the specified range.\n\n:param List[str] list_words: List of string\n:param int n_min: The minimum n-gram size (default: 2).\n:param int n_max: The maximum n-gram size (default: 4).\n\n:return: A dictionary where keys are n-grams and values are their counts.\n:rtype: Dict[Tuple[str], int]"}]}
null
["tests/core/test_lm.py::LMTestCase::test_calculate_ngram_counts", "tests/core/test_lm.py::LMTestCase::test_remove_repeated_ngrams"]
[]
2252dee57bd7be9503242fa734bf0abc48c5ddf1
{"first_commit_time": 1735825940.0, "pr_title": "Add pythainlp.lm.calculate_ngram_counts", "pr_body": "Calculates the counts of n-grams in the list words for the specified range.\r\n\r\n```\r\n>>> from pythainlp.lm import calculate_ngram_counts\r\n>>> a=[\"1\",\"2\",\"3\",\"4\"]\r\n>>> calculate_ngram_counts(a)\r\n{('1', '2'): 1, ('2', '3'): 1, ('3', '4'): 1, ('1', '2', '3'): 1, ('2', '3', '4'): 1, ('1', '2', '3', '4'): 1}\r\n>>> \r\n```", "pr_timeline": [{"time": 1735826259.0, "comment": "Hello @wannaphong! Thanks for updating this PR. We checked the lines you've touched for [PEP\u00a08](https://www.python.org/dev/peps/pep-0008) issues, and found:\n\n\n\n\n\n\n\nThere are currently no PEP 8 issues detected in this Pull Request. Cheers! :beers: \n\n##### Comment last updated at 2025-01-02 13:57:39 UTC"}, {"time": 1735826284.0, "comment": "## [![Quality Gate Passed](https://sonarsource.github.io/sonarcloud-github-static-resources/v2/checks/QualityGateBadge/qg-passed-20px.png 'Quality Gate Passed')](https://sonarcloud.io/dashboard?id=PyThaiNLP_pythainlp&pullRequest=1054) **Quality Gate passed** \nIssues \n![](https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/passed-16px.png '') [0 New issues](https://sonarcloud.io/project/issues?id=PyThaiNLP_pythainlp&pullRequest=1054&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true) \n![](https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/accepted-16px.png '') [0 Accepted issues](https://sonarcloud.io/project/issues?id=PyThaiNLP_pythainlp&pullRequest=1054&issueStatuses=ACCEPTED)\n\nMeasures \n![](https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/passed-16px.png '') [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=PyThaiNLP_pythainlp&pullRequest=1054&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true) \n![](https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/passed-16px.png '') [0.0% Coverage on New Code](https://sonarcloud.io/component_measures?id=PyThaiNLP_pythainlp&pullRequest=1054&metric=new_coverage&view=list) \n![](https://sonarsource.github.io/sonarcloud-github-static-resources/v2/common/passed-16px.png '') [0.0% Duplication on New Code](https://sonarcloud.io/component_measures?id=PyThaiNLP_pythainlp&pullRequest=1054&metric=new_duplicated_lines_density&view=list) \n \n[See analysis details on SonarQube Cloud](https://sonarcloud.io/dashboard?id=PyThaiNLP_pythainlp&pullRequest=1054)\n\n"}, {"time": 1735826661.0, "comment": "\n[![Coverage Status](https://coveralls.io/builds/71557780/badge)](https://coveralls.io/builds/71557780)\n\ncoverage: 52.728% (-0.06%) from 52.79%\nwhen pulling **ca9446d5018159cc83ffd28a10f441022dfe9fec on add-calculate_ngram_counts**\ninto **2252dee57bd7be9503242fa734bf0abc48c5ddf1 on dev**.\n"}], "issues": {}}
RDFLib/rdflib
1,968
https://github.com/RDFLib/rdflib/pull/1968
RDFLib__rdflib-1968
[]
131d9e66e8515aa81d776969d42f58c72bc68f86
"diff --git a/CHANGELOG.md b/CHANGELOG.md\nindex a43d51694..27c9fc414 100644\n--- a/CHANGELOG.md\n++(...TRUNCATED)
"diff --git a/test/test_tools/test_chunk_serializer.py b/test/test_tools/test_chunk_serializer.py\nn(...TRUNCATED)
2022-05-22T12:06:12
"{\"README.md\": \"<p align=\\\"center\\\">\\n <a href=\\\"https://gmago-leway.github.io/fea-bench.(...TRUNCATED)
"{\"CHANGELOG.md\": \"# 2022-10-16 RELEASE MAJOR.MINOR.PATCH\\n\\n## User facing changes\\n\\nThis s(...TRUNCATED)
"diff --git a/CHANGELOG.md b/CHANGELOG.md\nindex a43d51694..27c9fc414 100644\n--- a/CHANGELOG.md\n++(...TRUNCATED)
"{\"rdflib/tools/chunk_serializer.py\": [{\"type\": \"function\", \"name\": \"serialize_in_chunks\",(...TRUNCATED)
null
"[\"test/test_tools/test_chunk_serializer.py::test_chunk_by_triples\", \"test/test_tools/test_chunk_(...TRUNCATED)
[]
0c11debb5178157baeac27b735e49a757916d2a6
"{\"first_commit_time\": 1653220003.0, \"pr_title\": \"add chunk serializer & tests\", \"pr_body\": (...TRUNCATED)
RDFLib/rdflib
968
https://github.com/RDFLib/rdflib/pull/968
RDFLib__rdflib-968
[]
0e5efef78702575e4abff4d8076eac4e2bd9d5f0
"diff --git a/rdflib/graph.py b/rdflib/graph.py\nindex 4a27e6de7..f68300cb1 100644\n--- a/rdflib/gra(...TRUNCATED)
"diff --git a/test/test_graph_cbd.py b/test/test_graph_cbd.py\nnew file mode 100644\nindex 000000000(...TRUNCATED)
2020-03-13T11:56:00
"{\"README.md\": \"<p align=\\\"center\\\">\\n <a href=\\\"https://gmago-leway.github.io/fea-bench.(...TRUNCATED)
"{\"rdflib/graph.py\": \"from __future__ import absolute_import\\nfrom __future__ import division\\n(...TRUNCATED)
"{\"rdflib/graph.py\": [{\"type\": \"function\", \"name\": \"Graph.cbd\", \"lines\": [1272, 1319], \(...TRUNCATED)
null
"[\"test/test_graph_cbd.py::CbdTestCase::testCbd\", \"test/test_graph_cbd.py::CbdTestCase::testCbdRe(...TRUNCATED)
[]
0c11debb5178157baeac27b735e49a757916d2a6
"{\"first_commit_time\": 1584098175.0, \"pr_title\": \"Concise Bounded Description\", \"pr_body\": \(...TRUNCATED)
Textualize/rich
1,706
https://github.com/Textualize/rich/pull/1706
Textualize__rich-1706
[]
008854c40772f647dfcb873bc3489e8a1c02d598
"diff --git a/CHANGELOG.md b/CHANGELOG.md\nindex 77dd5a0a9b..2b1dc69d04 100644\n--- a/CHANGELOG.md\n(...TRUNCATED)
"diff --git a/tests/test_text.py b/tests/test_text.py\nindex 3727d1602c..6eecb9ee73 100644\n--- a/te(...TRUNCATED)
2021-11-17T21:21:55
"{\"README.md\": \"<p align=\\\"center\\\">\\n <a href=\\\"https://gmago-leway.github.io/fea-bench.(...TRUNCATED)
"{\"CHANGELOG.md\": \"# Changelog\\n\\nAll notable changes to this project will be documented in thi(...TRUNCATED)
"diff --git a/CHANGELOG.md b/CHANGELOG.md\nindex 77dd5a0a9b..2b1dc69d04 100644\n--- a/CHANGELOG.md\n(...TRUNCATED)
"{\"rich/text.py\": [{\"type\": \"function\", \"name\": \"Text.from_ansi\", \"lines\": [246, 277], \(...TRUNCATED)
null
["tests/test_text.py::test_from_ansi"]
"[\"tests/test_text.py::test_span\", \"tests/test_text.py::test_span_split\", \"tests/test_text.py::(...TRUNCATED)
b0661de34bab35af9b4b1d3ba8e28b186b225e84
"{\"first_commit_time\": 1636564203.0, \"pr_title\": \"Add a Text.from_ansi helper method\", \"pr_bo(...TRUNCATED)
Textualize/rich
1,894
https://github.com/Textualize/rich/pull/1894
Textualize__rich-1894
[]
633faab16dc3a8c01a6562648cc2186c19a476e3
"diff --git a/rich/progress.py b/rich/progress.py\nindex 1f670db438..fe35b6c175 100644\n--- a/rich/p(...TRUNCATED)
"diff --git a/tests/test_progress.py b/tests/test_progress.py\nindex 2020f91ffb..20b9d32ed4 100644\n(...TRUNCATED)
2022-01-31T14:32:01
"{\"README.md\": \"<p align=\\\"center\\\">\\n <a href=\\\"https://gmago-leway.github.io/fea-bench.(...TRUNCATED)
"{\"rich/progress.py\": \"from abc import ABC, abstractmethod\\nfrom collections import deque\\nfrom(...TRUNCATED)
"{\"rich/progress.py\": [{\"type\": \"function\", \"name\": \"Progress.get_default_columns\", \"line(...TRUNCATED)
null
["tests/test_progress.py::test_using_default_columns"]
"[\"tests/test_progress.py::test_bar_columns\", \"tests/test_progress.py::test_text_column\", \"test(...TRUNCATED)
b0661de34bab35af9b4b1d3ba8e28b186b225e84
"{\"first_commit_time\": 1643639341.0, \"pr_title\": \"Add default_columns classmethod to Progress c(...TRUNCATED)
Textualize/rich
376
https://github.com/Textualize/rich/pull/376
Textualize__rich-376
[]
a83ee864e67d97be926894c7b5d3cf470194d6c1
"diff --git a/CHANGELOG.md b/CHANGELOG.md\nindex f813ffeb0a..132fafbbc2 100644\n--- a/CHANGELOG.md\n(...TRUNCATED)
"diff --git a/tests/test_console.py b/tests/test_console.py\nindex eca300095a..8dbe06933a 100644\n--(...TRUNCATED)
2020-10-11T16:35:48
"{\"README.md\": \"<p align=\\\"center\\\">\\n <a href=\\\"https://gmago-leway.github.io/fea-bench.(...TRUNCATED)
"{\"CHANGELOG.md\": \"# Changelog\\n\\nAll notable changes to this project will be documented in thi(...TRUNCATED)
"diff --git a/CHANGELOG.md b/CHANGELOG.md\nindex f813ffeb0a..132fafbbc2 100644\n--- a/CHANGELOG.md\n(...TRUNCATED)
"{\"rich/console.py\": [{\"type\": \"function\", \"name\": \"Console.out\", \"lines\": [1015, 1043],(...TRUNCATED)
null
["tests/test_console.py::test_out"]
"[\"tests/test_console.py::test_dumb_terminal\", \"tests/test_console.py::test_16color_terminal\", \(...TRUNCATED)
b0661de34bab35af9b4b1d3ba8e28b186b225e84
"{\"first_commit_time\": 1602433996.0, \"pr_title\": \"console out\", \"pr_body\": \"Adds a Console.(...TRUNCATED)
Textualize/rich
901
https://github.com/Textualize/rich/pull/901
Textualize__rich-901
[]
a9c0f917aed8d0bba232b7584742962f03a9a293
"diff --git a/CHANGELOG.md b/CHANGELOG.md\nindex d8378b729a..acb53b1308 100644\n--- a/CHANGELOG.md\n(...TRUNCATED)
"diff --git a/tests/test_console.py b/tests/test_console.py\nindex b82c765911..82bc5b48d4 100644\n--(...TRUNCATED)
2021-01-09T16:12:28
"{\"README.md\": \"<p align=\\\"center\\\">\\n <a href=\\\"https://gmago-leway.github.io/fea-bench.(...TRUNCATED)
"{\"CHANGELOG.md\": \"# Changelog\\n\\nAll notable changes to this project will be documented in thi(...TRUNCATED)
"diff --git a/CHANGELOG.md b/CHANGELOG.md\nindex d8378b729a..acb53b1308 100644\n--- a/CHANGELOG.md\n(...TRUNCATED)
"{\"rich/segment.py\": [{\"type\": \"function\", \"name\": \"Segment.remove_color\", \"lines\": [344(...TRUNCATED)
null
"[\"tests/test_console.py::test_no_color\", \"tests/test_segment.py::test_remove_color\", \"tests/te(...TRUNCATED)
"[\"tests/test_console.py::test_dumb_terminal\", \"tests/test_console.py::test_soft_wrap\", \"tests/(...TRUNCATED)
b0661de34bab35af9b4b1d3ba8e28b186b225e84
"{\"first_commit_time\": 1610208516.0, \"pr_title\": \"No color\", \"pr_body\": \"Fixes https://gith(...TRUNCATED)
Textualize/textual
1,517
https://github.com/Textualize/textual/pull/1517
Textualize__textual-1517
[]
779b10a0e8b8581fab512eb684a06eb90381705d
"diff --git a/src/textual/app.py b/src/textual/app.py\nindex b207bbd025..89da6aaf60 100644\n--- a/sr(...TRUNCATED)
"diff --git a/tests/test_concurrency.py b/tests/test_concurrency.py\nnew file mode 100644\nindex 000(...TRUNCATED)
2023-01-07T14:10:40
"{\"README.md\": \"<p align=\\\"center\\\">\\n <a href=\\\"https://gmago-leway.github.io/fea-bench.(...TRUNCATED)
"{\"src/textual/app.py\": \"from __future__ import annotations\\n\\nimport asyncio\\nimport inspect\(...TRUNCATED)
"{\"src/textual/app.py\": [{\"type\": \"function\", \"name\": \"App.call_from_thread\", \"lines\": [(...TRUNCATED)
null
"[\"tests/test_concurrency.py::test_call_from_thread_app_not_running\", \"tests/test_concurrency.py:(...TRUNCATED)
[]
86e93536b991014e0ea4bf993068202b446bb698
"{\"first_commit_time\": 1673100292.0, \"pr_title\": \"Call from thread method\", \"pr_body\": \"Thi(...TRUNCATED)
End of preview.
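For readers who want to work with rows like the ones previewed above outside the viewer, a minimal sketch of loading a Parquet export with the `datasets` library is shown below. The file path, split name, and the guess that FAIL_TO_PASS is stored as a JSON-encoded list of test identifiers are assumptions inferred from the preview, not a documented schema for this dataset.

```python
# Minimal sketch, under the assumptions stated above: load a local Parquet
# export of rows like those previewed here and inspect a few columns that
# matter for evaluation (instance_id, repo, FAIL_TO_PASS).
import json

from datasets import load_dataset  # pip install datasets

ds = load_dataset(
    "parquet",
    # Placeholder file name -- substitute the actual Parquet shard(s) you downloaded.
    data_files={"test": "data/train-00000-of-00001.parquet"},
)["test"]

for row in ds.select(range(min(3, len(ds)))):
    f2p = row["FAIL_TO_PASS"]
    # In the preview this column looks like a JSON string; fall back gracefully
    # if the export stores it as a native list instead.
    if isinstance(f2p, str):
        try:
            f2p = json.loads(f2p)
        except json.JSONDecodeError:
            f2p = [f2p]
    print(row["instance_id"], row["repo"], len(f2p), "FAIL_TO_PASS tests")
```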