diff --git a/README.md b/README.md
index a79cb01..880d305 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,36 @@
 1. Create conda env: `conda create -n pos-bert python=3.7`
 2. Install dependencies: `pip install -r requirements.txt`
 3. Download UD: Use [this link](https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3226) to download all treebanks, unzip and place them inside `data` folder
-4. Rename the files in the concerned treebanks to `train/dev/test.conllu`
-5. Meta-train pos-tagger: `python meta_trainer.py ewt partut` (can take arbitrary number of datasets)
-6. Evaluate pos-tagger: `python test.py ewt partut --split=test`
-
+4. TODO: Make a script to create the 'x' folder inside `data`
+5. Create new configs in the `configs` folder and use them to start training
+
+Trainer usage
+```
+usage: trainer.py [-h] --config_path CONFIG_PATH [--train_type {meta,mtl}]
+
+Train a classifier
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --config_path CONFIG_PATH
+                        Path of the config containing training params
+  --train_type {meta,mtl}
+                        Whether to perform MTL or meta-training
+```
+
+Tester usage
+```
+usage: tester.py [-h] --test_path TEST_PATH --model_path MODEL_PATH
+                 [-e {meta,full,both}]
+
+Test POS tagging on various UD datasets
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --test_path TEST_PATH
+                        Dataset to test on
+  --model_path MODEL_PATH
+                        Path of the model to load
+  -e {meta,full,both}, --eval_type {meta,full,both}
+                        Type of evaluation to perform
+```
\ No newline at end of file
diff --git a/tester.py b/tester.py
index 1cc73e1..132593d 100644
--- a/tester.py
+++ b/tester.py
@@ -86,12 +86,12 @@ def meta_evaluate(dataset, label_map, bert_model, clf_head, config):
 
 def init_args():
     parser = argparse.ArgumentParser(description="Test POS tagging on various UD datasets")
-    parser.add_argument("--test_path", dest="test_path", type=str, help="Datasets to test on", required=True)
+    parser.add_argument("--test_path", dest="test_path", type=str, help="Dataset to test on", required=True)
     parser.add_argument("--model_path", dest="model_path", type=str, help="Path of the model to load", required=True)
     parser.add_argument(
         "-e",
         "--eval_type",
-        help="Type of evaluation (meta/regular)",
+        help="Type of evaluation to perform",
        choices=["meta", "full", "both"],
         default="both",
     )
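
For reference, a minimal sketch of how the new trainer entry point might be invoked, based only on the usage string added above; the config file name `configs/ewt.json` is a placeholder, not something taken from the diff:

```
# Hypothetical invocation; the config path is a placeholder.
python trainer.py --config_path configs/ewt.json --train_type meta
```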
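
Similarly, a hedged example of evaluating a trained model with the tester; the dataset path and model checkpoint path are placeholders:

```
# Hypothetical invocation; both paths are placeholders.
python tester.py --test_path data/ewt --model_path saved_models/meta_tagger.pt -e both
```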