diff --git a/propositional_logic/ln_alm/checkpoint-100000/config.json b/propositional_logic/ln_alm/checkpoint-100000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/ln_alm/checkpoint-100000/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/ln_alm/checkpoint-100000/optimizer.pt b/propositional_logic/ln_alm/checkpoint-100000/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..87eadd2a746bcac464caf13af13ad3d0665b7701 --- /dev/null +++ b/propositional_logic/ln_alm/checkpoint-100000/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dacc63c09d9d07e13ec9e4ea8aa7bbf2c551ec834c172702d61414374aaf1d59 +size 694198065 diff --git a/propositional_logic/ln_alm/checkpoint-100000/pytorch_model.bin b/propositional_logic/ln_alm/checkpoint-100000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..ea45f289651c6a19c5113875279faed7fc78683b --- /dev/null +++ b/propositional_logic/ln_alm/checkpoint-100000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d33019ba41adae198befff715674fa10743b9c2adff598ade2f1594a02b1a7a3 +size 359700713 diff --git a/propositional_logic/ln_alm/checkpoint-100000/rng_state.pth b/propositional_logic/ln_alm/checkpoint-100000/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..1a0052329f405f8d16b98093adef52f7ec5fc386 --- /dev/null +++ b/propositional_logic/ln_alm/checkpoint-100000/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:275268d12ae096293e6c66801629ae22ecc01c39d8a1be7750d5de7ef54cf04f +size 14503 diff --git a/propositional_logic/ln_alm/checkpoint-100000/scaler.pt b/propositional_logic/ln_alm/checkpoint-100000/scaler.pt new file mode 100644 index 0000000000000000000000000000000000000000..c044f241dc2354e4553b1dceb5a706f22139e028 --- /dev/null +++ b/propositional_logic/ln_alm/checkpoint-100000/scaler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cadaddb0cdb0eb56ab6fdc9548b07e37223df7c02a52dd312f849241c130451 +size 559 diff --git a/propositional_logic/ln_alm/checkpoint-100000/scheduler.pt b/propositional_logic/ln_alm/checkpoint-100000/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..1e8832360461716948976f2e9023e5604c8ee153 --- /dev/null +++ b/propositional_logic/ln_alm/checkpoint-100000/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cc5801efc8ad93998c06ee5b26161b05bdeaa5fcd97377dd4122f24bd465d35 +size 623 diff --git 
a/propositional_logic/ln_alm/checkpoint-100000/trainer_state.json b/propositional_logic/ln_alm/checkpoint-100000/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..5983b65da530d0f3e342e3804b00fee1dd21b35a --- /dev/null +++ b/propositional_logic/ln_alm/checkpoint-100000/trainer_state.json @@ -0,0 +1,1216 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 1.0, + "global_step": 100000, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.01, + "learning_rate": 2.9999999999999997e-05, + "loss": 1.2353, + "step": 500 + }, + { + "epoch": 0.01, + "learning_rate": 5.9999999999999995e-05, + "loss": 0.5742, + "step": 1000 + }, + { + "epoch": 0.01, + "learning_rate": 8.999999999999999e-05, + "loss": 0.5163, + "step": 1500 + }, + { + "epoch": 0.02, + "learning_rate": 0.00011999999999999999, + "loss": 0.443, + "step": 2000 + }, + { + "epoch": 0.03, + "learning_rate": 0.00015, + "loss": 0.4029, + "step": 2500 + }, + { + "epoch": 0.03, + "learning_rate": 0.00017999999999999998, + "loss": 0.3979, + "step": 3000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00020999999999999998, + "loss": 0.3979, + "step": 3500 + }, + { + "epoch": 0.04, + "learning_rate": 0.00023999999999999998, + "loss": 0.3973, + "step": 4000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00027, + "loss": 0.3936, + "step": 4500 + }, + { + "epoch": 0.05, + "learning_rate": 0.0003, + "loss": 0.394, + "step": 5000 + }, + { + "epoch": 0.06, + "learning_rate": 0.00033, + "loss": 0.3894, + "step": 5500 + }, + { + "epoch": 0.06, + "learning_rate": 0.00035999999999999997, + "loss": 0.391, + "step": 6000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00039, + "loss": 0.3901, + "step": 6500 + }, + { + "epoch": 0.07, + "learning_rate": 0.00041999999999999996, + "loss": 0.3881, + "step": 7000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00045, + "loss": 0.3888, + "step": 7500 + }, + { + "epoch": 0.08, + "learning_rate": 0.00047999999999999996, + "loss": 0.3862, + "step": 8000 + }, + { + "epoch": 0.09, + "learning_rate": 0.0005099999999999999, + "loss": 0.3892, + "step": 8500 + }, + { + "epoch": 0.09, + "learning_rate": 0.00054, + "loss": 0.3855, + "step": 9000 + }, + { + "epoch": 0.1, + "learning_rate": 0.00057, + "loss": 0.3841, + "step": 9500 + }, + { + "epoch": 0.1, + "learning_rate": 0.0006, + "loss": 0.385, + "step": 10000 + }, + { + "epoch": 0.1, + "learning_rate": 0.0005966666666666667, + "loss": 0.3842, + "step": 10500 + }, + { + "epoch": 0.11, + "learning_rate": 0.0005933333333333333, + "loss": 0.3836, + "step": 11000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005899999999999999, + "loss": 0.3823, + "step": 11500 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005866666666666665, + "loss": 0.3828, + "step": 12000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005833333333333333, + "loss": 0.3822, + "step": 12500 + }, + { + "epoch": 0.13, + "learning_rate": 0.00058, + "loss": 0.3815, + "step": 13000 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005766666666666666, + "loss": 0.3802, + "step": 13500 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005733333333333334, + "loss": 0.3802, + "step": 14000 + }, + { + "epoch": 0.14, + "learning_rate": 0.00057, + "loss": 0.3798, + "step": 14500 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005666666666666666, + "loss": 0.3792, + "step": 15000 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005633333333333333, + "loss": 0.3791, + "step": 15500 + }, + 
{ + "epoch": 0.16, + "learning_rate": 0.00056, + "loss": 0.3789, + "step": 16000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005566666666666667, + "loss": 0.3788, + "step": 16500 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005533333333333333, + "loss": 0.3789, + "step": 17000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005499999999999999, + "loss": 0.3785, + "step": 17500 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005466666666666667, + "loss": 0.3788, + "step": 18000 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005433399999999999, + "loss": 0.3784, + "step": 18500 + }, + { + "epoch": 0.19, + "learning_rate": 0.0005400066666666666, + "loss": 0.3783, + "step": 19000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005366733333333333, + "loss": 0.3781, + "step": 19500 + }, + { + "epoch": 0.2, + "learning_rate": 0.00053334, + "loss": 0.3784, + "step": 20000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005300133333333333, + "loss": 0.3785, + "step": 20500 + }, + { + "epoch": 0.21, + "learning_rate": 0.00052668, + "loss": 0.378, + "step": 21000 + }, + { + "epoch": 0.21, + "learning_rate": 0.0005233466666666667, + "loss": 0.3781, + "step": 21500 + }, + { + "epoch": 0.22, + "learning_rate": 0.0005200133333333332, + "loss": 0.3781, + "step": 22000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005166866666666667, + "loss": 0.3783, + "step": 22500 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005133533333333333, + "loss": 0.3779, + "step": 23000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005100199999999999, + "loss": 0.3779, + "step": 23500 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005066866666666666, + "loss": 0.3779, + "step": 24000 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005033599999999999, + "loss": 0.3779, + "step": 24500 + }, + { + "epoch": 0.25, + "learning_rate": 0.0005000266666666666, + "loss": 0.3781, + "step": 25000 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004966933333333332, + "loss": 0.3779, + "step": 25500 + }, + { + "epoch": 0.26, + "learning_rate": 0.00049336, + "loss": 0.378, + "step": 26000 + }, + { + "epoch": 0.27, + "learning_rate": 0.0004900333333333333, + "loss": 0.3776, + "step": 26500 + }, + { + "epoch": 0.27, + "learning_rate": 0.00048669999999999996, + "loss": 0.3779, + "step": 27000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004833666666666666, + "loss": 0.378, + "step": 27500 + }, + { + "epoch": 0.28, + "learning_rate": 0.00048003333333333326, + "loss": 0.3778, + "step": 28000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004767066666666666, + "loss": 0.378, + "step": 28500 + }, + { + "epoch": 0.29, + "learning_rate": 0.0004733733333333333, + "loss": 0.3777, + "step": 29000 + }, + { + "epoch": 0.29, + "learning_rate": 0.00047003999999999997, + "loss": 0.3777, + "step": 29500 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046670666666666664, + "loss": 0.3776, + "step": 30000 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046337999999999994, + "loss": 0.3778, + "step": 30500 + }, + { + "epoch": 0.31, + "learning_rate": 0.0004600466666666666, + "loss": 0.3778, + "step": 31000 + }, + { + "epoch": 0.32, + "learning_rate": 0.0004567133333333333, + "loss": 0.3776, + "step": 31500 + }, + { + "epoch": 0.32, + "learning_rate": 0.00045337999999999997, + "loss": 0.3778, + "step": 32000 + }, + { + "epoch": 0.33, + "learning_rate": 0.00045005333333333333, + "loss": 0.3777, + "step": 32500 + }, + { + "epoch": 0.33, + "learning_rate": 0.00044672, + "loss": 0.3777, + "step": 33000 + }, + { + "epoch": 0.34, + "learning_rate": 
0.00044338666666666663, + "loss": 0.3776, + "step": 33500 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004400533333333333, + "loss": 0.3777, + "step": 34000 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004367266666666666, + "loss": 0.3776, + "step": 34500 + }, + { + "epoch": 0.35, + "learning_rate": 0.0004333933333333333, + "loss": 0.3777, + "step": 35000 + }, + { + "epoch": 0.35, + "learning_rate": 0.00043005999999999996, + "loss": 0.3775, + "step": 35500 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042672666666666663, + "loss": 0.3776, + "step": 36000 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042339999999999994, + "loss": 0.3776, + "step": 36500 + }, + { + "epoch": 0.37, + "learning_rate": 0.0004200666666666666, + "loss": 0.3776, + "step": 37000 + }, + { + "epoch": 0.38, + "learning_rate": 0.00041673333333333334, + "loss": 0.3777, + "step": 37500 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004133999999999999, + "loss": 0.3777, + "step": 38000 + }, + { + "epoch": 0.39, + "learning_rate": 0.0004100733333333333, + "loss": 0.3775, + "step": 38500 + }, + { + "epoch": 0.39, + "learning_rate": 0.00040673999999999994, + "loss": 0.3777, + "step": 39000 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004034066666666666, + "loss": 0.3776, + "step": 39500 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004000733333333333, + "loss": 0.3776, + "step": 40000 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003967466666666666, + "loss": 0.3776, + "step": 40500 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003934133333333333, + "loss": 0.3776, + "step": 41000 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039007999999999995, + "loss": 0.3776, + "step": 41500 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038674666666666663, + "loss": 0.3775, + "step": 42000 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038342, + "loss": 0.3776, + "step": 42500 + }, + { + "epoch": 0.43, + "learning_rate": 0.00038008666666666666, + "loss": 0.3775, + "step": 43000 + }, + { + "epoch": 0.43, + "learning_rate": 0.00037675333333333334, + "loss": 0.3777, + "step": 43500 + }, + { + "epoch": 0.44, + "learning_rate": 0.00037341999999999996, + "loss": 0.3776, + "step": 44000 + }, + { + "epoch": 0.45, + "learning_rate": 0.0003700933333333333, + "loss": 0.3775, + "step": 44500 + }, + { + "epoch": 0.45, + "learning_rate": 0.00036675999999999994, + "loss": 0.3775, + "step": 45000 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003634266666666666, + "loss": 0.3776, + "step": 45500 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003600933333333333, + "loss": 0.3776, + "step": 46000 + }, + { + "epoch": 0.47, + "learning_rate": 0.00035676666666666665, + "loss": 0.3775, + "step": 46500 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003534333333333333, + "loss": 0.3777, + "step": 47000 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003501, + "loss": 0.3776, + "step": 47500 + }, + { + "epoch": 0.48, + "learning_rate": 0.0003467666666666667, + "loss": 0.3774, + "step": 48000 + }, + { + "epoch": 0.48, + "learning_rate": 0.00034344, + "loss": 0.3776, + "step": 48500 + }, + { + "epoch": 0.49, + "learning_rate": 0.0003401133333333333, + "loss": 0.3776, + "step": 49000 + }, + { + "epoch": 0.49, + "learning_rate": 0.00033677999999999996, + "loss": 0.3775, + "step": 49500 + }, + { + "epoch": 0.5, + "learning_rate": 0.0003334466666666667, + "loss": 0.3776, + "step": 50000 + }, + { + "epoch": 0.51, + "learning_rate": 0.00033011333333333325, + "loss": 0.3776, + "step": 50500 + }, + { + "epoch": 0.51, + "learning_rate": 
0.00032678666666666667, + "loss": 0.3776, + "step": 51000 + }, + { + "epoch": 0.52, + "learning_rate": 0.0003234533333333333, + "loss": 0.3775, + "step": 51500 + }, + { + "epoch": 0.52, + "learning_rate": 0.00032011999999999996, + "loss": 0.3776, + "step": 52000 + }, + { + "epoch": 0.53, + "learning_rate": 0.00031678666666666664, + "loss": 0.3776, + "step": 52500 + }, + { + "epoch": 0.53, + "learning_rate": 0.0003134533333333333, + "loss": 0.3776, + "step": 53000 + }, + { + "epoch": 0.54, + "learning_rate": 0.0003101266666666666, + "loss": 0.3775, + "step": 53500 + }, + { + "epoch": 0.54, + "learning_rate": 0.0003067933333333333, + "loss": 0.3777, + "step": 54000 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030345999999999997, + "loss": 0.3775, + "step": 54500 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030012666666666665, + "loss": 0.3776, + "step": 55000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002967933333333333, + "loss": 0.3775, + "step": 55500 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002934666666666666, + "loss": 0.3776, + "step": 56000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002901333333333333, + "loss": 0.3776, + "step": 56500 + }, + { + "epoch": 0.57, + "learning_rate": 0.0002868, + "loss": 0.3774, + "step": 57000 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028346666666666665, + "loss": 0.3776, + "step": 57500 + }, + { + "epoch": 0.58, + "learning_rate": 0.00028013999999999996, + "loss": 0.3775, + "step": 58000 + }, + { + "epoch": 0.58, + "learning_rate": 0.00027680666666666663, + "loss": 0.3777, + "step": 58500 + }, + { + "epoch": 0.59, + "learning_rate": 0.0002734733333333333, + "loss": 0.3775, + "step": 59000 + }, + { + "epoch": 0.59, + "learning_rate": 0.00027014, + "loss": 0.3775, + "step": 59500 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026681333333333334, + "loss": 0.3776, + "step": 60000 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026347999999999996, + "loss": 0.3775, + "step": 60500 + }, + { + "epoch": 0.61, + "learning_rate": 0.00026014666666666664, + "loss": 0.3776, + "step": 61000 + }, + { + "epoch": 0.61, + "learning_rate": 0.0002568133333333333, + "loss": 0.3776, + "step": 61500 + }, + { + "epoch": 0.62, + "learning_rate": 0.0002534866666666667, + "loss": 0.3774, + "step": 62000 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025015333333333335, + "loss": 0.3776, + "step": 62500 + }, + { + "epoch": 0.63, + "learning_rate": 0.00024681999999999997, + "loss": 0.3776, + "step": 63000 + }, + { + "epoch": 0.64, + "learning_rate": 0.00024348666666666665, + "loss": 0.3774, + "step": 63500 + }, + { + "epoch": 0.64, + "learning_rate": 0.00024015999999999998, + "loss": 0.3775, + "step": 64000 + }, + { + "epoch": 0.65, + "learning_rate": 0.00023682666666666665, + "loss": 0.3775, + "step": 64500 + }, + { + "epoch": 0.65, + "learning_rate": 0.0002334933333333333, + "loss": 0.3775, + "step": 65000 + }, + { + "epoch": 0.66, + "learning_rate": 0.00023015999999999998, + "loss": 0.3774, + "step": 65500 + }, + { + "epoch": 0.66, + "learning_rate": 0.0002268333333333333, + "loss": 0.3774, + "step": 66000 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022349999999999998, + "loss": 0.3775, + "step": 66500 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022016666666666666, + "loss": 0.3775, + "step": 67000 + }, + { + "epoch": 0.68, + "learning_rate": 0.0002168333333333333, + "loss": 0.3775, + "step": 67500 + }, + { + "epoch": 0.68, + "learning_rate": 0.00021350666666666667, + "loss": 0.3775, + "step": 68000 + }, + { + "epoch": 0.69, + 
"learning_rate": 0.00021017333333333332, + "loss": 0.3776, + "step": 68500 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020684, + "loss": 0.3774, + "step": 69000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020350666666666667, + "loss": 0.3773, + "step": 69500 + }, + { + "epoch": 0.7, + "learning_rate": 0.00020018, + "loss": 0.3775, + "step": 70000 + }, + { + "epoch": 0.7, + "learning_rate": 0.00019684666666666667, + "loss": 0.3776, + "step": 70500 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019351333333333332, + "loss": 0.3775, + "step": 71000 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019018, + "loss": 0.3775, + "step": 71500 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018684666666666667, + "loss": 0.3775, + "step": 72000 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018351999999999998, + "loss": 0.3776, + "step": 72500 + }, + { + "epoch": 0.73, + "learning_rate": 0.00018018666666666663, + "loss": 0.3774, + "step": 73000 + }, + { + "epoch": 0.73, + "learning_rate": 0.0001768533333333333, + "loss": 0.3776, + "step": 73500 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017352, + "loss": 0.3776, + "step": 74000 + }, + { + "epoch": 0.74, + "learning_rate": 0.0001701933333333333, + "loss": 0.3775, + "step": 74500 + }, + { + "epoch": 0.75, + "learning_rate": 0.00016685999999999998, + "loss": 0.3775, + "step": 75000 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016352666666666663, + "loss": 0.3776, + "step": 75500 + }, + { + "epoch": 0.76, + "learning_rate": 0.0001601933333333333, + "loss": 0.3775, + "step": 76000 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015686666666666664, + "loss": 0.3775, + "step": 76500 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015353333333333332, + "loss": 0.3775, + "step": 77000 + }, + { + "epoch": 0.78, + "learning_rate": 0.0001502, + "loss": 0.3775, + "step": 77500 + }, + { + "epoch": 0.78, + "learning_rate": 0.00014686666666666667, + "loss": 0.3774, + "step": 78000 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014353999999999997, + "loss": 0.3774, + "step": 78500 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014020666666666667, + "loss": 0.3775, + "step": 79000 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013687333333333332, + "loss": 0.3775, + "step": 79500 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013354, + "loss": 0.3775, + "step": 80000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00013021333333333333, + "loss": 0.3776, + "step": 80500 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012687999999999998, + "loss": 0.3776, + "step": 81000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012354666666666665, + "loss": 0.3776, + "step": 81500 + }, + { + "epoch": 0.82, + "learning_rate": 0.00012021333333333333, + "loss": 0.3775, + "step": 82000 + }, + { + "epoch": 0.82, + "learning_rate": 0.00011688666666666665, + "loss": 0.3774, + "step": 82500 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011355333333333334, + "loss": 0.3775, + "step": 83000 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011022, + "loss": 0.3775, + "step": 83500 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010688666666666665, + "loss": 0.3774, + "step": 84000 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010355999999999999, + "loss": 0.3775, + "step": 84500 + }, + { + "epoch": 0.85, + "learning_rate": 0.00010022666666666665, + "loss": 0.3775, + "step": 85000 + }, + { + "epoch": 0.85, + "learning_rate": 9.689333333333332e-05, + "loss": 0.3775, + "step": 85500 + }, + { + "epoch": 0.86, + "learning_rate": 9.355999999999999e-05, + 
"loss": 0.3773, + "step": 86000 + }, + { + "epoch": 0.86, + "learning_rate": 9.023333333333332e-05, + "loss": 0.3775, + "step": 86500 + }, + { + "epoch": 0.87, + "learning_rate": 8.69e-05, + "loss": 0.3775, + "step": 87000 + }, + { + "epoch": 0.88, + "learning_rate": 8.356666666666666e-05, + "loss": 0.3773, + "step": 87500 + }, + { + "epoch": 0.88, + "learning_rate": 8.023333333333332e-05, + "loss": 0.3775, + "step": 88000 + }, + { + "epoch": 0.89, + "learning_rate": 7.690666666666667e-05, + "loss": 0.3775, + "step": 88500 + }, + { + "epoch": 0.89, + "learning_rate": 7.357333333333333e-05, + "loss": 0.3776, + "step": 89000 + }, + { + "epoch": 0.9, + "learning_rate": 7.023999999999999e-05, + "loss": 0.3774, + "step": 89500 + }, + { + "epoch": 0.9, + "learning_rate": 6.690666666666667e-05, + "loss": 0.3776, + "step": 90000 + }, + { + "epoch": 0.91, + "learning_rate": 6.358e-05, + "loss": 0.3774, + "step": 90500 + }, + { + "epoch": 0.91, + "learning_rate": 6.024666666666667e-05, + "loss": 0.3775, + "step": 91000 + }, + { + "epoch": 0.92, + "learning_rate": 5.691333333333333e-05, + "loss": 0.3774, + "step": 91500 + }, + { + "epoch": 0.92, + "learning_rate": 5.358e-05, + "loss": 0.3775, + "step": 92000 + }, + { + "epoch": 0.93, + "learning_rate": 5.025333333333333e-05, + "loss": 0.3775, + "step": 92500 + }, + { + "epoch": 0.93, + "learning_rate": 4.692e-05, + "loss": 0.3773, + "step": 93000 + }, + { + "epoch": 0.94, + "learning_rate": 4.358666666666666e-05, + "loss": 0.3773, + "step": 93500 + }, + { + "epoch": 0.94, + "learning_rate": 4.025333333333333e-05, + "loss": 0.3774, + "step": 94000 + }, + { + "epoch": 0.94, + "learning_rate": 3.692666666666666e-05, + "loss": 0.3774, + "step": 94500 + }, + { + "epoch": 0.95, + "learning_rate": 3.359333333333333e-05, + "loss": 0.3775, + "step": 95000 + }, + { + "epoch": 0.95, + "learning_rate": 3.0259999999999995e-05, + "loss": 0.3774, + "step": 95500 + }, + { + "epoch": 0.96, + "learning_rate": 2.6926666666666667e-05, + "loss": 0.3774, + "step": 96000 + }, + { + "epoch": 0.96, + "learning_rate": 2.3599999999999998e-05, + "loss": 0.3774, + "step": 96500 + }, + { + "epoch": 0.97, + "learning_rate": 2.0266666666666663e-05, + "loss": 0.3775, + "step": 97000 + }, + { + "epoch": 0.97, + "learning_rate": 1.6933333333333333e-05, + "loss": 0.3774, + "step": 97500 + }, + { + "epoch": 0.98, + "learning_rate": 1.36e-05, + "loss": 0.3775, + "step": 98000 + }, + { + "epoch": 0.98, + "learning_rate": 1.0266666666666666e-05, + "loss": 0.3775, + "step": 98500 + }, + { + "epoch": 0.99, + "learning_rate": 6.939999999999999e-06, + "loss": 0.3774, + "step": 99000 + }, + { + "epoch": 0.99, + "learning_rate": 3.6066666666666663e-06, + "loss": 0.3774, + "step": 99500 + }, + { + "epoch": 1.0, + "learning_rate": 2.733333333333333e-07, + "loss": 0.3774, + "step": 100000 + } + ], + "max_steps": 100000, + "num_train_epochs": 1, + "total_flos": 4.005757407028838e+19, + "trial_name": null, + "trial_params": null +} diff --git a/propositional_logic/ln_alm/checkpoint-100000/training_args.bin b/propositional_logic/ln_alm/checkpoint-100000/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..6631b245d4a5c05f6a6b607e12052d7ab6dca82d --- /dev/null +++ b/propositional_logic/ln_alm/checkpoint-100000/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80979526bc93bd92f79329c7626a6e5643dd6e98c75ea67395c79b09f05795cf +size 2735 diff --git a/propositional_logic/ln_alm/config.json b/propositional_logic/ln_alm/config.json new 
file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/ln_alm/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/ln_alm/pytorch_model.bin b/propositional_logic/ln_alm/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..ea45f289651c6a19c5113875279faed7fc78683b --- /dev/null +++ b/propositional_logic/ln_alm/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d33019ba41adae198befff715674fa10743b9c2adff598ade2f1594a02b1a7a3 +size 359700713 diff --git a/propositional_logic/ln_alm/runs/Oct27_11-35-54_allennlp-cirrascale-01.reviz.ai2.in/1666977158.968696/events.out.tfevents.1666977158.allennlp-cirrascale-01.reviz.ai2.in.1236.1 b/propositional_logic/ln_alm/runs/Oct27_11-35-54_allennlp-cirrascale-01.reviz.ai2.in/1666977158.968696/events.out.tfevents.1666977158.allennlp-cirrascale-01.reviz.ai2.in.1236.1 new file mode 100644 index 0000000000000000000000000000000000000000..d27e5f803d7ca36c8dcca432b70c6fda58cc52db --- /dev/null +++ b/propositional_logic/ln_alm/runs/Oct27_11-35-54_allennlp-cirrascale-01.reviz.ai2.in/1666977158.968696/events.out.tfevents.1666977158.allennlp-cirrascale-01.reviz.ai2.in.1236.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1abb58bb59be5dafa13472e41f33e99947caba071ecead163de80ea12333d7f +size 4463 diff --git a/propositional_logic/ln_alm/runs/Oct27_11-35-54_allennlp-cirrascale-01.reviz.ai2.in/events.out.tfevents.1666977153.allennlp-cirrascale-01.reviz.ai2.in.1236.0 b/propositional_logic/ln_alm/runs/Oct27_11-35-54_allennlp-cirrascale-01.reviz.ai2.in/events.out.tfevents.1666977153.allennlp-cirrascale-01.reviz.ai2.in.1236.0 new file mode 100644 index 0000000000000000000000000000000000000000..ae4bc9a5d1f737fe8d5bab13b58b2b9bf0b27dfa --- /dev/null +++ b/propositional_logic/ln_alm/runs/Oct27_11-35-54_allennlp-cirrascale-01.reviz.ai2.in/events.out.tfevents.1666977153.allennlp-cirrascale-01.reviz.ai2.in.1236.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e24f78669645c7fa6d59835daf009e81b4f61ccaab0a5bc542abef1af3192bd4 +size 35661 diff --git a/propositional_logic/ln_alm/training_args.bin b/propositional_logic/ln_alm/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..6631b245d4a5c05f6a6b607e12052d7ab6dca82d --- /dev/null +++ b/propositional_logic/ln_alm/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80979526bc93bd92f79329c7626a6e5643dd6e98c75ea67395c79b09f05795cf +size 2735 diff --git a/propositional_logic/ln_mlm/checkpoint-100000/config.json 
b/propositional_logic/ln_mlm/checkpoint-100000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..102fa0515a2e7f9a348f86a1dcb7d135d6cfa8c6 --- /dev/null +++ b/propositional_logic/ln_mlm/checkpoint-100000/config.json @@ -0,0 +1,27 @@ +{ + "architectures": [ + "RobertaForMaskedLM" + ], + "attention_probs_dropout_prob": 0.1, + "bos_token_id": 0, + "classifier_dropout": null, + "eos_token_id": 2, + "gradient_checkpointing": false, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-05, + "max_position_embeddings": 514, + "model_type": "roberta", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 1, + "position_embedding_type": "absolute", + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "type_vocab_size": 1, + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/ln_mlm/checkpoint-100000/optimizer.pt b/propositional_logic/ln_mlm/checkpoint-100000/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..ad38cf46618ea8f7fa2258f77c46273c97b0d285 --- /dev/null +++ b/propositional_logic/ln_mlm/checkpoint-100000/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df9d8863f266529665ce1bd08fc457ddae23f94a274a2c830e9b516ba0ad138d +size 695849113 diff --git a/propositional_logic/ln_mlm/checkpoint-100000/pytorch_model.bin b/propositional_logic/ln_mlm/checkpoint-100000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..1126259f6fcb441966008604c9c135dab2d2574a --- /dev/null +++ b/propositional_logic/ln_mlm/checkpoint-100000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:810a13b278b8bdcc7a273dde004d8b35700c28c3c143a425ff66da9ecf1c9740 +size 347937771 diff --git a/propositional_logic/ln_mlm/checkpoint-100000/rng_state.pth b/propositional_logic/ln_mlm/checkpoint-100000/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..1a0052329f405f8d16b98093adef52f7ec5fc386 --- /dev/null +++ b/propositional_logic/ln_mlm/checkpoint-100000/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:275268d12ae096293e6c66801629ae22ecc01c39d8a1be7750d5de7ef54cf04f +size 14503 diff --git a/propositional_logic/ln_mlm/checkpoint-100000/scaler.pt b/propositional_logic/ln_mlm/checkpoint-100000/scaler.pt new file mode 100644 index 0000000000000000000000000000000000000000..18973ed63ba788903f022db6a1421dacb199f986 --- /dev/null +++ b/propositional_logic/ln_mlm/checkpoint-100000/scaler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9d1f280998184f253723b01066537d6427ea14f757cd61741edd038bbb20496 +size 559 diff --git a/propositional_logic/ln_mlm/checkpoint-100000/scheduler.pt b/propositional_logic/ln_mlm/checkpoint-100000/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..f9c53104ef99d6b0bf7a8783ed9f1b0b713b1f32 --- /dev/null +++ b/propositional_logic/ln_mlm/checkpoint-100000/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52aa869af3168494a9663ed52b1edb8f050b60bc9e3263f9e675f8db24049470 +size 623 diff --git a/propositional_logic/ln_mlm/checkpoint-100000/trainer_state.json b/propositional_logic/ln_mlm/checkpoint-100000/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..0547a42f58cdf41a4b0ea5f4385ead028d6ac99c --- /dev/null 
+++ b/propositional_logic/ln_mlm/checkpoint-100000/trainer_state.json @@ -0,0 +1,1216 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 1.0, + "global_step": 100000, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.01, + "learning_rate": 2.9999999999999997e-05, + "loss": 2.1616, + "step": 500 + }, + { + "epoch": 0.01, + "learning_rate": 5.9999999999999995e-05, + "loss": 1.5106, + "step": 1000 + }, + { + "epoch": 0.01, + "learning_rate": 8.999999999999999e-05, + "loss": 1.478, + "step": 1500 + }, + { + "epoch": 0.02, + "learning_rate": 0.00011999999999999999, + "loss": 1.3936, + "step": 2000 + }, + { + "epoch": 0.03, + "learning_rate": 0.00015, + "loss": 1.3072, + "step": 2500 + }, + { + "epoch": 0.03, + "learning_rate": 0.00017999999999999998, + "loss": 1.2877, + "step": 3000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00020999999999999998, + "loss": 1.2655, + "step": 3500 + }, + { + "epoch": 0.04, + "learning_rate": 0.00023999999999999998, + "loss": 1.2326, + "step": 4000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00027, + "loss": 1.2196, + "step": 4500 + }, + { + "epoch": 0.05, + "learning_rate": 0.0003, + "loss": 1.2138, + "step": 5000 + }, + { + "epoch": 0.06, + "learning_rate": 0.00033, + "loss": 1.21, + "step": 5500 + }, + { + "epoch": 0.06, + "learning_rate": 0.00035999999999999997, + "loss": 1.2076, + "step": 6000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00039, + "loss": 1.2057, + "step": 6500 + }, + { + "epoch": 0.07, + "learning_rate": 0.00041999999999999996, + "loss": 1.2044, + "step": 7000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00045, + "loss": 1.203, + "step": 7500 + }, + { + "epoch": 0.08, + "learning_rate": 0.00047999999999999996, + "loss": 1.2026, + "step": 8000 + }, + { + "epoch": 0.09, + "learning_rate": 0.0005099999999999999, + "loss": 1.2016, + "step": 8500 + }, + { + "epoch": 0.09, + "learning_rate": 0.00054, + "loss": 1.2018, + "step": 9000 + }, + { + "epoch": 0.1, + "learning_rate": 0.00057, + "loss": 1.2005, + "step": 9500 + }, + { + "epoch": 0.1, + "learning_rate": 0.0006, + "loss": 1.1998, + "step": 10000 + }, + { + "epoch": 0.1, + "learning_rate": 0.0005966666666666667, + "loss": 1.201, + "step": 10500 + }, + { + "epoch": 0.11, + "learning_rate": 0.0005933333333333333, + "loss": 1.1983, + "step": 11000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005899999999999999, + "loss": 1.1981, + "step": 11500 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005866666666666665, + "loss": 1.198, + "step": 12000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005833333333333333, + "loss": 1.1963, + "step": 12500 + }, + { + "epoch": 0.13, + "learning_rate": 0.00058, + "loss": 1.1961, + "step": 13000 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005766666666666666, + "loss": 1.1953, + "step": 13500 + }, + { + "epoch": 0.14, + "learning_rate": 0.00057334, + "loss": 1.1958, + "step": 14000 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005700066666666666, + "loss": 1.1948, + "step": 14500 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005666733333333333, + "loss": 1.1942, + "step": 15000 + }, + { + "epoch": 0.15, + "learning_rate": 0.00056334, + "loss": 1.1937, + "step": 15500 + }, + { + "epoch": 0.16, + "learning_rate": 0.0005600066666666666, + "loss": 1.1933, + "step": 16000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005566733333333333, + "loss": 1.1934, + "step": 16500 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005533399999999999, + 
"loss": 1.193, + "step": 17000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005500066666666667, + "loss": 1.193, + "step": 17500 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005466733333333333, + "loss": 1.1925, + "step": 18000 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005433399999999999, + "loss": 1.1927, + "step": 18500 + }, + { + "epoch": 0.19, + "learning_rate": 0.0005400133333333333, + "loss": 1.1928, + "step": 19000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005366799999999999, + "loss": 1.192, + "step": 19500 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005333466666666666, + "loss": 1.1928, + "step": 20000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005300133333333333, + "loss": 1.1916, + "step": 20500 + }, + { + "epoch": 0.21, + "learning_rate": 0.00052668, + "loss": 1.1924, + "step": 21000 + }, + { + "epoch": 0.21, + "learning_rate": 0.0005233466666666667, + "loss": 1.1916, + "step": 21500 + }, + { + "epoch": 0.22, + "learning_rate": 0.0005200199999999999, + "loss": 1.1939, + "step": 22000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005166866666666667, + "loss": 1.1912, + "step": 22500 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005133533333333333, + "loss": 1.1921, + "step": 23000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005100199999999999, + "loss": 1.1908, + "step": 23500 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005066933333333333, + "loss": 1.1917, + "step": 24000 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005033599999999999, + "loss": 1.1912, + "step": 24500 + }, + { + "epoch": 0.25, + "learning_rate": 0.0005000266666666666, + "loss": 1.1906, + "step": 25000 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004966933333333332, + "loss": 1.1906, + "step": 25500 + }, + { + "epoch": 0.26, + "learning_rate": 0.00049336, + "loss": 1.1905, + "step": 26000 + }, + { + "epoch": 0.27, + "learning_rate": 0.0004900333333333333, + "loss": 1.191, + "step": 26500 + }, + { + "epoch": 0.27, + "learning_rate": 0.00048669999999999996, + "loss": 1.1902, + "step": 27000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004833666666666666, + "loss": 1.19, + "step": 27500 + }, + { + "epoch": 0.28, + "learning_rate": 0.00048003333333333326, + "loss": 1.1906, + "step": 28000 + }, + { + "epoch": 0.28, + "learning_rate": 0.00047669999999999993, + "loss": 1.1905, + "step": 28500 + }, + { + "epoch": 0.29, + "learning_rate": 0.0004733666666666666, + "loss": 1.1903, + "step": 29000 + }, + { + "epoch": 0.29, + "learning_rate": 0.0004700333333333333, + "loss": 1.1899, + "step": 29500 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046670666666666664, + "loss": 1.1904, + "step": 30000 + }, + { + "epoch": 0.3, + "learning_rate": 0.0004633733333333333, + "loss": 1.1898, + "step": 30500 + }, + { + "epoch": 0.31, + "learning_rate": 0.00046004, + "loss": 1.1908, + "step": 31000 + }, + { + "epoch": 0.32, + "learning_rate": 0.0004567066666666666, + "loss": 1.1902, + "step": 31500 + }, + { + "epoch": 0.32, + "learning_rate": 0.0004533733333333333, + "loss": 1.1899, + "step": 32000 + }, + { + "epoch": 0.33, + "learning_rate": 0.0004500466666666666, + "loss": 1.1898, + "step": 32500 + }, + { + "epoch": 0.33, + "learning_rate": 0.00044671333333333327, + "loss": 1.1895, + "step": 33000 + }, + { + "epoch": 0.34, + "learning_rate": 0.00044337999999999995, + "loss": 1.1897, + "step": 33500 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004400466666666666, + "loss": 1.1892, + "step": 34000 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004367133333333333, + "loss": 1.1894, + 
"step": 34500 + }, + { + "epoch": 0.35, + "learning_rate": 0.00043338, + "loss": 1.1893, + "step": 35000 + }, + { + "epoch": 0.35, + "learning_rate": 0.00043005333333333333, + "loss": 1.1895, + "step": 35500 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042672, + "loss": 1.1894, + "step": 36000 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042338666666666663, + "loss": 1.1896, + "step": 36500 + }, + { + "epoch": 0.37, + "learning_rate": 0.0004200533333333333, + "loss": 1.1891, + "step": 37000 + }, + { + "epoch": 0.38, + "learning_rate": 0.00041672, + "loss": 1.1891, + "step": 37500 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004133933333333333, + "loss": 1.1889, + "step": 38000 + }, + { + "epoch": 0.39, + "learning_rate": 0.00041005999999999996, + "loss": 1.189, + "step": 38500 + }, + { + "epoch": 0.39, + "learning_rate": 0.00040672666666666664, + "loss": 1.1893, + "step": 39000 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004033933333333333, + "loss": 1.1889, + "step": 39500 + }, + { + "epoch": 0.4, + "learning_rate": 0.00040005999999999993, + "loss": 1.1887, + "step": 40000 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003967266666666666, + "loss": 1.1885, + "step": 40500 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003933999999999999, + "loss": 1.1889, + "step": 41000 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039006666666666664, + "loss": 1.1885, + "step": 41500 + }, + { + "epoch": 0.42, + "learning_rate": 0.0003867333333333333, + "loss": 1.1889, + "step": 42000 + }, + { + "epoch": 0.42, + "learning_rate": 0.0003834, + "loss": 1.1888, + "step": 42500 + }, + { + "epoch": 0.43, + "learning_rate": 0.00038006666666666667, + "loss": 1.1889, + "step": 43000 + }, + { + "epoch": 0.43, + "learning_rate": 0.00037673333333333335, + "loss": 1.1883, + "step": 43500 + }, + { + "epoch": 0.44, + "learning_rate": 0.00037339999999999997, + "loss": 1.1883, + "step": 44000 + }, + { + "epoch": 0.45, + "learning_rate": 0.00037006666666666664, + "loss": 1.1886, + "step": 44500 + }, + { + "epoch": 0.45, + "learning_rate": 0.00036674666666666663, + "loss": 1.1882, + "step": 45000 + }, + { + "epoch": 0.46, + "learning_rate": 0.00036341333333333336, + "loss": 1.1881, + "step": 45500 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003600799999999999, + "loss": 1.1879, + "step": 46000 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003567466666666666, + "loss": 1.1882, + "step": 46500 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003534133333333333, + "loss": 1.1883, + "step": 47000 + }, + { + "epoch": 0.47, + "learning_rate": 0.00035008, + "loss": 1.188, + "step": 47500 + }, + { + "epoch": 0.48, + "learning_rate": 0.0003467466666666667, + "loss": 1.1883, + "step": 48000 + }, + { + "epoch": 0.48, + "learning_rate": 0.00034342, + "loss": 1.1881, + "step": 48500 + }, + { + "epoch": 0.49, + "learning_rate": 0.00034008666666666666, + "loss": 1.1876, + "step": 49000 + }, + { + "epoch": 0.49, + "learning_rate": 0.0003367533333333333, + "loss": 1.1889, + "step": 49500 + }, + { + "epoch": 0.5, + "learning_rate": 0.00033341999999999996, + "loss": 1.1881, + "step": 50000 + }, + { + "epoch": 0.51, + "learning_rate": 0.00033008666666666664, + "loss": 1.1879, + "step": 50500 + }, + { + "epoch": 0.51, + "learning_rate": 0.0003267533333333333, + "loss": 1.1881, + "step": 51000 + }, + { + "epoch": 0.52, + "learning_rate": 0.00032342, + "loss": 1.1878, + "step": 51500 + }, + { + "epoch": 0.52, + "learning_rate": 0.00032008666666666667, + "loss": 1.1877, + "step": 52000 + }, + { + "epoch": 0.53, + 
"learning_rate": 0.00031675999999999997, + "loss": 1.1877, + "step": 52500 + }, + { + "epoch": 0.53, + "learning_rate": 0.0003134333333333333, + "loss": 1.1879, + "step": 53000 + }, + { + "epoch": 0.54, + "learning_rate": 0.0003101, + "loss": 1.1877, + "step": 53500 + }, + { + "epoch": 0.54, + "learning_rate": 0.0003067666666666667, + "loss": 1.1875, + "step": 54000 + }, + { + "epoch": 0.55, + "learning_rate": 0.0003034333333333333, + "loss": 1.1873, + "step": 54500 + }, + { + "epoch": 0.55, + "learning_rate": 0.0003001, + "loss": 1.1873, + "step": 55000 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029677333333333333, + "loss": 1.1877, + "step": 55500 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029343999999999996, + "loss": 1.1876, + "step": 56000 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029010666666666663, + "loss": 1.1874, + "step": 56500 + }, + { + "epoch": 0.57, + "learning_rate": 0.0002867733333333333, + "loss": 1.1874, + "step": 57000 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028344, + "loss": 1.1871, + "step": 57500 + }, + { + "epoch": 0.58, + "learning_rate": 0.00028010666666666666, + "loss": 1.1873, + "step": 58000 + }, + { + "epoch": 0.58, + "learning_rate": 0.00027677999999999996, + "loss": 1.1871, + "step": 58500 + }, + { + "epoch": 0.59, + "learning_rate": 0.00027344666666666664, + "loss": 1.1874, + "step": 59000 + }, + { + "epoch": 0.59, + "learning_rate": 0.0002701133333333333, + "loss": 1.1873, + "step": 59500 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026678, + "loss": 1.1877, + "step": 60000 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026344666666666667, + "loss": 1.1874, + "step": 60500 + }, + { + "epoch": 0.61, + "learning_rate": 0.0002601133333333333, + "loss": 1.1869, + "step": 61000 + }, + { + "epoch": 0.61, + "learning_rate": 0.00025677999999999996, + "loss": 1.1873, + "step": 61500 + }, + { + "epoch": 0.62, + "learning_rate": 0.0002534533333333333, + "loss": 1.1873, + "step": 62000 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025012, + "loss": 1.1872, + "step": 62500 + }, + { + "epoch": 0.63, + "learning_rate": 0.0002467866666666667, + "loss": 1.1871, + "step": 63000 + }, + { + "epoch": 0.64, + "learning_rate": 0.0002434533333333333, + "loss": 1.1871, + "step": 63500 + }, + { + "epoch": 0.64, + "learning_rate": 0.00024011999999999997, + "loss": 1.1866, + "step": 64000 + }, + { + "epoch": 0.65, + "learning_rate": 0.00023678666666666665, + "loss": 1.187, + "step": 64500 + }, + { + "epoch": 0.65, + "learning_rate": 0.0002334533333333333, + "loss": 1.1868, + "step": 65000 + }, + { + "epoch": 0.66, + "learning_rate": 0.00023011999999999997, + "loss": 1.1868, + "step": 65500 + }, + { + "epoch": 0.66, + "learning_rate": 0.0002267933333333333, + "loss": 1.1868, + "step": 66000 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022345999999999998, + "loss": 1.1868, + "step": 66500 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022012666666666665, + "loss": 1.1871, + "step": 67000 + }, + { + "epoch": 0.68, + "learning_rate": 0.0002167933333333333, + "loss": 1.1865, + "step": 67500 + }, + { + "epoch": 0.68, + "learning_rate": 0.00021346666666666666, + "loss": 1.1865, + "step": 68000 + }, + { + "epoch": 0.69, + "learning_rate": 0.0002101333333333333, + "loss": 1.187, + "step": 68500 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020679999999999999, + "loss": 1.1869, + "step": 69000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020346666666666663, + "loss": 1.1866, + "step": 69500 + }, + { + "epoch": 0.7, + "learning_rate": 0.00020014, 
+ "loss": 1.1865, + "step": 70000 + }, + { + "epoch": 0.7, + "learning_rate": 0.00019680666666666664, + "loss": 1.1865, + "step": 70500 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019347333333333332, + "loss": 1.1867, + "step": 71000 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019014, + "loss": 1.1866, + "step": 71500 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018681333333333332, + "loss": 1.1866, + "step": 72000 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018348, + "loss": 1.1867, + "step": 72500 + }, + { + "epoch": 0.73, + "learning_rate": 0.00018014666666666665, + "loss": 1.1864, + "step": 73000 + }, + { + "epoch": 0.73, + "learning_rate": 0.00017682, + "loss": 1.1865, + "step": 73500 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017348666666666665, + "loss": 1.1865, + "step": 74000 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017015333333333333, + "loss": 1.1866, + "step": 74500 + }, + { + "epoch": 0.75, + "learning_rate": 0.00016682, + "loss": 1.1864, + "step": 75000 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016348666666666665, + "loss": 1.1864, + "step": 75500 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016015333333333333, + "loss": 1.1864, + "step": 76000 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015682, + "loss": 1.1861, + "step": 76500 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015348666666666666, + "loss": 1.1863, + "step": 77000 + }, + { + "epoch": 0.78, + "learning_rate": 0.00015016, + "loss": 1.1864, + "step": 77500 + }, + { + "epoch": 0.78, + "learning_rate": 0.00014682666666666666, + "loss": 1.186, + "step": 78000 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014349333333333334, + "loss": 1.1862, + "step": 78500 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014015999999999999, + "loss": 1.1862, + "step": 79000 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013683333333333332, + "loss": 1.1861, + "step": 79500 + }, + { + "epoch": 0.8, + "learning_rate": 0.0001335, + "loss": 1.186, + "step": 80000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00013016666666666664, + "loss": 1.1859, + "step": 80500 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012684, + "loss": 1.1864, + "step": 81000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012350666666666665, + "loss": 1.1858, + "step": 81500 + }, + { + "epoch": 0.82, + "learning_rate": 0.00012017333333333331, + "loss": 1.186, + "step": 82000 + }, + { + "epoch": 0.82, + "learning_rate": 0.00011684, + "loss": 1.1857, + "step": 82500 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011350666666666666, + "loss": 1.1859, + "step": 83000 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011017333333333332, + "loss": 1.1856, + "step": 83500 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010684, + "loss": 1.1858, + "step": 84000 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010350666666666666, + "loss": 1.1858, + "step": 84500 + }, + { + "epoch": 0.85, + "learning_rate": 0.00010017999999999999, + "loss": 1.1856, + "step": 85000 + }, + { + "epoch": 0.85, + "learning_rate": 9.684666666666666e-05, + "loss": 1.1857, + "step": 85500 + }, + { + "epoch": 0.86, + "learning_rate": 9.351333333333332e-05, + "loss": 1.1857, + "step": 86000 + }, + { + "epoch": 0.86, + "learning_rate": 9.017999999999998e-05, + "loss": 1.1858, + "step": 86500 + }, + { + "epoch": 0.87, + "learning_rate": 8.685333333333332e-05, + "loss": 1.1855, + "step": 87000 + }, + { + "epoch": 0.88, + "learning_rate": 8.351999999999999e-05, + "loss": 1.1854, + "step": 87500 + }, + { + "epoch": 0.88, + "learning_rate": 
8.018666666666666e-05, + "loss": 1.1859, + "step": 88000 + }, + { + "epoch": 0.89, + "learning_rate": 7.685333333333332e-05, + "loss": 1.1859, + "step": 88500 + }, + { + "epoch": 0.89, + "learning_rate": 7.352666666666666e-05, + "loss": 1.1856, + "step": 89000 + }, + { + "epoch": 0.9, + "learning_rate": 7.019333333333333e-05, + "loss": 1.1857, + "step": 89500 + }, + { + "epoch": 0.9, + "learning_rate": 6.686e-05, + "loss": 1.1858, + "step": 90000 + }, + { + "epoch": 0.91, + "learning_rate": 6.352666666666666e-05, + "loss": 1.1855, + "step": 90500 + }, + { + "epoch": 0.91, + "learning_rate": 6.019999999999999e-05, + "loss": 1.1852, + "step": 91000 + }, + { + "epoch": 0.92, + "learning_rate": 5.686666666666666e-05, + "loss": 1.1855, + "step": 91500 + }, + { + "epoch": 0.92, + "learning_rate": 5.3533333333333324e-05, + "loss": 1.1854, + "step": 92000 + }, + { + "epoch": 0.93, + "learning_rate": 5.0199999999999994e-05, + "loss": 1.1853, + "step": 92500 + }, + { + "epoch": 0.93, + "learning_rate": 4.6873333333333324e-05, + "loss": 1.1856, + "step": 93000 + }, + { + "epoch": 0.94, + "learning_rate": 4.354e-05, + "loss": 1.1856, + "step": 93500 + }, + { + "epoch": 0.94, + "learning_rate": 4.020666666666666e-05, + "loss": 1.1852, + "step": 94000 + }, + { + "epoch": 0.94, + "learning_rate": 3.687333333333333e-05, + "loss": 1.1853, + "step": 94500 + }, + { + "epoch": 0.95, + "learning_rate": 3.354666666666666e-05, + "loss": 1.1854, + "step": 95000 + }, + { + "epoch": 0.95, + "learning_rate": 3.021333333333333e-05, + "loss": 1.1853, + "step": 95500 + }, + { + "epoch": 0.96, + "learning_rate": 2.6879999999999997e-05, + "loss": 1.1854, + "step": 96000 + }, + { + "epoch": 0.96, + "learning_rate": 2.3546666666666663e-05, + "loss": 1.1854, + "step": 96500 + }, + { + "epoch": 0.97, + "learning_rate": 2.022e-05, + "loss": 1.1854, + "step": 97000 + }, + { + "epoch": 0.97, + "learning_rate": 1.6886666666666665e-05, + "loss": 1.1852, + "step": 97500 + }, + { + "epoch": 0.98, + "learning_rate": 1.3553333333333331e-05, + "loss": 1.1851, + "step": 98000 + }, + { + "epoch": 0.98, + "learning_rate": 1.022e-05, + "loss": 1.1853, + "step": 98500 + }, + { + "epoch": 0.99, + "learning_rate": 6.893333333333332e-06, + "loss": 1.1851, + "step": 99000 + }, + { + "epoch": 0.99, + "learning_rate": 3.5599999999999994e-06, + "loss": 1.1851, + "step": 99500 + }, + { + "epoch": 1.0, + "learning_rate": 2.2666666666666663e-07, + "loss": 1.1852, + "step": 100000 + } + ], + "max_steps": 100000, + "num_train_epochs": 1, + "total_flos": 4.033700503703942e+19, + "trial_name": null, + "trial_params": null +} diff --git a/propositional_logic/ln_mlm/checkpoint-100000/training_args.bin b/propositional_logic/ln_mlm/checkpoint-100000/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..8d6510e69f7f4b4e104614b057ec57a7a631e373 --- /dev/null +++ b/propositional_logic/ln_mlm/checkpoint-100000/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f21b6f934872fd1ff44db7303058ff9e1201e5175891842ccc1d594dd226d3ae +size 2799 diff --git a/propositional_logic/ln_mlm/config.json b/propositional_logic/ln_mlm/config.json new file mode 100644 index 0000000000000000000000000000000000000000..102fa0515a2e7f9a348f86a1dcb7d135d6cfa8c6 --- /dev/null +++ b/propositional_logic/ln_mlm/config.json @@ -0,0 +1,27 @@ +{ + "architectures": [ + "RobertaForMaskedLM" + ], + "attention_probs_dropout_prob": 0.1, + "bos_token_id": 0, + "classifier_dropout": null, + "eos_token_id": 2, + 
"gradient_checkpointing": false, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-05, + "max_position_embeddings": 514, + "model_type": "roberta", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 1, + "position_embedding_type": "absolute", + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "type_vocab_size": 1, + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/ln_mlm/pytorch_model.bin b/propositional_logic/ln_mlm/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..1126259f6fcb441966008604c9c135dab2d2574a --- /dev/null +++ b/propositional_logic/ln_mlm/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:810a13b278b8bdcc7a273dde004d8b35700c28c3c143a425ff66da9ecf1c9740 +size 347937771 diff --git a/propositional_logic/ln_mlm/runs/Oct27_11-38-51_allennlp-cirrascale-01.reviz.ai2.in/1666978568.2974002/events.out.tfevents.1666978568.allennlp-cirrascale-01.reviz.ai2.in.1011.1 b/propositional_logic/ln_mlm/runs/Oct27_11-38-51_allennlp-cirrascale-01.reviz.ai2.in/1666978568.2974002/events.out.tfevents.1666978568.allennlp-cirrascale-01.reviz.ai2.in.1011.1 new file mode 100644 index 0000000000000000000000000000000000000000..4db72cc8d40be9fccc8b8a50501d3e1887128d75 --- /dev/null +++ b/propositional_logic/ln_mlm/runs/Oct27_11-38-51_allennlp-cirrascale-01.reviz.ai2.in/1666978568.2974002/events.out.tfevents.1666978568.allennlp-cirrascale-01.reviz.ai2.in.1011.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad7f88108614985a48ff44cf54bb2925c76c359bc311e81ba151639072cb2dd7 +size 4511 diff --git a/propositional_logic/ln_mlm/runs/Oct27_11-38-51_allennlp-cirrascale-01.reviz.ai2.in/events.out.tfevents.1666978564.allennlp-cirrascale-01.reviz.ai2.in.1011.0 b/propositional_logic/ln_mlm/runs/Oct27_11-38-51_allennlp-cirrascale-01.reviz.ai2.in/events.out.tfevents.1666978564.allennlp-cirrascale-01.reviz.ai2.in.1011.0 new file mode 100644 index 0000000000000000000000000000000000000000..77f37b0fac78f540ebe056032d61c545c9c859ff --- /dev/null +++ b/propositional_logic/ln_mlm/runs/Oct27_11-38-51_allennlp-cirrascale-01.reviz.ai2.in/events.out.tfevents.1666978564.allennlp-cirrascale-01.reviz.ai2.in.1011.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f44089b3692bf07c2c76a18ffe9a2031072c563542a1bbb3035e8f30047b68d +size 35546 diff --git a/propositional_logic/ln_mlm/training_args.bin b/propositional_logic/ln_mlm/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..8d6510e69f7f4b4e104614b057ec57a7a631e373 --- /dev/null +++ b/propositional_logic/ln_mlm/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f21b6f934872fd1ff44db7303058ff9e1201e5175891842ccc1d594dd226d3ae +size 2799 diff --git a/propositional_logic/lt_alm/checkpoint-100000/config.json b/propositional_logic/lt_alm/checkpoint-100000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/lt_alm/checkpoint-100000/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 
1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_alm/checkpoint-100000/optimizer.pt b/propositional_logic/lt_alm/checkpoint-100000/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..fd0a3de9da921db710909d0fe3a4c50fd6568cd9 --- /dev/null +++ b/propositional_logic/lt_alm/checkpoint-100000/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c779ab9a3d1939b649ebf721d0af1e4e20143515f2a88db3ac3e9e33c2bfc2f +size 694198065 diff --git a/propositional_logic/lt_alm/checkpoint-100000/pytorch_model.bin b/propositional_logic/lt_alm/checkpoint-100000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..d8604f3a75823ef3631155b986d4d9cf53df986a --- /dev/null +++ b/propositional_logic/lt_alm/checkpoint-100000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f124fd83e9e8f3118d339e6d8a19797a22ffb26b6972ea38c5239a967fb1250 +size 359700713 diff --git a/propositional_logic/lt_alm/checkpoint-100000/rng_state.pth b/propositional_logic/lt_alm/checkpoint-100000/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..4a3150d29fa8c09ae8334d72c8db783e38d463a0 --- /dev/null +++ b/propositional_logic/lt_alm/checkpoint-100000/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ace806f1cf6b8fea340c8dcb0738206e80dd48ed2f8e8fdeb7d956fbc92c63b +size 14503 diff --git a/propositional_logic/lt_alm/checkpoint-100000/scaler.pt b/propositional_logic/lt_alm/checkpoint-100000/scaler.pt new file mode 100644 index 0000000000000000000000000000000000000000..40ebe2d68777171491b4742ce7d196b125693729 --- /dev/null +++ b/propositional_logic/lt_alm/checkpoint-100000/scaler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bfb9f24f635e91458f1872f8625881995ec548e5b089e8b716074ac10ba4308 +size 559 diff --git a/propositional_logic/lt_alm/checkpoint-100000/scheduler.pt b/propositional_logic/lt_alm/checkpoint-100000/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..c451d8bad3816a4910e1b57458c1c1386fc59d1a --- /dev/null +++ b/propositional_logic/lt_alm/checkpoint-100000/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d3a2780a4009edea40751d0cb4f0b7a6c72e9ad8fa677d488e630fd61edd523 +size 623 diff --git a/propositional_logic/lt_alm/checkpoint-100000/trainer_state.json b/propositional_logic/lt_alm/checkpoint-100000/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..3e1f8a8c5babbaef7d6ffc02c68d938168b0cd0c --- /dev/null +++ b/propositional_logic/lt_alm/checkpoint-100000/trainer_state.json @@ -0,0 +1,1216 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 1.0, + "global_step": 100000, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.01, + "learning_rate": 2.9999999999999997e-05, + 
"loss": 1.3008, + "step": 500 + }, + { + "epoch": 0.01, + "learning_rate": 5.9999999999999995e-05, + "loss": 0.632, + "step": 1000 + }, + { + "epoch": 0.01, + "learning_rate": 8.999999999999999e-05, + "loss": 0.568, + "step": 1500 + }, + { + "epoch": 0.02, + "learning_rate": 0.00011999999999999999, + "loss": 0.4923, + "step": 2000 + }, + { + "epoch": 0.03, + "learning_rate": 0.00015, + "loss": 0.444, + "step": 2500 + }, + { + "epoch": 0.03, + "learning_rate": 0.00017999999999999998, + "loss": 0.4349, + "step": 3000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00020999999999999998, + "loss": 0.431, + "step": 3500 + }, + { + "epoch": 0.04, + "learning_rate": 0.00023999999999999998, + "loss": 0.4373, + "step": 4000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00027, + "loss": 0.4326, + "step": 4500 + }, + { + "epoch": 0.05, + "learning_rate": 0.0003, + "loss": 0.4285, + "step": 5000 + }, + { + "epoch": 0.06, + "learning_rate": 0.00033, + "loss": 0.4293, + "step": 5500 + }, + { + "epoch": 0.06, + "learning_rate": 0.00035999999999999997, + "loss": 0.4299, + "step": 6000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00039, + "loss": 0.4273, + "step": 6500 + }, + { + "epoch": 0.07, + "learning_rate": 0.00041999999999999996, + "loss": 0.427, + "step": 7000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00045, + "loss": 0.4243, + "step": 7500 + }, + { + "epoch": 0.08, + "learning_rate": 0.00047999999999999996, + "loss": 0.4261, + "step": 8000 + }, + { + "epoch": 0.09, + "learning_rate": 0.0005099999999999999, + "loss": 0.4256, + "step": 8500 + }, + { + "epoch": 0.09, + "learning_rate": 0.00054, + "loss": 0.4256, + "step": 9000 + }, + { + "epoch": 0.1, + "learning_rate": 0.00057, + "loss": 0.4229, + "step": 9500 + }, + { + "epoch": 0.1, + "learning_rate": 0.0006, + "loss": 0.4216, + "step": 10000 + }, + { + "epoch": 0.1, + "learning_rate": 0.0005966666666666667, + "loss": 0.4225, + "step": 10500 + }, + { + "epoch": 0.11, + "learning_rate": 0.0005933333333333333, + "loss": 0.42, + "step": 11000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005899999999999999, + "loss": 0.4192, + "step": 11500 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005866666666666665, + "loss": 0.4186, + "step": 12000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005833333333333333, + "loss": 0.4179, + "step": 12500 + }, + { + "epoch": 0.13, + "learning_rate": 0.00058, + "loss": 0.4173, + "step": 13000 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005766666666666666, + "loss": 0.417, + "step": 13500 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005733333333333334, + "loss": 0.4166, + "step": 14000 + }, + { + "epoch": 0.14, + "learning_rate": 0.00057, + "loss": 0.4168, + "step": 14500 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005666666666666666, + "loss": 0.4167, + "step": 15000 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005633333333333333, + "loss": 0.4169, + "step": 15500 + }, + { + "epoch": 0.16, + "learning_rate": 0.00056, + "loss": 0.4163, + "step": 16000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005566666666666667, + "loss": 0.4163, + "step": 16500 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005533333333333333, + "loss": 0.4161, + "step": 17000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005499999999999999, + "loss": 0.416, + "step": 17500 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005466666666666667, + "loss": 0.4159, + "step": 18000 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005433399999999999, + "loss": 0.4159, + "step": 18500 + }, + { + "epoch": 0.19, + "learning_rate": 
0.0005400066666666666, + "loss": 0.4159, + "step": 19000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005366733333333333, + "loss": 0.4157, + "step": 19500 + }, + { + "epoch": 0.2, + "learning_rate": 0.00053334, + "loss": 0.4159, + "step": 20000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005300133333333333, + "loss": 0.4158, + "step": 20500 + }, + { + "epoch": 0.21, + "learning_rate": 0.00052668, + "loss": 0.4158, + "step": 21000 + }, + { + "epoch": 0.21, + "learning_rate": 0.0005233466666666667, + "loss": 0.4158, + "step": 21500 + }, + { + "epoch": 0.22, + "learning_rate": 0.0005200133333333332, + "loss": 0.4158, + "step": 22000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005166866666666667, + "loss": 0.4156, + "step": 22500 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005133533333333333, + "loss": 0.4155, + "step": 23000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005100199999999999, + "loss": 0.4156, + "step": 23500 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005066866666666666, + "loss": 0.4157, + "step": 24000 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005033599999999999, + "loss": 0.4157, + "step": 24500 + }, + { + "epoch": 0.25, + "learning_rate": 0.0005000266666666666, + "loss": 0.4157, + "step": 25000 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004966933333333332, + "loss": 0.4156, + "step": 25500 + }, + { + "epoch": 0.26, + "learning_rate": 0.00049336, + "loss": 0.4154, + "step": 26000 + }, + { + "epoch": 0.27, + "learning_rate": 0.0004900333333333333, + "loss": 0.4156, + "step": 26500 + }, + { + "epoch": 0.27, + "learning_rate": 0.00048669999999999996, + "loss": 0.4156, + "step": 27000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004833666666666666, + "loss": 0.4156, + "step": 27500 + }, + { + "epoch": 0.28, + "learning_rate": 0.00048003333333333326, + "loss": 0.4156, + "step": 28000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004767066666666666, + "loss": 0.4156, + "step": 28500 + }, + { + "epoch": 0.29, + "learning_rate": 0.0004733733333333333, + "loss": 0.4155, + "step": 29000 + }, + { + "epoch": 0.29, + "learning_rate": 0.00047003999999999997, + "loss": 0.4155, + "step": 29500 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046670666666666664, + "loss": 0.4155, + "step": 30000 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046337999999999994, + "loss": 0.4155, + "step": 30500 + }, + { + "epoch": 0.31, + "learning_rate": 0.0004600466666666666, + "loss": 0.4155, + "step": 31000 + }, + { + "epoch": 0.32, + "learning_rate": 0.0004567133333333333, + "loss": 0.4153, + "step": 31500 + }, + { + "epoch": 0.32, + "learning_rate": 0.00045337999999999997, + "loss": 0.4154, + "step": 32000 + }, + { + "epoch": 0.33, + "learning_rate": 0.00045005333333333333, + "loss": 0.4155, + "step": 32500 + }, + { + "epoch": 0.33, + "learning_rate": 0.00044672, + "loss": 0.4155, + "step": 33000 + }, + { + "epoch": 0.34, + "learning_rate": 0.00044338666666666663, + "loss": 0.4154, + "step": 33500 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004400533333333333, + "loss": 0.4153, + "step": 34000 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004367266666666666, + "loss": 0.4154, + "step": 34500 + }, + { + "epoch": 0.35, + "learning_rate": 0.0004333933333333333, + "loss": 0.4154, + "step": 35000 + }, + { + "epoch": 0.35, + "learning_rate": 0.00043005999999999996, + "loss": 0.4154, + "step": 35500 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042672666666666663, + "loss": 0.4154, + "step": 36000 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042339999999999994, 
+ "loss": 0.4154, + "step": 36500 + }, + { + "epoch": 0.37, + "learning_rate": 0.0004200666666666666, + "loss": 0.4154, + "step": 37000 + }, + { + "epoch": 0.38, + "learning_rate": 0.00041673333333333334, + "loss": 0.4155, + "step": 37500 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004133999999999999, + "loss": 0.4154, + "step": 38000 + }, + { + "epoch": 0.39, + "learning_rate": 0.0004100733333333333, + "loss": 0.4153, + "step": 38500 + }, + { + "epoch": 0.39, + "learning_rate": 0.00040673999999999994, + "loss": 0.4155, + "step": 39000 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004034066666666666, + "loss": 0.4154, + "step": 39500 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004000733333333333, + "loss": 0.4153, + "step": 40000 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003967466666666666, + "loss": 0.4155, + "step": 40500 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003934133333333333, + "loss": 0.4155, + "step": 41000 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039007999999999995, + "loss": 0.4154, + "step": 41500 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038674666666666663, + "loss": 0.4153, + "step": 42000 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038342, + "loss": 0.4155, + "step": 42500 + }, + { + "epoch": 0.43, + "learning_rate": 0.00038008666666666666, + "loss": 0.4154, + "step": 43000 + }, + { + "epoch": 0.43, + "learning_rate": 0.00037675333333333334, + "loss": 0.4155, + "step": 43500 + }, + { + "epoch": 0.44, + "learning_rate": 0.00037341999999999996, + "loss": 0.4155, + "step": 44000 + }, + { + "epoch": 0.45, + "learning_rate": 0.0003700933333333333, + "loss": 0.4153, + "step": 44500 + }, + { + "epoch": 0.45, + "learning_rate": 0.00036675999999999994, + "loss": 0.4154, + "step": 45000 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003634266666666666, + "loss": 0.4154, + "step": 45500 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003600933333333333, + "loss": 0.4155, + "step": 46000 + }, + { + "epoch": 0.47, + "learning_rate": 0.00035676666666666665, + "loss": 0.4153, + "step": 46500 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003534333333333333, + "loss": 0.4154, + "step": 47000 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003501, + "loss": 0.4154, + "step": 47500 + }, + { + "epoch": 0.48, + "learning_rate": 0.0003467666666666667, + "loss": 0.4153, + "step": 48000 + }, + { + "epoch": 0.48, + "learning_rate": 0.00034344, + "loss": 0.4155, + "step": 48500 + }, + { + "epoch": 0.49, + "learning_rate": 0.00034010666666666665, + "loss": 0.4154, + "step": 49000 + }, + { + "epoch": 0.49, + "learning_rate": 0.0003367733333333333, + "loss": 0.4154, + "step": 49500 + }, + { + "epoch": 0.5, + "learning_rate": 0.00033343999999999995, + "loss": 0.4155, + "step": 50000 + }, + { + "epoch": 0.51, + "learning_rate": 0.00033011333333333325, + "loss": 0.4154, + "step": 50500 + }, + { + "epoch": 0.51, + "learning_rate": 0.00032677999999999993, + "loss": 0.4154, + "step": 51000 + }, + { + "epoch": 0.52, + "learning_rate": 0.0003234466666666666, + "loss": 0.4153, + "step": 51500 + }, + { + "epoch": 0.52, + "learning_rate": 0.00032011333333333334, + "loss": 0.4155, + "step": 52000 + }, + { + "epoch": 0.53, + "learning_rate": 0.00031678666666666664, + "loss": 0.4154, + "step": 52500 + }, + { + "epoch": 0.53, + "learning_rate": 0.0003134533333333333, + "loss": 0.4154, + "step": 53000 + }, + { + "epoch": 0.54, + "learning_rate": 0.00031012, + "loss": 0.4153, + "step": 53500 + }, + { + "epoch": 0.54, + "learning_rate": 0.00030678666666666667, + "loss": 
0.4154, + "step": 54000 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030345999999999997, + "loss": 0.4154, + "step": 54500 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030012666666666665, + "loss": 0.4154, + "step": 55000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002967933333333333, + "loss": 0.4154, + "step": 55500 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029345999999999994, + "loss": 0.4154, + "step": 56000 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029014, + "loss": 0.4154, + "step": 56500 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028680666666666666, + "loss": 0.4153, + "step": 57000 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028347333333333334, + "loss": 0.4154, + "step": 57500 + }, + { + "epoch": 0.58, + "learning_rate": 0.00028013999999999996, + "loss": 0.4154, + "step": 58000 + }, + { + "epoch": 0.58, + "learning_rate": 0.00027680666666666663, + "loss": 0.4155, + "step": 58500 + }, + { + "epoch": 0.59, + "learning_rate": 0.0002734733333333333, + "loss": 0.4153, + "step": 59000 + }, + { + "epoch": 0.59, + "learning_rate": 0.00027014, + "loss": 0.4154, + "step": 59500 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026680666666666666, + "loss": 0.4154, + "step": 60000 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026347999999999996, + "loss": 0.4153, + "step": 60500 + }, + { + "epoch": 0.61, + "learning_rate": 0.00026014666666666664, + "loss": 0.4154, + "step": 61000 + }, + { + "epoch": 0.61, + "learning_rate": 0.0002568133333333333, + "loss": 0.4154, + "step": 61500 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025348, + "loss": 0.4153, + "step": 62000 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025015333333333335, + "loss": 0.4154, + "step": 62500 + }, + { + "epoch": 0.63, + "learning_rate": 0.00024681999999999997, + "loss": 0.4154, + "step": 63000 + }, + { + "epoch": 0.64, + "learning_rate": 0.00024348666666666665, + "loss": 0.4152, + "step": 63500 + }, + { + "epoch": 0.64, + "learning_rate": 0.0002401533333333333, + "loss": 0.4153, + "step": 64000 + }, + { + "epoch": 0.65, + "learning_rate": 0.00023682666666666665, + "loss": 0.4153, + "step": 64500 + }, + { + "epoch": 0.65, + "learning_rate": 0.0002334933333333333, + "loss": 0.4154, + "step": 65000 + }, + { + "epoch": 0.66, + "learning_rate": 0.00023015999999999998, + "loss": 0.4152, + "step": 65500 + }, + { + "epoch": 0.66, + "learning_rate": 0.00022682666666666665, + "loss": 0.4153, + "step": 66000 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022349999999999998, + "loss": 0.4153, + "step": 66500 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022016666666666666, + "loss": 0.4153, + "step": 67000 + }, + { + "epoch": 0.68, + "learning_rate": 0.0002168333333333333, + "loss": 0.4153, + "step": 67500 + }, + { + "epoch": 0.68, + "learning_rate": 0.00021350666666666667, + "loss": 0.4153, + "step": 68000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00021017333333333332, + "loss": 0.4155, + "step": 68500 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020684, + "loss": 0.4153, + "step": 69000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020350666666666667, + "loss": 0.4152, + "step": 69500 + }, + { + "epoch": 0.7, + "learning_rate": 0.00020017333333333332, + "loss": 0.4153, + "step": 70000 + }, + { + "epoch": 0.7, + "learning_rate": 0.00019684, + "loss": 0.4154, + "step": 70500 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019350666666666667, + "loss": 0.4154, + "step": 71000 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019018, + "loss": 0.4153, + "step": 71500 + }, + 
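The hunks above add complete GPT-2 checkpoint directories for the lt_alm model: config.json plus pytorch_model.bin, optimizer.pt, scheduler.pt, scaler.pt and rng_state.pth, with the large binaries committed as Git LFS pointer stubs. As a minimal illustrative sketch, not part of the committed diff itself: assuming the repository has been cloned, `git lfs pull` has materialized the binaries behind those pointer stubs, and a transformers version compatible with the recorded 4.10.3 is installed, such a checkpoint directory can be loaded directly:

from transformers import GPT2Config, GPT2LMHeadModel

# Any of the GPT-2 checkpoint directories added in this diff works here;
# their config.json blobs are identical (same index hash ff8809f9ee...).
ckpt_dir = "propositional_logic/lt_alm/checkpoint-100000"

config = GPT2Config.from_pretrained(ckpt_dir)      # parses config.json (n_layer=12, n_embd=768, vocab_size=1200)
model = GPT2LMHeadModel.from_pretrained(ckpt_dir)  # loads pytorch_model.bin (the ~360 MB LFS object)

print(config.n_layer, config.n_embd, config.vocab_size)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")

The ln_mlm directory is the exception: its config (at the top of this series of hunks) is a RoBERTa config, so it would presumably be loaded with a masked-LM class such as RobertaForMaskedLM rather than GPT2LMHeadModel.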
{ + "epoch": 0.72, + "learning_rate": 0.00018684666666666667, + "loss": 0.4154, + "step": 72000 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018351333333333332, + "loss": 0.4154, + "step": 72500 + }, + { + "epoch": 0.73, + "learning_rate": 0.00018018, + "loss": 0.4153, + "step": 73000 + }, + { + "epoch": 0.73, + "learning_rate": 0.00017684666666666665, + "loss": 0.4154, + "step": 73500 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017352, + "loss": 0.4154, + "step": 74000 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017018666666666663, + "loss": 0.4154, + "step": 74500 + }, + { + "epoch": 0.75, + "learning_rate": 0.0001668533333333333, + "loss": 0.4154, + "step": 75000 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016352, + "loss": 0.4154, + "step": 75500 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016018666666666663, + "loss": 0.4153, + "step": 76000 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015685333333333333, + "loss": 0.4154, + "step": 76500 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015352666666666663, + "loss": 0.4153, + "step": 77000 + }, + { + "epoch": 0.78, + "learning_rate": 0.0001501933333333333, + "loss": 0.4154, + "step": 77500 + }, + { + "epoch": 0.78, + "learning_rate": 0.00014685999999999999, + "loss": 0.4152, + "step": 78000 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014352666666666666, + "loss": 0.4153, + "step": 78500 + }, + { + "epoch": 0.79, + "learning_rate": 0.0001402, + "loss": 0.4154, + "step": 79000 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013686666666666664, + "loss": 0.4153, + "step": 79500 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013353333333333332, + "loss": 0.4153, + "step": 80000 + }, + { + "epoch": 0.81, + "learning_rate": 0.0001302, + "loss": 0.4154, + "step": 80500 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012686666666666664, + "loss": 0.4155, + "step": 81000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012353333333333332, + "loss": 0.4154, + "step": 81500 + }, + { + "epoch": 0.82, + "learning_rate": 0.00012020666666666665, + "loss": 0.4154, + "step": 82000 + }, + { + "epoch": 0.82, + "learning_rate": 0.00011687333333333332, + "loss": 0.4152, + "step": 82500 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011353999999999999, + "loss": 0.4153, + "step": 83000 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011020666666666665, + "loss": 0.4153, + "step": 83500 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010687333333333331, + "loss": 0.4152, + "step": 84000 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010354666666666666, + "loss": 0.4154, + "step": 84500 + }, + { + "epoch": 0.85, + "learning_rate": 0.00010021333333333332, + "loss": 0.4154, + "step": 85000 + }, + { + "epoch": 0.85, + "learning_rate": 9.688e-05, + "loss": 0.4154, + "step": 85500 + }, + { + "epoch": 0.86, + "learning_rate": 9.354666666666666e-05, + "loss": 0.4152, + "step": 86000 + }, + { + "epoch": 0.86, + "learning_rate": 9.021333333333332e-05, + "loss": 0.4153, + "step": 86500 + }, + { + "epoch": 0.87, + "learning_rate": 8.688666666666666e-05, + "loss": 0.4154, + "step": 87000 + }, + { + "epoch": 0.88, + "learning_rate": 8.355333333333333e-05, + "loss": 0.4152, + "step": 87500 + }, + { + "epoch": 0.88, + "learning_rate": 8.022e-05, + "loss": 0.4153, + "step": 88000 + }, + { + "epoch": 0.89, + "learning_rate": 7.688666666666666e-05, + "loss": 0.4154, + "step": 88500 + }, + { + "epoch": 0.89, + "learning_rate": 7.355333333333333e-05, + "loss": 0.4154, + "step": 89000 + }, + { + "epoch": 0.9, + "learning_rate": 
7.022666666666667e-05, + "loss": 0.4153, + "step": 89500 + }, + { + "epoch": 0.9, + "learning_rate": 6.689333333333333e-05, + "loss": 0.4154, + "step": 90000 + }, + { + "epoch": 0.91, + "learning_rate": 6.356e-05, + "loss": 0.4153, + "step": 90500 + }, + { + "epoch": 0.91, + "learning_rate": 6.022666666666666e-05, + "loss": 0.4153, + "step": 91000 + }, + { + "epoch": 0.92, + "learning_rate": 5.689333333333333e-05, + "loss": 0.4152, + "step": 91500 + }, + { + "epoch": 0.92, + "learning_rate": 5.356666666666666e-05, + "loss": 0.4153, + "step": 92000 + }, + { + "epoch": 0.93, + "learning_rate": 5.023333333333333e-05, + "loss": 0.4154, + "step": 92500 + }, + { + "epoch": 0.93, + "learning_rate": 4.6899999999999995e-05, + "loss": 0.4152, + "step": 93000 + }, + { + "epoch": 0.94, + "learning_rate": 4.3566666666666664e-05, + "loss": 0.4151, + "step": 93500 + }, + { + "epoch": 0.94, + "learning_rate": 4.0233333333333326e-05, + "loss": 0.4153, + "step": 94000 + }, + { + "epoch": 0.94, + "learning_rate": 3.6899999999999996e-05, + "loss": 0.4153, + "step": 94500 + }, + { + "epoch": 0.95, + "learning_rate": 3.357333333333333e-05, + "loss": 0.4153, + "step": 95000 + }, + { + "epoch": 0.95, + "learning_rate": 3.024e-05, + "loss": 0.4152, + "step": 95500 + }, + { + "epoch": 0.96, + "learning_rate": 2.6906666666666664e-05, + "loss": 0.4153, + "step": 96000 + }, + { + "epoch": 0.96, + "learning_rate": 2.357333333333333e-05, + "loss": 0.4153, + "step": 96500 + }, + { + "epoch": 0.97, + "learning_rate": 2.0246666666666664e-05, + "loss": 0.4153, + "step": 97000 + }, + { + "epoch": 0.97, + "learning_rate": 1.6913333333333333e-05, + "loss": 0.4153, + "step": 97500 + }, + { + "epoch": 0.98, + "learning_rate": 1.3579999999999997e-05, + "loss": 0.4153, + "step": 98000 + }, + { + "epoch": 0.98, + "learning_rate": 1.0246666666666666e-05, + "loss": 0.4153, + "step": 98500 + }, + { + "epoch": 0.99, + "learning_rate": 6.913333333333333e-06, + "loss": 0.4153, + "step": 99000 + }, + { + "epoch": 0.99, + "learning_rate": 3.58e-06, + "loss": 0.4152, + "step": 99500 + }, + { + "epoch": 1.0, + "learning_rate": 2.533333333333333e-07, + "loss": 0.4153, + "step": 100000 + } + ], + "max_steps": 100000, + "num_train_epochs": 1, + "total_flos": 4.274433427739443e+19, + "trial_name": null, + "trial_params": null +} diff --git a/propositional_logic/lt_alm/checkpoint-100000/training_args.bin b/propositional_logic/lt_alm/checkpoint-100000/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..958ab44b86bef1f2099dbd539a747d080cce5d62 --- /dev/null +++ b/propositional_logic/lt_alm/checkpoint-100000/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5ac725474a043e56f1f775f1db8ecef5606d8ab95da29e9391f919b2d60d457 +size 2735 diff --git a/propositional_logic/lt_alm/config.json b/propositional_logic/lt_alm/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/lt_alm/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + 
"summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_alm/pytorch_model.bin b/propositional_logic/lt_alm/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..d8604f3a75823ef3631155b986d4d9cf53df986a --- /dev/null +++ b/propositional_logic/lt_alm/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f124fd83e9e8f3118d339e6d8a19797a22ffb26b6972ea38c5239a967fb1250 +size 359700713 diff --git a/propositional_logic/lt_alm/runs/Feb20_15-40-53_allennlp-server4/1645484343.3786743/events.out.tfevents.1645484343.allennlp-server4.3588209.1 b/propositional_logic/lt_alm/runs/Feb20_15-40-53_allennlp-server4/1645484343.3786743/events.out.tfevents.1645484343.allennlp-server4.3588209.1 new file mode 100644 index 0000000000000000000000000000000000000000..7aca01c63bf7fd7a719c044e6e9c48b83f10009a --- /dev/null +++ b/propositional_logic/lt_alm/runs/Feb20_15-40-53_allennlp-server4/1645484343.3786743/events.out.tfevents.1645484343.allennlp-server4.3588209.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15db4a80e37335462e0e68f688272b065df9f9d86ad22dc2387fe0e8ffd4d06d +size 4393 diff --git a/propositional_logic/lt_alm/runs/Feb20_15-40-53_allennlp-server4/events.out.tfevents.1645484338.allennlp-server4.3588209.0 b/propositional_logic/lt_alm/runs/Feb20_15-40-53_allennlp-server4/events.out.tfevents.1645484338.allennlp-server4.3588209.0 new file mode 100644 index 0000000000000000000000000000000000000000..f0638d9a6b254a7531b707a60482f67ee34fd1b2 --- /dev/null +++ b/propositional_logic/lt_alm/runs/Feb20_15-40-53_allennlp-server4/events.out.tfevents.1645484338.allennlp-server4.3588209.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5ea8838b7af5cb1a4a2175459b331b5357598e22491db5abeb3188ba53ce1d7 +size 5058 diff --git a/propositional_logic/lt_alm/runs/Feb22_12-59-13_allennlp-server4/1645647176.6903071/events.out.tfevents.1645647176.allennlp-server4.336403.1 b/propositional_logic/lt_alm/runs/Feb22_12-59-13_allennlp-server4/1645647176.6903071/events.out.tfevents.1645647176.allennlp-server4.336403.1 new file mode 100644 index 0000000000000000000000000000000000000000..bfe87badcdd0d78ca105dd9e80ae794a0ddc70bf --- /dev/null +++ b/propositional_logic/lt_alm/runs/Feb22_12-59-13_allennlp-server4/1645647176.6903071/events.out.tfevents.1645647176.allennlp-server4.336403.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e92f6692ecc61ed5c53a498564c62a6ac37eb85e43285da46182bbfef075cfe5 +size 4393 diff --git a/propositional_logic/lt_alm/runs/Feb22_12-59-13_allennlp-server4/events.out.tfevents.1645647171.allennlp-server4.336403.0 b/propositional_logic/lt_alm/runs/Feb22_12-59-13_allennlp-server4/events.out.tfevents.1645647171.allennlp-server4.336403.0 new file mode 100644 index 0000000000000000000000000000000000000000..5b980a1a53958d5d5269dcce73d79a0cfdd9ba97 --- /dev/null +++ b/propositional_logic/lt_alm/runs/Feb22_12-59-13_allennlp-server4/events.out.tfevents.1645647171.allennlp-server4.336403.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06a75521654ba6fc727c3b4f8062862c9fa91633b0596eb71cd3f58e5a2ed164 +size 35594 diff --git 
a/propositional_logic/lt_alm/training_args.bin b/propositional_logic/lt_alm/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..958ab44b86bef1f2099dbd539a747d080cce5d62 --- /dev/null +++ b/propositional_logic/lt_alm/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5ac725474a043e56f1f775f1db8ecef5606d8ab95da29e9391f919b2d60d457 +size 2735 diff --git a/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/config.json b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/optimizer.pt b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..27f4ff3fed51b0673738d23a38d0563f69ddec38 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd48b4b729cef50798aace235354f92c04defb657372dd7fade3803f53b1ce43 +size 694198065 diff --git a/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/pytorch_model.bin b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..c24a5fc1a6c097c5b1d29e21b8ffe0906d5ac611 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:044620c7cdb9a56568ca95b7715f08ab4b1ebe466efc936ac34c69aa496700cf +size 359700713 diff --git a/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/rng_state.pth b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..5206cac95e4215a367485236c10528a3c00afb68 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:094559b9c2dfa71e135104716de983b855fdf5edd6d1838d723e40e786b2a786 +size 14503 diff --git a/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/scaler.pt b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/scaler.pt new file mode 100644 index 0000000000000000000000000000000000000000..227cb46a31084c8420f0bc37a070c36ec29a9f17 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/scaler.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cbd17ba3fcebe8d211b2d455e4f630394c54494f8e62d632dbd6c66c30976403 +size 559 diff --git a/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/scheduler.pt b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..15945f035518eb624e24acd009d0fee087074c8e --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a150dd417a024836c3dc23fd696b007ee1fc159e8de0d8ec1a517cf40baac2ae +size 623 diff --git a/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/trainer_state.json b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..1fcd9abf7365354107a675a316bda2a31bed4ec4 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/trainer_state.json @@ -0,0 +1,1216 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 1.0, + "global_step": 100000, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.01, + "learning_rate": 2.9999999999999997e-05, + "loss": 1.302, + "step": 500 + }, + { + "epoch": 0.01, + "learning_rate": 5.9999999999999995e-05, + "loss": 0.6367, + "step": 1000 + }, + { + "epoch": 0.01, + "learning_rate": 8.999999999999999e-05, + "loss": 0.5862, + "step": 1500 + }, + { + "epoch": 0.02, + "learning_rate": 0.00011999999999999999, + "loss": 0.5642, + "step": 2000 + }, + { + "epoch": 0.03, + "learning_rate": 0.00015, + "loss": 0.5646, + "step": 2500 + }, + { + "epoch": 0.03, + "learning_rate": 0.00017999999999999998, + "loss": 0.5588, + "step": 3000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00020999999999999998, + "loss": 0.5619, + "step": 3500 + }, + { + "epoch": 0.04, + "learning_rate": 0.00023999999999999998, + "loss": 0.5606, + "step": 4000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00027, + "loss": 0.5595, + "step": 4500 + }, + { + "epoch": 0.05, + "learning_rate": 0.0003, + "loss": 0.5576, + "step": 5000 + }, + { + "epoch": 0.06, + "learning_rate": 0.00033, + "loss": 0.557, + "step": 5500 + }, + { + "epoch": 0.06, + "learning_rate": 0.00035999999999999997, + "loss": 0.5564, + "step": 6000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00039, + "loss": 0.56, + "step": 6500 + }, + { + "epoch": 0.07, + "learning_rate": 0.00041999999999999996, + "loss": 0.5536, + "step": 7000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00045, + "loss": 0.5529, + "step": 7500 + }, + { + "epoch": 0.08, + "learning_rate": 0.00047999999999999996, + "loss": 0.5543, + "step": 8000 + }, + { + "epoch": 0.09, + "learning_rate": 0.0005099999999999999, + "loss": 0.5512, + "step": 8500 + }, + { + "epoch": 0.09, + "learning_rate": 0.00054, + "loss": 0.5521, + "step": 9000 + }, + { + "epoch": 0.1, + "learning_rate": 0.00057, + "loss": 0.5526, + "step": 9500 + }, + { + "epoch": 0.1, + "learning_rate": 0.0006, + "loss": 0.5508, + "step": 10000 + }, + { + "epoch": 0.1, + "learning_rate": 0.0005966666666666667, + "loss": 0.552, + "step": 10500 + }, + { + "epoch": 0.11, + "learning_rate": 0.0005933333333333333, + "loss": 0.5517, + "step": 11000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005899999999999999, + "loss": 0.549, + "step": 11500 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005866666666666665, + "loss": 0.549, + "step": 12000 + }, + { + 
"epoch": 0.12, + "learning_rate": 0.0005833333333333333, + "loss": 0.5491, + "step": 12500 + }, + { + "epoch": 0.13, + "learning_rate": 0.00058, + "loss": 0.5486, + "step": 13000 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005766666666666666, + "loss": 0.5484, + "step": 13500 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005733333333333334, + "loss": 0.5487, + "step": 14000 + }, + { + "epoch": 0.14, + "learning_rate": 0.00057, + "loss": 0.5483, + "step": 14500 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005666666666666666, + "loss": 0.5487, + "step": 15000 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005633333333333333, + "loss": 0.5485, + "step": 15500 + }, + { + "epoch": 0.16, + "learning_rate": 0.00056, + "loss": 0.5482, + "step": 16000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005566666666666667, + "loss": 0.5481, + "step": 16500 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005533333333333333, + "loss": 0.5478, + "step": 17000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005499999999999999, + "loss": 0.548, + "step": 17500 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005466733333333333, + "loss": 0.5478, + "step": 18000 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005433399999999999, + "loss": 0.5477, + "step": 18500 + }, + { + "epoch": 0.19, + "learning_rate": 0.0005400066666666666, + "loss": 0.5479, + "step": 19000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005366733333333333, + "loss": 0.5479, + "step": 19500 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005333466666666666, + "loss": 0.5477, + "step": 20000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005300133333333333, + "loss": 0.5478, + "step": 20500 + }, + { + "epoch": 0.21, + "learning_rate": 0.00052668, + "loss": 0.5476, + "step": 21000 + }, + { + "epoch": 0.21, + "learning_rate": 0.0005233466666666667, + "loss": 0.5479, + "step": 21500 + }, + { + "epoch": 0.22, + "learning_rate": 0.0005200199999999999, + "loss": 0.5475, + "step": 22000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005166866666666667, + "loss": 0.5475, + "step": 22500 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005133533333333333, + "loss": 0.5476, + "step": 23000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005100199999999999, + "loss": 0.5478, + "step": 23500 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005066933333333333, + "loss": 0.5475, + "step": 24000 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005033599999999999, + "loss": 0.5475, + "step": 24500 + }, + { + "epoch": 0.25, + "learning_rate": 0.0005000266666666666, + "loss": 0.5475, + "step": 25000 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004966933333333332, + "loss": 0.5478, + "step": 25500 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004933666666666666, + "loss": 0.5475, + "step": 26000 + }, + { + "epoch": 0.27, + "learning_rate": 0.0004900333333333333, + "loss": 0.5475, + "step": 26500 + }, + { + "epoch": 0.27, + "learning_rate": 0.00048669999999999996, + "loss": 0.5474, + "step": 27000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004833666666666666, + "loss": 0.5475, + "step": 27500 + }, + { + "epoch": 0.28, + "learning_rate": 0.00048004, + "loss": 0.5476, + "step": 28000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004767066666666666, + "loss": 0.5475, + "step": 28500 + }, + { + "epoch": 0.29, + "learning_rate": 0.0004733733333333333, + "loss": 0.5475, + "step": 29000 + }, + { + "epoch": 0.29, + "learning_rate": 0.00047003999999999997, + "loss": 0.5474, + "step": 29500 + }, + { + "epoch": 0.3, + "learning_rate": 
0.00046670666666666664, + "loss": 0.5476, + "step": 30000 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046337999999999994, + "loss": 0.5474, + "step": 30500 + }, + { + "epoch": 0.31, + "learning_rate": 0.0004600466666666666, + "loss": 0.5474, + "step": 31000 + }, + { + "epoch": 0.32, + "learning_rate": 0.0004567133333333333, + "loss": 0.5474, + "step": 31500 + }, + { + "epoch": 0.32, + "learning_rate": 0.00045337999999999997, + "loss": 0.5475, + "step": 32000 + }, + { + "epoch": 0.33, + "learning_rate": 0.00045005333333333333, + "loss": 0.5474, + "step": 32500 + }, + { + "epoch": 0.33, + "learning_rate": 0.00044672, + "loss": 0.5474, + "step": 33000 + }, + { + "epoch": 0.34, + "learning_rate": 0.00044338666666666663, + "loss": 0.5475, + "step": 33500 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004400533333333333, + "loss": 0.5474, + "step": 34000 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004367266666666666, + "loss": 0.5474, + "step": 34500 + }, + { + "epoch": 0.35, + "learning_rate": 0.0004333933333333333, + "loss": 0.5474, + "step": 35000 + }, + { + "epoch": 0.35, + "learning_rate": 0.00043005999999999996, + "loss": 0.5474, + "step": 35500 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042672666666666663, + "loss": 0.5473, + "step": 36000 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042339999999999994, + "loss": 0.5474, + "step": 36500 + }, + { + "epoch": 0.37, + "learning_rate": 0.0004200666666666666, + "loss": 0.5474, + "step": 37000 + }, + { + "epoch": 0.38, + "learning_rate": 0.00041673333333333334, + "loss": 0.5473, + "step": 37500 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004133999999999999, + "loss": 0.5474, + "step": 38000 + }, + { + "epoch": 0.39, + "learning_rate": 0.0004100733333333333, + "loss": 0.5473, + "step": 38500 + }, + { + "epoch": 0.39, + "learning_rate": 0.00040673999999999994, + "loss": 0.5474, + "step": 39000 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004034066666666666, + "loss": 0.5473, + "step": 39500 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004000733333333333, + "loss": 0.5474, + "step": 40000 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003967466666666666, + "loss": 0.5474, + "step": 40500 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003934133333333333, + "loss": 0.5474, + "step": 41000 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039007999999999995, + "loss": 0.5474, + "step": 41500 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038674666666666663, + "loss": 0.5474, + "step": 42000 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038342, + "loss": 0.5474, + "step": 42500 + }, + { + "epoch": 0.43, + "learning_rate": 0.00038008666666666666, + "loss": 0.5473, + "step": 43000 + }, + { + "epoch": 0.43, + "learning_rate": 0.00037675333333333334, + "loss": 0.5473, + "step": 43500 + }, + { + "epoch": 0.44, + "learning_rate": 0.00037341999999999996, + "loss": 0.5474, + "step": 44000 + }, + { + "epoch": 0.45, + "learning_rate": 0.0003700933333333333, + "loss": 0.5473, + "step": 44500 + }, + { + "epoch": 0.45, + "learning_rate": 0.00036675999999999994, + "loss": 0.5473, + "step": 45000 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003634266666666666, + "loss": 0.5474, + "step": 45500 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003600933333333333, + "loss": 0.5473, + "step": 46000 + }, + { + "epoch": 0.47, + "learning_rate": 0.00035676666666666665, + "loss": 0.5474, + "step": 46500 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003534333333333333, + "loss": 0.5474, + "step": 47000 + }, + { + "epoch": 0.47, + 
"learning_rate": 0.0003501, + "loss": 0.5473, + "step": 47500 + }, + { + "epoch": 0.48, + "learning_rate": 0.0003467666666666667, + "loss": 0.5473, + "step": 48000 + }, + { + "epoch": 0.48, + "learning_rate": 0.00034344, + "loss": 0.5473, + "step": 48500 + }, + { + "epoch": 0.49, + "learning_rate": 0.00034010666666666665, + "loss": 0.5474, + "step": 49000 + }, + { + "epoch": 0.49, + "learning_rate": 0.0003367733333333333, + "loss": 0.5474, + "step": 49500 + }, + { + "epoch": 0.5, + "learning_rate": 0.00033343999999999995, + "loss": 0.5474, + "step": 50000 + }, + { + "epoch": 0.51, + "learning_rate": 0.00033011333333333325, + "loss": 0.5473, + "step": 50500 + }, + { + "epoch": 0.51, + "learning_rate": 0.00032677999999999993, + "loss": 0.5473, + "step": 51000 + }, + { + "epoch": 0.52, + "learning_rate": 0.0003234466666666666, + "loss": 0.5473, + "step": 51500 + }, + { + "epoch": 0.52, + "learning_rate": 0.00032011333333333334, + "loss": 0.5473, + "step": 52000 + }, + { + "epoch": 0.53, + "learning_rate": 0.00031678666666666664, + "loss": 0.5473, + "step": 52500 + }, + { + "epoch": 0.53, + "learning_rate": 0.0003134533333333333, + "loss": 0.5473, + "step": 53000 + }, + { + "epoch": 0.54, + "learning_rate": 0.00031012, + "loss": 0.5473, + "step": 53500 + }, + { + "epoch": 0.54, + "learning_rate": 0.00030678666666666667, + "loss": 0.5474, + "step": 54000 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030345999999999997, + "loss": 0.5473, + "step": 54500 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030012666666666665, + "loss": 0.5473, + "step": 55000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002967933333333333, + "loss": 0.5473, + "step": 55500 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029345999999999994, + "loss": 0.5473, + "step": 56000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002901333333333333, + "loss": 0.5473, + "step": 56500 + }, + { + "epoch": 0.57, + "learning_rate": 0.0002868, + "loss": 0.5473, + "step": 57000 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028346666666666665, + "loss": 0.5473, + "step": 57500 + }, + { + "epoch": 0.58, + "learning_rate": 0.00028013333333333333, + "loss": 0.5473, + "step": 58000 + }, + { + "epoch": 0.58, + "learning_rate": 0.00027680666666666663, + "loss": 0.5473, + "step": 58500 + }, + { + "epoch": 0.59, + "learning_rate": 0.0002734733333333333, + "loss": 0.5473, + "step": 59000 + }, + { + "epoch": 0.59, + "learning_rate": 0.00027014, + "loss": 0.5473, + "step": 59500 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026680666666666666, + "loss": 0.5473, + "step": 60000 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026347999999999996, + "loss": 0.5473, + "step": 60500 + }, + { + "epoch": 0.61, + "learning_rate": 0.00026014666666666664, + "loss": 0.5473, + "step": 61000 + }, + { + "epoch": 0.61, + "learning_rate": 0.0002568133333333333, + "loss": 0.5473, + "step": 61500 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025348, + "loss": 0.5473, + "step": 62000 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025015333333333335, + "loss": 0.5474, + "step": 62500 + }, + { + "epoch": 0.63, + "learning_rate": 0.00024681999999999997, + "loss": 0.5473, + "step": 63000 + }, + { + "epoch": 0.64, + "learning_rate": 0.00024348666666666665, + "loss": 0.5473, + "step": 63500 + }, + { + "epoch": 0.64, + "learning_rate": 0.0002401533333333333, + "loss": 0.5474, + "step": 64000 + }, + { + "epoch": 0.65, + "learning_rate": 0.00023682666666666665, + "loss": 0.5473, + "step": 64500 + }, + { + "epoch": 0.65, + "learning_rate": 
0.0002334933333333333, + "loss": 0.5473, + "step": 65000 + }, + { + "epoch": 0.66, + "learning_rate": 0.00023015999999999998, + "loss": 0.5474, + "step": 65500 + }, + { + "epoch": 0.66, + "learning_rate": 0.00022682666666666665, + "loss": 0.5473, + "step": 66000 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022349999999999998, + "loss": 0.5473, + "step": 66500 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022016666666666666, + "loss": 0.5473, + "step": 67000 + }, + { + "epoch": 0.68, + "learning_rate": 0.0002168333333333333, + "loss": 0.5473, + "step": 67500 + }, + { + "epoch": 0.68, + "learning_rate": 0.00021349999999999999, + "loss": 0.5473, + "step": 68000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00021017333333333332, + "loss": 0.5473, + "step": 68500 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020684, + "loss": 0.5473, + "step": 69000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020350666666666667, + "loss": 0.5472, + "step": 69500 + }, + { + "epoch": 0.7, + "learning_rate": 0.00020017333333333332, + "loss": 0.5473, + "step": 70000 + }, + { + "epoch": 0.7, + "learning_rate": 0.00019684666666666667, + "loss": 0.5474, + "step": 70500 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019351333333333332, + "loss": 0.5473, + "step": 71000 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019018, + "loss": 0.5473, + "step": 71500 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018684666666666667, + "loss": 0.5473, + "step": 72000 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018351999999999998, + "loss": 0.5473, + "step": 72500 + }, + { + "epoch": 0.73, + "learning_rate": 0.0001801933333333333, + "loss": 0.5473, + "step": 73000 + }, + { + "epoch": 0.73, + "learning_rate": 0.00017685999999999998, + "loss": 0.5473, + "step": 73500 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017352666666666663, + "loss": 0.5473, + "step": 74000 + }, + { + "epoch": 0.74, + "learning_rate": 0.0001701933333333333, + "loss": 0.5473, + "step": 74500 + }, + { + "epoch": 0.75, + "learning_rate": 0.00016685999999999998, + "loss": 0.5472, + "step": 75000 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016352666666666663, + "loss": 0.5473, + "step": 75500 + }, + { + "epoch": 0.76, + "learning_rate": 0.0001601933333333333, + "loss": 0.5473, + "step": 76000 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015685999999999999, + "loss": 0.5473, + "step": 76500 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015353333333333332, + "loss": 0.5473, + "step": 77000 + }, + { + "epoch": 0.78, + "learning_rate": 0.00015020666666666665, + "loss": 0.5473, + "step": 77500 + }, + { + "epoch": 0.78, + "learning_rate": 0.00014687333333333332, + "loss": 0.5473, + "step": 78000 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014353999999999997, + "loss": 0.5472, + "step": 78500 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014020666666666667, + "loss": 0.5472, + "step": 79000 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013687999999999998, + "loss": 0.5472, + "step": 79500 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013354666666666665, + "loss": 0.5473, + "step": 80000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00013021333333333333, + "loss": 0.5473, + "step": 80500 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012687999999999998, + "loss": 0.5472, + "step": 81000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012355333333333334, + "loss": 0.5472, + "step": 81500 + }, + { + "epoch": 0.82, + "learning_rate": 0.00012021999999999998, + "loss": 0.5473, + "step": 82000 + }, + { + "epoch": 0.82, + 
"learning_rate": 0.00011688666666666665, + "loss": 0.5472, + "step": 82500 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011355333333333334, + "loss": 0.5472, + "step": 83000 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011022666666666665, + "loss": 0.5473, + "step": 83500 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010689333333333333, + "loss": 0.5473, + "step": 84000 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010355999999999999, + "loss": 0.5472, + "step": 84500 + }, + { + "epoch": 0.85, + "learning_rate": 0.00010022666666666665, + "loss": 0.5473, + "step": 85000 + }, + { + "epoch": 0.85, + "learning_rate": 9.69e-05, + "loss": 0.5472, + "step": 85500 + }, + { + "epoch": 0.86, + "learning_rate": 9.356666666666666e-05, + "loss": 0.5473, + "step": 86000 + }, + { + "epoch": 0.86, + "learning_rate": 9.023333333333332e-05, + "loss": 0.5472, + "step": 86500 + }, + { + "epoch": 0.87, + "learning_rate": 8.69e-05, + "loss": 0.5472, + "step": 87000 + }, + { + "epoch": 0.88, + "learning_rate": 8.357333333333331e-05, + "loss": 0.5472, + "step": 87500 + }, + { + "epoch": 0.88, + "learning_rate": 8.024e-05, + "loss": 0.5472, + "step": 88000 + }, + { + "epoch": 0.89, + "learning_rate": 7.690666666666667e-05, + "loss": 0.5472, + "step": 88500 + }, + { + "epoch": 0.89, + "learning_rate": 7.357333333333333e-05, + "loss": 0.5472, + "step": 89000 + }, + { + "epoch": 0.9, + "learning_rate": 7.024666666666666e-05, + "loss": 0.5472, + "step": 89500 + }, + { + "epoch": 0.9, + "learning_rate": 6.691999999999999e-05, + "loss": 0.5472, + "step": 90000 + }, + { + "epoch": 0.91, + "learning_rate": 6.358666666666667e-05, + "loss": 0.5472, + "step": 90500 + }, + { + "epoch": 0.91, + "learning_rate": 6.025333333333333e-05, + "loss": 0.5472, + "step": 91000 + }, + { + "epoch": 0.92, + "learning_rate": 5.692e-05, + "loss": 0.5472, + "step": 91500 + }, + { + "epoch": 0.92, + "learning_rate": 5.359333333333333e-05, + "loss": 0.5472, + "step": 92000 + }, + { + "epoch": 0.93, + "learning_rate": 5.0259999999999997e-05, + "loss": 0.5472, + "step": 92500 + }, + { + "epoch": 0.93, + "learning_rate": 4.692666666666666e-05, + "loss": 0.5472, + "step": 93000 + }, + { + "epoch": 0.94, + "learning_rate": 4.359333333333333e-05, + "loss": 0.5471, + "step": 93500 + }, + { + "epoch": 0.94, + "learning_rate": 4.026666666666666e-05, + "loss": 0.5473, + "step": 94000 + }, + { + "epoch": 0.94, + "learning_rate": 3.6933333333333334e-05, + "loss": 0.5472, + "step": 94500 + }, + { + "epoch": 0.95, + "learning_rate": 3.36e-05, + "loss": 0.5472, + "step": 95000 + }, + { + "epoch": 0.95, + "learning_rate": 3.0266666666666663e-05, + "loss": 0.5472, + "step": 95500 + }, + { + "epoch": 0.96, + "learning_rate": 2.694e-05, + "loss": 0.5472, + "step": 96000 + }, + { + "epoch": 0.96, + "learning_rate": 2.3606666666666665e-05, + "loss": 0.5472, + "step": 96500 + }, + { + "epoch": 0.97, + "learning_rate": 2.027333333333333e-05, + "loss": 0.5472, + "step": 97000 + }, + { + "epoch": 0.97, + "learning_rate": 1.6939999999999997e-05, + "loss": 0.5472, + "step": 97500 + }, + { + "epoch": 0.98, + "learning_rate": 1.3613333333333332e-05, + "loss": 0.5472, + "step": 98000 + }, + { + "epoch": 0.98, + "learning_rate": 1.028e-05, + "loss": 0.5472, + "step": 98500 + }, + { + "epoch": 0.99, + "learning_rate": 6.946666666666666e-06, + "loss": 0.5471, + "step": 99000 + }, + { + "epoch": 0.99, + "learning_rate": 3.613333333333333e-06, + "loss": 0.5472, + "step": 99500 + }, + { + "epoch": 1.0, + "learning_rate": 2.8666666666666664e-07, + "loss": 0.5472, + 
"step": 100000 + } + ], + "max_steps": 100000, + "num_train_epochs": 1, + "total_flos": 4.214257693131571e+19, + "trial_name": null, + "trial_params": null +} diff --git a/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/training_args.bin b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..15e2a84cbc0c8386fe339c58b8194aeb0f14c793 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/checkpoint-100000/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4df81e52587acbe0101b10601f3f46dd97916bfc5d01cc59be7943e28c1e412 +size 2735 diff --git a/propositional_logic/lt_alm_noreflexivity/config.json b/propositional_logic/lt_alm_noreflexivity/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_alm_noreflexivity/pytorch_model.bin b/propositional_logic/lt_alm_noreflexivity/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..c24a5fc1a6c097c5b1d29e21b8ffe0906d5ac611 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:044620c7cdb9a56568ca95b7715f08ab4b1ebe466efc936ac34c69aa496700cf +size 359700713 diff --git a/propositional_logic/lt_alm_noreflexivity/runs/Jun02_02-52-27_allennlp-server4/1654250114.9441073/events.out.tfevents.1654250114.allennlp-server4.2845725.1 b/propositional_logic/lt_alm_noreflexivity/runs/Jun02_02-52-27_allennlp-server4/1654250114.9441073/events.out.tfevents.1654250114.allennlp-server4.2845725.1 new file mode 100644 index 0000000000000000000000000000000000000000..70cbbfc68ed4bb441099cd37d464f62af7de2700 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/runs/Jun02_02-52-27_allennlp-server4/1654250114.9441073/events.out.tfevents.1654250114.allennlp-server4.2845725.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fe739aa30bb26e223fd2d4897896e7f942b1b2d576e0fda4aee47540310e78a +size 4425 diff --git a/propositional_logic/lt_alm_noreflexivity/runs/Jun02_02-52-27_allennlp-server4/events.out.tfevents.1654250109.allennlp-server4.2845725.0 b/propositional_logic/lt_alm_noreflexivity/runs/Jun02_02-52-27_allennlp-server4/events.out.tfevents.1654250109.allennlp-server4.2845725.0 new file mode 100644 index 0000000000000000000000000000000000000000..f3c6e1f676daac6a70320558c68c714b22a54421 --- /dev/null +++ 
b/propositional_logic/lt_alm_noreflexivity/runs/Jun02_02-52-27_allennlp-server4/events.out.tfevents.1654250109.allennlp-server4.2845725.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:876b7af5e9be817eec01bce9583998aa3e21a5eca1989169595a021ecf138ff8 +size 35626 diff --git a/propositional_logic/lt_alm_noreflexivity/training_args.bin b/propositional_logic/lt_alm_noreflexivity/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..15e2a84cbc0c8386fe339c58b8194aeb0f14c793 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4df81e52587acbe0101b10601f3f46dd97916bfc5d01cc59be7943e28c1e412 +size 2735 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/config.json b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/optimizer.pt b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..4569dddde7ae5ee789c53a69a1e4195438a0abdd --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2782cf1e0bfb8bdc248e3d34cc79875e576158ca05daa414c72299f650072ff4 +size 694198065 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/pytorch_model.bin b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..4412048281e2dd6ebef89f9e1ebe6d58e3497880 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f4353422edf337e6431cd9b876ce10a85ed87598959c86c2e3e37b7d59a000e +size 359700713 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/rng_state.pth b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..35f6fde045a6c640c8505d1e1f0fe1b8ea05f3f3 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:9bd6c5f036db3e720e6c9b6038bd5ebba7707a753fba41942931fba45076bdff +size 14503 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/scaler.pt b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/scaler.pt new file mode 100644 index 0000000000000000000000000000000000000000..e2d741d84c76b2dfbf645502922b64f2453d404c --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/scaler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ade96461600bebab219d001f402561ab67ea3b8b429c89ff75ce4db44f23ffae +size 559 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/scheduler.pt b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..e60662986041b9dbd11ffc15e5d83dec334e7397 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19c8fa2673f2a310c2a4524c4ab6b58be6ff56aeba25233fe1a478391d398b2b +size 623 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/trainer_state.json b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..13461503870c9f47db566da927f34ace6308b8e8 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/trainer_state.json @@ -0,0 +1,1216 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 1.0, + "global_step": 100000, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.01, + "learning_rate": 2.9999999999999997e-05, + "loss": 1.3017, + "step": 500 + }, + { + "epoch": 0.01, + "learning_rate": 5.9999999999999995e-05, + "loss": 0.6339, + "step": 1000 + }, + { + "epoch": 0.01, + "learning_rate": 8.999999999999999e-05, + "loss": 0.5877, + "step": 1500 + }, + { + "epoch": 0.02, + "learning_rate": 0.00011999999999999999, + "loss": 0.568, + "step": 2000 + }, + { + "epoch": 0.03, + "learning_rate": 0.00015, + "loss": 0.5622, + "step": 2500 + }, + { + "epoch": 0.03, + "learning_rate": 0.00017999999999999998, + "loss": 0.5659, + "step": 3000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00020999999999999998, + "loss": 0.561, + "step": 3500 + }, + { + "epoch": 0.04, + "learning_rate": 0.00023999999999999998, + "loss": 0.559, + "step": 4000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00027, + "loss": 0.5594, + "step": 4500 + }, + { + "epoch": 0.05, + "learning_rate": 0.0003, + "loss": 0.5607, + "step": 5000 + }, + { + "epoch": 0.06, + "learning_rate": 0.00033, + "loss": 0.5592, + "step": 5500 + }, + { + "epoch": 0.06, + "learning_rate": 0.00035999999999999997, + "loss": 0.5539, + "step": 6000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00039, + "loss": 0.5569, + "step": 6500 + }, + { + "epoch": 0.07, + "learning_rate": 0.00041999999999999996, + "loss": 0.5565, + "step": 7000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00045, + "loss": 0.5574, + "step": 7500 + }, + { + "epoch": 0.08, + "learning_rate": 0.00047999999999999996, + "loss": 0.5555, + "step": 8000 + }, + { + "epoch": 0.09, + "learning_rate": 0.0005099999999999999, + "loss": 0.5538, + "step": 8500 + }, + { + "epoch": 0.09, + "learning_rate": 0.00054, + "loss": 0.5526, + "step": 9000 + }, + { + "epoch": 
0.1, + "learning_rate": 0.00057, + "loss": 0.5537, + "step": 9500 + }, + { + "epoch": 0.1, + "learning_rate": 0.0006, + "loss": 0.5535, + "step": 10000 + }, + { + "epoch": 0.1, + "learning_rate": 0.0005966666666666667, + "loss": 0.5522, + "step": 10500 + }, + { + "epoch": 0.11, + "learning_rate": 0.0005933333333333333, + "loss": 0.5507, + "step": 11000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005899999999999999, + "loss": 0.5501, + "step": 11500 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005866666666666665, + "loss": 0.55, + "step": 12000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005833333333333333, + "loss": 0.5494, + "step": 12500 + }, + { + "epoch": 0.13, + "learning_rate": 0.00058, + "loss": 0.5491, + "step": 13000 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005766666666666666, + "loss": 0.5487, + "step": 13500 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005733333333333334, + "loss": 0.549, + "step": 14000 + }, + { + "epoch": 0.14, + "learning_rate": 0.00057, + "loss": 0.5484, + "step": 14500 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005666666666666666, + "loss": 0.5484, + "step": 15000 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005633333333333333, + "loss": 0.5483, + "step": 15500 + }, + { + "epoch": 0.16, + "learning_rate": 0.00056, + "loss": 0.5479, + "step": 16000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005566666666666667, + "loss": 0.5484, + "step": 16500 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005533333333333333, + "loss": 0.548, + "step": 17000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005499999999999999, + "loss": 0.548, + "step": 17500 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005466666666666667, + "loss": 0.5479, + "step": 18000 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005433399999999999, + "loss": 0.5479, + "step": 18500 + }, + { + "epoch": 0.19, + "learning_rate": 0.0005400066666666666, + "loss": 0.5478, + "step": 19000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005366733333333333, + "loss": 0.5481, + "step": 19500 + }, + { + "epoch": 0.2, + "learning_rate": 0.00053334, + "loss": 0.5479, + "step": 20000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005300133333333333, + "loss": 0.5476, + "step": 20500 + }, + { + "epoch": 0.21, + "learning_rate": 0.00052668, + "loss": 0.5477, + "step": 21000 + }, + { + "epoch": 0.21, + "learning_rate": 0.0005233466666666667, + "loss": 0.5477, + "step": 21500 + }, + { + "epoch": 0.22, + "learning_rate": 0.0005200133333333332, + "loss": 0.5476, + "step": 22000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005166866666666667, + "loss": 0.5477, + "step": 22500 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005133533333333333, + "loss": 0.5476, + "step": 23000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005100199999999999, + "loss": 0.5476, + "step": 23500 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005066866666666666, + "loss": 0.5476, + "step": 24000 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005033599999999999, + "loss": 0.5475, + "step": 24500 + }, + { + "epoch": 0.25, + "learning_rate": 0.0005000266666666666, + "loss": 0.5476, + "step": 25000 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004966933333333332, + "loss": 0.5476, + "step": 25500 + }, + { + "epoch": 0.26, + "learning_rate": 0.00049336, + "loss": 0.5476, + "step": 26000 + }, + { + "epoch": 0.27, + "learning_rate": 0.0004900333333333333, + "loss": 0.5475, + "step": 26500 + }, + { + "epoch": 0.27, + "learning_rate": 0.00048669999999999996, + "loss": 0.5476, + "step": 27000 + }, + { + 
"epoch": 0.28, + "learning_rate": 0.0004833666666666666, + "loss": 0.5476, + "step": 27500 + }, + { + "epoch": 0.28, + "learning_rate": 0.00048003333333333326, + "loss": 0.5474, + "step": 28000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004767066666666666, + "loss": 0.5474, + "step": 28500 + }, + { + "epoch": 0.29, + "learning_rate": 0.0004733733333333333, + "loss": 0.5474, + "step": 29000 + }, + { + "epoch": 0.29, + "learning_rate": 0.00047003999999999997, + "loss": 0.5475, + "step": 29500 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046670666666666664, + "loss": 0.5474, + "step": 30000 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046337999999999994, + "loss": 0.5475, + "step": 30500 + }, + { + "epoch": 0.31, + "learning_rate": 0.0004600466666666666, + "loss": 0.5475, + "step": 31000 + }, + { + "epoch": 0.32, + "learning_rate": 0.0004567133333333333, + "loss": 0.5474, + "step": 31500 + }, + { + "epoch": 0.32, + "learning_rate": 0.00045337999999999997, + "loss": 0.5474, + "step": 32000 + }, + { + "epoch": 0.33, + "learning_rate": 0.00045005333333333333, + "loss": 0.5474, + "step": 32500 + }, + { + "epoch": 0.33, + "learning_rate": 0.00044672, + "loss": 0.5474, + "step": 33000 + }, + { + "epoch": 0.34, + "learning_rate": 0.00044338666666666663, + "loss": 0.5474, + "step": 33500 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004400533333333333, + "loss": 0.5475, + "step": 34000 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004367266666666666, + "loss": 0.5475, + "step": 34500 + }, + { + "epoch": 0.35, + "learning_rate": 0.0004333933333333333, + "loss": 0.5474, + "step": 35000 + }, + { + "epoch": 0.35, + "learning_rate": 0.00043005999999999996, + "loss": 0.5474, + "step": 35500 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042672666666666663, + "loss": 0.5474, + "step": 36000 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042339999999999994, + "loss": 0.5475, + "step": 36500 + }, + { + "epoch": 0.37, + "learning_rate": 0.0004200666666666666, + "loss": 0.5474, + "step": 37000 + }, + { + "epoch": 0.38, + "learning_rate": 0.00041673333333333334, + "loss": 0.5473, + "step": 37500 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004133999999999999, + "loss": 0.5474, + "step": 38000 + }, + { + "epoch": 0.39, + "learning_rate": 0.0004100733333333333, + "loss": 0.5475, + "step": 38500 + }, + { + "epoch": 0.39, + "learning_rate": 0.00040673999999999994, + "loss": 0.5473, + "step": 39000 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004034066666666666, + "loss": 0.5474, + "step": 39500 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004000733333333333, + "loss": 0.5474, + "step": 40000 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003967466666666666, + "loss": 0.5474, + "step": 40500 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003934133333333333, + "loss": 0.5473, + "step": 41000 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039007999999999995, + "loss": 0.5473, + "step": 41500 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038674666666666663, + "loss": 0.5473, + "step": 42000 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038342, + "loss": 0.5474, + "step": 42500 + }, + { + "epoch": 0.43, + "learning_rate": 0.00038008666666666666, + "loss": 0.5473, + "step": 43000 + }, + { + "epoch": 0.43, + "learning_rate": 0.00037675333333333334, + "loss": 0.5475, + "step": 43500 + }, + { + "epoch": 0.44, + "learning_rate": 0.00037341999999999996, + "loss": 0.5473, + "step": 44000 + }, + { + "epoch": 0.45, + "learning_rate": 0.0003700933333333333, + "loss": 0.5473, + "step": 44500 + }, 
+ { + "epoch": 0.45, + "learning_rate": 0.00036675999999999994, + "loss": 0.5474, + "step": 45000 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003634266666666666, + "loss": 0.5473, + "step": 45500 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003600933333333333, + "loss": 0.5473, + "step": 46000 + }, + { + "epoch": 0.47, + "learning_rate": 0.00035676666666666665, + "loss": 0.5474, + "step": 46500 + }, + { + "epoch": 0.47, + "learning_rate": 0.00035343999999999995, + "loss": 0.5474, + "step": 47000 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003501066666666666, + "loss": 0.5474, + "step": 47500 + }, + { + "epoch": 0.48, + "learning_rate": 0.0003467733333333333, + "loss": 0.5473, + "step": 48000 + }, + { + "epoch": 0.48, + "learning_rate": 0.00034344, + "loss": 0.5473, + "step": 48500 + }, + { + "epoch": 0.49, + "learning_rate": 0.00034010666666666665, + "loss": 0.5473, + "step": 49000 + }, + { + "epoch": 0.49, + "learning_rate": 0.0003367733333333333, + "loss": 0.5474, + "step": 49500 + }, + { + "epoch": 0.5, + "learning_rate": 0.00033343999999999995, + "loss": 0.5473, + "step": 50000 + }, + { + "epoch": 0.51, + "learning_rate": 0.00033010666666666663, + "loss": 0.5473, + "step": 50500 + }, + { + "epoch": 0.51, + "learning_rate": 0.00032677999999999993, + "loss": 0.5473, + "step": 51000 + }, + { + "epoch": 0.52, + "learning_rate": 0.0003234466666666666, + "loss": 0.5474, + "step": 51500 + }, + { + "epoch": 0.52, + "learning_rate": 0.00032011333333333334, + "loss": 0.5473, + "step": 52000 + }, + { + "epoch": 0.53, + "learning_rate": 0.00031678, + "loss": 0.5473, + "step": 52500 + }, + { + "epoch": 0.53, + "learning_rate": 0.0003134533333333333, + "loss": 0.5473, + "step": 53000 + }, + { + "epoch": 0.54, + "learning_rate": 0.00031012, + "loss": 0.5473, + "step": 53500 + }, + { + "epoch": 0.54, + "learning_rate": 0.00030678666666666667, + "loss": 0.5473, + "step": 54000 + }, + { + "epoch": 0.55, + "learning_rate": 0.0003034533333333333, + "loss": 0.5473, + "step": 54500 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030012666666666665, + "loss": 0.5474, + "step": 55000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002967933333333333, + "loss": 0.5473, + "step": 55500 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029345999999999994, + "loss": 0.5474, + "step": 56000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002901266666666666, + "loss": 0.5474, + "step": 56500 + }, + { + "epoch": 0.57, + "learning_rate": 0.0002868, + "loss": 0.5474, + "step": 57000 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028346666666666665, + "loss": 0.5473, + "step": 57500 + }, + { + "epoch": 0.58, + "learning_rate": 0.00028013333333333333, + "loss": 0.5474, + "step": 58000 + }, + { + "epoch": 0.58, + "learning_rate": 0.00027679999999999995, + "loss": 0.5475, + "step": 58500 + }, + { + "epoch": 0.59, + "learning_rate": 0.0002734733333333333, + "loss": 0.5474, + "step": 59000 + }, + { + "epoch": 0.59, + "learning_rate": 0.00027014, + "loss": 0.5474, + "step": 59500 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026680666666666666, + "loss": 0.5473, + "step": 60000 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026347333333333334, + "loss": 0.5473, + "step": 60500 + }, + { + "epoch": 0.61, + "learning_rate": 0.00026014666666666664, + "loss": 0.5473, + "step": 61000 + }, + { + "epoch": 0.61, + "learning_rate": 0.0002568133333333333, + "loss": 0.5475, + "step": 61500 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025348, + "loss": 0.5475, + "step": 62000 + }, + { + "epoch": 0.62, + 
"learning_rate": 0.00025014666666666667, + "loss": 0.5479, + "step": 62500 + }, + { + "epoch": 0.63, + "learning_rate": 0.00024681999999999997, + "loss": 0.5477, + "step": 63000 + }, + { + "epoch": 0.64, + "learning_rate": 0.00024348666666666665, + "loss": 0.5478, + "step": 63500 + }, + { + "epoch": 0.64, + "learning_rate": 0.0002401533333333333, + "loss": 0.5478, + "step": 64000 + }, + { + "epoch": 0.65, + "learning_rate": 0.00023681999999999997, + "loss": 0.5476, + "step": 64500 + }, + { + "epoch": 0.65, + "learning_rate": 0.0002334933333333333, + "loss": 0.5475, + "step": 65000 + }, + { + "epoch": 0.66, + "learning_rate": 0.00023015999999999998, + "loss": 0.5478, + "step": 65500 + }, + { + "epoch": 0.66, + "learning_rate": 0.00022682666666666665, + "loss": 0.5478, + "step": 66000 + }, + { + "epoch": 0.67, + "learning_rate": 0.0002234933333333333, + "loss": 0.5478, + "step": 66500 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022016666666666666, + "loss": 0.5478, + "step": 67000 + }, + { + "epoch": 0.68, + "learning_rate": 0.0002168333333333333, + "loss": 0.5477, + "step": 67500 + }, + { + "epoch": 0.68, + "learning_rate": 0.00021349999999999999, + "loss": 0.5477, + "step": 68000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00021016666666666666, + "loss": 0.5478, + "step": 68500 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020684, + "loss": 0.5477, + "step": 69000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020350666666666667, + "loss": 0.5477, + "step": 69500 + }, + { + "epoch": 0.7, + "learning_rate": 0.00020017333333333332, + "loss": 0.5477, + "step": 70000 + }, + { + "epoch": 0.7, + "learning_rate": 0.00019684, + "loss": 0.5477, + "step": 70500 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019351333333333332, + "loss": 0.5477, + "step": 71000 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019018, + "loss": 0.5477, + "step": 71500 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018684666666666667, + "loss": 0.5477, + "step": 72000 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018351333333333332, + "loss": 0.5477, + "step": 72500 + }, + { + "epoch": 0.73, + "learning_rate": 0.00018018666666666663, + "loss": 0.5477, + "step": 73000 + }, + { + "epoch": 0.73, + "learning_rate": 0.0001768533333333333, + "loss": 0.5477, + "step": 73500 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017352, + "loss": 0.5476, + "step": 74000 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017018666666666663, + "loss": 0.5477, + "step": 74500 + }, + { + "epoch": 0.75, + "learning_rate": 0.00016685999999999998, + "loss": 0.5477, + "step": 75000 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016353333333333331, + "loss": 0.5476, + "step": 75500 + }, + { + "epoch": 0.76, + "learning_rate": 0.0001602, + "loss": 0.5476, + "step": 76000 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015686666666666664, + "loss": 0.5476, + "step": 76500 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015353333333333332, + "loss": 0.5477, + "step": 77000 + }, + { + "epoch": 0.78, + "learning_rate": 0.0001502, + "loss": 0.5475, + "step": 77500 + }, + { + "epoch": 0.78, + "learning_rate": 0.00014686666666666667, + "loss": 0.5473, + "step": 78000 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014353333333333332, + "loss": 0.5474, + "step": 78500 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014020666666666667, + "loss": 0.5481, + "step": 79000 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013687333333333332, + "loss": 0.548, + "step": 79500 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013354, + 
"loss": 0.5481, + "step": 80000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00013020666666666665, + "loss": 0.548, + "step": 80500 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012687999999999998, + "loss": 0.548, + "step": 81000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012354666666666665, + "loss": 0.5479, + "step": 81500 + }, + { + "epoch": 0.82, + "learning_rate": 0.00012021333333333333, + "loss": 0.5479, + "step": 82000 + }, + { + "epoch": 0.82, + "learning_rate": 0.00011687999999999999, + "loss": 0.5479, + "step": 82500 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011354666666666665, + "loss": 0.5478, + "step": 83000 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011021333333333333, + "loss": 0.5475, + "step": 83500 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010687999999999999, + "loss": 0.5473, + "step": 84000 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010354666666666666, + "loss": 0.5473, + "step": 84500 + }, + { + "epoch": 0.85, + "learning_rate": 0.00010022, + "loss": 0.5473, + "step": 85000 + }, + { + "epoch": 0.85, + "learning_rate": 9.688666666666666e-05, + "loss": 0.5473, + "step": 85500 + }, + { + "epoch": 0.86, + "learning_rate": 9.355333333333332e-05, + "loss": 0.5473, + "step": 86000 + }, + { + "epoch": 0.86, + "learning_rate": 9.022666666666665e-05, + "loss": 0.5472, + "step": 86500 + }, + { + "epoch": 0.87, + "learning_rate": 8.69e-05, + "loss": 0.5472, + "step": 87000 + }, + { + "epoch": 0.88, + "learning_rate": 8.356666666666666e-05, + "loss": 0.5473, + "step": 87500 + }, + { + "epoch": 0.88, + "learning_rate": 8.023333333333332e-05, + "loss": 0.5472, + "step": 88000 + }, + { + "epoch": 0.89, + "learning_rate": 7.69e-05, + "loss": 0.5472, + "step": 88500 + }, + { + "epoch": 0.89, + "learning_rate": 7.356666666666666e-05, + "loss": 0.5472, + "step": 89000 + }, + { + "epoch": 0.9, + "learning_rate": 7.023333333333332e-05, + "loss": 0.5472, + "step": 89500 + }, + { + "epoch": 0.9, + "learning_rate": 6.69e-05, + "loss": 0.5472, + "step": 90000 + }, + { + "epoch": 0.91, + "learning_rate": 6.356666666666666e-05, + "loss": 0.5473, + "step": 90500 + }, + { + "epoch": 0.91, + "learning_rate": 6.024e-05, + "loss": 0.5472, + "step": 91000 + }, + { + "epoch": 0.92, + "learning_rate": 5.690666666666666e-05, + "loss": 0.5472, + "step": 91500 + }, + { + "epoch": 0.92, + "learning_rate": 5.357333333333333e-05, + "loss": 0.5472, + "step": 92000 + }, + { + "epoch": 0.93, + "learning_rate": 5.024e-05, + "loss": 0.5472, + "step": 92500 + }, + { + "epoch": 0.93, + "learning_rate": 4.691333333333333e-05, + "loss": 0.5472, + "step": 93000 + }, + { + "epoch": 0.94, + "learning_rate": 4.357999999999999e-05, + "loss": 0.5472, + "step": 93500 + }, + { + "epoch": 0.94, + "learning_rate": 4.024666666666666e-05, + "loss": 0.5472, + "step": 94000 + }, + { + "epoch": 0.94, + "learning_rate": 3.691333333333333e-05, + "loss": 0.5472, + "step": 94500 + }, + { + "epoch": 0.95, + "learning_rate": 3.358666666666666e-05, + "loss": 0.5472, + "step": 95000 + }, + { + "epoch": 0.95, + "learning_rate": 3.025333333333333e-05, + "loss": 0.5472, + "step": 95500 + }, + { + "epoch": 0.96, + "learning_rate": 2.6919999999999996e-05, + "loss": 0.5472, + "step": 96000 + }, + { + "epoch": 0.96, + "learning_rate": 2.3586666666666662e-05, + "loss": 0.5472, + "step": 96500 + }, + { + "epoch": 0.97, + "learning_rate": 2.026e-05, + "loss": 0.5472, + "step": 97000 + }, + { + "epoch": 0.97, + "learning_rate": 1.6926666666666665e-05, + "loss": 0.5472, + "step": 97500 + }, + { + "epoch": 0.98, + 
"learning_rate": 1.3593333333333332e-05, + "loss": 0.5472, + "step": 98000 + }, + { + "epoch": 0.98, + "learning_rate": 1.026e-05, + "loss": 0.5471, + "step": 98500 + }, + { + "epoch": 0.99, + "learning_rate": 6.933333333333333e-06, + "loss": 0.5472, + "step": 99000 + }, + { + "epoch": 0.99, + "learning_rate": 3.6e-06, + "loss": 0.5471, + "step": 99500 + }, + { + "epoch": 1.0, + "learning_rate": 2.6666666666666667e-07, + "loss": 0.5472, + "step": 100000 + } + ], + "max_steps": 100000, + "num_train_epochs": 1, + "total_flos": 4.202631887940403e+19, + "trial_name": null, + "trial_params": null +} diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/training_args.bin b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..32bfed89aed5e13492c22cd464e6a8d260086cd9 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/checkpoint-100000/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef9a1b19f49599700a19dc280892b717e93dc7910bf7e6a33a4d264084e33ba1 +size 2735 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/config.json b/propositional_logic/lt_alm_noreflexivity_nosymmetry/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/pytorch_model.bin b/propositional_logic/lt_alm_noreflexivity_nosymmetry/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..4412048281e2dd6ebef89f9e1ebe6d58e3497880 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f4353422edf337e6431cd9b876ce10a85ed87598959c86c2e3e37b7d59a000e +size 359700713 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/runs/May18_01-00-49_zhaofengw-vm-1/1652932399.9015605/events.out.tfevents.1652932399.zhaofengw-vm-1.88182.1 b/propositional_logic/lt_alm_noreflexivity_nosymmetry/runs/May18_01-00-49_zhaofengw-vm-1/1652932399.9015605/events.out.tfevents.1652932399.zhaofengw-vm-1.88182.1 new file mode 100644 index 0000000000000000000000000000000000000000..2e9828b5a3337b0287c361737efeeb95751d5ca6 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/runs/May18_01-00-49_zhaofengw-vm-1/1652932399.9015605/events.out.tfevents.1652932399.zhaofengw-vm-1.88182.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:daf7741b4f2ff02741e27ca440d13d4ec6759c2dae3ca44a4fc2688e4ba74f34 +size 4395 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/runs/May18_01-00-49_zhaofengw-vm-1/events.out.tfevents.1652932396.zhaofengw-vm-1.88182.0 b/propositional_logic/lt_alm_noreflexivity_nosymmetry/runs/May18_01-00-49_zhaofengw-vm-1/events.out.tfevents.1652932396.zhaofengw-vm-1.88182.0 new file mode 100644 index 0000000000000000000000000000000000000000..a14917fc70464ba3dd07424d763ef2d1ac4f9cec --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/runs/May18_01-00-49_zhaofengw-vm-1/events.out.tfevents.1652932396.zhaofengw-vm-1.88182.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fceb38230df0497588286792ea6fc0e7a04d05c31eb0bf2ed66b6df3a6c9d4f3 +size 35596 diff --git a/propositional_logic/lt_alm_noreflexivity_nosymmetry/training_args.bin b/propositional_logic/lt_alm_noreflexivity_nosymmetry/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..32bfed89aed5e13492c22cd464e6a8d260086cd9 --- /dev/null +++ b/propositional_logic/lt_alm_noreflexivity_nosymmetry/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef9a1b19f49599700a19dc280892b717e93dc7910bf7e6a33a4d264084e33ba1 +size 2735 diff --git a/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/config.json b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/optimizer.pt b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..399f21c7e286a9c2f982ee291f4bca5dba1cedd4 --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:780a53b3bcf77d6a2b64b200f43d88e18c481c72a92b2afaaf256daa8d9cdf4a +size 694198065 diff --git a/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/pytorch_model.bin b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..d1fe8d12bae692227cb2d31392e2cd13f3fc18be --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37fd0763cf3d418bd7fbbebbe87a24ea6f917433a1b89ddfbc7cba881a62130f +size 359700713 diff --git 
a/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/rng_state.pth b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/rng_state.pth new file mode 100644 index 0000000000000000000000000000000000000000..db1360b0d46f29967341c303c1ce87509d2c3a64 --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f04f0b4d009d10f7eb65f17fa9bd7ab6d4824fdfef3bdaf701fd26a4a4490a43 +size 14503 diff --git a/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/scaler.pt b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/scaler.pt new file mode 100644 index 0000000000000000000000000000000000000000..f653870e3a339c3a43e2f60fd0320ea0addf8070 --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/scaler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:656f8daee627965f3c20bb09bc52387653e95a8352ccbe2cb42905caa184a9b2 +size 559 diff --git a/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/scheduler.pt b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..5cbfcd3c8ca21bc2c76483147b6f1c8652f9e232 --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a1270c5626b0e62d4fa47ac4c2e42caccce8b5e7b608cbc7f83868d16ed1977 +size 623 diff --git a/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/trainer_state.json b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..c23948ca0b6a340dd01deb24cfdf9b922afe1138 --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/trainer_state.json @@ -0,0 +1,1216 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.9999999218750061, + "global_step": 100000, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.0, + "learning_rate": 2.9999999999999997e-05, + "loss": 1.2997, + "step": 500 + }, + { + "epoch": 0.01, + "learning_rate": 5.9999999999999995e-05, + "loss": 0.6204, + "step": 1000 + }, + { + "epoch": 0.01, + "learning_rate": 8.999999999999999e-05, + "loss": 0.5267, + "step": 1500 + }, + { + "epoch": 0.02, + "learning_rate": 0.00011999999999999999, + "loss": 0.4277, + "step": 2000 + }, + { + "epoch": 0.02, + "learning_rate": 0.00015, + "loss": 0.396, + "step": 2500 + }, + { + "epoch": 0.03, + "learning_rate": 0.00017999999999999998, + "loss": 0.3923, + "step": 3000 + }, + { + "epoch": 0.03, + "learning_rate": 0.00020999999999999998, + "loss": 0.3918, + "step": 3500 + }, + { + "epoch": 0.04, + "learning_rate": 0.00023999999999999998, + "loss": 0.3874, + "step": 4000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00027, + "loss": 0.3845, + "step": 4500 + }, + { + "epoch": 0.05, + "learning_rate": 0.0003, + "loss": 0.3839, + "step": 5000 + }, + { + "epoch": 0.05, + "learning_rate": 0.00033, + "loss": 0.3841, + "step": 5500 + }, + { + "epoch": 0.06, + "learning_rate": 0.00035999999999999997, + "loss": 0.3817, + "step": 6000 + }, + { + "epoch": 0.06, + "learning_rate": 0.00039, + "loss": 0.3811, + "step": 6500 + }, + { + "epoch": 0.07, + "learning_rate": 0.00041999999999999996, + "loss": 0.3822, + "step": 7000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00045, + "loss": 0.3797, + "step": 7500 + }, + { + "epoch": 
0.08, + "learning_rate": 0.00047999999999999996, + "loss": 0.3788, + "step": 8000 + }, + { + "epoch": 0.08, + "learning_rate": 0.0005099999999999999, + "loss": 0.3808, + "step": 8500 + }, + { + "epoch": 0.09, + "learning_rate": 0.00054, + "loss": 0.3806, + "step": 9000 + }, + { + "epoch": 0.09, + "learning_rate": 0.00057, + "loss": 0.3782, + "step": 9500 + }, + { + "epoch": 0.1, + "learning_rate": 0.0006, + "loss": 0.3778, + "step": 10000 + }, + { + "epoch": 0.1, + "learning_rate": 0.0005966666666666667, + "loss": 0.3761, + "step": 10500 + }, + { + "epoch": 0.11, + "learning_rate": 0.0005933333333333333, + "loss": 0.377, + "step": 11000 + }, + { + "epoch": 0.11, + "learning_rate": 0.0005899999999999999, + "loss": 0.3753, + "step": 11500 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005866666666666665, + "loss": 0.3748, + "step": 12000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005833333333333333, + "loss": 0.3752, + "step": 12500 + }, + { + "epoch": 0.13, + "learning_rate": 0.00058, + "loss": 0.3731, + "step": 13000 + }, + { + "epoch": 0.13, + "learning_rate": 0.0005766666666666666, + "loss": 0.3728, + "step": 13500 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005733333333333334, + "loss": 0.3724, + "step": 14000 + }, + { + "epoch": 0.14, + "learning_rate": 0.00057, + "loss": 0.372, + "step": 14500 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005666666666666666, + "loss": 0.3721, + "step": 15000 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005633333333333333, + "loss": 0.372, + "step": 15500 + }, + { + "epoch": 0.16, + "learning_rate": 0.00056, + "loss": 0.3721, + "step": 16000 + }, + { + "epoch": 0.16, + "learning_rate": 0.0005566666666666667, + "loss": 0.3716, + "step": 16500 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005533333333333333, + "loss": 0.3716, + "step": 17000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005499999999999999, + "loss": 0.3718, + "step": 17500 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005466666666666667, + "loss": 0.3717, + "step": 18000 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005433399999999999, + "loss": 0.3712, + "step": 18500 + }, + { + "epoch": 0.19, + "learning_rate": 0.0005400066666666666, + "loss": 0.3714, + "step": 19000 + }, + { + "epoch": 0.19, + "learning_rate": 0.0005366733333333333, + "loss": 0.3711, + "step": 19500 + }, + { + "epoch": 0.2, + "learning_rate": 0.00053334, + "loss": 0.3715, + "step": 20000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005300133333333333, + "loss": 0.3714, + "step": 20500 + }, + { + "epoch": 0.21, + "learning_rate": 0.00052668, + "loss": 0.3709, + "step": 21000 + }, + { + "epoch": 0.21, + "learning_rate": 0.0005233466666666667, + "loss": 0.371, + "step": 21500 + }, + { + "epoch": 0.22, + "learning_rate": 0.0005200133333333332, + "loss": 0.3712, + "step": 22000 + }, + { + "epoch": 0.22, + "learning_rate": 0.0005166866666666667, + "loss": 0.3711, + "step": 22500 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005133533333333333, + "loss": 0.371, + "step": 23000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005100199999999999, + "loss": 0.371, + "step": 23500 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005066866666666666, + "loss": 0.3711, + "step": 24000 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005033599999999999, + "loss": 0.3709, + "step": 24500 + }, + { + "epoch": 0.25, + "learning_rate": 0.0005000266666666666, + "loss": 0.371, + "step": 25000 + }, + { + "epoch": 0.25, + "learning_rate": 0.0004966933333333332, + "loss": 0.3709, + "step": 25500 + }, + { + "epoch": 
0.26, + "learning_rate": 0.00049336, + "loss": 0.371, + "step": 26000 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004900333333333333, + "loss": 0.3708, + "step": 26500 + }, + { + "epoch": 0.27, + "learning_rate": 0.00048669999999999996, + "loss": 0.3709, + "step": 27000 + }, + { + "epoch": 0.27, + "learning_rate": 0.0004833666666666666, + "loss": 0.371, + "step": 27500 + }, + { + "epoch": 0.28, + "learning_rate": 0.00048003333333333326, + "loss": 0.3709, + "step": 28000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004767066666666666, + "loss": 0.371, + "step": 28500 + }, + { + "epoch": 0.29, + "learning_rate": 0.0004733733333333333, + "loss": 0.3709, + "step": 29000 + }, + { + "epoch": 0.29, + "learning_rate": 0.00047003999999999997, + "loss": 0.3708, + "step": 29500 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046670666666666664, + "loss": 0.371, + "step": 30000 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046337999999999994, + "loss": 0.3709, + "step": 30500 + }, + { + "epoch": 0.31, + "learning_rate": 0.0004600466666666666, + "loss": 0.3709, + "step": 31000 + }, + { + "epoch": 0.31, + "learning_rate": 0.0004567133333333333, + "loss": 0.3708, + "step": 31500 + }, + { + "epoch": 0.32, + "learning_rate": 0.00045337999999999997, + "loss": 0.3709, + "step": 32000 + }, + { + "epoch": 0.32, + "learning_rate": 0.00045005333333333333, + "loss": 0.3709, + "step": 32500 + }, + { + "epoch": 0.33, + "learning_rate": 0.00044672, + "loss": 0.3708, + "step": 33000 + }, + { + "epoch": 0.33, + "learning_rate": 0.00044338666666666663, + "loss": 0.3707, + "step": 33500 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004400533333333333, + "loss": 0.3708, + "step": 34000 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004367266666666666, + "loss": 0.3708, + "step": 34500 + }, + { + "epoch": 0.35, + "learning_rate": 0.0004333933333333333, + "loss": 0.3708, + "step": 35000 + }, + { + "epoch": 0.35, + "learning_rate": 0.00043005999999999996, + "loss": 0.3707, + "step": 35500 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042672666666666663, + "loss": 0.3708, + "step": 36000 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042339999999999994, + "loss": 0.3707, + "step": 36500 + }, + { + "epoch": 0.37, + "learning_rate": 0.0004200666666666666, + "loss": 0.3706, + "step": 37000 + }, + { + "epoch": 0.37, + "learning_rate": 0.00041673333333333334, + "loss": 0.3708, + "step": 37500 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004133999999999999, + "loss": 0.3708, + "step": 38000 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004100733333333333, + "loss": 0.3708, + "step": 38500 + }, + { + "epoch": 0.39, + "learning_rate": 0.00040673999999999994, + "loss": 0.3708, + "step": 39000 + }, + { + "epoch": 0.39, + "learning_rate": 0.0004034066666666666, + "loss": 0.3707, + "step": 39500 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004000733333333333, + "loss": 0.3708, + "step": 40000 + }, + { + "epoch": 0.4, + "learning_rate": 0.0003967466666666666, + "loss": 0.3707, + "step": 40500 + }, + { + "epoch": 0.41, + "learning_rate": 0.0003934133333333333, + "loss": 0.3708, + "step": 41000 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039007999999999995, + "loss": 0.3707, + "step": 41500 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038674666666666663, + "loss": 0.3706, + "step": 42000 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038342, + "loss": 0.3706, + "step": 42500 + }, + { + "epoch": 0.43, + "learning_rate": 0.00038008666666666666, + "loss": 0.3709, + "step": 43000 + }, + { + "epoch": 0.43, + 
"learning_rate": 0.00037675333333333334, + "loss": 0.3707, + "step": 43500 + }, + { + "epoch": 0.44, + "learning_rate": 0.00037341999999999996, + "loss": 0.3708, + "step": 44000 + }, + { + "epoch": 0.44, + "learning_rate": 0.0003700933333333333, + "loss": 0.3707, + "step": 44500 + }, + { + "epoch": 0.45, + "learning_rate": 0.00036675999999999994, + "loss": 0.3707, + "step": 45000 + }, + { + "epoch": 0.45, + "learning_rate": 0.0003634266666666666, + "loss": 0.3707, + "step": 45500 + }, + { + "epoch": 0.46, + "learning_rate": 0.0003600933333333333, + "loss": 0.3708, + "step": 46000 + }, + { + "epoch": 0.46, + "learning_rate": 0.00035676666666666665, + "loss": 0.3707, + "step": 46500 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003534333333333333, + "loss": 0.3708, + "step": 47000 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003501, + "loss": 0.3706, + "step": 47500 + }, + { + "epoch": 0.48, + "learning_rate": 0.0003467666666666667, + "loss": 0.3708, + "step": 48000 + }, + { + "epoch": 0.48, + "learning_rate": 0.00034344, + "loss": 0.3709, + "step": 48500 + }, + { + "epoch": 0.49, + "learning_rate": 0.00034010666666666665, + "loss": 0.3707, + "step": 49000 + }, + { + "epoch": 0.49, + "learning_rate": 0.0003367733333333333, + "loss": 0.3707, + "step": 49500 + }, + { + "epoch": 0.5, + "learning_rate": 0.00033343999999999995, + "loss": 0.3706, + "step": 50000 + }, + { + "epoch": 0.5, + "learning_rate": 0.00033011333333333325, + "loss": 0.3708, + "step": 50500 + }, + { + "epoch": 0.51, + "learning_rate": 0.00032677999999999993, + "loss": 0.3707, + "step": 51000 + }, + { + "epoch": 0.51, + "learning_rate": 0.0003234466666666666, + "loss": 0.3708, + "step": 51500 + }, + { + "epoch": 0.52, + "learning_rate": 0.00032011333333333334, + "loss": 0.3706, + "step": 52000 + }, + { + "epoch": 0.52, + "learning_rate": 0.00031678666666666664, + "loss": 0.3707, + "step": 52500 + }, + { + "epoch": 0.53, + "learning_rate": 0.0003134533333333333, + "loss": 0.3706, + "step": 53000 + }, + { + "epoch": 0.53, + "learning_rate": 0.00031012, + "loss": 0.3707, + "step": 53500 + }, + { + "epoch": 0.54, + "learning_rate": 0.00030678666666666667, + "loss": 0.3707, + "step": 54000 + }, + { + "epoch": 0.54, + "learning_rate": 0.00030345999999999997, + "loss": 0.3707, + "step": 54500 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030012666666666665, + "loss": 0.3709, + "step": 55000 + }, + { + "epoch": 0.55, + "learning_rate": 0.0002967933333333333, + "loss": 0.3708, + "step": 55500 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029345999999999994, + "loss": 0.3707, + "step": 56000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002901333333333333, + "loss": 0.3709, + "step": 56500 + }, + { + "epoch": 0.57, + "learning_rate": 0.0002868, + "loss": 0.3707, + "step": 57000 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028347333333333334, + "loss": 0.3706, + "step": 57500 + }, + { + "epoch": 0.58, + "learning_rate": 0.00028013999999999996, + "loss": 0.3708, + "step": 58000 + }, + { + "epoch": 0.58, + "learning_rate": 0.00027680666666666663, + "loss": 0.3707, + "step": 58500 + }, + { + "epoch": 0.59, + "learning_rate": 0.0002734733333333333, + "loss": 0.3707, + "step": 59000 + }, + { + "epoch": 0.59, + "learning_rate": 0.00027014666666666667, + "loss": 0.3706, + "step": 59500 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026681333333333334, + "loss": 0.3708, + "step": 60000 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026347999999999996, + "loss": 0.3706, + "step": 60500 + }, + { + "epoch": 0.61, + 
"learning_rate": 0.00026014666666666664, + "loss": 0.3706, + "step": 61000 + }, + { + "epoch": 0.61, + "learning_rate": 0.0002568133333333333, + "loss": 0.3707, + "step": 61500 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025348, + "loss": 0.3707, + "step": 62000 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025014666666666667, + "loss": 0.3709, + "step": 62500 + }, + { + "epoch": 0.63, + "learning_rate": 0.0002468133333333333, + "loss": 0.3708, + "step": 63000 + }, + { + "epoch": 0.63, + "learning_rate": 0.00024348666666666665, + "loss": 0.3707, + "step": 63500 + }, + { + "epoch": 0.64, + "learning_rate": 0.0002401533333333333, + "loss": 0.3707, + "step": 64000 + }, + { + "epoch": 0.64, + "learning_rate": 0.00023681999999999997, + "loss": 0.3707, + "step": 64500 + }, + { + "epoch": 0.65, + "learning_rate": 0.0002334933333333333, + "loss": 0.3706, + "step": 65000 + }, + { + "epoch": 0.65, + "learning_rate": 0.00023015999999999998, + "loss": 0.3707, + "step": 65500 + }, + { + "epoch": 0.66, + "learning_rate": 0.00022682666666666665, + "loss": 0.3706, + "step": 66000 + }, + { + "epoch": 0.66, + "learning_rate": 0.0002234933333333333, + "loss": 0.3706, + "step": 66500 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022016666666666666, + "loss": 0.3707, + "step": 67000 + }, + { + "epoch": 0.67, + "learning_rate": 0.0002168333333333333, + "loss": 0.3706, + "step": 67500 + }, + { + "epoch": 0.68, + "learning_rate": 0.00021349999999999999, + "loss": 0.3707, + "step": 68000 + }, + { + "epoch": 0.68, + "learning_rate": 0.00021016666666666666, + "loss": 0.3707, + "step": 68500 + }, + { + "epoch": 0.69, + "learning_rate": 0.0002068333333333333, + "loss": 0.3708, + "step": 69000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020350666666666667, + "loss": 0.3709, + "step": 69500 + }, + { + "epoch": 0.7, + "learning_rate": 0.00020017333333333332, + "loss": 0.3706, + "step": 70000 + }, + { + "epoch": 0.7, + "learning_rate": 0.00019684, + "loss": 0.3707, + "step": 70500 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019350666666666667, + "loss": 0.3706, + "step": 71000 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019018, + "loss": 0.3707, + "step": 71500 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018684666666666667, + "loss": 0.3706, + "step": 72000 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018351333333333332, + "loss": 0.3707, + "step": 72500 + }, + { + "epoch": 0.73, + "learning_rate": 0.00018018, + "loss": 0.3706, + "step": 73000 + }, + { + "epoch": 0.73, + "learning_rate": 0.00017684666666666665, + "loss": 0.3706, + "step": 73500 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017352, + "loss": 0.3706, + "step": 74000 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017018666666666663, + "loss": 0.3707, + "step": 74500 + }, + { + "epoch": 0.75, + "learning_rate": 0.0001668533333333333, + "loss": 0.3707, + "step": 75000 + }, + { + "epoch": 0.75, + "learning_rate": 0.00016352, + "loss": 0.3707, + "step": 75500 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016018666666666663, + "loss": 0.3707, + "step": 76000 + }, + { + "epoch": 0.76, + "learning_rate": 0.00015685999999999999, + "loss": 0.3707, + "step": 76500 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015352666666666663, + "loss": 0.3708, + "step": 77000 + }, + { + "epoch": 0.77, + "learning_rate": 0.0001501933333333333, + "loss": 0.3707, + "step": 77500 + }, + { + "epoch": 0.78, + "learning_rate": 0.00014685999999999999, + "loss": 0.3707, + "step": 78000 + }, + { + "epoch": 0.78, + "learning_rate": 
0.00014353333333333332, + "loss": 0.3707, + "step": 78500 + }, + { + "epoch": 0.79, + "learning_rate": 0.0001402, + "loss": 0.3707, + "step": 79000 + }, + { + "epoch": 0.79, + "learning_rate": 0.00013686666666666664, + "loss": 0.3707, + "step": 79500 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013353333333333332, + "loss": 0.3707, + "step": 80000 + }, + { + "epoch": 0.8, + "learning_rate": 0.0001302, + "loss": 0.3705, + "step": 80500 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012687333333333332, + "loss": 0.3707, + "step": 81000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012354, + "loss": 0.3707, + "step": 81500 + }, + { + "epoch": 0.82, + "learning_rate": 0.00012020666666666665, + "loss": 0.3707, + "step": 82000 + }, + { + "epoch": 0.82, + "learning_rate": 0.00011687333333333332, + "loss": 0.3706, + "step": 82500 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011354666666666665, + "loss": 0.3707, + "step": 83000 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011021333333333333, + "loss": 0.3706, + "step": 83500 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010687999999999999, + "loss": 0.3707, + "step": 84000 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010354666666666666, + "loss": 0.3706, + "step": 84500 + }, + { + "epoch": 0.85, + "learning_rate": 0.00010021333333333332, + "loss": 0.3706, + "step": 85000 + }, + { + "epoch": 0.85, + "learning_rate": 9.688666666666666e-05, + "loss": 0.3707, + "step": 85500 + }, + { + "epoch": 0.86, + "learning_rate": 9.355333333333332e-05, + "loss": 0.3706, + "step": 86000 + }, + { + "epoch": 0.86, + "learning_rate": 9.022e-05, + "loss": 0.3705, + "step": 86500 + }, + { + "epoch": 0.87, + "learning_rate": 8.688666666666666e-05, + "loss": 0.3706, + "step": 87000 + }, + { + "epoch": 0.87, + "learning_rate": 8.355333333333333e-05, + "loss": 0.3707, + "step": 87500 + }, + { + "epoch": 0.88, + "learning_rate": 8.022666666666666e-05, + "loss": 0.3707, + "step": 88000 + }, + { + "epoch": 0.88, + "learning_rate": 7.689333333333332e-05, + "loss": 0.3707, + "step": 88500 + }, + { + "epoch": 0.89, + "learning_rate": 7.356e-05, + "loss": 0.3706, + "step": 89000 + }, + { + "epoch": 0.89, + "learning_rate": 7.022666666666667e-05, + "loss": 0.3707, + "step": 89500 + }, + { + "epoch": 0.9, + "learning_rate": 6.69e-05, + "loss": 0.3706, + "step": 90000 + }, + { + "epoch": 0.9, + "learning_rate": 6.356666666666666e-05, + "loss": 0.3708, + "step": 90500 + }, + { + "epoch": 0.91, + "learning_rate": 6.0233333333333325e-05, + "loss": 0.3707, + "step": 91000 + }, + { + "epoch": 0.91, + "learning_rate": 5.69e-05, + "loss": 0.3707, + "step": 91500 + }, + { + "epoch": 0.92, + "learning_rate": 5.356666666666666e-05, + "loss": 0.3706, + "step": 92000 + }, + { + "epoch": 0.92, + "learning_rate": 5.024e-05, + "loss": 0.3707, + "step": 92500 + }, + { + "epoch": 0.93, + "learning_rate": 4.690666666666666e-05, + "loss": 0.3707, + "step": 93000 + }, + { + "epoch": 0.93, + "learning_rate": 4.357333333333333e-05, + "loss": 0.3707, + "step": 93500 + }, + { + "epoch": 0.94, + "learning_rate": 4.0239999999999994e-05, + "loss": 0.3706, + "step": 94000 + }, + { + "epoch": 0.94, + "learning_rate": 3.6906666666666663e-05, + "loss": 0.3707, + "step": 94500 + }, + { + "epoch": 0.95, + "learning_rate": 3.3579999999999994e-05, + "loss": 0.3706, + "step": 95000 + }, + { + "epoch": 0.95, + "learning_rate": 3.0246666666666663e-05, + "loss": 0.3706, + "step": 95500 + }, + { + "epoch": 0.96, + "learning_rate": 2.6913333333333332e-05, + "loss": 0.3708, + "step": 96000 + }, + { + 
"epoch": 0.96, + "learning_rate": 2.3579999999999998e-05, + "loss": 0.3706, + "step": 96500 + }, + { + "epoch": 0.97, + "learning_rate": 2.0246666666666664e-05, + "loss": 0.3708, + "step": 97000 + }, + { + "epoch": 0.97, + "learning_rate": 1.6919999999999997e-05, + "loss": 0.3706, + "step": 97500 + }, + { + "epoch": 0.98, + "learning_rate": 1.3586666666666665e-05, + "loss": 0.3705, + "step": 98000 + }, + { + "epoch": 0.98, + "learning_rate": 1.0253333333333332e-05, + "loss": 0.3705, + "step": 98500 + }, + { + "epoch": 0.99, + "learning_rate": 6.919999999999999e-06, + "loss": 0.3706, + "step": 99000 + }, + { + "epoch": 0.99, + "learning_rate": 3.593333333333333e-06, + "loss": 0.3706, + "step": 99500 + }, + { + "epoch": 1.0, + "learning_rate": 2.5999999999999995e-07, + "loss": 0.3706, + "step": 100000 + } + ], + "max_steps": 100000, + "num_train_epochs": 1, + "total_flos": 4.279970369205043e+19, + "trial_name": null, + "trial_params": null +} diff --git a/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/training_args.bin b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..d61ec21f9c74de5c181bef692eafd6efdb12e3eb --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/checkpoint-100000/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fa664ed90be2484917e9ff73703f8d5d4df0ad7b33379f7a8b34f7a0526e3d0 +size 2735 diff --git a/propositional_logic/lt_alm_nosymmetry/config.json b/propositional_logic/lt_alm_nosymmetry/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ff8809f9ee0000962c182321c304c5b09e84f1ba --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/config.json @@ -0,0 +1,37 @@ +{ + "activation_function": "gelu_new", + "architectures": [ + "GPT2LMHeadModel" + ], + "attn_pdrop": 0.1, + "bos_token_id": 50256, + "embd_pdrop": 0.1, + "eos_token_id": 50256, + "gradient_checkpointing": false, + "initializer_range": 0.02, + "layer_norm_epsilon": 1e-05, + "model_type": "gpt2", + "n_ctx": 1024, + "n_embd": 768, + "n_head": 12, + "n_inner": null, + "n_layer": 12, + "n_positions": 1024, + "resid_pdrop": 0.1, + "scale_attn_weights": true, + "summary_activation": null, + "summary_first_dropout": 0.1, + "summary_proj_to_labels": true, + "summary_type": "cls_index", + "summary_use_proj": true, + "task_specific_params": { + "text-generation": { + "do_sample": true, + "max_length": 50 + } + }, + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_alm_nosymmetry/pytorch_model.bin b/propositional_logic/lt_alm_nosymmetry/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..d1fe8d12bae692227cb2d31392e2cd13f3fc18be --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37fd0763cf3d418bd7fbbebbe87a24ea6f917433a1b89ddfbc7cba881a62130f +size 359700713 diff --git a/propositional_logic/lt_alm_nosymmetry/runs/Aug09_21-06-39_allennlp-server4/1660189990.7456152/events.out.tfevents.1660189990.allennlp-server4.1210921.1 b/propositional_logic/lt_alm_nosymmetry/runs/Aug09_21-06-39_allennlp-server4/1660189990.7456152/events.out.tfevents.1660189990.allennlp-server4.1210921.1 new file mode 100644 index 0000000000000000000000000000000000000000..b2f8a5dd09de7ed3ed6faab2a088ee3331cdbaa8 --- /dev/null +++ 
b/propositional_logic/lt_alm_nosymmetry/runs/Aug09_21-06-39_allennlp-server4/1660189990.7456152/events.out.tfevents.1660189990.allennlp-server4.1210921.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:675cb4d2c6cf877499a1c42b8ccda5f4c6bee99cbe875447442caa0e9ff53c65 +size 4441 diff --git a/propositional_logic/lt_alm_nosymmetry/runs/Aug09_21-06-39_allennlp-server4/events.out.tfevents.1660189984.allennlp-server4.1210921.0 b/propositional_logic/lt_alm_nosymmetry/runs/Aug09_21-06-39_allennlp-server4/events.out.tfevents.1660189984.allennlp-server4.1210921.0 new file mode 100644 index 0000000000000000000000000000000000000000..475e6c500f7db7fd13b315f0c40a43b44f014384 --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/runs/Aug09_21-06-39_allennlp-server4/events.out.tfevents.1660189984.allennlp-server4.1210921.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c941a6b8664320179dc6097f71fd86daf0b9ce0a186b33245d3db88bb087372b +size 35642 diff --git a/propositional_logic/lt_alm_nosymmetry/training_args.bin b/propositional_logic/lt_alm_nosymmetry/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..d61ec21f9c74de5c181bef692eafd6efdb12e3eb --- /dev/null +++ b/propositional_logic/lt_alm_nosymmetry/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fa664ed90be2484917e9ff73703f8d5d4df0ad7b33379f7a8b34f7a0526e3d0 +size 2735 diff --git a/propositional_logic/lt_mlm/checkpoint-100000/config.json b/propositional_logic/lt_mlm/checkpoint-100000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..102fa0515a2e7f9a348f86a1dcb7d135d6cfa8c6 --- /dev/null +++ b/propositional_logic/lt_mlm/checkpoint-100000/config.json @@ -0,0 +1,27 @@ +{ + "architectures": [ + "RobertaForMaskedLM" + ], + "attention_probs_dropout_prob": 0.1, + "bos_token_id": 0, + "classifier_dropout": null, + "eos_token_id": 2, + "gradient_checkpointing": false, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-05, + "max_position_embeddings": 514, + "model_type": "roberta", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 1, + "position_embedding_type": "absolute", + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "type_vocab_size": 1, + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_mlm/checkpoint-100000/optimizer.pt b/propositional_logic/lt_mlm/checkpoint-100000/optimizer.pt new file mode 100644 index 0000000000000000000000000000000000000000..0f4a2dd0ad72c7c255fb56be72cf914d30478d2f --- /dev/null +++ b/propositional_logic/lt_mlm/checkpoint-100000/optimizer.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7984f845fb9635b528ee15befc0abe4cf4e24319efe16541d9dba9c330008e3e +size 695849113 diff --git a/propositional_logic/lt_mlm/checkpoint-100000/pytorch_model.bin b/propositional_logic/lt_mlm/checkpoint-100000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..429948f73d4102f926a1455b98d19f197736281d --- /dev/null +++ b/propositional_logic/lt_mlm/checkpoint-100000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f23111770b8a777a22ecb76287021a193552ce1614742744e911970c7e51751 +size 347937771 diff --git a/propositional_logic/lt_mlm/checkpoint-100000/rng_state.pth b/propositional_logic/lt_mlm/checkpoint-100000/rng_state.pth 
new file mode 100644 index 0000000000000000000000000000000000000000..4a3150d29fa8c09ae8334d72c8db783e38d463a0 --- /dev/null +++ b/propositional_logic/lt_mlm/checkpoint-100000/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ace806f1cf6b8fea340c8dcb0738206e80dd48ed2f8e8fdeb7d956fbc92c63b +size 14503 diff --git a/propositional_logic/lt_mlm/checkpoint-100000/scaler.pt b/propositional_logic/lt_mlm/checkpoint-100000/scaler.pt new file mode 100644 index 0000000000000000000000000000000000000000..3149e8d1aa3683f7ad4902a718e70fbcbc0a4105 --- /dev/null +++ b/propositional_logic/lt_mlm/checkpoint-100000/scaler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eaf42f90c719c5e8e25b899abaf67f6cdff129bbeceed648e90c2d5b22d73400 +size 559 diff --git a/propositional_logic/lt_mlm/checkpoint-100000/scheduler.pt b/propositional_logic/lt_mlm/checkpoint-100000/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..fce4e115e566b7e0e0bab426d064c559a4ace555 --- /dev/null +++ b/propositional_logic/lt_mlm/checkpoint-100000/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa032dacd66db7654b168dca5b484fbe4877894e335431edca5660559a893ab0 +size 623 diff --git a/propositional_logic/lt_mlm/checkpoint-100000/trainer_state.json b/propositional_logic/lt_mlm/checkpoint-100000/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..75cbbda0c6f65f266ad54840ff076e95b32770e2 --- /dev/null +++ b/propositional_logic/lt_mlm/checkpoint-100000/trainer_state.json @@ -0,0 +1,1216 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 1.0, + "global_step": 100000, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.01, + "learning_rate": 2.9999999999999997e-05, + "loss": 2.2066, + "step": 500 + }, + { + "epoch": 0.01, + "learning_rate": 5.9999999999999995e-05, + "loss": 1.5564, + "step": 1000 + }, + { + "epoch": 0.01, + "learning_rate": 8.999999999999999e-05, + "loss": 1.5224, + "step": 1500 + }, + { + "epoch": 0.02, + "learning_rate": 0.00011999999999999999, + "loss": 1.4586, + "step": 2000 + }, + { + "epoch": 0.03, + "learning_rate": 0.00015, + "loss": 1.3594, + "step": 2500 + }, + { + "epoch": 0.03, + "learning_rate": 0.00017999999999999998, + "loss": 1.3323, + "step": 3000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00020999999999999998, + "loss": 1.3147, + "step": 3500 + }, + { + "epoch": 0.04, + "learning_rate": 0.00023999999999999998, + "loss": 1.2778, + "step": 4000 + }, + { + "epoch": 0.04, + "learning_rate": 0.00027, + "loss": 1.2596, + "step": 4500 + }, + { + "epoch": 0.05, + "learning_rate": 0.0003, + "loss": 1.2521, + "step": 5000 + }, + { + "epoch": 0.06, + "learning_rate": 0.00033, + "loss": 1.2477, + "step": 5500 + }, + { + "epoch": 0.06, + "learning_rate": 0.00035999999999999997, + "loss": 1.245, + "step": 6000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00039, + "loss": 1.2433, + "step": 6500 + }, + { + "epoch": 0.07, + "learning_rate": 0.00041999999999999996, + "loss": 1.2422, + "step": 7000 + }, + { + "epoch": 0.07, + "learning_rate": 0.00045, + "loss": 1.2404, + "step": 7500 + }, + { + "epoch": 0.08, + "learning_rate": 0.00047999999999999996, + "loss": 1.2397, + "step": 8000 + }, + { + "epoch": 0.09, + "learning_rate": 0.0005099999999999999, + "loss": 1.2385, + "step": 8500 + }, + { + "epoch": 0.09, + "learning_rate": 0.00054, + "loss": 1.2378, + "step": 
9000 + }, + { + "epoch": 0.1, + "learning_rate": 0.00057, + "loss": 1.2379, + "step": 9500 + }, + { + "epoch": 0.1, + "learning_rate": 0.0006, + "loss": 1.2372, + "step": 10000 + }, + { + "epoch": 0.1, + "learning_rate": 0.0005966666666666667, + "loss": 1.2359, + "step": 10500 + }, + { + "epoch": 0.11, + "learning_rate": 0.0005933333333333333, + "loss": 1.2361, + "step": 11000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005899999999999999, + "loss": 1.2339, + "step": 11500 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005866666666666665, + "loss": 1.2344, + "step": 12000 + }, + { + "epoch": 0.12, + "learning_rate": 0.0005833333333333333, + "loss": 1.233, + "step": 12500 + }, + { + "epoch": 0.13, + "learning_rate": 0.00058, + "loss": 1.2328, + "step": 13000 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005766666666666666, + "loss": 1.2321, + "step": 13500 + }, + { + "epoch": 0.14, + "learning_rate": 0.0005733333333333334, + "loss": 1.2309, + "step": 14000 + }, + { + "epoch": 0.14, + "learning_rate": 0.00057, + "loss": 1.2315, + "step": 14500 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005666666666666666, + "loss": 1.2301, + "step": 15000 + }, + { + "epoch": 0.15, + "learning_rate": 0.0005633333333333333, + "loss": 1.2305, + "step": 15500 + }, + { + "epoch": 0.16, + "learning_rate": 0.00056, + "loss": 1.2297, + "step": 16000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005566799999999999, + "loss": 1.2307, + "step": 16500 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005533466666666666, + "loss": 1.2295, + "step": 17000 + }, + { + "epoch": 0.17, + "learning_rate": 0.0005500133333333333, + "loss": 1.2301, + "step": 17500 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005466799999999999, + "loss": 1.2282, + "step": 18000 + }, + { + "epoch": 0.18, + "learning_rate": 0.0005433466666666666, + "loss": 1.2287, + "step": 18500 + }, + { + "epoch": 0.19, + "learning_rate": 0.0005400133333333333, + "loss": 1.2283, + "step": 19000 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005366799999999999, + "loss": 1.2284, + "step": 19500 + }, + { + "epoch": 0.2, + "learning_rate": 0.0005333533333333332, + "loss": 1.2286, + "step": 20000 + }, + { + "epoch": 0.2, + "learning_rate": 0.00053002, + "loss": 1.2283, + "step": 20500 + }, + { + "epoch": 0.21, + "learning_rate": 0.0005266866666666666, + "loss": 1.2273, + "step": 21000 + }, + { + "epoch": 0.21, + "learning_rate": 0.0005233533333333333, + "loss": 1.2272, + "step": 21500 + }, + { + "epoch": 0.22, + "learning_rate": 0.0005200199999999999, + "loss": 1.2275, + "step": 22000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005166866666666667, + "loss": 1.2274, + "step": 22500 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005133533333333333, + "loss": 1.2275, + "step": 23000 + }, + { + "epoch": 0.23, + "learning_rate": 0.0005100199999999999, + "loss": 1.2272, + "step": 23500 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005066933333333333, + "loss": 1.2272, + "step": 24000 + }, + { + "epoch": 0.24, + "learning_rate": 0.0005033599999999999, + "loss": 1.2266, + "step": 24500 + }, + { + "epoch": 0.25, + "learning_rate": 0.0005000266666666666, + "loss": 1.2264, + "step": 25000 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004966933333333332, + "loss": 1.2262, + "step": 25500 + }, + { + "epoch": 0.26, + "learning_rate": 0.0004933666666666666, + "loss": 1.2264, + "step": 26000 + }, + { + "epoch": 0.27, + "learning_rate": 0.0004900333333333333, + "loss": 1.2267, + "step": 26500 + }, + { + "epoch": 0.27, + "learning_rate": 0.00048669999999999996, 
+ "loss": 1.2261, + "step": 27000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004833666666666666, + "loss": 1.2268, + "step": 27500 + }, + { + "epoch": 0.28, + "learning_rate": 0.00048004, + "loss": 1.2264, + "step": 28000 + }, + { + "epoch": 0.28, + "learning_rate": 0.0004767066666666666, + "loss": 1.226, + "step": 28500 + }, + { + "epoch": 0.29, + "learning_rate": 0.0004733733333333333, + "loss": 1.2259, + "step": 29000 + }, + { + "epoch": 0.29, + "learning_rate": 0.00047003999999999997, + "loss": 1.2259, + "step": 29500 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046671333333333327, + "loss": 1.2259, + "step": 30000 + }, + { + "epoch": 0.3, + "learning_rate": 0.00046337999999999994, + "loss": 1.2262, + "step": 30500 + }, + { + "epoch": 0.31, + "learning_rate": 0.0004600466666666666, + "loss": 1.2257, + "step": 31000 + }, + { + "epoch": 0.32, + "learning_rate": 0.0004567133333333333, + "loss": 1.2259, + "step": 31500 + }, + { + "epoch": 0.32, + "learning_rate": 0.00045337999999999997, + "loss": 1.2258, + "step": 32000 + }, + { + "epoch": 0.33, + "learning_rate": 0.00045005333333333333, + "loss": 1.2253, + "step": 32500 + }, + { + "epoch": 0.33, + "learning_rate": 0.00044672, + "loss": 1.2258, + "step": 33000 + }, + { + "epoch": 0.34, + "learning_rate": 0.00044338666666666663, + "loss": 1.2256, + "step": 33500 + }, + { + "epoch": 0.34, + "learning_rate": 0.0004400533333333333, + "loss": 1.2255, + "step": 34000 + }, + { + "epoch": 0.34, + "learning_rate": 0.00043672, + "loss": 1.2249, + "step": 34500 + }, + { + "epoch": 0.35, + "learning_rate": 0.0004333933333333333, + "loss": 1.2253, + "step": 35000 + }, + { + "epoch": 0.35, + "learning_rate": 0.00043005999999999996, + "loss": 1.225, + "step": 35500 + }, + { + "epoch": 0.36, + "learning_rate": 0.00042672666666666663, + "loss": 1.2247, + "step": 36000 + }, + { + "epoch": 0.36, + "learning_rate": 0.0004233933333333333, + "loss": 1.2252, + "step": 36500 + }, + { + "epoch": 0.37, + "learning_rate": 0.00042005999999999993, + "loss": 1.2248, + "step": 37000 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004167266666666666, + "loss": 1.2253, + "step": 37500 + }, + { + "epoch": 0.38, + "learning_rate": 0.0004133933333333333, + "loss": 1.2249, + "step": 38000 + }, + { + "epoch": 0.39, + "learning_rate": 0.00041005999999999996, + "loss": 1.2246, + "step": 38500 + }, + { + "epoch": 0.39, + "learning_rate": 0.00040673333333333326, + "loss": 1.2249, + "step": 39000 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004034066666666666, + "loss": 1.2251, + "step": 39500 + }, + { + "epoch": 0.4, + "learning_rate": 0.0004000733333333333, + "loss": 1.225, + "step": 40000 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039673999999999997, + "loss": 1.2252, + "step": 40500 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039340666666666665, + "loss": 1.2244, + "step": 41000 + }, + { + "epoch": 0.41, + "learning_rate": 0.00039007999999999995, + "loss": 1.2243, + "step": 41500 + }, + { + "epoch": 0.42, + "learning_rate": 0.00038674666666666663, + "loss": 1.2246, + "step": 42000 + }, + { + "epoch": 0.42, + "learning_rate": 0.0003834133333333333, + "loss": 1.2245, + "step": 42500 + }, + { + "epoch": 0.43, + "learning_rate": 0.0003800799999999999, + "loss": 1.2243, + "step": 43000 + }, + { + "epoch": 0.43, + "learning_rate": 0.0003767466666666666, + "loss": 1.2244, + "step": 43500 + }, + { + "epoch": 0.44, + "learning_rate": 0.00037341999999999996, + "loss": 1.2247, + "step": 44000 + }, + { + "epoch": 0.45, + "learning_rate": 0.00037008666666666663, + 
"loss": 1.2241, + "step": 44500 + }, + { + "epoch": 0.45, + "learning_rate": 0.0003667533333333333, + "loss": 1.2242, + "step": 45000 + }, + { + "epoch": 0.46, + "learning_rate": 0.00036342, + "loss": 1.2246, + "step": 45500 + }, + { + "epoch": 0.46, + "learning_rate": 0.00036008666666666666, + "loss": 1.2239, + "step": 46000 + }, + { + "epoch": 0.47, + "learning_rate": 0.0003567533333333333, + "loss": 1.224, + "step": 46500 + }, + { + "epoch": 0.47, + "learning_rate": 0.00035341999999999996, + "loss": 1.224, + "step": 47000 + }, + { + "epoch": 0.47, + "learning_rate": 0.00035009333333333326, + "loss": 1.2238, + "step": 47500 + }, + { + "epoch": 0.48, + "learning_rate": 0.00034675999999999994, + "loss": 1.2238, + "step": 48000 + }, + { + "epoch": 0.48, + "learning_rate": 0.0003434266666666666, + "loss": 1.2242, + "step": 48500 + }, + { + "epoch": 0.49, + "learning_rate": 0.00034009999999999997, + "loss": 1.2238, + "step": 49000 + }, + { + "epoch": 0.49, + "learning_rate": 0.00033676666666666665, + "loss": 1.2236, + "step": 49500 + }, + { + "epoch": 0.5, + "learning_rate": 0.0003334333333333333, + "loss": 1.224, + "step": 50000 + }, + { + "epoch": 0.51, + "learning_rate": 0.0003301, + "loss": 1.2239, + "step": 50500 + }, + { + "epoch": 0.51, + "learning_rate": 0.0003267666666666667, + "loss": 1.2238, + "step": 51000 + }, + { + "epoch": 0.52, + "learning_rate": 0.0003234333333333333, + "loss": 1.2235, + "step": 51500 + }, + { + "epoch": 0.52, + "learning_rate": 0.0003201, + "loss": 1.2233, + "step": 52000 + }, + { + "epoch": 0.53, + "learning_rate": 0.00031676666666666665, + "loss": 1.2242, + "step": 52500 + }, + { + "epoch": 0.53, + "learning_rate": 0.0003134333333333333, + "loss": 1.223, + "step": 53000 + }, + { + "epoch": 0.54, + "learning_rate": 0.00031010666666666663, + "loss": 1.2236, + "step": 53500 + }, + { + "epoch": 0.54, + "learning_rate": 0.0003067733333333333, + "loss": 1.2234, + "step": 54000 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030344, + "loss": 1.2232, + "step": 54500 + }, + { + "epoch": 0.55, + "learning_rate": 0.00030010666666666666, + "loss": 1.2235, + "step": 55000 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029677999999999996, + "loss": 1.2234, + "step": 55500 + }, + { + "epoch": 0.56, + "learning_rate": 0.00029344666666666664, + "loss": 1.2232, + "step": 56000 + }, + { + "epoch": 0.56, + "learning_rate": 0.0002901133333333333, + "loss": 1.2233, + "step": 56500 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028678, + "loss": 1.223, + "step": 57000 + }, + { + "epoch": 0.57, + "learning_rate": 0.00028345333333333335, + "loss": 1.223, + "step": 57500 + }, + { + "epoch": 0.58, + "learning_rate": 0.00028011999999999997, + "loss": 1.2232, + "step": 58000 + }, + { + "epoch": 0.58, + "learning_rate": 0.00027678666666666664, + "loss": 1.2229, + "step": 58500 + }, + { + "epoch": 0.59, + "learning_rate": 0.0002734533333333333, + "loss": 1.223, + "step": 59000 + }, + { + "epoch": 0.59, + "learning_rate": 0.00027012, + "loss": 1.2234, + "step": 59500 + }, + { + "epoch": 0.6, + "learning_rate": 0.0002667933333333333, + "loss": 1.223, + "step": 60000 + }, + { + "epoch": 0.6, + "learning_rate": 0.00026346, + "loss": 1.2227, + "step": 60500 + }, + { + "epoch": 0.61, + "learning_rate": 0.00026012666666666665, + "loss": 1.2229, + "step": 61000 + }, + { + "epoch": 0.61, + "learning_rate": 0.00025679333333333327, + "loss": 1.2226, + "step": 61500 + }, + { + "epoch": 0.62, + "learning_rate": 0.00025346666666666663, + "loss": 1.2228, + "step": 62000 + }, + { + "epoch": 0.62, 
+ "learning_rate": 0.0002501333333333333, + "loss": 1.2225, + "step": 62500 + }, + { + "epoch": 0.63, + "learning_rate": 0.0002468, + "loss": 1.2222, + "step": 63000 + }, + { + "epoch": 0.64, + "learning_rate": 0.00024346666666666666, + "loss": 1.2223, + "step": 63500 + }, + { + "epoch": 0.64, + "learning_rate": 0.0002401333333333333, + "loss": 1.2225, + "step": 64000 + }, + { + "epoch": 0.65, + "learning_rate": 0.00023680666666666666, + "loss": 1.2225, + "step": 64500 + }, + { + "epoch": 0.65, + "learning_rate": 0.0002334733333333333, + "loss": 1.222, + "step": 65000 + }, + { + "epoch": 0.66, + "learning_rate": 0.00023014, + "loss": 1.2215, + "step": 65500 + }, + { + "epoch": 0.66, + "learning_rate": 0.00022680666666666666, + "loss": 1.2218, + "step": 66000 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022348, + "loss": 1.2219, + "step": 66500 + }, + { + "epoch": 0.67, + "learning_rate": 0.00022014666666666667, + "loss": 1.2217, + "step": 67000 + }, + { + "epoch": 0.68, + "learning_rate": 0.0002168133333333333, + "loss": 1.2214, + "step": 67500 + }, + { + "epoch": 0.68, + "learning_rate": 0.00021348, + "loss": 1.2215, + "step": 68000 + }, + { + "epoch": 0.69, + "learning_rate": 0.0002101533333333333, + "loss": 1.2215, + "step": 68500 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020681999999999997, + "loss": 1.2212, + "step": 69000 + }, + { + "epoch": 0.69, + "learning_rate": 0.00020348666666666668, + "loss": 1.2211, + "step": 69500 + }, + { + "epoch": 0.7, + "learning_rate": 0.0002001533333333333, + "loss": 1.2213, + "step": 70000 + }, + { + "epoch": 0.7, + "learning_rate": 0.00019681999999999998, + "loss": 1.221, + "step": 70500 + }, + { + "epoch": 0.71, + "learning_rate": 0.0001934933333333333, + "loss": 1.2211, + "step": 71000 + }, + { + "epoch": 0.71, + "learning_rate": 0.00019015999999999998, + "loss": 1.2207, + "step": 71500 + }, + { + "epoch": 0.72, + "learning_rate": 0.00018682666666666663, + "loss": 1.2211, + "step": 72000 + }, + { + "epoch": 0.72, + "learning_rate": 0.0001834933333333333, + "loss": 1.221, + "step": 72500 + }, + { + "epoch": 0.73, + "learning_rate": 0.00018016666666666664, + "loss": 1.2208, + "step": 73000 + }, + { + "epoch": 0.73, + "learning_rate": 0.0001768333333333333, + "loss": 1.2208, + "step": 73500 + }, + { + "epoch": 0.74, + "learning_rate": 0.0001735, + "loss": 1.2207, + "step": 74000 + }, + { + "epoch": 0.74, + "learning_rate": 0.00017016666666666664, + "loss": 1.2207, + "step": 74500 + }, + { + "epoch": 0.75, + "learning_rate": 0.00016684, + "loss": 1.2206, + "step": 75000 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016350666666666664, + "loss": 1.2207, + "step": 75500 + }, + { + "epoch": 0.76, + "learning_rate": 0.00016017333333333332, + "loss": 1.2207, + "step": 76000 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015684, + "loss": 1.2209, + "step": 76500 + }, + { + "epoch": 0.77, + "learning_rate": 0.00015350666666666664, + "loss": 1.2205, + "step": 77000 + }, + { + "epoch": 0.78, + "learning_rate": 0.00015018, + "loss": 1.2208, + "step": 77500 + }, + { + "epoch": 0.78, + "learning_rate": 0.00014684666666666665, + "loss": 1.2207, + "step": 78000 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014351333333333333, + "loss": 1.2204, + "step": 78500 + }, + { + "epoch": 0.79, + "learning_rate": 0.00014017999999999998, + "loss": 1.2204, + "step": 79000 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013685333333333333, + "loss": 1.2202, + "step": 79500 + }, + { + "epoch": 0.8, + "learning_rate": 0.00013351999999999998, + "loss": 1.2203, + 
"step": 80000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00013018666666666666, + "loss": 1.2203, + "step": 80500 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012685333333333333, + "loss": 1.2204, + "step": 81000 + }, + { + "epoch": 0.81, + "learning_rate": 0.00012352666666666666, + "loss": 1.2203, + "step": 81500 + }, + { + "epoch": 0.82, + "learning_rate": 0.00012019333333333333, + "loss": 1.2201, + "step": 82000 + }, + { + "epoch": 0.82, + "learning_rate": 0.00011685999999999999, + "loss": 1.2199, + "step": 82500 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011352666666666665, + "loss": 1.2201, + "step": 83000 + }, + { + "epoch": 0.83, + "learning_rate": 0.00011019333333333331, + "loss": 1.2199, + "step": 83500 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010686666666666666, + "loss": 1.2199, + "step": 84000 + }, + { + "epoch": 0.84, + "learning_rate": 0.00010353333333333332, + "loss": 1.22, + "step": 84500 + }, + { + "epoch": 0.85, + "learning_rate": 0.0001002, + "loss": 1.2201, + "step": 85000 + }, + { + "epoch": 0.85, + "learning_rate": 9.686666666666666e-05, + "loss": 1.2201, + "step": 85500 + }, + { + "epoch": 0.86, + "learning_rate": 9.353333333333332e-05, + "loss": 1.22, + "step": 86000 + }, + { + "epoch": 0.86, + "learning_rate": 9.02e-05, + "loss": 1.2199, + "step": 86500 + }, + { + "epoch": 0.87, + "learning_rate": 8.686666666666666e-05, + "loss": 1.2198, + "step": 87000 + }, + { + "epoch": 0.88, + "learning_rate": 8.353333333333332e-05, + "loss": 1.2198, + "step": 87500 + }, + { + "epoch": 0.88, + "learning_rate": 8.020666666666667e-05, + "loss": 1.2196, + "step": 88000 + }, + { + "epoch": 0.89, + "learning_rate": 7.687333333333333e-05, + "loss": 1.2199, + "step": 88500 + }, + { + "epoch": 0.89, + "learning_rate": 7.353999999999999e-05, + "loss": 1.2198, + "step": 89000 + }, + { + "epoch": 0.9, + "learning_rate": 7.020666666666665e-05, + "loss": 1.2196, + "step": 89500 + }, + { + "epoch": 0.9, + "learning_rate": 6.688e-05, + "loss": 1.2195, + "step": 90000 + }, + { + "epoch": 0.91, + "learning_rate": 6.354666666666666e-05, + "loss": 1.2196, + "step": 90500 + }, + { + "epoch": 0.91, + "learning_rate": 6.021333333333333e-05, + "loss": 1.2194, + "step": 91000 + }, + { + "epoch": 0.92, + "learning_rate": 5.687999999999999e-05, + "loss": 1.2196, + "step": 91500 + }, + { + "epoch": 0.92, + "learning_rate": 5.3553333333333334e-05, + "loss": 1.2194, + "step": 92000 + }, + { + "epoch": 0.93, + "learning_rate": 5.022e-05, + "loss": 1.2194, + "step": 92500 + }, + { + "epoch": 0.93, + "learning_rate": 4.6886666666666666e-05, + "loss": 1.2193, + "step": 93000 + }, + { + "epoch": 0.94, + "learning_rate": 4.355333333333333e-05, + "loss": 1.219, + "step": 93500 + }, + { + "epoch": 0.94, + "learning_rate": 4.0226666666666666e-05, + "loss": 1.2193, + "step": 94000 + }, + { + "epoch": 0.94, + "learning_rate": 3.6893333333333335e-05, + "loss": 1.2193, + "step": 94500 + }, + { + "epoch": 0.95, + "learning_rate": 3.356e-05, + "loss": 1.2194, + "step": 95000 + }, + { + "epoch": 0.95, + "learning_rate": 3.0226666666666663e-05, + "loss": 1.2192, + "step": 95500 + }, + { + "epoch": 0.96, + "learning_rate": 2.69e-05, + "loss": 1.2192, + "step": 96000 + }, + { + "epoch": 0.96, + "learning_rate": 2.3566666666666666e-05, + "loss": 1.2192, + "step": 96500 + }, + { + "epoch": 0.97, + "learning_rate": 2.023333333333333e-05, + "loss": 1.2191, + "step": 97000 + }, + { + "epoch": 0.97, + "learning_rate": 1.6899999999999997e-05, + "loss": 1.2189, + "step": 97500 + }, + { + "epoch": 0.98, + 
"learning_rate": 1.3573333333333331e-05, + "loss": 1.2191, + "step": 98000 + }, + { + "epoch": 0.98, + "learning_rate": 1.024e-05, + "loss": 1.219, + "step": 98500 + }, + { + "epoch": 0.99, + "learning_rate": 6.906666666666666e-06, + "loss": 1.2192, + "step": 99000 + }, + { + "epoch": 0.99, + "learning_rate": 3.573333333333333e-06, + "loss": 1.2192, + "step": 99500 + }, + { + "epoch": 1.0, + "learning_rate": 2.4666666666666665e-07, + "loss": 1.2192, + "step": 100000 + } + ], + "max_steps": 100000, + "num_train_epochs": 1, + "total_flos": 4.3042507367689535e+19, + "trial_name": null, + "trial_params": null +} diff --git a/propositional_logic/lt_mlm/checkpoint-100000/training_args.bin b/propositional_logic/lt_mlm/checkpoint-100000/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..3a3714d22119f3e9c3715a628a0f0b38ff19395f --- /dev/null +++ b/propositional_logic/lt_mlm/checkpoint-100000/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8977891f234daea77e0d92d656616105cf1eae06c0ba8268d5dfbb09c99054ff +size 2735 diff --git a/propositional_logic/lt_mlm/config.json b/propositional_logic/lt_mlm/config.json new file mode 100644 index 0000000000000000000000000000000000000000..102fa0515a2e7f9a348f86a1dcb7d135d6cfa8c6 --- /dev/null +++ b/propositional_logic/lt_mlm/config.json @@ -0,0 +1,27 @@ +{ + "architectures": [ + "RobertaForMaskedLM" + ], + "attention_probs_dropout_prob": 0.1, + "bos_token_id": 0, + "classifier_dropout": null, + "eos_token_id": 2, + "gradient_checkpointing": false, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-05, + "max_position_embeddings": 514, + "model_type": "roberta", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 1, + "position_embedding_type": "absolute", + "torch_dtype": "float32", + "transformers_version": "4.10.3", + "type_vocab_size": 1, + "use_cache": true, + "vocab_size": 1200 +} diff --git a/propositional_logic/lt_mlm/pytorch_model.bin b/propositional_logic/lt_mlm/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..429948f73d4102f926a1455b98d19f197736281d --- /dev/null +++ b/propositional_logic/lt_mlm/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f23111770b8a777a22ecb76287021a193552ce1614742744e911970c7e51751 +size 347937771 diff --git a/propositional_logic/lt_mlm/runs/Mar11_06-48-33_allennlp-server4/1647095768.8678484/events.out.tfevents.1647095768.allennlp-server4.29741.1 b/propositional_logic/lt_mlm/runs/Mar11_06-48-33_allennlp-server4/1647095768.8678484/events.out.tfevents.1647095768.allennlp-server4.29741.1 new file mode 100644 index 0000000000000000000000000000000000000000..a2609bc24d55f4568b8ed7e0d28c3761a1b95aaf --- /dev/null +++ b/propositional_logic/lt_mlm/runs/Mar11_06-48-33_allennlp-server4/1647095768.8678484/events.out.tfevents.1647095768.allennlp-server4.29741.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5063100fa937e6bd565c5f870e693cf84533d0517a96d53cb66dd2f683b84d8d +size 4441 diff --git a/propositional_logic/lt_mlm/runs/Mar11_06-48-33_allennlp-server4/events.out.tfevents.1647095757.allennlp-server4.29741.0 b/propositional_logic/lt_mlm/runs/Mar11_06-48-33_allennlp-server4/events.out.tfevents.1647095757.allennlp-server4.29741.0 new file mode 100644 index 
0000000000000000000000000000000000000000..23310bf7aa1595371f423ee2b2c9d05275679e75 --- /dev/null +++ b/propositional_logic/lt_mlm/runs/Mar11_06-48-33_allennlp-server4/events.out.tfevents.1647095757.allennlp-server4.29741.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:557d054d08ab35fe65940752d001e591dd5c80d669d4dd1f4ec9ec3e24afba39 +size 35479 diff --git a/propositional_logic/lt_mlm/training_args.bin b/propositional_logic/lt_mlm/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..3a3714d22119f3e9c3715a628a0f0b38ff19395f --- /dev/null +++ b/propositional_logic/lt_mlm/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8977891f234daea77e0d92d656616105cf1eae06c0ba8268d5dfbb09c99054ff +size 2735
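Note on the trainer_state.json files added above: each one stores the Hugging Face Trainer log history as a list of {epoch, learning_rate, loss, step} records (in the log shown here the loss plateaus around 1.22 over 100,000 steps). Below is a minimal inspection sketch, assuming the log above corresponds to propositional_logic/lt_mlm/checkpoint-100000/trainer_state.json (the file that immediately precedes training_args.bin in this diff); the plotting part is optional and assumes matplotlib is installed.

import json

# Assumed path: the trainer_state.json preceding training_args.bin in this diff.
state_path = "propositional_logic/lt_mlm/checkpoint-100000/trainer_state.json"

with open(state_path) as f:
    state = json.load(f)

# Keep only records that carry a training loss (the final summary entry does not).
records = [rec for rec in state["log_history"] if "loss" in rec]
steps   = [rec["step"] for rec in records]
losses  = [rec["loss"] for rec in records]
lrs     = [rec["learning_rate"] for rec in records]

print(f"{len(records)} logged points, final loss {losses[-1]:.4f} at step {steps[-1]}")

# Optional plot of loss and learning rate over training (matplotlib assumed available):
# import matplotlib.pyplot as plt
# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))
# ax1.plot(steps, losses); ax1.set_xlabel("step"); ax1.set_ylabel("training loss")
# ax2.plot(steps, lrs);    ax2.set_xlabel("step"); ax2.set_ylabel("learning rate")
# plt.tight_layout(); plt.show()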
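The learning_rate column in the log above is consistent with the standard linear warmup/linear decay schedule: warmup to a peak of 6e-4 by step 10,000, then linear decay toward 0 at max_steps = 100,000. The sketch below reproduces that shape with transformers' get_linear_schedule_with_warmup; the optimizer choice and peak learning rate are assumptions inferred from the logged values, not read from training_args.bin (which appears here only as an LFS pointer).

import torch
from transformers import get_linear_schedule_with_warmup

# Dummy parameter so the optimizer has something to step; hyperparameters inferred from the log.
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.AdamW(params, lr=6e-4)  # peak LR reached at step 10,000 in the log
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=10_000, num_training_steps=100_000
)

for step in range(1, 100_001):
    optimizer.step()
    scheduler.step()
    if step % 10_000 == 0:
        # e.g. 6e-4 at step 10,000, ~3e-4 at step 55,000, ~0 by step 100,000
        print(step, scheduler.get_last_lr()[0])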
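The propositional_logic/lt_mlm directory added above is a complete RobertaForMaskedLM export (config.json plus pytorch_model.bin, stored as a ~348 MB Git LFS object). A minimal loading sketch, assuming the repository has been cloned and `git lfs pull` has replaced the three-line LFS pointers with the real weight files; note the custom vocab_size of 1200, so a matching tokenizer from this repository (not shown in this part of the diff) would be needed for actual masked-LM inference.

from transformers import RobertaConfig, RobertaForMaskedLM

# Path from this diff; requires `git lfs pull` so pytorch_model.bin is the real weights file.
model_dir = "propositional_logic/lt_mlm"

config = RobertaConfig.from_pretrained(model_dir)   # vocab_size=1200, 12 layers, hidden size 768
model = RobertaForMaskedLM.from_pretrained(model_dir)
model.eval()

print(f"loaded {sum(p.numel() for p in model.parameters()):,} parameters")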